Dataset columns (name, dtype, value range):

repo               stringlengths   7 - 55
path               stringlengths   4 - 223
url                stringlengths   87 - 315
code               stringlengths   75 - 104k
code_tokens        list
docstring          stringlengths   1 - 46.9k
docstring_tokens   list
language           stringclasses   1 value
partition          stringclasses   3 values
avg_line_len       float64         7.91 - 980
HewlettPackard/python-hpOneView
hpOneView/resources/resource.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/resource.py#L613-L638
def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):
    """Helps to build a URI with resource path and its sub resource path.

    Args:
        resoure_id_or_uri: ID/URI of the main resource.
        subresource_id__or_uri: ID/URI of the sub resource.
        subresource_path: Sub resource path to be added with the URI.

    Returns:
        Returns URI
    """
    if subresource_id_or_uri and "/" in subresource_id_or_uri:
        return subresource_id_or_uri
    else:
        if not resource_id_or_uri:
            raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)

        resource_uri = self.build_uri(resource_id_or_uri)
        uri = "{}/{}/{}".format(resource_uri, subresource_path, str(subresource_id_or_uri or ''))
        uri = uri.replace("//", "/")

        if uri.endswith("/"):
            uri = uri[:-1]

        return uri
Helps to build a URI with resource path and its sub resource path. Args: resoure_id_or_uri: ID/URI of the main resource. subresource_id__or_uri: ID/URI of the sub resource. subresource_path: Sub resource path to be added with the URI. Returns: Returns URI
python
train
37.153846
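A minimal standalone sketch of the URI-joining step in the build_subresource_uri entry above. The base resource URI and sub-resource path are hypothetical values; in the real client the base URI comes from self.build_uri() and missing IDs raise HPOneViewValueError.

resource_uri = "/rest/enclosures/09SGH100X6J1"      # hypothetical base resource URI
subresource_path = "environmentalConfiguration"     # hypothetical sub-resource path
uri = "{}/{}/{}".format(resource_uri, subresource_path, "")  # no sub-resource ID given
uri = uri.replace("//", "/")   # collapse any doubled slashes
if uri.endswith("/"):
    uri = uri[:-1]             # drop the trailing slash left by the empty ID
print(uri)  # /rest/enclosures/09SGH100X6J1/environmentalConfiguration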
peterbrittain/asciimatics
asciimatics/widgets.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/widgets.py#L1587-L1595
def _pick_colours(self, palette_name, selected=False):
    """
    Pick the rendering colour for a widget based on the current state.

    :param palette_name: The stem name for the widget - e.g. "button".
    :param selected: Whether this item is selected or not.
    :returns: A colour tuple (fg, attr, bg) to be used.
    """
    return self._frame.palette[self._pick_palette_key(palette_name, selected)]
Pick the rendering colour for a widget based on the current state. :param palette_name: The stem name for the widget - e.g. "button". :param selected: Whether this item is selected or not. :returns: A colour tuple (fg, attr, bg) to be used.
python
train
47.444444
FSX/misaka
misaka/api.py
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L96-L125
def html(text, extensions=0, render_flags=0):
    """
    Convert markdown text to HTML.

    ``extensions`` can be a list or tuple of extensions (e.g.
    ``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
    (e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).

    ``render_flags`` can be a list or tuple of flags (e.g.
    ``('skip-html', 'hard-wrap')``) or an integer
    (e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
    """
    extensions = args_to_int(extension_map, extensions)
    render_flags = args_to_int(html_flag_map, render_flags)

    ib = lib.hoedown_buffer_new(IUNIT)
    ob = lib.hoedown_buffer_new(OUNIT)
    renderer = lib.hoedown_html_renderer_new(render_flags, 0)
    document = lib.hoedown_document_new(renderer, extensions, 16);

    lib.hoedown_buffer_puts(ib, text.encode('utf-8'))
    lib.hoedown_document_render(document, ob, ib.data, ib.size);

    lib.hoedown_buffer_free(ib);
    lib.hoedown_document_free(document);
    lib.hoedown_html_renderer_free(renderer);

    try:
        return to_string(ob)
    finally:
        lib.hoedown_buffer_free(ob);
Convert markdown text to HTML. ``extensions`` can be a list or tuple of extensions (e.g. ``('fenced-code', 'footnotes', 'strikethrough')``) or an integer (e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``). ``render_flags`` can be a list or tuple of flags (e.g. ``('skip-html', 'hard-wrap')``) or an integer (e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
python
train
35.933333
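A short usage sketch for the html() entry above, assuming it is called through the public misaka package as documented; the Markdown snippet is made up.

import misaka

# Extensions can be passed as a tuple of names, as the docstring describes.
print(misaka.html("A ~~strikethrough~~ and a `code span`.",
                  extensions=('strikethrough', 'fenced-code')))
# Expected output is an HTML paragraph containing <del>strikethrough</del> markup.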
wreckage/django-happenings
happenings/utils/displays.py
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/displays.py#L25-L40
def add_occurrences(events, count):
    """
    Adds an occurrence key to the event object w/ a list of occurrences and
    adds a popover (for use with twitter bootstrap). The occurrence is added
    so that each event can be aware of what day(s) it occurs in the month.
    """
    for day in count:
        for item in count[day]:
            for event in events:
                if event.pk == item[1]:
                    try:
                        event.occurrence.append(day)
                    except AttributeError:
                        event.occurrence = []
                        event.occurrence.append(day)
Adds an occurrence key to the event object w/ a list of occurrences and adds a popover (for use with twitter bootstrap). The occurrence is added so that each event can be aware of what day(s) it occurs in the month.
python
test
38.4375
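A self-contained sketch of how add_occurrences() mutates its inputs; the event objects and the count mapping are made-up stand-ins for the Django model instances and calendar data the real code receives.

from types import SimpleNamespace

from happenings.utils.displays import add_occurrences  # import path assumed from the file path above

events = [SimpleNamespace(pk=1, title='Standup'), SimpleNamespace(pk=2, title='Review')]
# count maps a day of the month to (title, pk) pairs occurring on that day (structure assumed).
count = {3: [('Standup', 1)], 10: [('Standup', 1), ('Review', 2)]}

add_occurrences(events, count)
print(events[0].occurrence)  # [3, 10] - days on which the event with pk=1 occurs
print(events[1].occurrence)  # [10]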
gwastro/pycbc
pycbc/workflow/plotting.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/plotting.py#L274-L296
def make_ifar_plot(workflow, trigger_file, out_dir, tags=None,
                   hierarchical_level=None):
    """ Creates a node in the workflow for plotting cumulative histogram
    of IFAR values.
    """
    if hierarchical_level is not None and tags:
        tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
                hierarchical_level))] + tags
    elif hierarchical_level is not None and not tags:
        tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
    elif hierarchical_level is None and not tags:
        tags = []
    makedir(out_dir)
    node = PlotExecutable(workflow.cp, 'page_ifar', ifos=workflow.ifos,
                          out_dir=out_dir, tags=tags).create_node()
    node.add_input_opt('--trigger-file', trigger_file)
    if hierarchical_level is not None:
        node.add_opt('--use-hierarchical-level', hierarchical_level)
    node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
    workflow += node
    return node.output_files[0]
Creates a node in the workflow for plotting cumulative histogram of IFAR values.
python
train
42.478261
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L920-L931
def deref(o, timeout_s=None, timeout_val=None):
    """Dereference a Deref object and return its contents.

    If o is an object implementing IBlockingDeref and timeout_s and
    timeout_val are supplied, deref will wait at most timeout_s seconds,
    returning timeout_val if timeout_s seconds elapse and o has not
    returned."""
    if isinstance(o, IDeref):
        return o.deref()
    elif isinstance(o, IBlockingDeref):
        return o.deref(timeout_s, timeout_val)
    raise TypeError(f"Object of type {type(o)} cannot be dereferenced")
Dereference a Deref object and return its contents. If o is an object implementing IBlockingDeref and timeout_s and timeout_val are supplied, deref will wait at most timeout_s seconds, returning timeout_val if timeout_s seconds elapse and o has not returned.
python
test
44.666667
saltstack/salt
salt/states/win_iis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_iis.py#L480-L573
def container_setting(name, container, settings=None):
    '''
    Set the value of the setting for an IIS container.

    :param str name: The name of the IIS container.

    :param str container: The type of IIS container. The container types are:
        AppPools, Sites, SslBindings

    :param str settings: A dictionary of the setting names and their values.

    Example of usage for the ``AppPools`` container:

    .. code-block:: yaml

        site0-apppool-setting:
            win_iis.container_setting:
                - name: site0
                - container: AppPools
                - settings:
                    managedPipelineMode: Integrated
                    processModel.maxProcesses: 1
                    processModel.userName: TestUser
                    processModel.password: TestPassword
                    processModel.identityType: SpecificUser

    Example of usage for the ``Sites`` container:

    .. code-block:: yaml

        site0-site-setting:
            win_iis.container_setting:
                - name: site0
                - container: Sites
                - settings:
                    logFile.logFormat: W3C
                    logFile.period: Daily
                    limits.maxUrlSegments: 32
    '''
    identityType_map2string = {0: 'LocalSystem',
                               1: 'LocalService',
                               2: 'NetworkService',
                               3: 'SpecificUser',
                               4: 'ApplicationPoolIdentity'}
    ret = {'name': name,
           'changes': {},
           'comment': str(),
           'result': None}

    if not settings:
        ret['comment'] = 'No settings to change provided.'
        ret['result'] = True
        return ret

    ret_settings = {
        'changes': {},
        'failures': {},
    }

    current_settings = __salt__['win_iis.get_container_setting'](name=name,
                                                                  container=container,
                                                                  settings=settings.keys())
    for setting in settings:
        # map identity type from numeric to string for comparing
        if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
            settings[setting] = identityType_map2string[settings[setting]]

        if str(settings[setting]) != str(current_settings[setting]):
            ret_settings['changes'][setting] = {'old': current_settings[setting],
                                                'new': settings[setting]}

    if not ret_settings['changes']:
        ret['comment'] = 'Settings already contain the provided values.'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Settings will be changed.'
        ret['changes'] = ret_settings
        return ret

    __salt__['win_iis.set_container_setting'](name=name, container=container,
                                              settings=settings)
    new_settings = __salt__['win_iis.get_container_setting'](name=name,
                                                             container=container,
                                                             settings=settings.keys())
    for setting in settings:
        if str(settings[setting]) != str(new_settings[setting]):
            ret_settings['failures'][setting] = {'old': current_settings[setting],
                                                 'new': new_settings[setting]}
            ret_settings['changes'].pop(setting, None)

    if ret_settings['failures']:
        ret['comment'] = 'Some settings failed to change.'
        ret['changes'] = ret_settings
        ret['result'] = False
    else:
        ret['comment'] = 'Set settings to contain the provided values.'
        ret['changes'] = ret_settings['changes']
        ret['result'] = True

    return ret
Set the value of the setting for an IIS container. :param str name: The name of the IIS container. :param str container: The type of IIS container. The container types are: AppPools, Sites, SslBindings :param str settings: A dictionary of the setting names and their values. Example of usage for the ``AppPools`` container: .. code-block:: yaml site0-apppool-setting: win_iis.container_setting: - name: site0 - container: AppPools - settings: managedPipelineMode: Integrated processModel.maxProcesses: 1 processModel.userName: TestUser processModel.password: TestPassword processModel.identityType: SpecificUser Example of usage for the ``Sites`` container: .. code-block:: yaml site0-site-setting: win_iis.container_setting: - name: site0 - container: Sites - settings: logFile.logFormat: W3C logFile.period: Daily limits.maxUrlSegments: 32
python
train
38.861702
google/pybadges
pybadges/precalculate_text.py
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculate_text.py#L66-L84
def generate_encodeable_characters(characters: Iterable[str],
                                   encodings: Iterable[str]) -> Iterable[str]:
    """Generates the subset of 'characters' that can be encoded by 'encodings'.

    Args:
        characters: The characters to check for encodeability e.g. 'abcd'.
        encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].

    Returns:
        The subset of 'characters' that can be encoded using one of the
        provided encodings.
    """
    for c in characters:
        for encoding in encodings:
            try:
                c.encode(encoding)
                yield c
            except UnicodeEncodeError:
                pass
Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings.
python
test
36.157895
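An illustrative call for the generator in the pybadges entry above; the characters and encodings are arbitrary. As written, a character is yielded once per encoding that accepts it.

from pybadges.precalculate_text import generate_encodeable_characters  # import path assumed

chars = 'ab\u00e9\u20ac'  # 'a', 'b', 'é', '€'
print(list(generate_encodeable_characters(chars, ['ascii'])))
# ['a', 'b'] - 'é' and '€' cannot be encoded as ASCII
print(list(generate_encodeable_characters(chars, ['ascii', 'cp1252'])))
# ['a', 'a', 'b', 'b', 'é', '€'] - repeated once per accepting encoding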
kaniblu/pydumper
dumper/__init__.py
https://github.com/kaniblu/pydumper/blob/ce61b96b09604b52d4bab667ac1862755ca21f3b/dumper/__init__.py#L33-L46
def load(name, path=None, ext="dat", silent=False):
    """
    Loads an object from file with given name and extension.
    Optionally the path can be specified as well.
    """
    filename = __get_filename(path, name, ext)

    if not os.path.exists(filename):
        if not silent:
            raise ValueException("Specified input filename doesn't exist.")
        return None

    with open(filename, "rb") as f:
        return pickle.load(f)
Loads an object from file with given name and extension. Optionally the path can be specified as well.
python
train
31.142857
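A hedged usage sketch for the load() entry above; the object name and directory are hypothetical, and the pickle is assumed to have been written earlier by the package's corresponding dump helper.

from dumper import load  # import path assumed from dumper/__init__.py above

model = load('trained_model', path='cache', ext='dat', silent=True)
if model is None:
    print('no cached model found, retraining instead')  # silent=True returns None for missing files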
kmerkmer/pymer
pymer/base.py
https://github.com/kmerkmer/pymer/blob/c22802436b3756a2e92829c9b234bde6217b683a/pymer/base.py#L38-L41
def unconsume(self, seq):
    '''Subtracts all k-mers in sequence.'''
    for kmer in iter_kmers(seq, self.k, canonical=self.canonical):
        self._decr(kmer)
Subtracts all k-mers in sequence.
python
train
42.5
google/python-adb
adb/common_cli.py
https://github.com/google/python-adb/blob/d9b94b2dda555c14674c19806debb8449c0e9652/adb/common_cli.py#L146-L164
def StartCli(args, adb_commands, extra=None, **device_kwargs):
    """Starts a common CLI interface for this usb path and protocol."""
    try:
        dev = adb_commands()
        dev.ConnectDevice(port_path=args.port_path, serial=args.serial,
                          default_timeout_ms=args.timeout_ms, **device_kwargs)
    except usb_exceptions.DeviceNotFoundError as e:
        print('No device found: {}'.format(e), file=sys.stderr)
        return 1
    except usb_exceptions.CommonUsbError as e:
        print('Could not connect to device: {}'.format(e), file=sys.stderr)
        return 1
    try:
        return _RunMethod(dev, args, extra or {})
    except Exception as e:  # pylint: disable=broad-except
        sys.stdout.write(str(e))
        return 1
    finally:
        dev.Close()
Starts a common CLI interface for this usb path and protocol.
python
train
41
PySimpleGUI/PySimpleGUI
DemoPrograms/Demo_Uno_Card_Game.py
https://github.com/PySimpleGUI/PySimpleGUI/blob/08184197f5bd4580ab5e5aca28bdda30f87b86fc/DemoPrograms/Demo_Uno_Card_Game.py#L633-L655
def setColor(self, color):
    '''Sets Card's color and escape code.'''
    if color == 'blue':
        self.color = 'blue'
        self.colorCode = self.colors['blue']
        self.colorCodeDark = self.colors['dblue']
    elif color == 'red':
        self.color = 'red'
        self.colorCode = self.colors['red']
        self.colorCodeDark = self.colors['dred']
    elif color == 'yellow':
        self.color = 'yellow'
        self.colorCode = self.colors['yellow']
        self.colorCodeDark = self.colors['dyellow']
    elif color == 'green':
        self.color = 'green'
        self.colorCode = self.colors['green']
        self.colorCodeDark = self.colors['dgreen']
    elif color == 'wild':  # No color modification
        self.wild = True
        self.color = 'wild'
        self.colorCodeDark = self.colors['dwild']
        self.colorCode = self.colors['wild']
Sets Card's color and escape code.
python
train
40.782609
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L23349-L23389
def create_base_storage(self, logical_size, variant):
    """Starts creating a hard disk storage unit (fixed/dynamic, according
    to the variant flags) in the background.

    The previous storage unit created for this object, if any, must first
    be deleted using :py:func:`delete_storage` , otherwise the operation
    will fail.

    Before the operation starts, the medium is placed in
    :py:attr:`MediumState.creating` state. If the create operation fails,
    the medium will be placed back in :py:attr:`MediumState.not_created` state.

    After the returned progress object reports that the operation has
    successfully completed, the medium state will be set to
    :py:attr:`MediumState.created` , the medium will be remembered by this
    VirtualBox installation and may be attached to virtual machines.

    in logical_size of type int
        Maximum logical size of the medium in bytes.

    in variant of type :class:`MediumVariant`
        Exact image variant which should be created (as a combination of
        :py:class:`MediumVariant` flags).

    return progress of type :class:`IProgress`
        Progress object to track the operation completion.

    raises :class:`VBoxErrorNotSupported`
        The variant of storage creation operation is not supported. See

    """
    if not isinstance(logical_size, baseinteger):
        raise TypeError("logical_size can only be an instance of type baseinteger")
    if not isinstance(variant, list):
        raise TypeError("variant can only be an instance of type list")
    for a in variant[:10]:
        if not isinstance(a, MediumVariant):
            raise TypeError(
                "array can only contain objects of type MediumVariant")
    progress = self._call("createBaseStorage",
                          in_p=[logical_size, variant])
    progress = IProgress(progress)
    return progress
Starts creating a hard disk storage unit (fixed/dynamic, according to the variant flags) in the background. The previous storage unit created for this object, if any, must first be deleted using :py:func:`delete_storage` , otherwise the operation will fail. Before the operation starts, the medium is placed in :py:attr:`MediumState.creating` state. If the create operation fails, the medium will be placed back in :py:attr:`MediumState.not_created` state. After the returned progress object reports that the operation has successfully completed, the medium state will be set to :py:attr:`MediumState.created` , the medium will be remembered by this VirtualBox installation and may be attached to virtual machines. in logical_size of type int Maximum logical size of the medium in bytes. in variant of type :class:`MediumVariant` Exact image variant which should be created (as a combination of :py:class:`MediumVariant` flags). return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`VBoxErrorNotSupported` The variant of storage creation operation is not supported. See
python
train
48.414634
saltstack/salt
salt/utils/systemd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/systemd.py#L84-L94
def has_scope(context=None):
    '''
    Scopes were introduced in systemd 205, this function returns a boolean
    which is true when the minion is systemd-booted and running
    systemd>=205.
    '''
    if not booted(context):
        return False
    _sd_version = version(context)
    if _sd_version is None:
        return False
    return _sd_version >= 205
Scopes were introduced in systemd 205, this function returns a boolean which is true when the minion is systemd-booted and running systemd>=205.
python
train
31.818182
elehcimd/pynb
fabfile.py
https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/fabfile.py#L99-L110
def docker_start(develop=True):
    """
    Start docker container
    """
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    local('docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 -v {}:/code -t pynb'.format(curr_dir))

    if develop:
        # Install package in develop mode: the code in /code is mapped to the installed package.
        docker_exec('python3 setup.py develop')

    print('Jupyter available at http://127.0.0.1:8889')
Start docker container
python
train
37.5
dhilipsiva/orm-choices
orm_choices/core.py
https://github.com/dhilipsiva/orm-choices/blob/e76391722bb761fa81402dcf0668eb0b93b486b3/orm_choices/core.py#L26-L36
def choices(klass):
    """
    Decorator to set `CHOICES` and other attributes
    """
    _choices = []
    for attr in user_attributes(klass.Meta):
        val = getattr(klass.Meta, attr)
        setattr(klass, attr, val[0])
        _choices.append((val[0], val[1]))
    setattr(klass, 'CHOICES', tuple(_choices))
    return klass
Decorator to set `CHOICES` and other attributes
python
train
29.363636
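A usage sketch for the choices decorator above, following the pattern its docstring implies; the class and values are invented, and the ordering of CHOICES may depend on how user_attributes() iterates the Meta attributes.

from orm_choices.core import choices  # import path assumed from the file path above

@choices
class UserType:
    class Meta:
        ADMIN = [0, 'Admin']
        MEMBER = [1, 'Member']

print(UserType.ADMIN)    # 0 - each Meta attribute is flattened to its first element
print(UserType.CHOICES)  # ((0, 'Admin'), (1, 'Member')) - order may vary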
Ezhil-Language-Foundation/open-tamil
solthiruthi/typographical.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/typographical.py#L20-L48
def oridam_generate_patterns(word_in, cm, ed=1, level=0, pos=0, candidates=None):
    """ ed = 1 by default, pos - internal variable for algorithm """
    alternates = cm.get(word_in[pos], [])
    if not candidates:
        candidates = []
    assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
    if (pos > len(word_in)) or ed == 0:
        return candidates
    pfx = ''
    sfx = ''
    curr_candidates = []
    for p in range(0, pos):
        pfx = pfx + word_in[p]
    for p in range(pos + 1, len(word_in)):
        sfx = sfx + word_in[p]
    for alt in alternates:
        word_alt = pfx + alt + sfx
        if not (word_alt in candidates):
            candidates.append(word_alt)
            curr_candidates.append(word_alt)
    for n_pos in range(pos, len(word_in)):
        # already what we have ' candidates ' of this round are edit-distance 1
        for word in curr_candidates:
            oridam_generate_patterns(word, cm, ed - 1, level + 1, n_pos, candidates)
    if level == 0:
        #candidates.append(word_in)
        for n_pos in range(pos, len(word_in)):
            oridam_generate_patterns(word_in, cm, ed, level + 1, n_pos, candidates)
    return candidates
ed = 1 by default, pos - internal variable for algorithm
python
train
40.758621
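A small worked example of the pattern generator above using Latin letters instead of Tamil ones; the confusion map cm is a made-up stand-in for the typographical confusion matrix the library normally supplies.

from solthiruthi.typographical import oridam_generate_patterns  # import path assumed

# cm maps a character to the characters it is commonly confused with (hypothetical values).
cm = {'a': ['A'], 'b': ['B']}
print(oridam_generate_patterns('ab', cm, ed=1))
# ['Ab', 'aB'] - every single-substitution variant within edit distance 1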
Duke-GCB/lando-messaging
lando_messaging/workqueue.py
https://github.com/Duke-GCB/lando-messaging/blob/b90ccc79a874714e0776af8badf505bb2b56c0ec/lando_messaging/workqueue.py#L127-L138
def receive_loop_with_callback(self, queue_name, callback):
    """
    Process incoming messages with callback until close is called.
    :param queue_name: str: name of the queue to poll
    :param callback: func(ch, method, properties, body) called with data when data arrives
    :return:
    """
    self.connect()
    channel = self.create_channel(queue_name)
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(callback, queue=queue_name)
    channel.start_consuming()
Process incoming messages with callback until close is called. :param queue_name: str: name of the queue to poll :param callback: func(ch, method, properties, body) called with data when data arrives :return:
python
train
43.5
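A hedged sketch of how the consumer loop above might be driven; the connection object is a hypothetical instance of the class this method belongs to, and the callback signature follows the docstring (a standard pika-style callback).

def on_message(ch, method, properties, body):
    # Acknowledge after processing so unacknowledged messages are redelivered on failure.
    print('received:', body)
    ch.basic_ack(delivery_tag=method.delivery_tag)

# 'connection' is a hypothetical, already-configured instance exposing the method shown above.
connection.receive_loop_with_callback('task_queue', on_message)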
hydraplatform/hydra-base
hydra_base/lib/network.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/network.py#L577-L652
def _get_all_resource_attributes(network_id, template_id=None):
    """
    Get all the attributes for the nodes, links and groups of a network.
    Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
    then by ID of the node or link.
    """
    base_qry = db.DBSession.query(
        ResourceAttr.id.label('id'),
        ResourceAttr.ref_key.label('ref_key'),
        ResourceAttr.cr_date.label('cr_date'),
        ResourceAttr.attr_is_var.label('attr_is_var'),
        ResourceAttr.node_id.label('node_id'),
        ResourceAttr.link_id.label('link_id'),
        ResourceAttr.group_id.label('group_id'),
        ResourceAttr.network_id.label('network_id'),
        ResourceAttr.attr_id.label('attr_id'),
        Attr.name.label('name'),
        Attr.dimension_id.label('dimension_id'),
    ).filter(Attr.id==ResourceAttr.attr_id)

    all_node_attribute_qry = base_qry.join(Node).filter(Node.network_id==network_id)
    all_link_attribute_qry = base_qry.join(Link).filter(Link.network_id==network_id)
    all_group_attribute_qry = base_qry.join(ResourceGroup).filter(ResourceGroup.network_id==network_id)
    network_attribute_qry = base_qry.filter(ResourceAttr.network_id==network_id)

    #Filter the group attributes by template
    if template_id is not None:
        all_node_attribute_qry = all_node_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
        all_link_attribute_qry = all_link_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
        all_group_attribute_qry = all_group_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
        network_attribute_qry = network_attribute_qry.join(ResourceType, ResourceAttr.network_id==ResourceType.network_id).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)

    x = time.time()
    logging.info("Getting all attributes using execute")
    attribute_qry = all_node_attribute_qry.union(all_link_attribute_qry,
                                                 all_group_attribute_qry,
                                                 network_attribute_qry)
    all_attributes = db.DBSession.execute(attribute_qry.statement).fetchall()
    log.info("%s attrs retrieved in %s", len(all_attributes), time.time()-x)

    logging.info("Attributes retrieved. Processing results...")
    x = time.time()
    node_attr_dict = dict()
    link_attr_dict = dict()
    group_attr_dict = dict()
    network_attr_dict = dict()
    for attr in all_attributes:
        if attr.ref_key == 'NODE':
            nodeattr = node_attr_dict.get(attr.node_id, [])
            nodeattr.append(attr)
            node_attr_dict[attr.node_id] = nodeattr
        elif attr.ref_key == 'LINK':
            linkattr = link_attr_dict.get(attr.link_id, [])
            linkattr.append(attr)
            link_attr_dict[attr.link_id] = linkattr
        elif attr.ref_key == 'GROUP':
            groupattr = group_attr_dict.get(attr.group_id, [])
            groupattr.append(attr)
            group_attr_dict[attr.group_id] = groupattr
        elif attr.ref_key == 'NETWORK':
            networkattr = network_attr_dict.get(attr.network_id, [])
            networkattr.append(attr)
            network_attr_dict[attr.network_id] = networkattr

    all_attributes = {
        'NODE': node_attr_dict,
        'LINK': link_attr_dict,
        'GROUP': group_attr_dict,
        'NETWORK': network_attr_dict,
    }

    logging.info("Attributes processed in %s", time.time()-x)

    return all_attributes
Get all the attributes for the nodes, links and groups of a network. Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP) then by ID of the node or link.
python
train
52.315789
SwissDataScienceCenter/renku-python
renku/cli/rerun.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/rerun.py#L134-L187
def rerun(client, revision, roots, siblings, inputs, paths):
    """Recreate files generated by a sequence of ``run`` commands."""
    graph = Graph(client)
    outputs = graph.build(paths=paths, revision=revision)

    # Check or extend siblings of outputs.
    outputs = siblings(graph, outputs)
    output_paths = {node.path for node in outputs}

    # Normalize and check all starting paths.
    roots = {graph.normalize_path(root) for root in roots}
    assert not roots & output_paths, '--from colides with output paths'

    # Generate workflow and check inputs.
    # NOTE The workflow creation is done before opening a new file.
    workflow = inputs(
        client,
        graph.ascwl(
            input_paths=roots,
            output_paths=output_paths,
            outputs=outputs,
        )
    )

    # Make sure all inputs are pulled from a storage.
    client.pull_paths_from_storage(
        *(path for _, path in workflow.iter_input_files(client.workflow_path))
    )

    # Store the generated workflow used for updating paths.
    import yaml

    output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex)
    with output_file.open('w') as f:
        f.write(
            yaml.dump(
                ascwl(
                    workflow,
                    filter=lambda _, x: x is not None,
                    basedir=client.workflow_path,
                ),
                default_flow_style=False
            )
        )

    # Execute the workflow and relocate all output files.
    from ._cwl import execute
    # FIXME get new output paths for edited tools
    # output_paths = {path for _, path in workflow.iter_output_files()}
    execute(
        client,
        output_file,
        output_paths=output_paths,
    )
Recreate files generated by a sequence of ``run`` commands.
python
train
31.666667
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L496-L503
def get_track_by_mbid(self, mbid):
    """Looks up a track by its MusicBrainz ID"""

    params = {"mbid": mbid}

    doc = _Request(self, "track.getInfo", params).execute(True)

    return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
Looks up a track by its MusicBrainz ID
python
train
32.375
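A usage sketch for the pylast entry above; the API credentials and MusicBrainz ID are placeholders.

import pylast

network = pylast.LastFMNetwork(api_key='YOUR_API_KEY', api_secret='YOUR_API_SECRET')
track = network.get_track_by_mbid('00000000-0000-0000-0000-000000000000')  # placeholder MBID
print(track.get_name())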
jupyter-widgets/ipyleaflet
ipyleaflet/xarray_ds.py
https://github.com/jupyter-widgets/ipyleaflet/blob/74488d4699a5663fc28aabf94ebf08d956a30598/ipyleaflet/xarray_ds.py#L11-L106
def ds2json(ds, u_var, v_var, lat_dim='latitude', lon_dim='longitude',
            units=None):
    """
    Assumes that the velocity components are given on a regular grid
    (fixed spacing in latitude and longitude).

    Parameters
    ----------
    u_var : str
        Name of the U-component (zonal) variable.
    v_var : str
        Name of the V-component (meridional) variable.
    lat_dim : str, optional
        Name of the latitude dimension/coordinate (default: 'latitude').
    lon_dim : str, optional
        Name of the longitude dimension/coordinate (default: 'longitude').
    units : str, optional
        Velocity units (default: try getting units from the
        'units' attributes of `u_var` and `v_var`).
    """
    import numpy as np

    ds = ds.copy()

    for var_name in (u_var, v_var):
        var_dims = ds[var_name].dims
        if set(var_dims) != set([lat_dim, lon_dim]):
            raise ValueError(
                "Invalid dimensions for variable '{}' in Dataset: "
                "should include only {}, found {}."
                .format(var_name, (lat_dim, lon_dim), var_dims)
            )
        # If dataset contains nans replace with 0
        ds[var_name] = ds[var_name].fillna(0)

    if units is None:
        u_var_units = ds[u_var].attrs.get('units')
        v_var_units = ds[v_var].attrs.get('units')
        if u_var_units != v_var_units:
            raise ValueError(
                "Different units found for U-component '{}' and "
                "V-component '{}' variables: '{}' and '{}'"
                .format(u_var, v_var, u_var_units, v_var_units))
        units = u_var_units

    if units is None:
        units = ''

    # Data should be in gaussian grid format (latitudes descending)
    if np.any(np.diff(ds[lat_dim].values) >= 0):
        ds = ds.sel(**{lat_dim: slice(None, None, -1)})

    # infer grid specifications (assume a rectangular grid)
    lat = ds[lat_dim].values
    lon = ds[lon_dim].values

    lon_left = float(lon.min())
    lon_right = float(lon.max())
    lat_lower = float(lat.min())
    lat_upper = float(lat.max())
    dx = float((lon_right - lon_left) / (lon.size - 1))
    dy = float((lat_upper - lat_lower) / (lat.size - 1))
    nx = lon.size
    ny = lat.size

    u_v_spec = ([2, 3],
                ["Eastward current", "Northward current"],
                [u_var, v_var])

    velocity_data = []

    for p_number, p_name, var_name in zip(*u_v_spec):
        velocity_data.append({
            "header": {
                "parameterUnit": units,
                "parameterNumber": p_number,
                "dx": dx,
                "dy": dy,
                "parameterNumberName": p_name,
                "la1": lat_upper,
                "la2": lat_lower,
                "parameterCategory": 2,
                "lo2": lon_right,
                "nx": nx,
                "ny": ny,
                "refTime": "2017-02-01 23:00:00",
                "lo1": lon_left
            },
            "data": ds[var_name].values.flatten().tolist()
        })

    return velocity_data
[ "def", "ds2json", "(", "ds", ",", "u_var", ",", "v_var", ",", "lat_dim", "=", "'latitude'", ",", "lon_dim", "=", "'longitude'", ",", "units", "=", "None", ")", ":", "import", "numpy", "as", "np", "ds", "=", "ds", ".", "copy", "(", ")", "for", "var_name", "in", "(", "u_var", ",", "v_var", ")", ":", "var_dims", "=", "ds", "[", "var_name", "]", ".", "dims", "if", "set", "(", "var_dims", ")", "!=", "set", "(", "[", "lat_dim", ",", "lon_dim", "]", ")", ":", "raise", "ValueError", "(", "\"Invalid dimensions for variable '{}' in Dataset: \"", "\"should include only {}, found {}.\"", ".", "format", "(", "var_name", ",", "(", "lat_dim", ",", "lon_dim", ")", ",", "var_dims", ")", ")", "# If dataset contains nans replace with 0", "ds", "[", "var_name", "]", "=", "ds", "[", "var_name", "]", ".", "fillna", "(", "0", ")", "if", "units", "is", "None", ":", "u_var_units", "=", "ds", "[", "u_var", "]", ".", "attrs", ".", "get", "(", "'units'", ")", "v_var_units", "=", "ds", "[", "v_var", "]", ".", "attrs", ".", "get", "(", "'units'", ")", "if", "u_var_units", "!=", "v_var_units", ":", "raise", "ValueError", "(", "\"Different units found for U-component '{}' and \"", "\"V-component '{}' variables: '{}' and '{}'\"", ".", "format", "(", "u_var", ",", "v_var", ",", "u_var_units", ",", "v_var_units", ")", ")", "units", "=", "u_var_units", "if", "units", "is", "None", ":", "units", "=", "''", "# Data should be in gaussian grid format (latitudes descending)", "if", "np", ".", "any", "(", "np", ".", "diff", "(", "ds", "[", "lat_dim", "]", ".", "values", ")", ">=", "0", ")", ":", "ds", "=", "ds", ".", "sel", "(", "*", "*", "{", "lat_dim", ":", "slice", "(", "None", ",", "None", ",", "-", "1", ")", "}", ")", "# infer grid specifications (assume a rectangular grid)", "lat", "=", "ds", "[", "lat_dim", "]", ".", "values", "lon", "=", "ds", "[", "lon_dim", "]", ".", "values", "lon_left", "=", "float", "(", "lon", ".", "min", "(", ")", ")", "lon_right", "=", "float", "(", "lon", ".", "max", "(", ")", ")", "lat_lower", "=", "float", "(", "lat", ".", "min", "(", ")", ")", "lat_upper", "=", "float", "(", "lat", ".", "max", "(", ")", ")", "dx", "=", "float", "(", "(", "lon_right", "-", "lon_left", ")", "/", "(", "lon", ".", "size", "-", "1", ")", ")", "dy", "=", "float", "(", "(", "lat_upper", "-", "lat_lower", ")", "/", "(", "lat", ".", "size", "-", "1", ")", ")", "nx", "=", "lon", ".", "size", "ny", "=", "lat", ".", "size", "u_v_spec", "=", "(", "[", "2", ",", "3", "]", ",", "[", "\"Eastward current\"", ",", "\"Northward current\"", "]", ",", "[", "u_var", ",", "v_var", "]", ")", "velocity_data", "=", "[", "]", "for", "p_number", ",", "p_name", ",", "var_name", "in", "zip", "(", "*", "u_v_spec", ")", ":", "velocity_data", ".", "append", "(", "{", "\"header\"", ":", "{", "\"parameterUnit\"", ":", "units", ",", "\"parameterNumber\"", ":", "p_number", ",", "\"dx\"", ":", "dx", ",", "\"dy\"", ":", "dy", ",", "\"parameterNumberName\"", ":", "p_name", ",", "\"la1\"", ":", "lat_upper", ",", "\"la2\"", ":", "lat_lower", ",", "\"parameterCategory\"", ":", "2", ",", "\"lo2\"", ":", "lon_right", ",", "\"nx\"", ":", "nx", ",", "\"ny\"", ":", "ny", ",", "\"refTime\"", ":", "\"2017-02-01 23:00:00\"", ",", "\"lo1\"", ":", "lon_left", "}", ",", "\"data\"", ":", "ds", "[", "var_name", "]", ".", "values", ".", "flatten", "(", ")", ".", "tolist", "(", ")", "}", ")", "return", "velocity_data" ]
Assumes that the velocity components are given on a regular grid (fixed spacing in latitude and longitude). Parameters ---------- u_var : str Name of the U-component (zonal) variable. v_var : str Name of the V-component (meridional) variable. lat_dim : str, optional Name of the latitude dimension/coordinate (default: 'latitude'). lon_dim : str, optional Name of the longitude dimension/coordinate (default: 'longitude'). units : str, optional Velocity units (default: try getting units from the 'units' attributes of `u_var` and `v_var`).
[ "Assumes", "that", "the", "velocity", "components", "are", "given", "on", "a", "regular", "grid", "(", "fixed", "spacing", "in", "latitude", "and", "longitude", ")", "." ]
python
train
31.0625
cirruscluster/cirruscluster
cirruscluster/ext/ansible/utils/__init__.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/__init__.py#L159-L163
def is_executable(path): '''is the given path executable?''' return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
[ "def", "is_executable", "(", "path", ")", ":", "return", "(", "stat", ".", "S_IXUSR", "&", "os", ".", "stat", "(", "path", ")", "[", "stat", ".", "ST_MODE", "]", "or", "stat", ".", "S_IXGRP", "&", "os", ".", "stat", "(", "path", ")", "[", "stat", ".", "ST_MODE", "]", "or", "stat", ".", "S_IXOTH", "&", "os", ".", "stat", "(", "path", ")", "[", "stat", ".", "ST_MODE", "]", ")" ]
is the given path executable?
[ "is", "the", "given", "path", "executable?" ]
python
train
46.4
mickbad/mblibs
mblibs/fast.py
https://github.com/mickbad/mblibs/blob/c1f423ef107c94e2ab6bd253e9148f6056e0ef75/mblibs/fast.py#L781-L793
def run(self): """ Fonctionnement du thread """ if self.debug: print("Starting " + self.name) # Lancement du programme du thread if isinstance(self.function, str): globals()[self.function](*self.args, **self.kwargs) else: self.function(*self.args, **self.kwargs) if self.debug: print("Exiting " + self.name)
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "debug", ":", "print", "(", "\"Starting \"", "+", "self", ".", "name", ")", "# Lancement du programme du thread", "if", "isinstance", "(", "self", ".", "function", ",", "str", ")", ":", "globals", "(", ")", "[", "self", ".", "function", "]", "(", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")", "else", ":", "self", ".", "function", "(", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")", "if", "self", ".", "debug", ":", "print", "(", "\"Exiting \"", "+", "self", ".", "name", ")" ]
Fonctionnement du thread
[ "Fonctionnement", "du", "thread" ]
python
train
24.692308
nirum/descent
descent/proxops.py
https://github.com/nirum/descent/blob/074c8452f15a0da638668a4fe139fde06ccfae7f/descent/proxops.py#L186-L219
def smooth(x, rho, penalty, axis=0, newshape=None): """ Applies a smoothing operator along one dimension currently only accepts a matrix as input Parameters ---------- penalty : float axis : int, optional Axis along which to apply the smoothing (Default: 0) newshape : tuple, optional Desired shape of the parameters to apply the nuclear norm to. The given parameters are reshaped to an array with this shape, or not reshaped if the value of newshape is None. (Default: None) """ orig_shape = x.shape if newshape is not None: x = x.reshape(newshape) # Apply Laplacian smoothing (l2 norm on the parameters multiplied by # the laplacian) n = x.shape[axis] lap_op = spdiags([(2 + rho / penalty) * np.ones(n), -1 * np.ones(n), -1 * np.ones(n)], [0, -1, 1], n, n, format='csc') A = penalty * lap_op b = rho * np.rollaxis(x, axis, 0) return np.rollaxis(spsolve(A, b), axis, 0).reshape(orig_shape)
[ "def", "smooth", "(", "x", ",", "rho", ",", "penalty", ",", "axis", "=", "0", ",", "newshape", "=", "None", ")", ":", "orig_shape", "=", "x", ".", "shape", "if", "newshape", "is", "not", "None", ":", "x", "=", "x", ".", "reshape", "(", "newshape", ")", "# Apply Laplacian smoothing (l2 norm on the parameters multiplied by", "# the laplacian)", "n", "=", "x", ".", "shape", "[", "axis", "]", "lap_op", "=", "spdiags", "(", "[", "(", "2", "+", "rho", "/", "penalty", ")", "*", "np", ".", "ones", "(", "n", ")", ",", "-", "1", "*", "np", ".", "ones", "(", "n", ")", ",", "-", "1", "*", "np", ".", "ones", "(", "n", ")", "]", ",", "[", "0", ",", "-", "1", ",", "1", "]", ",", "n", ",", "n", ",", "format", "=", "'csc'", ")", "A", "=", "penalty", "*", "lap_op", "b", "=", "rho", "*", "np", ".", "rollaxis", "(", "x", ",", "axis", ",", "0", ")", "return", "np", ".", "rollaxis", "(", "spsolve", "(", "A", ",", "b", ")", ",", "axis", ",", "0", ")", ".", "reshape", "(", "orig_shape", ")" ]
Applies a smoothing operator along one dimension currently only accepts a matrix as input Parameters ---------- penalty : float axis : int, optional Axis along which to apply the smoothing (Default: 0) newshape : tuple, optional Desired shape of the parameters to apply the nuclear norm to. The given parameters are reshaped to an array with this shape, or not reshaped if the value of newshape is None. (Default: None)
[ "Applies", "a", "smoothing", "operator", "along", "one", "dimension" ]
python
valid
30
DataONEorg/d1_python
client_cli/src/d1_cli/impl/command_processor.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_processor.py#L242-L247
def _output(self, file_like_object, path=None): """Display or save file like object.""" if not path: self._output_to_display(file_like_object) else: self._output_to_file(file_like_object, path)
[ "def", "_output", "(", "self", ",", "file_like_object", ",", "path", "=", "None", ")", ":", "if", "not", "path", ":", "self", ".", "_output_to_display", "(", "file_like_object", ")", "else", ":", "self", ".", "_output_to_file", "(", "file_like_object", ",", "path", ")" ]
Display or save file like object.
[ "Display", "or", "save", "file", "like", "object", "." ]
python
train
39.333333
saltstack/salt
salt/modules/bcache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bcache.py#L451-L561
def device(dev, stats=False, config=False, internals=False, superblock=False): ''' Check the state of a single bcache device CLI example: .. code-block:: bash salt '*' bcache.device bcache0 salt '*' bcache.device /dev/sdc stats=True :param stats: include statistics :param settings: include all settings :param internals: include all internals :param superblock: include superblock info ''' result = {} if not _sysfs_attr(_bcpath(dev), None, 'error', '{0} is not a bcache fo any kind'.format(dev)): return False elif _bcsys(dev, 'set'): # ---------------- It's the cache itself ---------------- result['uuid'] = uuid() base_attr = ['block_size', 'bucket_size', 'cache_available_percent', 'cache_replacement_policy', 'congested'] # ---------------- Parse through both the blockdev & the FS ---------------- result.update(_sysfs_parse(_bcpath(dev), base_attr, stats, config, internals)) result.update(_sysfs_parse(_fspath(), base_attr, stats, config, internals)) result.update(result.pop('base')) else: # ---------------- It's a backing device ---------------- back_uuid = uuid(dev) if back_uuid is not None: result['cache'] = back_uuid try: result['dev'] = os.path.basename(_bcsys(dev, 'dev')) except Exception: pass result['bdev'] = _bdev(dev) base_attr = ['cache_mode', 'running', 'state', 'writeback_running'] base_path = _bcpath(dev) result.update(_sysfs_parse(base_path, base_attr, stats, config, internals)) result.update(result.pop('base')) # ---------------- Modifications ---------------- state = [result['state']] if result.pop('running'): state.append('running') else: state.append('stopped') if 'writeback_running' in result: if result.pop('writeback_running'): state.append('writeback_running') else: state.append('writeback_stopped') result['state'] = state # ---------------- Statistics ---------------- if 'stats' in result: replre = r'(stats|cache)_' statres = result['stats'] for attr in result['stats']: if '/' not in attr: key = re.sub(replre, '', attr) statres[key] = statres.pop(attr) else: stat, key = attr.split('/', 1) stat = re.sub(replre, '', stat) key = re.sub(replre, '', key) if stat not in statres: statres[stat] = {} statres[stat][key] = statres.pop(attr) result['stats'] = statres # ---------------- Internals ---------------- if internals: interres = result.pop('inter_ro', {}) interres.update(result.pop('inter_rw', {})) if interres: for key in interres: if key.startswith('internal'): nkey = re.sub(r'internal[s/]*', '', key) interres[nkey] = interres.pop(key) key = nkey if key.startswith(('btree', 'writeback')): mkey, skey = re.split(r'_', key, maxsplit=1) if mkey not in interres: interres[mkey] = {} interres[mkey][skey] = interres.pop(key) result['internals'] = interres # ---------------- Config ---------------- if config: configres = result['config'] for key in configres: if key.startswith('writeback'): mkey, skey = re.split(r'_', key, maxsplit=1) if mkey not in configres: configres[mkey] = {} configres[mkey][skey] = configres.pop(key) result['config'] = configres # ---------------- Superblock ---------------- if superblock: result['superblock'] = super_(dev) return result
[ "def", "device", "(", "dev", ",", "stats", "=", "False", ",", "config", "=", "False", ",", "internals", "=", "False", ",", "superblock", "=", "False", ")", ":", "result", "=", "{", "}", "if", "not", "_sysfs_attr", "(", "_bcpath", "(", "dev", ")", ",", "None", ",", "'error'", ",", "'{0} is not a bcache fo any kind'", ".", "format", "(", "dev", ")", ")", ":", "return", "False", "elif", "_bcsys", "(", "dev", ",", "'set'", ")", ":", "# ---------------- It's the cache itself ----------------", "result", "[", "'uuid'", "]", "=", "uuid", "(", ")", "base_attr", "=", "[", "'block_size'", ",", "'bucket_size'", ",", "'cache_available_percent'", ",", "'cache_replacement_policy'", ",", "'congested'", "]", "# ---------------- Parse through both the blockdev & the FS ----------------", "result", ".", "update", "(", "_sysfs_parse", "(", "_bcpath", "(", "dev", ")", ",", "base_attr", ",", "stats", ",", "config", ",", "internals", ")", ")", "result", ".", "update", "(", "_sysfs_parse", "(", "_fspath", "(", ")", ",", "base_attr", ",", "stats", ",", "config", ",", "internals", ")", ")", "result", ".", "update", "(", "result", ".", "pop", "(", "'base'", ")", ")", "else", ":", "# ---------------- It's a backing device ----------------", "back_uuid", "=", "uuid", "(", "dev", ")", "if", "back_uuid", "is", "not", "None", ":", "result", "[", "'cache'", "]", "=", "back_uuid", "try", ":", "result", "[", "'dev'", "]", "=", "os", ".", "path", ".", "basename", "(", "_bcsys", "(", "dev", ",", "'dev'", ")", ")", "except", "Exception", ":", "pass", "result", "[", "'bdev'", "]", "=", "_bdev", "(", "dev", ")", "base_attr", "=", "[", "'cache_mode'", ",", "'running'", ",", "'state'", ",", "'writeback_running'", "]", "base_path", "=", "_bcpath", "(", "dev", ")", "result", ".", "update", "(", "_sysfs_parse", "(", "base_path", ",", "base_attr", ",", "stats", ",", "config", ",", "internals", ")", ")", "result", ".", "update", "(", "result", ".", "pop", "(", "'base'", ")", ")", "# ---------------- Modifications ----------------", "state", "=", "[", "result", "[", "'state'", "]", "]", "if", "result", ".", "pop", "(", "'running'", ")", ":", "state", ".", "append", "(", "'running'", ")", "else", ":", "state", ".", "append", "(", "'stopped'", ")", "if", "'writeback_running'", "in", "result", ":", "if", "result", ".", "pop", "(", "'writeback_running'", ")", ":", "state", ".", "append", "(", "'writeback_running'", ")", "else", ":", "state", ".", "append", "(", "'writeback_stopped'", ")", "result", "[", "'state'", "]", "=", "state", "# ---------------- Statistics ----------------", "if", "'stats'", "in", "result", ":", "replre", "=", "r'(stats|cache)_'", "statres", "=", "result", "[", "'stats'", "]", "for", "attr", "in", "result", "[", "'stats'", "]", ":", "if", "'/'", "not", "in", "attr", ":", "key", "=", "re", ".", "sub", "(", "replre", ",", "''", ",", "attr", ")", "statres", "[", "key", "]", "=", "statres", ".", "pop", "(", "attr", ")", "else", ":", "stat", ",", "key", "=", "attr", ".", "split", "(", "'/'", ",", "1", ")", "stat", "=", "re", ".", "sub", "(", "replre", ",", "''", ",", "stat", ")", "key", "=", "re", ".", "sub", "(", "replre", ",", "''", ",", "key", ")", "if", "stat", "not", "in", "statres", ":", "statres", "[", "stat", "]", "=", "{", "}", "statres", "[", "stat", "]", "[", "key", "]", "=", "statres", ".", "pop", "(", "attr", ")", "result", "[", "'stats'", "]", "=", "statres", "# ---------------- Internals ----------------", "if", "internals", ":", "interres", "=", "result", ".", "pop", "(", "'inter_ro'", ",", "{", "}", 
")", "interres", ".", "update", "(", "result", ".", "pop", "(", "'inter_rw'", ",", "{", "}", ")", ")", "if", "interres", ":", "for", "key", "in", "interres", ":", "if", "key", ".", "startswith", "(", "'internal'", ")", ":", "nkey", "=", "re", ".", "sub", "(", "r'internal[s/]*'", ",", "''", ",", "key", ")", "interres", "[", "nkey", "]", "=", "interres", ".", "pop", "(", "key", ")", "key", "=", "nkey", "if", "key", ".", "startswith", "(", "(", "'btree'", ",", "'writeback'", ")", ")", ":", "mkey", ",", "skey", "=", "re", ".", "split", "(", "r'_'", ",", "key", ",", "maxsplit", "=", "1", ")", "if", "mkey", "not", "in", "interres", ":", "interres", "[", "mkey", "]", "=", "{", "}", "interres", "[", "mkey", "]", "[", "skey", "]", "=", "interres", ".", "pop", "(", "key", ")", "result", "[", "'internals'", "]", "=", "interres", "# ---------------- Config ----------------", "if", "config", ":", "configres", "=", "result", "[", "'config'", "]", "for", "key", "in", "configres", ":", "if", "key", ".", "startswith", "(", "'writeback'", ")", ":", "mkey", ",", "skey", "=", "re", ".", "split", "(", "r'_'", ",", "key", ",", "maxsplit", "=", "1", ")", "if", "mkey", "not", "in", "configres", ":", "configres", "[", "mkey", "]", "=", "{", "}", "configres", "[", "mkey", "]", "[", "skey", "]", "=", "configres", ".", "pop", "(", "key", ")", "result", "[", "'config'", "]", "=", "configres", "# ---------------- Superblock ----------------", "if", "superblock", ":", "result", "[", "'superblock'", "]", "=", "super_", "(", "dev", ")", "return", "result" ]
Check the state of a single bcache device CLI example: .. code-block:: bash salt '*' bcache.device bcache0 salt '*' bcache.device /dev/sdc stats=True :param stats: include statistics :param settings: include all settings :param internals: include all internals :param superblock: include superblock info
[ "Check", "the", "state", "of", "a", "single", "bcache", "device" ]
python
train
35.720721
apple/turicreate
src/unity/python/turicreate/_gl_pickle.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/_gl_pickle.py#L33-L50
def _is_not_pickle_safe_gl_model_class(obj_class): """ Check if a Turi create model is pickle safe. The function does it by checking that _CustomModel is the base class. Parameters ---------- obj_class : Class to be checked. Returns ---------- True if the GLC class is a model and is pickle safe. """ if issubclass(obj_class, _toolkits._model.CustomModel): return not obj_class._is_gl_pickle_safe() return False
[ "def", "_is_not_pickle_safe_gl_model_class", "(", "obj_class", ")", ":", "if", "issubclass", "(", "obj_class", ",", "_toolkits", ".", "_model", ".", "CustomModel", ")", ":", "return", "not", "obj_class", ".", "_is_gl_pickle_safe", "(", ")", "return", "False" ]
Check if a Turi create model is pickle safe. The function does it by checking that _CustomModel is the base class. Parameters ---------- obj_class : Class to be checked. Returns ---------- True if the GLC class is a model and is pickle safe.
[ "Check", "if", "a", "Turi", "create", "model", "is", "pickle", "safe", "." ]
python
train
25.388889
Clinical-Genomics/scout
scout/build/variant/clnsig.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/build/variant/clnsig.py#L2-L10
def build_clnsig(clnsig_info): """docstring for build_clnsig""" clnsig_obj = dict( value = clnsig_info['value'], accession = clnsig_info.get('accession'), revstat = clnsig_info.get('revstat') ) return clnsig_obj
[ "def", "build_clnsig", "(", "clnsig_info", ")", ":", "clnsig_obj", "=", "dict", "(", "value", "=", "clnsig_info", "[", "'value'", "]", ",", "accession", "=", "clnsig_info", ".", "get", "(", "'accession'", ")", ",", "revstat", "=", "clnsig_info", ".", "get", "(", "'revstat'", ")", ")", "return", "clnsig_obj" ]
docstring for build_clnsig
[ "docstring", "for", "build_clnsig" ]
python
test
27.555556
senaite/senaite.core
bika/lims/browser/analysisrequest/add2.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L988-L993
def get_method_info(self, obj): """Returns the info for a Method """ info = self.get_base_info(obj) info.update({}) return info
[ "def", "get_method_info", "(", "self", ",", "obj", ")", ":", "info", "=", "self", ".", "get_base_info", "(", "obj", ")", "info", ".", "update", "(", "{", "}", ")", "return", "info" ]
Returns the info for a Method
[ "Returns", "the", "info", "for", "a", "Method" ]
python
train
27
santosjorge/cufflinks
cufflinks/datagen.py
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/datagen.py#L173-L197
def lines(n_traces=5,n=100,columns=None,dateIndex=True,mode=None): """ Returns a DataFrame with the required format for a scatter (lines) plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace columns : [str] List of column names dateIndex : bool If True it will return a datetime index if False it will return a enumerated index mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names """ index=pd.date_range('1/1/15',periods=n) if dateIndex else list(range(n)) df=pd.DataFrame(np.random.randn(n,n_traces),index=index, columns=getName(n_traces,columns=columns,mode=mode)) return df.cumsum()
[ "def", "lines", "(", "n_traces", "=", "5", ",", "n", "=", "100", ",", "columns", "=", "None", ",", "dateIndex", "=", "True", ",", "mode", "=", "None", ")", ":", "index", "=", "pd", ".", "date_range", "(", "'1/1/15'", ",", "periods", "=", "n", ")", "if", "dateIndex", "else", "list", "(", "range", "(", "n", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "np", ".", "random", ".", "randn", "(", "n", ",", "n_traces", ")", ",", "index", "=", "index", ",", "columns", "=", "getName", "(", "n_traces", ",", "columns", "=", "columns", ",", "mode", "=", "mode", ")", ")", "return", "df", ".", "cumsum", "(", ")" ]
Returns a DataFrame with the required format for a scatter (lines) plot Parameters: ----------- n_traces : int Number of traces n : int Number of points for each trace columns : [str] List of column names dateIndex : bool If True it will return a datetime index if False it will return a enumerated index mode : string Format for each item 'abc' for alphabet columns 'stocks' for random stock names
[ "Returns", "a", "DataFrame", "with", "the", "required", "format", "for", "a", "scatter", "(", "lines", ")", "plot" ]
python
train
28.08
quantmind/pulsar
examples/helloworld/manage.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/helloworld/manage.py#L36-L39
def server(description=None, **kwargs): '''Create the :class:`.WSGIServer` running :func:`hello`.''' description = description or 'Pulsar Hello World Application' return wsgi.WSGIServer(hello, description=description, **kwargs)
[ "def", "server", "(", "description", "=", "None", ",", "*", "*", "kwargs", ")", ":", "description", "=", "description", "or", "'Pulsar Hello World Application'", "return", "wsgi", ".", "WSGIServer", "(", "hello", ",", "description", "=", "description", ",", "*", "*", "kwargs", ")" ]
Create the :class:`.WSGIServer` running :func:`hello`.
[ "Create", "the", ":", "class", ":", ".", "WSGIServer", "running", ":", "func", ":", "hello", "." ]
python
train
59
googledatalab/pydatalab
google/datalab/bigquery/_query.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L230-L322
def execute_async(self, output_options=None, sampling=None, context=None, query_params=None): """ Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. query_params: a dictionary containing query parameter types and values, passed to BigQuery. Returns: A Job object that can wait on creating a table or exporting to a file If the output is a table, the Job object additionally has run statistics and query results Raises: Exception if query could not be executed. """ # Default behavior is to execute to a table if output_options is None: output_options = QueryOutput.table() # First, execute the query into a table, using a temporary one if no name is specified batch = output_options.priority == 'low' append = output_options.table_mode == 'append' overwrite = output_options.table_mode == 'overwrite' table_name = output_options.table_name context = context or google.datalab.Context.default() api = _api.Api(context) if table_name is not None: table_name = _utils.parse_table_name(table_name, api.project_id) sql = self._expanded_sql(sampling) try: query_result = api.jobs_insert_query(sql, table_name=table_name, append=append, overwrite=overwrite, batch=batch, use_cache=output_options.use_cache, allow_large_results=output_options.allow_large_results, table_definitions=self.data_sources, query_params=query_params) except Exception as e: raise e if 'jobReference' not in query_result: raise Exception('Unexpected response from server') job_id = query_result['jobReference']['jobId'] if not table_name: try: destination = query_result['configuration']['query']['destinationTable'] table_name = (destination['projectId'], destination['datasetId'], destination['tableId']) except KeyError: # The query was in error raise Exception(_utils.format_query_errors(query_result['status']['errors'])) execute_job = _query_job.QueryJob(job_id, table_name, sql, context=context) # If all we need is to execute the query to a table, we're done if output_options.type == 'table': return execute_job # Otherwise, build an async Job that waits on the query execution then carries out # the specific export operation else: export_args = export_kwargs = None if output_options.type == 'file': if output_options.file_path.startswith('gs://'): export_func = execute_job.result().extract export_args = [output_options.file_path] export_kwargs = { 'format': output_options.file_format, 'csv_delimiter': output_options.csv_delimiter, 'csv_header': output_options.csv_header, 'compress': output_options.compress_file } else: export_func = execute_job.result().to_file export_args = [output_options.file_path] export_kwargs = { 'format': output_options.file_format, 'csv_delimiter': output_options.csv_delimiter, 'csv_header': output_options.csv_header } elif output_options.type == 'dataframe': export_func = execute_job.result().to_dataframe export_args = [] export_kwargs = { 'start_row': output_options.dataframe_start_row, 'max_rows': output_options.dataframe_max_rows } # Perform the export operation with the specified parameters export_func = google.datalab.utils.async_function(export_func) return export_func(*export_args, **export_kwargs)
[ "def", "execute_async", "(", "self", ",", "output_options", "=", "None", ",", "sampling", "=", "None", ",", "context", "=", "None", ",", "query_params", "=", "None", ")", ":", "# Default behavior is to execute to a table", "if", "output_options", "is", "None", ":", "output_options", "=", "QueryOutput", ".", "table", "(", ")", "# First, execute the query into a table, using a temporary one if no name is specified", "batch", "=", "output_options", ".", "priority", "==", "'low'", "append", "=", "output_options", ".", "table_mode", "==", "'append'", "overwrite", "=", "output_options", ".", "table_mode", "==", "'overwrite'", "table_name", "=", "output_options", ".", "table_name", "context", "=", "context", "or", "google", ".", "datalab", ".", "Context", ".", "default", "(", ")", "api", "=", "_api", ".", "Api", "(", "context", ")", "if", "table_name", "is", "not", "None", ":", "table_name", "=", "_utils", ".", "parse_table_name", "(", "table_name", ",", "api", ".", "project_id", ")", "sql", "=", "self", ".", "_expanded_sql", "(", "sampling", ")", "try", ":", "query_result", "=", "api", ".", "jobs_insert_query", "(", "sql", ",", "table_name", "=", "table_name", ",", "append", "=", "append", ",", "overwrite", "=", "overwrite", ",", "batch", "=", "batch", ",", "use_cache", "=", "output_options", ".", "use_cache", ",", "allow_large_results", "=", "output_options", ".", "allow_large_results", ",", "table_definitions", "=", "self", ".", "data_sources", ",", "query_params", "=", "query_params", ")", "except", "Exception", "as", "e", ":", "raise", "e", "if", "'jobReference'", "not", "in", "query_result", ":", "raise", "Exception", "(", "'Unexpected response from server'", ")", "job_id", "=", "query_result", "[", "'jobReference'", "]", "[", "'jobId'", "]", "if", "not", "table_name", ":", "try", ":", "destination", "=", "query_result", "[", "'configuration'", "]", "[", "'query'", "]", "[", "'destinationTable'", "]", "table_name", "=", "(", "destination", "[", "'projectId'", "]", ",", "destination", "[", "'datasetId'", "]", ",", "destination", "[", "'tableId'", "]", ")", "except", "KeyError", ":", "# The query was in error", "raise", "Exception", "(", "_utils", ".", "format_query_errors", "(", "query_result", "[", "'status'", "]", "[", "'errors'", "]", ")", ")", "execute_job", "=", "_query_job", ".", "QueryJob", "(", "job_id", ",", "table_name", ",", "sql", ",", "context", "=", "context", ")", "# If all we need is to execute the query to a table, we're done", "if", "output_options", ".", "type", "==", "'table'", ":", "return", "execute_job", "# Otherwise, build an async Job that waits on the query execution then carries out", "# the specific export operation", "else", ":", "export_args", "=", "export_kwargs", "=", "None", "if", "output_options", ".", "type", "==", "'file'", ":", "if", "output_options", ".", "file_path", ".", "startswith", "(", "'gs://'", ")", ":", "export_func", "=", "execute_job", ".", "result", "(", ")", ".", "extract", "export_args", "=", "[", "output_options", ".", "file_path", "]", "export_kwargs", "=", "{", "'format'", ":", "output_options", ".", "file_format", ",", "'csv_delimiter'", ":", "output_options", ".", "csv_delimiter", ",", "'csv_header'", ":", "output_options", ".", "csv_header", ",", "'compress'", ":", "output_options", ".", "compress_file", "}", "else", ":", "export_func", "=", "execute_job", ".", "result", "(", ")", ".", "to_file", "export_args", "=", "[", "output_options", ".", "file_path", "]", "export_kwargs", "=", "{", "'format'", ":", "output_options", ".", 
"file_format", ",", "'csv_delimiter'", ":", "output_options", ".", "csv_delimiter", ",", "'csv_header'", ":", "output_options", ".", "csv_header", "}", "elif", "output_options", ".", "type", "==", "'dataframe'", ":", "export_func", "=", "execute_job", ".", "result", "(", ")", ".", "to_dataframe", "export_args", "=", "[", "]", "export_kwargs", "=", "{", "'start_row'", ":", "output_options", ".", "dataframe_start_row", ",", "'max_rows'", ":", "output_options", ".", "dataframe_max_rows", "}", "# Perform the export operation with the specified parameters", "export_func", "=", "google", ".", "datalab", ".", "utils", ".", "async_function", "(", "export_func", ")", "return", "export_func", "(", "*", "export_args", ",", "*", "*", "export_kwargs", ")" ]
Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. query_params: a dictionary containing query parameter types and values, passed to BigQuery. Returns: A Job object that can wait on creating a table or exporting to a file If the output is a table, the Job object additionally has run statistics and query results Raises: Exception if query could not be executed.
[ "Initiate", "the", "query", "and", "return", "a", "QueryJob", "." ]
python
train
44.408602
annoviko/pyclustering
pyclustering/core/som_wrapper.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/core/som_wrapper.py#L174-L188
def som_get_capture_objects(som_pointer): """! @brief Returns list of indexes of captured objects by each neuron. @param[in] som_pointer (c_pointer): pointer to object of self-organized map. """ ccore = ccore_library.get() ccore.som_get_capture_objects.restype = POINTER(pyclustering_package) package = ccore.som_get_capture_objects(som_pointer) result = package_extractor(package).extract() return result
[ "def", "som_get_capture_objects", "(", "som_pointer", ")", ":", "ccore", "=", "ccore_library", ".", "get", "(", ")", "ccore", ".", "som_get_capture_objects", ".", "restype", "=", "POINTER", "(", "pyclustering_package", ")", "package", "=", "ccore", ".", "som_get_capture_objects", "(", "som_pointer", ")", "result", "=", "package_extractor", "(", "package", ")", ".", "extract", "(", ")", "return", "result" ]
! @brief Returns list of indexes of captured objects by each neuron. @param[in] som_pointer (c_pointer): pointer to object of self-organized map.
[ "!" ]
python
valid
31.066667
gwastro/pycbc
pycbc/inference/models/base.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base.py#L439-L460
def get_current_stats(self, names=None): """Return one or more of the current stats as a tuple. This function does no computation. It only returns what has already been calculated. If a stat hasn't been calculated, it will be returned as ``numpy.nan``. Parameters ---------- names : list of str, optional Specify the names of the stats to retrieve. If ``None`` (the default), will return ``default_stats``. Returns ------- tuple : The current values of the requested stats, as a tuple. The order of the stats is the same as the names. """ if names is None: names = self.default_stats return self._current_stats.getstats(names)
[ "def", "get_current_stats", "(", "self", ",", "names", "=", "None", ")", ":", "if", "names", "is", "None", ":", "names", "=", "self", ".", "default_stats", "return", "self", ".", "_current_stats", ".", "getstats", "(", "names", ")" ]
Return one or more of the current stats as a tuple. This function does no computation. It only returns what has already been calculated. If a stat hasn't been calculated, it will be returned as ``numpy.nan``. Parameters ---------- names : list of str, optional Specify the names of the stats to retrieve. If ``None`` (the default), will return ``default_stats``. Returns ------- tuple : The current values of the requested stats, as a tuple. The order of the stats is the same as the names.
[ "Return", "one", "or", "more", "of", "the", "current", "stats", "as", "a", "tuple", "." ]
python
train
35.090909
kylejusticemagnuson/pyti
pyti/detrended_price_oscillator.py
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/detrended_price_oscillator.py#L8-L19
def detrended_price_oscillator(data, period): """ Detrended Price Oscillator. Formula: DPO = DATA[i] - Avg(DATA[period/2 + 1]) """ catch_errors.check_for_period_error(data, period) period = int(period) dop = [data[idx] - np.mean(data[idx+1-(int(period/2)+1):idx+1]) for idx in range(period-1, len(data))] dop = fill_for_noncomputable_vals(data, dop) return dop
[ "def", "detrended_price_oscillator", "(", "data", ",", "period", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "dop", "=", "[", "data", "[", "idx", "]", "-", "np", ".", "mean", "(", "data", "[", "idx", "+", "1", "-", "(", "int", "(", "period", "/", "2", ")", "+", "1", ")", ":", "idx", "+", "1", "]", ")", "for", "idx", "in", "range", "(", "period", "-", "1", ",", "len", "(", "data", ")", ")", "]", "dop", "=", "fill_for_noncomputable_vals", "(", "data", ",", "dop", ")", "return", "dop" ]
Detrended Price Oscillator. Formula: DPO = DATA[i] - Avg(DATA[period/2 + 1])
[ "Detrended", "Price", "Oscillator", "." ]
python
train
32.5
google/grr
grr/server/grr_response_server/databases/mem_cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_cronjobs.py#L23-L41
def ReadCronJobs(self, cronjob_ids=None): """Reads a cronjob from the database.""" if cronjob_ids is None: res = [job.Copy() for job in itervalues(self.cronjobs)] else: res = [] for job_id in cronjob_ids: try: res.append(self.cronjobs[job_id].Copy()) except KeyError: raise db.UnknownCronJobError("Cron job with id %s not found." % job_id) for job in res: lease = self.cronjob_leases.get(job.cron_job_id) if lease: job.leased_until, job.leased_by = lease return res
[ "def", "ReadCronJobs", "(", "self", ",", "cronjob_ids", "=", "None", ")", ":", "if", "cronjob_ids", "is", "None", ":", "res", "=", "[", "job", ".", "Copy", "(", ")", "for", "job", "in", "itervalues", "(", "self", ".", "cronjobs", ")", "]", "else", ":", "res", "=", "[", "]", "for", "job_id", "in", "cronjob_ids", ":", "try", ":", "res", ".", "append", "(", "self", ".", "cronjobs", "[", "job_id", "]", ".", "Copy", "(", ")", ")", "except", "KeyError", ":", "raise", "db", ".", "UnknownCronJobError", "(", "\"Cron job with id %s not found.\"", "%", "job_id", ")", "for", "job", "in", "res", ":", "lease", "=", "self", ".", "cronjob_leases", ".", "get", "(", "job", ".", "cron_job_id", ")", "if", "lease", ":", "job", ".", "leased_until", ",", "job", ".", "leased_by", "=", "lease", "return", "res" ]
Reads a cronjob from the database.
[ "Reads", "a", "cronjob", "from", "the", "database", "." ]
python
train
30.631579
genialis/resolwe
resolwe/flow/execution_engines/workflow/__init__.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/execution_engines/workflow/__init__.py#L85-L156
def evaluate(self, data): """Evaluate the code needed to compute a given Data object.""" expression_engine = data.process.requirements.get('expression-engine', None) if expression_engine is not None: expression_engine = self.get_expression_engine(expression_engine) # Parse steps. steps = data.process.run.get('program', None) if steps is None: return if not isinstance(steps, list): raise ExecutionError('Workflow program must be a list of steps.') # Expression engine evaluation context. context = { 'input': data.input, 'steps': collections.OrderedDict(), } for index, step in enumerate(steps): try: step_id = step['id'] step_slug = step['run'] except KeyError as error: raise ExecutionError('Incorrect definition of step "{}", missing property "{}".'.format( step.get('id', index), error )) # Fetch target process. process = Process.objects.filter(slug=step_slug).order_by('-version').first() if not process: raise ExecutionError('Incorrect definition of step "{}", invalid process "{}".'.format( step_id, step_slug )) # Process all input variables. step_input = step.get('input', {}) if not isinstance(step_input, dict): raise ExecutionError('Incorrect definition of step "{}", input must be a dictionary.'.format( step_id )) data_input = self._evaluate_expressions(expression_engine, step_id, step_input, context) # Create the data object. data_object = Data.objects.create( process=process, contributor=data.contributor, tags=data.tags, input=data_input, ) DataDependency.objects.create( parent=data, child=data_object, kind=DataDependency.KIND_SUBPROCESS, ) # Copy permissions. copy_permissions(data, data_object) # Copy collections. for collection in data.collection_set.all(): collection.data.add(data_object) context['steps'][step_id] = data_object.pk # Immediately set our status to done and output all data object identifiers. data.output = { 'steps': list(context['steps'].values()), } data.status = Data.STATUS_DONE
[ "def", "evaluate", "(", "self", ",", "data", ")", ":", "expression_engine", "=", "data", ".", "process", ".", "requirements", ".", "get", "(", "'expression-engine'", ",", "None", ")", "if", "expression_engine", "is", "not", "None", ":", "expression_engine", "=", "self", ".", "get_expression_engine", "(", "expression_engine", ")", "# Parse steps.", "steps", "=", "data", ".", "process", ".", "run", ".", "get", "(", "'program'", ",", "None", ")", "if", "steps", "is", "None", ":", "return", "if", "not", "isinstance", "(", "steps", ",", "list", ")", ":", "raise", "ExecutionError", "(", "'Workflow program must be a list of steps.'", ")", "# Expression engine evaluation context.", "context", "=", "{", "'input'", ":", "data", ".", "input", ",", "'steps'", ":", "collections", ".", "OrderedDict", "(", ")", ",", "}", "for", "index", ",", "step", "in", "enumerate", "(", "steps", ")", ":", "try", ":", "step_id", "=", "step", "[", "'id'", "]", "step_slug", "=", "step", "[", "'run'", "]", "except", "KeyError", "as", "error", ":", "raise", "ExecutionError", "(", "'Incorrect definition of step \"{}\", missing property \"{}\".'", ".", "format", "(", "step", ".", "get", "(", "'id'", ",", "index", ")", ",", "error", ")", ")", "# Fetch target process.", "process", "=", "Process", ".", "objects", ".", "filter", "(", "slug", "=", "step_slug", ")", ".", "order_by", "(", "'-version'", ")", ".", "first", "(", ")", "if", "not", "process", ":", "raise", "ExecutionError", "(", "'Incorrect definition of step \"{}\", invalid process \"{}\".'", ".", "format", "(", "step_id", ",", "step_slug", ")", ")", "# Process all input variables.", "step_input", "=", "step", ".", "get", "(", "'input'", ",", "{", "}", ")", "if", "not", "isinstance", "(", "step_input", ",", "dict", ")", ":", "raise", "ExecutionError", "(", "'Incorrect definition of step \"{}\", input must be a dictionary.'", ".", "format", "(", "step_id", ")", ")", "data_input", "=", "self", ".", "_evaluate_expressions", "(", "expression_engine", ",", "step_id", ",", "step_input", ",", "context", ")", "# Create the data object.", "data_object", "=", "Data", ".", "objects", ".", "create", "(", "process", "=", "process", ",", "contributor", "=", "data", ".", "contributor", ",", "tags", "=", "data", ".", "tags", ",", "input", "=", "data_input", ",", ")", "DataDependency", ".", "objects", ".", "create", "(", "parent", "=", "data", ",", "child", "=", "data_object", ",", "kind", "=", "DataDependency", ".", "KIND_SUBPROCESS", ",", ")", "# Copy permissions.", "copy_permissions", "(", "data", ",", "data_object", ")", "# Copy collections.", "for", "collection", "in", "data", ".", "collection_set", ".", "all", "(", ")", ":", "collection", ".", "data", ".", "add", "(", "data_object", ")", "context", "[", "'steps'", "]", "[", "step_id", "]", "=", "data_object", ".", "pk", "# Immediately set our status to done and output all data object identifiers.", "data", ".", "output", "=", "{", "'steps'", ":", "list", "(", "context", "[", "'steps'", "]", ".", "values", "(", ")", ")", ",", "}", "data", ".", "status", "=", "Data", ".", "STATUS_DONE" ]
Evaluate the code needed to compute a given Data object.
[ "Evaluate", "the", "code", "needed", "to", "compute", "a", "given", "Data", "object", "." ]
python
train
36.208333
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L121-L150
def document(self, document): """ Associate a :class:`~elasticsearch_dsl.Document` subclass with an index. This means that, when this index is created, it will contain the mappings for the ``Document``. If the ``Document`` class doesn't have a default index yet (by defining ``class Index``), this instance will be used. Can be used as a decorator:: i = Index('blog') @i.document class Post(Document): title = Text() # create the index, including Post mappings i.create() # .search() will now return a Search object that will return # properly deserialized Post instances s = i.search() """ self._doc_types.append(document) # If the document index does not have any name, that means the user # did not set any index already to the document. # So set this index as document index if document._index._name is None: document._index = self return document
[ "def", "document", "(", "self", ",", "document", ")", ":", "self", ".", "_doc_types", ".", "append", "(", "document", ")", "# If the document index does not have any name, that means the user", "# did not set any index already to the document.", "# So set this index as document index", "if", "document", ".", "_index", ".", "_name", "is", "None", ":", "document", ".", "_index", "=", "self", "return", "document" ]
Associate a :class:`~elasticsearch_dsl.Document` subclass with an index. This means that, when this index is created, it will contain the mappings for the ``Document``. If the ``Document`` class doesn't have a default index yet (by defining ``class Index``), this instance will be used. Can be used as a decorator:: i = Index('blog') @i.document class Post(Document): title = Text() # create the index, including Post mappings i.create() # .search() will now return a Search object that will return # properly deserialized Post instances s = i.search()
[ "Associate", "a", ":", "class", ":", "~elasticsearch_dsl", ".", "Document", "subclass", "with", "an", "index", ".", "This", "means", "that", "when", "this", "index", "is", "created", "it", "will", "contain", "the", "mappings", "for", "the", "Document", ".", "If", "the", "Document", "class", "doesn", "t", "have", "a", "default", "index", "yet", "(", "by", "defining", "class", "Index", ")", "this", "instance", "will", "be", "used", ".", "Can", "be", "used", "as", "a", "decorator", "::" ]
python
train
35.2
jtwhite79/pyemu
pyemu/la.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/la.py#L1058-L1078
def get_cso_dataframe(self): """ get a dataframe of composite observation sensitivity, as returned by PEST in the seo file. Note that this formulation deviates slightly from the PEST documentation in that the values are divided by (npar-1) rather than by (npar). The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1) Returns: cso : pandas.DataFrame """ assert self.jco is not None assert self.pst is not None weights = self.pst.observation_data.loc[self.jco.to_dataframe().index,"weight"].copy().values cso = np.diag(np.sqrt((self.qhalfx.x.dot(self.qhalfx.x.T))))/(float(self.pst.npar-1)) cso_df = pd.DataFrame.from_dict({'obnme':self.jco.to_dataframe().index,'cso':cso}) cso_df.index=cso_df['obnme'] cso_df.drop('obnme', axis=1, inplace=True) return cso_df
[ "def", "get_cso_dataframe", "(", "self", ")", ":", "assert", "self", ".", "jco", "is", "not", "None", "assert", "self", ".", "pst", "is", "not", "None", "weights", "=", "self", ".", "pst", ".", "observation_data", ".", "loc", "[", "self", ".", "jco", ".", "to_dataframe", "(", ")", ".", "index", ",", "\"weight\"", "]", ".", "copy", "(", ")", ".", "values", "cso", "=", "np", ".", "diag", "(", "np", ".", "sqrt", "(", "(", "self", ".", "qhalfx", ".", "x", ".", "dot", "(", "self", ".", "qhalfx", ".", "x", ".", "T", ")", ")", ")", ")", "/", "(", "float", "(", "self", ".", "pst", ".", "npar", "-", "1", ")", ")", "cso_df", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "{", "'obnme'", ":", "self", ".", "jco", ".", "to_dataframe", "(", ")", ".", "index", ",", "'cso'", ":", "cso", "}", ")", "cso_df", ".", "index", "=", "cso_df", "[", "'obnme'", "]", "cso_df", ".", "drop", "(", "'obnme'", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "return", "cso_df" ]
get a dataframe of composite observation sensitivity, as returned by PEST in the seo file. Note that this formulation deviates slightly from the PEST documentation in that the values are divided by (npar-1) rather than by (npar). The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1) Returns: cso : pandas.DataFrame
[ "get", "a", "dataframe", "of", "composite", "observation", "sensitivity", "as", "returned", "by", "PEST", "in", "the", "seo", "file", "." ]
python
train
42.095238
rmorshea/spectate
spectate/core.py
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L311-L341
def watch(value, spectator_type=Spectator): """Register a :class:`Specatator` to a :class:`Watchable` and return it. In order to register callbacks to an eventful object, you need to create a Spectator that will watch it for you. A :class:`Specatator` is a relatively simple object that has methods for adding, deleting, and triggering callbacks. To create a spectator we call ``spectator = watch(x)``, where x is a Watchable instance. Parameters ---------- value : Watchable A :class:`Watchable` instance. spectator_type : Spectator The type of spectator that will be returned. Returns ------- spectator: spectator_type The :class:`Specatator` (specified by ``spectator_type``) that is was registered to the given instance. """ if isinstance(value, Watchable): wtype = type(value) else: raise TypeError("Expected a Watchable, not %r." % value) spectator = getattr(value, "_instance_spectator", None) if not isinstance(spectator, Spectator): spectator = spectator_type(wtype) value._instance_spectator = spectator return spectator
[ "def", "watch", "(", "value", ",", "spectator_type", "=", "Spectator", ")", ":", "if", "isinstance", "(", "value", ",", "Watchable", ")", ":", "wtype", "=", "type", "(", "value", ")", "else", ":", "raise", "TypeError", "(", "\"Expected a Watchable, not %r.\"", "%", "value", ")", "spectator", "=", "getattr", "(", "value", ",", "\"_instance_spectator\"", ",", "None", ")", "if", "not", "isinstance", "(", "spectator", ",", "Spectator", ")", ":", "spectator", "=", "spectator_type", "(", "wtype", ")", "value", ".", "_instance_spectator", "=", "spectator", "return", "spectator" ]
Register a :class:`Specatator` to a :class:`Watchable` and return it. In order to register callbacks to an eventful object, you need to create a Spectator that will watch it for you. A :class:`Specatator` is a relatively simple object that has methods for adding, deleting, and triggering callbacks. To create a spectator we call ``spectator = watch(x)``, where x is a Watchable instance. Parameters ---------- value : Watchable A :class:`Watchable` instance. spectator_type : Spectator The type of spectator that will be returned. Returns ------- spectator: spectator_type The :class:`Specatator` (specified by ``spectator_type``) that is was registered to the given instance.
[ "Register", "a", ":", "class", ":", "Specatator", "to", "a", ":", "class", ":", "Watchable", "and", "return", "it", "." ]
python
train
36.870968
the01/python-paps
examples/measure/echo_server.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/examples/measure/echo_server.py#L56-L75
def create(host, port): """ Prepare server to execute :return: Modules to execute, cmd line function :rtype: list[WrapperServer], callable | None """ wrapper = WrapperEchoServer({ 'server': None }) d = { 'listen_port': port, 'changer': wrapper } if host: d['listen_bind_ip'] = host ses = EchoServer(d) wrapper.server = ses return [wrapper], cmd_line
[ "def", "create", "(", "host", ",", "port", ")", ":", "wrapper", "=", "WrapperEchoServer", "(", "{", "'server'", ":", "None", "}", ")", "d", "=", "{", "'listen_port'", ":", "port", ",", "'changer'", ":", "wrapper", "}", "if", "host", ":", "d", "[", "'listen_bind_ip'", "]", "=", "host", "ses", "=", "EchoServer", "(", "d", ")", "wrapper", ".", "server", "=", "ses", "return", "[", "wrapper", "]", ",", "cmd_line" ]
Prepare server to execute :return: Modules to execute, cmd line function :rtype: list[WrapperServer], callable | None
[ "Prepare", "server", "to", "execute" ]
python
train
20.8
Dallinger/Dallinger
dallinger/command_line.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/command_line.py#L343-L350
def dallinger(): """Dallinger command-line utility.""" from logging.config import fileConfig fileConfig( os.path.join(os.path.dirname(__file__), "logging.ini"), disable_existing_loggers=False, )
[ "def", "dallinger", "(", ")", ":", "from", "logging", ".", "config", "import", "fileConfig", "fileConfig", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"logging.ini\"", ")", ",", "disable_existing_loggers", "=", "False", ",", ")" ]
Dallinger command-line utility.
[ "Dallinger", "command", "-", "line", "utility", "." ]
python
train
27.5
saltstack/salt
salt/states/modjk_worker.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/modjk_worker.py#L220-L241
def disable(name, lbn, target, profile='default', tgt_type='glob'): ''' .. versionchanged:: 2017.7.0 The ``expr_form`` argument has been renamed to ``tgt_type``, earlier releases must use ``expr_form``. Disable the named worker from the lbn load balancers at the targeted minions. The worker will get traffic only for current sessions and won't get new ones. Example: .. code-block:: yaml disable-before-deploy: modjk_worker.disable: - name: {{ grains['id'] }} - lbn: application - target: 'roles:balancer' - tgt_type: grain ''' return _talk2modjk(name, lbn, target, 'worker_disable', profile, tgt_type)
[ "def", "disable", "(", "name", ",", "lbn", ",", "target", ",", "profile", "=", "'default'", ",", "tgt_type", "=", "'glob'", ")", ":", "return", "_talk2modjk", "(", "name", ",", "lbn", ",", "target", ",", "'worker_disable'", ",", "profile", ",", "tgt_type", ")" ]
.. versionchanged:: 2017.7.0 The ``expr_form`` argument has been renamed to ``tgt_type``, earlier releases must use ``expr_form``. Disable the named worker from the lbn load balancers at the targeted minions. The worker will get traffic only for current sessions and won't get new ones. Example: .. code-block:: yaml disable-before-deploy: modjk_worker.disable: - name: {{ grains['id'] }} - lbn: application - target: 'roles:balancer' - tgt_type: grain
[ "..", "versionchanged", "::", "2017", ".", "7", ".", "0", "The", "expr_form", "argument", "has", "been", "renamed", "to", "tgt_type", "earlier", "releases", "must", "use", "expr_form", "." ]
python
train
32
mathandy/svgpathtools
svgpathtools/document.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/document.py#L324-L338
def add_group(self, group_attribs=None, parent=None): """Add an empty group element to the SVG.""" if parent is None: parent = self.tree.getroot() elif not self.contains_group(parent): warnings.warn('The requested group {0} does not belong to ' 'this Document'.format(parent)) if group_attribs is None: group_attribs = {} else: group_attribs = group_attribs.copy() return SubElement(parent, '{{{0}}}g'.format( SVG_NAMESPACE['svg']), group_attribs)
[ "def", "add_group", "(", "self", ",", "group_attribs", "=", "None", ",", "parent", "=", "None", ")", ":", "if", "parent", "is", "None", ":", "parent", "=", "self", ".", "tree", ".", "getroot", "(", ")", "elif", "not", "self", ".", "contains_group", "(", "parent", ")", ":", "warnings", ".", "warn", "(", "'The requested group {0} does not belong to '", "'this Document'", ".", "format", "(", "parent", ")", ")", "if", "group_attribs", "is", "None", ":", "group_attribs", "=", "{", "}", "else", ":", "group_attribs", "=", "group_attribs", ".", "copy", "(", ")", "return", "SubElement", "(", "parent", ",", "'{{{0}}}g'", ".", "format", "(", "SVG_NAMESPACE", "[", "'svg'", "]", ")", ",", "group_attribs", ")" ]
Add an empty group element to the SVG.
[ "Add", "an", "empty", "group", "element", "to", "the", "SVG", "." ]
python
train
37.933333
camsci/meteor-pi
src/pythonModules/meteorpi_client/meteorpi_client/__init__.py
https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_client/meteorpi_client/__init__.py#L69-L76
def list_observatories(self): """ Get the IDs of all observatories with have stored observations on this server. :return: a sequence of strings containing observatories IDs """ response = requests.get(self.base_url + '/obstories').text return safe_load(response)
[ "def", "list_observatories", "(", "self", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "base_url", "+", "'/obstories'", ")", ".", "text", "return", "safe_load", "(", "response", ")" ]
Get the IDs of all observatories with have stored observations on this server. :return: a sequence of strings containing observatories IDs
[ "Get", "the", "IDs", "of", "all", "observatories", "with", "have", "stored", "observations", "on", "this", "server", "." ]
python
train
38
saltstack/salt
salt/modules/rbenv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rbenv.py#L413-L442
def do_with_ruby(ruby, cmdline, runas=None): ''' Execute a ruby command with rbenv's shims using a specific ruby version CLI Example: .. code-block:: bash salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy ''' if not cmdline: # This is a positional argument so this should never happen, but this # will handle cases where someone explicitly passes a false value for # cmdline. raise SaltInvocationError('Command must be specified') try: cmdline = salt.utils.args.shlex_split(cmdline) except AttributeError: cmdline = salt.utils.args.shlex_split(six.text_type(cmdline)) env = {} if ruby: env['RBENV_VERSION'] = ruby cmd = cmdline else: cmd = cmdline return do(cmd, runas=runas, env=env)
[ "def", "do_with_ruby", "(", "ruby", ",", "cmdline", ",", "runas", "=", "None", ")", ":", "if", "not", "cmdline", ":", "# This is a positional argument so this should never happen, but this", "# will handle cases where someone explicitly passes a false value for", "# cmdline.", "raise", "SaltInvocationError", "(", "'Command must be specified'", ")", "try", ":", "cmdline", "=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "cmdline", ")", "except", "AttributeError", ":", "cmdline", "=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "six", ".", "text_type", "(", "cmdline", ")", ")", "env", "=", "{", "}", "if", "ruby", ":", "env", "[", "'RBENV_VERSION'", "]", "=", "ruby", "cmd", "=", "cmdline", "else", ":", "cmd", "=", "cmdline", "return", "do", "(", "cmd", ",", "runas", "=", "runas", ",", "env", "=", "env", ")" ]
Execute a ruby command with rbenv's shims using a specific ruby version CLI Example: .. code-block:: bash salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy
[ "Execute", "a", "ruby", "command", "with", "rbenv", "s", "shims", "using", "a", "specific", "ruby", "version" ]
python
train
29.066667
bitprophet/ssh
setup_helper.py
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/setup_helper.py#L34-L89
def make_tarball(base_name, base_dir, compress='gzip', verbose=False, dry_run=False): """Create a tar file from all the files under 'base_dir'. This file may be compressed. :param compress: Compression algorithms. Supported algorithms are: 'gzip': (the default) 'compress' 'bzip2' None For 'gzip' and 'bzip2' the internal tarfile module will be used. For 'compress' the .tar will be created using tarfile, and then we will spawn 'compress' afterwards. The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", ".bz2" or ".Z"). Return the output filename. """ # XXX GNU tar 1.13 has a nifty option to add a prefix directory. # It's pretty new, though, so we certainly can't require it -- # but it would be nice to take advantage of it to skip the # "create a tree of hardlinks" step! (Would also be nice to # detect GNU tar to use its 'z' option and save a step.) compress_ext = { 'gzip': ".gz", 'bzip2': '.bz2', 'compress': ".Z" } # flags for compression program, each element of list will be an argument tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'} compress_flags = {'compress': ["-f"]} if compress is not None and compress not in compress_ext.keys(): raise ValueError("bad value for 'compress': must be None, 'gzip'," "'bzip2' or 'compress'") archive_name = base_name + ".tar" if compress and compress in tarfile_compress_flag: archive_name += compress_ext[compress] mode = 'w:' + tarfile_compress_flag.get(compress, '') mkpath(os.path.dirname(archive_name), dry_run=dry_run) log.info('Creating tar file %s with mode %s' % (archive_name, mode)) if not dry_run: tar = tarfile.open(archive_name, mode=mode) # This recursively adds everything underneath base_dir tar.add(base_dir) tar.close() if compress and compress not in tarfile_compress_flag: spawn([compress] + compress_flags[compress] + [archive_name], dry_run=dry_run) return archive_name + compress_ext[compress] else: return archive_name
[ "def", "make_tarball", "(", "base_name", ",", "base_dir", ",", "compress", "=", "'gzip'", ",", "verbose", "=", "False", ",", "dry_run", "=", "False", ")", ":", "# XXX GNU tar 1.13 has a nifty option to add a prefix directory.", "# It's pretty new, though, so we certainly can't require it --", "# but it would be nice to take advantage of it to skip the", "# \"create a tree of hardlinks\" step! (Would also be nice to", "# detect GNU tar to use its 'z' option and save a step.)", "compress_ext", "=", "{", "'gzip'", ":", "\".gz\"", ",", "'bzip2'", ":", "'.bz2'", ",", "'compress'", ":", "\".Z\"", "}", "# flags for compression program, each element of list will be an argument", "tarfile_compress_flag", "=", "{", "'gzip'", ":", "'gz'", ",", "'bzip2'", ":", "'bz2'", "}", "compress_flags", "=", "{", "'compress'", ":", "[", "\"-f\"", "]", "}", "if", "compress", "is", "not", "None", "and", "compress", "not", "in", "compress_ext", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"bad value for 'compress': must be None, 'gzip',\"", "\"'bzip2' or 'compress'\"", ")", "archive_name", "=", "base_name", "+", "\".tar\"", "if", "compress", "and", "compress", "in", "tarfile_compress_flag", ":", "archive_name", "+=", "compress_ext", "[", "compress", "]", "mode", "=", "'w:'", "+", "tarfile_compress_flag", ".", "get", "(", "compress", ",", "''", ")", "mkpath", "(", "os", ".", "path", ".", "dirname", "(", "archive_name", ")", ",", "dry_run", "=", "dry_run", ")", "log", ".", "info", "(", "'Creating tar file %s with mode %s'", "%", "(", "archive_name", ",", "mode", ")", ")", "if", "not", "dry_run", ":", "tar", "=", "tarfile", ".", "open", "(", "archive_name", ",", "mode", "=", "mode", ")", "# This recursively adds everything underneath base_dir", "tar", ".", "add", "(", "base_dir", ")", "tar", ".", "close", "(", ")", "if", "compress", "and", "compress", "not", "in", "tarfile_compress_flag", ":", "spawn", "(", "[", "compress", "]", "+", "compress_flags", "[", "compress", "]", "+", "[", "archive_name", "]", ",", "dry_run", "=", "dry_run", ")", "return", "archive_name", "+", "compress_ext", "[", "compress", "]", "else", ":", "return", "archive_name" ]
Create a tar file from all the files under 'base_dir'. This file may be compressed. :param compress: Compression algorithms. Supported algorithms are: 'gzip': (the default) 'compress' 'bzip2' None For 'gzip' and 'bzip2' the internal tarfile module will be used. For 'compress' the .tar will be created using tarfile, and then we will spawn 'compress' afterwards. The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", ".bz2" or ".Z"). Return the output filename.
[ "Create", "a", "tar", "file", "from", "all", "the", "files", "under", "base_dir", ".", "This", "file", "may", "be", "compressed", "." ]
python
train
39.875
skelsec/minidump
minidump/minidumpreader.py
https://github.com/skelsec/minidump/blob/0c4dcabe6f11d7a403440919ffa9e3c9889c5212/minidump/minidumpreader.py#L118-L139
def read(self, size = -1): """ Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment """ if size < -1: raise Exception('You shouldnt be doing this') if size == -1: t = self.current_segment.remaining_len(self.current_position) if not t: return None old_new_pos = self.current_position self.current_position = self.current_segment.end_address return self.current_segment.data[old_new_pos - self.current_segment.start_address:] t = self.current_position + size if not self.current_segment.inrange(t): raise Exception('Would read over segment boundaries!') old_new_pos = self.current_position self.current_position = t return self.current_segment.data[old_new_pos - self.current_segment.start_address :t - self.current_segment.start_address]
[ "def", "read", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "size", "<", "-", "1", ":", "raise", "Exception", "(", "'You shouldnt be doing this'", ")", "if", "size", "==", "-", "1", ":", "t", "=", "self", ".", "current_segment", ".", "remaining_len", "(", "self", ".", "current_position", ")", "if", "not", "t", ":", "return", "None", "old_new_pos", "=", "self", ".", "current_position", "self", ".", "current_position", "=", "self", ".", "current_segment", ".", "end_address", "return", "self", ".", "current_segment", ".", "data", "[", "old_new_pos", "-", "self", ".", "current_segment", ".", "start_address", ":", "]", "t", "=", "self", ".", "current_position", "+", "size", "if", "not", "self", ".", "current_segment", ".", "inrange", "(", "t", ")", ":", "raise", "Exception", "(", "'Would read over segment boundaries!'", ")", "old_new_pos", "=", "self", ".", "current_position", "self", ".", "current_position", "=", "t", "return", "self", ".", "current_segment", ".", "data", "[", "old_new_pos", "-", "self", ".", "current_segment", ".", "start_address", ":", "t", "-", "self", ".", "current_segment", ".", "start_address", "]" ]
Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment
[ "Returns", "data", "bytes", "of", "size", "size", "from", "the", "current", "segment", ".", "If", "size", "is", "-", "1", "it", "returns", "all", "the", "remaining", "data", "bytes", "from", "memory", "segment" ]
python
train
38.681818
lensacom/sparkit-learn
splearn/feature_extraction/text.py
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/feature_extraction/text.py#L169-L197
def _count_vocab(self, analyzed_docs): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ vocabulary = self.vocabulary_ j_indices = _make_int_array() indptr = _make_int_array() indptr.append(0) for doc in analyzed_docs: for feature in doc: try: j_indices.append(vocabulary[feature]) except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue indptr.append(len(j_indices)) j_indices = frombuffer_empty(j_indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) values = np.ones(len(j_indices)) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sum_duplicates() if self.binary: X.data.fill(1) return X
[ "def", "_count_vocab", "(", "self", ",", "analyzed_docs", ")", ":", "vocabulary", "=", "self", ".", "vocabulary_", "j_indices", "=", "_make_int_array", "(", ")", "indptr", "=", "_make_int_array", "(", ")", "indptr", ".", "append", "(", "0", ")", "for", "doc", "in", "analyzed_docs", ":", "for", "feature", "in", "doc", ":", "try", ":", "j_indices", ".", "append", "(", "vocabulary", "[", "feature", "]", ")", "except", "KeyError", ":", "# Ignore out-of-vocabulary items for fixed_vocab=True", "continue", "indptr", ".", "append", "(", "len", "(", "j_indices", ")", ")", "j_indices", "=", "frombuffer_empty", "(", "j_indices", ",", "dtype", "=", "np", ".", "intc", ")", "indptr", "=", "np", ".", "frombuffer", "(", "indptr", ",", "dtype", "=", "np", ".", "intc", ")", "values", "=", "np", ".", "ones", "(", "len", "(", "j_indices", ")", ")", "X", "=", "sp", ".", "csr_matrix", "(", "(", "values", ",", "j_indices", ",", "indptr", ")", ",", "shape", "=", "(", "len", "(", "indptr", ")", "-", "1", ",", "len", "(", "vocabulary", ")", ")", ",", "dtype", "=", "self", ".", "dtype", ")", "X", ".", "sum_duplicates", "(", ")", "if", "self", ".", "binary", ":", "X", ".", "data", ".", "fill", "(", "1", ")", "return", "X" ]
Create sparse feature matrix, and vocabulary where fixed_vocab=False
[ "Create", "sparse", "feature", "matrix", "and", "vocabulary", "where", "fixed_vocab", "=", "False" ]
python
test
33.965517
kensho-technologies/graphql-compiler
graphql_compiler/compiler/emit_match.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/emit_match.py#L21-L50
def _first_step_to_match(match_step): """Transform the very first MATCH step into a MATCH query string.""" parts = [] if match_step.root_block is not None: if not isinstance(match_step.root_block, QueryRoot): raise AssertionError(u'Expected None or QueryRoot root block, received: ' u'{} {}'.format(match_step.root_block, match_step)) match_step.root_block.validate() start_class = get_only_element_from_collection(match_step.root_block.start_class) parts.append(u'class: %s' % (start_class,)) # MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'. if match_step.coerce_type_block is not None: raise AssertionError(u'Invalid MATCH step: {}'.format(match_step)) if match_step.where_block: match_step.where_block.validate() parts.append(u'where: (%s)' % (match_step.where_block.predicate.to_match(),)) if match_step.as_block is None: raise AssertionError(u'Found a MATCH step without a corresponding Location. ' u'This should never happen: {}'.format(match_step)) else: match_step.as_block.validate() parts.append(u'as: %s' % (_get_vertex_location_name(match_step.as_block.location),)) return u'{{ %s }}' % (u', '.join(parts),)
[ "def", "_first_step_to_match", "(", "match_step", ")", ":", "parts", "=", "[", "]", "if", "match_step", ".", "root_block", "is", "not", "None", ":", "if", "not", "isinstance", "(", "match_step", ".", "root_block", ",", "QueryRoot", ")", ":", "raise", "AssertionError", "(", "u'Expected None or QueryRoot root block, received: '", "u'{} {}'", ".", "format", "(", "match_step", ".", "root_block", ",", "match_step", ")", ")", "match_step", ".", "root_block", ".", "validate", "(", ")", "start_class", "=", "get_only_element_from_collection", "(", "match_step", ".", "root_block", ".", "start_class", ")", "parts", ".", "append", "(", "u'class: %s'", "%", "(", "start_class", ",", ")", ")", "# MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'.", "if", "match_step", ".", "coerce_type_block", "is", "not", "None", ":", "raise", "AssertionError", "(", "u'Invalid MATCH step: {}'", ".", "format", "(", "match_step", ")", ")", "if", "match_step", ".", "where_block", ":", "match_step", ".", "where_block", ".", "validate", "(", ")", "parts", ".", "append", "(", "u'where: (%s)'", "%", "(", "match_step", ".", "where_block", ".", "predicate", ".", "to_match", "(", ")", ",", ")", ")", "if", "match_step", ".", "as_block", "is", "None", ":", "raise", "AssertionError", "(", "u'Found a MATCH step without a corresponding Location. '", "u'This should never happen: {}'", ".", "format", "(", "match_step", ")", ")", "else", ":", "match_step", ".", "as_block", ".", "validate", "(", ")", "parts", ".", "append", "(", "u'as: %s'", "%", "(", "_get_vertex_location_name", "(", "match_step", ".", "as_block", ".", "location", ")", ",", ")", ")", "return", "u'{{ %s }}'", "%", "(", "u', '", ".", "join", "(", "parts", ")", ",", ")" ]
Transform the very first MATCH step into a MATCH query string.
[ "Transform", "the", "very", "first", "MATCH", "step", "into", "a", "MATCH", "query", "string", "." ]
python
train
43.933333
MartinThoma/hwrt
hwrt/create_ffiles.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/create_ffiles.py#L62-L85
def _create_translation_file(feature_folder, dataset_name, translation, formula_id2index): """ Write a loop-up file that contains the direct (record-wise) lookup information. Parameters ---------- feature_folder : Path to the feature files. dataset_name : 'traindata', 'validdata' or 'testdata'. translation : list of triples (raw data id, formula in latex, formula id) """ translationfilename = "%s/translation-%s.csv" % (feature_folder, dataset_name) with open(translationfilename, "w") as f: f.write("index,raw_data_id,latex,formula_id\n") for el in translation: f.write("%i,%i,%s,%i\n" % (formula_id2index[el[2]], el[0], el[1], el[2]))
[ "def", "_create_translation_file", "(", "feature_folder", ",", "dataset_name", ",", "translation", ",", "formula_id2index", ")", ":", "translationfilename", "=", "\"%s/translation-%s.csv\"", "%", "(", "feature_folder", ",", "dataset_name", ")", "with", "open", "(", "translationfilename", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"index,raw_data_id,latex,formula_id\\n\"", ")", "for", "el", "in", "translation", ":", "f", ".", "write", "(", "\"%i,%i,%s,%i\\n\"", "%", "(", "formula_id2index", "[", "el", "[", "2", "]", "]", ",", "el", "[", "0", "]", ",", "el", "[", "1", "]", ",", "el", "[", "2", "]", ")", ")" ]
Write a loop-up file that contains the direct (record-wise) lookup information. Parameters ---------- feature_folder : Path to the feature files. dataset_name : 'traindata', 'validdata' or 'testdata'. translation : list of triples (raw data id, formula in latex, formula id)
[ "Write", "a", "loop", "-", "up", "file", "that", "contains", "the", "direct", "(", "record", "-", "wise", ")", "lookup", "information", "." ]
python
train
37.166667
tanghaibao/jcvi
jcvi/utils/table.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/table.py#L32-L77
def tabulate(d, transpose=False, thousands=True, key_fun=None, sep=',', align=True): """ d is a dictionary, keyed by tuple(A, B). Goal is to put A in rows, B in columns, report data in table form. >>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0} >>> print tabulate(d) =========== o a b ----------- 1 3 4 2 5 0 ----------- >>> print tabulate(d, transpose=True) =========== o 1 2 ----------- a 3 5 b 4 0 ----------- """ pairs = d.keys() rows, cols = zip(*pairs) if transpose: rows, cols = cols, rows rows = sorted(set(rows)) cols = sorted(set(cols)) header = ["o"] + list(cols) table = [] for r in rows: combo = [(r, c) for c in cols] if transpose: combo = [(c, r) for (r, c) in combo] data = [d.get(x, "n/a") for x in combo] data = ["{0:.1f}".format(x) if isinstance(x, float) else x for x in data] if key_fun: data = [key_fun(x) for x in data] table.append([str(r)] + data) if not align: formatted = load_csv(header, table, sep=sep) return "\n".join(formatted) return loadtable(header, table, thousands=thousands)
[ "def", "tabulate", "(", "d", ",", "transpose", "=", "False", ",", "thousands", "=", "True", ",", "key_fun", "=", "None", ",", "sep", "=", "','", ",", "align", "=", "True", ")", ":", "pairs", "=", "d", ".", "keys", "(", ")", "rows", ",", "cols", "=", "zip", "(", "*", "pairs", ")", "if", "transpose", ":", "rows", ",", "cols", "=", "cols", ",", "rows", "rows", "=", "sorted", "(", "set", "(", "rows", ")", ")", "cols", "=", "sorted", "(", "set", "(", "cols", ")", ")", "header", "=", "[", "\"o\"", "]", "+", "list", "(", "cols", ")", "table", "=", "[", "]", "for", "r", "in", "rows", ":", "combo", "=", "[", "(", "r", ",", "c", ")", "for", "c", "in", "cols", "]", "if", "transpose", ":", "combo", "=", "[", "(", "c", ",", "r", ")", "for", "(", "r", ",", "c", ")", "in", "combo", "]", "data", "=", "[", "d", ".", "get", "(", "x", ",", "\"n/a\"", ")", "for", "x", "in", "combo", "]", "data", "=", "[", "\"{0:.1f}\"", ".", "format", "(", "x", ")", "if", "isinstance", "(", "x", ",", "float", ")", "else", "x", "for", "x", "in", "data", "]", "if", "key_fun", ":", "data", "=", "[", "key_fun", "(", "x", ")", "for", "x", "in", "data", "]", "table", ".", "append", "(", "[", "str", "(", "r", ")", "]", "+", "data", ")", "if", "not", "align", ":", "formatted", "=", "load_csv", "(", "header", ",", "table", ",", "sep", "=", "sep", ")", "return", "\"\\n\"", ".", "join", "(", "formatted", ")", "return", "loadtable", "(", "header", ",", "table", ",", "thousands", "=", "thousands", ")" ]
d is a dictionary, keyed by tuple(A, B). Goal is to put A in rows, B in columns, report data in table form. >>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0} >>> print tabulate(d) =========== o a b ----------- 1 3 4 2 5 0 ----------- >>> print tabulate(d, transpose=True) =========== o 1 2 ----------- a 3 5 b 4 0 -----------
[ "d", "is", "a", "dictionary", "keyed", "by", "tuple", "(", "A", "B", ")", ".", "Goal", "is", "to", "put", "A", "in", "rows", "B", "in", "columns", "report", "data", "in", "table", "form", "." ]
python
train
26.956522
pyamg/pyamg
pyamg/classical/split.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/split.py#L240-L291
def CLJP(S, color=False): """Compute a C/F splitting using the parallel CLJP algorithm. Parameters ---------- S : csr_matrix Strength of connection matrix indicating the strength between nodes i and j (S_ij) color : bool use the CLJP coloring approach Returns ------- splitting : array Array of length of S of ones (coarse) and zeros (fine) Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg.classical.split import CLJP >>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices >>> splitting = CLJP(S) See Also -------- MIS, PMIS, CLJPc References ---------- .. [8] David M. Alber and Luke N. Olson "Parallel coarse-grid selection" Numerical Linear Algebra with Applications 2007; 14:611-643. """ if not isspmatrix_csr(S): raise TypeError('expected csr_matrix') S = remove_diagonal(S) colorid = 0 if color: colorid = 1 T = S.T.tocsr() # transpose S for efficient column access splitting = np.empty(S.shape[0], dtype='intc') amg_core.cljp_naive_splitting(S.shape[0], S.indptr, S.indices, T.indptr, T.indices, splitting, colorid) return splitting
[ "def", "CLJP", "(", "S", ",", "color", "=", "False", ")", ":", "if", "not", "isspmatrix_csr", "(", "S", ")", ":", "raise", "TypeError", "(", "'expected csr_matrix'", ")", "S", "=", "remove_diagonal", "(", "S", ")", "colorid", "=", "0", "if", "color", ":", "colorid", "=", "1", "T", "=", "S", ".", "T", ".", "tocsr", "(", ")", "# transpose S for efficient column access", "splitting", "=", "np", ".", "empty", "(", "S", ".", "shape", "[", "0", "]", ",", "dtype", "=", "'intc'", ")", "amg_core", ".", "cljp_naive_splitting", "(", "S", ".", "shape", "[", "0", "]", ",", "S", ".", "indptr", ",", "S", ".", "indices", ",", "T", ".", "indptr", ",", "T", ".", "indices", ",", "splitting", ",", "colorid", ")", "return", "splitting" ]
Compute a C/F splitting using the parallel CLJP algorithm. Parameters ---------- S : csr_matrix Strength of connection matrix indicating the strength between nodes i and j (S_ij) color : bool use the CLJP coloring approach Returns ------- splitting : array Array of length of S of ones (coarse) and zeros (fine) Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg.classical.split import CLJP >>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices >>> splitting = CLJP(S) See Also -------- MIS, PMIS, CLJPc References ---------- .. [8] David M. Alber and Luke N. Olson "Parallel coarse-grid selection" Numerical Linear Algebra with Applications 2007; 14:611-643.
[ "Compute", "a", "C", "/", "F", "splitting", "using", "the", "parallel", "CLJP", "algorithm", "." ]
python
train
25.807692
SwissDataScienceCenter/renku-python
renku/cli/_git.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/_git.py#L32-L39
def get_git_home(path='.'): """Get Git path from the current context.""" ctx = click.get_current_context(silent=True) if ctx and GIT_KEY in ctx.meta: return ctx.meta[GIT_KEY] from git import Repo return Repo(path, search_parent_directories=True).working_dir
[ "def", "get_git_home", "(", "path", "=", "'.'", ")", ":", "ctx", "=", "click", ".", "get_current_context", "(", "silent", "=", "True", ")", "if", "ctx", "and", "GIT_KEY", "in", "ctx", ".", "meta", ":", "return", "ctx", ".", "meta", "[", "GIT_KEY", "]", "from", "git", "import", "Repo", "return", "Repo", "(", "path", ",", "search_parent_directories", "=", "True", ")", ".", "working_dir" ]
Get Git path from the current context.
[ "Get", "Git", "path", "from", "the", "current", "context", "." ]
python
train
34.875
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L202-L218
def crc8(data): """ Perform the 1-Wire CRC check on the provided data. :param bytearray data: 8 byte array representing 64 bit ROM code """ crc = 0 for byte in data: crc ^= byte for _ in range(8): if crc & 0x01: crc = (crc >> 1) ^ 0x8C else: crc >>= 1 crc &= 0xFF return crc
[ "def", "crc8", "(", "data", ")", ":", "crc", "=", "0", "for", "byte", "in", "data", ":", "crc", "^=", "byte", "for", "_", "in", "range", "(", "8", ")", ":", "if", "crc", "&", "0x01", ":", "crc", "=", "(", "crc", ">>", "1", ")", "^", "0x8C", "else", ":", "crc", ">>=", "1", "crc", "&=", "0xFF", "return", "crc" ]
Perform the 1-Wire CRC check on the provided data. :param bytearray data: 8 byte array representing 64 bit ROM code
[ "Perform", "the", "1", "-", "Wire", "CRC", "check", "on", "the", "provided", "data", "." ]
python
train
25.176471
StackStorm/pybind
pybind/nos/v6_0_2f/interface/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/__init__.py#L396-L422
def _set_fc_port(self, v, load=False): """ Setter method for fc_port, mapped from YANG variable /interface/fc_port (list) If this variable is read-only (config: false) in the source YANG file, then _set_fc_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fc_port() directly. YANG Description: The list of fibrechannel interfaces in the managed device. Each row represents a fibrechannel interface. The list provides a way to discover all the fibrechannel interfaces in a managed device. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",fc_port.fc_port, yang_name="fc-port", rest_name="FibreChannel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name="fc-port", rest_name="FibreChannel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """fc_port must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",fc_port.fc_port, yang_name="fc-port", rest_name="FibreChannel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name="fc-port", rest_name="FibreChannel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', 
u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""", }) self.__fc_port = t if hasattr(self, '_set'): self._set()
[ "def", "_set_fc_port", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"name\"", ",", "fc_port", ".", "fc_port", ",", "yang_name", "=", "\"fc-port\"", ",", "rest_name", "=", "\"FibreChannel\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "True", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'The list of fibrechannel interfaces.'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'alt-name'", ":", "u'FibreChannel'", ",", "u'sort-priority'", ":", "u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL'", ",", "u'cli-suppress-no'", ":", "None", ",", "u'cli-suppress-show-path'", ":", "None", ",", "u'display-when'", ":", "u'/vcsmode/vcs-mode = \"true\"'", ",", "u'cli-custom-range-actionpoint'", ":", "u'FcRangeCliActionpoint'", ",", "u'cli-custom-range-enumerator'", ":", "u'FcRangeCliActionpoint'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-no-match-completion'", ":", "None", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'callpoint'", ":", "u'interface_fcport'", ",", "u'cli-mode-name'", ":", "u'conf-if-fi-$(name)'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"fc-port\"", ",", "rest_name", "=", "\"FibreChannel\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'The list of fibrechannel interfaces.'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'alt-name'", ":", "u'FibreChannel'", ",", "u'sort-priority'", ":", "u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL'", ",", "u'cli-suppress-no'", ":", "None", ",", "u'cli-suppress-show-path'", ":", "None", ",", "u'display-when'", ":", "u'/vcsmode/vcs-mode = \"true\"'", ",", "u'cli-custom-range-actionpoint'", ":", "u'FcRangeCliActionpoint'", ",", "u'cli-custom-range-enumerator'", ":", "u'FcRangeCliActionpoint'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-no-match-completion'", ":", "None", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'callpoint'", ":", "u'interface_fcport'", ",", "u'cli-mode-name'", ":", "u'conf-if-fi-$(name)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"fc_port must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"name\",fc_port.fc_port, yang_name=\"fc-port\", rest_name=\"FibreChannel\", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = \"true\"', u'cli-custom-range-actionpoint': 
u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name=\"fc-port\", rest_name=\"FibreChannel\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = \"true\"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__fc_port", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for fc_port, mapped from YANG variable /interface/fc_port (list) If this variable is read-only (config: false) in the source YANG file, then _set_fc_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fc_port() directly. YANG Description: The list of fibrechannel interfaces in the managed device. Each row represents a fibrechannel interface. The list provides a way to discover all the fibrechannel interfaces in a managed device.
[ "Setter", "method", "for", "fc_port", "mapped", "from", "YANG", "variable", "/", "interface", "/", "fc_port", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_fc_port", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_fc_port", "()", "directly", "." ]
python
train
161.259259
jmcgeheeiv/pyfakefs
pyfakefs/fake_scandir.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_scandir.py#L81-L102
def stat(self, follow_symlinks=True): """Return a stat_result object for this entry. Args: follow_symlinks: If False and the entry is a symlink, return the result for the symlink, otherwise for the object it points to. """ if follow_symlinks: if self._statresult_symlink is None: file_object = self._filesystem.resolve(self.path) if self._filesystem.is_windows_fs: file_object.st_nlink = 0 self._statresult_symlink = file_object.stat_result.copy() return self._statresult_symlink if self._statresult is None: file_object = self._filesystem.lresolve(self.path) self._inode = file_object.st_ino if self._filesystem.is_windows_fs: file_object.st_nlink = 0 self._statresult = file_object.stat_result.copy() return self._statresult
[ "def", "stat", "(", "self", ",", "follow_symlinks", "=", "True", ")", ":", "if", "follow_symlinks", ":", "if", "self", ".", "_statresult_symlink", "is", "None", ":", "file_object", "=", "self", ".", "_filesystem", ".", "resolve", "(", "self", ".", "path", ")", "if", "self", ".", "_filesystem", ".", "is_windows_fs", ":", "file_object", ".", "st_nlink", "=", "0", "self", ".", "_statresult_symlink", "=", "file_object", ".", "stat_result", ".", "copy", "(", ")", "return", "self", ".", "_statresult_symlink", "if", "self", ".", "_statresult", "is", "None", ":", "file_object", "=", "self", ".", "_filesystem", ".", "lresolve", "(", "self", ".", "path", ")", "self", ".", "_inode", "=", "file_object", ".", "st_ino", "if", "self", ".", "_filesystem", ".", "is_windows_fs", ":", "file_object", ".", "st_nlink", "=", "0", "self", ".", "_statresult", "=", "file_object", ".", "stat_result", ".", "copy", "(", ")", "return", "self", ".", "_statresult" ]
Return a stat_result object for this entry. Args: follow_symlinks: If False and the entry is a symlink, return the result for the symlink, otherwise for the object it points to.
[ "Return", "a", "stat_result", "object", "for", "this", "entry", "." ]
python
train
42.681818
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L309-L333
def close_filenos(preserve): """ Close unprotected file descriptors Close all open file descriptors that are not in preserve. If ulimit -nofile is "unlimited", all is defined filenos <= 4096, else all is <= the output of resource.getrlimit(). :param preserve: set with protected files :type preserve: set :return: None """ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = 4096 for fileno in range(maxfd): if fileno not in preserve: try: os.close(fileno) except OSError as err: if not err.errno == errno.EBADF: raise DaemonError( 'Failed to close file descriptor {0}: {1}' .format(fileno, err))
[ "def", "close_filenos", "(", "preserve", ")", ":", "maxfd", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ")", "[", "1", "]", "if", "maxfd", "==", "resource", ".", "RLIM_INFINITY", ":", "maxfd", "=", "4096", "for", "fileno", "in", "range", "(", "maxfd", ")", ":", "if", "fileno", "not", "in", "preserve", ":", "try", ":", "os", ".", "close", "(", "fileno", ")", "except", "OSError", "as", "err", ":", "if", "not", "err", ".", "errno", "==", "errno", ".", "EBADF", ":", "raise", "DaemonError", "(", "'Failed to close file descriptor {0}: {1}'", ".", "format", "(", "fileno", ",", "err", ")", ")" ]
Close unprotected file descriptors Close all open file descriptors that are not in preserve. If ulimit -nofile is "unlimited", all is defined filenos <= 4096, else all is <= the output of resource.getrlimit(). :param preserve: set with protected files :type preserve: set :return: None
[ "Close", "unprotected", "file", "descriptors" ]
python
train
32.24
googleapis/google-cloud-python
oslogin/google/cloud/oslogin_v1/gapic/os_login_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/oslogin/google/cloud/oslogin_v1/gapic/os_login_service_client.py#L490-L562
def update_ssh_public_key( self, name, ssh_public_key, update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Updates an SSH public key and returns the profile information. This method supports patch semantics. Example: >>> from google.cloud import oslogin_v1 >>> >>> client = oslogin_v1.OsLoginServiceClient() >>> >>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]') >>> >>> # TODO: Initialize `ssh_public_key`: >>> ssh_public_key = {} >>> >>> response = client.update_ssh_public_key(name, ssh_public_key) Args: name (str): The fingerprint of the public key to update. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format ``users/{user}/sshPublicKeys/{fingerprint}``. ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.oslogin_v1.types.SshPublicKey` update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.oslogin_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "update_ssh_public_key" not in self._inner_api_calls: self._inner_api_calls[ "update_ssh_public_key" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.update_ssh_public_key, default_retry=self._method_configs["UpdateSshPublicKey"].retry, default_timeout=self._method_configs["UpdateSshPublicKey"].timeout, client_info=self._client_info, ) request = oslogin_pb2.UpdateSshPublicKeyRequest( name=name, ssh_public_key=ssh_public_key, update_mask=update_mask ) return self._inner_api_calls["update_ssh_public_key"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "update_ssh_public_key", "(", "self", ",", "name", ",", "ssh_public_key", ",", "update_mask", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"update_ssh_public_key\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"update_ssh_public_key\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "update_ssh_public_key", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"UpdateSshPublicKey\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"UpdateSshPublicKey\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "oslogin_pb2", ".", "UpdateSshPublicKeyRequest", "(", "name", "=", "name", ",", "ssh_public_key", "=", "ssh_public_key", ",", "update_mask", "=", "update_mask", ")", "return", "self", ".", "_inner_api_calls", "[", "\"update_ssh_public_key\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Updates an SSH public key and returns the profile information. This method supports patch semantics. Example: >>> from google.cloud import oslogin_v1 >>> >>> client = oslogin_v1.OsLoginServiceClient() >>> >>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]') >>> >>> # TODO: Initialize `ssh_public_key`: >>> ssh_public_key = {} >>> >>> response = client.update_ssh_public_key(name, ssh_public_key) Args: name (str): The fingerprint of the public key to update. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format ``users/{user}/sshPublicKeys/{fingerprint}``. ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.oslogin_v1.types.SshPublicKey` update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.oslogin_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Updates", "an", "SSH", "public", "key", "and", "returns", "the", "profile", "information", ".", "This", "method", "supports", "patch", "semantics", "." ]
python
train
46.808219
phn/angles
angles.py
https://github.com/phn/angles/blob/5c30ed7c3a7412177daaed180bf3b2351b287589/angles.py#L911-L1003
def bear(a1, b1, a2, b2): """Find bearing/position angle between two points on a unit sphere. Parameters ---------- a1, b1 : float Longitude-like and latitude-like angles defining the first point. Both are in radians. a2, b2 : float Longitude-like and latitude-like angles defining the second point. Both are in radians. Notes ----- Position angle of the second point with respect to the first is returned in radians. Position angle is calculated clockwise and counter-clockwise from the direction towards the North pole. It is between [0 and π] if the second point is in the eastern hemisphere w.r.t the first, and between (0, -π) if the second point is in the western hemisphere w.r.t the first. .. warning:: If the first point is at the pole then bearing is undefined and 0 is returned. Results agree with those from SLALIB rountine sla_dbear. See test_bear_against_slalib_dbear() in test_angles.py. Examples -------- >>> from angles import bear, r2d, d2r >>> bear(0, 0, 0, -d2r(90.0)) 3.141592653589793 >>> bear(0, -d2r(90.0), 0, 0) 0.0 >>> bear(0, -d2r(45.0), 0, 0) 0.0 >>> bear(0, -d2r(89.678), 0, 0) 0.0 >>> r2d(bear(d2r(45.0), d2r(45.0), d2r(46.0), d2r(45.0))) 89.64644212193384 >>> r2d(bear(d2r(45.0), d2r(45.0), d2r(44.0), d2r(45.0))) -89.64644212193421 """ # Find perpendicular to the plane containing the base and # z-axis. Then find the perpendicular to the plane containing # the base and the target. The angle between these two is the # position angle or bearing of the target w.r.t the base. Check # sign of the z component of the latter vector to determine # quadrant: 1st and 2nd quadrants are +ve while 3rd and 4th are # negative. # # Tolerance to decide if first is on the pole and also to decide if # the calculated bearing is zero. tol = 1e-15 v1 = CartesianVector.from_spherical(1.0, a1, b1) v2 = CartesianVector.from_spherical(1.0, a2, b2) # Z-axis v0 = CartesianVector.from_spherical(r=1.0, alpha=0.0, delta=d2r(90.0)) if abs(v1.cross(v0).mod) < tol: # The first point is on the pole. Bearing is undefined. warnings.warn( "First point is on the pole. Bearing undefined.") return 0.0 # Vector perpendicular to great circle containing two points. v12 = v1.cross(v2) # Vector perpendicular to great circle containing base and # Z-axis. v10 = v1.cross(v0) # Find angle between these two vectors. dot = v12.dot(v10) cross = v12.cross(v10).mod x = math.atan2(cross, dot) # If z is negative then we are in the 3rd or 4th quadrant. if v12.z < 0: x = -x if abs(x) < tol: return 0.0 else: return x
[ "def", "bear", "(", "a1", ",", "b1", ",", "a2", ",", "b2", ")", ":", "# Find perpendicular to the plane containing the base and", "# z-axis. Then find the perpendicular to the plane containing", "# the base and the target. The angle between these two is the", "# position angle or bearing of the target w.r.t the base. Check", "# sign of the z component of the latter vector to determine", "# quadrant: 1st and 2nd quadrants are +ve while 3rd and 4th are", "# negative.", "#", "# Tolerance to decide if first is on the pole and also to decide if", "# the calculated bearing is zero.", "tol", "=", "1e-15", "v1", "=", "CartesianVector", ".", "from_spherical", "(", "1.0", ",", "a1", ",", "b1", ")", "v2", "=", "CartesianVector", ".", "from_spherical", "(", "1.0", ",", "a2", ",", "b2", ")", "# Z-axis", "v0", "=", "CartesianVector", ".", "from_spherical", "(", "r", "=", "1.0", ",", "alpha", "=", "0.0", ",", "delta", "=", "d2r", "(", "90.0", ")", ")", "if", "abs", "(", "v1", ".", "cross", "(", "v0", ")", ".", "mod", ")", "<", "tol", ":", "# The first point is on the pole. Bearing is undefined.", "warnings", ".", "warn", "(", "\"First point is on the pole. Bearing undefined.\"", ")", "return", "0.0", "# Vector perpendicular to great circle containing two points.", "v12", "=", "v1", ".", "cross", "(", "v2", ")", "# Vector perpendicular to great circle containing base and", "# Z-axis.", "v10", "=", "v1", ".", "cross", "(", "v0", ")", "# Find angle between these two vectors.", "dot", "=", "v12", ".", "dot", "(", "v10", ")", "cross", "=", "v12", ".", "cross", "(", "v10", ")", ".", "mod", "x", "=", "math", ".", "atan2", "(", "cross", ",", "dot", ")", "# If z is negative then we are in the 3rd or 4th quadrant.", "if", "v12", ".", "z", "<", "0", ":", "x", "=", "-", "x", "if", "abs", "(", "x", ")", "<", "tol", ":", "return", "0.0", "else", ":", "return", "x" ]
Find bearing/position angle between two points on a unit sphere. Parameters ---------- a1, b1 : float Longitude-like and latitude-like angles defining the first point. Both are in radians. a2, b2 : float Longitude-like and latitude-like angles defining the second point. Both are in radians. Notes ----- Position angle of the second point with respect to the first is returned in radians. Position angle is calculated clockwise and counter-clockwise from the direction towards the North pole. It is between [0 and π] if the second point is in the eastern hemisphere w.r.t the first, and between (0, -π) if the second point is in the western hemisphere w.r.t the first. .. warning:: If the first point is at the pole then bearing is undefined and 0 is returned. Results agree with those from SLALIB rountine sla_dbear. See test_bear_against_slalib_dbear() in test_angles.py. Examples -------- >>> from angles import bear, r2d, d2r >>> bear(0, 0, 0, -d2r(90.0)) 3.141592653589793 >>> bear(0, -d2r(90.0), 0, 0) 0.0 >>> bear(0, -d2r(45.0), 0, 0) 0.0 >>> bear(0, -d2r(89.678), 0, 0) 0.0 >>> r2d(bear(d2r(45.0), d2r(45.0), d2r(46.0), d2r(45.0))) 89.64644212193384 >>> r2d(bear(d2r(45.0), d2r(45.0), d2r(44.0), d2r(45.0))) -89.64644212193421
[ "Find", "bearing", "/", "position", "angle", "between", "two", "points", "on", "a", "unit", "sphere", "." ]
python
train
30
doconix/django-mako-plus
django_mako_plus/router/data.py
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/router/data.py#L104-L109
def get_template_loader(self, subdir='templates'): '''App-specific function to get the current app's template loader''' if self.request is None: raise ValueError("this method can only be called after the view middleware is run. Check that `django_mako_plus.middleware` is in MIDDLEWARE.") dmp = apps.get_app_config('django_mako_plus') return dmp.engine.get_template_loader(self.app, subdir)
[ "def", "get_template_loader", "(", "self", ",", "subdir", "=", "'templates'", ")", ":", "if", "self", ".", "request", "is", "None", ":", "raise", "ValueError", "(", "\"this method can only be called after the view middleware is run. Check that `django_mako_plus.middleware` is in MIDDLEWARE.\"", ")", "dmp", "=", "apps", ".", "get_app_config", "(", "'django_mako_plus'", ")", "return", "dmp", ".", "engine", ".", "get_template_loader", "(", "self", ".", "app", ",", "subdir", ")" ]
App-specific function to get the current app's template loader
[ "App", "-", "specific", "function", "to", "get", "the", "current", "app", "s", "template", "loader" ]
python
train
71.5
TC01/calcpkg
calcrepo/output.py
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/output.py#L61-L69
def getLoggingLocation(self): """Return the path for the calcpkg.log file - at the moment, only use a Linux path since I don't know where Windows thinks logs should go.""" if sys.platform == "win32": modulePath = os.path.realpath(__file__) modulePath = modulePath[:modulePath.rfind("/")] return modulePath else: return "/tmp" return ""
[ "def", "getLoggingLocation", "(", "self", ")", ":", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "modulePath", "=", "os", ".", "path", ".", "realpath", "(", "__file__", ")", "modulePath", "=", "modulePath", "[", ":", "modulePath", ".", "rfind", "(", "\"/\"", ")", "]", "return", "modulePath", "else", ":", "return", "\"/tmp\"", "return", "\"\"" ]
Return the path for the calcpkg.log file - at the moment, only use a Linux path since I don't know where Windows thinks logs should go.
[ "Return", "the", "path", "for", "the", "calcpkg", ".", "log", "file", "-", "at", "the", "moment", "only", "use", "a", "Linux", "path", "since", "I", "don", "t", "know", "where", "Windows", "thinks", "logs", "should", "go", "." ]
python
train
38.555556
Qiskit/qiskit-terra
qiskit/circuit/instructionset.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/circuit/instructionset.py#L57-L61
def c_if(self, classical, val): """Add classical control register to all instructions.""" for gate in self.instructions: gate.c_if(classical, val) return self
[ "def", "c_if", "(", "self", ",", "classical", ",", "val", ")", ":", "for", "gate", "in", "self", ".", "instructions", ":", "gate", ".", "c_if", "(", "classical", ",", "val", ")", "return", "self" ]
Add classical control register to all instructions.
[ "Add", "classical", "control", "register", "to", "all", "instructions", "." ]
python
test
38
citruz/beacontools
beacontools/scanner.py
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L240-L244
def terminate(self): """Signal runner to stop and join thread.""" self.toggle_scan(False) self.keep_going = False self.join()
[ "def", "terminate", "(", "self", ")", ":", "self", ".", "toggle_scan", "(", "False", ")", "self", ".", "keep_going", "=", "False", "self", ".", "join", "(", ")" ]
Signal runner to stop and join thread.
[ "Signal", "runner", "to", "stop", "and", "join", "thread", "." ]
python
train
30.6
openstack/networking-cisco
networking_cisco/plugins/cisco/device_manager/plugging_drivers/vif_hotplug_plugging_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/plugging_drivers/vif_hotplug_plugging_driver.py#L59-L86
def create_hosting_device_resources(self, context, complementary_id, tenant_id, mgmt_context, max_hosted): """Create resources for a hosting device in a plugin specific way.""" mgmt_port = None if mgmt_context and mgmt_context.get('mgmt_nw_id') and tenant_id: # Create port for mgmt interface p_spec = {'port': { 'tenant_id': tenant_id, 'admin_state_up': True, 'name': 'mgmt', 'network_id': mgmt_context['mgmt_nw_id'], 'mac_address': bc.constants.ATTR_NOT_SPECIFIED, 'fixed_ips': self._mgmt_subnet_spec(context, mgmt_context), 'device_id': "", # Use device_owner attribute to ensure we can query for these # ports even before Nova has set device_id attribute. 'device_owner': complementary_id}} try: mgmt_port = self._core_plugin.create_port(context, p_spec) except n_exc.NeutronException as e: LOG.error('Error %s when creating management port. ' 'Cleaning up.', e) self.delete_hosting_device_resources( context, tenant_id, mgmt_port) mgmt_port = None # We are setting the 'ports' to an empty list as it is expected by # the callee: device_handling_db._create_svc_vm_hosting_devices() return {'mgmt_port': mgmt_port, 'ports': []}
[ "def", "create_hosting_device_resources", "(", "self", ",", "context", ",", "complementary_id", ",", "tenant_id", ",", "mgmt_context", ",", "max_hosted", ")", ":", "mgmt_port", "=", "None", "if", "mgmt_context", "and", "mgmt_context", ".", "get", "(", "'mgmt_nw_id'", ")", "and", "tenant_id", ":", "# Create port for mgmt interface", "p_spec", "=", "{", "'port'", ":", "{", "'tenant_id'", ":", "tenant_id", ",", "'admin_state_up'", ":", "True", ",", "'name'", ":", "'mgmt'", ",", "'network_id'", ":", "mgmt_context", "[", "'mgmt_nw_id'", "]", ",", "'mac_address'", ":", "bc", ".", "constants", ".", "ATTR_NOT_SPECIFIED", ",", "'fixed_ips'", ":", "self", ".", "_mgmt_subnet_spec", "(", "context", ",", "mgmt_context", ")", ",", "'device_id'", ":", "\"\"", ",", "# Use device_owner attribute to ensure we can query for these", "# ports even before Nova has set device_id attribute.", "'device_owner'", ":", "complementary_id", "}", "}", "try", ":", "mgmt_port", "=", "self", ".", "_core_plugin", ".", "create_port", "(", "context", ",", "p_spec", ")", "except", "n_exc", ".", "NeutronException", "as", "e", ":", "LOG", ".", "error", "(", "'Error %s when creating management port. '", "'Cleaning up.'", ",", "e", ")", "self", ".", "delete_hosting_device_resources", "(", "context", ",", "tenant_id", ",", "mgmt_port", ")", "mgmt_port", "=", "None", "# We are setting the 'ports' to an empty list as it is expected by", "# the callee: device_handling_db._create_svc_vm_hosting_devices()", "return", "{", "'mgmt_port'", ":", "mgmt_port", ",", "'ports'", ":", "[", "]", "}" ]
Create resources for a hosting device in a plugin specific way.
[ "Create", "resources", "for", "a", "hosting", "device", "in", "a", "plugin", "specific", "way", "." ]
python
train
53.892857
dmwm/DBS
Server/Python/src/dbs/web/DBSWriterModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSWriterModel.py#L404-L437
def updateFile(self, logical_file_name=[], is_file_valid=1, lost=0, dataset=''): """ API to update file status :param logical_file_name: logical_file_name to update (optional), but must have either a fln or a dataset :type logical_file_name: str :param is_file_valid: valid=1, invalid=0 (Required) :type is_file_valid: bool :param lost: default lost=0 (optional) :type lost: bool :param dataset: default dataset='' (optional),but must have either a fln or a dataset :type dataset: basestring """ if lost in [1, True, 'True', 'true', '1', 'y', 'yes']: lost = 1 if is_file_valid in [1, True, 'True', 'true', '1', 'y', 'yes']: dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception,\ "Lost file must set to invalid" ) else: lost = 0 for f in logical_file_name, dataset: if '*' in f or '%' in f: dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No \ wildcard allow in LFN or dataset for updatefile API." ) try: self.dbsFile.updateStatus(logical_file_name, is_file_valid, lost, dataset) except HTTPError as he: raise he except Exception as ex: sError = "DBSWriterModel/updateFile. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "updateFile", "(", "self", ",", "logical_file_name", "=", "[", "]", ",", "is_file_valid", "=", "1", ",", "lost", "=", "0", ",", "dataset", "=", "''", ")", ":", "if", "lost", "in", "[", "1", ",", "True", ",", "'True'", ",", "'true'", ",", "'1'", ",", "'y'", ",", "'yes'", "]", ":", "lost", "=", "1", "if", "is_file_valid", "in", "[", "1", ",", "True", ",", "'True'", ",", "'true'", ",", "'1'", ",", "'y'", ",", "'yes'", "]", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "dbsExceptionCode", "[", "\"dbsException-invalid-input2\"", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"Lost file must set to invalid\"", ")", "else", ":", "lost", "=", "0", "for", "f", "in", "logical_file_name", ",", "dataset", ":", "if", "'*'", "in", "f", "or", "'%'", "in", "f", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "dbsExceptionCode", "[", "\"dbsException-invalid-input2\"", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"No \\\n wildcard allow in LFN or dataset for updatefile API.\"", ")", "try", ":", "self", ".", "dbsFile", ".", "updateStatus", "(", "logical_file_name", ",", "is_file_valid", ",", "lost", ",", "dataset", ")", "except", "HTTPError", "as", "he", ":", "raise", "he", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSWriterModel/updateFile. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
API to update file status :param logical_file_name: logical_file_name to update (optional), but must have either a fln or a dataset :type logical_file_name: str :param is_file_valid: valid=1, invalid=0 (Required) :type is_file_valid: bool :param lost: default lost=0 (optional) :type lost: bool :param dataset: default dataset='' (optional),but must have either a fln or a dataset :type dataset: basestring
[ "API", "to", "update", "file", "status" ]
python
train
50.676471
ethereum/py-evm
eth/db/journal.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/journal.py#L379-L408
def commit(self, changeset_id: uuid.UUID) -> None:
        """
        Commits a given changeset. This merges the given changeset and all
        subsequent changesets into the previous changeset giving precedence to
        later changesets in case of any conflicting keys.

        If this is the base changeset then all changes will be written to
        the underlying database and the Journal starts a new recording.

        Typically, callers won't have access to the base changeset, because
        it is dropped during .reset() which is called in JournalDB().
        """
        self._validate_changeset(changeset_id)
        journal_data = self.journal.commit_changeset(changeset_id)
        if self.journal.is_empty():
            # Ensure the journal automatically restarts recording after
            # it has been persisted to the underlying db
            self.reset()

        for key, value in journal_data.items():
            try:
                if value is DELETED_ENTRY:
                    del self.wrapped_db[key]
                elif value is ERASE_CREATED_ENTRY:
                    pass
                else:
                    self.wrapped_db[key] = cast(bytes, value)
            except Exception:
                self._reapply_changeset_to_journal(changeset_id, journal_data)
                raise
[ "def", "commit", "(", "self", ",", "changeset_id", ":", "uuid", ".", "UUID", ")", "->", "None", ":", "self", ".", "_validate_changeset", "(", "changeset_id", ")", "journal_data", "=", "self", ".", "journal", ".", "commit_changeset", "(", "changeset_id", ")", "if", "self", ".", "journal", ".", "is_empty", "(", ")", ":", "# Ensure the journal automatically restarts recording after", "# it has been persisted to the underlying db", "self", ".", "reset", "(", ")", "for", "key", ",", "value", "in", "journal_data", ".", "items", "(", ")", ":", "try", ":", "if", "value", "is", "DELETED_ENTRY", ":", "del", "self", ".", "wrapped_db", "[", "key", "]", "elif", "value", "is", "ERASE_CREATED_ENTRY", ":", "pass", "else", ":", "self", ".", "wrapped_db", "[", "key", "]", "=", "cast", "(", "bytes", ",", "value", ")", "except", "Exception", ":", "self", ".", "_reapply_changeset_to_journal", "(", "changeset_id", ",", "journal_data", ")", "raise" ]
Commits a given changeset. This merges the given changeset and all
        subsequent changesets into the previous changeset giving precedence to
        later changesets in case of any conflicting keys.

        If this is the base changeset then all changes will be written to
        the underlying database and the Journal starts a new recording.

        Typically, callers won't have access to the base changeset, because
        it is dropped during .reset() which is called in JournalDB().
[ "Commits", "a", "given", "changeset", ".", "This", "merges", "the", "given", "changeset", "and", "all", "subsequent", "changesets", "into", "the", "previous", "changeset", "giving", "precidence", "to", "later", "changesets", "in", "case", "of", "any", "conflicting", "keys", "." ]
python
train
44.8
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L63-L111
def load_data(self, mode="train", format="csv"): """ Load data from flat data files containing total track information and information about each timestep. The two sets are combined using merge operations on the Track IDs. Additional member information is gathered from the appropriate member file. Args: mode: "train" or "forecast" format: file format being used. Default is "csv" """ if mode in self.data.keys(): run_dates = pd.DatetimeIndex(start=self.start_dates[mode], end=self.end_dates[mode],freq="1D") run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date] print(run_date_str) all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*total_" + self.ensemble_name + "*." + format)) all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") + "*step_" + self.ensemble_name + "*." + format)) total_track_files = [] for track_file in all_total_track_files: file_date = track_file.split("_")[-1][:-4] if file_date in run_date_str: total_track_files.append(track_file) step_track_files = [] for step_file in all_step_track_files: file_date = step_file.split("_")[-1][:-4] if file_date in run_date_str: step_track_files.append(step_file) self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files), ignore_index=True) self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0) self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0) self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files), ignore_index=True) self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0) self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0) if mode == "forecast": self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID") self.data[mode]["member"] = pd.read_csv(self.member_files[mode]) self.data[mode]["combo"] = pd.merge(self.data[mode]["step"], self.data[mode]["total"], on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"]) self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"], self.data[mode]["member"], on="Ensemble_Member") self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"], self.data[mode]["member"], on="Ensemble_Member")
[ "def", "load_data", "(", "self", ",", "mode", "=", "\"train\"", ",", "format", "=", "\"csv\"", ")", ":", "if", "mode", "in", "self", ".", "data", ".", "keys", "(", ")", ":", "run_dates", "=", "pd", ".", "DatetimeIndex", "(", "start", "=", "self", ".", "start_dates", "[", "mode", "]", ",", "end", "=", "self", ".", "end_dates", "[", "mode", "]", ",", "freq", "=", "\"1D\"", ")", "run_date_str", "=", "[", "d", ".", "strftime", "(", "\"%Y%m%d-%H%M\"", ")", "for", "d", "in", "run_dates", ".", "date", "]", "print", "(", "run_date_str", ")", "all_total_track_files", "=", "sorted", "(", "glob", "(", "getattr", "(", "self", ",", "mode", "+", "\"_data_path\"", ")", "+", "\"*total_\"", "+", "self", ".", "ensemble_name", "+", "\"*.\"", "+", "format", ")", ")", "all_step_track_files", "=", "sorted", "(", "glob", "(", "getattr", "(", "self", ",", "mode", "+", "\"_data_path\"", ")", "+", "\"*step_\"", "+", "self", ".", "ensemble_name", "+", "\"*.\"", "+", "format", ")", ")", "total_track_files", "=", "[", "]", "for", "track_file", "in", "all_total_track_files", ":", "file_date", "=", "track_file", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", ":", "-", "4", "]", "if", "file_date", "in", "run_date_str", ":", "total_track_files", ".", "append", "(", "track_file", ")", "step_track_files", "=", "[", "]", "for", "step_file", "in", "all_step_track_files", ":", "file_date", "=", "step_file", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", ":", "-", "4", "]", "if", "file_date", "in", "run_date_str", ":", "step_track_files", ".", "append", "(", "step_file", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", "=", "pd", ".", "concat", "(", "map", "(", "pd", ".", "read_csv", ",", "total_track_files", ")", ",", "ignore_index", "=", "True", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ".", "fillna", "(", "value", "=", "0", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ".", "replace", "(", "[", "np", ".", "inf", ",", "-", "np", ".", "inf", "]", ",", "0", ")", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "pd", ".", "concat", "(", "map", "(", "pd", ".", "read_csv", ",", "step_track_files", ")", ",", "ignore_index", "=", "True", ")", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ".", "fillna", "(", "value", "=", "0", ")", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ".", "replace", "(", "[", "np", ".", "inf", ",", "-", "np", ".", "inf", "]", ",", "0", ")", "if", "mode", "==", "\"forecast\"", ":", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", "=", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ".", "drop_duplicates", "(", "\"Step_ID\"", ")", "self", ".", "data", "[", "mode", "]", "[", "\"member\"", "]", "=", "pd", ".", "read_csv", "(", "self", ".", "member_files", "[", "mode", "]", ")", "self", ".", "data", "[", "mode", "]", "[", "\"combo\"", "]", "=", "pd", ".", "merge", "(", "self", ".", "data", "[", "mode", "]", "[", "\"step\"", "]", ",", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ",", "on", "=", "[", "\"Track_ID\"", ",", "\"Ensemble_Name\"", ",", "\"Ensemble_Member\"", ",", "\"Run_Date\"", "]", ")", "self", ".", "data", "[", "mode", "]", "[", "\"combo\"", "]", "=", "pd", ".", 
"merge", "(", "self", ".", "data", "[", "mode", "]", "[", "\"combo\"", "]", ",", "self", ".", "data", "[", "mode", "]", "[", "\"member\"", "]", ",", "on", "=", "\"Ensemble_Member\"", ")", "self", ".", "data", "[", "mode", "]", "[", "\"total_group\"", "]", "=", "pd", ".", "merge", "(", "self", ".", "data", "[", "mode", "]", "[", "\"total\"", "]", ",", "self", ".", "data", "[", "mode", "]", "[", "\"member\"", "]", ",", "on", "=", "\"Ensemble_Member\"", ")" ]
Load data from flat data files containing total track information and information about each timestep. The two sets are combined using merge operations on the Track IDs. Additional member information is gathered from the appropriate member file. Args: mode: "train" or "forecast" format: file format being used. Default is "csv"
[ "Load", "data", "from", "flat", "data", "files", "containing", "total", "track", "information", "and", "information", "about", "each", "timestep", ".", "The", "two", "sets", "are", "combined", "using", "merge", "operations", "on", "the", "Track", "IDs", ".", "Additional", "member", "information", "is", "gathered", "from", "the", "appropriate", "member", "file", ".", "Args", ":", "mode", ":", "train", "or", "forecast", "format", ":", "file", "format", "being", "used", ".", "Default", "is", "csv" ]
python
train
64.306122
keenlabs/KeenClient-Python
keen/api.py
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/api.py#L97-L110
def post_event(self, event): """ Posts a single event to the Keen IO API. The write key must be set first. :param event: an Event to upload """ url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url, self.api_version, self.project_id, event.event_collection) headers = utilities.headers(self.write_key) payload = event.to_json() response = self.fulfill(HTTPMethods.POST, url, data=payload, headers=headers, timeout=self.post_timeout) self._error_handling(response)
[ "def", "post_event", "(", "self", ",", "event", ")", ":", "url", "=", "\"{0}/{1}/projects/{2}/events/{3}\"", ".", "format", "(", "self", ".", "base_url", ",", "self", ".", "api_version", ",", "self", ".", "project_id", ",", "event", ".", "event_collection", ")", "headers", "=", "utilities", ".", "headers", "(", "self", ".", "write_key", ")", "payload", "=", "event", ".", "to_json", "(", ")", "response", "=", "self", ".", "fulfill", "(", "HTTPMethods", ".", "POST", ",", "url", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "timeout", "=", "self", ".", "post_timeout", ")", "self", ".", "_error_handling", "(", "response", ")" ]
Posts a single event to the Keen IO API. The write key must be set first. :param event: an Event to upload
[ "Posts", "a", "single", "event", "to", "the", "Keen", "IO", "API", ".", "The", "write", "key", "must", "be", "set", "first", "." ]
python
train
45.785714
blockstack/virtualchain
virtualchain/lib/encoding.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/encoding.py#L133-L148
def num_to_var_int(x): """ (bitcoin-specific): convert an integer into a variable-length integer """ x = int(x) if x < 253: return from_int_to_byte(x) elif x < 65536: return from_int_to_byte(253) + encode(x, 256, 2)[::-1] elif x < 4294967296: return from_int_to_byte(254) + encode(x, 256, 4)[::-1] else: return from_int_to_byte(255) + encode(x, 256, 8)[::-1]
[ "def", "num_to_var_int", "(", "x", ")", ":", "x", "=", "int", "(", "x", ")", "if", "x", "<", "253", ":", "return", "from_int_to_byte", "(", "x", ")", "elif", "x", "<", "65536", ":", "return", "from_int_to_byte", "(", "253", ")", "+", "encode", "(", "x", ",", "256", ",", "2", ")", "[", ":", ":", "-", "1", "]", "elif", "x", "<", "4294967296", ":", "return", "from_int_to_byte", "(", "254", ")", "+", "encode", "(", "x", ",", "256", ",", "4", ")", "[", ":", ":", "-", "1", "]", "else", ":", "return", "from_int_to_byte", "(", "255", ")", "+", "encode", "(", "x", ",", "256", ",", "8", ")", "[", ":", ":", "-", "1", "]" ]
(bitcoin-specific): convert an integer into a variable-length integer
[ "(", "bitcoin", "-", "specific", ")", ":", "convert", "an", "integer", "into", "a", "variable", "-", "length", "integer" ]
python
train
25.625
rodluger/everest
everest/mathutils.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L104-L136
def Scatter(y, win=13, remove_outliers=False): ''' Return the scatter in ppm based on the median running standard deviation for a window size of :py:obj:`win` = 13 cadences (for K2, this is ~6.5 hours, as in VJ14). :param ndarray y: The array whose CDPP is to be computed :param int win: The window size in cadences. Default `13` :param bool remove_outliers: Clip outliers at 5 sigma before computing \ the CDPP? Default `False` ''' if remove_outliers: # Remove 5-sigma outliers from data # smoothed on a 1 day timescale if len(y) >= 50: ys = y - Smooth(y, 50) else: ys = y M = np.nanmedian(ys) MAD = 1.4826 * np.nanmedian(np.abs(ys - M)) out = [] for i, _ in enumerate(y): if (ys[i] > M + 5 * MAD) or (ys[i] < M - 5 * MAD): out.append(i) out = np.array(out, dtype=int) y = np.delete(y, out) if len(y): return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win) for yi in Chunks(y, win, all=True)]) else: return np.nan
[ "def", "Scatter", "(", "y", ",", "win", "=", "13", ",", "remove_outliers", "=", "False", ")", ":", "if", "remove_outliers", ":", "# Remove 5-sigma outliers from data", "# smoothed on a 1 day timescale", "if", "len", "(", "y", ")", ">=", "50", ":", "ys", "=", "y", "-", "Smooth", "(", "y", ",", "50", ")", "else", ":", "ys", "=", "y", "M", "=", "np", ".", "nanmedian", "(", "ys", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "ys", "-", "M", ")", ")", "out", "=", "[", "]", "for", "i", ",", "_", "in", "enumerate", "(", "y", ")", ":", "if", "(", "ys", "[", "i", "]", ">", "M", "+", "5", "*", "MAD", ")", "or", "(", "ys", "[", "i", "]", "<", "M", "-", "5", "*", "MAD", ")", ":", "out", ".", "append", "(", "i", ")", "out", "=", "np", ".", "array", "(", "out", ",", "dtype", "=", "int", ")", "y", "=", "np", ".", "delete", "(", "y", ",", "out", ")", "if", "len", "(", "y", ")", ":", "return", "1.e6", "*", "np", ".", "nanmedian", "(", "[", "np", ".", "std", "(", "yi", ")", "/", "np", ".", "sqrt", "(", "win", ")", "for", "yi", "in", "Chunks", "(", "y", ",", "win", ",", "all", "=", "True", ")", "]", ")", "else", ":", "return", "np", ".", "nan" ]
Return the scatter in ppm based on the median running standard deviation for a window size of :py:obj:`win` = 13 cadences (for K2, this is ~6.5 hours, as in VJ14). :param ndarray y: The array whose CDPP is to be computed :param int win: The window size in cadences. Default `13` :param bool remove_outliers: Clip outliers at 5 sigma before computing \ the CDPP? Default `False`
[ "Return", "the", "scatter", "in", "ppm", "based", "on", "the", "median", "running", "standard", "deviation", "for", "a", "window", "size", "of", ":", "py", ":", "obj", ":", "win", "=", "13", "cadences", "(", "for", "K2", "this", "is", "~6", ".", "5", "hours", "as", "in", "VJ14", ")", "." ]
python
train
34.030303
bihealth/vcfpy
vcfpy/parser.py
https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/parser.py#L777-L789
def _check_samples_line(klass, arr):
        """Perform additional check on samples line"""
        if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
            if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
                raise exceptions.IncorrectVCFFormat(
                    "Sample header line indicates no sample but does not "
                    "equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
                )
        elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
            raise exceptions.IncorrectVCFFormat(
                'Sample header line (starting with "#CHROM") does not '
                "start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
            )
[ "def", "_check_samples_line", "(", "klass", ",", "arr", ")", ":", "if", "len", "(", "arr", ")", "<=", "len", "(", "REQUIRE_NO_SAMPLE_HEADER", ")", ":", "if", "tuple", "(", "arr", ")", "!=", "REQUIRE_NO_SAMPLE_HEADER", ":", "raise", "exceptions", ".", "IncorrectVCFFormat", "(", "\"Sample header line indicates no sample but does not \"", "\"equal required prefix {}\"", ".", "format", "(", "\"\\t\"", ".", "join", "(", "REQUIRE_NO_SAMPLE_HEADER", ")", ")", ")", "elif", "tuple", "(", "arr", "[", ":", "len", "(", "REQUIRE_SAMPLE_HEADER", ")", "]", ")", "!=", "REQUIRE_SAMPLE_HEADER", ":", "raise", "exceptions", ".", "IncorrectVCFFormat", "(", "'Sample header line (starting with \"#CHROM\") does not '", "\"start with required prefix {}\"", ".", "format", "(", "\"\\t\"", ".", "join", "(", "REQUIRE_SAMPLE_HEADER", ")", ")", ")" ]
Perform additional check on samples line
[ "Peform", "additional", "check", "on", "samples", "line" ]
python
train
56
google/grr
grr/server/grr_response_server/databases/mem_clients.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_clients.py#L220-L228
def ListClientsForKeywords(self, keywords, start_time=None): """Lists the clients associated with keywords.""" res = {kw: [] for kw in keywords} for kw in keywords: for client_id, timestamp in iteritems(self.keywords.get(kw, {})): if start_time is not None and timestamp < start_time: continue res[kw].append(client_id) return res
[ "def", "ListClientsForKeywords", "(", "self", ",", "keywords", ",", "start_time", "=", "None", ")", ":", "res", "=", "{", "kw", ":", "[", "]", "for", "kw", "in", "keywords", "}", "for", "kw", "in", "keywords", ":", "for", "client_id", ",", "timestamp", "in", "iteritems", "(", "self", ".", "keywords", ".", "get", "(", "kw", ",", "{", "}", ")", ")", ":", "if", "start_time", "is", "not", "None", "and", "timestamp", "<", "start_time", ":", "continue", "res", "[", "kw", "]", ".", "append", "(", "client_id", ")", "return", "res" ]
Lists the clients associated with keywords.
[ "Lists", "the", "clients", "associated", "with", "keywords", "." ]
python
train
41.111111
alex-kostirin/pyatomac
atomac/AXClasses.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L98-L120
def getFrontmostApp(cls): """Get the current frontmost application. Raise a ValueError exception if no GUI applications are found. """ # Refresh the runningApplications list apps = cls._getRunningApps() for app in apps: pid = app.processIdentifier() ref = cls.getAppRefByPid(pid) try: if ref.AXFrontmost: return ref except (_a11y.ErrorUnsupported, _a11y.ErrorCannotComplete, _a11y.ErrorAPIDisabled, _a11y.ErrorNotImplemented): # Some applications do not have an explicit GUI # and so will not have an AXFrontmost attribute # Trying to read attributes from Google Chrome Helper returns # ErrorAPIDisabled for some reason - opened radar bug 12837995 pass raise ValueError('No GUI application found.')
[ "def", "getFrontmostApp", "(", "cls", ")", ":", "# Refresh the runningApplications list", "apps", "=", "cls", ".", "_getRunningApps", "(", ")", "for", "app", "in", "apps", ":", "pid", "=", "app", ".", "processIdentifier", "(", ")", "ref", "=", "cls", ".", "getAppRefByPid", "(", "pid", ")", "try", ":", "if", "ref", ".", "AXFrontmost", ":", "return", "ref", "except", "(", "_a11y", ".", "ErrorUnsupported", ",", "_a11y", ".", "ErrorCannotComplete", ",", "_a11y", ".", "ErrorAPIDisabled", ",", "_a11y", ".", "ErrorNotImplemented", ")", ":", "# Some applications do not have an explicit GUI", "# and so will not have an AXFrontmost attribute", "# Trying to read attributes from Google Chrome Helper returns", "# ErrorAPIDisabled for some reason - opened radar bug 12837995", "pass", "raise", "ValueError", "(", "'No GUI application found.'", ")" ]
Get the current frontmost application. Raise a ValueError exception if no GUI applications are found.
[ "Get", "the", "current", "frontmost", "application", "." ]
python
valid
41.608696
orb-framework/orb
orb/core/model.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/model.py#L898-L929
def validate(self, columns=None): """ Validates the current record object to make sure it is ok to commit to the database. If the optional override dictionary is passed in, then it will use the given values vs. the one stored with this record object which can be useful to check to see if the record will be valid before it is committed. :param overrides | <dict> :return <bool> """ schema = self.schema() if not columns: ignore_flags = orb.Column.Flags.Virtual | orb.Column.Flags.ReadOnly columns = schema.columns(flags=~ignore_flags).values() use_indexes = True else: use_indexes = False # validate the column values values = self.values(key='column', columns=columns) for col, value in values.items(): if not col.validate(value): return False # valide the index values if use_indexes: for index in self.schema().indexes().values(): if not index.validate(self, values): return False return True
[ "def", "validate", "(", "self", ",", "columns", "=", "None", ")", ":", "schema", "=", "self", ".", "schema", "(", ")", "if", "not", "columns", ":", "ignore_flags", "=", "orb", ".", "Column", ".", "Flags", ".", "Virtual", "|", "orb", ".", "Column", ".", "Flags", ".", "ReadOnly", "columns", "=", "schema", ".", "columns", "(", "flags", "=", "~", "ignore_flags", ")", ".", "values", "(", ")", "use_indexes", "=", "True", "else", ":", "use_indexes", "=", "False", "# validate the column values", "values", "=", "self", ".", "values", "(", "key", "=", "'column'", ",", "columns", "=", "columns", ")", "for", "col", ",", "value", "in", "values", ".", "items", "(", ")", ":", "if", "not", "col", ".", "validate", "(", "value", ")", ":", "return", "False", "# valide the index values", "if", "use_indexes", ":", "for", "index", "in", "self", ".", "schema", "(", ")", ".", "indexes", "(", ")", ".", "values", "(", ")", ":", "if", "not", "index", ".", "validate", "(", "self", ",", "values", ")", ":", "return", "False", "return", "True" ]
Validates the current record object to make sure it is ok to commit to the database. If the optional override dictionary is passed in, then it will use the given values vs. the one stored with this record object which can be useful to check to see if the record will be valid before it is committed. :param overrides | <dict> :return <bool>
[ "Validates", "the", "current", "record", "object", "to", "make", "sure", "it", "is", "ok", "to", "commit", "to", "the", "database", ".", "If", "the", "optional", "override", "dictionary", "is", "passed", "in", "then", "it", "will", "use", "the", "given", "values", "vs", ".", "the", "one", "stored", "with", "this", "record", "object", "which", "can", "be", "useful", "to", "check", "to", "see", "if", "the", "record", "will", "be", "valid", "before", "it", "is", "committed", "." ]
python
train
35.625
saltstack/salt
salt/grains/core.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/core.py#L1559-L1584
def _parse_cpe_name(cpe): ''' Parse CPE_NAME data from the os-release Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe :param cpe: :return: ''' part = { 'o': 'operating system', 'h': 'hardware', 'a': 'application', } ret = {} cpe = (cpe or '').split(':') if len(cpe) > 4 and cpe[0] == 'cpe': if cpe[1].startswith('/'): # WFN to URI ret['vendor'], ret['product'], ret['version'] = cpe[2:5] ret['phase'] = cpe[5] if len(cpe) > 5 else None ret['part'] = part.get(cpe[1][1:]) elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]] ret['part'] = part.get(cpe[2]) return ret
[ "def", "_parse_cpe_name", "(", "cpe", ")", ":", "part", "=", "{", "'o'", ":", "'operating system'", ",", "'h'", ":", "'hardware'", ",", "'a'", ":", "'application'", ",", "}", "ret", "=", "{", "}", "cpe", "=", "(", "cpe", "or", "''", ")", ".", "split", "(", "':'", ")", "if", "len", "(", "cpe", ")", ">", "4", "and", "cpe", "[", "0", "]", "==", "'cpe'", ":", "if", "cpe", "[", "1", "]", ".", "startswith", "(", "'/'", ")", ":", "# WFN to URI", "ret", "[", "'vendor'", "]", ",", "ret", "[", "'product'", "]", ",", "ret", "[", "'version'", "]", "=", "cpe", "[", "2", ":", "5", "]", "ret", "[", "'phase'", "]", "=", "cpe", "[", "5", "]", "if", "len", "(", "cpe", ")", ">", "5", "else", "None", "ret", "[", "'part'", "]", "=", "part", ".", "get", "(", "cpe", "[", "1", "]", "[", "1", ":", "]", ")", "elif", "len", "(", "cpe", ")", "==", "13", "and", "cpe", "[", "1", "]", "==", "'2.3'", ":", "# WFN to a string", "ret", "[", "'vendor'", "]", ",", "ret", "[", "'product'", "]", ",", "ret", "[", "'version'", "]", ",", "ret", "[", "'phase'", "]", "=", "[", "x", "if", "x", "!=", "'*'", "else", "None", "for", "x", "in", "cpe", "[", "3", ":", "7", "]", "]", "ret", "[", "'part'", "]", "=", "part", ".", "get", "(", "cpe", "[", "2", "]", ")", "return", "ret" ]
Parse CPE_NAME data from the os-release Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe :param cpe: :return:
[ "Parse", "CPE_NAME", "data", "from", "the", "os", "-", "release" ]
python
train
32.884615
angr/angr
angr/analyses/ddg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/ddg.py#L1484-L1508
def _filter_defs_at_call_sites(self, defs):
        """
        If we are not tracing into the functions that are called in a real execution, we should properly filter the
        defs to account for the behavior of the skipped function at this call site.

        This function is a WIP. See TODOs inside.

        :param defs:
        :return:
        """

        # TODO: make definition killing architecture independent and calling convention independent
        # TODO: use information from a calling convention analysis
        filtered_defs = LiveDefinitions()

        for variable, locs in defs.items():
            if isinstance(variable, SimRegisterVariable):
                if self.project.arch.name == 'X86':
                    if variable.reg in (self.project.arch.registers['eax'][0],
                                        self.project.arch.registers['ecx'][0],
                                        self.project.arch.registers['edx'][0]):
                        continue

            filtered_defs.add_defs(variable, locs)

        return filtered_defs
[ "def", "_filter_defs_at_call_sites", "(", "self", ",", "defs", ")", ":", "# TODO: make definition killing architecture independent and calling convention independent", "# TODO: use information from a calling convention analysis", "filtered_defs", "=", "LiveDefinitions", "(", ")", "for", "variable", ",", "locs", "in", "defs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "variable", ",", "SimRegisterVariable", ")", ":", "if", "self", ".", "project", ".", "arch", ".", "name", "==", "'X86'", ":", "if", "variable", ".", "reg", "in", "(", "self", ".", "project", ".", "arch", ".", "registers", "[", "'eax'", "]", "[", "0", "]", ",", "self", ".", "project", ".", "arch", ".", "registers", "[", "'ecx'", "]", "[", "0", "]", ",", "self", ".", "project", ".", "arch", ".", "registers", "[", "'edx'", "]", "[", "0", "]", ")", ":", "continue", "filtered_defs", ".", "add_defs", "(", "variable", ",", "locs", ")", "return", "filtered_defs" ]
If we are not tracing into the functions that are called in a real execution, we should properly filter the
        defs to account for the behavior of the skipped function at this call site.

        This function is a WIP. See TODOs inside.

        :param defs:
        :return:
[ "If", "we", "are", "not", "tracing", "into", "the", "function", "that", "are", "called", "in", "a", "real", "execution", "we", "should", "properly", "filter", "the", "defs", "to", "account", "for", "the", "behavior", "of", "the", "skipped", "function", "at", "this", "call", "site", "." ]
python
train
41.96
matllubos/django-is-core
is_core/forms/widgets.py
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/forms/widgets.py#L52-L55
def build_attrs(self, *args, **kwargs): "Helper function for building an attribute dictionary." self.attrs = self.widget.build_attrs(*args, **kwargs) return self.attrs
[ "def", "build_attrs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "attrs", "=", "self", ".", "widget", ".", "build_attrs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "attrs" ]
Helper function for building an attribute dictionary.
[ "Helper", "function", "for", "building", "an", "attribute", "dictionary", "." ]
python
train
47
tensorflow/probability
tensorflow_probability/python/vi/csiszar_divergence.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L51-L118
def amari_alpha(logu, alpha=1., self_normalized=False, name=None): """The Amari-alpha Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True`, the Amari-alpha Csiszar-function is: ```none f(u) = { -log(u) + (u - 1), alpha = 0 { u log(u) - (u - 1), alpha = 1 { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise ``` When `self_normalized = False` the `(u - 1)` terms are omitted. Warning: when `alpha != 0` and/or `self_normalized = True` this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. For more information, see: A. Cichocki and S. Amari. "Families of Alpha-Beta-and GammaDivergences: Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp. 1532-1568, 2010. Args: logu: `float`-like `Tensor` representing `log(u)` from above. alpha: `float`-like Python scalar. (See Mathematical Details for meaning.) self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even when `p, q` are unnormalized measures. name: Python `str` name prefixed to Ops created by this function. Returns: amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. Raises: TypeError: if `alpha` is `None` or a `Tensor`. TypeError: if `self_normalized` is `None` or a `Tensor`. """ with tf.compat.v1.name_scope(name, "amari_alpha", [logu]): if alpha is None or tf.is_tensor(alpha): raise TypeError("`alpha` cannot be `None` or `Tensor` type.") if (self_normalized is None or tf.is_tensor(self_normalized)): raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.") logu = tf.convert_to_tensor(value=logu, name="logu") if alpha == 0.: f = -logu elif alpha == 1.: f = tf.exp(logu) * logu else: f = tf.math.expm1(alpha * logu) / (alpha * (alpha - 1.)) if not self_normalized: return f if alpha == 0.: return f + tf.math.expm1(logu) elif alpha == 1.: return f - tf.math.expm1(logu) else: return f - tf.math.expm1(logu) / (alpha - 1.)
[ "def", "amari_alpha", "(", "logu", ",", "alpha", "=", "1.", ",", "self_normalized", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"amari_alpha\"", ",", "[", "logu", "]", ")", ":", "if", "alpha", "is", "None", "or", "tf", ".", "is_tensor", "(", "alpha", ")", ":", "raise", "TypeError", "(", "\"`alpha` cannot be `None` or `Tensor` type.\"", ")", "if", "(", "self_normalized", "is", "None", "or", "tf", ".", "is_tensor", "(", "self_normalized", ")", ")", ":", "raise", "TypeError", "(", "\"`self_normalized` cannot be `None` or `Tensor` type.\"", ")", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "if", "alpha", "==", "0.", ":", "f", "=", "-", "logu", "elif", "alpha", "==", "1.", ":", "f", "=", "tf", ".", "exp", "(", "logu", ")", "*", "logu", "else", ":", "f", "=", "tf", ".", "math", ".", "expm1", "(", "alpha", "*", "logu", ")", "/", "(", "alpha", "*", "(", "alpha", "-", "1.", ")", ")", "if", "not", "self_normalized", ":", "return", "f", "if", "alpha", "==", "0.", ":", "return", "f", "+", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "elif", "alpha", "==", "1.", ":", "return", "f", "-", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "else", ":", "return", "f", "-", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "/", "(", "alpha", "-", "1.", ")" ]
The Amari-alpha Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` When `self_normalized = True`, the Amari-alpha Csiszar-function is: ```none f(u) = { -log(u) + (u - 1), alpha = 0 { u log(u) - (u - 1), alpha = 1 { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise ``` When `self_normalized = False` the `(u - 1)` terms are omitted. Warning: when `alpha != 0` and/or `self_normalized = True` this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. For more information, see: A. Cichocki and S. Amari. "Families of Alpha-Beta-and GammaDivergences: Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp. 1532-1568, 2010. Args: logu: `float`-like `Tensor` representing `log(u)` from above. alpha: `float`-like Python scalar. (See Mathematical Details for meaning.) self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even when `p, q` are unnormalized measures. name: Python `str` name prefixed to Ops created by this function. Returns: amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. Raises: TypeError: if `alpha` is `None` or a `Tensor`. TypeError: if `self_normalized` is `None` or a `Tensor`.
[ "The", "Amari", "-", "alpha", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
python
test
33.352941
quantumlib/Cirq
cirq/google/engine/engine.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/engine/engine.py#L414-L428
def get_job(self, job_resource_name: str) -> Dict: """Returns metadata about a previously created job. See get_job_result if you want the results of the job and not just metadata about the job. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. Returns: A dictionary containing the metadata. """ return self.service.projects().programs().jobs().get( name=job_resource_name).execute()
[ "def", "get_job", "(", "self", ",", "job_resource_name", ":", "str", ")", "->", "Dict", ":", "return", "self", ".", "service", ".", "projects", "(", ")", ".", "programs", "(", ")", ".", "jobs", "(", ")", ".", "get", "(", "name", "=", "job_resource_name", ")", ".", "execute", "(", ")" ]
Returns metadata about a previously created job. See get_job_result if you want the results of the job and not just metadata about the job. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. Returns: A dictionary containing the metadata.
[ "Returns", "metadata", "about", "a", "previously", "created", "job", "." ]
python
train
35.466667
spyder-ide/spyder
spyder/plugins/breakpoints/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/breakpoints/plugin.py#L77-L96
def register_plugin(self): """Register plugin in Spyder's main window""" self.breakpoints.edit_goto.connect(self.main.editor.load) #self.redirect_stdio.connect(self.main.redirect_internalshell_stdio) self.breakpoints.clear_all_breakpoints.connect( self.main.editor.clear_all_breakpoints) self.breakpoints.clear_breakpoint.connect( self.main.editor.clear_breakpoint) self.main.editor.breakpoints_saved.connect(self.breakpoints.set_data) self.breakpoints.set_or_edit_conditional_breakpoint.connect( self.main.editor.set_or_edit_conditional_breakpoint) self.main.add_dockwidget(self) list_action = create_action(self, _("List breakpoints"), triggered=self.show) list_action.setEnabled(True) pos = self.main.debug_menu_actions.index('list_breakpoints') self.main.debug_menu_actions.insert(pos, list_action) self.main.editor.pythonfile_dependent_actions += [list_action]
[ "def", "register_plugin", "(", "self", ")", ":", "self", ".", "breakpoints", ".", "edit_goto", ".", "connect", "(", "self", ".", "main", ".", "editor", ".", "load", ")", "#self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)\r", "self", ".", "breakpoints", ".", "clear_all_breakpoints", ".", "connect", "(", "self", ".", "main", ".", "editor", ".", "clear_all_breakpoints", ")", "self", ".", "breakpoints", ".", "clear_breakpoint", ".", "connect", "(", "self", ".", "main", ".", "editor", ".", "clear_breakpoint", ")", "self", ".", "main", ".", "editor", ".", "breakpoints_saved", ".", "connect", "(", "self", ".", "breakpoints", ".", "set_data", ")", "self", ".", "breakpoints", ".", "set_or_edit_conditional_breakpoint", ".", "connect", "(", "self", ".", "main", ".", "editor", ".", "set_or_edit_conditional_breakpoint", ")", "self", ".", "main", ".", "add_dockwidget", "(", "self", ")", "list_action", "=", "create_action", "(", "self", ",", "_", "(", "\"List breakpoints\"", ")", ",", "triggered", "=", "self", ".", "show", ")", "list_action", ".", "setEnabled", "(", "True", ")", "pos", "=", "self", ".", "main", ".", "debug_menu_actions", ".", "index", "(", "'list_breakpoints'", ")", "self", ".", "main", ".", "debug_menu_actions", ".", "insert", "(", "pos", ",", "list_action", ")", "self", ".", "main", ".", "editor", ".", "pythonfile_dependent_actions", "+=", "[", "list_action", "]" ]
Register plugin in Spyder's main window
[ "Register", "plugin", "in", "Spyder", "s", "main", "window" ]
python
train
55.05
tensorflow/probability
tensorflow_probability/python/stats/sample_stats.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L686-L694
def _squeeze(x, axis): """A version of squeeze that works with dynamic axis.""" x = tf.convert_to_tensor(value=x, name='x') if axis is None: return tf.squeeze(x, axis=None) axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32) axis += tf.zeros([1], dtype=axis.dtype) # Make axis at least 1d. keep_axis, _ = tf.compat.v1.setdiff1d(tf.range(0, tf.rank(x)), axis) return tf.reshape(x, tf.gather(tf.shape(input=x), keep_axis))
[ "def", "_squeeze", "(", "x", ",", "axis", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "'x'", ")", "if", "axis", "is", "None", ":", "return", "tf", ".", "squeeze", "(", "x", ",", "axis", "=", "None", ")", "axis", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "axis", ",", "name", "=", "'axis'", ",", "dtype", "=", "tf", ".", "int32", ")", "axis", "+=", "tf", ".", "zeros", "(", "[", "1", "]", ",", "dtype", "=", "axis", ".", "dtype", ")", "# Make axis at least 1d.", "keep_axis", ",", "_", "=", "tf", ".", "compat", ".", "v1", ".", "setdiff1d", "(", "tf", ".", "range", "(", "0", ",", "tf", ".", "rank", "(", "x", ")", ")", ",", "axis", ")", "return", "tf", ".", "reshape", "(", "x", ",", "tf", ".", "gather", "(", "tf", ".", "shape", "(", "input", "=", "x", ")", ",", "keep_axis", ")", ")" ]
A version of squeeze that works with dynamic axis.
[ "A", "version", "of", "squeeze", "that", "works", "with", "dynamic", "axis", "." ]
python
test
49.777778
mcieslik-mctp/papy
src/numap/NuMap.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/numap/NuMap.py#L766-L842
def imports(modules, forgive=False): """ Should be used as a decorator to *attach* import statments to function definitions. These imports are added to the global (i.e. module-level of the decorated function) namespace. Two forms of import statements are supported (in the following examples ``foo``, ``bar``, ``oof, and ``rab`` are modules not classes or functions):: import foo, bar # -> @imports(['foo', 'bar']) import foo.oof as oof import bar.rab as rab # -> @imports(['foo.oof', 'bar.rab']) It provides support for alternatives:: try: import foo except ImportError: import bar which is expressed as:: @imports(['foo,bar']) or alternatively:: try: import foo.oof as oof except ImportError: import bar.rab as oof becomes:: @imports(['foo.oof,bar.rab']) This import is available in the body of the function as ``oof`` All needed imports should be attached for every function (even if two function are in the same module and have the same ``globals``) Arguments: - modules (``list``) A list of modules in the following forms ``['foo', 'bar', ..., 'baz']`` or ``['foo.oof', 'bar.rab', ..., 'baz.zab']`` - forgive (``bool``) [default: ``False``] If ``True`` will not raise `ImportError`` """ def wrap(f): if modules: # attach import to function setattr(f, 'imports', modules) for alternatives in modules: # alternatives are comma seperated alternatives = alternatives.split(',') # we import the part of the import X.Y.Z -> Z mod_name = alternatives[0].split('.')[-1] for mod in alternatives: mod = mod.strip().split('.') try: if len(mod) == 1: module = __import__(mod[0]) else: module = getattr(__import__('.'.join(mod[:-1]), \ fromlist=[mod[-1]]), mod[-1]) f.func_globals[mod_name] = module break # import only one except ImportError: pass else: if forgive: # no break -> no import warnings.warn('Failed to import %s' % alternatives) else: raise ImportError('Failed to import %s' % alternatives) return f return wrap
[ "def", "imports", "(", "modules", ",", "forgive", "=", "False", ")", ":", "def", "wrap", "(", "f", ")", ":", "if", "modules", ":", "# attach import to function", "setattr", "(", "f", ",", "'imports'", ",", "modules", ")", "for", "alternatives", "in", "modules", ":", "# alternatives are comma seperated", "alternatives", "=", "alternatives", ".", "split", "(", "','", ")", "# we import the part of the import X.Y.Z -> Z", "mod_name", "=", "alternatives", "[", "0", "]", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "for", "mod", "in", "alternatives", ":", "mod", "=", "mod", ".", "strip", "(", ")", ".", "split", "(", "'.'", ")", "try", ":", "if", "len", "(", "mod", ")", "==", "1", ":", "module", "=", "__import__", "(", "mod", "[", "0", "]", ")", "else", ":", "module", "=", "getattr", "(", "__import__", "(", "'.'", ".", "join", "(", "mod", "[", ":", "-", "1", "]", ")", ",", "fromlist", "=", "[", "mod", "[", "-", "1", "]", "]", ")", ",", "mod", "[", "-", "1", "]", ")", "f", ".", "func_globals", "[", "mod_name", "]", "=", "module", "break", "# import only one", "except", "ImportError", ":", "pass", "else", ":", "if", "forgive", ":", "# no break -> no import", "warnings", ".", "warn", "(", "'Failed to import %s'", "%", "alternatives", ")", "else", ":", "raise", "ImportError", "(", "'Failed to import %s'", "%", "alternatives", ")", "return", "f", "return", "wrap" ]
Should be used as a decorator to *attach* import statments to function definitions. These imports are added to the global (i.e. module-level of the decorated function) namespace. Two forms of import statements are supported (in the following examples ``foo``, ``bar``, ``oof, and ``rab`` are modules not classes or functions):: import foo, bar # -> @imports(['foo', 'bar']) import foo.oof as oof import bar.rab as rab # -> @imports(['foo.oof', 'bar.rab']) It provides support for alternatives:: try: import foo except ImportError: import bar which is expressed as:: @imports(['foo,bar']) or alternatively:: try: import foo.oof as oof except ImportError: import bar.rab as oof becomes:: @imports(['foo.oof,bar.rab']) This import is available in the body of the function as ``oof`` All needed imports should be attached for every function (even if two function are in the same module and have the same ``globals``) Arguments: - modules (``list``) A list of modules in the following forms ``['foo', 'bar', ..., 'baz']`` or ``['foo.oof', 'bar.rab', ..., 'baz.zab']`` - forgive (``bool``) [default: ``False``] If ``True`` will not raise `ImportError``
[ "Should", "be", "used", "as", "a", "decorator", "to", "*", "attach", "*", "import", "statments", "to", "function", "definitions", ".", "These", "imports", "are", "added", "to", "the", "global", "(", "i", ".", "e", ".", "module", "-", "level", "of", "the", "decorated", "function", ")", "namespace", ".", "Two", "forms", "of", "import", "statements", "are", "supported", "(", "in", "the", "following", "examples", "foo", "bar", "oof", "and", "rab", "are", "modules", "not", "classes", "or", "functions", ")", "::" ]
python
train
35.311688
markchil/gptools
gptools/utils.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1626-L1656
def generate_set_partitions(set_): """Generate all of the partitions of a set. This is a helper function that utilizes the restricted growth strings from :py:func:`generate_set_partition_strings`. The partitions are returned in lexicographic order. Parameters ---------- set_ : :py:class:`Array` or other Array-like, (`m`,) The set to find the partitions of. Returns ------- partitions : list of lists of :py:class:`Array` The number of elements in the outer list is equal to the number of partitions, which is the len(`m`)^th Bell number. Each of the inner lists corresponds to a single possible partition. The length of an inner list is therefore equal to the number of blocks. Each of the arrays in an inner list is hence a block. """ set_ = scipy.asarray(set_) strings = generate_set_partition_strings(len(set_)) partitions = [] for string in strings: blocks = [] for block_num in scipy.unique(string): blocks.append(set_[string == block_num]) partitions.append(blocks) return partitions
[ "def", "generate_set_partitions", "(", "set_", ")", ":", "set_", "=", "scipy", ".", "asarray", "(", "set_", ")", "strings", "=", "generate_set_partition_strings", "(", "len", "(", "set_", ")", ")", "partitions", "=", "[", "]", "for", "string", "in", "strings", ":", "blocks", "=", "[", "]", "for", "block_num", "in", "scipy", ".", "unique", "(", "string", ")", ":", "blocks", ".", "append", "(", "set_", "[", "string", "==", "block_num", "]", ")", "partitions", ".", "append", "(", "blocks", ")", "return", "partitions" ]
Generate all of the partitions of a set. This is a helper function that utilizes the restricted growth strings from :py:func:`generate_set_partition_strings`. The partitions are returned in lexicographic order. Parameters ---------- set_ : :py:class:`Array` or other Array-like, (`m`,) The set to find the partitions of. Returns ------- partitions : list of lists of :py:class:`Array` The number of elements in the outer list is equal to the number of partitions, which is the len(`m`)^th Bell number. Each of the inner lists corresponds to a single possible partition. The length of an inner list is therefore equal to the number of blocks. Each of the arrays in an inner list is hence a block.
[ "Generate", "all", "of", "the", "partitions", "of", "a", "set", ".", "This", "is", "a", "helper", "function", "that", "utilizes", "the", "restricted", "growth", "strings", "from", ":", "py", ":", "func", ":", "generate_set_partition_strings", ".", "The", "partitions", "are", "returned", "in", "lexicographic", "order", ".", "Parameters", "----------", "set_", ":", ":", "py", ":", "class", ":", "Array", "or", "other", "Array", "-", "like", "(", "m", ")", "The", "set", "to", "find", "the", "partitions", "of", ".", "Returns", "-------", "partitions", ":", "list", "of", "lists", "of", ":", "py", ":", "class", ":", "Array", "The", "number", "of", "elements", "in", "the", "outer", "list", "is", "equal", "to", "the", "number", "of", "partitions", "which", "is", "the", "len", "(", "m", ")", "^th", "Bell", "number", ".", "Each", "of", "the", "inner", "lists", "corresponds", "to", "a", "single", "possible", "partition", ".", "The", "length", "of", "an", "inner", "list", "is", "therefore", "equal", "to", "the", "number", "of", "blocks", ".", "Each", "of", "the", "arrays", "in", "an", "inner", "list", "is", "hence", "a", "block", "." ]
python
train
36.419355
nickoala/telepot
telepot/delegate.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/delegate.py#L324-L337
def pair(seeders, delegator_factory, *args, **kwargs): """ The basic pair producer. :return: a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple. :param seeders: If it is a seeder function or a list of one seeder function, it is returned as the final seeder. If it is a list of more than one seeder function, they are chained together before returned as the final seeder. """ return (chain(*seeders) if len(seeders) > 1 else seeders[0], delegator_factory(*args, **kwargs))
[ "def", "pair", "(", "seeders", ",", "delegator_factory", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "(", "chain", "(", "*", "seeders", ")", "if", "len", "(", "seeders", ")", ">", "1", "else", "seeders", "[", "0", "]", ",", "delegator_factory", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
The basic pair producer. :return: a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple. :param seeders: If it is a seeder function or a list of one seeder function, it is returned as the final seeder. If it is a list of more than one seeder function, they are chained together before returned as the final seeder.
[ "The", "basic", "pair", "producer", "." ]
python
train
38.071429
redhat-cip/python-dciclient
dciclient/v1/api/base.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L45-L66
def iter(context, resource, **kwargs): """List all resources""" data = utils.sanitize_kwargs(**kwargs) id = data.pop('id', None) subresource = data.pop('subresource', None) data['limit'] = data.get('limit', 20) if subresource: uri = '%s/%s/%s/%s' % (context.dci_cs_api, resource, id, subresource) resource = subresource else: uri = '%s/%s' % (context.dci_cs_api, resource) data['offset'] = 0 while True: j = context.session.get(uri, timeout=HTTP_TIMEOUT, params=data).json() if len(j[resource]): for i in j[resource]: yield i else: break data['offset'] += data['limit']
[ "def", "iter", "(", "context", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "data", "=", "utils", ".", "sanitize_kwargs", "(", "*", "*", "kwargs", ")", "id", "=", "data", ".", "pop", "(", "'id'", ",", "None", ")", "subresource", "=", "data", ".", "pop", "(", "'subresource'", ",", "None", ")", "data", "[", "'limit'", "]", "=", "data", ".", "get", "(", "'limit'", ",", "20", ")", "if", "subresource", ":", "uri", "=", "'%s/%s/%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ",", "id", ",", "subresource", ")", "resource", "=", "subresource", "else", ":", "uri", "=", "'%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ")", "data", "[", "'offset'", "]", "=", "0", "while", "True", ":", "j", "=", "context", ".", "session", ".", "get", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ",", "params", "=", "data", ")", ".", "json", "(", ")", "if", "len", "(", "j", "[", "resource", "]", ")", ":", "for", "i", "in", "j", "[", "resource", "]", ":", "yield", "i", "else", ":", "break", "data", "[", "'offset'", "]", "+=", "data", "[", "'limit'", "]" ]
List all resources
[ "List", "all", "resources" ]
python
train
31
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/consoleapp.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/consoleapp.py#L202-L246
def init_connection_file(self): """find the connection file, and load the info if found. The current working directory and the current profile's security directory will be searched for the file if it is not given by absolute path. When attempting to connect to an existing kernel and the `--existing` argument does not match an existing file, it will be interpreted as a fileglob, and the matching file in the current profile's security dir with the latest access time will be used. After this method is called, self.connection_file contains the *full path* to the connection file, never just its name. """ if self.existing: try: cf = find_connection_file(self.existing) except Exception: self.log.critical("Could not find existing kernel connection file %s", self.existing) self.exit(1) self.log.info("Connecting to existing kernel: %s" % cf) self.connection_file = cf else: # not existing, check if we are going to write the file # and ensure that self.connection_file is a full path, not just the shortname try: cf = find_connection_file(self.connection_file) except Exception: # file might not exist if self.connection_file == os.path.basename(self.connection_file): # just shortname, put it in security dir cf = os.path.join(self.profile_dir.security_dir, self.connection_file) else: cf = self.connection_file self.connection_file = cf # should load_connection_file only be used for existing? # as it is now, this allows reusing ports if an existing # file is requested try: self.load_connection_file() except Exception: self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) self.exit(1)
[ "def", "init_connection_file", "(", "self", ")", ":", "if", "self", ".", "existing", ":", "try", ":", "cf", "=", "find_connection_file", "(", "self", ".", "existing", ")", "except", "Exception", ":", "self", ".", "log", ".", "critical", "(", "\"Could not find existing kernel connection file %s\"", ",", "self", ".", "existing", ")", "self", ".", "exit", "(", "1", ")", "self", ".", "log", ".", "info", "(", "\"Connecting to existing kernel: %s\"", "%", "cf", ")", "self", ".", "connection_file", "=", "cf", "else", ":", "# not existing, check if we are going to write the file", "# and ensure that self.connection_file is a full path, not just the shortname", "try", ":", "cf", "=", "find_connection_file", "(", "self", ".", "connection_file", ")", "except", "Exception", ":", "# file might not exist", "if", "self", ".", "connection_file", "==", "os", ".", "path", ".", "basename", "(", "self", ".", "connection_file", ")", ":", "# just shortname, put it in security dir", "cf", "=", "os", ".", "path", ".", "join", "(", "self", ".", "profile_dir", ".", "security_dir", ",", "self", ".", "connection_file", ")", "else", ":", "cf", "=", "self", ".", "connection_file", "self", ".", "connection_file", "=", "cf", "# should load_connection_file only be used for existing?", "# as it is now, this allows reusing ports if an existing", "# file is requested", "try", ":", "self", ".", "load_connection_file", "(", ")", "except", "Exception", ":", "self", ".", "log", ".", "error", "(", "\"Failed to load connection file: %r\"", ",", "self", ".", "connection_file", ",", "exc_info", "=", "True", ")", "self", ".", "exit", "(", "1", ")" ]
find the connection file, and load the info if found. The current working directory and the current profile's security directory will be searched for the file if it is not given by absolute path. When attempting to connect to an existing kernel and the `--existing` argument does not match an existing file, it will be interpreted as a fileglob, and the matching file in the current profile's security dir with the latest access time will be used. After this method is called, self.connection_file contains the *full path* to the connection file, never just its name.
[ "find", "the", "connection", "file", "and", "load", "the", "info", "if", "found", ".", "The", "current", "working", "directory", "and", "the", "current", "profile", "s", "security", "directory", "will", "be", "searched", "for", "the", "file", "if", "it", "is", "not", "given", "by", "absolute", "path", ".", "When", "attempting", "to", "connect", "to", "an", "existing", "kernel", "and", "the", "--", "existing", "argument", "does", "not", "match", "an", "existing", "file", "it", "will", "be", "interpreted", "as", "a", "fileglob", "and", "the", "matching", "file", "in", "the", "current", "profile", "s", "security", "dir", "with", "the", "latest", "access", "time", "will", "be", "used", ".", "After", "this", "method", "is", "called", "self", ".", "connection_file", "contains", "the", "*", "full", "path", "*", "to", "the", "connection", "file", "never", "just", "its", "name", "." ]
python
test
46.311111
pypa/pipenv
pipenv/vendor/cerberus/validator.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/validator.py#L1036-L1047
def _validate_forbidden(self, forbidden_values, field, value): """ {'type': 'list'} """ if isinstance(value, _str_type): if value in forbidden_values: self._error(field, errors.FORBIDDEN_VALUE, value) elif isinstance(value, Sequence): forbidden = set(value) & set(forbidden_values) if forbidden: self._error(field, errors.FORBIDDEN_VALUES, list(forbidden)) elif isinstance(value, int): if value in forbidden_values: self._error(field, errors.FORBIDDEN_VALUE, value)
[ "def", "_validate_forbidden", "(", "self", ",", "forbidden_values", ",", "field", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "_str_type", ")", ":", "if", "value", "in", "forbidden_values", ":", "self", ".", "_error", "(", "field", ",", "errors", ".", "FORBIDDEN_VALUE", ",", "value", ")", "elif", "isinstance", "(", "value", ",", "Sequence", ")", ":", "forbidden", "=", "set", "(", "value", ")", "&", "set", "(", "forbidden_values", ")", "if", "forbidden", ":", "self", ".", "_error", "(", "field", ",", "errors", ".", "FORBIDDEN_VALUES", ",", "list", "(", "forbidden", ")", ")", "elif", "isinstance", "(", "value", ",", "int", ")", ":", "if", "value", "in", "forbidden_values", ":", "self", ".", "_error", "(", "field", ",", "errors", ".", "FORBIDDEN_VALUE", ",", "value", ")" ]
{'type': 'list'}
[ "{", "type", ":", "list", "}" ]
python
train
48.5
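For context on the record above: this internal method backs the 'forbidden' rule in Cerberus schemas. A minimal usage sketch, assuming a reasonably recent Cerberus release, might look like the following; the field name and values are made up.

from cerberus import Validator

# 'forbidden' rejects listed values; the string, sequence and integer branches
# in _validate_forbidden above cover the usual field types.
schema = {'user': {'type': 'string', 'forbidden': ['root', 'admin']}}
v = Validator(schema)

assert v.validate({'user': 'alice'}) is True
assert v.validate({'user': 'root'}) is False
print(v.errors)  # reports the rejected value for the 'user' field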
satellogic/telluric
telluric/georaster.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1586-L1597
def _calc_footprint(self): """Return rectangle in world coordinates, as GeoVector.""" corners = [self.corner(corner) for corner in self.corner_types()] coords = [] for corner in corners: shape = corner.get_shape(corner.crs) coords.append([shape.x, shape.y]) shp = Polygon(coords) # TODO use GeoVector.from_bounds self._footprint = GeoVector(shp, self.crs) return self._footprint
[ "def", "_calc_footprint", "(", "self", ")", ":", "corners", "=", "[", "self", ".", "corner", "(", "corner", ")", "for", "corner", "in", "self", ".", "corner_types", "(", ")", "]", "coords", "=", "[", "]", "for", "corner", "in", "corners", ":", "shape", "=", "corner", ".", "get_shape", "(", "corner", ".", "crs", ")", "coords", ".", "append", "(", "[", "shape", ".", "x", ",", "shape", ".", "y", "]", ")", "shp", "=", "Polygon", "(", "coords", ")", "# TODO use GeoVector.from_bounds", "self", ".", "_footprint", "=", "GeoVector", "(", "shp", ",", "self", ".", "crs", ")", "return", "self", ".", "_footprint" ]
Return rectangle in world coordinates, as GeoVector.
[ "Return", "rectangle", "in", "world", "coordinates", "as", "GeoVector", "." ]
python
train
38.083333
phoebe-project/phoebe2
phoebe/parameters/constraint.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/constraint.py#L421-L459
def t0_ref_supconj(b, orbit, solve_for=None, **kwargs): """ Create a constraint for t0_ref in an orbit - allowing translating between t0_ref and t0_supconj. :parameter b: the :class:`phoebe.frontend.bundle.Bundle` :parameter str orbit: the label of the orbit in which this constraint should be built :parameter str solve_for: if 't0_ref' should not be the derived/constrained parameter, provide which other parameter should be derived (ie 't0_supconj', 'per0', 'period') :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function) """ orbit_ps = _get_system_ps(b, orbit) metawargs = orbit_ps.meta metawargs.pop('qualifier') # by default both t0s exist in an orbit, so we don't have to worry about creating either t0_ref = b.get_parameter(qualifier='t0_ref', **metawargs) t0_supconj = b.get_parameter(qualifier='t0_supconj', **metawargs) period = b.get_parameter(qualifier='period', **metawargs) ecc = b.get_parameter(qualifier='ecc', **metawargs) per0 = b.get_parameter(qualifier='per0', **metawargs) if solve_for in [None, t0_ref]: lhs = t0_ref rhs = t0_supconj_to_ref(t0_supconj, period, ecc, per0) elif solve_for == t0_supconj: lhs = t0_supconj rhs = t0_ref_to_supconj(t0_ref, period, ecc, per0) else: raise NotImplementedError return lhs, rhs, {'orbit': orbit}
[ "def", "t0_ref_supconj", "(", "b", ",", "orbit", ",", "solve_for", "=", "None", ",", "*", "*", "kwargs", ")", ":", "orbit_ps", "=", "_get_system_ps", "(", "b", ",", "orbit", ")", "metawargs", "=", "orbit_ps", ".", "meta", "metawargs", ".", "pop", "(", "'qualifier'", ")", "# by default both t0s exist in an orbit, so we don't have to worry about creating either", "t0_ref", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'t0_ref'", ",", "*", "*", "metawargs", ")", "t0_supconj", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'t0_supconj'", ",", "*", "*", "metawargs", ")", "period", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'period'", ",", "*", "*", "metawargs", ")", "ecc", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'ecc'", ",", "*", "*", "metawargs", ")", "per0", "=", "b", ".", "get_parameter", "(", "qualifier", "=", "'per0'", ",", "*", "*", "metawargs", ")", "if", "solve_for", "in", "[", "None", ",", "t0_ref", "]", ":", "lhs", "=", "t0_ref", "rhs", "=", "t0_supconj_to_ref", "(", "t0_supconj", ",", "period", ",", "ecc", ",", "per0", ")", "elif", "solve_for", "==", "t0_supconj", ":", "lhs", "=", "t0_supconj", "rhs", "=", "t0_ref_to_supconj", "(", "t0_ref", ",", "period", ",", "ecc", ",", "per0", ")", "else", ":", "raise", "NotImplementedError", "return", "lhs", ",", "rhs", ",", "{", "'orbit'", ":", "orbit", "}" ]
Create a constraint for t0_ref in an orbit - allowing translating between t0_ref and t0_supconj. :parameter b: the :class:`phoebe.frontend.bundle.Bundle` :parameter str orbit: the label of the orbit in which this constraint should be built :parameter str solve_for: if 't0_ref' should not be the derived/constrained parameter, provide which other parameter should be derived (ie 't0_supconj', 'per0', 'period') :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function)
[ "Create", "a", "constraint", "for", "t0_ref", "in", "an", "orbit", "-", "allowing", "translating", "between", "t0_ref", "and", "t0_supconj", "." ]
python
train
37.025641
chrisrink10/basilisp
src/basilisp/lang/reader.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/reader.py#L138-L143
def pushback(self) -> None: """Push one character back onto the stream, allowing it to be read again.""" if abs(self._idx - 1) > self._pushback_depth: raise IndexError("Exceeded pushback depth") self._idx -= 1
[ "def", "pushback", "(", "self", ")", "->", "None", ":", "if", "abs", "(", "self", ".", "_idx", "-", "1", ")", ">", "self", ".", "_pushback_depth", ":", "raise", "IndexError", "(", "\"Exceeded pushback depth\"", ")", "self", ".", "_idx", "-=", "1" ]
Push one character back onto the stream, allowing it to be read again.
[ "Push", "one", "character", "back", "onto", "the", "stream", "allowing", "it", "to", "be", "read", "again", "." ]
python
test
41.333333
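A generic illustration of the pattern the record above enables (read one character, then un-read it to peek); the TinyBuffer class below is a simplified stand-in, not Basilisp's actual reader.

class TinyBuffer:
    # Minimal index-based character buffer with a pushback operation.
    def __init__(self, text):
        self._text = text
        self._idx = 0

    def read(self):
        ch = self._text[self._idx]
        self._idx += 1
        return ch

    def pushback(self):
        if self._idx == 0:
            raise IndexError("Nothing to push back")
        self._idx -= 1

buf = TinyBuffer("(+ 1 2)")
first = buf.read()   # consume '('
buf.pushback()       # make it readable again
assert buf.read() == first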
d0c-s4vage/pfp
pfp/interp.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L1505-L1516
def _str_to_int(self, string): """Check for the hex """ string = string.lower() if string.endswith("l"): string = string[:-1] if string.lower().startswith("0x"): # should always match match = re.match(r'0[xX]([a-fA-F0-9]+)', string) return int(match.group(1), 0x10) else: return int(string)
[ "def", "_str_to_int", "(", "self", ",", "string", ")", ":", "string", "=", "string", ".", "lower", "(", ")", "if", "string", ".", "endswith", "(", "\"l\"", ")", ":", "string", "=", "string", "[", ":", "-", "1", "]", "if", "string", ".", "lower", "(", ")", ".", "startswith", "(", "\"0x\"", ")", ":", "# should always match", "match", "=", "re", ".", "match", "(", "r'0[xX]([a-fA-F0-9]+)'", ",", "string", ")", "return", "int", "(", "match", ".", "group", "(", "1", ")", ",", "0x10", ")", "else", ":", "return", "int", "(", "string", ")" ]
Check for the hex
[ "Check", "for", "the", "hex" ]
python
train
32.25
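The conversion rules documented above (drop a trailing 'l'/'L', parse an '0x' prefix as hexadecimal, otherwise parse decimal) can be checked with a small standalone replica; str_to_int below is illustrative only and is not part of pfp.

import re

def str_to_int(string):
    # Standalone replica of the rules in _str_to_int above.
    string = string.lower()
    if string.endswith("l"):            # old C-style long suffix, e.g. "0x10L"
        string = string[:-1]
    if string.startswith("0x"):
        match = re.match(r'0x([a-f0-9]+)', string)
        return int(match.group(1), 16)
    return int(string)

assert str_to_int("0x1A") == 26
assert str_to_int("0x10L") == 16
assert str_to_int("42") == 42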
DataBiosphere/toil
src/toil/lib/docker.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/docker.py#L477-L497
def containerIsRunning(container_name): """ Checks whether the container is running or not. :param container_name: Name of the container being checked. :returns: True if status is 'running', False if status is anything else, and None if the container does not exist. """ client = docker.from_env(version='auto') try: this_container = client.containers.get(container_name) if this_container.status == 'running': return True else: # this_container.status == 'exited', 'restarting', or 'paused' return False except NotFound: return None except requests.exceptions.HTTPError as e: logger.debug("Server error attempting to call container: ", container_name) raise create_api_error_from_http_exception(e)
[ "def", "containerIsRunning", "(", "container_name", ")", ":", "client", "=", "docker", ".", "from_env", "(", "version", "=", "'auto'", ")", "try", ":", "this_container", "=", "client", ".", "containers", ".", "get", "(", "container_name", ")", "if", "this_container", ".", "status", "==", "'running'", ":", "return", "True", "else", ":", "# this_container.status == 'exited', 'restarting', or 'paused'", "return", "False", "except", "NotFound", ":", "return", "None", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "logger", ".", "debug", "(", "\"Server error attempting to call container: \"", ",", "container_name", ")", "raise", "create_api_error_from_http_exception", "(", "e", ")" ]
Checks whether the container is running or not. :param container_name: Name of the container being checked. :returns: True if status is 'running', False if status is anything else, and None if the container does not exist.
[ "Checks", "whether", "the", "container", "is", "running", "or", "not", ".", ":", "param", "container_name", ":", "Name", "of", "the", "container", "being", "checked", ".", ":", "returns", ":", "True", "if", "status", "is", "running", "False", "if", "status", "is", "anything", "else", "and", "None", "if", "the", "container", "does", "not", "exist", "." ]
python
train
39.238095
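A hedged usage sketch for the record above, assuming toil is installed and a local Docker daemon is reachable; the container name is hypothetical. The tri-state return value (True / False / None) follows the docstring.

from toil.lib.docker import containerIsRunning   # module path taken from the record's file path

status = containerIsRunning('toil-worker-1')     # hypothetical container name
if status is True:
    print('container is running')
elif status is False:
    print('container exists but is exited, restarting, or paused')
else:                                            # None: no container by that name
    print('container does not exist')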
saltstack/salt
salt/returners/memcache_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/memcache_return.py#L213-L223
def get_jids(): ''' Return a list of all job ids ''' serv = _get_serv(ret=None) jids = _get_list(serv, 'jids') loads = serv.get_multi(jids) # {jid: load, jid: load, ...} ret = {} for jid, load in six.iteritems(loads): ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret
[ "def", "get_jids", "(", ")", ":", "serv", "=", "_get_serv", "(", "ret", "=", "None", ")", "jids", "=", "_get_list", "(", "serv", ",", "'jids'", ")", "loads", "=", "serv", ".", "get_multi", "(", "jids", ")", "# {jid: load, jid: load, ...}", "ret", "=", "{", "}", "for", "jid", ",", "load", "in", "six", ".", "iteritems", "(", "loads", ")", ":", "ret", "[", "jid", "]", "=", "salt", ".", "utils", ".", "jid", ".", "format_jid_instance", "(", "jid", ",", "salt", ".", "utils", ".", "json", ".", "loads", "(", "load", ")", ")", "return", "ret" ]
Return a list of all job ids
[ "Return", "a", "list", "of", "all", "job", "ids" ]
python
train
31.181818
saltstack/salt
salt/states/smartos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L452-L485
def source_absent(name): ''' Ensure an image source is absent on the computenode name : string source url ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if name not in __salt__['imgadm.sources'](): # source is absent ret['result'] = True ret['comment'] = 'image source {0} is absent'.format(name) else: # remove source if __opts__['test']: res = {} ret['result'] = True else: res = __salt__['imgadm.source_delete'](name) ret['result'] = (name not in res) if ret['result']: ret['comment'] = 'image source {0} deleted'.format(name) ret['changes'][name] = 'deleted' else: ret['comment'] = 'image source {0} not deleted'.format(name) if 'Error' in res: ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error']) return ret
[ "def", "source_absent", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "name", "not", "in", "__salt__", "[", "'imgadm.sources'", "]", "(", ")", ":", "# source is absent", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'image source {0} is absent'", ".", "format", "(", "name", ")", "else", ":", "# remove source", "if", "__opts__", "[", "'test'", "]", ":", "res", "=", "{", "}", "ret", "[", "'result'", "]", "=", "True", "else", ":", "res", "=", "__salt__", "[", "'imgadm.source_delete'", "]", "(", "name", ")", "ret", "[", "'result'", "]", "=", "(", "name", "not", "in", "res", ")", "if", "ret", "[", "'result'", "]", ":", "ret", "[", "'comment'", "]", "=", "'image source {0} deleted'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'deleted'", "else", ":", "ret", "[", "'comment'", "]", "=", "'image source {0} not deleted'", ".", "format", "(", "name", ")", "if", "'Error'", "in", "res", ":", "ret", "[", "'comment'", "]", "=", "'{0}: {1}'", ".", "format", "(", "ret", "[", "'comment'", "]", ",", "res", "[", "'Error'", "]", ")", "return", "ret" ]
Ensure an image source is absent on the computenode name : string source url
[ "Ensure", "an", "image", "source", "is", "absent", "on", "the", "computenode" ]
python
train
28.470588
inveniosoftware/invenio-access
invenio_access/cli.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L30-L35
def lazy_result(f): """Decorate function to return LazyProxy.""" @wraps(f) def decorated(ctx, param, value): return LocalProxy(lambda: f(ctx, param, value)) return decorated
[ "def", "lazy_result", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "decorated", "(", "ctx", ",", "param", ",", "value", ")", ":", "return", "LocalProxy", "(", "lambda", ":", "f", "(", "ctx", ",", "param", ",", "value", ")", ")", "return", "decorated" ]
Decorate function to return LazyProxy.
[ "Decorate", "function", "to", "return", "LazyProxy", "." ]
python
train
32
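The decorator above wraps a Click-style (ctx, param, value) callback so that the callback body runs only when the returned proxy is actually used. The self-contained sketch below restates it with Werkzeug's LocalProxy (which the original appears to rely on); process_user and its argument are hypothetical.

from functools import wraps
from werkzeug.local import LocalProxy

def lazy_result(f):
    # Same shape as the record above: defer f(ctx, param, value) behind a proxy.
    @wraps(f)
    def decorated(ctx, param, value):
        return LocalProxy(lambda: f(ctx, param, value))
    return decorated

@lazy_result
def process_user(ctx, param, value):
    print('resolving', value)          # side effect marks when evaluation happens
    return value.upper()

proxy = process_user(None, None, 'admin')   # nothing resolved yet
print(str(proxy))                           # triggers the callback, then prints 'ADMIN'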
tanghaibao/jcvi
jcvi/formats/blast.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L255-L336
def filter(args): """ %prog filter test.blast Produce a new blast file and filter based on: - score: >= cutoff - pctid: >= cutoff - hitlen: >= cutoff - evalue: <= cutoff - ids: valid ids Use --inverse to obtain the complementary records for the criteria above. - noself: remove self-self hits """ p = OptionParser(filter.__doc__) p.add_option("--score", dest="score", default=0, type="int", help="Score cutoff") p.set_align(pctid=95, hitlen=100, evalue=.01) p.add_option("--noself", default=False, action="store_true", help="Remove self-self hits") p.add_option("--ids", help="Path to file with ids to retain") p.add_option("--inverse", default=False, action="store_true", help="Similar to grep -v, inverse") p.set_outfile(outfile=None) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) if opts.ids: ids = set() for row in must_open(opts.ids): if row[0] == "#": continue row = row.replace(",", "\t") ids.update(row.split()) else: ids = None blastfile, = args inverse = opts.inverse outfile = opts.outfile fp = must_open(blastfile) score, pctid, hitlen, evalue, noself = \ opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \ outfile is None else outfile if inverse: newblastfile += ".inverse" fw = must_open(newblastfile, "w") for row in fp: if row[0] == '#': continue c = BlastLine(row) if ids: if c.query in ids and c.subject in ids: noids = False else: noids = True else: noids = None remove = c.score < score or \ c.pctid < pctid or \ c.hitlen < hitlen or \ c.evalue > evalue or \ noids if inverse: remove = not remove remove = remove or (noself and c.query == c.subject) if not remove: print(row.rstrip(), file=fw) fw.close() return newblastfile
[ "def", "filter", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "filter", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--score\"", ",", "dest", "=", "\"score\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Score cutoff\"", ")", "p", ".", "set_align", "(", "pctid", "=", "95", ",", "hitlen", "=", "100", ",", "evalue", "=", ".01", ")", "p", ".", "add_option", "(", "\"--noself\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Remove self-self hits\"", ")", "p", ".", "add_option", "(", "\"--ids\"", ",", "help", "=", "\"Path to file with ids to retain\"", ")", "p", ".", "add_option", "(", "\"--inverse\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Similar to grep -v, inverse\"", ")", "p", ".", "set_outfile", "(", "outfile", "=", "None", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "if", "opts", ".", "ids", ":", "ids", "=", "set", "(", ")", "for", "row", "in", "must_open", "(", "opts", ".", "ids", ")", ":", "if", "row", "[", "0", "]", "==", "\"#\"", ":", "continue", "row", "=", "row", ".", "replace", "(", "\",\"", ",", "\"\\t\"", ")", "ids", ".", "update", "(", "row", ".", "split", "(", ")", ")", "else", ":", "ids", "=", "None", "blastfile", ",", "=", "args", "inverse", "=", "opts", ".", "inverse", "outfile", "=", "opts", ".", "outfile", "fp", "=", "must_open", "(", "blastfile", ")", "score", ",", "pctid", ",", "hitlen", ",", "evalue", ",", "noself", "=", "opts", ".", "score", ",", "opts", ".", "pctid", ",", "opts", ".", "hitlen", ",", "opts", ".", "evalue", ",", "opts", ".", "noself", "newblastfile", "=", "blastfile", "+", "\".P{0}L{1}\"", ".", "format", "(", "int", "(", "pctid", ")", ",", "hitlen", ")", "if", "outfile", "is", "None", "else", "outfile", "if", "inverse", ":", "newblastfile", "+=", "\".inverse\"", "fw", "=", "must_open", "(", "newblastfile", ",", "\"w\"", ")", "for", "row", "in", "fp", ":", "if", "row", "[", "0", "]", "==", "'#'", ":", "continue", "c", "=", "BlastLine", "(", "row", ")", "if", "ids", ":", "if", "c", ".", "query", "in", "ids", "and", "c", ".", "subject", "in", "ids", ":", "noids", "=", "False", "else", ":", "noids", "=", "True", "else", ":", "noids", "=", "None", "remove", "=", "c", ".", "score", "<", "score", "or", "c", ".", "pctid", "<", "pctid", "or", "c", ".", "hitlen", "<", "hitlen", "or", "c", ".", "evalue", ">", "evalue", "or", "noids", "if", "inverse", ":", "remove", "=", "not", "remove", "remove", "=", "remove", "or", "(", "noself", "and", "c", ".", "query", "==", "c", ".", "subject", ")", "if", "not", "remove", ":", "print", "(", "row", ".", "rstrip", "(", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "return", "newblastfile" ]
%prog filter test.blast Produce a new blast file and filter based on: - score: >= cutoff - pctid: >= cutoff - hitlen: >= cutoff - evalue: <= cutoff - ids: valid ids Use --inverse to obtain the complementary records for the criteria above. - noself: remove self-self hits
[ "%prog", "filter", "test", ".", "blast" ]
python
train
27.02439
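Because the record above parses an argument list rather than sys.argv directly, it can also be driven programmatically. The sketch below is an assumption-laden illustration: the input file is hypothetical, and the --pctid/--hitlen flag names are inferred from the set_align() defaults in the snippet rather than confirmed against jcvi's option parser.

import os
from jcvi.formats.blast import filter as blast_filter   # module path from the record's file path

blastfile = "sample.blast"                               # hypothetical BLAST tabular file
if os.path.exists(blastfile):
    # Keep hits with >=98% identity and >=200 bp alignment length, dropping self-hits.
    newfile = blast_filter([blastfile, "--pctid=98", "--hitlen=200", "--noself"])
    print(newfile)                                       # e.g. "sample.blast.P98L200"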
whiteclover/dbpy
db/__init__.py
https://github.com/whiteclover/dbpy/blob/3d9ce85f55cfb39cced22081e525f79581b26b3a/db/__init__.py#L54-L91
def setup(config, minconn=5, maxconn=10, adapter='mysql', key='default', slave=False): """Setup database :param config dict: is the db adapter config :param key string: the key to identify dabtabase :param adapter string: the dabtabase adapter current support mysql only :param minconn int: the min connection for connection pool :param maxconn int: the max connection for connection pool :param slave boolean: If True the database can be read only. """ global __db if '.' in key: raise TypeError('The DB Key: "%s" Can\'t Contain dot' % (key)) if slave == False and key in __db: raise DBError('The Key: "%s" was set' % (key)) database = DB(config, minconn, maxconn, key, adapter) master_key = key slave_key = key + '.slave' if not slave: __db[master_key] = database if slave_key not in __db: __db[slave_key] = [database] else: if key in __db: databases = __db[slave_key] if len(databases) == 1 and __db[master_key] == databases[0]: __db[slave_key] = [database] else: __db[slave_key].append(database) else: __db[slave_key] = [database]
[ "def", "setup", "(", "config", ",", "minconn", "=", "5", ",", "maxconn", "=", "10", ",", "adapter", "=", "'mysql'", ",", "key", "=", "'default'", ",", "slave", "=", "False", ")", ":", "global", "__db", "if", "'.'", "in", "key", ":", "raise", "TypeError", "(", "'The DB Key: \"%s\" Can\\'t Contain dot'", "%", "(", "key", ")", ")", "if", "slave", "==", "False", "and", "key", "in", "__db", ":", "raise", "DBError", "(", "'The Key: \"%s\" was set'", "%", "(", "key", ")", ")", "database", "=", "DB", "(", "config", ",", "minconn", ",", "maxconn", ",", "key", ",", "adapter", ")", "master_key", "=", "key", "slave_key", "=", "key", "+", "'.slave'", "if", "not", "slave", ":", "__db", "[", "master_key", "]", "=", "database", "if", "slave_key", "not", "in", "__db", ":", "__db", "[", "slave_key", "]", "=", "[", "database", "]", "else", ":", "if", "key", "in", "__db", ":", "databases", "=", "__db", "[", "slave_key", "]", "if", "len", "(", "databases", ")", "==", "1", "and", "__db", "[", "master_key", "]", "==", "databases", "[", "0", "]", ":", "__db", "[", "slave_key", "]", "=", "[", "database", "]", "else", ":", "__db", "[", "slave_key", "]", ".", "append", "(", "database", ")", "else", ":", "__db", "[", "slave_key", "]", "=", "[", "database", "]" ]
Setup database

    :param config dict: is the db adapter config
    :param key string: the key to identify database
    :param adapter string: the database adapter, currently supports mysql only
    :param minconn int: the min connection for connection pool
    :param maxconn int: the max connection for connection pool
    :param slave boolean: If True the database can be read only.
[ "Setup", "database" ]
python
train
31.868421
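A hedged configuration sketch for the record above. The docstring does not spell out the keys of the config dict, so the connection settings below (host, port, user, passwd, db) are assumptions modeled on common MySQL client options; only the setup() signature itself comes from the record.

import db   # the dbpy package documented above

# Hypothetical MySQL settings; the key names are an assumption, not from the docstring.
master_cfg = {'host': '127.0.0.1', 'port': 3306, 'user': 'app',
              'passwd': 'secret', 'db': 'appdb'}

db.setup(master_cfg, minconn=5, maxconn=10, adapter='mysql', key='default')
# A read-only replica can be registered under the same key by passing slave=True.
db.setup(dict(master_cfg, host='10.0.0.2'), key='default', slave=True)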
gwastro/pycbc
pycbc/filter/matchedfilter.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/matchedfilter.py#L1282-L1315
def matched_filter(template, data, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None, sigmasq=None): """ Return the complex snr. Return the complex snr, along with its associated normalization of the template, matched filtered against the data. Parameters ---------- template : TimeSeries or FrequencySeries The template waveform data : TimeSeries or FrequencySeries The strain data to be filtered. psd : FrequencySeries The noise weighting of the filter. low_frequency_cutoff : {None, float}, optional The frequency to begin the filter calculation. If None, begin at the first frequency after DC. high_frequency_cutoff : {None, float}, optional The frequency to stop the filter calculation. If None, continue to the the nyquist frequency. sigmasq : {None, float}, optional The template normalization. If none, this value is calculated internally. Returns ------- snr : TimeSeries A time series containing the complex snr. """ snr, _, norm = matched_filter_core(template, data, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq) return snr * norm
[ "def", "matched_filter", "(", "template", ",", "data", ",", "psd", "=", "None", ",", "low_frequency_cutoff", "=", "None", ",", "high_frequency_cutoff", "=", "None", ",", "sigmasq", "=", "None", ")", ":", "snr", ",", "_", ",", "norm", "=", "matched_filter_core", "(", "template", ",", "data", ",", "psd", "=", "psd", ",", "low_frequency_cutoff", "=", "low_frequency_cutoff", ",", "high_frequency_cutoff", "=", "high_frequency_cutoff", ",", "h_norm", "=", "sigmasq", ")", "return", "snr", "*", "norm" ]
Return the complex snr.

Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.

Parameters
----------
template : TimeSeries or FrequencySeries
    The template waveform
data : TimeSeries or FrequencySeries
    The strain data to be filtered.
psd : FrequencySeries
    The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
    The frequency to begin the filter calculation. If None, begin at the
    first frequency after DC.
high_frequency_cutoff : {None, float}, optional
    The frequency to stop the filter calculation. If None, continue to the
    Nyquist frequency.
sigmasq : {None, float}, optional
    The template normalization. If None, this value is calculated
    internally.

Returns
-------
snr : TimeSeries
    A time series containing the complex snr.
[ "Return", "the", "complex", "snr", "." ]
python
train
37.705882
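A minimal end-to-end sketch of the call documented above, assuming a working PyCBC installation. All numbers (duration, sample rate, component masses, low-frequency cutoff) are hypothetical, and the simulated-noise setup exists only to make the example self-contained.

from pycbc.waveform import get_td_waveform
from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.noise import noise_from_psd
from pycbc.filter import matched_filter

# 16 s of simulated detector noise at 4096 Hz, with a matching PSD.
delta_t, duration, flow = 1.0 / 4096, 16, 20.0
delta_f = 1.0 / duration
psd = aLIGOZeroDetHighPower(int(2048 / delta_f) + 1, delta_f, flow)
strain = noise_from_psd(int(duration / delta_t), delta_t, psd, seed=0)

# Hypothetical template: an equal-mass binary, resized to the data length
# (template and data must share delta_t and length).
hp, _ = get_td_waveform(approximant="SEOBNRv4_opt", mass1=30, mass2=30,
                        f_lower=flow, delta_t=delta_t)
hp.resize(len(strain))

snr = matched_filter(hp, strain, psd=psd, low_frequency_cutoff=flow)
peak = abs(snr).numpy().argmax()
print(abs(snr)[peak], snr.sample_times[peak])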