Dataset schema (one entry per record below):

- repo: string, 7 to 54 characters
- path: string, 4 to 192 characters
- url: string, 87 to 284 characters
- code: string, 78 to 104k characters
- code_tokens: list
- docstring: string, 1 to 46.9k characters
- docstring_tokens: list
- language: string, 1 distinct value
- partition: string, 3 distinct values
repo: fusionapp/fusion-util
path: fusion_util/enums.py
url: https://github.com/fusionapp/fusion-util/blob/089c525799926c8b8bf1117ab22ed055dc99c7e6/fusion_util/enums.py#L69-L79

```python
def from_pairs(cls, doc, pairs):
    """
    Construct an enumeration from an iterable of pairs.

    :param doc: See `Enum.__init__`.
    :type pairs: ``Iterable[Tuple[unicode, unicode]]``
    :param pairs: Iterable to construct the enumeration from.
    :rtype: Enum
    """
    values = (EnumItem(value, desc) for value, desc in pairs)
    return cls(doc=doc, values=values)
```
language: python
partition: train
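A self-contained sketch of the same "construct from pairs" pattern; `Enum` and `EnumItem` here are minimal stand-ins, not fusion-util's real classes:

```python
# Stand-in classes illustrating the classmethod-from-pairs pattern above.
class EnumItem:
    def __init__(self, value, desc):
        self.value, self.desc = value, desc

class Enum:
    def __init__(self, doc, values):
        self.doc = doc
        self.items = list(values)

    @classmethod
    def from_pairs(cls, doc, pairs):
        # Each (value, description) pair becomes one EnumItem.
        values = (EnumItem(value, desc) for value, desc in pairs)
        return cls(doc=doc, values=values)

colours = Enum.from_pairs('Supported colours.',
                          [('red', 'Red'), ('green', 'Green')])
print([item.desc for item in colours.items])  # ['Red', 'Green']
```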
repo: zhmcclient/python-zhmcclient
path: zhmcclient/_exceptions.py
url: https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_exceptions.py#L258-L270

```python
def str_def(self):
    """
    :term:`string`: The exception as a string in a Python
    definition-style format, e.g. for parsing by scripts:

    .. code-block:: text

        classname={}; read_timeout={}; read_retries={}; message={};
    """
    return "classname={!r}; read_timeout={!r}; read_retries={!r}; " \
           "message={!r};". \
           format(self.__class__.__name__, self.read_timeout,
                  self.read_retries, self.args[0])
```
language: python
partition: train
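A runnable standalone demo of the definition-style format; the exception class is a stand-in for zhmcclient's real one:

```python
# Stand-in exception showing the "key=value;" string format used above.
class ReadTimeout(Exception):
    def __init__(self, msg, read_timeout, read_retries):
        super().__init__(msg)          # msg becomes self.args[0]
        self.read_timeout = read_timeout
        self.read_retries = read_retries

    def str_def(self):
        return ("classname={!r}; read_timeout={!r}; read_retries={!r}; "
                "message={!r};".format(self.__class__.__name__,
                                       self.read_timeout,
                                       self.read_retries,
                                       self.args[0]))

exc = ReadTimeout("request timed out", 300, 2)
print(exc.str_def())
# classname='ReadTimeout'; read_timeout=300; read_retries=2; message='request timed out';
```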
repo: ajenhl/tacl
path: tacl/jitc.py
url: https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/jitc.py#L96-L117

```python
def _create_chord_chart(self, data, works, output_dir):
    """Generates and writes to a file in `output_dir` the data used
    to display a chord chart.

    :param data: data to derive the chord data from
    :type data: `pandas.DataFrame`
    :param works: works to display
    :type works: `list`
    :param output_dir: directory to output data file to
    :type output_dir: `str`
    """
    matrix = []
    chord_data = data.unstack(BASE_WORK)[SHARED]
    for index, row_data in chord_data.fillna(value=0).iterrows():
        matrix.append([value / 100 for value in row_data])
    colours = generate_colours(len(works))
    colour_works = [{'work': work, 'colour': colour}
                    for work, colour in zip(chord_data, colours)]
    json_data = json.dumps({'works': colour_works, 'matrix': matrix})
    with open(os.path.join(output_dir, 'chord_data.js'), 'w') as fh:
        fh.write('var chordData = {}'.format(json_data))
```
language: python
partition: train
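A minimal sketch of just the output step: serialising a matrix into the `var chordData = ...` form presumably consumed by a JavaScript chart. The data values here are made up; only the file name mirrors the record above:

```python
import json
import os

# Fabricated example data: shared-percentage matrix plus per-work colours.
matrix = [[0.0, 0.42], [0.17, 0.0]]
works = [{'work': 'T0001', 'colour': '#1f77b4'},
         {'work': 'T0002', 'colour': '#ff7f0e'}]

json_data = json.dumps({'works': works, 'matrix': matrix})
with open(os.path.join('.', 'chord_data.js'), 'w') as fh:
    fh.write('var chordData = {}'.format(json_data))
```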
repo: poppy-project/pypot
path: pypot/dynamixel/io/io.py
url: https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/io.py#L20-L28

```python
def get_control_mode(self, ids):
    """ Gets the mode ('joint' or 'wheel') for the specified motors. """
    to_get_ids = [id for id in ids if id not in self._known_mode]
    limits = self.get_angle_limit(to_get_ids, convert=False)
    modes = ['wheel' if limit == (0, 0) else 'joint' for limit in limits]

    self._known_mode.update(zip(to_get_ids, modes))

    return tuple(self._known_mode[id] for id in ids)
```
language: python
partition: train
repo: user-cont/conu
path: conu/apidefs/filesystem.py
url: https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/apidefs/filesystem.py#L106-L119

```python
def read_file(self, file_path):
    """
    read file specified via 'file_path' and return its content - raises
    an ConuException if there is an issue accessing the file

    :param file_path: str, path to the file to read
    :return: str (not bytes), content of the file
    """
    try:
        with open(self.p(file_path)) as fd:
            return fd.read()
    except IOError as ex:
        logger.error("error while accessing file %s: %r", file_path, ex)
        raise ConuException("There was an error while accessing file %s: %r",
                            file_path, ex)
```
language: python
partition: train
repo: jldantas/libmft
path: libmft/attribute.py
url: https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1547-L1568

```python
def _from_binary_syn_link(cls, binary_stream):
    """See base class."""
    ''' Offset to target name - 2 (relative to 16th byte)
        Length of target name - 2
        Offset to print name - 2 (relative to 16th byte)
        Length of print name - 2
        Symbolic link flags - 4
    '''
    offset_target_name, len_target_name, offset_print_name, \
        len_print_name, syn_flags = \
        cls._REPR.unpack(binary_stream[:cls._REPR.size])

    offset = cls._REPR.size + offset_target_name
    target_name = binary_stream[offset:offset+len_target_name].tobytes().decode("utf_16_le")
    offset = cls._REPR.size + offset_print_name
    print_name = binary_stream[offset:offset+len_print_name].tobytes().decode("utf_16_le")

    nw_obj = cls((target_name, print_name, SymbolicLinkFlags(syn_flags)))
    _MOD_LOGGER.debug("Attempted to unpack Symbolic Link from \"%s\"\nResult: %s",
                      binary_stream.tobytes(), nw_obj)

    return nw_obj
```
language: python
partition: train
repo: pauleveritt/kaybee
path: kaybee/plugins/events.py
url: https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/events.py#L144-L150

```python
def call_env_updated(cls, kb_app,
                     sphinx_app: Sphinx,
                     sphinx_env: BuildEnvironment):
    """ On the env-updated event, do callbacks """
    for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EU):
        callback(kb_app, sphinx_app, sphinx_env)
```
language: python
partition: train
repo: google/grr
path: grr/server/grr_response_server/gui/api_call_router_with_approval_checks.py
url: https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_call_router_with_approval_checks.py#L64-L96

```python
def _CheckAccess(self, username, subject_id, approval_type):
    """Checks access to a given subject by a given user."""
    precondition.AssertType(subject_id, Text)

    cache_key = (username, subject_id, approval_type)
    try:
        self.acl_cache.Get(cache_key)
        stats_collector_instance.Get().IncrementCounter(
            "approval_searches", fields=["-", "cache"])
        return True
    except KeyError:
        stats_collector_instance.Get().IncrementCounter(
            "approval_searches", fields=["-", "reldb"])

    approvals = data_store.REL_DB.ReadApprovalRequests(
        username, approval_type, subject_id=subject_id, include_expired=False)

    errors = []
    for approval in approvals:
        try:
            approval_checks.CheckApprovalRequest(approval)
            self.acl_cache.Put(cache_key, True)
            return
        except access_control.UnauthorizedAccess as e:
            errors.append(e)

    subject = approval_checks.BuildLegacySubject(subject_id, approval_type)
    if not errors:
        raise access_control.UnauthorizedAccess(
            "No approval found.", subject=subject)
    else:
        raise access_control.UnauthorizedAccess(
            " ".join(utils.SmartStr(e) for e in errors), subject=subject)
```
language: python
partition: train
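The control flow here (consult a cache, then try each approval and aggregate the failures) is a reusable pattern. Below is a self-contained sketch with stand-in names, not GRR's actual classes:

```python
class UnauthorizedAccess(Exception):
    pass

def check_access(cache, approvals, check, cache_key):
    if cache_key in cache:          # fast path: previously granted
        return True
    errors = []
    for approval in approvals:      # any single valid approval grants access
        try:
            check(approval)
            cache[cache_key] = True
            return True
        except UnauthorizedAccess as e:
            errors.append(e)
    if not errors:
        raise UnauthorizedAccess("No approval found.")
    raise UnauthorizedAccess(" ".join(str(e) for e in errors))

def check(approval):
    if approval != 'valid':
        raise UnauthorizedAccess('{} approval rejected'.format(approval))

cache = {}
print(check_access(cache, ['expired', 'valid'], check, ('alice', 'C.1')))  # True
```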
repo: poldracklab/niworkflows
path: niworkflows/interfaces/segmentation.py
url: https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/segmentation.py#L40-L58

```python
def _post_run_hook(self, runtime):
    ''' generates a report showing nine slices, three per axis, of an
    arbitrary volume of `in_files`, with the resulting segmentation
    overlaid '''
    self._anat_file = self.inputs.in_files[0]
    outputs = self.aggregate_outputs(runtime=runtime)
    self._mask_file = outputs.tissue_class_map
    # We are skipping the CSF class because with combination with others
    # it only shows the skullstriping mask
    self._seg_files = outputs.tissue_class_files[1:]
    self._masked = False

    NIWORKFLOWS_LOG.info('Generating report for FAST (in_files %s, '
                         'segmentation %s, individual tissue classes %s).',
                         self.inputs.in_files,
                         outputs.tissue_class_map,
                         outputs.tissue_class_files)

    return super(FASTRPT, self)._post_run_hook(runtime)
```
language: python
partition: train
repo: MacHu-GWU/single_file_module-project
path: sfm/binarysearch.py
url: https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/binarysearch.py#L122-L143

```python
def find_ge(array, x):
    """
    Find leftmost item greater than or equal to x.

    :type array: list
    :param array: an iterable object that support inex
    :param x: a comparable value

    Example::

        >>> find_ge([0, 1, 2, 3], 1.0)
        1

    **中文文档**

    寻找最小的大于等于x的数。
    """
    i = bisect.bisect_left(array, x)
    if i != len(array):
        return array[i]
    raise ValueError
```
language: python
partition: train
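A runnable standalone demo; only the standard library is needed:

```python
import bisect

def find_ge(array, x):
    # bisect_left returns the leftmost insertion point, so an element
    # equal to x is returned rather than skipped.
    i = bisect.bisect_left(array, x)
    if i != len(array):
        return array[i]
    raise ValueError

print(find_ge([0, 1, 2, 3], 1.0))   # 1
print(find_ge([0, 1, 2, 3], 2.5))   # 3
```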
repo: aalireza/SimpleAudioIndexer
path: SimpleAudioIndexer/__init__.py
url: https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L418-L437

```python
def _list_audio_files(self, sub_dir=""):
    """
    Parameters
    ----------
    sub_dir : one of `needed_directories`, optional
        Default is "", which means it'll look through all of subdirs.

    Returns
    -------
    audio_files : [str]
        A list whose elements are basenames of the present audiofiles
        whose formats are `wav`
    """
    audio_files = list()
    for possibly_audio_file in os.listdir("{}/{}".format(self.src_dir, sub_dir)):
        file_format = ''.join(possibly_audio_file.split('.')[-1])
        if file_format.lower() == "wav":
            audio_files.append(possibly_audio_file)
    return audio_files
```
language: python
partition: train
repo: galactics/beyond
path: beyond/frames/iau1980.py
url: https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/frames/iau1980.py#L72-L76

```python
def precesion(date):  # pragma: no cover
    """Precession as a rotation matrix
    """
    zeta, theta, z = np.deg2rad(_precesion(date))
    return rot3(zeta) @ rot2(-theta) @ rot3(z)
```
language: python
partition: train
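A sketch of the matrix composition, assuming `rot2`/`rot3` are the usual astrodynamics rotation matrices about the second and third axes (an assumption; not checked against the library). The angles are made up:

```python
import numpy as np

def rot2(theta):
    # Rotation about the y axis, astrodynamics sign convention (assumed).
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0, -s], [0, 1, 0], [s, 0, c]])

def rot3(theta):
    # Rotation about the z axis, astrodynamics sign convention (assumed).
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])

zeta, theta, z = np.deg2rad([0.64, 0.56, 0.64])   # fabricated angles
P = rot3(zeta) @ rot2(-theta) @ rot3(z)
print(np.allclose(P @ P.T, np.eye(3)))  # True: composed rotations stay orthogonal
```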
repo: AndrewRPorter/yahoo-historical
path: yahoo_historical/fetch.py
url: https://github.com/AndrewRPorter/yahoo-historical/blob/7a501af77fec6aa69551edb0485b665ea9bb2727/yahoo_historical/fetch.py#L41-L50

```python
def getData(self, events):
    """Returns a list of historical data from Yahoo Finance"""
    if self.interval not in ["1d", "1wk", "1mo"]:
        raise ValueError("Incorrect interval: valid intervals are 1d, 1wk, 1mo")

    url = self.api_url % (self.ticker, self.start, self.end,
                          self.interval, events, self.crumb)

    data = requests.get(url, cookies={'B': self.cookie})
    content = StringIO(data.content.decode("utf-8"))
    return pd.read_csv(content, sep=',')
```
language: python
partition: train
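An offline sketch of the response handling: the endpoint returns CSV text, which is fed to pandas via `StringIO`. The CSV content below is fabricated, so no network call is needed:

```python
from io import StringIO
import pandas as pd

# Fabricated response body in the shape Yahoo's CSV download used.
csv_text = ("Date,Open,High,Low,Close,Adj Close,Volume\n"
            "2019-01-02,154.89,158.85,154.23,157.92,157.92,37039700\n")

frame = pd.read_csv(StringIO(csv_text), sep=',')
print(frame['Close'].iloc[0])  # 157.92
```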
repo: teepark/greenhouse
path: greenhouse/scheduler.py
url: https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/scheduler.py#L263-L300

```python
def schedule(target=None, args=(), kwargs=None):
    """insert a greenlet into the scheduler

    If provided a function, it is wrapped in a new greenlet

    :param target: what to schedule
    :type target: function or greenlet
    :param args:
        arguments for the function (only used if ``target`` is a function)
    :type args: tuple
    :param kwargs:
        keyword arguments for the function (only used if ``target`` is a
        function)
    :type kwargs: dict or None
    :returns: the ``target`` argument

    This function can also be used as a decorator, either preloading
    ``args`` and/or ``kwargs`` or not:

    >>> @schedule
    >>> def f():
    ...     print 'hello from f'

    >>> @schedule(args=('world',))
    >>> def f(name):
    ...     print 'hello %s' % name
    """
    if target is None:
        def decorator(target):
            return schedule(target, args=args, kwargs=kwargs)
        return decorator
    if isinstance(target, compat.greenlet) or target is compat.main_greenlet:
        glet = target
    else:
        glet = greenlet(target, args, kwargs)
    state.paused.append(glet)
    return target
```
language: python
partition: train
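A self-contained demo of the "decorator with or without arguments" trick used above: when called with only keyword arguments, the function returns a decorator that re-enters itself. The scheduling side effect is replaced by a print:

```python
def schedule(target=None, args=(), kwargs=None):
    if target is None:
        # Called as @schedule(args=...): return a decorator that re-enters.
        def decorator(target):
            return schedule(target, args=args, kwargs=kwargs)
        return decorator
    print('scheduled {} with args {}'.format(target.__name__, args))
    return target

@schedule
def f():
    pass

@schedule(args=('world',))
def g(name):
    pass
# Output:
#   scheduled f with args ()
#   scheduled g with args ('world',)
```

Returning `target` unchanged is what lets the same function serve as both a plain call and a decorator.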
repo: andreikop/qutepart
path: qutepart/lines.py
url: https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/lines.py#L21-L29

```python
def _atomicModification(func):
    """Decorator
    Make document modification atomic
    """
    def wrapper(*args, **kwargs):
        self = args[0]
        with self._qpart:
            func(*args, **kwargs)
    return wrapper
```
language: python
partition: train
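A standalone analog: the decorator wraps the call in the object's context manager, so every edit made inside `func` lands in one atomic step. `RecordingDoc` is a stand-in for qutepart's document object:

```python
class RecordingDoc:
    def __enter__(self):
        print('begin atomic edit')
        return self

    def __exit__(self, *exc):
        print('end atomic edit')
        return False

def _atomicModification(func):
    def wrapper(*args, **kwargs):
        self = args[0]          # the decorated method's instance
        with self._qpart:       # group everything func does into one step
            func(*args, **kwargs)
    return wrapper

class Lines:
    _qpart = RecordingDoc()

    @_atomicModification
    def replace_all(self):
        print('editing')

Lines().replace_all()
# begin atomic edit / editing / end atomic edit
```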
repo: brocade/pynos
path: pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_notifications.py
url: https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_notifications.py#L12-L23

```python
def netconf_config_change_changed_by_server_or_user_server_server(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    netconf_config_change = ET.SubElement(
        config, "netconf-config-change",
        xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
    changed_by = ET.SubElement(netconf_config_change, "changed-by")
    server_or_user = ET.SubElement(changed_by, "server-or-user")
    server = ET.SubElement(server_or_user, "server")
    server = ET.SubElement(server, "server")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
```
language: python
partition: train
repo: openstack/networking-cisco
path: networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
url: https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L140-L145

```python
def create_fw_db(self, fw_id, fw_name, tenant_id):
    """Create FW dict. """
    fw_dict = {'fw_id': fw_id, 'name': fw_name, 'tenant_id': tenant_id}
    # FW DB is already created by FW Mgr
    # self.add_fw_db(fw_id, fw_dict)
    self.update_fw_dict(fw_dict)
```
language: python
partition: train
repo: sethmlarson/virtualbox-python
path: virtualbox/library.py
url: https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L24181-L24192

```python
def format_fat(self, quick):
    """Formats the medium as FAT. Generally only useful for floppy
    images as no partition table will be created.

    in quick of type bool
        Quick format it when set.
    """
    if not isinstance(quick, bool):
        raise TypeError("quick can only be an instance of type bool")
    self._call("formatFAT", in_p=[quick])
```
language: python
partition: train
repo: iotile/coretools
path: iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/__init__.py
url: https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/__init__.py#L45-L60

```python
def Scanner(function, *args, **kw):
    """
    Public interface factory function for creating different types
    of Scanners based on the different types of "functions" that may
    be supplied.

    TODO: Deprecate this some day. We've moved the functionality
    inside the Base class and really don't need this factory function
    any more. It was, however, used by some of our Tool modules, so
    the call probably ended up in various people's custom modules
    patterned on SCons code.
    """
    if SCons.Util.is_Dict(function):
        return Selector(function, *args, **kw)
    else:
        return Base(function, *args, **kw)
```
language: python
partition: train
repo: nwilming/ocupy
path: ocupy/parallel.py
url: https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L61-L68

```python
def xmlrpc_reschedule(self):
    """
    Reschedule all running tasks.
    """
    if not len(self.scheduled_tasks) == 0:
        self.reschedule = list(self.scheduled_tasks.items())
        self.scheduled_tasks = {}
    return True
```
language: python
partition: train
repo: alejandroautalan/pygubu
path: pygubu/builder/__init__.py
url: https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L238-L273

```python
def create_variable(self, varname, vtype=None):
    """Create a tk variable.
    If the variable was created previously return that instance.
    """
    var_types = ('string', 'int', 'boolean', 'double')
    vname = varname
    var = None
    type_from_name = 'string'  # default type
    if ':' in varname:
        type_from_name, vname = varname.split(':')
        # Fix incorrect order bug #33
        if type_from_name not in (var_types):
            # Swap order
            type_from_name, vname = vname, type_from_name
            if type_from_name not in (var_types):
                raise Exception('Undefined variable type in "{0}"'.format(varname))

    if vname in self.tkvariables:
        var = self.tkvariables[vname]
    else:
        if vtype is None:
            # get type from name
            if type_from_name == 'int':
                var = tkinter.IntVar()
            elif type_from_name == 'boolean':
                var = tkinter.BooleanVar()
            elif type_from_name == 'double':
                var = tkinter.DoubleVar()
            else:
                var = tkinter.StringVar()
        else:
            var = vtype()

        self.tkvariables[vname] = var
    return var
```
language: python
partition: train
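A standalone sketch of just the `'type:name'` parsing convention, including the swapped-order tolerance noted in the code; no tkinter required:

```python
VAR_TYPES = ('string', 'int', 'boolean', 'double')

def parse_varname(varname):
    type_from_name, vname = 'string', varname   # default type
    if ':' in varname:
        type_from_name, vname = varname.split(':')
        if type_from_name not in VAR_TYPES:     # tolerate 'name:type' order
            type_from_name, vname = vname, type_from_name
            if type_from_name not in VAR_TYPES:
                raise ValueError('Undefined variable type in "{0}"'.format(varname))
    return type_from_name, vname

print(parse_varname('int:counter'))   # ('int', 'counter')
print(parse_varname('counter:int'))   # ('int', 'counter')
print(parse_varname('plain'))         # ('string', 'plain')
```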
repo: openstack/quark
path: quark/tools/billing.py
url: https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/billing.py#L57-L118

```python
def main(notify, hour, minute):
    """Runs billing report. Optionally sends notifications to billing"""

    # Read the config file and get the admin context
    config_opts = ['--config-file', '/etc/neutron/neutron.conf']
    config.init(config_opts)
    # Have to load the billing module _after_ config is parsed so
    # that we get the right network strategy
    network_strategy.STRATEGY.load()
    billing.PUBLIC_NETWORK_ID = network_strategy.STRATEGY.get_public_net_id()
    config.setup_logging()
    context = neutron_context.get_admin_context()

    # A query to get all IPAddress objects from the db
    query = context.session.query(models.IPAddress)

    (period_start, period_end) = billing.calc_periods(hour, minute)

    full_day_ips = billing.build_full_day_ips(query,
                                              period_start,
                                              period_end)
    partial_day_ips = billing.build_partial_day_ips(query,
                                                    period_start,
                                                    period_end)

    if notify:
        # '==================== Full Day ============================='
        for ipaddress in full_day_ips:
            click.echo('start: {}, end: {}'.format(period_start, period_end))
            payload = billing.build_payload(ipaddress,
                                            billing.IP_EXISTS,
                                            start_time=period_start,
                                            end_time=period_end)
            billing.do_notify(context,
                              billing.IP_EXISTS,
                              payload)
        # '==================== Part Day ============================='
        for ipaddress in partial_day_ips:
            click.echo('start: {}, end: {}'.format(period_start, period_end))
            payload = billing.build_payload(ipaddress,
                                            billing.IP_EXISTS,
                                            start_time=ipaddress.allocated_at,
                                            end_time=period_end)
            billing.do_notify(context,
                              billing.IP_EXISTS,
                              payload)
    else:
        click.echo('Case 1 ({}):\n'.format(len(full_day_ips)))
        for ipaddress in full_day_ips:
            pp(billing.build_payload(ipaddress,
                                     billing.IP_EXISTS,
                                     start_time=period_start,
                                     end_time=period_end))

        click.echo('\n===============================================\n')

        click.echo('Case 2 ({}):\n'.format(len(partial_day_ips)))
        for ipaddress in partial_day_ips:
            pp(billing.build_payload(ipaddress,
                                     billing.IP_EXISTS,
                                     start_time=ipaddress.allocated_at,
                                     end_time=period_end))
```
language: python
partition: valid
repo: phoebe-project/phoebe2
path: phoebe/frontend/bundle.py
url: https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L183-L315

```python
def open(cls, filename):
    """Open a new bundle.

    Open a bundle from a JSON-formatted PHOEBE 2 file.
    This is a constructor so should be called as:

    >>> b = Bundle.open('test.phoebe')

    :parameter str filename: relative or full path to the file
    :return: instantiated :class:`Bundle` object
    """
    filename = os.path.expanduser(filename)
    logger.debug("importing from {}".format(filename))
    f = open(filename, 'r')
    data = json.load(f, object_pairs_hook=parse_json)
    f.close()
    b = cls(data)

    version = b.get_value('phoebe_version')
    phoebe_version_import = StrictVersion(version if version != 'devel' else '2.1.2')
    phoebe_version_this = StrictVersion(__version__ if __version__ != 'devel' else '2.1.2')

    logger.debug("importing from PHOEBE v {} into v {}".format(phoebe_version_import, phoebe_version_this))

    # update the entry in the PS, so if this is saved again it will have the new version
    b.set_value('phoebe_version', __version__)

    if phoebe_version_import == phoebe_version_this:
        return b
    elif phoebe_version_import > phoebe_version_this:
        warning = "importing from a newer version ({}) of PHOEBE, this may or may not work, consider updating".format(phoebe_version_import)
        print("WARNING: {}".format(warning))
        logger.warning(warning)
        return b

    if phoebe_version_import < StrictVersion("2.1.2"):
        b._import_before_v211 = True
        warning = "Importing from an older version ({}) of PHOEBE which did not support constraints in solar units. All constraints will remain in SI, but calling set_hierarchy will likely fail.".format(phoebe_version_import)
        print("WARNING: {}".format(warning))
        logger.warning(warning)

    if phoebe_version_import < StrictVersion("2.1.0"):
        logger.warning("importing from an older version ({}) of PHOEBE into version {}".format(phoebe_version_import, phoebe_version_this))

        def _ps_dict(ps):
            return {p.qualifier: p.get_quantity() if hasattr(p, 'get_quantity') else p.get_value() for p in ps.to_list()}

        # rpole -> requiv: https://github.com/phoebe-project/phoebe2/pull/300
        dict_stars = {}
        for star in b.hierarchy.get_stars():
            ps_star = b.filter(context='component', component=star)
            dict_stars[star] = _ps_dict(ps_star)

            # TODO: actually do the translation
            rpole = dict_stars[star].pop('rpole', 1.0*u.solRad).to(u.solRad).value
            # PHOEBE 2.0 didn't have syncpar for contacts
            if len(b.filter('syncpar', component=star)):
                F = b.get_value('syncpar', component=star, context='component')
            else:
                F = 1.0

            parent_orbit = b.hierarchy.get_parent_of(star)
            component = b.hierarchy.get_primary_or_secondary(star, return_ind=True)
            sma = b.get_value('sma', component=parent_orbit, context='component', unit=u.solRad)
            q = b.get_value('q', component=parent_orbit, context='component')
            d = 1 - b.get_value('ecc', component=parent_orbit)

            logger.info("roche.rpole_to_requiv_aligned(rpole={}, sma={}, q={}, F={}, d={}, component={})".format(rpole, sma, q, F, d, component))
            dict_stars[star]['requiv'] = roche.rpole_to_requiv_aligned(rpole, sma, q, F, d, component=component)

            b.remove_component(star)

        for star, dict_star in dict_stars.items():
            logger.info("attempting to update component='{}' to new version requirements".format(star))
            b.add_component('star', component=star, check_label=False, **dict_star)

        dict_envs = {}
        for env in b.hierarchy.get_envelopes():
            ps_env = b.filter(context='component', component=env)
            dict_envs[env] = _ps_dict(ps_env)
            b.remove_component(env)

        for env, dict_env in dict_envs.items():
            logger.info("attempting to update component='{}' to new version requirements".format(env))
            b.add_component('envelope', component=env, check_label=False, **dict_env)

            # TODO: this probably will fail once more than one contacts are
            # supported, but will never need that for 2.0->2.1 since
            # multiples aren't supported (yet) call b.set_hierarchy() to
            # reset all hieararchy-dependent constraints (including
            # pot<->requiv)
            b.set_hierarchy()

            primary = b.hierarchy.get_stars()[0]
            b.flip_constraint('pot', component=env, solve_for='requiv@{}'.format(primary), check_nan=False)
            b.set_value('pot', component=env, context='component', value=dict_env['pot'])
            b.flip_constraint('requiv', component=primary, solve_for='pot', check_nan=False)

        # reset all hieararchy-dependent constraints
        b.set_hierarchy()

        # mesh datasets: https://github.com/phoebe-project/phoebe2/pull/261, https://github.com/phoebe-project/phoebe2/pull/300
        for dataset in b.filter(context='dataset', kind='mesh').datasets:
            logger.info("attempting to update mesh dataset='{}' to new version requirements".format(dataset))
            ps_mesh = b.filter(context='dataset', kind='mesh', dataset=dataset)
            dict_mesh = _ps_dict(ps_mesh)

            # NOTE: we will not remove (or update) the dataset from any existing models
            b.remove_dataset(dataset, context=['dataset', 'constraint', 'compute'])
            if len(b.filter(dataset=dataset, context='model')):
                logger.warning("existing model for dataset='{}' models={} will not be removed, but likely will not work with new plotting updates".format(dataset, b.filter(dataset=dataset, context='model').models))

            b.add_dataset('mesh', dataset=dataset, check_label=False, **dict_mesh)

        # vgamma definition: https://github.com/phoebe-project/phoebe2/issues/234
        logger.info("updating vgamma to new version requirements")
        b.set_value('vgamma', -1*b.get_value('vgamma'))

        # remove phshift parameter: https://github.com/phoebe-project/phoebe2/commit/1fa3a4e1c0f8d80502101e1b1e750f5fb14115cb
        logger.info("removing any phshift parameters for new version requirements")
        b.remove_parameters_all(qualifier='phshift')

        # colon -> long: https://github.com/phoebe-project/phoebe2/issues/211
        logger.info("removing any colon parameters for new version requirements")
        b.remove_parameters_all(qualifier='colon')

    # make sure constraints are updated according to conf.interactive_constraints
    b.run_delayed_constraints()

    return b
```
language: python
partition: train
repo: matiasb/python-unrar
path: unrar/unrarlib.py
url: https://github.com/matiasb/python-unrar/blob/b1ac46cbcf42f3d3c5c69ab971fe97369a4da617/unrar/unrarlib.py#L199-L205

```python
def _c_func(func, restype, argtypes, errcheck=None):
    """Wrap c function setting prototype."""
    func.restype = restype
    func.argtypes = argtypes
    if errcheck is not None:
        func.errcheck = errcheck
    return func
```
language: python
partition: valid
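A runnable demo of the same prototype-setting helper, exercised against libc's `abs` rather than the unrar library (POSIX assumed; on Windows a different DLL would be needed):

```python
import ctypes
import ctypes.util

def _c_func(func, restype, argtypes, errcheck=None):
    # Declare the C function's return and argument types so ctypes can
    # marshal values correctly instead of guessing.
    func.restype = restype
    func.argtypes = argtypes
    if errcheck is not None:
        func.errcheck = errcheck
    return func

libc = ctypes.CDLL(ctypes.util.find_library('c'))
c_abs = _c_func(libc.abs, ctypes.c_int, [ctypes.c_int])
print(c_abs(-5))  # 5
```

Setting `argtypes` also makes ctypes raise a `TypeError` on mismatched arguments instead of silently passing garbage.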
repo: gem/oq-engine
path: openquake/hazardlib/gsim/abrahamson_2014.py
url: https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_2014.py#L354-L373

```python
def _get_intra_event_std(self, C, mag, sa1180, vs30, vs30measured, rrup):
    """
    Returns Phi as described at pages 1046 and 1047
    """
    phi_al = self._get_phi_al_regional(C, mag, vs30measured, rrup)
    derAmp = self._get_derivative(C, sa1180, vs30)
    phi_amp = 0.4
    idx = phi_al < phi_amp
    if np.any(idx):
        # In the case of small magnitudes and long periods it is possible
        # for phi_al to take a value less than phi_amp, which would return
        # a complex value. According to the GMPE authors in this case
        # phi_amp should be reduced such that it is fractionally smaller
        # than phi_al
        phi_amp = 0.4 * np.ones_like(phi_al)
        phi_amp[idx] = 0.99 * phi_al[idx]
    phi_b = np.sqrt(phi_al**2 - phi_amp**2)
    phi = np.sqrt(phi_b**2 * (1 + derAmp)**2 + phi_amp**2)
    return phi
```
python
train
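The only subtle step in the method above is the clamp that keeps `phi_al**2 - phi_amp**2` non-negative. A standalone sketch of just that step, with illustrative numbers:

```
import numpy as np

phi_al = np.array([0.55, 0.45, 0.30])   # last value sits below phi_amp = 0.4
phi_amp = np.full_like(phi_al, 0.4)
idx = phi_al < phi_amp
phi_amp[idx] = 0.99 * phi_al[idx]       # shrink phi_amp so the sqrt stays real

phi_b = np.sqrt(phi_al**2 - phi_amp**2)
assert np.all(np.isfinite(phi_b))
```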
fhcrc/taxtastic
taxtastic/taxonomy.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L784-L798
def nary_subtree(self, tax_id, n=2): """Return a list of species tax_ids under *tax_id* such that node under *tax_id* and above the species has two children. """ if tax_id is None: return None parent_id, rank = self._node(tax_id) if rank == 'species': return [tax_id] else: children = self.children_of(tax_id, 2) species_taxids = [] for t in children: species_taxids.extend(self.nary_subtree(t, n)) return species_taxids
[ "def", "nary_subtree", "(", "self", ",", "tax_id", ",", "n", "=", "2", ")", ":", "if", "tax_id", "is", "None", ":", "return", "None", "parent_id", ",", "rank", "=", "self", ".", "_node", "(", "tax_id", ")", "if", "rank", "==", "'species'", ":", "return", "[", "tax_id", "]", "else", ":", "children", "=", "self", ".", "children_of", "(", "tax_id", ",", "2", ")", "species_taxids", "=", "[", "]", "for", "t", "in", "children", ":", "species_taxids", ".", "extend", "(", "self", ".", "nary_subtree", "(", "t", ",", "n", ")", ")", "return", "species_taxids" ]
Return a list of species tax_ids under *tax_id* such that node under *tax_id* and above the species has two children.
[ "Return", "a", "list", "of", "species", "tax_ids", "under", "*", "tax_id", "*", "such", "that", "node", "under", "*", "tax_id", "*", "and", "above", "the", "species", "has", "two", "children", "." ]
python
train
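Note that `n` is accepted and forwarded through the recursion, but the call to `children_of` hardcodes 2, so the branching factor never actually varies. A sketch of a variant that honours `n`, under the assumption that `children_of(tax_id, n)` accepts an arbitrary count:

```
# Hypothetical variant, not the repository's actual behaviour.
def nary_subtree(self, tax_id, n=2):
    if tax_id is None:
        return None
    parent_id, rank = self._node(tax_id)
    if rank == 'species':
        return [tax_id]
    children = self.children_of(tax_id, n)   # was hardcoded to 2
    species_taxids = []
    for t in children:
        species_taxids.extend(self.nary_subtree(t, n))
    return species_taxids
```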
bretth/djset
djset/commands.py
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/commands.py#L16-L25
def _create_djset(args, cls): """ Return a DjSecret object """ name = args.get('--name') settings = args.get('--settings') if name: return cls(name=name) elif settings: return cls(name=settings) else: return cls()
[ "def", "_create_djset", "(", "args", ",", "cls", ")", ":", "name", "=", "args", ".", "get", "(", "'--name'", ")", "settings", "=", "args", ".", "get", "(", "'--settings'", ")", "if", "name", ":", "return", "cls", "(", "name", "=", "name", ")", "elif", "settings", ":", "return", "cls", "(", "name", "=", "settings", ")", "else", ":", "return", "cls", "(", ")" ]
Return a DjSecret object
[ "Return", "a", "DjSecret", "object" ]
python
train
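A self-contained usage sketch; `FakeDjSet` stands in for the real `DjSecret` class that callers normally pass:

```
class FakeDjSet(object):
    def __init__(self, name=None):
        self.name = name

# docopt-style options dict, as the real command handlers would supply
obj = _create_djset({'--name': 'myapp', '--settings': None}, FakeDjSet)
assert obj.name == 'myapp'

obj = _create_djset({}, FakeDjSet)   # neither flag given -> default instance
assert obj.name is None
```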
bitesofcode/projex
projex/text.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/text.py#L793-L810
def underscore(text, lower=True): """ Splits all the words from the input text so that they are separated by underscores :sa [[#joinWords]] :param text <str> :return <str> :usage |import projex.text |print projex.text.underscore('TheQuick, Brown, Fox') """ out = joinWords(text, '_') if lower: return out.lower() return out
[ "def", "underscore", "(", "text", ",", "lower", "=", "True", ")", ":", "out", "=", "joinWords", "(", "text", ",", "'_'", ")", "if", "lower", ":", "return", "out", ".", "lower", "(", ")", "return", "out" ]
Splits all the words from the input text so that they are separated by underscores :sa [[#joinWords]] :param text <str> :return <str> :usage |import projex.text |print projex.text.underscore('TheQuick, Brown, Fox')
[ "Splits", "all", "the", "words", "from", "the", "inputted", "text", "into", "being", "separated", "by", "underscores", ":", "sa", "[[", "#joinWords", "]]", ":", "param", "text", "<str", ">", ":", "return", "<str", ">", ":", "usage", "|import", "projex", ".", "text", "|print", "projex", ".", "text", ".", "underscore", "(", "TheQuick", "Brown", "Fox", ")" ]
python
train
fracpete/python-weka-wrapper3
python/weka/core/serialization.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/serialization.py#L68-L84
def read_all(filename): """ Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes. :param filename: the file with the serialized objects :type filename: str :return: the list of JB_Objects :rtype: list """ array = javabridge.static_call( "Lweka/core/SerializationHelper;", "readAll", "(Ljava/lang/String;)[Ljava/lang/Object;", filename) if array is None: return None else: return javabridge.get_env().get_object_array_elements(array)
[ "def", "read_all", "(", "filename", ")", ":", "array", "=", "javabridge", ".", "static_call", "(", "\"Lweka/core/SerializationHelper;\"", ",", "\"readAll\"", ",", "\"(Ljava/lang/String;)[Ljava/lang/Object;\"", ",", "filename", ")", "if", "array", "is", "None", ":", "return", "None", "else", ":", "return", "javabridge", ".", "get_env", "(", ")", ".", "get_object_array_elements", "(", "array", ")" ]
Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes. :param filename: the file with the serialized objects :type filename: str :return: the list of JB_Objects :rtype: list
[ "Reads", "the", "serialized", "objects", "from", "disk", ".", "Caller", "must", "wrap", "objects", "in", "appropriate", "Python", "wrapper", "classes", "." ]
python
train
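A hedged usage sketch: `read_all` assumes a running JVM, which python-weka-wrapper3 normally starts through its `weka.core.jvm` helper; the serialized file path below is hypothetical:

```
import weka.core.jvm as jvm
from weka.core.serialization import read_all

jvm.start(packages=True)
try:
    objects = read_all("/tmp/model.ser")   # hypothetical path
    if objects is not None:
        print(len(objects), "objects read")
finally:
    jvm.stop()
```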
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L1526-L1561
def _extract_links_from_asset_tags_in_text(self, text): """ Scan the text and extract asset tags and links to corresponding files. @param text: Page text. @type text: str @return: @see CourseraOnDemand._extract_links_from_text """ # Extract asset tags from instructions text asset_tags_map = self._extract_asset_tags(text) ids = list(iterkeys(asset_tags_map)) if not ids: return {} # asset tags contain asset names and ids. We need to make another # HTTP request to get asset URL. asset_urls = self._extract_asset_urls(ids) supplement_links = {} # Build supplement links, providing nice titles along the way for asset in asset_urls: title = clean_filename( asset_tags_map[asset['id']]['name'], self._unrestricted_filenames) extension = clean_filename( asset_tags_map[asset['id']]['extension'].strip(), self._unrestricted_filenames) url = asset['url'].strip() if extension not in supplement_links: supplement_links[extension] = [] supplement_links[extension].append((url, title)) return supplement_links
[ "def", "_extract_links_from_asset_tags_in_text", "(", "self", ",", "text", ")", ":", "# Extract asset tags from instructions text", "asset_tags_map", "=", "self", ".", "_extract_asset_tags", "(", "text", ")", "ids", "=", "list", "(", "iterkeys", "(", "asset_tags_map", ")", ")", "if", "not", "ids", ":", "return", "{", "}", "# asset tags contain asset names and ids. We need to make another", "# HTTP request to get asset URL.", "asset_urls", "=", "self", ".", "_extract_asset_urls", "(", "ids", ")", "supplement_links", "=", "{", "}", "# Build supplement links, providing nice titles along the way", "for", "asset", "in", "asset_urls", ":", "title", "=", "clean_filename", "(", "asset_tags_map", "[", "asset", "[", "'id'", "]", "]", "[", "'name'", "]", ",", "self", ".", "_unrestricted_filenames", ")", "extension", "=", "clean_filename", "(", "asset_tags_map", "[", "asset", "[", "'id'", "]", "]", "[", "'extension'", "]", ".", "strip", "(", ")", ",", "self", ".", "_unrestricted_filenames", ")", "url", "=", "asset", "[", "'url'", "]", ".", "strip", "(", ")", "if", "extension", "not", "in", "supplement_links", ":", "supplement_links", "[", "extension", "]", "=", "[", "]", "supplement_links", "[", "extension", "]", ".", "append", "(", "(", "url", ",", "title", ")", ")", "return", "supplement_links" ]
Scan the text and extract asset tags and links to corresponding files. @param text: Page text. @type text: str @return: @see CourseraOnDemand._extract_links_from_text
[ "Scan", "the", "text", "and", "extract", "asset", "tags", "and", "links", "to", "corresponding", "files", "." ]
python
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L377-L393
def count_SMS(self, conditions={}): """ Count all certified sms """ url = self.SMS_COUNT_URL + "?" for key, value in conditions.items(): if key == 'ids': value = ",".join(value) url += '&%s=%s' % (key, value) connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
[ "def", "count_SMS", "(", "self", ",", "conditions", "=", "{", "}", ")", ":", "url", "=", "self", ".", "SMS_COUNT_URL", "+", "\"?\"", "for", "key", ",", "value", "in", "conditions", ".", "items", "(", ")", ":", "if", "key", "==", "'ids'", ":", "value", "=", "\",\"", ".", "join", "(", "value", ")", "url", "+=", "'&%s=%s'", "%", "(", "key", ",", "value", ")", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(", "self", ".", "production", ",", "url", ")", "return", "connection", ".", "get_request", "(", ")" ]
Count all certified sms
[ "Count", "all", "certified", "sms" ]
python
train
pytorch/ignite
ignite/contrib/handlers/base_logger.py
https://github.com/pytorch/ignite/blob/a96bd07cb58822cfb39fd81765135712f1db41ca/ignite/contrib/handlers/base_logger.py#L89-L108
def _setup_output_metrics(self, engine): """Helper method to setup metrics to log """ metrics = {} if self.metric_names is not None: for name in self.metric_names: if name not in engine.state.metrics: warnings.warn("Provided metric name '{}' is missing " "in engine's state metrics: {}".format(name, list(engine.state.metrics.keys()))) continue metrics[name] = engine.state.metrics[name] if self.output_transform is not None: output_dict = self.output_transform(engine.state.output) if not isinstance(output_dict, dict): output_dict = {"output": output_dict} metrics.update({name: value for name, value in output_dict.items()}) return metrics
[ "def", "_setup_output_metrics", "(", "self", ",", "engine", ")", ":", "metrics", "=", "{", "}", "if", "self", ".", "metric_names", "is", "not", "None", ":", "for", "name", "in", "self", ".", "metric_names", ":", "if", "name", "not", "in", "engine", ".", "state", ".", "metrics", ":", "warnings", ".", "warn", "(", "\"Provided metric name '{}' is missing \"", "\"in engine's state metrics: {}\"", ".", "format", "(", "name", ",", "list", "(", "engine", ".", "state", ".", "metrics", ".", "keys", "(", ")", ")", ")", ")", "continue", "metrics", "[", "name", "]", "=", "engine", ".", "state", ".", "metrics", "[", "name", "]", "if", "self", ".", "output_transform", "is", "not", "None", ":", "output_dict", "=", "self", ".", "output_transform", "(", "engine", ".", "state", ".", "output", ")", "if", "not", "isinstance", "(", "output_dict", ",", "dict", ")", ":", "output_dict", "=", "{", "\"output\"", ":", "output_dict", "}", "metrics", ".", "update", "(", "{", "name", ":", "value", "for", "name", ",", "value", "in", "output_dict", ".", "items", "(", ")", "}", ")", "return", "metrics" ]
Helper method to setup metrics to log
[ "Helper", "method", "to", "setup", "metrics", "to", "log" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/dbapi/cursor.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/dbapi/cursor.py#L72-L95
def _set_description(self, schema): """Set description from schema. :type schema: Sequence[google.cloud.bigquery.schema.SchemaField] :param schema: A description of fields in the schema. """ if schema is None: self.description = None return self.description = tuple( [ Column( name=field.name, type_code=field.field_type, display_size=None, internal_size=None, precision=None, scale=None, null_ok=field.is_nullable, ) for field in schema ] )
[ "def", "_set_description", "(", "self", ",", "schema", ")", ":", "if", "schema", "is", "None", ":", "self", ".", "description", "=", "None", "return", "self", ".", "description", "=", "tuple", "(", "[", "Column", "(", "name", "=", "field", ".", "name", ",", "type_code", "=", "field", ".", "field_type", ",", "display_size", "=", "None", ",", "internal_size", "=", "None", ",", "precision", "=", "None", ",", "scale", "=", "None", ",", "null_ok", "=", "field", ".", "is_nullable", ",", ")", "for", "field", "in", "schema", "]", ")" ]
Set description from schema. :type schema: Sequence[google.cloud.bigquery.schema.SchemaField] :param schema: A description of fields in the schema.
[ "Set", "description", "from", "schema", "." ]
python
train
ldomic/lintools
lintools/draw.py
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/draw.py#L29-L48
def draw_hydrogen_bonds(self,color="black"): """For each bond that has been determined to be important, a line gets drawn. """ self.draw_hbonds="" if self.hbonds!=None: for bond in self.hbonds.hbonds_for_drawing: x = str((self.molecule.x_dim-self.molecule.molsize1)/2) y = str((self.molecule.y_dim-self.molecule.molsize2)/2) self.draw_hbonds ="<g id='"+str(bond[0])+"' class='HBonds' transform='translate("+x+","+y+")' x='"+x+"' y='"+y+"'>'" atom = self.topology_data.universe.atoms[bond[0]-1] #zero-based index vs one-based index residue = (atom.resname, str(atom.resid), atom.segid) if bond[2] in ["N","O","H"]: #backbone interactions self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />" self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />" else: #sidechain interactions self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />" self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' stroke-dasharray='5,5' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />" self.draw_hbonds+="</g>"
[ "def", "draw_hydrogen_bonds", "(", "self", ",", "color", "=", "\"black\"", ")", ":", "self", ".", "draw_hbonds", "=", "\"\"", "if", "self", ".", "hbonds", "!=", "None", ":", "for", "bond", "in", "self", ".", "hbonds", ".", "hbonds_for_drawing", ":", "x", "=", "str", "(", "(", "self", ".", "molecule", ".", "x_dim", "-", "self", ".", "molecule", ".", "molsize1", ")", "/", "2", ")", "y", "=", "str", "(", "(", "self", ".", "molecule", ".", "y_dim", "-", "self", ".", "molecule", ".", "molsize2", ")", "/", "2", ")", "self", ".", "draw_hbonds", "=", "\"<g id='\"", "+", "str", "(", "bond", "[", "0", "]", ")", "+", "\"' class='HBonds' transform='translate(\"", "+", "x", "+", "\",\"", "+", "y", "+", "\")' x='\"", "+", "x", "+", "\"' y='\"", "+", "y", "+", "\"'>'\"", "atom", "=", "self", ".", "topology_data", ".", "universe", ".", "atoms", "[", "bond", "[", "0", "]", "-", "1", "]", "#zero-based index vs one-based index", "residue", "=", "(", "atom", ".", "resname", ",", "str", "(", "atom", ".", "resid", ")", ",", "atom", ".", "segid", ")", "if", "bond", "[", "2", "]", "in", "[", "\"N\"", ",", "\"O\"", ",", "\"H\"", "]", ":", "#backbone interactions", "self", ".", "draw_hbonds", "=", "self", ".", "draw_hbonds", "+", "\"<line id='\"", "+", "str", "(", "bond", "[", "0", "]", ")", "+", "\"' class='HBonds' x1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "0", "]", ")", ")", "+", "\"' y1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "1", "]", ")", ")", "+", "\"' x2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "0", "]", ")", ")", "+", "\"' y2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "1", "]", ")", ")", "+", "\"' style='stroke:white;stroke-width:15' />\"", "self", ".", "draw_hbonds", "=", "self", ".", "draw_hbonds", "+", "\"<line id='\"", "+", "str", "(", "bond", "[", "0", "]", ")", "+", "\"' class='HBonds' x1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "0", "]", ")", ")", "+", "\"' y1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "1", "]", ")", ")", "+", "\"' x2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "0", "]", ")", ")", "+", "\"' y2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "1", "]", ")", ")", "+", "\"' style='stroke:\"", "+", "color", "+", "\";stroke-width:4' />\"", "else", ":", "#sidechain interactions", "self", ".", "draw_hbonds", "=", "self", ".", "draw_hbonds", "+", "\"<line id='\"", "+", "str", "(", "bond", "[", "0", "]", ")", "+", "\"' class='HBonds' x1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "0", "]", ")", ")", "+", "\"' y1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "1", "]", ")", ")", "+", "\"' x2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "0", "]", ")", ")", "+", "\"' y2='\"", "+", "str", "(", "float", 
"(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "1", "]", ")", ")", "+", "\"' style='stroke:white;stroke-width:15' />\"", "self", ".", "draw_hbonds", "=", "self", ".", "draw_hbonds", "+", "\"<line id='\"", "+", "str", "(", "bond", "[", "0", "]", ")", "+", "\"' class='HBonds' stroke-dasharray='5,5' x1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "0", "]", ")", ")", "+", "\"' y1='\"", "+", "str", "(", "int", "(", "self", ".", "molecule", ".", "nearest_points_coords", "[", "residue", "]", "[", "1", "]", ")", ")", "+", "\"' x2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "0", "]", ")", ")", "+", "\"' y2='\"", "+", "str", "(", "float", "(", "self", ".", "molecule", ".", "ligand_atom_coords_from_diagr", "[", "bond", "[", "1", "]", "]", "[", "1", "]", ")", ")", "+", "\"' style='stroke:\"", "+", "color", "+", "\";stroke-width:4' />\"", "self", ".", "draw_hbonds", "+=", "\"</g>\"" ]
For each bond that has been determined to be important, a line gets drawn.
[ "For", "each", "bond", "that", "has", "been", "determined", "to", "be", "important", "a", "line", "gets", "drawn", "." ]
python
train
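Two observations on the method above: inside the loop `self.draw_hbonds` is reassigned with a fresh `<g ...>` opening tag for every bond, so markup built for earlier bonds appears to be overwritten rather than appended, and the long string concatenations are hard to audit. A sketch of a `str.format` helper that would make each `<line>` element explicit (the helper name and sample coordinates are placeholders):

```
SVG_LINE = ("<line id='{id}' class='HBonds' x1='{x1}' y1='{y1}' "
            "x2='{x2}' y2='{y2}' style='stroke:{stroke};stroke-width:{w}' />")

def svg_hbond_line(bond_id, p1, p2, stroke="black", width=4):
    # p1: integer residue anchor; p2: float ligand-atom coordinates
    return SVG_LINE.format(id=bond_id, x1=int(p1[0]), y1=int(p1[1]),
                           x2=float(p2[0]), y2=float(p2[1]),
                           stroke=stroke, w=width)

print(svg_hbond_line(7, (120, 80), (95.5, 60.25)))
```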
Hackerfleet/hfos
modules/navdata/hfos/navdata/sensors.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/navdata/hfos/navdata/sensors.py#L290-L318
def navdatapush(self): """ Pushes the current :referenceframe: out to clients. :return: """ try: self.fireEvent(referenceframe({ 'data': self.referenceframe, 'ages': self.referenceages }), "navdata") self.intervalcount += 1 if self.intervalcount == self.passiveinterval and len( self.referenceframe) > 0: self.fireEvent(broadcast('users', { 'component': 'hfos.navdata.sensors', 'action': 'update', 'data': { 'data': self.referenceframe, 'ages': self.referenceages } }), "hfosweb") self.intervalcount = 0 # self.log("Reference frame successfully pushed.", # lvl=verbose) except Exception as e: self.log("Could not push referenceframe: ", e, type(e), lvl=critical)
[ "def", "navdatapush", "(", "self", ")", ":", "try", ":", "self", ".", "fireEvent", "(", "referenceframe", "(", "{", "'data'", ":", "self", ".", "referenceframe", ",", "'ages'", ":", "self", ".", "referenceages", "}", ")", ",", "\"navdata\"", ")", "self", ".", "intervalcount", "+=", "1", "if", "self", ".", "intervalcount", "==", "self", ".", "passiveinterval", "and", "len", "(", "self", ".", "referenceframe", ")", ">", "0", ":", "self", ".", "fireEvent", "(", "broadcast", "(", "'users'", ",", "{", "'component'", ":", "'hfos.navdata.sensors'", ",", "'action'", ":", "'update'", ",", "'data'", ":", "{", "'data'", ":", "self", ".", "referenceframe", ",", "'ages'", ":", "self", ".", "referenceages", "}", "}", ")", ",", "\"hfosweb\"", ")", "self", ".", "intervalcount", "=", "0", "# self.log(\"Reference frame successfully pushed.\",", "# lvl=verbose)", "except", "Exception", "as", "e", ":", "self", ".", "log", "(", "\"Could not push referenceframe: \"", ",", "e", ",", "type", "(", "e", ")", ",", "lvl", "=", "critical", ")" ]
Pushes the current :referenceframe: out to clients. :return:
[ "Pushes", "the", "current", ":", "referenceframe", ":", "out", "to", "clients", "." ]
python
train
GNS3/gns3-server
gns3server/compute/docker/docker_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/docker/docker_vm.py#L669-L704
def _add_ubridge_connection(self, nio, adapter_number): """ Creates a connection in uBridge. :param nio: NIO instance or None if it's a dummy interface (if an interface is missing in ubridge you can't see it via ifconfig in the container) :param adapter_number: adapter number """ try: adapter = self._ethernet_adapters[adapter_number] except IndexError: raise DockerError("Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(name=self.name, adapter_number=adapter_number)) for index in range(4096): if "tap-gns3-e{}".format(index) not in psutil.net_if_addrs(): adapter.host_ifc = "tap-gns3-e{}".format(str(index)) break if adapter.host_ifc is None: raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name, adapter_number=adapter_number)) bridge_name = 'bridge{}'.format(adapter_number) yield from self._ubridge_send('bridge create {}'.format(bridge_name)) self._bridges.add(bridge_name) yield from self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number, hostif=adapter.host_ifc)) log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.host_ifc, self._namespace) try: yield from self._ubridge_send('docker move_to_ns {ifc} {ns} eth{adapter}'.format(ifc=adapter.host_ifc, ns=self._namespace, adapter=adapter_number)) except UbridgeError as e: raise UbridgeNamespaceError(e) if nio: yield from self._connect_nio(adapter_number, nio)
[ "def", "_add_ubridge_connection", "(", "self", ",", "nio", ",", "adapter_number", ")", ":", "try", ":", "adapter", "=", "self", ".", "_ethernet_adapters", "[", "adapter_number", "]", "except", "IndexError", ":", "raise", "DockerError", "(", "\"Adapter {adapter_number} doesn't exist on Docker container '{name}'\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "adapter_number", "=", "adapter_number", ")", ")", "for", "index", "in", "range", "(", "4096", ")", ":", "if", "\"tap-gns3-e{}\"", ".", "format", "(", "index", ")", "not", "in", "psutil", ".", "net_if_addrs", "(", ")", ":", "adapter", ".", "host_ifc", "=", "\"tap-gns3-e{}\"", ".", "format", "(", "str", "(", "index", ")", ")", "break", "if", "adapter", ".", "host_ifc", "is", "None", ":", "raise", "DockerError", "(", "\"Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "adapter_number", "=", "adapter_number", ")", ")", "bridge_name", "=", "'bridge{}'", ".", "format", "(", "adapter_number", ")", "yield", "from", "self", ".", "_ubridge_send", "(", "'bridge create {}'", ".", "format", "(", "bridge_name", ")", ")", "self", ".", "_bridges", ".", "add", "(", "bridge_name", ")", "yield", "from", "self", ".", "_ubridge_send", "(", "'bridge add_nio_tap bridge{adapter_number} {hostif}'", ".", "format", "(", "adapter_number", "=", "adapter_number", ",", "hostif", "=", "adapter", ".", "host_ifc", ")", ")", "log", ".", "debug", "(", "\"Move container %s adapter %s to namespace %s\"", ",", "self", ".", "name", ",", "adapter", ".", "host_ifc", ",", "self", ".", "_namespace", ")", "try", ":", "yield", "from", "self", ".", "_ubridge_send", "(", "'docker move_to_ns {ifc} {ns} eth{adapter}'", ".", "format", "(", "ifc", "=", "adapter", ".", "host_ifc", ",", "ns", "=", "self", ".", "_namespace", ",", "adapter", "=", "adapter_number", ")", ")", "except", "UbridgeError", "as", "e", ":", "raise", "UbridgeNamespaceError", "(", "e", ")", "if", "nio", ":", "yield", "from", "self", ".", "_connect_nio", "(", "adapter_number", ",", "nio", ")" ]
Creates a connection in uBridge. :param nio: NIO instance or None if it's a dummy interface (if an interface is missing in ubridge you can't see it via ifconfig in the container) :param adapter_number: adapter number
[ "Creates", "a", "connection", "in", "uBridge", "." ]
python
train
inasafe/inasafe
safe/gui/tools/geonode_uploader.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/geonode_uploader.py#L119-L129
def fill_layer_combo(self): """Fill layer combobox.""" project = QgsProject.instance() # MapLayers returns a QMap<QString id, QgsMapLayer layer> layers = list(project.mapLayers().values()) extensions = tuple(extension_siblings.keys()) for layer in layers: if layer.source().lower().endswith(extensions): icon = layer_icon(layer) self.layers.addItem(icon, layer.name(), layer.id())
[ "def", "fill_layer_combo", "(", "self", ")", ":", "project", "=", "QgsProject", ".", "instance", "(", ")", "# MapLayers returns a QMap<QString id, QgsMapLayer layer>", "layers", "=", "list", "(", "project", ".", "mapLayers", "(", ")", ".", "values", "(", ")", ")", "extensions", "=", "tuple", "(", "extension_siblings", ".", "keys", "(", ")", ")", "for", "layer", "in", "layers", ":", "if", "layer", ".", "source", "(", ")", ".", "lower", "(", ")", ".", "endswith", "(", "extensions", ")", ":", "icon", "=", "layer_icon", "(", "layer", ")", "self", ".", "layers", ".", "addItem", "(", "icon", ",", "layer", ".", "name", "(", ")", ",", "layer", ".", "id", "(", ")", ")" ]
Fill layer combobox.
[ "Fill", "layer", "combobox", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/internal/special_math.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L291-L380
def log_ndtr(x, series_order=3, name="log_ndtr"): """Log Normal distribution function. For details of the Normal distribution function see `ndtr`. This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or using an asymptotic series. Specifically: - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on `log(1-x) ~= -x, x << 1`. - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique and take a log. - For `x <= lower_segment`, we use the series approximation of erf to compute the log CDF directly. The `lower_segment` is set based on the precision of the input: ``` lower_segment = { -20, x.dtype=float64 { -10, x.dtype=float32 upper_segment = { 8, x.dtype=float64 { 5, x.dtype=float32 ``` When `x < lower_segment`, the `ndtr` asymptotic series approximation is: ``` ndtr(x) = scale * (1 + sum) + R_N scale = exp(-0.5 x**2) / (-x sqrt(2 pi)) sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N} R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3}) ``` where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a [double-factorial](https://en.wikipedia.org/wiki/Double_factorial). Args: x: `Tensor` of type `float32`, `float64`. series_order: Positive Python `integer`. Maximum depth to evaluate the asymptotic expansion. This is the `N` above. name: Python string. A name for the operation (default="log_ndtr"). Returns: log_ndtr: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x.dtype` is not handled. TypeError: if `series_order` is a not Python `integer.` ValueError: if `series_order` is not in `[0, 30]`. """ if not isinstance(series_order, int): raise TypeError("series_order must be a Python integer.") if series_order < 0: raise ValueError("series_order must be non-negative.") if series_order > 30: raise ValueError("series_order must be <= 30.") with tf.name_scope(name): x = tf.convert_to_tensor(value=x, name="x") if dtype_util.base_equal(x.dtype, tf.float64): lower_segment = LOGNDTR_FLOAT64_LOWER upper_segment = LOGNDTR_FLOAT64_UPPER elif dtype_util.base_equal(x.dtype, tf.float32): lower_segment = LOGNDTR_FLOAT32_LOWER upper_segment = LOGNDTR_FLOAT32_UPPER else: raise TypeError("x.dtype=%s is not supported." % x.dtype) # The basic idea here was ported from: # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html # We copy the main idea, with a few changes # * For x >> 1, and X ~ Normal(0, 1), # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x], # which extends the range of validity of this function. # * We use one fixed series_order for all of 'x', rather than adaptive. # * Our docstring properly reflects that this is an asymptotic series, not a # Taylor series. We also provided a correct bound on the remainder. # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when # x=0. This happens even though the branch is unchosen because when x=0 # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan # regardless of whether dy is finite. Note that the minimum is a NOP if # the branch is chosen. return tf.where( tf.greater(x, upper_segment), -_ndtr(-x), # log(1-x) ~= -x, x << 1 tf.where( tf.greater(x, lower_segment), tf.math.log(_ndtr(tf.maximum(x, lower_segment))), _log_ndtr_lower(tf.minimum(x, lower_segment), series_order)))
[ "def", "log_ndtr", "(", "x", ",", "series_order", "=", "3", ",", "name", "=", "\"log_ndtr\"", ")", ":", "if", "not", "isinstance", "(", "series_order", ",", "int", ")", ":", "raise", "TypeError", "(", "\"series_order must be a Python integer.\"", ")", "if", "series_order", "<", "0", ":", "raise", "ValueError", "(", "\"series_order must be non-negative.\"", ")", "if", "series_order", ">", "30", ":", "raise", "ValueError", "(", "\"series_order must be <= 30.\"", ")", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "x", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "\"x\"", ")", "if", "dtype_util", ".", "base_equal", "(", "x", ".", "dtype", ",", "tf", ".", "float64", ")", ":", "lower_segment", "=", "LOGNDTR_FLOAT64_LOWER", "upper_segment", "=", "LOGNDTR_FLOAT64_UPPER", "elif", "dtype_util", ".", "base_equal", "(", "x", ".", "dtype", ",", "tf", ".", "float32", ")", ":", "lower_segment", "=", "LOGNDTR_FLOAT32_LOWER", "upper_segment", "=", "LOGNDTR_FLOAT32_UPPER", "else", ":", "raise", "TypeError", "(", "\"x.dtype=%s is not supported.\"", "%", "x", ".", "dtype", ")", "# The basic idea here was ported from:", "# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html", "# We copy the main idea, with a few changes", "# * For x >> 1, and X ~ Normal(0, 1),", "# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],", "# which extends the range of validity of this function.", "# * We use one fixed series_order for all of 'x', rather than adaptive.", "# * Our docstring properly reflects that this is an asymptotic series, not a", "# Taylor series. We also provided a correct bound on the remainder.", "# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when", "# x=0. This happens even though the branch is unchosen because when x=0", "# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan", "# regardless of whether dy is finite. Note that the minimum is a NOP if", "# the branch is chosen.", "return", "tf", ".", "where", "(", "tf", ".", "greater", "(", "x", ",", "upper_segment", ")", ",", "-", "_ndtr", "(", "-", "x", ")", ",", "# log(1-x) ~= -x, x << 1", "tf", ".", "where", "(", "tf", ".", "greater", "(", "x", ",", "lower_segment", ")", ",", "tf", ".", "math", ".", "log", "(", "_ndtr", "(", "tf", ".", "maximum", "(", "x", ",", "lower_segment", ")", ")", ")", ",", "_log_ndtr_lower", "(", "tf", ".", "minimum", "(", "x", ",", "lower_segment", ")", ",", "series_order", ")", ")", ")" ]
Log Normal distribution function. For details of the Normal distribution function see `ndtr`. This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or using an asymptotic series. Specifically: - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on `log(1-x) ~= -x, x << 1`. - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique and take a log. - For `x <= lower_segment`, we use the series approximation of erf to compute the log CDF directly. The `lower_segment` is set based on the precision of the input: ``` lower_segment = { -20, x.dtype=float64 { -10, x.dtype=float32 upper_segment = { 8, x.dtype=float64 { 5, x.dtype=float32 ``` When `x < lower_segment`, the `ndtr` asymptotic series approximation is: ``` ndtr(x) = scale * (1 + sum) + R_N scale = exp(-0.5 x**2) / (-x sqrt(2 pi)) sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N} R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3}) ``` where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a [double-factorial](https://en.wikipedia.org/wiki/Double_factorial). Args: x: `Tensor` of type `float32`, `float64`. series_order: Positive Python `integer`. Maximum depth to evaluate the asymptotic expansion. This is the `N` above. name: Python string. A name for the operation (default="log_ndtr"). Returns: log_ndtr: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x.dtype` is not handled. TypeError: if `series_order` is a not Python `integer.` ValueError: if `series_order` is not in `[0, 30]`.
[ "Log", "Normal", "distribution", "function", "." ]
python
test
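A quick numeric illustration of why a dedicated log-CDF is needed at all: for sufficiently negative `x` the plain CDF underflows float64, so `log(ndtr(x))` returns `-inf`, while a log-space evaluation stays finite. SciPy is used here purely as a reference implementation of the same idea:

```
import numpy as np
from scipy.stats import norm

x = -40.0
print(np.log(norm.cdf(x)))   # -inf (plus a warning): cdf underflows to 0.0
print(norm.logcdf(x))        # about -804.6, evaluated in log space
```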
amperser/proselint
app.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/app.py#L34-L58
def check_auth(username, password): """Check if a username / password combination is valid.""" legal_hashes = [ "15a7fdade5fa58d38c6d400770e5c0e948fbc03ba365b704a6d205687738ae46", "057b24043181523e3c3717071953c575bd13862517a8ce228601582a9cbd9dae", "c8d79ae7d388b6da21cb982a819065b18941925179f88041b87de2be9bcde79c", "bb7082271060c91122de8a3bbac5c7d6dcfe1a02902d57b27422f0062f602f72", "90e4d9e4ec680ff40ce00e712b24a67659539e06d50b538ed4a3d960b4f3bda5", "9a9a241b05eeaa0ca2b4323c5a756851c9cd15371a4d71a326749abc47062bf0", "0643786903dab7cbb079796ea4b27a81fb38442381773759dd52ac8615eb6ab2", "886078097635635c1450cf52ca0ec13a887ea4f8cd4b799fdedc650ec1f08781", "d4c4d2d16c7fec2d0d60f0a110eb4fbd9f1bb463033298e01b3141a7e4ca10bc", "83dfe687131d285a22c5379e19f4ebabcdfe8a19bade46a5cdcdc9e9c36b52a2", "7c4000e5d6948055553eb84fc2188ccad068caa1b584303f88dc0c582a0ecd42", "43c693fa32545b7d4106e337fe6edf7db92282795d5bdb80705ef8a0ac7e8030", "ebb17f7f9050e3c1b18f84cbd6333178d575d4baf3aca6dfa0587cc2a48e02d0", "ce910c4368092bf0886e59dc5df0b0ad11f40067b685505c2195463d32fa0418", "86fc704debb389a73775e02f8f0423ffbbb787a1033e531b2e47d40f71ad5560", "308af1914cb90aeb8913548cc37c9b55320875a2c0d2ecfe6afe1bfc02c64326", "bd3486100f2bb29762100b93b1f1cd41655ab05767f78fb1fc4adfe040ebe953", "29f56ee67dd218276984d723b6b105678faa1868a9644f0d9c49109c8322e1d8", "704c3ddde0b5fd3c6971a6ef16991ddff3e241c170ed539094ee668861e01764", "aaebc3ca0fe041a3a595170b8efda22308cd7d843510bf01263f05a1851cb173", ] return hashlib.sha256(username + password).hexdigest() in legal_hashes
[ "def", "check_auth", "(", "username", ",", "password", ")", ":", "legal_hashes", "=", "[", "\"15a7fdade5fa58d38c6d400770e5c0e948fbc03ba365b704a6d205687738ae46\"", ",", "\"057b24043181523e3c3717071953c575bd13862517a8ce228601582a9cbd9dae\"", ",", "\"c8d79ae7d388b6da21cb982a819065b18941925179f88041b87de2be9bcde79c\"", ",", "\"bb7082271060c91122de8a3bbac5c7d6dcfe1a02902d57b27422f0062f602f72\"", ",", "\"90e4d9e4ec680ff40ce00e712b24a67659539e06d50b538ed4a3d960b4f3bda5\"", ",", "\"9a9a241b05eeaa0ca2b4323c5a756851c9cd15371a4d71a326749abc47062bf0\"", ",", "\"0643786903dab7cbb079796ea4b27a81fb38442381773759dd52ac8615eb6ab2\"", ",", "\"886078097635635c1450cf52ca0ec13a887ea4f8cd4b799fdedc650ec1f08781\"", ",", "\"d4c4d2d16c7fec2d0d60f0a110eb4fbd9f1bb463033298e01b3141a7e4ca10bc\"", ",", "\"83dfe687131d285a22c5379e19f4ebabcdfe8a19bade46a5cdcdc9e9c36b52a2\"", ",", "\"7c4000e5d6948055553eb84fc2188ccad068caa1b584303f88dc0c582a0ecd42\"", ",", "\"43c693fa32545b7d4106e337fe6edf7db92282795d5bdb80705ef8a0ac7e8030\"", ",", "\"ebb17f7f9050e3c1b18f84cbd6333178d575d4baf3aca6dfa0587cc2a48e02d0\"", ",", "\"ce910c4368092bf0886e59dc5df0b0ad11f40067b685505c2195463d32fa0418\"", ",", "\"86fc704debb389a73775e02f8f0423ffbbb787a1033e531b2e47d40f71ad5560\"", ",", "\"308af1914cb90aeb8913548cc37c9b55320875a2c0d2ecfe6afe1bfc02c64326\"", ",", "\"bd3486100f2bb29762100b93b1f1cd41655ab05767f78fb1fc4adfe040ebe953\"", ",", "\"29f56ee67dd218276984d723b6b105678faa1868a9644f0d9c49109c8322e1d8\"", ",", "\"704c3ddde0b5fd3c6971a6ef16991ddff3e241c170ed539094ee668861e01764\"", ",", "\"aaebc3ca0fe041a3a595170b8efda22308cd7d843510bf01263f05a1851cb173\"", ",", "]", "return", "hashlib", ".", "sha256", "(", "username", "+", "password", ")", ".", "hexdigest", "(", ")", "in", "legal_hashes" ]
Check if a username / password combination is valid.
[ "Check", "if", "a", "username", "/", "password", "combination", "is", "valid", "." ]
python
train
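As written, `hashlib.sha256(username + password)` only works on Python 2, where `str` is bytes; on Python 3 the input must be encoded first. A Python-3-safe sketch of the same hashing step:

```
import hashlib

def hash_credentials(username, password):
    # sha256 requires bytes on Python 3, hence the explicit encode
    return hashlib.sha256((username + password).encode("utf-8")).hexdigest()

print(hash_credentials("user", "secret"))   # 64-character hex digest
```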
Netflix-Skunkworks/cloudaux
cloudaux/aws/elbv2.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/aws/elbv2.py#L24-L33
def describe_listeners(load_balancer_arn=None, listener_arns=None, client=None): """ Permission: elasticloadbalancing:DescribeListeners """ kwargs = dict() if load_balancer_arn: kwargs.update(dict(LoadBalancerArn=load_balancer_arn)) if listener_arns: kwargs.update(dict(ListenerArns=listener_arns)) return client.describe_listeners(**kwargs)
[ "def", "describe_listeners", "(", "load_balancer_arn", "=", "None", ",", "listener_arns", "=", "None", ",", "client", "=", "None", ")", ":", "kwargs", "=", "dict", "(", ")", "if", "load_balancer_arn", ":", "kwargs", ".", "update", "(", "dict", "(", "LoadBalancerArn", "=", "load_balancer_arn", ")", ")", "if", "listener_arns", ":", "kwargs", ".", "update", "(", "dict", "(", "ListenerArns", "=", "listener_arns", ")", ")", "return", "client", ".", "describe_listeners", "(", "*", "*", "kwargs", ")" ]
Permission: elasticloadbalancing:DescribeListeners
[ "Permission", ":", "elasticloadbalancing", ":", "DescribeListeners" ]
python
valid
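A hedged usage sketch: in CloudAux the `client` argument is normally injected by a decorator, but a plain boto3 client works the same way. The ARN is a placeholder and the call needs valid AWS credentials:

```
import boto3

client = boto3.client("elbv2", region_name="us-east-1")
response = describe_listeners(
    load_balancer_arn="arn:aws:elasticloadbalancing:us-east-1:123456789012:"
                      "loadbalancer/app/example/0123456789abcdef",  # placeholder
    client=client)
print(response["Listeners"])
```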
inasafe/inasafe
safe/impact_function/style.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/style.py#L99-L220
def generate_classified_legend( analysis, exposure, hazard, use_rounding, debug_mode): """Generate an ordered python structure with the classified symbology. :param analysis: The analysis layer. :type analysis: QgsVectorLayer :param exposure: The exposure layer. :type exposure: QgsVectorLayer :param hazard: The hazard layer. :type hazard: QgsVectorLayer :param use_rounding: Boolean if we round number in the legend. :type use_rounding: bool :param debug_mode: Boolean if run in debug mode,to display the not exposed. :type debug_mode: bool :return: The ordered dictionary to use to build the classified style. :rtype: OrderedDict """ # We need to read the analysis layer to get the number of features. analysis_row = next(analysis.getFeatures()) # Let's style the hazard class in each layers. hazard_classification = hazard.keywords['classification'] hazard_classification = definition(hazard_classification) # Let's check if there is some thresholds: thresholds = hazard.keywords.get('thresholds') if thresholds: hazard_unit = hazard.keywords.get('continuous_hazard_unit') hazard_unit = definition(hazard_unit)['abbreviation'] else: hazard_unit = None exposure = exposure.keywords['exposure'] exposure_definitions = definition(exposure) exposure_units = exposure_definitions['units'] exposure_unit = exposure_units[0] coefficient = 1 # We check if can use a greater unit, such as kilometre for instance. if len(exposure_units) > 1: # We use only two units for now. delta = coefficient_between_units( exposure_units[1], exposure_units[0]) all_values_are_greater = True # We check if all values are greater than the coefficient for i, hazard_class in enumerate(hazard_classification['classes']): field_name = hazard_count_field['field_name'] % hazard_class['key'] try: value = analysis_row[field_name] except KeyError: value = 0 if 0 < value < delta: # 0 is fine, we can still keep the second unit. all_values_are_greater = False if all_values_are_greater: # If yes, we can use this unit. exposure_unit = exposure_units[1] coefficient = delta classes = OrderedDict() for i, hazard_class in enumerate(hazard_classification['classes']): # Get the hazard class name. field_name = hazard_count_field['field_name'] % hazard_class['key'] # Get the number of affected feature by this hazard class. try: value = analysis_row[field_name] except KeyError: # The field might not exist if no feature impacted in this hazard # zone. value = 0 value = format_number( value, use_rounding, exposure_definitions['use_population_rounding'], coefficient) minimum = None maximum = None # Check if we need to add thresholds. if thresholds: if i == 0: minimum = thresholds[hazard_class['key']][0] elif i == len(hazard_classification['classes']) - 1: maximum = thresholds[hazard_class['key']][1] else: minimum = thresholds[hazard_class['key']][0] maximum = thresholds[hazard_class['key']][1] label = _format_label( hazard_class=hazard_class['name'], value=value, exposure_unit=exposure_unit['abbreviation'], minimum=minimum, maximum=maximum, hazard_unit=hazard_unit) classes[hazard_class['key']] = (hazard_class['color'], label) if exposure_definitions['display_not_exposed'] or debug_mode: classes[not_exposed_class['key']] = _add_not_exposed( analysis_row, use_rounding, exposure_definitions['use_population_rounding'], exposure_unit['abbreviation'], coefficient) return classes
[ "def", "generate_classified_legend", "(", "analysis", ",", "exposure", ",", "hazard", ",", "use_rounding", ",", "debug_mode", ")", ":", "# We need to read the analysis layer to get the number of features.", "analysis_row", "=", "next", "(", "analysis", ".", "getFeatures", "(", ")", ")", "# Let's style the hazard class in each layers.", "hazard_classification", "=", "hazard", ".", "keywords", "[", "'classification'", "]", "hazard_classification", "=", "definition", "(", "hazard_classification", ")", "# Let's check if there is some thresholds:", "thresholds", "=", "hazard", ".", "keywords", ".", "get", "(", "'thresholds'", ")", "if", "thresholds", ":", "hazard_unit", "=", "hazard", ".", "keywords", ".", "get", "(", "'continuous_hazard_unit'", ")", "hazard_unit", "=", "definition", "(", "hazard_unit", ")", "[", "'abbreviation'", "]", "else", ":", "hazard_unit", "=", "None", "exposure", "=", "exposure", ".", "keywords", "[", "'exposure'", "]", "exposure_definitions", "=", "definition", "(", "exposure", ")", "exposure_units", "=", "exposure_definitions", "[", "'units'", "]", "exposure_unit", "=", "exposure_units", "[", "0", "]", "coefficient", "=", "1", "# We check if can use a greater unit, such as kilometre for instance.", "if", "len", "(", "exposure_units", ")", ">", "1", ":", "# We use only two units for now.", "delta", "=", "coefficient_between_units", "(", "exposure_units", "[", "1", "]", ",", "exposure_units", "[", "0", "]", ")", "all_values_are_greater", "=", "True", "# We check if all values are greater than the coefficient", "for", "i", ",", "hazard_class", "in", "enumerate", "(", "hazard_classification", "[", "'classes'", "]", ")", ":", "field_name", "=", "hazard_count_field", "[", "'field_name'", "]", "%", "hazard_class", "[", "'key'", "]", "try", ":", "value", "=", "analysis_row", "[", "field_name", "]", "except", "KeyError", ":", "value", "=", "0", "if", "0", "<", "value", "<", "delta", ":", "# 0 is fine, we can still keep the second unit.", "all_values_are_greater", "=", "False", "if", "all_values_are_greater", ":", "# If yes, we can use this unit.", "exposure_unit", "=", "exposure_units", "[", "1", "]", "coefficient", "=", "delta", "classes", "=", "OrderedDict", "(", ")", "for", "i", ",", "hazard_class", "in", "enumerate", "(", "hazard_classification", "[", "'classes'", "]", ")", ":", "# Get the hazard class name.", "field_name", "=", "hazard_count_field", "[", "'field_name'", "]", "%", "hazard_class", "[", "'key'", "]", "# Get the number of affected feature by this hazard class.", "try", ":", "value", "=", "analysis_row", "[", "field_name", "]", "except", "KeyError", ":", "# The field might not exist if no feature impacted in this hazard", "# zone.", "value", "=", "0", "value", "=", "format_number", "(", "value", ",", "use_rounding", ",", "exposure_definitions", "[", "'use_population_rounding'", "]", ",", "coefficient", ")", "minimum", "=", "None", "maximum", "=", "None", "# Check if we need to add thresholds.", "if", "thresholds", ":", "if", "i", "==", "0", ":", "minimum", "=", "thresholds", "[", "hazard_class", "[", "'key'", "]", "]", "[", "0", "]", "elif", "i", "==", "len", "(", "hazard_classification", "[", "'classes'", "]", ")", "-", "1", ":", "maximum", "=", "thresholds", "[", "hazard_class", "[", "'key'", "]", "]", "[", "1", "]", "else", ":", "minimum", "=", "thresholds", "[", "hazard_class", "[", "'key'", "]", "]", "[", "0", "]", "maximum", "=", "thresholds", "[", "hazard_class", "[", "'key'", "]", "]", "[", "1", "]", "label", "=", "_format_label", "(", 
"hazard_class", "=", "hazard_class", "[", "'name'", "]", ",", "value", "=", "value", ",", "exposure_unit", "=", "exposure_unit", "[", "'abbreviation'", "]", ",", "minimum", "=", "minimum", ",", "maximum", "=", "maximum", ",", "hazard_unit", "=", "hazard_unit", ")", "classes", "[", "hazard_class", "[", "'key'", "]", "]", "=", "(", "hazard_class", "[", "'color'", "]", ",", "label", ")", "if", "exposure_definitions", "[", "'display_not_exposed'", "]", "or", "debug_mode", ":", "classes", "[", "not_exposed_class", "[", "'key'", "]", "]", "=", "_add_not_exposed", "(", "analysis_row", ",", "use_rounding", ",", "exposure_definitions", "[", "'use_population_rounding'", "]", ",", "exposure_unit", "[", "'abbreviation'", "]", ",", "coefficient", ")", "return", "classes" ]
Generate an ordered python structure with the classified symbology. :param analysis: The analysis layer. :type analysis: QgsVectorLayer :param exposure: The exposure layer. :type exposure: QgsVectorLayer :param hazard: The hazard layer. :type hazard: QgsVectorLayer :param use_rounding: Boolean if we round number in the legend. :type use_rounding: bool :param debug_mode: Boolean if run in debug mode,to display the not exposed. :type debug_mode: bool :return: The ordered dictionary to use to build the classified style. :rtype: OrderedDict
[ "Generate", "an", "ordered", "python", "structure", "with", "the", "classified", "symbology", "." ]
python
train
lgpage/nbtutor
nbtutor/ipython/utils.py
https://github.com/lgpage/nbtutor/blob/07798a044cf6e1fd4eaac2afddeef3e13348dbcd/nbtutor/ipython/utils.py#L94-L104
def redirect_stdout(new_stdout): """Redirect the stdout Args: new_stdout (io.StringIO): New stdout to use instead """ old_stdout, sys.stdout = sys.stdout, new_stdout try: yield None finally: sys.stdout = old_stdout
[ "def", "redirect_stdout", "(", "new_stdout", ")", ":", "old_stdout", ",", "sys", ".", "stdout", "=", "sys", ".", "stdout", ",", "new_stdout", "try", ":", "yield", "None", "finally", ":", "sys", ".", "stdout", "=", "old_stdout" ]
Redirect the stdout Args: new_stdout (io.StringIO): New stdout to use instead
[ "Redirect", "the", "stdout" ]
python
valid
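As excerpted, the function yields but carries no decorator, so calling it returns a plain generator rather than a context manager; the repository presumably applies `contextlib.contextmanager` at the definition site. A self-contained, runnable version:

```
import io
import sys
from contextlib import contextmanager

@contextmanager
def redirect_stdout(new_stdout):
    old_stdout, sys.stdout = sys.stdout, new_stdout
    try:
        yield None
    finally:
        sys.stdout = old_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print("captured")
assert buf.getvalue() == "captured\n"
```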
Bachmann1234/diff-cover
diff_cover/snippets.py
https://github.com/Bachmann1234/diff-cover/blob/901cb3fc986982961785e841658085ead453c6c9/diff_cover/snippets.py#L90-L105
def html(self): """ Return an HTML representation of the snippet. """ formatter = HtmlFormatter( cssclass=self.DIV_CSS_CLASS, linenos=True, linenostart=self._start_line, hl_lines=self._shift_lines( self._violation_lines, self._start_line ), lineanchors=self._src_filename ) return pygments.format(self.src_tokens(), formatter)
[ "def", "html", "(", "self", ")", ":", "formatter", "=", "HtmlFormatter", "(", "cssclass", "=", "self", ".", "DIV_CSS_CLASS", ",", "linenos", "=", "True", ",", "linenostart", "=", "self", ".", "_start_line", ",", "hl_lines", "=", "self", ".", "_shift_lines", "(", "self", ".", "_violation_lines", ",", "self", ".", "_start_line", ")", ",", "lineanchors", "=", "self", ".", "_src_filename", ")", "return", "pygments", ".", "format", "(", "self", ".", "src_tokens", "(", ")", ",", "formatter", ")" ]
Return an HTML representation of the snippet.
[ "Return", "an", "HTML", "representation", "of", "the", "snippet", "." ]
python
train
Unidata/MetPy
metpy/cbook.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/cbook.py#L88-L102
def broadcast_indices(x, minv, ndim, axis): """Calculate index values to properly broadcast index array within data array. See usage in interp. """ ret = [] for dim in range(ndim): if dim == axis: ret.append(minv) else: broadcast_slice = [np.newaxis] * ndim broadcast_slice[dim] = slice(None) dim_inds = np.arange(x.shape[dim]) ret.append(dim_inds[tuple(broadcast_slice)]) return tuple(ret)
[ "def", "broadcast_indices", "(", "x", ",", "minv", ",", "ndim", ",", "axis", ")", ":", "ret", "=", "[", "]", "for", "dim", "in", "range", "(", "ndim", ")", ":", "if", "dim", "==", "axis", ":", "ret", ".", "append", "(", "minv", ")", "else", ":", "broadcast_slice", "=", "[", "np", ".", "newaxis", "]", "*", "ndim", "broadcast_slice", "[", "dim", "]", "=", "slice", "(", "None", ")", "dim_inds", "=", "np", ".", "arange", "(", "x", ".", "shape", "[", "dim", "]", ")", "ret", ".", "append", "(", "dim_inds", "[", "tuple", "(", "broadcast_slice", ")", "]", ")", "return", "tuple", "(", "ret", ")" ]
Calculate index values to properly broadcast index array within data array. See usage in interp.
[ "Calculate", "index", "values", "to", "properly", "broadcast", "index", "array", "within", "data", "array", "." ]
python
train
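A worked example of the index construction: pick the positions held in `minv` along `axis=1` while broadcasting over the remaining axes of a `(2, 3, 4)` array:

```
import numpy as np

x = np.arange(24).reshape(2, 3, 4)
minv = np.zeros((2, 1, 4), dtype=int)             # index 0 along axis 1
inds = broadcast_indices(x, minv, x.ndim, axis=1)

picked = x[inds]
assert picked.shape == (2, 1, 4)
assert np.array_equal(picked, x[:, :1, :])        # same as slicing row 0
```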
OSSOS/MOP
src/ossos/canfar/cadc_certificates.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/canfar/cadc_certificates.py#L62-L75
def getUserPassword(host='www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca'): """Getting the username/password for host from .netrc file """ if os.access(os.path.join(os.environ.get('HOME','/'),".netrc"),os.R_OK): auth=netrc.netrc().authenticators(host) else: auth=False if not auth: sys.stdout.write("CADC Username: ") username=sys.stdin.readline().strip('\n') password=getpass.getpass().strip('\n') else: username=auth[0] password=auth[2] return (username,password)
[ "def", "getUserPassword", "(", "host", "=", "'www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca'", ")", ":", "if", "os", ".", "access", "(", "os", ".", "path", ".", "join", "(", "os", ".", "environ", ".", "get", "(", "'HOME'", ",", "'/'", ")", ",", "\".netrc\"", ")", ",", "os", ".", "R_OK", ")", ":", "auth", "=", "netrc", ".", "netrc", "(", ")", ".", "authenticators", "(", "host", ")", "else", ":", "auth", "=", "False", "if", "not", "auth", ":", "sys", ".", "stdout", ".", "write", "(", "\"CADC Username: \"", ")", "username", "=", "sys", ".", "stdin", ".", "readline", "(", ")", ".", "strip", "(", "'\\n'", ")", "password", "=", "getpass", ".", "getpass", "(", ")", ".", "strip", "(", "'\\n'", ")", "else", ":", "username", "=", "auth", "[", "0", "]", "password", "=", "auth", "[", "2", "]", "return", "(", "username", ",", "password", ")" ]
Getting the username/password for host from .netrc file
[ "Getting", "the", "username", "/", "password", "for", "host", "from", ".", "netrc", "file" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L14672-L14690
def get_guest_property_value(self, property_p): """Reads a value from the machine's guest property store. in property_p of type str The name of the property to read. return value of type str The value of the property. If the property does not exist then this will be empty. raises :class:`VBoxErrorInvalidVmState` Machine session is not open. """ if not isinstance(property_p, basestring): raise TypeError("property_p can only be an instance of type basestring") value = self._call("getGuestPropertyValue", in_p=[property_p]) return value
[ "def", "get_guest_property_value", "(", "self", ",", "property_p", ")", ":", "if", "not", "isinstance", "(", "property_p", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"property_p can only be an instance of type basestring\"", ")", "value", "=", "self", ".", "_call", "(", "\"getGuestPropertyValue\"", ",", "in_p", "=", "[", "property_p", "]", ")", "return", "value" ]
Reads a value from the machine's guest property store. in property_p of type str The name of the property to read. return value of type str The value of the property. If the property does not exist then this will be empty. raises :class:`VBoxErrorInvalidVmState` Machine session is not open.
[ "Reads", "a", "value", "from", "the", "machine", "s", "guest", "property", "store", "." ]
python
train
rix0rrr/gcl
gcl/ast_util.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L171-L181
def find_inherited_key_completions(rootpath, root_env): """Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple. """ tup = inflate_context_tuple(rootpath, root_env) if isinstance(tup, runtime.CompositeTuple): keys = set(k for t in tup.tuples[:-1] for k in t.keys()) return {n: get_completion(tup, n) for n in keys} return {}
[ "def", "find_inherited_key_completions", "(", "rootpath", ",", "root_env", ")", ":", "tup", "=", "inflate_context_tuple", "(", "rootpath", ",", "root_env", ")", "if", "isinstance", "(", "tup", ",", "runtime", ".", "CompositeTuple", ")", ":", "keys", "=", "set", "(", "k", "for", "t", "in", "tup", ".", "tuples", "[", ":", "-", "1", "]", "for", "k", "in", "t", ".", "keys", "(", ")", ")", "return", "{", "n", ":", "get_completion", "(", "tup", ",", "n", ")", "for", "n", "in", "keys", "}", "return", "{", "}" ]
Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple.
[ "Return", "completion", "keys", "from", "INHERITED", "tuples", "." ]
python
train
wheeler-microfluidics/dmf-control-board-firmware
dmf_control_board_firmware/__init__.py
https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L535-L562
def force(self, Ly=None): ''' Estimate the applied force (in Newtons) on a drop according to the electromechanical model [1]. Ly is the length of the actuated electrode along the y-axis (perpendicular to the direction of motion) in millimeters. By default, use the square root of the actuated electrode area, i.e., Ly=Lx=sqrt(Area) To get the force normalized by electrode width (i.e., in units of N/mm), set Ly=1.0. 1. Chatterjee et al., "Electromechanical model for actuating liquids in a two-plate droplet microfluidic device," Lab on a Chip, no. 9 (2009): 1219-1229. ''' if self.calibration._c_drop: c_drop = self.calibration.c_drop(self.frequency) else: c_drop = self.capacitance()[-1] / self.area if self.calibration._c_filler: c_filler = self.calibration.c_filler(self.frequency) else: c_filler = 0 if Ly is None: Ly = np.sqrt(self.area) return 1e3 * Ly * 0.5 * (c_drop - c_filler) * self.V_actuation()**2
[ "def", "force", "(", "self", ",", "Ly", "=", "None", ")", ":", "if", "self", ".", "calibration", ".", "_c_drop", ":", "c_drop", "=", "self", ".", "calibration", ".", "c_drop", "(", "self", ".", "frequency", ")", "else", ":", "c_drop", "=", "self", ".", "capacitance", "(", ")", "[", "-", "1", "]", "/", "self", ".", "area", "if", "self", ".", "calibration", ".", "_c_filler", ":", "c_filler", "=", "self", ".", "calibration", ".", "c_filler", "(", "self", ".", "frequency", ")", "else", ":", "c_filler", "=", "0", "if", "Ly", "is", "None", ":", "Ly", "=", "np", ".", "sqrt", "(", "self", ".", "area", ")", "return", "1e3", "*", "Ly", "*", "0.5", "*", "(", "c_drop", "-", "c_filler", ")", "*", "self", ".", "V_actuation", "(", ")", "**", "2" ]
Estimate the applied force (in Newtons) on a drop according to the
electromechanical model [1].

    Ly is the length of the actuated electrode along the y-axis
    (perpendicular to the direction of motion) in millimeters. By
    default, use the square root of the actuated electrode area,
    i.e., Ly=Lx=sqrt(Area)

    To get the force normalized by electrode width (i.e., in units of
    N/mm), set Ly=1.0.

1. Chatterjee et al., "Electromechanical model for actuating
   liquids in a two-plate droplet microfluidic device," Lab on a
   Chip, no. 9 (2009): 1219-1229.
[ "Estimate", "the", "applied", "force", "(", "in", "Newtons", ")", "on", "a", "drop", "according", "to", "the", "electromechanical", "model", "[", "1", "]", "." ]
python
train
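A worked numeric instance of the force formula above may help; the capacitance, voltage, and area values here are illustrative placeholders (not taken from the library or its calibration data), and only the arithmetic of F = 1e3 * Ly * 0.5 * (c_drop - c_filler) * V**2 is being demonstrated.

import numpy as np

# Illustrative inputs in the library's internal units; real values come from
# the calibration object and feedback measurements.
c_drop = 3.0e-4      # assumed drop capacitance per unit area
c_filler = 1.0e-5    # assumed filler capacitance per unit area
v_actuation = 100.0  # assumed actuation voltage
area = 4.0           # actuated electrode area in mm^2

ly = np.sqrt(area)   # default Ly: electrode treated as square
force = 1e3 * ly * 0.5 * (c_drop - c_filler) * v_actuation ** 2
print('estimated force: %.3e N' % force)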
hydpy-dev/hydpy
hydpy/auxs/networktools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/networktools.py#L430-L460
def selection(self): """A complete |Selection| object of all "supplying" and "routing" elements and required nodes. >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) >>> rbns2s.selection Selection("complete", nodes=("node_1123", "node_1125", "node_11269", "node_1129", "node_113", "node_outlet"), elements=("land_111", "land_1121", "land_1122", "land_1123", "land_1124", "land_1125", "land_11261", "land_11262", "land_11269", "land_1129", "land_113", "stream_1123", "stream_1125", "stream_11269", "stream_1129", "stream_113")) Besides the possible modifications on the names of the different nodes and elements, the name of the selection can be set differently: >>> rbns2s.selection_name = 'sel' >>> from hydpy import pub >>> with pub.options.ellipsis(1): ... print(repr(rbns2s.selection)) Selection("sel", nodes=("node_1123", ...,"node_outlet"), elements=("land_111", ...,"stream_113")) """ return selectiontools.Selection( self.selection_name, self.nodes, self.elements)
[ "def", "selection", "(", "self", ")", ":", "return", "selectiontools", ".", "Selection", "(", "self", ".", "selection_name", ",", "self", ".", "nodes", ",", "self", ".", "elements", ")" ]
A complete |Selection| object of all "supplying" and "routing" elements and required nodes. >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) >>> rbns2s.selection Selection("complete", nodes=("node_1123", "node_1125", "node_11269", "node_1129", "node_113", "node_outlet"), elements=("land_111", "land_1121", "land_1122", "land_1123", "land_1124", "land_1125", "land_11261", "land_11262", "land_11269", "land_1129", "land_113", "stream_1123", "stream_1125", "stream_11269", "stream_1129", "stream_113")) Besides the possible modifications on the names of the different nodes and elements, the name of the selection can be set differently: >>> rbns2s.selection_name = 'sel' >>> from hydpy import pub >>> with pub.options.ellipsis(1): ... print(repr(rbns2s.selection)) Selection("sel", nodes=("node_1123", ...,"node_outlet"), elements=("land_111", ...,"stream_113"))
[ "A", "complete", "|Selection|", "object", "of", "all", "supplying", "and", "routing", "elements", "and", "required", "nodes", "." ]
python
train
Pertino/pertino-sdk-python
pertinosdk/__init__.py
https://github.com/Pertino/pertino-sdk-python/blob/d7d75bd374b7f44967ae6f1626a6520507be3a54/pertinosdk/__init__.py#L75-L83
def deleteFrom(self, organization, devices): """ Deletes all devices in a list. :raises: HTTP errors """ for device in devices: url = self.BASE_URL+self.BASE_PATH+self.ORGS_PATH+"/"+ str(organization["id"]) + self.DEVICES_PATH+ "/" + str(device["id"]) + self.USER_QUERY response = self.requests.delete(url, auth=(self.__username, self.__password)) response.raise_for_status()
[ "def", "deleteFrom", "(", "self", ",", "organization", ",", "devices", ")", ":", "for", "device", "in", "devices", ":", "url", "=", "self", ".", "BASE_URL", "+", "self", ".", "BASE_PATH", "+", "self", ".", "ORGS_PATH", "+", "\"/\"", "+", "str", "(", "organization", "[", "\"id\"", "]", ")", "+", "self", ".", "DEVICES_PATH", "+", "\"/\"", "+", "str", "(", "device", "[", "\"id\"", "]", ")", "+", "self", ".", "USER_QUERY", "response", "=", "self", ".", "requests", ".", "delete", "(", "url", ",", "auth", "=", "(", "self", ".", "__username", ",", "self", ".", "__password", ")", ")", "response", ".", "raise_for_status", "(", ")" ]
Deletes all devices in a list. :raises: HTTP errors
[ "Deletes", "all", "devices", "in", "a", "list", ".", ":", "raises", ":", "HTTP", "errors" ]
python
train
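The same bulk-delete pattern, sketched standalone with the requests library; the URL layout and the 'id' fields mirror the record above but are assumptions here, not Pertino's documented API.

import requests

def delete_devices(base_url, organization, devices, auth):
    # Issue one DELETE per device and fail fast on the first HTTP error.
    for device in devices:
        url = '%s/orgs/%s/devices/%s' % (
            base_url, organization['id'], device['id'])
        response = requests.delete(url, auth=auth)
        response.raise_for_status()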
pavoni/pyvera
pyvera/__init__.py
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/__init__.py#L445-L453
def vera_request(self, **kwargs):
    """Perform a vera_request for this device."""
    request_payload = {
        'output_format': 'json',
        'DeviceNum': self.device_id,
    }
    request_payload.update(kwargs)

    return self.vera_controller.data_request(request_payload)
[ "def", "vera_request", "(", "self", ",", "*", "*", "kwargs", ")", ":", "request_payload", "=", "{", "'output_format'", ":", "'json'", ",", "'DeviceNum'", ":", "self", ".", "device_id", ",", "}", "request_payload", ".", "update", "(", "kwargs", ")", "return", "self", ".", "vera_controller", ".", "data_request", "(", "request_payload", ")" ]
Perform a vera_request for this device.
[ "Perfom", "a", "vera_request", "for", "this", "device", "." ]
python
train
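The method above is a small payload-merge helper: fixed fields first, then caller overrides win via dict.update(). A standalone sketch of the pattern, with the field values below hypothetical:

def build_request(device_id, **kwargs):
    # Defaults first; caller-supplied keys deliberately override them.
    payload = {'output_format': 'json', 'DeviceNum': device_id}
    payload.update(kwargs)
    return payload

print(build_request(7, id='status'))
# {'output_format': 'json', 'DeviceNum': 7, 'id': 'status'}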
sys-git/certifiable
certifiable/utils.py
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/utils.py#L68-L91
def certify_required(value, required=False): """ Certify that a value is present if required. :param object value: The value that is to be certified. :param bool required: Is the value required? :raises CertifierValueError: Required value is `None`. """ # Certify our kwargs: if not isinstance(required, bool): raise CertifierParamError( 'required', required, ) if value is None: if required: raise CertifierValueError( message="required value is None", ) return True
[ "def", "certify_required", "(", "value", ",", "required", "=", "False", ")", ":", "# Certify our kwargs:", "if", "not", "isinstance", "(", "required", ",", "bool", ")", ":", "raise", "CertifierParamError", "(", "'required'", ",", "required", ",", ")", "if", "value", "is", "None", ":", "if", "required", ":", "raise", "CertifierValueError", "(", "message", "=", "\"required value is None\"", ",", ")", "return", "True" ]
Certify that a value is present if required. :param object value: The value that is to be certified. :param bool required: Is the value required? :raises CertifierValueError: Required value is `None`.
[ "Certify", "that", "a", "value", "is", "present", "if", "required", "." ]
python
train
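A self-contained sketch of the same two-stage guard (validate the flag itself, then the value), with the library's custom exceptions simplified to built-ins:

def require(value, required=False):
    # The flag is checked first so a misuse of the API fails loudly.
    if not isinstance(required, bool):
        raise TypeError('required must be a bool, got %r' % (required,))
    if required and value is None:
        raise ValueError('required value is None')
    return True

require(None)                  # passes: value is optional
require('x', required=True)    # passes: value is present
# require(None, required=True) would raise ValueError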
cloudmesh/cloudmesh-common
cloudmesh/common/console.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/console.py#L251-L264
def cprint(color, prefix, message): """ prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return: """ message = message or "" prefix = prefix or "" print((Console.theme[color] + prefix + message + Console.theme['ENDC']))
[ "def", "cprint", "(", "color", ",", "prefix", ",", "message", ")", ":", "message", "=", "message", "or", "\"\"", "prefix", "=", "prefix", "or", "\"\"", "print", "(", "(", "Console", ".", "theme", "[", "color", "]", "+", "prefix", "+", "message", "+", "Console", ".", "theme", "[", "'ENDC'", "]", ")", ")" ]
prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return:
[ "prints", "a", "message", "in", "a", "given", "color", ":", "param", "color", ":", "the", "color", "as", "defined", "in", "the", "theme", ":", "param", "prefix", ":", "the", "prefix", "(", "a", "string", ")", ":", "param", "message", ":", "the", "message", ":", "return", ":" ]
python
train
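The theme lookup above resolves to ANSI escape sequences, with 'ENDC' acting as the terminal reset code. A minimal standalone version; the palette below is illustrative, not cloudmesh's actual theme:

THEME = {
    'OKGREEN': '\033[92m',  # green foreground
    'FAIL': '\033[91m',     # red foreground
    'ENDC': '\033[0m',      # reset attributes
}

def cprint(color, prefix, message):
    prefix = prefix or ''
    message = message or ''
    print(THEME[color] + prefix + message + THEME['ENDC'])

cprint('OKGREEN', 'ok: ', 'all checks passed')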
wglass/lighthouse
lighthouse/sockutils.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/sockutils.py#L5-L31
def get_response(sock, buffer_size=4096):
    """
    Helper method for retrieving a response from a given socket.

    Returns two values in a tuple, the first is the response line and the
    second is any extra data after the newline.
    """
    response = ""
    extra = ""
    while True:
        try:
            chunk = sock.recv(buffer_size)
            if chunk:
                response += chunk
        except socket.error as e:
            if e.errno not in [errno.EAGAIN, errno.EINTR]:
                raise

        if not response:
            break

        if "\n" in response:
            response, extra = response.split("\n", 1)
            break

    return response, extra
[ "def", "get_response", "(", "sock", ",", "buffer_size", "=", "4096", ")", ":", "response", "=", "\"\"", "extra", "=", "\"\"", "while", "True", ":", "try", ":", "chunk", "=", "sock", ".", "recv", "(", "buffer_size", ")", "if", "chunk", ":", "response", "+=", "chunk", "except", "socket", ".", "error", "as", "e", ":", "if", "e", ".", "errno", "not", "in", "[", "errno", ".", "EAGAIN", ",", "errno", ".", "EINTR", "]", ":", "raise", "if", "not", "response", ":", "break", "if", "\"\\n\"", "in", "response", ":", "response", ",", "extra", "=", "response", ".", "split", "(", "\"\\n\"", ",", "1", ")", "break", "return", "response", ",", "extra" ]
Helper method for retrieving a response from a given socket.

Returns two values in a tuple, the first is the response line and the
second is any extra data after the newline.
[ "Helper", "method", "for", "retrieving", "a", "response", "from", "a", "given", "socket", "." ]
python
train
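The contract above (return the text before the first newline, plus any leftover data) can be exercised with a local socket pair; note the record is Python 2 style, so on Python 3 the accumulator would be bytes rather than str. A small demo, assuming socket.socketpair is available:

import socket

a, b = socket.socketpair()
a.sendall(b'PONG\nleftover')
data = b.recv(4096)                         # one read suffices for this demo;
response, _, extra = data.partition(b'\n')  # real code loops like the record
print(response, extra)                      # b'PONG' b'leftover'
a.close()
b.close()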
brbsix/subsystem
subsystem/subsystem.py
https://github.com/brbsix/subsystem/blob/57705bc20d71ceaed9e22e21246265d717e98eb8/subsystem/subsystem.py#L276-L289
def multithreader(args, paths): """Execute multiple processes at once.""" def shellprocess(path): """Return a ready-to-use subprocess.""" import subprocess return subprocess.Popen(args + [path], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) processes = [shellprocess(path) for path in paths] for process in processes: process.wait()
[ "def", "multithreader", "(", "args", ",", "paths", ")", ":", "def", "shellprocess", "(", "path", ")", ":", "\"\"\"Return a ready-to-use subprocess.\"\"\"", "import", "subprocess", "return", "subprocess", ".", "Popen", "(", "args", "+", "[", "path", "]", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ",", "stdout", "=", "subprocess", ".", "DEVNULL", ")", "processes", "=", "[", "shellprocess", "(", "path", ")", "for", "path", "in", "paths", "]", "for", "process", "in", "processes", ":", "process", ".", "wait", "(", ")" ]
Execute multiple processes at once.
[ "Execute", "multiple", "processes", "at", "once", "." ]
python
train
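The record launches every process before waiting on any of them, which is what makes it parallel rather than sequential. The same fan-out-then-wait shape, standalone; the command and file names below are placeholders:

import subprocess

def run_parallel(args, paths):
    # Start everything first...
    procs = [subprocess.Popen(args + [p],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL) for p in paths]
    # ...then collect exit codes in input order.
    return [p.wait() for p in procs]

# run_parallel(['gzip', '-k'], ['a.log', 'b.log'])  # hypothetical invocation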
saltstack/salt
salt/states/keystone_endpoint.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/keystone_endpoint.py#L80-L143
def present(name, service_name, auth=None, **kwargs): ''' Ensure an endpoint exists and is up-to-date name Interface name url URL of the endpoint service_name Service name or ID region The region name to assign the endpoint enabled Boolean to control if endpoint is enabled ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} kwargs = __utils__['args.clean_kwargs'](**kwargs) __salt__['keystoneng.setup_clouds'](auth) success, val = _, endpoint = _common(ret, name, service_name, kwargs) if not success: return val if not endpoint: if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs ret['comment'] = 'Endpoint will be created.' return ret # NOTE(SamYaple): Endpoints are returned as a list which can contain # several items depending on the options passed endpoints = __salt__['keystoneng.endpoint_create'](**kwargs) if len(endpoints) == 1: ret['changes'] = endpoints[0] else: for i, endpoint in enumerate(endpoints): ret['changes'][i] = endpoint ret['comment'] = 'Created endpoint' return ret changes = __salt__['keystoneng.compare_changes'](endpoint, **kwargs) if changes: if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes ret['comment'] = 'Endpoint will be updated.' return ret kwargs['endpoint_id'] = endpoint.id __salt__['keystoneng.endpoint_update'](**kwargs) ret['changes'].update(changes) ret['comment'] = 'Updated endpoint' return ret
[ "def", "present", "(", "name", ",", "service_name", ",", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "kwargs", "=", "__utils__", "[", "'args.clean_kwargs'", "]", "(", "*", "*", "kwargs", ")", "__salt__", "[", "'keystoneng.setup_clouds'", "]", "(", "auth", ")", "success", ",", "val", "=", "_", ",", "endpoint", "=", "_common", "(", "ret", ",", "name", ",", "service_name", ",", "kwargs", ")", "if", "not", "success", ":", "return", "val", "if", "not", "endpoint", ":", "if", "__opts__", "[", "'test'", "]", "is", "True", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "kwargs", "ret", "[", "'comment'", "]", "=", "'Endpoint will be created.'", "return", "ret", "# NOTE(SamYaple): Endpoints are returned as a list which can contain", "# several items depending on the options passed", "endpoints", "=", "__salt__", "[", "'keystoneng.endpoint_create'", "]", "(", "*", "*", "kwargs", ")", "if", "len", "(", "endpoints", ")", "==", "1", ":", "ret", "[", "'changes'", "]", "=", "endpoints", "[", "0", "]", "else", ":", "for", "i", ",", "endpoint", "in", "enumerate", "(", "endpoints", ")", ":", "ret", "[", "'changes'", "]", "[", "i", "]", "=", "endpoint", "ret", "[", "'comment'", "]", "=", "'Created endpoint'", "return", "ret", "changes", "=", "__salt__", "[", "'keystoneng.compare_changes'", "]", "(", "endpoint", ",", "*", "*", "kwargs", ")", "if", "changes", ":", "if", "__opts__", "[", "'test'", "]", "is", "True", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "changes", "ret", "[", "'comment'", "]", "=", "'Endpoint will be updated.'", "return", "ret", "kwargs", "[", "'endpoint_id'", "]", "=", "endpoint", ".", "id", "__salt__", "[", "'keystoneng.endpoint_update'", "]", "(", "*", "*", "kwargs", ")", "ret", "[", "'changes'", "]", ".", "update", "(", "changes", ")", "ret", "[", "'comment'", "]", "=", "'Updated endpoint'", "return", "ret" ]
Ensure an endpoint exists and is up-to-date name Interface name url URL of the endpoint service_name Service name or ID region The region name to assign the endpoint enabled Boolean to control if endpoint is enabled
[ "Ensure", "an", "endpoint", "exists", "and", "is", "up", "-", "to", "-", "date" ]
python
train
chinapnr/fishbase
fishbase/fish_logger.py
https://github.com/chinapnr/fishbase/blob/23c5147a6bc0d8ed36409e55352ffb2c5b0edc82/fishbase/fish_logger.py#L41-L54
def emit(self, record): """ Emit a record. Always check time """ try: if self.check_base_filename(record): self.build_base_filename() FileHandler.emit(self, record) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "if", "self", ".", "check_base_filename", "(", "record", ")", ":", "self", ".", "build_base_filename", "(", ")", "FileHandler", ".", "emit", "(", "self", ",", "record", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", ":", "self", ".", "handleError", "(", "record", ")" ]
Emit a record. Always check time
[ "Emit", "a", "record", "." ]
python
train
ArabellaTech/django-basic-cms
basic_cms/managers.py
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/managers.py#L273-L280
def sanitize(self, content): """Sanitize a string in order to avoid possible XSS using ``html5lib``.""" import html5lib from html5lib import sanitizer p = html5lib.HTMLParser(tokenizer=sanitizer.HTMLSanitizer) dom_tree = p.parseFragment(content) return dom_tree.text
[ "def", "sanitize", "(", "self", ",", "content", ")", ":", "import", "html5lib", "from", "html5lib", "import", "sanitizer", "p", "=", "html5lib", ".", "HTMLParser", "(", "tokenizer", "=", "sanitizer", ".", "HTMLSanitizer", ")", "dom_tree", "=", "p", ".", "parseFragment", "(", "content", ")", "return", "dom_tree", ".", "text" ]
Sanitize a string in order to avoid possible XSS using ``html5lib``.
[ "Sanitize", "a", "string", "in", "order", "to", "avoid", "possible", "XSS", "using", "html5lib", "." ]
python
train
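The record targets html5lib's pre-1.0 API; the html5lib.sanitizer module was removed in html5lib 1.0, so this code no longer runs against current releases. A present-day equivalent, sketched with bleach (the tag whitelist below is illustrative):

import bleach

def sanitize(content):
    # Disallowed tags are stripped; their text content is kept.
    return bleach.clean(content, tags=['b', 'em', 'i', 'strong'], strip=True)

print(sanitize('<script>alert(1)</script><b>ok</b>'))  # alert(1)<b>ok</b>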
google/grr
grr/client/grr_response_client/fleetspeak_client.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/fleetspeak_client.py#L122-L129
def _ForemanOp(self): """Sends Foreman checks periodically.""" period = config.CONFIG["Client.foreman_check_frequency"] self._threads["Worker"].SendReply( rdf_protodict.DataBlob(), session_id=rdfvalue.FlowSessionID(flow_name="Foreman"), require_fastpoll=False) time.sleep(period)
[ "def", "_ForemanOp", "(", "self", ")", ":", "period", "=", "config", ".", "CONFIG", "[", "\"Client.foreman_check_frequency\"", "]", "self", ".", "_threads", "[", "\"Worker\"", "]", ".", "SendReply", "(", "rdf_protodict", ".", "DataBlob", "(", ")", ",", "session_id", "=", "rdfvalue", ".", "FlowSessionID", "(", "flow_name", "=", "\"Foreman\"", ")", ",", "require_fastpoll", "=", "False", ")", "time", ".", "sleep", "(", "period", ")" ]
Sends Foreman checks periodically.
[ "Sends", "Foreman", "checks", "periodically", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/zmq/kernelapp.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/kernelapp.py#L236-L248
def init_heartbeat(self): """start the heart beating""" # heartbeat doesn't share context, because it mustn't be blocked # by the GIL, which is accessed by libzmq when freeing zero-copy messages hb_ctx = zmq.Context() self.heartbeat = Heartbeat(hb_ctx, (self.ip, self.hb_port)) self.hb_port = self.heartbeat.port self.log.debug("Heartbeat REP Channel on port: %i"%self.hb_port) self.heartbeat.start() # Helper to make it easier to connect to an existing kernel. # set log-level to critical, to make sure it is output self.log.critical("To connect another client to this kernel, use:")
[ "def", "init_heartbeat", "(", "self", ")", ":", "# heartbeat doesn't share context, because it mustn't be blocked", "# by the GIL, which is accessed by libzmq when freeing zero-copy messages", "hb_ctx", "=", "zmq", ".", "Context", "(", ")", "self", ".", "heartbeat", "=", "Heartbeat", "(", "hb_ctx", ",", "(", "self", ".", "ip", ",", "self", ".", "hb_port", ")", ")", "self", ".", "hb_port", "=", "self", ".", "heartbeat", ".", "port", "self", ".", "log", ".", "debug", "(", "\"Heartbeat REP Channel on port: %i\"", "%", "self", ".", "hb_port", ")", "self", ".", "heartbeat", ".", "start", "(", ")", "# Helper to make it easier to connect to an existing kernel.", "# set log-level to critical, to make sure it is output", "self", ".", "log", ".", "critical", "(", "\"To connect another client to this kernel, use:\"", ")" ]
start the heart beating
[ "start", "the", "heart", "beating" ]
python
test
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/multisig.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/multisig.py#L106-L146
def make_multisig_segwit_info( m, pks ): """ Make either a p2sh-p2wpkh or p2sh-p2wsh redeem script and p2sh address. Return {'address': p2sh address, 'redeem_script': **the witness script**, 'private_keys': privkeys, 'segwit': True} * privkeys and redeem_script will be hex-encoded """ pubs = [] privkeys = [] for pk in pks: priv = BitcoinPrivateKey(pk, compressed=True) priv_hex = priv.to_hex() pub_hex = priv.public_key().to_hex() privkeys.append(priv_hex) pubs.append(keylib.key_formatting.compress(pub_hex)) script = None if len(pubs) == 1: if m != 1: raise ValueError("invalid m: len(pubkeys) == 1") # 1 pubkey means p2wpkh key_hash = hashing.bin_hash160(pubs[0].decode('hex')).encode('hex') script = '160014' + key_hash addr = btc_make_p2sh_address(script[2:]) else: # 2+ pubkeys means p2wsh script = make_multisig_script(pubs, m) addr = make_multisig_segwit_address_from_witness_script(script) return { 'address': addr, 'redeem_script': script, 'private_keys': privkeys, 'segwit': True, 'm': m }
[ "def", "make_multisig_segwit_info", "(", "m", ",", "pks", ")", ":", "pubs", "=", "[", "]", "privkeys", "=", "[", "]", "for", "pk", "in", "pks", ":", "priv", "=", "BitcoinPrivateKey", "(", "pk", ",", "compressed", "=", "True", ")", "priv_hex", "=", "priv", ".", "to_hex", "(", ")", "pub_hex", "=", "priv", ".", "public_key", "(", ")", ".", "to_hex", "(", ")", "privkeys", ".", "append", "(", "priv_hex", ")", "pubs", ".", "append", "(", "keylib", ".", "key_formatting", ".", "compress", "(", "pub_hex", ")", ")", "script", "=", "None", "if", "len", "(", "pubs", ")", "==", "1", ":", "if", "m", "!=", "1", ":", "raise", "ValueError", "(", "\"invalid m: len(pubkeys) == 1\"", ")", "# 1 pubkey means p2wpkh", "key_hash", "=", "hashing", ".", "bin_hash160", "(", "pubs", "[", "0", "]", ".", "decode", "(", "'hex'", ")", ")", ".", "encode", "(", "'hex'", ")", "script", "=", "'160014'", "+", "key_hash", "addr", "=", "btc_make_p2sh_address", "(", "script", "[", "2", ":", "]", ")", "else", ":", "# 2+ pubkeys means p2wsh ", "script", "=", "make_multisig_script", "(", "pubs", ",", "m", ")", "addr", "=", "make_multisig_segwit_address_from_witness_script", "(", "script", ")", "return", "{", "'address'", ":", "addr", ",", "'redeem_script'", ":", "script", ",", "'private_keys'", ":", "privkeys", ",", "'segwit'", ":", "True", ",", "'m'", ":", "m", "}" ]
Make either a p2sh-p2wpkh or p2sh-p2wsh redeem script and p2sh address. Return {'address': p2sh address, 'redeem_script': **the witness script**, 'private_keys': privkeys, 'segwit': True} * privkeys and redeem_script will be hex-encoded
[ "Make", "either", "a", "p2sh", "-", "p2wpkh", "or", "p2sh", "-", "p2wsh", "redeem", "script", "and", "p2sh", "address", "." ]
python
train
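In the single-key branch above, '160014' + key_hash is the P2SH-embedded P2WPKH redeem script: 0x16 is the scriptSig push length, followed by OP_0 (0x00) and a 0x14-byte push of HASH160(pubkey). A hash160 sketch with hashlib; ripemd160 availability depends on the local OpenSSL build, and the pubkey below is a placeholder:

import hashlib

def hash160(data):
    # HASH160 = RIPEMD160(SHA256(data)), the standard Bitcoin key hash.
    sha = hashlib.sha256(data).digest()
    return hashlib.new('ripemd160', sha).hexdigest()

pubkey = bytes.fromhex('02' + '11' * 32)  # placeholder compressed pubkey
print('160014' + hash160(pubkey))         # redeem script hex, as in the record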
cjdrake/pyeda
pyeda/parsing/boolexpr.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/boolexpr.py#L473-L480
def _xorterm(lexer):
    """Return an xor term expression."""
    prodterm = _prodterm(lexer)
    xorterm_prime = _xorterm_prime(lexer)
    if xorterm_prime is None:
        return prodterm
    else:
        return ('xor', prodterm, xorterm_prime)
[ "def", "_xorterm", "(", "lexer", ")", ":", "prodterm", "=", "_prodterm", "(", "lexer", ")", "xorterm_prime", "=", "_xorterm_prime", "(", "lexer", ")", "if", "xorterm_prime", "is", "None", ":", "return", "prodterm", "else", ":", "return", "(", "'xor'", ",", "prodterm", ",", "xorterm_prime", ")" ]
Return an xor term expression.
[ "Return", "an", "xor", "term", "expresssion", "." ]
python
train
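The record shows the classic recursive-descent shape: parse a term, then an optional "primed" tail, and fold the two into an AST node. The same shape standalone over a plain token list, with the operator symbol and node names hypothetical:

def parse_atom(tokens):
    return tokens.pop(0)  # stand-in for _prodterm

def parse_xorterm(tokens):
    left = parse_atom(tokens)
    if tokens and tokens[0] == '^':      # the primed tail, if present
        tokens.pop(0)
        return ('xor', left, parse_xorterm(tokens))
    return left

print(parse_xorterm(['a', '^', 'b', '^', 'c']))
# ('xor', 'a', ('xor', 'b', 'c'))  -- right-associated, like term/term-prime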
edeposit/edeposit.amqp.pdfgen
src/edeposit/amqp/pdfgen/translator.py
https://github.com/edeposit/edeposit.amqp.pdfgen/blob/1022d6d01196f4928d664a71e49273c2d8c67e63/src/edeposit/amqp/pdfgen/translator.py#L49-L79
def gen_pdf(rst_content, style_text, header=None, footer=FOOTER):
    """
    Create PDF file from `rst_content` using `style_text` as style.

    Optionally, add `header` or `footer`.

    Args:
        rst_content (str): Content of the PDF file in restructured text
                    markup.
        style_text (str): Style for the :mod:`rst2pdf` module.
        header (str, default None): Header which will be rendered to each
               page.
        footer (str, default FOOTER): Footer, which will be rendered to each
               page. See :attr:`FOOTER` for details.

    Returns:
        obj: StringIO file instance containing PDF file.
    """
    out_file_obj = StringIO()

    with NamedTemporaryFile() as f:
        f.write(style_text)
        f.flush()

        pdf = _init_pdf(f.name, header, footer)

        # create PDF
        pdf.createPdf(text=rst_content, output=out_file_obj, compressed=True)

        # rewind file pointer to begin
        out_file_obj.seek(0)

    return out_file_obj
[ "def", "gen_pdf", "(", "rst_content", ",", "style_text", ",", "header", "=", "None", ",", "footer", "=", "FOOTER", ")", ":", "out_file_obj", "=", "StringIO", "(", ")", "with", "NamedTemporaryFile", "(", ")", "as", "f", ":", "f", ".", "write", "(", "style_text", ")", "f", ".", "flush", "(", ")", "pdf", "=", "_init_pdf", "(", "f", ".", "name", ",", "header", ",", "footer", ")", "# create PDF", "pdf", ".", "createPdf", "(", "text", "=", "rst_content", ",", "output", "=", "out_file_obj", ",", "compressed", "=", "True", ")", "# rewind file pointer to begin", "out_file_obj", ".", "seek", "(", "0", ")", "return", "out_file_obj" ]
Create PDF file from `rst_content` using `style_text` as style.

Optionally, add `header` or `footer`.

Args:
    rst_content (str): Content of the PDF file in restructured text
                markup.
    style_text (str): Style for the :mod:`rst2pdf` module.
    header (str, default None): Header which will be rendered to each
           page.
    footer (str, default FOOTER): Footer, which will be rendered to each
           page. See :attr:`FOOTER` for details.

Returns:
    obj: StringIO file instance containing PDF file.
[ "Create", "PDF", "file", "from", "rst_content", "using", "style_text", "as", "style", "." ]
python
train
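The function above uses a common hand-off pattern: write configuration to a NamedTemporaryFile and flush() before passing its name to a tool that only accepts file paths, then rewind the in-memory result so callers read from offset 0. The same shape, with the PDF builder abstracted to a callable:

import io
from tempfile import NamedTemporaryFile

def render(content, style_text, build):    # 'build' stands in for rst2pdf here
    out = io.BytesIO()
    with NamedTemporaryFile(mode='w', suffix='.style') as f:
        f.write(style_text)
        f.flush()                          # make the bytes visible by name
        build(content, f.name, out)
    out.seek(0)                            # rewind before returning
    return out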
neuroticnerd/armory
armory/utils/__init__.py
https://github.com/neuroticnerd/armory/blob/d37c5ca1dbdd60dddb968e35f0bbe4bc1299dca1/armory/utils/__init__.py#L14-L48
def env(key, default=_NOT_PROVIDED, cast=str, force=False, **kwargs): """ Retrieve environment variables and specify default and options. :param key: (required) environment variable name to retrieve :param default: value to use if the environment var doesn't exist :param cast: values always come in as strings, cast to this type if needed :param force: force casting of value even when it may not be needed :param boolmap: if True use default map, otherwise you can pass custom map :param sticky: injects default into environment so child processes inherit NOTE: None can be passed as the default to avoid raising a KeyError """ boolmap = kwargs.get('boolmap', None) sticky = kwargs.get('sticky', False) value = os.environ.get(key, default) if value is _NOT_PROVIDED: raise KeyError(_ENV_ERROR_MSG.format(key)) if sticky and value == default: try: os.environ[key] = value except TypeError: os.environ[key] = str(value) if force or (value != default and type(value) != cast): if cast is bool and boolmap is not None: value = boolean(value, boolmap=boolmap) elif cast is bool: value = boolean(value) else: value = cast(value) return value
[ "def", "env", "(", "key", ",", "default", "=", "_NOT_PROVIDED", ",", "cast", "=", "str", ",", "force", "=", "False", ",", "*", "*", "kwargs", ")", ":", "boolmap", "=", "kwargs", ".", "get", "(", "'boolmap'", ",", "None", ")", "sticky", "=", "kwargs", ".", "get", "(", "'sticky'", ",", "False", ")", "value", "=", "os", ".", "environ", ".", "get", "(", "key", ",", "default", ")", "if", "value", "is", "_NOT_PROVIDED", ":", "raise", "KeyError", "(", "_ENV_ERROR_MSG", ".", "format", "(", "key", ")", ")", "if", "sticky", "and", "value", "==", "default", ":", "try", ":", "os", ".", "environ", "[", "key", "]", "=", "value", "except", "TypeError", ":", "os", ".", "environ", "[", "key", "]", "=", "str", "(", "value", ")", "if", "force", "or", "(", "value", "!=", "default", "and", "type", "(", "value", ")", "!=", "cast", ")", ":", "if", "cast", "is", "bool", "and", "boolmap", "is", "not", "None", ":", "value", "=", "boolean", "(", "value", ",", "boolmap", "=", "boolmap", ")", "elif", "cast", "is", "bool", ":", "value", "=", "boolean", "(", "value", ")", "else", ":", "value", "=", "cast", "(", "value", ")", "return", "value" ]
Retrieve environment variables and specify default and options. :param key: (required) environment variable name to retrieve :param default: value to use if the environment var doesn't exist :param cast: values always come in as strings, cast to this type if needed :param force: force casting of value even when it may not be needed :param boolmap: if True use default map, otherwise you can pass custom map :param sticky: injects default into environment so child processes inherit NOTE: None can be passed as the default to avoid raising a KeyError
[ "Retrieve", "environment", "variables", "and", "specify", "default", "and", "options", "." ]
python
train
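Hypothetical usage of the helper above; the variable names are invented, and the import assumes the package layout shown in the record's path:

import os
from armory.utils import env

os.environ['APP_DEBUG'] = 'yes'
debug = env('APP_DEBUG', default=False, cast=bool)  # 'yes' goes through boolean()
port = env('APP_PORT', default=8080, cast=int)      # unset: default returned as-is
secret = env('APP_SECRET', default=None)            # None default avoids KeyError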
toomore/goristock
grs/twseno.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/twseno.py#L72-L85
def search(self,q): """ Search. """ import re pattern = re.compile("%s" % q) result = {} for i in self.allstockno: b = re.search(pattern, self.allstockno[i]) try: b.group() result[i] = self.allstockno[i] except: pass return result
[ "def", "search", "(", "self", ",", "q", ")", ":", "import", "re", "pattern", "=", "re", ".", "compile", "(", "\"%s\"", "%", "q", ")", "result", "=", "{", "}", "for", "i", "in", "self", ".", "allstockno", ":", "b", "=", "re", ".", "search", "(", "pattern", ",", "self", ".", "allstockno", "[", "i", "]", ")", "try", ":", "b", ".", "group", "(", ")", "result", "[", "i", "]", "=", "self", ".", "allstockno", "[", "i", "]", "except", ":", "pass", "return", "result" ]
Search.
[ "Search", "." ]
python
train
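The try/except around b.group() above is really just a None check on re.search; the same lookup reads more directly as a dict comprehension:

import re

def search(allstockno, q):
    # Keep every (number, name) pair whose name matches the query pattern.
    pattern = re.compile(q)
    return {no: name for no, name in allstockno.items()
            if pattern.search(name)}

print(search({'2330': 'TSMC', '2317': 'Foxconn'}, 'TS'))  # {'2330': 'TSMC'}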
PyCQA/astroid
astroid/scoped_nodes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L1614-L1666
def infer_call_result(self, caller=None, context=None): """Infer what the function returns when called. :returns: What the function returns. :rtype: iterable(NodeNG or Uninferable) or None """ if self.is_generator(): if isinstance(self, AsyncFunctionDef): generator_cls = bases.AsyncGenerator else: generator_cls = bases.Generator result = generator_cls(self) yield result return # This is really a gigantic hack to work around metaclass generators # that return transient class-generating functions. Pylint's AST structure # cannot handle a base class object that is only used for calling __new__, # but does not contribute to the inheritance structure itself. We inject # a fake class into the hierarchy here for several well-known metaclass # generators, and filter it out later. if ( self.name == "with_metaclass" and len(self.args.args) == 1 and self.args.vararg is not None ): metaclass = next(caller.args[0].infer(context)) if isinstance(metaclass, ClassDef): class_bases = [next(arg.infer(context)) for arg in caller.args[1:]] new_class = ClassDef(name="temporary_class") new_class.hide = True new_class.parent = self new_class.postinit( bases=[base for base in class_bases if base != util.Uninferable], body=[], decorators=[], metaclass=metaclass, ) yield new_class return returns = self._get_return_nodes_skip_functions() first_return = next(returns, None) if not first_return: raise exceptions.InferenceError("Empty return iterator") for returnnode in itertools.chain((first_return,), returns): if returnnode.value is None: yield node_classes.Const(None) else: try: yield from returnnode.value.infer(context) except exceptions.InferenceError: yield util.Uninferable
[ "def", "infer_call_result", "(", "self", ",", "caller", "=", "None", ",", "context", "=", "None", ")", ":", "if", "self", ".", "is_generator", "(", ")", ":", "if", "isinstance", "(", "self", ",", "AsyncFunctionDef", ")", ":", "generator_cls", "=", "bases", ".", "AsyncGenerator", "else", ":", "generator_cls", "=", "bases", ".", "Generator", "result", "=", "generator_cls", "(", "self", ")", "yield", "result", "return", "# This is really a gigantic hack to work around metaclass generators", "# that return transient class-generating functions. Pylint's AST structure", "# cannot handle a base class object that is only used for calling __new__,", "# but does not contribute to the inheritance structure itself. We inject", "# a fake class into the hierarchy here for several well-known metaclass", "# generators, and filter it out later.", "if", "(", "self", ".", "name", "==", "\"with_metaclass\"", "and", "len", "(", "self", ".", "args", ".", "args", ")", "==", "1", "and", "self", ".", "args", ".", "vararg", "is", "not", "None", ")", ":", "metaclass", "=", "next", "(", "caller", ".", "args", "[", "0", "]", ".", "infer", "(", "context", ")", ")", "if", "isinstance", "(", "metaclass", ",", "ClassDef", ")", ":", "class_bases", "=", "[", "next", "(", "arg", ".", "infer", "(", "context", ")", ")", "for", "arg", "in", "caller", ".", "args", "[", "1", ":", "]", "]", "new_class", "=", "ClassDef", "(", "name", "=", "\"temporary_class\"", ")", "new_class", ".", "hide", "=", "True", "new_class", ".", "parent", "=", "self", "new_class", ".", "postinit", "(", "bases", "=", "[", "base", "for", "base", "in", "class_bases", "if", "base", "!=", "util", ".", "Uninferable", "]", ",", "body", "=", "[", "]", ",", "decorators", "=", "[", "]", ",", "metaclass", "=", "metaclass", ",", ")", "yield", "new_class", "return", "returns", "=", "self", ".", "_get_return_nodes_skip_functions", "(", ")", "first_return", "=", "next", "(", "returns", ",", "None", ")", "if", "not", "first_return", ":", "raise", "exceptions", ".", "InferenceError", "(", "\"Empty return iterator\"", ")", "for", "returnnode", "in", "itertools", ".", "chain", "(", "(", "first_return", ",", ")", ",", "returns", ")", ":", "if", "returnnode", ".", "value", "is", "None", ":", "yield", "node_classes", ".", "Const", "(", "None", ")", "else", ":", "try", ":", "yield", "from", "returnnode", ".", "value", ".", "infer", "(", "context", ")", "except", "exceptions", ".", "InferenceError", ":", "yield", "util", ".", "Uninferable" ]
Infer what the function returns when called. :returns: What the function returns. :rtype: iterable(NodeNG or Uninferable) or None
[ "Infer", "what", "the", "function", "returns", "when", "called", "." ]
python
train
openstack/quark
quark/plugin_modules/ip_addresses.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ip_addresses.py#L437-L460
def get_port_for_ip_address(context, ip_id, id, fields=None): """Retrieve a port. : param context: neutron api request context : param id: UUID representing the port to fetch. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_port %s for tenant %s fields %s" % (id, context.tenant_id, fields)) addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE) if not addr: raise q_exc.IpAddressNotFound(addr_id=ip_id) filters = {'ip_address_id': [ip_id]} results = db_api.port_find(context, id=id, fields=fields, scope=db_api.ONE, **filters) if not results: raise n_exc.PortNotFound(port_id=id) return v._make_port_for_ip_dict(addr, results)
[ "def", "get_port_for_ip_address", "(", "context", ",", "ip_id", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_port %s for tenant %s fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "addr", "=", "db_api", ".", "ip_address_find", "(", "context", ",", "id", "=", "ip_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "addr", ":", "raise", "q_exc", ".", "IpAddressNotFound", "(", "addr_id", "=", "ip_id", ")", "filters", "=", "{", "'ip_address_id'", ":", "[", "ip_id", "]", "}", "results", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "fields", "=", "fields", ",", "scope", "=", "db_api", ".", "ONE", ",", "*", "*", "filters", ")", "if", "not", "results", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "id", ")", "return", "v", ".", "_make_port_for_ip_dict", "(", "addr", ",", "results", ")" ]
Retrieve a port. : param context: neutron api request context : param id: UUID representing the port to fetch. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
[ "Retrieve", "a", "port", "." ]
python
valid
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_gradient_boosting_classifier.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_gradient_boosting_classifier.py#L19-L62
def convert(model, feature_names, target): """Convert a boosted tree model to protobuf format. Parameters ---------- decision_tree : GradientBoostingClassifier A trained scikit-learn tree model. feature_names: [str] Name of the input columns. target: str Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model """ if not(_HAS_SKLEARN): raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.') _sklearn_util.check_expected_type(model, _ensemble.GradientBoostingClassifier) def is_gbr_model(m): if len(m.estimators_) == 0: return False if hasattr(m, 'estimators_') and m.estimators_ is not None: for t in m.estimators_.flatten(): if not hasattr(t, 'tree_') or t.tree_ is None: return False return True else: return False _sklearn_util.check_fitted(model, is_gbr_model) post_evaluation_transform = None if model.n_classes_ == 2: base_prediction = [model.init_.prior] post_evaluation_transform = 'Regression_Logistic' else: base_prediction = list(model.init_.priors) post_evaluation_transform = 'Classification_SoftMax' return _MLModel(_convert_tree_ensemble(model, feature_names, target, mode = 'classifier', base_prediction = base_prediction, class_labels = model.classes_, post_evaluation_transform = post_evaluation_transform))
[ "def", "convert", "(", "model", ",", "feature_names", ",", "target", ")", ":", "if", "not", "(", "_HAS_SKLEARN", ")", ":", "raise", "RuntimeError", "(", "'scikit-learn not found. scikit-learn conversion API is disabled.'", ")", "_sklearn_util", ".", "check_expected_type", "(", "model", ",", "_ensemble", ".", "GradientBoostingClassifier", ")", "def", "is_gbr_model", "(", "m", ")", ":", "if", "len", "(", "m", ".", "estimators_", ")", "==", "0", ":", "return", "False", "if", "hasattr", "(", "m", ",", "'estimators_'", ")", "and", "m", ".", "estimators_", "is", "not", "None", ":", "for", "t", "in", "m", ".", "estimators_", ".", "flatten", "(", ")", ":", "if", "not", "hasattr", "(", "t", ",", "'tree_'", ")", "or", "t", ".", "tree_", "is", "None", ":", "return", "False", "return", "True", "else", ":", "return", "False", "_sklearn_util", ".", "check_fitted", "(", "model", ",", "is_gbr_model", ")", "post_evaluation_transform", "=", "None", "if", "model", ".", "n_classes_", "==", "2", ":", "base_prediction", "=", "[", "model", ".", "init_", ".", "prior", "]", "post_evaluation_transform", "=", "'Regression_Logistic'", "else", ":", "base_prediction", "=", "list", "(", "model", ".", "init_", ".", "priors", ")", "post_evaluation_transform", "=", "'Classification_SoftMax'", "return", "_MLModel", "(", "_convert_tree_ensemble", "(", "model", ",", "feature_names", ",", "target", ",", "mode", "=", "'classifier'", ",", "base_prediction", "=", "base_prediction", ",", "class_labels", "=", "model", ".", "classes_", ",", "post_evaluation_transform", "=", "post_evaluation_transform", ")", ")" ]
Convert a boosted tree model to protobuf format. Parameters ---------- decision_tree : GradientBoostingClassifier A trained scikit-learn tree model. feature_names: [str] Name of the input columns. target: str Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model
[ "Convert", "a", "boosted", "tree", "model", "to", "protobuf", "format", "." ]
python
train
NuGrid/NuGridPy
scripts/nugrid_set/nugrid_set.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/scripts/nugrid_set/nugrid_set.py#L1240-L1374
def set_plot_surface_abu(self,fig=2,species=['Sr-88','Ba-136'],decay=False,number_frac=False,xaxis='cycles',age_years=False,ratio=False,sumiso=False,eps=False,samefigure=False,samefigureall=False,withkip=False,sparsity=200,linestyle=['--'],marker=['o'],color=['r'],label=[],markevery=100,t0_model=-1,savefig=''):
    '''
    Simply plots surface abundance versus model number or time
    '''
    extralabel=False
    if len(label)>0:
        extralabel=True
    import nugridse as mp
    import utils as u
    idx=0
    if eps==True:
        species= species + ['H-1']
    if samefigureall==True and ratio==False:
        plt.figure(fig)
    for i in range(len(self.runs_H5_surf)):
        idx=0
        sefiles=mp.se(self.runs_H5_surf[i])
        if samefigure==True:
            plt.figure(i)
        cycles=range(int(sefiles.se.cycles[0]),int(sefiles.se.cycles[-1]),sparsity)
        mini=sefiles.get("mini")
        zini=sefiles.get("zini")
        if not extralabel:
            label1=str(mini)+'$M_{\odot}$, Z='+str(zini)
        if xaxis=='cycles':
            x=cycles
        if xaxis=='age':
            x=sefiles.get(cycles,'age')
            if age_years==True:
                x=np.array(x)*sefiles.get('age_unit')/(365*24*3600)
            print 'x is age'
            if t0_model>0:
                #print cycles
                idxt0=0
                for kk in range(len(cycles)):
                    print int(cycles[kk]),t0_model
                    if int(cycles[kk]) == t0_model:
                        idxt0=kk
                        break
                print 'found t0_model idx',idxt0
                #idxt0=cycles.index(t0_model)
                cycles=cycles[idxt0:]
                if idxt0==0:
                    print 'Warning, t0modle not found'
                x=x[idxt0:] - x[idxt0]
            else:
                idxt0=0
        if xaxis=='mass':
            x=sefiles.get(cycles,'mass')
        if decay==False:
            species_abu1=sefiles.get(cycles,species)
        else:
            species_abu11=sefiles.get(cycles,'iso_massf_decay')
            species_abu1=[]
            for jj in range(len(cycles)):
                species_abu1.append([])
                for j in range(len(species)):
                    species_abu1[-1].append(species_abu11[jj][sefiles.se.isotopes.index(species[j])])
        if len(species)==1:
            species_abu11=[]
            for kk in range(len(species_abu1)):
                species_abu11.append([species_abu1[kk]])
            species_abu1=species_abu11
        species_abu=[]
        for k in range(len(species)):
            print 'species ',k
            species_abu.append([])
        for k in range(len(species)):
            for h in range(len(cycles)):
                species_abu[k].append(species_abu1[h][k])
        #print species_abu
        #if t0_model>0:
        #    species_abu=species_abu[t0_model:]
        for k in range(len(species)):
            if samefigure==False and ratio==False:
                fig=plt.figure(species[k])
            if xaxis=='cycles':
                plt.xlabel('model number')
            if xaxis=='age':
                plt.xlabel('Age [yrs]')
            if xaxis=='mass':
                plt.xlabel('$M/M_{\odot}$')
            plt.ylabel('X$_i$')
            if ratio==True:
                continue
            if extralabel:
                label=label[k]
            else:
                label=label1
            if samefigure==True:
                if sumiso == True:
                    sumiso_massfrac=np.array(species_abu[0])
                    for hh in range(1,len(species_abu)):
                        sumiso_massfrac = sumiso_massfrac + np.array(species_abu[hh])
                    plt.plot(x,sumiso_massfrac,linestyle=linestyle[idx],marker=marker[idx],label=species[k]+', '+label,color=color[idx],markevery=markevery)
                    break #leave iso looop
                else:
                    if eps==True:
                        species_abu[0]=np.log10(np.array(species_abu[0])/(np.array(species_abu[1])*7)) + 12.
                    plt.plot(x,species_abu[k],linestyle=linestyle[idx],marker=marker[idx],label=species[k]+', '+label,color=color[idx],markevery=markevery)
                    idx+=1
                    if eps==True:
                        break
            else:
                if withkip==True:
                    print 'test'
                else:
                    plt.ylabel('X('+species[k]+')')
                if eps==True:
                    species_abu[0]=np.log10(np.array(species_abu[0])/(np.array(species_abu[1])*7)) + 12.
                plt.plot(x,species_abu[k],linestyle=linestyle[i],marker=marker[i],label=label,color=color[i],markevery=markevery)
                if eps==True:
                    break
        plt.legend(loc=2)
        plt.yscale('log')
        if ratio==True:
            if number_frac==True:
                print 'plot number frac'
                plt.plot(x,4./3.*np.array(species_abu[1])/np.array(species_abu[0]),linestyle=linestyle[i],marker=marker[i],label=label,color=color[i],markevery=markevery)
            else:
                plt.plot(x,np.array(species_abu[1])/np.array(species_abu[0]),linestyle=linestyle[i],marker=marker[i],label=label,color=color[i],markevery=markevery)
            plt.legend(loc=2)
            plt.yscale('log')
        name='M'+str(mini)+'Z'+str(zini)
        plt.legend(loc=4)
        plt.savefig(savefig+'/surf_'+name+'.png')
[ "def", "set_plot_surface_abu", "(", "self", ",", "fig", "=", "2", ",", "species", "=", "[", "'Sr-88'", ",", "'Ba-136'", "]", ",", "decay", "=", "False", ",", "number_frac", "=", "False", ",", "xaxis", "=", "'cycles'", ",", "age_years", "=", "False", ",", "ratio", "=", "False", ",", "sumiso", "=", "False", ",", "eps", "=", "False", ",", "samefigure", "=", "False", ",", "samefigureall", "=", "False", ",", "withkip", "=", "False", ",", "sparsity", "=", "200", ",", "linestyle", "=", "[", "'--'", "]", ",", "marker", "=", "[", "'o'", "]", ",", "color", "=", "[", "'r'", "]", ",", "label", "=", "[", "]", ",", "markevery", "=", "100", ",", "t0_model", "=", "-", "1", ",", "savefig", "=", "''", ")", ":", "extralabel", "=", "False", "if", "len", "(", "label", ")", ">", "0", ":", "extralabel", "=", "True", "import", "nugridse", "as", "mp", "import", "utils", "as", "u", "idx", "=", "0", "if", "eps", "==", "True", ":", "species", "=", "species", "+", "[", "'H-1'", "]", "if", "samefigureall", "==", "True", "and", "ratio", "==", "False", ":", "plt", ".", "figure", "(", "fig", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "runs_H5_surf", ")", ")", ":", "idx", "=", "0", "sefiles", "=", "mp", ".", "se", "(", "self", ".", "runs_H5_surf", "[", "i", "]", ")", "if", "samefigure", "==", "True", ":", "plt", ".", "figure", "(", "i", ")", "cycles", "=", "range", "(", "int", "(", "sefiles", ".", "se", ".", "cycles", "[", "0", "]", ")", ",", "int", "(", "sefiles", ".", "se", ".", "cycles", "[", "-", "1", "]", ")", ",", "sparsity", ")", "mini", "=", "sefiles", ".", "get", "(", "\"mini\"", ")", "zini", "=", "sefiles", ".", "get", "(", "\"zini\"", ")", "if", "not", "extralabel", ":", "label1", "=", "str", "(", "mini", ")", "+", "'", "o", "dot", "}", "$", ",", "Z", "=", "'", "+", "str", "(", "zini", ")", "if", "xaxis", "==", "'cycles'", ":", "x", "=", "cycles", "if", "xaxis", "==", "'age'", ":", "x", "=", "sefiles", ".", "get", "(", "cycles", ",", "'age'", ")", "if", "age_years", "==", "True", ":", "x", "=", "np", ".", "array", "(", "x", ")", "*", "sefiles", ".", "get", "(", "'age_unit'", ")", "/", "(", "365", "*", "24", "*", "3600", ")", "print", "'x is age'", "if", "t0_model", ">", "0", ":", "#print cycles", "idxt0", "=", "0", "for", "kk", "in", "range", "(", "len", "(", "cycles", ")", ")", ":", "print", "int", "(", "cycles", "[", "kk", "]", ")", ",", "t0_model", "if", "int", "(", "cycles", "[", "kk", "]", ")", "==", "t0_model", ":", "idxt0", "=", "kk", "break", "print", "'found t0_model idx'", ",", "idxt0", "#idxt0=cycles.index(t0_model)", "cycles", "=", "cycles", "[", "idxt0", ":", "]", "if", "idxt0", "==", "0", ":", "print", "'Warning, t0modle not found'", "x", "=", "x", "[", "idxt0", ":", "]", "-", "x", "[", "idxt0", "]", "else", ":", "idxt0", "=", "0", "if", "xaxis", "==", "'mass'", ":", "x", "=", "sefiles", ".", "get", "(", "cycles", ",", "'mass'", ")", "if", "decay", "==", "False", ":", "species_abu1", "=", "sefiles", ".", "get", "(", "cycles", ",", "species", ")", "else", ":", "species_abu11", "=", "sefiles", ".", "get", "(", "cycles", ",", "'iso_massf_decay'", ")", "species_abu1", "=", "[", "]", "for", "jj", "in", "range", "(", "len", "(", "cycles", ")", ")", ":", "species_abu1", ".", "append", "(", "[", "]", ")", "for", "j", "in", "range", "(", "len", "(", "species", ")", ")", ":", "species_abu1", "[", "-", "1", "]", ".", "append", "(", "species_abu11", "[", "jj", "]", "[", "sefiles", ".", "se", ".", "isotopes", ".", "index", "(", "species", "[", "j", "]", ")", "]", ")", "if", "len", "(", 
"species", ")", "==", "1", ":", "species_abu11", "=", "[", "]", "for", "kk", "in", "range", "(", "len", "(", "species_abu1", ")", ")", ":", "species_abu11", ".", "append", "(", "[", "species_abu1", "[", "kk", "]", "]", ")", "species_abu1", "=", "species_abu11", "species_abu", "=", "[", "]", "for", "k", "in", "range", "(", "len", "(", "species", ")", ")", ":", "print", "'species '", ",", "k", "species_abu", ".", "append", "(", "[", "]", ")", "for", "k", "in", "range", "(", "len", "(", "species", ")", ")", ":", "for", "h", "in", "range", "(", "len", "(", "cycles", ")", ")", ":", "species_abu", "[", "k", "]", ".", "append", "(", "species_abu1", "[", "h", "]", "[", "k", "]", ")", "#print species_abu", "#if t0_model>0:", "#\t\tspecies_abu=species_abu[t0_model:]", "for", "k", "in", "range", "(", "len", "(", "species", ")", ")", ":", "if", "samefigure", "==", "False", "and", "ratio", "==", "False", ":", "fig", "=", "plt", ".", "figure", "(", "species", "[", "k", "]", ")", "if", "xaxis", "==", "'cycles'", ":", "plt", ".", "xlabel", "(", "'model number'", ")", "if", "xaxis", "==", "'age'", ":", "plt", ".", "xlabel", "(", "'Age [yrs]'", ")", "if", "xaxis", "==", "'mass'", ":", "plt", ".", "xlabel", "(", "'", "o", "dot", "}", "$", "'", ")", "plt", ".", "ylabel", "(", "'X$_i$'", ")", "if", "ratio", "==", "True", ":", "continue", "if", "extralabel", ":", "label", "=", "label", "[", "k", "]", "else", ":", "label", "=", "label1", "if", "samefigure", "==", "True", ":", "if", "sumiso", "==", "True", ":", "sumiso_massfrac", "=", "np", ".", "array", "(", "species_abu", "[", "0", "]", ")", "for", "hh", "in", "range", "(", "1", ",", "len", "(", "species_abu", ")", ")", ":", "sumiso_massfrac", "=", "sumiso_massfrac", "+", "np", ".", "array", "(", "species_abu", "[", "hh", "]", ")", "plt", ".", "plot", "(", "x", ",", "sumiso_massfrac", ",", "linestyle", "=", "linestyle", "[", "idx", "]", ",", "marker", "=", "marker", "[", "idx", "]", ",", "label", "=", "species", "[", "k", "]", "+", "', '", "+", "label", ",", "color", "=", "color", "[", "idx", "]", ",", "markevery", "=", "markevery", ")", "break", "#leave iso looop ", "else", ":", "if", "eps", "==", "True", ":", "species_abu", "[", "0", "]", "=", "np", ".", "log10", "(", "np", ".", "array", "(", "species_abu", "[", "0", "]", ")", "/", "(", "np", ".", "array", "(", "species_abu", "[", "1", "]", ")", "*", "7", ")", ")", "+", "12.", "plt", ".", "plot", "(", "x", ",", "species_abu", "[", "k", "]", ",", "linestyle", "=", "linestyle", "[", "idx", "]", ",", "marker", "=", "marker", "[", "idx", "]", ",", "label", "=", "species", "[", "k", "]", "+", "', '", "+", "label", ",", "color", "=", "color", "[", "idx", "]", ",", "markevery", "=", "markevery", ")", "idx", "+=", "1", "if", "eps", "==", "True", ":", "break", "else", ":", "if", "withkip", "==", "True", ":", "print", "'test'", "else", ":", "plt", ".", "ylabel", "(", "'X('", "+", "species", "[", "k", "]", "+", "')'", ")", "if", "eps", "==", "True", ":", "species_abu", "[", "0", "]", "=", "np", ".", "log10", "(", "np", ".", "array", "(", "species_abu", "[", "0", "]", ")", "/", "(", "np", ".", "array", "(", "species_abu", "[", "1", "]", ")", "*", "7", ")", ")", "+", "12.", "plt", ".", "plot", "(", "x", ",", "species_abu", "[", "k", "]", ",", "linestyle", "=", "linestyle", "[", "i", "]", ",", "marker", "=", "marker", "[", "i", "]", ",", "label", "=", "label", ",", "color", "=", "color", "[", "i", "]", ",", "markevery", "=", "markevery", ")", "if", "eps", "==", "True", ":", "break", "plt", ".", "legend", "(", "loc", "=", 
"2", ")", "plt", ".", "yscale", "(", "'log'", ")", "if", "ratio", "==", "True", ":", "if", "number_frac", "==", "True", ":", "print", "'plot number frac'", "plt", ".", "plot", "(", "x", ",", "4.", "/", "3.", "*", "np", ".", "array", "(", "species_abu", "[", "1", "]", ")", "/", "np", ".", "array", "(", "species_abu", "[", "0", "]", ")", ",", "linestyle", "=", "linestyle", "[", "i", "]", ",", "marker", "=", "marker", "[", "i", "]", ",", "label", "=", "label", ",", "color", "=", "color", "[", "i", "]", ",", "markevery", "=", "markevery", ")", "else", ":", "plt", ".", "plot", "(", "x", ",", "np", ".", "array", "(", "species_abu", "[", "1", "]", ")", "/", "np", ".", "array", "(", "species_abu", "[", "0", "]", ")", ",", "linestyle", "=", "linestyle", "[", "i", "]", ",", "marker", "=", "marker", "[", "i", "]", ",", "label", "=", "label", ",", "color", "=", "color", "[", "i", "]", ",", "markevery", "=", "markevery", ")", "plt", ".", "legend", "(", "loc", "=", "2", ")", "plt", ".", "yscale", "(", "'log'", ")", "name", "=", "'M'", "+", "str", "(", "mini", ")", "+", "'Z'", "+", "str", "(", "zini", ")", "plt", ".", "legend", "(", "loc", "=", "4", ")", "plt", ".", "savefig", "(", "savefig", "+", "'/surf_'", "+", "name", "+", "'.png'", ")" ]
Simply plots surface abundance versus model number or time
[ "Simply", "plots", "surface", "abundance", "versus", "model", "number", "or", "time" ]
python
train
openstack/python-monascaclient
monascaclient/v2_0/notifications.py
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/notifications.py#L29-L37
def get(self, **kwargs): """Get the details for a specific notification.""" # NOTE(trebskit) should actually be find_one, but # monasca does not support expected response format url = '%s/%s' % (self.base_url, kwargs['notification_id']) resp = self.client.list(path=url) return resp
[ "def", "get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# NOTE(trebskit) should actually be find_one, but", "# monasca does not support expected response format", "url", "=", "'%s/%s'", "%", "(", "self", ".", "base_url", ",", "kwargs", "[", "'notification_id'", "]", ")", "resp", "=", "self", ".", "client", ".", "list", "(", "path", "=", "url", ")", "return", "resp" ]
Get the details for a specific notification.
[ "Get", "the", "details", "for", "a", "specific", "notification", "." ]
python
train
wavycloud/pyboto3
pyboto3/rds.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/rds.py#L5435-L5539
def describe_reserved_db_instances(ReservedDBInstanceId=None, ReservedDBInstancesOfferingId=None, DBInstanceClass=None, Duration=None, ProductDescription=None, OfferingType=None, MultiAZ=None, Filters=None, MaxRecords=None, Marker=None):
    """
    Returns information about reserved DB instances for this account, or about a specified reserved DB instance.
    See also: AWS API Documentation

    Examples
    This example lists information for all reserved DB instances for the specified DB instance class, duration, product, offering type, and availability zone settings.
    Expected Output:

    :example: response = client.describe_reserved_db_instances(
        ReservedDBInstanceId='string',
        ReservedDBInstancesOfferingId='string',
        DBInstanceClass='string',
        Duration='string',
        ProductDescription='string',
        OfferingType='string',
        MultiAZ=True|False,
        Filters=[
            {
                'Name': 'string',
                'Values': [
                    'string',
                ]
            },
        ],
        MaxRecords=123,
        Marker='string'
    )

    :type ReservedDBInstanceId: string
    :param ReservedDBInstanceId: The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID.

    :type ReservedDBInstancesOfferingId: string
    :param ReservedDBInstancesOfferingId: The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier.

    :type DBInstanceClass: string
    :param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class.

    :type Duration: string
    :param Duration: The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.
            Valid Values: 1 | 3 | 31536000 | 94608000

    :type ProductDescription: string
    :param ProductDescription: The product description filter value. Specify this parameter to show only those reservations matching the specified product description.

    :type OfferingType: string
    :param OfferingType: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.
            Valid Values: 'Partial Upfront' | 'All Upfront' | 'No Upfront'

    :type MultiAZ: boolean
    :param MultiAZ: The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter.

    :type Filters: list
    :param Filters: This parameter is not currently supported.
            (dict) --This type is not currently supported.
            Name (string) -- [REQUIRED]This parameter is not currently supported.
            Values (list) -- [REQUIRED]This parameter is not currently supported.
            (string) --

    :type MaxRecords: integer
    :param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.
            Default: 100
            Constraints: Minimum 20, maximum 100.

    :type Marker: string
    :param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

    :rtype: dict
    :return: {
        'Marker': 'string',
        'ReservedDBInstances': [
            {
                'ReservedDBInstanceId': 'string',
                'ReservedDBInstancesOfferingId': 'string',
                'DBInstanceClass': 'string',
                'StartTime': datetime(2015, 1, 1),
                'Duration': 123,
                'FixedPrice': 123.0,
                'UsagePrice': 123.0,
                'CurrencyCode': 'string',
                'DBInstanceCount': 123,
                'ProductDescription': 'string',
                'OfferingType': 'string',
                'MultiAZ': True|False,
                'State': 'string',
                'RecurringCharges': [
                    {
                        'RecurringChargeAmount': 123.0,
                        'RecurringChargeFrequency': 'string'
                    },
                ],
                'ReservedDBInstanceArn': 'string'
            },
        ]
    }
    """
    pass
[ "def", "describe_reserved_db_instances", "(", "ReservedDBInstanceId", "=", "None", ",", "ReservedDBInstancesOfferingId", "=", "None", ",", "DBInstanceClass", "=", "None", ",", "Duration", "=", "None", ",", "ProductDescription", "=", "None", ",", "OfferingType", "=", "None", ",", "MultiAZ", "=", "None", ",", "Filters", "=", "None", ",", "MaxRecords", "=", "None", ",", "Marker", "=", "None", ")", ":", "pass" ]
Returns information about reserved DB instances for this account, or about a specified reserved DB instance. See also: AWS API Documentation Examples This example lists information for all reserved DB instances for the specified DB instance class, duration, product, offering type, and availability zone settings. Expected Output: :example: response = client.describe_reserved_db_instances( ReservedDBInstanceId='string', ReservedDBInstancesOfferingId='string', DBInstanceClass='string', Duration='string', ProductDescription='string', OfferingType='string', MultiAZ=True|False, Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], MaxRecords=123, Marker='string' ) :type ReservedDBInstanceId: string :param ReservedDBInstanceId: The reserved DB instance identifier filter value. Specify this parameter to show only the reservation that matches the specified reservation ID. :type ReservedDBInstancesOfferingId: string :param ReservedDBInstancesOfferingId: The offering identifier filter value. Specify this parameter to show only purchased reservations matching the specified offering identifier. :type DBInstanceClass: string :param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only those reservations matching the specified DB instances class. :type Duration: string :param Duration: The duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000 :type ProductDescription: string :param ProductDescription: The product description filter value. Specify this parameter to show only those reservations matching the specified product description. :type OfferingType: string :param OfferingType: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type. Valid Values: 'Partial Upfront' | 'All Upfront' | 'No Upfront' :type MultiAZ: boolean :param MultiAZ: The Multi-AZ filter value. Specify this parameter to show only those reservations matching the specified Multi-AZ parameter. :type Filters: list :param Filters: This parameter is not currently supported. (dict) --This type is not currently supported. Name (string) -- [REQUIRED]This parameter is not currently supported. Values (list) -- [REQUIRED]This parameter is not currently supported. (string) -- :type MaxRecords: integer :param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. Default: 100 Constraints: Minimum 20, maximum 100. :type Marker: string :param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords . :rtype: dict :return: { 'Marker': 'string', 'ReservedDBInstances': [ { 'ReservedDBInstanceId': 'string', 'ReservedDBInstancesOfferingId': 'string', 'DBInstanceClass': 'string', 'StartTime': datetime(2015, 1, 1), 'Duration': 123, 'FixedPrice': 123.0, 'UsagePrice': 123.0, 'CurrencyCode': 'string', 'DBInstanceCount': 123, 'ProductDescription': 'string', 'OfferingType': 'string', 'MultiAZ': True|False, 'State': 'string', 'RecurringCharges': [ { 'RecurringChargeAmount': 123.0, 'RecurringChargeFrequency': 'string' }, ], 'ReservedDBInstanceArn': 'string' }, ] }
[ "Returns", "information", "about", "reserved", "DB", "instances", "for", "this", "account", "or", "about", "a", "specified", "reserved", "DB", "instance", ".", "See", "also", ":", "AWS", "API", "Documentation", "Examples", "This", "example", "lists", "information", "for", "all", "reserved", "DB", "instances", "for", "the", "specified", "DB", "instance", "class", "duration", "product", "offering", "type", "and", "availability", "zone", "settings", ".", "Expected", "Output", ":", ":", "example", ":", "response", "=", "client", ".", "describe_reserved_db_instances", "(", "ReservedDBInstanceId", "=", "string", "ReservedDBInstancesOfferingId", "=", "string", "DBInstanceClass", "=", "string", "Duration", "=", "string", "ProductDescription", "=", "string", "OfferingType", "=", "string", "MultiAZ", "=", "True|False", "Filters", "=", "[", "{", "Name", ":", "string", "Values", ":", "[", "string", "]", "}", "]", "MaxRecords", "=", "123", "Marker", "=", "string", ")", ":", "type", "ReservedDBInstanceId", ":", "string", ":", "param", "ReservedDBInstanceId", ":", "The", "reserved", "DB", "instance", "identifier", "filter", "value", ".", "Specify", "this", "parameter", "to", "show", "only", "the", "reservation", "that", "matches", "the", "specified", "reservation", "ID", "." ]
python
train
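pyboto3 files like this one are static stubs (the body is `pass`); the callable operation lives in boto3. A short sketch of invoking it through boto3, assuming credentials and a default region are configured; the response keys come straight from the `:return:` structure documented above.

import boto3

rds = boto3.client('rds')
resp = rds.describe_reserved_db_instances(MaxRecords=20)
for ri in resp.get('ReservedDBInstances', []):
    print(ri['ReservedDBInstanceId'], ri['State'])
# if more reservations matched than MaxRecords, resp['Marker'] can be
# passed back in as Marker= to page through the rest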
shichao-an/115wangpan
u115/utils.py
https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/utils.py#L25-L30
def get_timestamp(length):
    """Get a timestamp of the given `length`, as a string"""
    s = '%.6f' % time.time()
    whole, frac = map(int, s.split('.'))
    res = '%d%d' % (whole, frac)
    return res[:length]
[ "def", "get_timestamp", "(", "length", ")", ":", "s", "=", "'%.6f'", "%", "time", ".", "time", "(", ")", "whole", ",", "frac", "=", "map", "(", "int", ",", "s", ".", "split", "(", "'.'", ")", ")", "res", "=", "'%d%d'", "%", "(", "whole", ",", "frac", ")", "return", "res", "[", ":", "length", "]" ]
Get a timestamp of the given `length`, as a string
[ "Get", "a", "timestamp", "of", "length", "in", "string" ]
python
train
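A worked example of the record above, runnable standalone. One subtlety worth noting: `int()` drops leading zeros of the fractional part, so the concatenated digit string can occasionally be shorter than the 16 characters one might expect.

import time

def get_timestamp(length):
    """Copy of the function above, for illustration."""
    s = '%.6f' % time.time()
    whole, frac = map(int, s.split('.'))
    res = '%d%d' % (whole, frac)
    return res[:length]

# e.g. time.time() == 1522222222.123456 gives '1522222222123456',
# so get_timestamp(13) returns '1522222222123'
print(get_timestamp(13))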
inveniosoftware-contrib/json-merger
json_merger/merger.py
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/merger.py#L135-L224
def merge(self):
    """Populates result members.

    Performs the merge algorithm using the specified config and fills
    in the members that provide stats about the merging procedure.

    Attributes:
        merged_root: The result of the merge.

        aligned_root, aligned_head, aligned_update: Copies of root,
            head and update in which all matched list entities have the
            same list index for easier diff viewing.

        head_stats, update_stats: Stats for each list field present in
            the head or update objects. Instance of
            :class:`json_merger.stats.ListMatchStats`

        conflicts: List of :class:`json_merger.conflict.Conflict`
            instances that occurred during the merge.

    Raises:
        :class:`json_merger.errors.MergeError` : If conflicts occur
            during the call.

    Example:
        >>> from json_merger import Merger
        >>> # We compare people by their name
        >>> from json_merger.comparator import PrimaryKeyComparator
        >>> from json_merger.config import DictMergerOps, UnifierOps
        >>> from json_merger.errors import MergeError
        >>> # Use this only for doctest :)
        >>> from pprint import pprint as pp
        >>>
        >>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
        >>> head = {'people': [{'name': 'Jimmy', 'age': 31},
        ...                    {'name': 'George'}]}
        >>> update = {'people': [{'name': 'John'},
        ...                      {'name': 'Jimmy', 'age': 32}]}
        >>>
        >>> class NameComparator(PrimaryKeyComparator):
        ...     # Two objects are the same entity if they have the
        ...     # same name.
        ...     primary_key_fields = ['name']
        >>> m = Merger(root, head, update,
        ...            DictMergerOps.FALLBACK_KEEP_HEAD,
        ...            UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
        ...            comparators = {'people': NameComparator})
        >>> # We do a merge
        >>> try:
        ...     m.merge()
        ... except MergeError as e:
        ...     # Conflicts are the same thing as the exception content.
        ...     assert e.content == m.conflicts
        >>> # This is how the lists are aligned:
        >>> pp(m.aligned_root['people'], width=60)
        ['#$PLACEHOLDER$#',
         {'age': 30, 'name': 'Jimmy'},
         '#$PLACEHOLDER$#']
        >>> pp(m.aligned_head['people'], width=60)
        ['#$PLACEHOLDER$#',
         {'age': 31, 'name': 'Jimmy'},
         {'name': 'George'}]
        >>> pp(m.aligned_update['people'], width=60)
        [{'name': 'John'},
         {'age': 32, 'name': 'Jimmy'},
         '#$PLACEHOLDER$#']
        >>> # This is the end result of the merge:
        >>> pp(m.merged_root, width=60)
        {'people': [{'name': 'John'},
                    {'age': 31, 'name': 'Jimmy'},
                    {'name': 'George'}]}
        >>> # With some conflicts:
        >>> pp(m.conflicts, width=60)
        [('SET_FIELD', ('people', 1, 'age'), 32)]
        >>> # And some stats:
        >>> pp(m.head_stats[('people',)].in_result)
        [{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
        >>> pp(m.update_stats[('people',)].not_in_result)
        []

    Note:
        Even if conflicts occur, merged_root, aligned_root,
        aligned_head and aligned_update are always populated by
        following the strategies set for the merger instance.
    """
    self.merged_root = self._recursive_merge(self.root, self.head,
                                             self.update)
    if self.conflicts:
        raise MergeError('Conflicts Occurred in Merge Process',
                         self.conflicts)
[ "def", "merge", "(", "self", ")", ":", "self", ".", "merged_root", "=", "self", ".", "_recursive_merge", "(", "self", ".", "root", ",", "self", ".", "head", ",", "self", ".", "update", ")", "if", "self", ".", "conflicts", ":", "raise", "MergeError", "(", "'Conflicts Occurred in Merge Process'", ",", "self", ".", "conflicts", ")" ]
Populates result members.

Performs the merge algorithm using the specified config and fills
in the members that provide stats about the merging procedure.

Attributes:
    merged_root: The result of the merge.

    aligned_root, aligned_head, aligned_update: Copies of root,
        head and update in which all matched list entities have the
        same list index for easier diff viewing.

    head_stats, update_stats: Stats for each list field present in
        the head or update objects. Instance of
        :class:`json_merger.stats.ListMatchStats`

    conflicts: List of :class:`json_merger.conflict.Conflict`
        instances that occurred during the merge.

Raises:
    :class:`json_merger.errors.MergeError` : If conflicts occur
        during the call.

Example:
    >>> from json_merger import Merger
    >>> # We compare people by their name
    >>> from json_merger.comparator import PrimaryKeyComparator
    >>> from json_merger.config import DictMergerOps, UnifierOps
    >>> from json_merger.errors import MergeError
    >>> # Use this only for doctest :)
    >>> from pprint import pprint as pp
    >>>
    >>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
    >>> head = {'people': [{'name': 'Jimmy', 'age': 31},
    ...                    {'name': 'George'}]}
    >>> update = {'people': [{'name': 'John'},
    ...                      {'name': 'Jimmy', 'age': 32}]}
    >>>
    >>> class NameComparator(PrimaryKeyComparator):
    ...     # Two objects are the same entity if they have the
    ...     # same name.
    ...     primary_key_fields = ['name']
    >>> m = Merger(root, head, update,
    ...            DictMergerOps.FALLBACK_KEEP_HEAD,
    ...            UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
    ...            comparators = {'people': NameComparator})
    >>> # We do a merge
    >>> try:
    ...     m.merge()
    ... except MergeError as e:
    ...     # Conflicts are the same thing as the exception content.
    ...     assert e.content == m.conflicts
    >>> # This is how the lists are aligned:
    >>> pp(m.aligned_root['people'], width=60)
    ['#$PLACEHOLDER$#',
     {'age': 30, 'name': 'Jimmy'},
     '#$PLACEHOLDER$#']
    >>> pp(m.aligned_head['people'], width=60)
    ['#$PLACEHOLDER$#',
     {'age': 31, 'name': 'Jimmy'},
     {'name': 'George'}]
    >>> pp(m.aligned_update['people'], width=60)
    [{'name': 'John'},
     {'age': 32, 'name': 'Jimmy'},
     '#$PLACEHOLDER$#']
    >>> # This is the end result of the merge:
    >>> pp(m.merged_root, width=60)
    {'people': [{'name': 'John'},
                {'age': 31, 'name': 'Jimmy'},
                {'name': 'George'}]}
    >>> # With some conflicts:
    >>> pp(m.conflicts, width=60)
    [('SET_FIELD', ('people', 1, 'age'), 32)]
    >>> # And some stats:
    >>> pp(m.head_stats[('people',)].in_result)
    [{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
    >>> pp(m.update_stats[('people',)].not_in_result)
    []

Note:
    Even if conflicts occur, merged_root, aligned_root, aligned_head
    and aligned_update are always populated by following the
    strategies set for the merger instance.
[ "Populates", "result", "members", "." ]
python
train
gwastro/pycbc
pycbc/inference/sampler/base.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base.py#L204-L242
def create_new_output_file(sampler, filename, force=False, injection_file=None, **kwargs): """Creates a new output file. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler filename : str Name of the file to create. force : bool, optional Create the file even if it already exists. Default is False. injection_file : str, optional If an injection was added to the data, write its information. \**kwargs : All other keyword arguments are passed through to the file's ``write_metadata`` function. """ if os.path.exists(filename): if force: os.remove(filename) else: raise OSError("output-file already exists; use force if you " "wish to overwrite it.") logging.info("Creating file {}".format(filename)) with sampler.io(filename, "w") as fp: # create the samples group and sampler info group fp.create_group(fp.samples_group) fp.create_group(fp.sampler_group) # save the sampler's metadata fp.write_sampler_metadata(sampler) # save injection parameters if injection_file is not None: logging.info("Writing injection file to output") # just use the first one fp.write_injections(injection_file)
[ "def", "create_new_output_file", "(", "sampler", ",", "filename", ",", "force", "=", "False", ",", "injection_file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "if", "force", ":", "os", ".", "remove", "(", "filename", ")", "else", ":", "raise", "OSError", "(", "\"output-file already exists; use force if you \"", "\"wish to overwrite it.\"", ")", "logging", ".", "info", "(", "\"Creating file {}\"", ".", "format", "(", "filename", ")", ")", "with", "sampler", ".", "io", "(", "filename", ",", "\"w\"", ")", "as", "fp", ":", "# create the samples group and sampler info group", "fp", ".", "create_group", "(", "fp", ".", "samples_group", ")", "fp", ".", "create_group", "(", "fp", ".", "sampler_group", ")", "# save the sampler's metadata", "fp", ".", "write_sampler_metadata", "(", "sampler", ")", "# save injection parameters", "if", "injection_file", "is", "not", "None", ":", "logging", ".", "info", "(", "\"Writing injection file to output\"", ")", "# just use the first one", "fp", ".", "write_injections", "(", "injection_file", ")" ]
Creates a new output file. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler filename : str Name of the file to create. force : bool, optional Create the file even if it already exists. Default is False. injection_file : str, optional If an injection was added to the data, write its information. \**kwargs : All other keyword arguments are passed through to the file's ``write_metadata`` function.
[ "Creates", "a", "new", "output", "file", "." ]
python
train
optimizely/python-sdk
optimizely/optimizely.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L610-L634
def get_forced_variation(self, experiment_key, user_id): """ Gets the forced variation for a given user and experiment. Args: experiment_key: A string key identifying the experiment. user_id: The user ID. Returns: The forced variation key. None if no forced variation key. """ if not self.is_valid: self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation')) return None if not validator.is_non_empty_string(experiment_key): self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key')) return None if not isinstance(user_id, string_types): self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id')) return None forced_variation = self.config.get_forced_variation(experiment_key, user_id) return forced_variation.key if forced_variation else None
[ "def", "get_forced_variation", "(", "self", ",", "experiment_key", ",", "user_id", ")", ":", "if", "not", "self", ".", "is_valid", ":", "self", ".", "logger", ".", "error", "(", "enums", ".", "Errors", ".", "INVALID_DATAFILE", ".", "format", "(", "'get_forced_variation'", ")", ")", "return", "None", "if", "not", "validator", ".", "is_non_empty_string", "(", "experiment_key", ")", ":", "self", ".", "logger", ".", "error", "(", "enums", ".", "Errors", ".", "INVALID_INPUT_ERROR", ".", "format", "(", "'experiment_key'", ")", ")", "return", "None", "if", "not", "isinstance", "(", "user_id", ",", "string_types", ")", ":", "self", ".", "logger", ".", "error", "(", "enums", ".", "Errors", ".", "INVALID_INPUT_ERROR", ".", "format", "(", "'user_id'", ")", ")", "return", "None", "forced_variation", "=", "self", ".", "config", ".", "get_forced_variation", "(", "experiment_key", ",", "user_id", ")", "return", "forced_variation", ".", "key", "if", "forced_variation", "else", "None" ]
Gets the forced variation for a given user and experiment. Args: experiment_key: A string key identifying the experiment. user_id: The user ID. Returns: The forced variation key. None if no forced variation key.
[ "Gets", "the", "forced", "variation", "for", "a", "given", "user", "and", "experiment", "." ]
python
train
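A hedged usage sketch for the record above, assuming the companion `set_forced_variation` setter from the same SDK, a valid datafile in which the experiment exists, and placeholder experiment/user/variation keys.

from optimizely import optimizely

datafile = open('datafile.json').read()   # placeholder path; a real project datafile is assumed
client = optimizely.Optimizely(datafile)
client.set_forced_variation('checkout_test', 'user_1', 'variation_b')
assert client.get_forced_variation('checkout_test', 'user_1') == 'variation_b'
# invalid input short-circuits: a non-string user_id logs an error and returns None
assert client.get_forced_variation('checkout_test', None) is None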
senaite/senaite.core
bika/lims/utils/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/utils/__init__.py#L71-L83
def t(i18n_msg):
    """Safely translate and convert to UTF8 any zope i18n msgid
    returned from a bikaMessageFactory _
    """
    text = to_unicode(i18n_msg)
    try:
        request = api.get_request()
        domain = getattr(i18n_msg, "domain", "senaite.core")
        text = translate(text, domain=domain, context=request)
    except UnicodeDecodeError:
        # TODO: This is only a quick fix
        logger.warn("{} couldn't be translated".format(text))
    return to_utf8(text)
[ "def", "t", "(", "i18n_msg", ")", ":", "text", "=", "to_unicode", "(", "i18n_msg", ")", "try", ":", "request", "=", "api", ".", "get_request", "(", ")", "domain", "=", "getattr", "(", "i18n_msg", ",", "\"domain\"", ",", "\"senaite.core\"", ")", "text", "=", "translate", "(", "text", ",", "domain", "=", "domain", ",", "context", "=", "request", ")", "except", "UnicodeDecodeError", ":", "# TODO: This is only a quick fix", "logger", ".", "warn", "(", "\"{} couldn't be translated\"", ".", "format", "(", "text", ")", ")", "return", "to_utf8", "(", "text", ")" ]
Safely translate and convert to UTF8 any zope i18n msgid returned from a bikaMessageFactory _
[ "Safely", "translate", "and", "convert", "to", "UTF8", "any", "zope", "i18n", "msgid", "returned", "from", "a", "bikaMessageFactory", "_" ]
python
train
umich-brcf-bioinf/Connor
connor/consam/readers.py
https://github.com/umich-brcf-bioinf/Connor/blob/b20e9f36e9730c29eaa27ea5fa8b0151e58d2f13/connor/consam/readers.py#L112-L124
def paired_reader_from_bamfile(args, log, usage_logger, annotated_writer): '''Given a BAM file, return a generator that yields filtered, paired reads''' total_aligns = pysamwrapper.total_align_count(args.input_bam) bamfile_generator = _bamfile_generator(args.input_bam) return _paired_reader(args.umt_length, bamfile_generator, total_aligns, log, usage_logger, annotated_writer)
[ "def", "paired_reader_from_bamfile", "(", "args", ",", "log", ",", "usage_logger", ",", "annotated_writer", ")", ":", "total_aligns", "=", "pysamwrapper", ".", "total_align_count", "(", "args", ".", "input_bam", ")", "bamfile_generator", "=", "_bamfile_generator", "(", "args", ".", "input_bam", ")", "return", "_paired_reader", "(", "args", ".", "umt_length", ",", "bamfile_generator", ",", "total_aligns", ",", "log", ",", "usage_logger", ",", "annotated_writer", ")" ]
Given a BAM file, return a generator that yields filtered, paired reads
[ "Given", "a", "BAM", "file", "return", "a", "generator", "that", "yields", "filtered", "paired", "reads" ]
python
train
raamana/mrivis
mrivis/base.py
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/base.py#L558-L567
def _create_imshow_objects(self):
    """Creates an imshow object for each axis in the grid"""
    # uniform values for initial image can cause weird behaviour with normalization
    # as imshow.set_data() does not automatically update the normalization!!
    # using random data is a better choice
    random_image = np.random.rand(20, 20)
    self.images = [None] * len(self.flat_grid)
    for ix, ax in enumerate(self.flat_grid):
        self.images[ix] = ax.imshow(random_image, **self.display_params)
[ "def", "_create_imshow_objects", "(", "self", ")", ":", "# uniform values for initial image can cause weird behaviour with normalization", "# as imshow.set_data() does not automatically update the normalization!!", "# using random data is a better choice", "random_image", "=", "np", ".", "random", ".", "rand", "(", "20", ",", "20", ")", "self", ".", "images", "=", "[", "None", "]", "*", "len", "(", "self", ".", "flat_grid", ")", "for", "ix", ",", "ax", "in", "enumerate", "(", "self", ".", "flat_grid", ")", ":", "self", ".", "images", "[", "ix", "]", "=", "ax", ".", "imshow", "(", "random_image", ",", "*", "*", "self", ".", "display_params", ")" ]
Creates an imshow object for each axis in the grid
[ "Turns", "off", "all", "the", "x", "and", "y", "axes", "in", "each", "Axis" ]
python
train
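The comment in the record above flags a real matplotlib gotcha: `AxesImage.set_data()` does not rescale the color normalization established at `imshow()` time. A minimal standalone sketch:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(20, 20))      # random init fixes a sane vmin/vmax
im.set_data(np.random.rand(20, 20) * 5.0)   # data now exceeds the old normalization
im.set_clim(0.0, 5.0)                       # so the limits must be updated explicitly
plt.close(fig)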
willthames/ansible-inventory-grapher
lib/ansibleinventorygrapher/inventory.py
https://github.com/willthames/ansible-inventory-grapher/blob/018908594776486a317ef9ed9293a9ef391fe3e9/lib/ansibleinventorygrapher/inventory.py#L121-L135
def _plugins_inventory(self, entities): import os from ansible.plugins.loader import vars_loader from ansible.utils.vars import combine_vars ''' merges all entities by inventory source ''' data = {} for inventory_dir in self.variable_manager._inventory._sources: if ',' in inventory_dir: # skip host lists continue elif not os.path.isdir(inventory_dir): # always pass 'inventory directory' inventory_dir = os.path.dirname(inventory_dir) for plugin in vars_loader.all(): data = combine_vars(data, self._get_plugin_vars(plugin, inventory_dir, entities)) return data
[ "def", "_plugins_inventory", "(", "self", ",", "entities", ")", ":", "import", "os", "from", "ansible", ".", "plugins", ".", "loader", "import", "vars_loader", "from", "ansible", ".", "utils", ".", "vars", "import", "combine_vars", "data", "=", "{", "}", "for", "inventory_dir", "in", "self", ".", "variable_manager", ".", "_inventory", ".", "_sources", ":", "if", "','", "in", "inventory_dir", ":", "# skip host lists", "continue", "elif", "not", "os", ".", "path", ".", "isdir", "(", "inventory_dir", ")", ":", "# always pass 'inventory directory'", "inventory_dir", "=", "os", ".", "path", ".", "dirname", "(", "inventory_dir", ")", "for", "plugin", "in", "vars_loader", ".", "all", "(", ")", ":", "data", "=", "combine_vars", "(", "data", ",", "self", ".", "_get_plugin_vars", "(", "plugin", ",", "inventory_dir", ",", "entities", ")", ")", "return", "data" ]
merges all entities by inventory source
[ "merges", "all", "entities", "by", "inventory", "source" ]
python
train
ptmcg/littletable
littletable.py
https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1087-L1097
def pivot(self, attrlist): """Pivots the data using the given attributes, returning a L{PivotTable}. @param attrlist: list of attributes to be used to construct the pivot table @type attrlist: list of strings, or string of space-delimited attribute names """ if isinstance(attrlist, basestring): attrlist = attrlist.split() if all(a in self._indexes for a in attrlist): return PivotTable(self, [], attrlist) else: raise ValueError("pivot can only be called using indexed attributes")
[ "def", "pivot", "(", "self", ",", "attrlist", ")", ":", "if", "isinstance", "(", "attrlist", ",", "basestring", ")", ":", "attrlist", "=", "attrlist", ".", "split", "(", ")", "if", "all", "(", "a", "in", "self", ".", "_indexes", "for", "a", "in", "attrlist", ")", ":", "return", "PivotTable", "(", "self", ",", "[", "]", ",", "attrlist", ")", "else", ":", "raise", "ValueError", "(", "\"pivot can only be called using indexed attributes\"", ")" ]
Pivots the data using the given attributes, returning a L{PivotTable}. @param attrlist: list of attributes to be used to construct the pivot table @type attrlist: list of strings, or string of space-delimited attribute names
[ "Pivots", "the", "data", "using", "the", "given", "attributes", "returning", "a", "L", "{", "PivotTable", "}", "." ]
python
train
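A small usage sketch for the record above, assuming littletable's `DataObject` helper; the attribute names are invented. The `create_index` call matters because, per the code, `pivot` raises `ValueError` on unindexed attributes.

import littletable as lt

t = lt.Table()
t.insert(lt.DataObject(region='EU', year=2019))
t.insert(lt.DataObject(region='US', year=2019))
t.create_index('region')      # pivot() only accepts indexed attributes
pt = t.pivot('region')        # space-delimited strings also work: t.pivot('region') == t.pivot(['region'])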
apache/airflow
airflow/contrib/hooks/gcp_function_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_function_hook.py#L109-L127
def update_function(self, name, body, update_mask): """ Updates Cloud Functions according to the specified update mask. :param name: The name of the function. :type name: str :param body: The body required by the cloud function patch API. :type body: dict :param update_mask: The update mask - array of fields that should be patched. :type update_mask: [str] :return: None """ response = self.get_conn().projects().locations().functions().patch( updateMask=",".join(update_mask), name=name, body=body ).execute(num_retries=self.num_retries) operation_name = response["name"] self._wait_for_operation_to_complete(operation_name=operation_name)
[ "def", "update_function", "(", "self", ",", "name", ",", "body", ",", "update_mask", ")", ":", "response", "=", "self", ".", "get_conn", "(", ")", ".", "projects", "(", ")", ".", "locations", "(", ")", ".", "functions", "(", ")", ".", "patch", "(", "updateMask", "=", "\",\"", ".", "join", "(", "update_mask", ")", ",", "name", "=", "name", ",", "body", "=", "body", ")", ".", "execute", "(", "num_retries", "=", "self", ".", "num_retries", ")", "operation_name", "=", "response", "[", "\"name\"", "]", "self", ".", "_wait_for_operation_to_complete", "(", "operation_name", "=", "operation_name", ")" ]
Updates Cloud Functions according to the specified update mask. :param name: The name of the function. :type name: str :param body: The body required by the cloud function patch API. :type body: dict :param update_mask: The update mask - array of fields that should be patched. :type update_mask: [str] :return: None
[ "Updates", "Cloud", "Functions", "according", "to", "the", "specified", "update", "mask", "." ]
python
test
peri-source/peri
peri/states.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L273-L288
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs): """ Gradient of `func` wrt a single parameter `p`. (see _graddoc) """ vals = self.get_values(p) f0 = funct(**kwargs) self.update(p, vals+dl) f1 = funct(**kwargs) if rts: self.update(p, vals) if nout == 1: return (f1 - f0) / dl else: return [(f1[i] - f0[i]) / dl for i in range(nout)]
[ "def", "_grad_one_param", "(", "self", ",", "funct", ",", "p", ",", "dl", "=", "2e-5", ",", "rts", "=", "False", ",", "nout", "=", "1", ",", "*", "*", "kwargs", ")", ":", "vals", "=", "self", ".", "get_values", "(", "p", ")", "f0", "=", "funct", "(", "*", "*", "kwargs", ")", "self", ".", "update", "(", "p", ",", "vals", "+", "dl", ")", "f1", "=", "funct", "(", "*", "*", "kwargs", ")", "if", "rts", ":", "self", ".", "update", "(", "p", ",", "vals", ")", "if", "nout", "==", "1", ":", "return", "(", "f1", "-", "f0", ")", "/", "dl", "else", ":", "return", "[", "(", "f1", "[", "i", "]", "-", "f0", "[", "i", "]", ")", "/", "dl", "for", "i", "in", "range", "(", "nout", ")", "]" ]
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
[ "Gradient", "of", "func", "wrt", "a", "single", "parameter", "p", ".", "(", "see", "_graddoc", ")" ]
python
valid
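The record above is a forward-difference derivative specialized to a state object. The same numerics in a self-contained form, where `dl` plays the identical step-size role:

def forward_diff(f, p, dl=2e-5):
    # one-sided estimate of df/dp: (f(p + dl) - f(p)) / dl
    return (f(p + dl) - f(p)) / dl

# derivative of x**2 at 3.0 is 6.0; the estimate is off by about dl
assert abs(forward_diff(lambda x: x ** 2, 3.0) - 6.0) < 1e-3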
zetaops/zengine
zengine/lib/translation.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/lib/translation.py#L328-L361
def _wrap_locale_formatter(fn, locale_type):
    """Wrap a Babel data formatting function to automatically format
    for the currently installed locale."""
    def wrapped_locale_formatter(*args, **kwargs):
        """A Babel formatting function, wrapped to automatically use the
        currently installed language.

        The wrapped function will not throw any exceptions for unknown locales,
        if Babel doesn't recognise the locale, we will simply fall back to
        the default language.

        The locale used by the wrapped function can be overridden by passing it a `locale` keyword.
        To learn more about this function, check the documentation of Babel for the function of
        the same name.
        """
        # Get the current locale from the class
        kwargs_ = {'locale': getattr(InstalledLocale, locale_type)}
        # By creating a dict then updating it, we allow locale to be overridden
        kwargs_.update(kwargs)
        try:
            formatted = fn(*args, **kwargs_)
        except UnknownLocaleError:
            log.warning(
                """Can\'t do formatting for language code {locale},
                falling back to default {default}""".format(
                    locale=kwargs_['locale'], default=settings.DEFAULT_LANG)
            )
            kwargs_['locale'] = settings.DEFAULT_LANG
            formatted = fn(*args, **kwargs_)
        return formatted
    return wrapped_locale_formatter
[ "def", "_wrap_locale_formatter", "(", "fn", ",", "locale_type", ")", ":", "def", "wrapped_locale_formatter", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"A Babel formatting function, wrapped to automatically use the\n currently installed language.\n\n The wrapped function will not throw any exceptions for unknown locales,\n if Babel doesn't recognise the locale, we will simply fall back to\n the default language.\n\n The locale used by the wrapped function can be overriden by passing it a `locale` keyword.\n To learn more about this function, check the documentation of Babel for the function of\n the same name.\n \"\"\"", "# Get the current locale from the class", "kwargs_", "=", "{", "'locale'", ":", "getattr", "(", "InstalledLocale", ",", "locale_type", ")", "}", "# By creating a dict then updating it, we allow locale to be overridden", "kwargs_", ".", "update", "(", "kwargs", ")", "try", ":", "formatted", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs_", ")", "except", "UnknownLocaleError", ":", "log", ".", "warning", "(", "\"\"\"Can\\'t do formatting for language code {locale},\n falling back to default {default}\"\"\"", ".", "format", "(", "locale", "=", "kwargs_", "[", "'locale'", "]", ",", "default", "=", "settings", ".", "DEFAULT_LANG", ")", ")", "kwargs_", "[", "'locale'", "]", "=", "settings", ".", "DEFAULT_LANG", "formatted", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs_", ")", "return", "formatted", "return", "wrapped_locale_formatter" ]
Wrap a Babel data formatting function to automatically format for the currently installed locale.
[ "Wrap", "a", "Babel", "data", "formatting", "function", "to", "automatically", "format", "for", "currently", "installed", "locale", "." ]
python
train
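A stripped-down sketch of the same wrapping idea, dropping the `UnknownLocaleError` fallback for brevity. `babel.dates.format_date` is a real Babel formatter; the wrapper name and locale default here are illustrative.

from datetime import date
from babel.dates import format_date

def with_default_locale(fn, default_locale='en'):
    def wrapped(*args, **kwargs):
        kwargs.setdefault('locale', default_locale)  # callers may still override
        return fn(*args, **kwargs)
    return wrapped

my_format_date = with_default_locale(format_date)
print(my_format_date(date(2020, 1, 2)))  # formatted for 'en' without passing locale=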
glitchassassin/lackey
lackey/SettingsDebug.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/SettingsDebug.py#L23-L33
def user(self, message): """ Creates a user log (if user logging is turned on) Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT Note: Does *not* use Java string formatting like Sikuli. Format your message with Python ``basestring.format()`` instead. """ if Settings.UserLogs: self._write_log(Settings.UserLogPrefix, Settings.UserLogTime, message)
[ "def", "user", "(", "self", ",", "message", ")", ":", "if", "Settings", ".", "UserLogs", ":", "self", ".", "_write_log", "(", "Settings", ".", "UserLogPrefix", ",", "Settings", ".", "UserLogTime", ",", "message", ")" ]
Creates a user log (if user logging is turned on) Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT Note: Does *not* use Java string formatting like Sikuli. Format your message with Python ``basestring.format()`` instead.
[ "Creates", "a", "user", "log", "(", "if", "user", "logging", "is", "turned", "on", ")" ]
python
train
binux/pyspider
pyspider/scheduler/task_queue.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/task_queue.py#L190-L225
def put(self, taskid, priority=0, exetime=0):
    """
    Put a task into the task queue.

    When using heap sort, if we put tasks (with the same priority and
    exetime=0) into the queue, the queue is not a strict FIFO queue, but
    more like a FILO stack. It is very possible that when there is a
    continuous big flow, the speed of select is slower than the speed of
    requests, resulting in priority-queue accumulation in a short time.
    In this scenario, the tasks that entered the priority-queue earlier
    will not get processed until the request flow becomes small.

    Thus, we store a global, atomically increasing value in
    task.sequence, which represents the task enqueue sequence. When the
    comparison of exetime and priority shows no difference, we compare
    task.sequence to ensure that the entire queue is ordered.
    """
    now = time.time()
    task = InQueueTask(taskid, priority, exetime)
    self.mutex.acquire()
    if taskid in self.priority_queue:
        self.priority_queue.put(task)
    elif taskid in self.time_queue:
        self.time_queue.put(task)
    elif taskid in self.processing and self.processing[taskid].taskid:
        # force-updating a processing task is not allowed, as too many
        # problems may happen
        pass
    else:
        if exetime and exetime > now:
            self.time_queue.put(task)
        else:
            task.exetime = 0
            self.priority_queue.put(task)
    self.mutex.release()
[ "def", "put", "(", "self", ",", "taskid", ",", "priority", "=", "0", ",", "exetime", "=", "0", ")", ":", "now", "=", "time", ".", "time", "(", ")", "task", "=", "InQueueTask", "(", "taskid", ",", "priority", ",", "exetime", ")", "self", ".", "mutex", ".", "acquire", "(", ")", "if", "taskid", "in", "self", ".", "priority_queue", ":", "self", ".", "priority_queue", ".", "put", "(", "task", ")", "elif", "taskid", "in", "self", ".", "time_queue", ":", "self", ".", "time_queue", ".", "put", "(", "task", ")", "elif", "taskid", "in", "self", ".", "processing", "and", "self", ".", "processing", "[", "taskid", "]", ".", "taskid", ":", "# force update a processing task is not allowed as there are so many", "# problems may happen", "pass", "else", ":", "if", "exetime", "and", "exetime", ">", "now", ":", "self", ".", "time_queue", ".", "put", "(", "task", ")", "else", ":", "task", ".", "exetime", "=", "0", "self", ".", "priority_queue", ".", "put", "(", "task", ")", "self", ".", "mutex", ".", "release", "(", ")" ]
Put a task into the task queue.

When using heap sort, if we put tasks (with the same priority and exetime=0) into the queue, the queue is not a strict FIFO queue, but more like a FILO stack. It is very possible that when there is a continuous big flow, the speed of select is slower than the speed of requests, resulting in priority-queue accumulation in a short time. In this scenario, the tasks that entered the priority-queue earlier will not get processed until the request flow becomes small.

Thus, we store a global, atomically increasing value in task.sequence, which represents the task enqueue sequence. When the comparison of exetime and priority shows no difference, we compare task.sequence to ensure that the entire queue is ordered.
[ "Put", "a", "task", "into", "task", "queue", "when", "use", "heap", "sort", "if", "we", "put", "tasks", "(", "with", "the", "same", "priority", "and", "exetime", "=", "0", ")", "into", "queue", "the", "queue", "is", "not", "a", "strict", "FIFO", "queue", "but", "more", "like", "a", "FILO", "stack", ".", "It", "is", "very", "possible", "that", "when", "there", "are", "continuous", "big", "flow", "the", "speed", "of", "select", "is", "slower", "than", "request", "resulting", "in", "priority", "-", "queue", "accumulation", "in", "short", "time", ".", "In", "this", "scenario", "the", "tasks", "more", "earlier", "entering", "the", "priority", "-", "queue", "will", "not", "get", "processed", "until", "the", "request", "flow", "becomes", "small", ".", "Thus", "we", "store", "a", "global", "atom", "self", "increasing", "value", "into", "task", ".", "sequence", "which", "represent", "the", "task", "enqueue", "sequence", ".", "When", "the", "comparison", "of", "exetime", "and", "priority", "have", "no", "difference", "we", "compare", "task", ".", "sequence", "to", "ensure", "that", "the", "entire", "queue", "is", "ordered", "." ]
python
train
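The sequence-number trick described in the docstring above can be shown with `heapq` directly: tuples compare lexicographically, so a monotonically increasing counter breaks priority ties in insertion order.

import heapq
import itertools

seq = itertools.count()   # the global, monotonically increasing value
heap = []
heapq.heappush(heap, (0, next(seq), 'task-a'))   # same priority ...
heapq.heappush(heap, (0, next(seq), 'task-b'))
assert heapq.heappop(heap)[2] == 'task-a'        # ... still popped FIFO, not FILO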
zalando/patroni
patroni/dcs/__init__.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/dcs/__init__.py#L376-L388
def from_node(index, value): """ >>> h = TimelineHistory.from_node(1, 2) >>> h.lines [] """ try: lines = json.loads(value) except (TypeError, ValueError): lines = None if not isinstance(lines, list): lines = [] return TimelineHistory(index, value, lines)
[ "def", "from_node", "(", "index", ",", "value", ")", ":", "try", ":", "lines", "=", "json", ".", "loads", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "lines", "=", "None", "if", "not", "isinstance", "(", "lines", ",", "list", ")", ":", "lines", "=", "[", "]", "return", "TimelineHistory", "(", "index", ",", "value", ",", "lines", ")" ]
>>> h = TimelineHistory.from_node(1, 2) >>> h.lines []
[ ">>>", "h", "=", "TimelineHistory", ".", "from_node", "(", "1", "2", ")", ">>>", "h", ".", "lines", "[]" ]
python
train
a1ezzz/wasp-general
wasp_general/crypto/hmac.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/crypto/hmac.py#L69-L80
def hash(self, key, message=None): """ Return digest of the given message and key :param key: secret HMAC key :param message: code (message) to authenticate :return: bytes """ hmac_obj = hmac.HMAC(key, self.__digest_generator, backend=default_backend()) if message is not None: hmac_obj.update(message) return hmac_obj.finalize()
[ "def", "hash", "(", "self", ",", "key", ",", "message", "=", "None", ")", ":", "hmac_obj", "=", "hmac", ".", "HMAC", "(", "key", ",", "self", ".", "__digest_generator", ",", "backend", "=", "default_backend", "(", ")", ")", "if", "message", "is", "not", "None", ":", "hmac_obj", ".", "update", "(", "message", ")", "return", "hmac_obj", ".", "finalize", "(", ")" ]
Return digest of the given message and key :param key: secret HMAC key :param message: code (message) to authenticate :return: bytes
[ "Return", "digest", "of", "the", "given", "message", "and", "key" ]
python
train
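A standalone sketch of the same call sequence using the `cryptography` package directly, assuming SHA-256 as the digest generator (the record leaves `__digest_generator` unspecified):

from cryptography.hazmat.primitives import hashes, hmac

h = hmac.HMAC(b'secret-key', hashes.SHA256())   # older versions also took backend=default_backend(), as above
h.update(b'message to authenticate')
digest = h.finalize()
assert len(digest) == 32                        # SHA-256 digests are 32 bytes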
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/generators.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/generators.py#L301-L325
def match_rank (self, ps):
    """ Returns true if the generator can be run with the specified
        properties.
    """
    # See if generator's requirements are satisfied by
    # 'properties'.  Treat a feature name in requirements
    # (i.e. grist-only element), as matching any value of the
    # feature.
    assert isinstance(ps, property_set.PropertySet)
    all_requirements = self.requirements ()

    property_requirements = []
    feature_requirements = []
    # This uses strings because generator requirements allow
    # the '<feature>' syntax without value and regular validation
    # is not happy about that.
    for r in all_requirements:
        if get_value (r):
            property_requirements.append (r)
        else:
            feature_requirements.append (r)

    return all(ps.get(get_grist(s)) == [get_value(s)] for s in property_requirements) \
        and all(ps.get(get_grist(s)) for s in feature_requirements)
[ "def", "match_rank", "(", "self", ",", "ps", ")", ":", "# See if generator's requirements are satisfied by", "# 'properties'. Treat a feature name in requirements", "# (i.e. grist-only element), as matching any value of the", "# feature.", "assert", "isinstance", "(", "ps", ",", "property_set", ".", "PropertySet", ")", "all_requirements", "=", "self", ".", "requirements", "(", ")", "property_requirements", "=", "[", "]", "feature_requirements", "=", "[", "]", "# This uses strings because genenator requirements allow", "# the '<feature>' syntax without value and regular validation", "# is not happy about that.", "for", "r", "in", "all_requirements", ":", "if", "get_value", "(", "r", ")", ":", "property_requirements", ".", "append", "(", "r", ")", "else", ":", "feature_requirements", ".", "append", "(", "r", ")", "return", "all", "(", "ps", ".", "get", "(", "get_grist", "(", "s", ")", ")", "==", "[", "get_value", "(", "s", ")", "]", "for", "s", "in", "property_requirements", ")", "and", "all", "(", "ps", ".", "get", "(", "get_grist", "(", "s", ")", ")", "for", "s", "in", "feature_requirements", ")" ]
Returns true if the generator can be run with the specified properties.
[ "Returns", "true", "if", "the", "generator", "can", "be", "run", "with", "the", "specified", "properties", "." ]
python
train
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L4183-L4201
def block_range(self, lineno): """Get a range from the given line number to where this node ends. :param lineno: The line number to start the range at. :type lineno: int :returns: The range of line numbers that this node belongs to, starting at the given line number. :rtype: tuple(int, int) """ child = self.body[0] # py2.5 try: except: finally: if ( isinstance(child, TryExcept) and child.fromlineno == self.fromlineno and child.tolineno >= lineno > self.fromlineno ): return child.block_range(lineno) return self._elsed_block_range(lineno, self.finalbody)
[ "def", "block_range", "(", "self", ",", "lineno", ")", ":", "child", "=", "self", ".", "body", "[", "0", "]", "# py2.5 try: except: finally:", "if", "(", "isinstance", "(", "child", ",", "TryExcept", ")", "and", "child", ".", "fromlineno", "==", "self", ".", "fromlineno", "and", "child", ".", "tolineno", ">=", "lineno", ">", "self", ".", "fromlineno", ")", ":", "return", "child", ".", "block_range", "(", "lineno", ")", "return", "self", ".", "_elsed_block_range", "(", "lineno", ",", "self", ".", "finalbody", ")" ]
Get a range from the given line number to where this node ends. :param lineno: The line number to start the range at. :type lineno: int :returns: The range of line numbers that this node belongs to, starting at the given line number. :rtype: tuple(int, int)
[ "Get", "a", "range", "from", "the", "given", "line", "number", "to", "where", "this", "node", "ends", "." ]
python
train
openstack/python-monascaclient
monascaclient/v2_0/shell.py
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/shell.py#L788-L812
def do_alarm_definition_create(mc, args): '''Create an alarm definition.''' fields = {} fields['name'] = args.name if args.description: fields['description'] = args.description fields['expression'] = args.expression if args.alarm_actions: fields['alarm_actions'] = args.alarm_actions if args.ok_actions: fields['ok_actions'] = args.ok_actions if args.undetermined_actions: fields['undetermined_actions'] = args.undetermined_actions if args.severity: if not _validate_severity(args.severity): return fields['severity'] = args.severity if args.match_by: fields['match_by'] = args.match_by.split(',') try: alarm = mc.alarm_definitions.create(**fields) except (osc_exc.ClientException, k_exc.HttpError) as he: raise osc_exc.CommandError('%s\n%s' % (he.message, he.details)) else: print(jsonutils.dumps(alarm, indent=2))
[ "def", "do_alarm_definition_create", "(", "mc", ",", "args", ")", ":", "fields", "=", "{", "}", "fields", "[", "'name'", "]", "=", "args", ".", "name", "if", "args", ".", "description", ":", "fields", "[", "'description'", "]", "=", "args", ".", "description", "fields", "[", "'expression'", "]", "=", "args", ".", "expression", "if", "args", ".", "alarm_actions", ":", "fields", "[", "'alarm_actions'", "]", "=", "args", ".", "alarm_actions", "if", "args", ".", "ok_actions", ":", "fields", "[", "'ok_actions'", "]", "=", "args", ".", "ok_actions", "if", "args", ".", "undetermined_actions", ":", "fields", "[", "'undetermined_actions'", "]", "=", "args", ".", "undetermined_actions", "if", "args", ".", "severity", ":", "if", "not", "_validate_severity", "(", "args", ".", "severity", ")", ":", "return", "fields", "[", "'severity'", "]", "=", "args", ".", "severity", "if", "args", ".", "match_by", ":", "fields", "[", "'match_by'", "]", "=", "args", ".", "match_by", ".", "split", "(", "','", ")", "try", ":", "alarm", "=", "mc", ".", "alarm_definitions", ".", "create", "(", "*", "*", "fields", ")", "except", "(", "osc_exc", ".", "ClientException", ",", "k_exc", ".", "HttpError", ")", "as", "he", ":", "raise", "osc_exc", ".", "CommandError", "(", "'%s\\n%s'", "%", "(", "he", ".", "message", ",", "he", ".", "details", ")", ")", "else", ":", "print", "(", "jsonutils", ".", "dumps", "(", "alarm", ",", "indent", "=", "2", ")", ")" ]
Create an alarm definition.
[ "Create", "an", "alarm", "definition", "." ]
python
train
chrislit/abydos
abydos/stats/_confusion_table.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/stats/_confusion_table.py#L1263-L1302
def mcc(self): r"""Return Matthews correlation coefficient (MCC). The Matthews correlation coefficient is defined in :cite:`Matthews:1975` as: :math:`\frac{(tp \cdot tn) - (fp \cdot fn)} {\sqrt{(tp + fp)(tp + fn)(tn + fp)(tn + fn)}}` This is equivalent to the geometric mean of informedness and markedness, defined above. Cf. https://en.wikipedia.org/wiki/Matthews_correlation_coefficient Returns ------- float The Matthews correlation coefficient of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.mcc() 0.5367450401216932 """ if ( ( (self._tp + self._fp) * (self._tp + self._fn) * (self._tn + self._fp) * (self._tn + self._fn) ) ) == 0: return float('NaN') return ((self._tp * self._tn) - (self._fp * self._fn)) / math.sqrt( (self._tp + self._fp) * (self._tp + self._fn) * (self._tn + self._fp) * (self._tn + self._fn) )
[ "def", "mcc", "(", "self", ")", ":", "if", "(", "(", "(", "self", ".", "_tp", "+", "self", ".", "_fp", ")", "*", "(", "self", ".", "_tp", "+", "self", ".", "_fn", ")", "*", "(", "self", ".", "_tn", "+", "self", ".", "_fp", ")", "*", "(", "self", ".", "_tn", "+", "self", ".", "_fn", ")", ")", ")", "==", "0", ":", "return", "float", "(", "'NaN'", ")", "return", "(", "(", "self", ".", "_tp", "*", "self", ".", "_tn", ")", "-", "(", "self", ".", "_fp", "*", "self", ".", "_fn", ")", ")", "/", "math", ".", "sqrt", "(", "(", "self", ".", "_tp", "+", "self", ".", "_fp", ")", "*", "(", "self", ".", "_tp", "+", "self", ".", "_fn", ")", "*", "(", "self", ".", "_tn", "+", "self", ".", "_fp", ")", "*", "(", "self", ".", "_tn", "+", "self", ".", "_fn", ")", ")" ]
r"""Return Matthews correlation coefficient (MCC). The Matthews correlation coefficient is defined in :cite:`Matthews:1975` as: :math:`\frac{(tp \cdot tn) - (fp \cdot fn)} {\sqrt{(tp + fp)(tp + fn)(tn + fp)(tn + fn)}}` This is equivalent to the geometric mean of informedness and markedness, defined above. Cf. https://en.wikipedia.org/wiki/Matthews_correlation_coefficient Returns ------- float The Matthews correlation coefficient of the confusion table Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.mcc() 0.5367450401216932
[ "r", "Return", "Matthews", "correlation", "coefficient", "(", "MCC", ")", "." ]
python
valid
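The doctest value above can be reproduced with a few lines of standalone arithmetic, taking the constructor order as (tp, tn, fp, fn), which is what reproduces the doctest:

from math import sqrt

def mcc(tp, tn, fp, fn):
    denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    if denom == 0:
        return float('nan')
    return (tp * tn - fp * fn) / sqrt(denom)

# (120*60 - 20*30) / sqrt(140 * 150 * 80 * 90)
print(mcc(120, 60, 20, 30))   # 0.5367450401216932, matching the doctest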
opencobra/cobrapy
cobra/sampling/hr_sampler.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/sampling/hr_sampler.py#L369-L383
def _bounds_dist(self, p): """Get the lower and upper bound distances. Negative is bad.""" prob = self.problem lb_dist = (p - prob.variable_bounds[0, ]).min() ub_dist = (prob.variable_bounds[1, ] - p).min() if prob.bounds.shape[0] > 0: const = prob.inequalities.dot(p) const_lb_dist = (const - prob.bounds[0, ]).min() const_ub_dist = (prob.bounds[1, ] - const).min() lb_dist = min(lb_dist, const_lb_dist) ub_dist = min(ub_dist, const_ub_dist) return np.array([lb_dist, ub_dist])
[ "def", "_bounds_dist", "(", "self", ",", "p", ")", ":", "prob", "=", "self", ".", "problem", "lb_dist", "=", "(", "p", "-", "prob", ".", "variable_bounds", "[", "0", ",", "]", ")", ".", "min", "(", ")", "ub_dist", "=", "(", "prob", ".", "variable_bounds", "[", "1", ",", "]", "-", "p", ")", ".", "min", "(", ")", "if", "prob", ".", "bounds", ".", "shape", "[", "0", "]", ">", "0", ":", "const", "=", "prob", ".", "inequalities", ".", "dot", "(", "p", ")", "const_lb_dist", "=", "(", "const", "-", "prob", ".", "bounds", "[", "0", ",", "]", ")", ".", "min", "(", ")", "const_ub_dist", "=", "(", "prob", ".", "bounds", "[", "1", ",", "]", "-", "const", ")", ".", "min", "(", ")", "lb_dist", "=", "min", "(", "lb_dist", ",", "const_lb_dist", ")", "ub_dist", "=", "min", "(", "ub_dist", ",", "const_ub_dist", ")", "return", "np", ".", "array", "(", "[", "lb_dist", ",", "ub_dist", "]", ")" ]
Get the lower and upper bound distances. Negative is bad.
[ "Get", "the", "lower", "and", "upper", "bound", "distances", ".", "Negative", "is", "bad", "." ]
python
valid
diging/tethne
tethne/classes/graphcollection.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/graphcollection.py#L213-L235
def edges(self, data=False, native=True): """ Returns a list of all edges in the :class:`.GraphCollection`\. Parameters ---------- data : bool (default: False) If True, returns a list of 3-tuples containing source and target node labels, and attributes. Returns ------- edges : list """ edges = self.master_graph.edges(data=data) if native: if data: edges = [(self.node_index[s], self.node_index[t], attrs) for s, t, attrs in edges] else: edges = [(self.node_index[s], self.node_index[t]) for s, t in edges] return edges
[ "def", "edges", "(", "self", ",", "data", "=", "False", ",", "native", "=", "True", ")", ":", "edges", "=", "self", ".", "master_graph", ".", "edges", "(", "data", "=", "data", ")", "if", "native", ":", "if", "data", ":", "edges", "=", "[", "(", "self", ".", "node_index", "[", "s", "]", ",", "self", ".", "node_index", "[", "t", "]", ",", "attrs", ")", "for", "s", ",", "t", ",", "attrs", "in", "edges", "]", "else", ":", "edges", "=", "[", "(", "self", ".", "node_index", "[", "s", "]", ",", "self", ".", "node_index", "[", "t", "]", ")", "for", "s", ",", "t", "in", "edges", "]", "return", "edges" ]
Returns a list of all edges in the :class:`.GraphCollection`\. Parameters ---------- data : bool (default: False) If True, returns a list of 3-tuples containing source and target node labels, and attributes. Returns ------- edges : list
[ "Returns", "a", "list", "of", "all", "edges", "in", "the", ":", "class", ":", ".", "GraphCollection", "\\", "." ]
python
train
sprockets/sprockets.mixins.metrics
sprockets/mixins/metrics/statsd.py
https://github.com/sprockets/sprockets.mixins.metrics/blob/0b17d5f0c09a2be9db779e17e6789d3d5ff9a0d0/sprockets/mixins/metrics/statsd.py#L51-L66
def execution_timer(self, *path): """ Record the time it takes to perform an arbitrary code block. :param path: elements of the metric path to record This method returns a context manager that records the amount of time spent inside of the context and submits a timing metric to the specified `path` using (:meth:`record_timing`). """ start = time.time() try: yield finally: self.record_timing(max(start, time.time()) - start, *path)
[ "def", "execution_timer", "(", "self", ",", "*", "path", ")", ":", "start", "=", "time", ".", "time", "(", ")", "try", ":", "yield", "finally", ":", "self", ".", "record_timing", "(", "max", "(", "start", ",", "time", ".", "time", "(", ")", ")", "-", "start", ",", "*", "path", ")" ]
Record the time it takes to perform an arbitrary code block. :param path: elements of the metric path to record This method returns a context manager that records the amount of time spent inside of the context and submits a timing metric to the specified `path` using (:meth:`record_timing`).
[ "Record", "the", "time", "it", "takes", "to", "perform", "an", "arbitrary", "code", "block", "." ]
python
train
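The same pattern in plain Python, using contextlib; the `record` callback stands in for `record_timing` above. The `max(start, time.time())` guard keeps the reported duration non-negative if the clock steps backwards.

import time
from contextlib import contextmanager

@contextmanager
def execution_timer(record):
    start = time.time()
    try:
        yield
    finally:
        record(max(start, time.time()) - start)

durations = []
with execution_timer(durations.append):
    time.sleep(0.01)
assert durations[0] > 0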
gwpy/gwpy
gwpy/frequencyseries/frequencyseries.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L133-L163
def read(cls, source, *args, **kwargs): """Read data into a `FrequencySeries` Arguments and keywords depend on the output format, see the online documentation for full details for each format, the parameters below are common to most formats. Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. *args Other arguments are (in general) specific to the given ``format``. format : `str`, optional Source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. **kwargs Other keywords are (in general) specific to the given ``format``. Notes -----""" return io_registry.read(cls, source, *args, **kwargs)
[ "def", "read", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "io_registry", ".", "read", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Read data into a `FrequencySeries` Arguments and keywords depend on the output format, see the online documentation for full details for each format, the parameters below are common to most formats. Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. *args Other arguments are (in general) specific to the given ``format``. format : `str`, optional Source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. **kwargs Other keywords are (in general) specific to the given ``format``. Notes -----
[ "Read", "data", "into", "a", "FrequencySeries" ]
python
train
martinpitt/python-dbusmock
dbusmock/mockobject.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/mockobject.py#L165-L192
def Set(self, interface_name, property_name, value, *args, **kwargs): '''Standard D-Bus API for setting a property value''' self.log('Set %s.%s%s' % (interface_name, property_name, self.format_args((value,)))) try: iface_props = self.props[interface_name] except KeyError: raise dbus.exceptions.DBusException( 'no such interface ' + interface_name, name=self.interface + '.UnknownInterface') if property_name not in iface_props: raise dbus.exceptions.DBusException( 'no such property ' + property_name, name=self.interface + '.UnknownProperty') iface_props[property_name] = value self.EmitSignal('org.freedesktop.DBus.Properties', 'PropertiesChanged', 'sa{sv}as', [interface_name, dbus.Dictionary({property_name: value}, signature='sv'), dbus.Array([], signature='s') ])
[ "def", "Set", "(", "self", ",", "interface_name", ",", "property_name", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "log", "(", "'Set %s.%s%s'", "%", "(", "interface_name", ",", "property_name", ",", "self", ".", "format_args", "(", "(", "value", ",", ")", ")", ")", ")", "try", ":", "iface_props", "=", "self", ".", "props", "[", "interface_name", "]", "except", "KeyError", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'no such interface '", "+", "interface_name", ",", "name", "=", "self", ".", "interface", "+", "'.UnknownInterface'", ")", "if", "property_name", "not", "in", "iface_props", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'no such property '", "+", "property_name", ",", "name", "=", "self", ".", "interface", "+", "'.UnknownProperty'", ")", "iface_props", "[", "property_name", "]", "=", "value", "self", ".", "EmitSignal", "(", "'org.freedesktop.DBus.Properties'", ",", "'PropertiesChanged'", ",", "'sa{sv}as'", ",", "[", "interface_name", ",", "dbus", ".", "Dictionary", "(", "{", "property_name", ":", "value", "}", ",", "signature", "=", "'sv'", ")", ",", "dbus", ".", "Array", "(", "[", "]", ",", "signature", "=", "'s'", ")", "]", ")" ]
Standard D-Bus API for setting a property value
[ "Standard", "D", "-", "Bus", "API", "for", "setting", "a", "property", "value" ]
python
train
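A hedged client-side sketch of exercising `Set` on a python-dbusmock object; the bus name, object path, and property names are placeholders for whatever mock the test spawned.

import dbus

bus = dbus.SessionBus()
obj = bus.get_object('com.example.Service', '/com/example/Service')
props = dbus.Interface(obj, dbus_interface='org.freedesktop.DBus.Properties')

# Stores the value on the mock and triggers the PropertiesChanged
# emission shown in the method body above.
props.Set('com.example.Service', 'Volume', dbus.Int32(7))

# An unknown interface or property name comes back as a
# dbus.exceptions.DBusException whose error name ends in
# .UnknownInterface / .UnknownProperty, matching the raises above.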
tensorflow/hub
tensorflow_hub/resolver.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L83-L86
def create_local_module_dir(cache_dir, module_name): """Creates and returns the name of directory where to cache a module.""" tf_v1.gfile.MakeDirs(cache_dir) return os.path.join(cache_dir, module_name)
[ "def", "create_local_module_dir", "(", "cache_dir", ",", "module_name", ")", ":", "tf_v1", ".", "gfile", ".", "MakeDirs", "(", "cache_dir", ")", "return", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "module_name", ")" ]
Creates and returns the name of directory where to cache a module.
[ "Creates", "and", "returns", "the", "name", "of", "directory", "where", "to", "cache", "a", "module", "." ]
python
train
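A minimal sketch of the helper above; the cache path and module name are made up.

# MakeDirs is recursive and tolerates an existing directory, so this is
# safe to call repeatedly; the module name is simply joined onto the cache.
path = create_local_module_dir('/tmp/tfhub_modules', 'nnlm-en-dim128')
assert path == '/tmp/tfhub_modules/nnlm-en-dim128'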
bramwelt/field
field/__init__.py
https://github.com/bramwelt/field/blob/05f38170d080fb48e76aa984bf4aa6b3d05ea6dc/field/__init__.py#L49-L71
def column_converter(string):
    """
    Converts column arguments to integers.

    - Accepts columns in form of INT, or the range INT-INT.
    - Returns a list of one or more integers.
    """
    column = string.strip(',')
    if '-' in column:
        # list(...) keeps this working on Python 3, where map() returns
        # a lazy iterator that cannot be indexed.
        column_range = list(map(int, column.split('-')))
        # For decreasing ranges, increment the larger value, reverse the
        # pair before passing it to range (so range accepts the input),
        # and finally reverse the output ([::-1])
        if column_range[0] > column_range[1]:
            column_range[0] += 1
            return [i for i in range(*column_range[::-1])][::-1]
        # For normal ranges, increment the larger value.
        column_range[1] += 1
        return [i for i in range(*column_range)]
    if ',' in column:
        columns = column.split(',')
        return list(map(int, columns))
    return [int(column)]
[ "def", "column_converter", "(", "string", ")", ":", "column", "=", "string", ".", "strip", "(", "','", ")", "if", "'-'", "in", "column", ":", "column_range", "=", "map", "(", "int", ",", "column", ".", "split", "(", "'-'", ")", ")", "# For decreasing ranges, increment the larger value, reverse the", "# passing to range (so it will accept the input), and finally", "# reverse the output ([::-1])", "if", "column_range", "[", "0", "]", ">", "column_range", "[", "1", "]", ":", "column_range", "[", "0", "]", "+=", "1", "return", "[", "i", "for", "i", "in", "range", "(", "*", "column_range", "[", ":", ":", "-", "1", "]", ")", "]", "[", ":", ":", "-", "1", "]", "# For normal ranges, increment the larger value.", "column_range", "[", "1", "]", "+=", "1", "return", "[", "i", "for", "i", "in", "range", "(", "*", "column_range", ")", "]", "if", "','", "in", "column", ":", "columns", "=", "column", ".", "split", "(", "','", ")", "return", "map", "(", "int", ",", "columns", ")", "return", "[", "int", "(", "column", ")", "]" ]
Converts column arguments to integers. - Accepts columns in form of INT, or the range INT-INT. - Returns a list of one or more integers.
[ "Converts", "column", "arguments", "to", "integers", "." ]
python
train
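A behaviour sketch for `column_converter`, using the `list(map(...))` form above so the results also hold on Python 3; expected values are shown as comments.

column_converter('7')       # -> [7]
column_converter('2-5')     # -> [2, 3, 4, 5]
column_converter('5-2')     # -> [5, 4, 3, 2]   (decreasing-range branch)
column_converter('1,4,9')   # -> [1, 4, 9]
column_converter('3,')      # -> [3]            (stray commas are stripped)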
alex-kostirin/pyatomac
atomac/AXClasses.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L656-L671
def waitForFocusToMatchCriteria(self, timeout=10, **kwargs): """Convenience method to wait for focused element to change (to element matching kwargs criteria). Returns: Element or None """ def _matchFocused(retelem, **kwargs): return retelem if retelem._match(**kwargs) else None retelem = None return self._waitFor(timeout, 'AXFocusedUIElementChanged', callback=_matchFocused, args=(retelem,), **kwargs)
[ "def", "waitForFocusToMatchCriteria", "(", "self", ",", "timeout", "=", "10", ",", "*", "*", "kwargs", ")", ":", "def", "_matchFocused", "(", "retelem", ",", "*", "*", "kwargs", ")", ":", "return", "retelem", "if", "retelem", ".", "_match", "(", "*", "*", "kwargs", ")", "else", "None", "retelem", "=", "None", "return", "self", ".", "_waitFor", "(", "timeout", ",", "'AXFocusedUIElementChanged'", ",", "callback", "=", "_matchFocused", ",", "args", "=", "(", "retelem", ",", ")", ",", "*", "*", "kwargs", ")" ]
Convenience method to wait for focused element to change (to element matching kwargs criteria). Returns: Element or None
[ "Convenience", "method", "to", "wait", "for", "focused", "element", "to", "change", "(", "to", "element", "matching", "kwargs", "criteria", ")", "." ]
python
valid
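A hedged usage sketch; the application name and match criteria are hypothetical, and `getAppRefByLocalizedName` is assumed to behave as in upstream atomac.

import atomac

app = atomac.getAppRefByLocalizedName('TextEdit')
# Blocks (up to `timeout` seconds) until focus moves to an element
# matching the criteria; returns that element, or None on timeout.
field = app.waitForFocusToMatchCriteria(timeout=5, AXRole='AXTextArea')
if field is None:
    print('focus never matched the criteria')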
yyuu/botornado
botornado/s3/bucket.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/s3/bucket.py#L175-L213
def delete_key(self, key_name, headers=None, version_id=None,
               mfa_token=None, callback=None):
    """
    Deletes a key from the bucket. If a version_id is provided,
    only that version of the key will be deleted.

    :type key_name: string
    :param key_name: The key name to delete

    :type version_id: string
    :param version_id: The version ID (optional)

    :type mfa_token: tuple or list of strings
    :param mfa_token: A tuple or list consisting of the serial number
                      from the MFA device and the current value of
                      the six-digit token associated with the device.
                      This value is required anytime you are deleting
                      versioned objects from a bucket that has the
                      MFADelete option enabled.
    """
    provider = self.connection.provider
    if version_id:
        query_args = 'versionId=%s' % version_id
    else:
        query_args = None
    if mfa_token:
        if not headers:
            headers = {}
        headers[provider.mfa_header] = ' '.join(mfa_token)
    def key_deleted(response):
        body = response.read()
        if response.status != 204:
            raise provider.storage_response_error(response.status,
                                                  response.reason, body)
        if callable(callback):
            callback(True)
    self.connection.make_request('DELETE', self.name, key_name,
                                 headers=headers,
                                 query_args=query_args,
                                 callback=key_deleted)
[ "def", "delete_key", "(", "self", ",", "key_name", ",", "headers", "=", "None", ",", "version_id", "=", "None", ",", "mfa_token", "=", "None", ",", "callback", "=", "None", ")", ":", "provider", "=", "self", ".", "connection", ".", "provider", "if", "version_id", ":", "query_args", "=", "'versionId=%s'", "%", "version_id", "else", ":", "query_args", "=", "None", "if", "mfa_token", ":", "if", "not", "headers", ":", "headers", "=", "{", "}", "headers", "[", "provider", ".", "mfa_header", "]", "=", "' '", ".", "join", "(", "mfa_token", ")", "def", "key_deleted", "(", "response", ")", ":", "body", "=", "response", ".", "read", "(", ")", "if", "response", ".", "status", "!=", "204", ":", "raise", "provider", ".", "storage_response_error", "(", "response", ".", "status", ",", "response", ".", "reason", ",", "body", ")", "if", "callable", "(", "callback", ")", ":", "callback", "(", "True", ")", "self", ".", "connection", ".", "make_request", "(", "'DELETE'", ",", "self", ".", "name", ",", "key_name", ",", "headers", "=", "headers", ",", "query_args", "=", "query_args", ",", "callback", "=", "key_deleted", ")" ]
Deletes a key from the bucket. If a version_id is provided,
only that version of the key will be deleted.

:type key_name: string
:param key_name: The key name to delete

:type version_id: string
:param version_id: The version ID (optional)

:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial number
                  from the MFA device and the current value of
                  the six-digit token associated with the device.
                  This value is required anytime you are deleting
                  versioned objects from a bucket that has the
                  MFADelete option enabled.
[ "Deletes", "a", "key", "from", "the", "bucket", ".", "If", "a", "version_id", "is", "provided", "only", "that", "version", "of", "the", "key", "will", "be", "deleted", ".", ":", "type", "key_name", ":", "string", ":", "param", "key_name", ":", "The", "key", "name", "to", "delete" ]
python
train
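A sketch of the callback-style API above; `bucket` stands in for an already-fetched botornado bucket, and the version ID, MFA serial, and token are invented placeholders.

def on_deleted(result):
    # botornado calls this with True once S3 answers 204 No Content.
    print('deleted:', result)

# Simple delete.
bucket.delete_key('logs/2012-01-01.txt', callback=on_deleted)

# Deleting a single version from an MFADelete-enabled bucket.
bucket.delete_key('report.pdf',
                  version_id='3HL4kqtJlcpXrof3vjVBH40Nrjfkd',
                  mfa_token=('arn:aws:iam::123456789012:mfa/alice', '123456'),
                  callback=on_deleted)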