Dataset schema (column name, type, and value statistics):

column            type           values
repo              stringlengths  7–54
path              stringlengths  4–192
url               stringlengths  87–284
code              stringlengths  78–104k
code_tokens       sequence
docstring         stringlengths  1–46.9k
docstring_tokens  sequence
language          stringclasses  1 value
partition         stringclasses  3 values
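The records below follow this schema. As a quick orientation, here is a minimal loading sketch, assuming the corpus is published as a Hugging Face dataset; the dataset identifier below is a hypothetical placeholder, not taken from this page.

# Hypothetical loading sketch -- the dataset name is a placeholder, not from this page.
from datasets import load_dataset

ds = load_dataset("org-name/code-docstring-corpus", split="train")  # placeholder identifier
row = ds[0]
print(row["repo"], row["path"])  # source repository and file of the sample
print(row["docstring"])          # natural-language summary paired with the function
print(row["code"][:200])         # raw source text of the function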
sosy-lab/benchexec
benchexec/tablegenerator/__init__.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/tablegenerator/__init__.py#L91-L112
def parse_table_definition_file(file):
    '''
    Read and parse the XML of a table-definition file.
    @return: an ElementTree object for the table definition
    '''
    logging.info("Reading table definition from '%s'...", file)
    if not os.path.isfile(file):
        logging.error("File '%s' does not exist.", file)
        exit(1)

    try:
        tableGenFile = ElementTree.ElementTree().parse(file)
    except IOError as e:
        logging.error('Could not read result file %s: %s', file, e)
        exit(1)
    except ElementTree.ParseError as e:
        logging.error('Table file %s is invalid: %s', file, e)
        exit(1)

    if 'table' != tableGenFile.tag:
        logging.error("Table file %s is invalid: It's root element is not named 'table'.", file)
        exit(1)

    return tableGenFile
[ "def", "parse_table_definition_file", "(", "file", ")", ":", "logging", ".", "info", "(", "\"Reading table definition from '%s'...\"", ",", "file", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "file", ")", ":", "logging", ".", "error", "(", "\"File '%s' does not exist.\"", ",", "file", ")", "exit", "(", "1", ")", "try", ":", "tableGenFile", "=", "ElementTree", ".", "ElementTree", "(", ")", ".", "parse", "(", "file", ")", "except", "IOError", "as", "e", ":", "logging", ".", "error", "(", "'Could not read result file %s: %s'", ",", "file", ",", "e", ")", "exit", "(", "1", ")", "except", "ElementTree", ".", "ParseError", "as", "e", ":", "logging", ".", "error", "(", "'Table file %s is invalid: %s'", ",", "file", ",", "e", ")", "exit", "(", "1", ")", "if", "'table'", "!=", "tableGenFile", ".", "tag", ":", "logging", ".", "error", "(", "\"Table file %s is invalid: It's root element is not named 'table'.\"", ",", "file", ")", "exit", "(", "1", ")", "return", "tableGenFile" ]
Read and parse the XML of a table-definition file. @return: an ElementTree object for the table definition
[ "Read", "an", "parse", "the", "XML", "of", "a", "table", "-", "definition", "file", "." ]
python
train
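Each record stores the function twice: as raw source (code) and as a whitespace-free token sequence (code_tokens), with the same pairing for the docstring. A small sketch of flattening the token lists back into text, assuming `row` is a single record loaded as a Python dict:

# `row` is assumed to be one record from this dataset, loaded as a dict.
summary = " ".join(row["docstring_tokens"])  # space-joined docstring tokens
flat_code = " ".join(row["code_tokens"])     # code text with original whitespace discarded
print(summary)
print(flat_code[:120])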
apache/incubator-mxnet
python/mxnet/rnn/rnn_cell.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/rnn/rnn_cell.py#L190-L223
def begin_state(self, func=symbol.zeros, **kwargs):
    """Initial state for this cell.

    Parameters
    ----------
    func : callable, default symbol.zeros
        Function for creating initial state. Can be symbol.zeros,
        symbol.uniform, symbol.Variable etc. Use symbol.Variable if you
        want to directly feed input as states.
    **kwargs :
        more keyword arguments passed to func. For example
        mean, std, dtype, etc.

    Returns
    -------
    states : nested list of Symbol
        Starting states for the first RNN step.
    """
    assert not self._modified, \
        "After applying modifier cells (e.g. DropoutCell) the base " \
        "cell cannot be called directly. Call the modifier cell instead."
    states = []
    for info in self.state_info:
        self._init_counter += 1
        if info is None:
            state = func(name='%sbegin_state_%d' % (self._prefix, self._init_counter),
                         **kwargs)
        else:
            kwargs.update(info)
            state = func(name='%sbegin_state_%d' % (self._prefix, self._init_counter),
                         **kwargs)
        states.append(state)
    return states
[ "def", "begin_state", "(", "self", ",", "func", "=", "symbol", ".", "zeros", ",", "*", "*", "kwargs", ")", ":", "assert", "not", "self", ".", "_modified", ",", "\"After applying modifier cells (e.g. DropoutCell) the base \"", "\"cell cannot be called directly. Call the modifier cell instead.\"", "states", "=", "[", "]", "for", "info", "in", "self", ".", "state_info", ":", "self", ".", "_init_counter", "+=", "1", "if", "info", "is", "None", ":", "state", "=", "func", "(", "name", "=", "'%sbegin_state_%d'", "%", "(", "self", ".", "_prefix", ",", "self", ".", "_init_counter", ")", ",", "*", "*", "kwargs", ")", "else", ":", "kwargs", ".", "update", "(", "info", ")", "state", "=", "func", "(", "name", "=", "'%sbegin_state_%d'", "%", "(", "self", ".", "_prefix", ",", "self", ".", "_init_counter", ")", ",", "*", "*", "kwargs", ")", "states", ".", "append", "(", "state", ")", "return", "states" ]
Initial state for this cell. Parameters ---------- func : callable, default symbol.zeros Function for creating initial state. Can be symbol.zeros, symbol.uniform, symbol.Variable etc. Use symbol.Variable if you want to directly feed input as states. **kwargs : more keyword arguments passed to func. For example mean, std, dtype, etc. Returns ------- states : nested list of Symbol Starting states for the first RNN step.
[ "Initial", "state", "for", "this", "cell", "." ]
python
train
GNS3/gns3-server
gns3server/compute/dynamips/__init__.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/__init__.py#L270-L314
def start_new_hypervisor(self, working_dir=None):
    """
    Creates a new Dynamips process and starts it.

    :param working_dir: working directory

    :returns: the new hypervisor instance
    """

    if not self._dynamips_path:
        self.find_dynamips()

    if not working_dir:
        working_dir = tempfile.gettempdir()

    # FIXME: hypervisor should always listen to 127.0.0.1
    # See https://github.com/GNS3/dynamips/issues/62
    server_config = self.config.get_section_config("Server")
    server_host = server_config.get("host")

    try:
        info = socket.getaddrinfo(server_host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
        if not info:
            raise DynamipsError("getaddrinfo returns an empty list on {}".format(server_host))
        for res in info:
            af, socktype, proto, _, sa = res
            # let the OS find an unused port for the Dynamips hypervisor
            with socket.socket(af, socktype, proto) as sock:
                sock.bind(sa)
                port = sock.getsockname()[1]
                break
    except OSError as e:
        raise DynamipsError("Could not find free port for the Dynamips hypervisor: {}".format(e))

    port_manager = PortManager.instance()
    hypervisor = Hypervisor(self._dynamips_path, working_dir, server_host, port, port_manager.console_host)

    log.info("Creating new hypervisor {}:{} with working directory {}".format(hypervisor.host, hypervisor.port, working_dir))
    yield from hypervisor.start()
    log.info("Hypervisor {}:{} has successfully started".format(hypervisor.host, hypervisor.port))
    yield from hypervisor.connect()
    if parse_version(hypervisor.version) < parse_version('0.2.11'):
        raise DynamipsError("Dynamips version must be >= 0.2.11, detected version is {}".format(hypervisor.version))

    return hypervisor
[ "def", "start_new_hypervisor", "(", "self", ",", "working_dir", "=", "None", ")", ":", "if", "not", "self", ".", "_dynamips_path", ":", "self", ".", "find_dynamips", "(", ")", "if", "not", "working_dir", ":", "working_dir", "=", "tempfile", ".", "gettempdir", "(", ")", "# FIXME: hypervisor should always listen to 127.0.0.1", "# See https://github.com/GNS3/dynamips/issues/62", "server_config", "=", "self", ".", "config", ".", "get_section_config", "(", "\"Server\"", ")", "server_host", "=", "server_config", ".", "get", "(", "\"host\"", ")", "try", ":", "info", "=", "socket", ".", "getaddrinfo", "(", "server_host", ",", "0", ",", "socket", ".", "AF_UNSPEC", ",", "socket", ".", "SOCK_STREAM", ",", "0", ",", "socket", ".", "AI_PASSIVE", ")", "if", "not", "info", ":", "raise", "DynamipsError", "(", "\"getaddrinfo returns an empty list on {}\"", ".", "format", "(", "server_host", ")", ")", "for", "res", "in", "info", ":", "af", ",", "socktype", ",", "proto", ",", "_", ",", "sa", "=", "res", "# let the OS find an unused port for the Dynamips hypervisor", "with", "socket", ".", "socket", "(", "af", ",", "socktype", ",", "proto", ")", "as", "sock", ":", "sock", ".", "bind", "(", "sa", ")", "port", "=", "sock", ".", "getsockname", "(", ")", "[", "1", "]", "break", "except", "OSError", "as", "e", ":", "raise", "DynamipsError", "(", "\"Could not find free port for the Dynamips hypervisor: {}\"", ".", "format", "(", "e", ")", ")", "port_manager", "=", "PortManager", ".", "instance", "(", ")", "hypervisor", "=", "Hypervisor", "(", "self", ".", "_dynamips_path", ",", "working_dir", ",", "server_host", ",", "port", ",", "port_manager", ".", "console_host", ")", "log", ".", "info", "(", "\"Creating new hypervisor {}:{} with working directory {}\"", ".", "format", "(", "hypervisor", ".", "host", ",", "hypervisor", ".", "port", ",", "working_dir", ")", ")", "yield", "from", "hypervisor", ".", "start", "(", ")", "log", ".", "info", "(", "\"Hypervisor {}:{} has successfully started\"", ".", "format", "(", "hypervisor", ".", "host", ",", "hypervisor", ".", "port", ")", ")", "yield", "from", "hypervisor", ".", "connect", "(", ")", "if", "parse_version", "(", "hypervisor", ".", "version", ")", "<", "parse_version", "(", "'0.2.11'", ")", ":", "raise", "DynamipsError", "(", "\"Dynamips version must be >= 0.2.11, detected version is {}\"", ".", "format", "(", "hypervisor", ".", "version", ")", ")", "return", "hypervisor" ]
Creates a new Dynamips process and starts it. :param working_dir: working directory :returns: the new hypervisor instance
[ "Creates", "a", "new", "Dynamips", "process", "and", "start", "it", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py#L2289-L2315
def auto_rewrite_input(self, cmd):
    """Print to the screen the rewritten form of the user's command.

    This shows visual feedback by rewriting input lines that cause
    automatic calling to kick in, like::

      /f x

    into::

      ------> f(x)

    after the user's input prompt.  This helps the user understand that the
    input line was transformed automatically by IPython.
    """
    if not self.show_rewritten_input:
        return

    rw = self.prompt_manager.render('rewrite') + cmd

    try:
        # plain ascii works better w/ pyreadline, on some machines, so
        # we use it and only print uncolored rewrite if we have unicode
        rw = str(rw)
        print >> io.stdout, rw
    except UnicodeEncodeError:
        print "------> " + cmd
[ "def", "auto_rewrite_input", "(", "self", ",", "cmd", ")", ":", "if", "not", "self", ".", "show_rewritten_input", ":", "return", "rw", "=", "self", ".", "prompt_manager", ".", "render", "(", "'rewrite'", ")", "+", "cmd", "try", ":", "# plain ascii works better w/ pyreadline, on some machines, so", "# we use it and only print uncolored rewrite if we have unicode", "rw", "=", "str", "(", "rw", ")", "print", ">>", "io", ".", "stdout", ",", "rw", "except", "UnicodeEncodeError", ":", "print", "\"------> \"", "+", "cmd" ]
Print to the screen the rewritten form of the user's command. This shows visual feedback by rewriting input lines that cause automatic calling to kick in, like:: /f x into:: ------> f(x) after the user's input prompt. This helps the user understand that the input line was transformed automatically by IPython.
[ "Print", "to", "the", "screen", "the", "rewritten", "form", "of", "the", "user", "s", "command", "." ]
python
test
vbwagner/ctypescrypto
ctypescrypto/x509.py
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/x509.py#L344-L351
def _X509__asn1date_to_datetime(asn1date):
    """
    Converts openssl ASN1_TIME object to python datetime.datetime
    """
    bio = Membio()
    libcrypto.ASN1_TIME_print(bio.bio, asn1date)
    pydate = datetime.strptime(str(bio), "%b %d %H:%M:%S %Y %Z")
    return pydate.replace(tzinfo=utc)
[ "def", "_X509__asn1date_to_datetime", "(", "asn1date", ")", ":", "bio", "=", "Membio", "(", ")", "libcrypto", ".", "ASN1_TIME_print", "(", "bio", ".", "bio", ",", "asn1date", ")", "pydate", "=", "datetime", ".", "strptime", "(", "str", "(", "bio", ")", ",", "\"%b %d %H:%M:%S %Y %Z\"", ")", "return", "pydate", ".", "replace", "(", "tzinfo", "=", "utc", ")" ]
Converts openssl ASN1_TIME object to python datetime.datetime
[ "Converts", "openssl", "ASN1_TIME", "object", "to", "python", "datetime", ".", "datetime" ]
python
train
baszoetekouw/janus-py
sr/sr.py
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L171-L177
def get(self, eid):
    """
    Returns a dict with the complete record of the entity with the given eID
    """
    data = self._http_req('connections/%u' % eid)
    self.debug(0x01, data['decoded'])
    return data['decoded']
[ "def", "get", "(", "self", ",", "eid", ")", ":", "data", "=", "self", ".", "_http_req", "(", "'connections/%u'", "%", "eid", ")", "self", ".", "debug", "(", "0x01", ",", "data", "[", "'decoded'", "]", ")", "return", "data", "[", "'decoded'", "]" ]
Returns a dict with the complete record of the entity with the given eID
[ "Returns", "a", "dict", "with", "the", "complete", "record", "of", "the", "entity", "with", "the", "given", "eID" ]
python
train
RedHatInsights/insights-core
insights/parsers/multipath_conf.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/multipath_conf.py#L209-L216
def get_tree_from_initramfs(root=None):
    """
    This is a helper function to get a multipath configuration(from initramfs
    image) component for your local machine or an archive.

    It's for use in interactive sessions.
    """
    from insights import run
    return run(MultipathConfTreeInitramfs, root=root).get(MultipathConfTreeInitramfs)
[ "def", "get_tree_from_initramfs", "(", "root", "=", "None", ")", ":", "from", "insights", "import", "run", "return", "run", "(", "MultipathConfTreeInitramfs", ",", "root", "=", "root", ")", ".", "get", "(", "MultipathConfTreeInitramfs", ")" ]
This is a helper function to get a multipath configuration(from initramfs image) component for your local machine or an archive. It's for use in interactive sessions.
[ "This", "is", "a", "helper", "function", "to", "get", "a", "multipath", "configuration", "(", "from", "initramfs", "image", ")", "component", "for", "your", "local", "machine", "or", "an", "archive", ".", "It", "s", "for", "use", "in", "interactive", "sessions", "." ]
python
train
campaignmonitor/createsend-python
lib/createsend/list.py
https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/list.py#L56-L60
def delete_custom_field(self, custom_field_key):
    """Deletes a custom field associated with this list."""
    custom_field_key = quote(custom_field_key, '')
    response = self._delete("/lists/%s/customfields/%s.json" %
                            (self.list_id, custom_field_key))
[ "def", "delete_custom_field", "(", "self", ",", "custom_field_key", ")", ":", "custom_field_key", "=", "quote", "(", "custom_field_key", ",", "''", ")", "response", "=", "self", ".", "_delete", "(", "\"/lists/%s/customfields/%s.json\"", "%", "(", "self", ".", "list_id", ",", "custom_field_key", ")", ")" ]
Deletes a custom field associated with this list.
[ "Deletes", "a", "custom", "field", "associated", "with", "this", "list", "." ]
python
train
JNRowe/upoints
upoints/baken.py
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/baken.py#L87-L94
def locator(self, value):
    """Update the locator, and trigger a latitude and longitude update.

    Args:
        value (str): New Maidenhead locator string
    """
    self._locator = value
    self._latitude, self._longitude = utils.from_grid_locator(value)
[ "def", "locator", "(", "self", ",", "value", ")", ":", "self", ".", "_locator", "=", "value", "self", ".", "_latitude", ",", "self", ".", "_longitude", "=", "utils", ".", "from_grid_locator", "(", "value", ")" ]
Update the locator, and trigger a latitude and longitude update. Args: value (str): New Maidenhead locator string
[ "Update", "the", "locator", "and", "trigger", "a", "latitude", "and", "longitude", "update", "." ]
python
train
objectrocket/python-client
scripts/check_docs.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/scripts/check_docs.py#L238-L248
def _valid_directory(self, path):
    """Ensure that the given path is valid.

    :param str path: A valid directory path.
    :raises: :py:class:`argparse.ArgumentTypeError`
    :returns: An absolute directory path.
    """
    abspath = os.path.abspath(path)
    if not os.path.isdir(abspath):
        raise argparse.ArgumentTypeError('Not a valid directory: {}'.format(abspath))
    return abspath
[ "def", "_valid_directory", "(", "self", ",", "path", ")", ":", "abspath", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "abspath", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'Not a valid directory: {}'", ".", "format", "(", "abspath", ")", ")", "return", "abspath" ]
Ensure that the given path is valid. :param str path: A valid directory path. :raises: :py:class:`argparse.ArgumentTypeError` :returns: An absolute directory path.
[ "Ensure", "that", "the", "given", "path", "is", "valid", "." ]
python
train
dade-ai/snipy
snipy/io/fileutil.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L207-L228
def listdir(p, match='*', exclude='', listtype='file', matchfun=None):
    """
    list file(or folder) for this path (NOT recursive)

    :param p:
    :param match:
    :param exclude:
    :param listtype: ('file' | 'filepath' |'dir' | 'all')
    :param matchfun: match fun (default fnmatch.fnmatch)
        True/False = matchfun(name, pattern)
    :rtype:
    """
    if listtype == 'file':
        gen = listfile(p)
    elif listtype == 'filepath':
        gen = listfilepath(p)
    elif listtype == 'dir':
        gen = listfolder(p)
    elif listtype == 'dirpath':
        gen = listfolderpath(p)
    else:  # list file or folder
        gen = (entry.name for entry in scandir.scandir(p))

    return filter_pattern(gen, match, exclude, matchfun)
[ "def", "listdir", "(", "p", ",", "match", "=", "'*'", ",", "exclude", "=", "''", ",", "listtype", "=", "'file'", ",", "matchfun", "=", "None", ")", ":", "if", "listtype", "==", "'file'", ":", "gen", "=", "listfile", "(", "p", ")", "elif", "listtype", "==", "'filepath'", ":", "gen", "=", "listfilepath", "(", "p", ")", "elif", "listtype", "==", "'dir'", ":", "gen", "=", "listfolder", "(", "p", ")", "elif", "listtype", "==", "'dirpath'", ":", "gen", "=", "listfolderpath", "(", "p", ")", "else", ":", "# list file or folder", "gen", "=", "(", "entry", ".", "name", "for", "entry", "in", "scandir", ".", "scandir", "(", "p", ")", ")", "return", "filter_pattern", "(", "gen", ",", "match", ",", "exclude", ",", "matchfun", ")" ]
list file(or folder) for this path (NOT recursive) :param p: :param match: :param exclude: :param listtype: ('file' | 'filepath' |'dir' | 'all') :param matchfun: match fun (default fnmatch.fnmatch) True/False = matchfun(name, pattern) :rtype:
[ "list", "file", "(", "or", "folder", ")", "for", "this", "path", "(", "NOT", "recursive", ")", ":", "param", "p", ":", ":", "param", "match", ":", ":", "param", "exclude", ":", ":", "param", "listtype", ":", "(", "file", "|", "filepath", "|", "dir", "|", "all", ")", ":", "param", "matchfun", ":", "match", "fun", "(", "default", "fnmatch", ".", "fnmatch", ")", "True", "/", "False", "=", "matchfun", "(", "name", "pattern", ")", ":", "rtype", ":" ]
python
valid
wq/django-rest-pandas
rest_pandas/serializers.py
https://github.com/wq/django-rest-pandas/blob/544177e576a8d54cb46cea6240586c07216f6c49/rest_pandas/serializers.py#L69-L83
def get_index_fields(self):
    """
    List of fields to use for index
    """
    index_fields = self.get_meta_option('index', [])
    if index_fields:
        return index_fields

    model = getattr(self.model_serializer_meta, 'model', None)
    if model:
        pk_name = model._meta.pk.name
        if pk_name in self.child.get_fields():
            return [pk_name]
    return []
[ "def", "get_index_fields", "(", "self", ")", ":", "index_fields", "=", "self", ".", "get_meta_option", "(", "'index'", ",", "[", "]", ")", "if", "index_fields", ":", "return", "index_fields", "model", "=", "getattr", "(", "self", ".", "model_serializer_meta", ",", "'model'", ",", "None", ")", "if", "model", ":", "pk_name", "=", "model", ".", "_meta", ".", "pk", ".", "name", "if", "pk_name", "in", "self", ".", "child", ".", "get_fields", "(", ")", ":", "return", "[", "pk_name", "]", "return", "[", "]" ]
List of fields to use for index
[ "List", "of", "fields", "to", "use", "for", "index" ]
python
train
CyberZHG/keras-word-char-embd
keras_wc_embd/word_char_embd.py
https://github.com/CyberZHG/keras-word-char-embd/blob/cca6ddff01b6264dd0d12613bb9ed308e1367b8c/keras_wc_embd/word_char_embd.py#L278-L313
def get_embedding_weights_from_file(word_dict, file_path, ignore_case=False):
    """Load pre-trained embeddings from a text file.

    Each line in the file should look like this:
        word feature_dim_1 feature_dim_2 ... feature_dim_n

    The `feature_dim_i` should be a floating point number.

    :param word_dict: A dict that maps words to indices.
    :param file_path: The location of the text file containing the pre-trained embeddings.
    :param ignore_case: Whether to ignore the case of the words.

    :return weights: A numpy array.
    """
    pre_trained = {}
    with codecs.open(file_path, 'r', 'utf8') as reader:
        for line in reader:
            line = line.strip()
            if not line:
                continue
            parts = line.split()
            if ignore_case:
                parts[0] = parts[0].lower()
            pre_trained[parts[0]] = list(map(float, parts[1:]))
    embd_dim = len(next(iter(pre_trained.values())))
    weights = [[0.0] * embd_dim for _ in range(max(word_dict.values()) + 1)]
    for word, index in word_dict.items():
        if not word:
            continue
        if ignore_case:
            word = word.lower()
        if word in pre_trained:
            weights[index] = pre_trained[word]
        else:
            weights[index] = numpy.random.random((embd_dim,)).tolist()
    return numpy.asarray(weights)
[ "def", "get_embedding_weights_from_file", "(", "word_dict", ",", "file_path", ",", "ignore_case", "=", "False", ")", ":", "pre_trained", "=", "{", "}", "with", "codecs", ".", "open", "(", "file_path", ",", "'r'", ",", "'utf8'", ")", "as", "reader", ":", "for", "line", "in", "reader", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "parts", "=", "line", ".", "split", "(", ")", "if", "ignore_case", ":", "parts", "[", "0", "]", "=", "parts", "[", "0", "]", ".", "lower", "(", ")", "pre_trained", "[", "parts", "[", "0", "]", "]", "=", "list", "(", "map", "(", "float", ",", "parts", "[", "1", ":", "]", ")", ")", "embd_dim", "=", "len", "(", "next", "(", "iter", "(", "pre_trained", ".", "values", "(", ")", ")", ")", ")", "weights", "=", "[", "[", "0.0", "]", "*", "embd_dim", "for", "_", "in", "range", "(", "max", "(", "word_dict", ".", "values", "(", ")", ")", "+", "1", ")", "]", "for", "word", ",", "index", "in", "word_dict", ".", "items", "(", ")", ":", "if", "not", "word", ":", "continue", "if", "ignore_case", ":", "word", "=", "word", ".", "lower", "(", ")", "if", "word", "in", "pre_trained", ":", "weights", "[", "index", "]", "=", "pre_trained", "[", "word", "]", "else", ":", "weights", "[", "index", "]", "=", "numpy", ".", "random", ".", "random", "(", "(", "embd_dim", ",", ")", ")", ".", "tolist", "(", ")", "return", "numpy", ".", "asarray", "(", "weights", ")" ]
Load pre-trained embeddings from a text file. Each line in the file should look like this: word feature_dim_1 feature_dim_2 ... feature_dim_n The `feature_dim_i` should be a floating point number. :param word_dict: A dict that maps words to indices. :param file_path: The location of the text file containing the pre-trained embeddings. :param ignore_case: Whether to ignore the case of the words. :return weights: A numpy array.
[ "Load", "pre", "-", "trained", "embeddings", "from", "a", "text", "file", "." ]
python
train
nimbusproject/dashi
dashi/bootstrap/__init__.py
https://github.com/nimbusproject/dashi/blob/368b3963ec8abd60aebe0f81915429b45cbf4b5a/dashi/bootstrap/__init__.py#L212-L235
def _dict_from_dotted(key, val):
    """takes a key value pair like:
    key: "this.is.a.key"
    val: "the value"

    and returns a dictionary like:
    {"this": {"is": {"a": {"key": "the value"}}}}
    """
    split_key = key.split(".")
    split_key.reverse()
    for key_part in split_key:
        new_dict = DotDict()
        new_dict[key_part] = val
        val = new_dict
    return val
[ "def", "_dict_from_dotted", "(", "key", ",", "val", ")", ":", "split_key", "=", "key", ".", "split", "(", "\".\"", ")", "split_key", ".", "reverse", "(", ")", "for", "key_part", "in", "split_key", ":", "new_dict", "=", "DotDict", "(", ")", "new_dict", "[", "key_part", "]", "=", "val", "val", "=", "new_dict", "return", "val" ]
takes a key value pair like: key: "this.is.a.key" val: "the value" and returns a dictionary like: {"this": {"is": {"a": {"key": "the value" } } } }
[ "takes", "a", "key", "value", "pair", "like", ":", "key", ":", "this", ".", "is", ".", "a", ".", "key", "val", ":", "the", "value" ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L662-L691
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    _none = ['None', 'none', None]
    if (block_device in _none):
        error_out('prepare_storage(): Missing required input: block_device=%s.'
                  % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        _bd = block_device.split('|')
        if len(_bd) == 2:
            bdev, size = _bd
        else:
            bdev = block_device
            size = DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
[ "def", "ensure_block_device", "(", "block_device", ")", ":", "_none", "=", "[", "'None'", ",", "'none'", ",", "None", "]", "if", "(", "block_device", "in", "_none", ")", ":", "error_out", "(", "'prepare_storage(): Missing required input: block_device=%s.'", "%", "block_device", ")", "if", "block_device", ".", "startswith", "(", "'/dev/'", ")", ":", "bdev", "=", "block_device", "elif", "block_device", ".", "startswith", "(", "'/'", ")", ":", "_bd", "=", "block_device", ".", "split", "(", "'|'", ")", "if", "len", "(", "_bd", ")", "==", "2", ":", "bdev", ",", "size", "=", "_bd", "else", ":", "bdev", "=", "block_device", "size", "=", "DEFAULT_LOOPBACK_SIZE", "bdev", "=", "ensure_loopback_device", "(", "bdev", ",", "size", ")", "else", ":", "bdev", "=", "'/dev/%s'", "%", "block_device", "if", "not", "is_block_device", "(", "bdev", ")", ":", "error_out", "(", "'Failed to locate valid block device at %s'", "%", "bdev", ")", "return", "bdev" ]
Confirm block_device, create as loopback if necessary. :param block_device: str: Full path of block device to ensure. :returns: str: Full path of ensured block device.
[ "Confirm", "block_device", "create", "as", "loopback", "if", "necessary", "." ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/rabbitmq/driver.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/rabbitmq/driver.py#L482-L499
def stop(self):
    """
    Stop services and requestors and then connection.
    :return: self
    """
    LOGGER.debug("rabbitmq.Driver.stop")
    for requester in self.requester_registry:
        requester.stop()
    self.requester_registry.clear()

    for service in self.services_registry:
        if service.is_started:
            service.stop()
    self.services_registry.clear()

    pykka.ActorRegistry.stop_all()
    return self
[ "def", "stop", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"rabbitmq.Driver.stop\"", ")", "for", "requester", "in", "self", ".", "requester_registry", ":", "requester", ".", "stop", "(", ")", "self", ".", "requester_registry", ".", "clear", "(", ")", "for", "service", "in", "self", ".", "services_registry", ":", "if", "service", ".", "is_started", ":", "service", ".", "stop", "(", ")", "self", ".", "services_registry", ".", "clear", "(", ")", "pykka", ".", "ActorRegistry", ".", "stop_all", "(", ")", "return", "self" ]
Stop services and requestors and then connection. :return: self
[ "Stop", "services", "and", "requestors", "and", "then", "connection", ".", ":", "return", ":", "self" ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L1726-L1742
async def _report_version(self):
    """
    This is a private message handler method.

    This method reads the following 2 bytes after the report version
    command (0xF9 - non sysex).

    The first byte is the major number and the second byte is the
    minor number.

    :returns: None
    """
    # get next two bytes
    major = await self.read()
    version_string = str(major)
    minor = await self.read()
    version_string += '.'
    version_string += str(minor)
    self.query_reply_data[PrivateConstants.REPORT_VERSION] = version_string
[ "async", "def", "_report_version", "(", "self", ")", ":", "# get next two bytes", "major", "=", "await", "self", ".", "read", "(", ")", "version_string", "=", "str", "(", "major", ")", "minor", "=", "await", "self", ".", "read", "(", ")", "version_string", "+=", "'.'", "version_string", "+=", "str", "(", "minor", ")", "self", ".", "query_reply_data", "[", "PrivateConstants", ".", "REPORT_VERSION", "]", "=", "version_string" ]
This is a private message handler method. This method reads the following 2 bytes after the report version command (0xF9 - non sysex). The first byte is the major number and the second byte is the minor number. :returns: None
[ "This", "is", "a", "private", "message", "handler", "method", ".", "This", "method", "reads", "the", "following", "2", "bytes", "after", "the", "report", "version", "command", "(", "0xF9", "-", "non", "sysex", ")", ".", "The", "first", "byte", "is", "the", "major", "number", "and", "the", "second", "byte", "is", "the", "minor", "number", "." ]
python
train
aiogram/aiogram
aiogram/bot/bot.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L1270-L1288
async def get_chat_administrators(self, chat_id: typing.Union[base.Integer, base.String]
                                  ) -> typing.List[types.ChatMember]:
    """
    Use this method to get a list of administrators in a chat.

    Source: https://core.telegram.org/bots/api#getchatadministrators

    :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :return: On success, returns an Array of ChatMember objects that contains information about all
        chat administrators except other bots.
        If the chat is a group or a supergroup and no administrators were appointed,
        only the creator will be returned.
    :rtype: :obj:`typing.List[types.ChatMember]`
    """
    payload = generate_payload(**locals())
    result = await self.request(api.Methods.GET_CHAT_ADMINISTRATORS, payload)

    return [types.ChatMember(**chatmember) for chatmember in result]
[ "async", "def", "get_chat_administrators", "(", "self", ",", "chat_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "base", ".", "String", "]", ")", "->", "typing", ".", "List", "[", "types", ".", "ChatMember", "]", ":", "payload", "=", "generate_payload", "(", "*", "*", "locals", "(", ")", ")", "result", "=", "await", "self", ".", "request", "(", "api", ".", "Methods", ".", "GET_CHAT_ADMINISTRATORS", ",", "payload", ")", "return", "[", "types", ".", "ChatMember", "(", "*", "*", "chatmember", ")", "for", "chatmember", "in", "result", "]" ]
Use this method to get a list of administrators in a chat. Source: https://core.telegram.org/bots/api#getchatadministrators :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :return: On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned. :rtype: :obj:`typing.List[types.ChatMember]`
[ "Use", "this", "method", "to", "get", "a", "list", "of", "administrators", "in", "a", "chat", "." ]
python
train
Projectplace/basepage
basepage/base_page.py
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L511-L524
def get_visible_children(self, parent, locator, params=None, timeout=None):
    """
    Get child-elements both present AND visible in the DOM.

    If timeout is 0 (zero) return WebElement instance or None, else we wait and retry
    for timeout and raise TimeoutException should the element not be found.

    :param parent: parent-element
    :param locator: locator tuple
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: WebElement instance
    """
    return self.get_present_children(parent, locator, params, timeout, True)
[ "def", "get_visible_children", "(", "self", ",", "parent", ",", "locator", ",", "params", "=", "None", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "get_present_children", "(", "parent", ",", "locator", ",", "params", ",", "timeout", ",", "True", ")" ]
Get child-elements both present AND visible in the DOM. If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise TimeoutException should the element not be found. :param parent: parent-element :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: WebElement instance
[ "Get", "child", "-", "elements", "both", "present", "AND", "visible", "in", "the", "DOM", "." ]
python
train
bitesofcode/projexui
projexui/xdatatype.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xdatatype.py#L70-L87
def saveDataSet( settings, key, dataSet ):
    """
    Records the dataset settings to the inputed data set for the given key.

    :param      settings | <QSettings>
                key      | <str>
                dataSet  | <projex.dataset.DataSet>
    """
    for datakey, value in dataSet.items():
        datatype = type(value).__name__
        if (datatype in _dataValueTypes):
            datavalue = _dataValueTypes[datatype][0](value)
        else:
            datavalue = value

        settings.setValue('%s/%s/type' % (key, datakey), wrapVariant(datatype))
        settings.setValue('%s/%s/value' % (key, datakey), wrapVariant(datavalue))
[ "def", "saveDataSet", "(", "settings", ",", "key", ",", "dataSet", ")", ":", "for", "datakey", ",", "value", "in", "dataSet", ".", "items", "(", ")", ":", "datatype", "=", "type", "(", "value", ")", ".", "__name__", "if", "(", "datatype", "in", "_dataValueTypes", ")", ":", "datavalue", "=", "_dataValueTypes", "[", "datatype", "]", "[", "0", "]", "(", "value", ")", "else", ":", "datavalue", "=", "value", "settings", ".", "setValue", "(", "'%s/%s/type'", "%", "(", "key", ",", "datakey", ")", ",", "wrapVariant", "(", "datatype", ")", ")", "settings", ".", "setValue", "(", "'%s/%s/value'", "%", "(", "key", ",", "datakey", ")", ",", "wrapVariant", "(", "datavalue", ")", ")" ]
Records the dataset settings to the inputed data set for the given key. :param settings | <QSettings> key | <str> dataSet | <projex.dataset.DataSet>
[ "Records", "the", "dataset", "settings", "to", "the", "inputed", "data", "set", "for", "the", "given", "key", ".", ":", "param", "settings", "|", "<QSettings", ">", "key", "|", "<str", ">", "dataSet", "|", "<projex", ".", "dataset", ".", "DataSet", ">" ]
python
train
LogicalDash/LiSE
allegedb/allegedb/graph.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/graph.py#L1426-L1449
def remove_edge(self, u, v, key=None):
    """Version of remove_edge that's much like normal networkx but only
    deletes once, since the database doesn't keep separate adj and
    succ mappings

    """
    try:
        d = self.adj[u][v]
    except KeyError:
        raise NetworkXError(
            "The edge {}-{} is not in the graph.".format(u, v)
        )
    if key is None:
        d.popitem()
    else:
        try:
            del d[key]
        except KeyError:
            raise NetworkXError(
                "The edge {}-{} with key {} is not in the graph.".format(u, v, key)
            )
    if len(d) == 0:
        del self.succ[u][v]
[ "def", "remove_edge", "(", "self", ",", "u", ",", "v", ",", "key", "=", "None", ")", ":", "try", ":", "d", "=", "self", ".", "adj", "[", "u", "]", "[", "v", "]", "except", "KeyError", ":", "raise", "NetworkXError", "(", "\"The edge {}-{} is not in the graph.\"", ".", "format", "(", "u", ",", "v", ")", ")", "if", "key", "is", "None", ":", "d", ".", "popitem", "(", ")", "else", ":", "try", ":", "del", "d", "[", "key", "]", "except", "KeyError", ":", "raise", "NetworkXError", "(", "\"The edge {}-{} with key {} is not in the graph.\"", ".", "format", "(", "u", ",", "v", ",", "key", ")", ")", "if", "len", "(", "d", ")", "==", "0", ":", "del", "self", ".", "succ", "[", "u", "]", "[", "v", "]" ]
Version of remove_edge that's much like normal networkx but only deletes once, since the database doesn't keep separate adj and succ mappings
[ "Version", "of", "remove_edge", "that", "s", "much", "like", "normal", "networkx", "but", "only", "deletes", "once", "since", "the", "database", "doesn", "t", "keep", "separate", "adj", "and", "succ", "mappings" ]
python
train
mattloper/chumpy
chumpy/monitor.py
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/monitor.py#L139-L144
def record(self, ch_node):
    '''
    Incremental changes
    '''
    rec = self.serialize_node(ch_node)
    self.history.append(rec)
[ "def", "record", "(", "self", ",", "ch_node", ")", ":", "rec", "=", "self", ".", "serialize_node", "(", "ch_node", ")", "self", ".", "history", ".", "append", "(", "rec", ")" ]
Incremental changes
[ "Incremental", "changes" ]
python
train
rbarrois/mpdlcd
mpdlcd/vendor/lcdproc/server.py
https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/vendor/lcdproc/server.py#L155-L168
def del_key(self, ref):
    """ Delete a key. (ref)

        Return None or LCDd response on error
    """
    # NOTE: the source line here reads "if ref not in self.keys", but
    # self.keys.remove(ref) would then raise ValueError for an unregistered
    # ref; the guard only makes sense as a positive membership test.
    if ref in self.keys:
        response = self.request("client_del_key %s" % (ref))
        self.keys.remove(ref)

        if "success" in response:
            return None
        else:
            return response
[ "def", "del_key", "(", "self", ",", "ref", ")", ":", "if", "ref", "not", "in", "self", ".", "keys", ":", "response", "=", "self", ".", "request", "(", "\"client_del_key %s\"", "%", "(", "ref", ")", ")", "self", ".", "keys", ".", "remove", "(", "ref", ")", "if", "\"success\"", "in", "response", ":", "return", "None", "else", ":", "return", "response" ]
Delete a key. (ref) Return None or LCDd response on error
[ "Delete", "a", "key", "." ]
python
train
masci/django-appengine-toolkit
appengine_toolkit/storage.py
https://github.com/masci/django-appengine-toolkit/blob/9ffe8b05a263889787fb34a3e28ebc66b1f0a1d2/appengine_toolkit/storage.py#L64-L68
def listdir(self, name):
    """
    TODO collect directories
    """
    return [], [obj.filename for obj in cloudstorage.listbucket(self.path(name))]
[ "def", "listdir", "(", "self", ",", "name", ")", ":", "return", "[", "]", ",", "[", "obj", ".", "filename", "for", "obj", "in", "cloudstorage", ".", "listbucket", "(", "self", ".", "path", "(", "name", ")", ")", "]" ]
TODO collect directories
[ "TODO", "collect", "directories" ]
python
train
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L92-L101
def nsamples_to_minsec(x, pos):
    '''Convert axes labels to experiment duration in minutes/seconds

    Notes
    -----
    Matplotlib FuncFormatter function
    https://matplotlib.org/examples/pylab_examples/custom_ticker1.html
    '''
    h, m, s = hourminsec(x/16.0)
    return '{:2.0f}′ {:2.1f}″'.format(m+(h*60), s)
[ "def", "nsamples_to_minsec", "(", "x", ",", "pos", ")", ":", "h", ",", "m", ",", "s", "=", "hourminsec", "(", "x", "/", "16.0", ")", "return", "'{:2.0f}′ {:2.1f}″'.for", "m", "at(m+(", "h", "*", "6", "0", ")", ",", " s", ")", "", "", "" ]
Convert axes labels to experiment duration in minutes/seconds Notes ----- Matplotlib FuncFormatter function https://matplotlib.org/examples/pylab_examples/custom_ticker1.html
[ "Convert", "axes", "labels", "to", "experiment", "duration", "in", "minutes", "/", "seconds" ]
python
train
rasbt/biopandas
biopandas/pdb/pandas_pdb.py
https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L95-L110
def fetch_pdb(self, pdb_code):
    """Fetches PDB file contents from the Protein Databank at rcsb.org.

    Parameters
    ----------
    pdb_code : str
        A 4-letter PDB code, e.g., "3eiy".

    Returns
    ---------
    self
    """
    self.pdb_path, self.pdb_text = self._fetch_pdb(pdb_code)
    self._df = self._construct_df(pdb_lines=self.pdb_text.splitlines(True))
    return self
[ "def", "fetch_pdb", "(", "self", ",", "pdb_code", ")", ":", "self", ".", "pdb_path", ",", "self", ".", "pdb_text", "=", "self", ".", "_fetch_pdb", "(", "pdb_code", ")", "self", ".", "_df", "=", "self", ".", "_construct_df", "(", "pdb_lines", "=", "self", ".", "pdb_text", ".", "splitlines", "(", "True", ")", ")", "return", "self" ]
Fetches PDB file contents from the Protein Databank at rcsb.org. Parameters ---------- pdb_code : str A 4-letter PDB code, e.g., "3eiy". Returns --------- self
[ "Fetches", "PDB", "file", "contents", "from", "the", "Protein", "Databank", "at", "rcsb", ".", "org", "." ]
python
train
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/corebluetooth/metadata.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/metadata.py#L71-L79
def add(self, cbobject, metadata):
    """Add the specified CoreBluetooth item with the associated metadata if
    it doesn't already exist.  Returns the newly created or preexisting
    metadata item.
    """
    with self._lock:
        if cbobject not in self._metadata:
            self._metadata[cbobject] = metadata
        return self._metadata[cbobject]
[ "def", "add", "(", "self", ",", "cbobject", ",", "metadata", ")", ":", "with", "self", ".", "_lock", ":", "if", "cbobject", "not", "in", "self", ".", "_metadata", ":", "self", ".", "_metadata", "[", "cbobject", "]", "=", "metadata", "return", "self", ".", "_metadata", "[", "cbobject", "]" ]
Add the specified CoreBluetooth item with the associated metadata if it doesn't already exist. Returns the newly created or preexisting metadata item.
[ "Add", "the", "specified", "CoreBluetooth", "item", "with", "the", "associated", "metadata", "if", "it", "doesn", "t", "already", "exist", ".", "Returns", "the", "newly", "created", "or", "preexisting", "metadata", "item", "." ]
python
valid
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L524-L535
def getv(self, r):
    """ Like the above, but returns the <int> value.
    """
    v = self.get(r)
    if not is_unknown(v):
        try:
            v = int(v)
        except ValueError:
            v = None
    else:
        v = None

    return v
[ "def", "getv", "(", "self", ",", "r", ")", ":", "v", "=", "self", ".", "get", "(", "r", ")", "if", "not", "is_unknown", "(", "v", ")", ":", "try", ":", "v", "=", "int", "(", "v", ")", "except", "ValueError", ":", "v", "=", "None", "else", ":", "v", "=", "None", "return", "v" ]
Like the above, but returns the <int> value.
[ "Like", "the", "above", "but", "returns", "the", "<int", ">", "value", "." ]
python
train
Becksteinlab/GromacsWrapper
gromacs/run.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L339-L358
def get_double_or_single_prec_mdrun():
    """Return double precision ``mdrun`` or fall back to single precision.

    This convenience function tries :func:`gromacs.mdrun_d` first and if it
    cannot run it, falls back to :func:`gromacs.mdrun` (without further
    checking).

    .. versionadded:: 0.5.1
    """
    try:
        gromacs.mdrun_d(h=True, stdout=False, stderr=False)
        logger.debug("using double precision gromacs.mdrun_d")
        return gromacs.mdrun_d
    except (AttributeError, GromacsError, OSError):
        # fall back to mdrun if no double precision binary
        wmsg = "No 'mdrun_d' binary found so trying 'mdrun' instead.\n"\
               "(Note that energy minimization runs better with mdrun_d.)"
        logger.warn(wmsg)
        warnings.warn(wmsg, category=AutoCorrectionWarning)
        return gromacs.mdrun
[ "def", "get_double_or_single_prec_mdrun", "(", ")", ":", "try", ":", "gromacs", ".", "mdrun_d", "(", "h", "=", "True", ",", "stdout", "=", "False", ",", "stderr", "=", "False", ")", "logger", ".", "debug", "(", "\"using double precision gromacs.mdrun_d\"", ")", "return", "gromacs", ".", "mdrun_d", "except", "(", "AttributeError", ",", "GromacsError", ",", "OSError", ")", ":", "# fall back to mdrun if no double precision binary", "wmsg", "=", "\"No 'mdrun_d' binary found so trying 'mdrun' instead.\\n\"", "\"(Note that energy minimization runs better with mdrun_d.)\"", "logger", ".", "warn", "(", "wmsg", ")", "warnings", ".", "warn", "(", "wmsg", ",", "category", "=", "AutoCorrectionWarning", ")", "return", "gromacs", ".", "mdrun" ]
Return double precision ``mdrun`` or fall back to single precision. This convenience function tries :func:`gromacs.mdrun_d` first and if it cannot run it, falls back to :func:`gromacs.mdrun` (without further checking). .. versionadded:: 0.5.1
[ "Return", "double", "precision", "mdrun", "or", "fall", "back", "to", "single", "precision", "." ]
python
valid
mardix/Mocha
mocha/core.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L858-L864
def _setup_db(cls):
    """ Setup the DB connection if DB_URL is set """
    uri = cls._app.config.get("DB_URL")
    if uri:
        db.connect__(uri, cls._app)
[ "def", "_setup_db", "(", "cls", ")", ":", "uri", "=", "cls", ".", "_app", ".", "config", ".", "get", "(", "\"DB_URL\"", ")", "if", "uri", ":", "db", ".", "connect__", "(", "uri", ",", "cls", ".", "_app", ")" ]
Setup the DB connection if DB_URL is set
[ "Setup", "the", "DB", "connection", "if", "DB_URL", "is", "set" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1296-L1328
def dump_registers(cls, registers, arch=None):
    """
    Dump the x86/x64 processor register values.
    The output mimics that of the WinDBG debugger.

    @type  registers: dict( str S{->} int )
    @param registers: Dictionary mapping register names to their values.

    @type  arch: str
    @param arch: Architecture of the machine whose registers were dumped.
        Defaults to the current architecture.
        Currently only the following architectures are supported:
         - L{win32.ARCH_I386}
         - L{win32.ARCH_AMD64}

    @rtype:  str
    @return: Text suitable for logging.
    """
    if registers is None:
        return ''
    if arch is None:
        if 'Eax' in registers:
            arch = win32.ARCH_I386
        elif 'Rax' in registers:
            arch = win32.ARCH_AMD64
        else:
            arch = 'Unknown'
    if arch not in cls.reg_template:
        msg = "Don't know how to dump the registers for architecture: %s"
        raise NotImplementedError(msg % arch)
    registers = registers.copy()
    registers['efl_dump'] = cls.dump_flags(registers['EFlags'])
    return cls.reg_template[arch] % registers
[ "def", "dump_registers", "(", "cls", ",", "registers", ",", "arch", "=", "None", ")", ":", "if", "registers", "is", "None", ":", "return", "''", "if", "arch", "is", "None", ":", "if", "'Eax'", "in", "registers", ":", "arch", "=", "win32", ".", "ARCH_I386", "elif", "'Rax'", "in", "registers", ":", "arch", "=", "win32", ".", "ARCH_AMD64", "else", ":", "arch", "=", "'Unknown'", "if", "arch", "not", "in", "cls", ".", "reg_template", ":", "msg", "=", "\"Don't know how to dump the registers for architecture: %s\"", "raise", "NotImplementedError", "(", "msg", "%", "arch", ")", "registers", "=", "registers", ".", "copy", "(", ")", "registers", "[", "'efl_dump'", "]", "=", "cls", ".", "dump_flags", "(", "registers", "[", "'EFlags'", "]", ")", "return", "cls", ".", "reg_template", "[", "arch", "]", "%", "registers" ]
Dump the x86/x64 processor register values. The output mimics that of the WinDBG debugger. @type registers: dict( str S{->} int ) @param registers: Dictionary mapping register names to their values. @type arch: str @param arch: Architecture of the machine whose registers were dumped. Defaults to the current architecture. Currently only the following architectures are supported: - L{win32.ARCH_I386} - L{win32.ARCH_AMD64} @rtype: str @return: Text suitable for logging.
[ "Dump", "the", "x86", "/", "x64", "processor", "register", "values", ".", "The", "output", "mimics", "that", "of", "the", "WinDBG", "debugger", "." ]
python
train
joytunes/JTLocalize
localization_flow/jtlocalize/core/create_localized_strings_from_ib_files.py
https://github.com/joytunes/JTLocalize/blob/87864dc60114e0e61c768d057c6eddfadff3f40a/localization_flow/jtlocalize/core/create_localized_strings_from_ib_files.py#L106-L136
def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix):
    """ Adds string pairs from a UI element with attributed text

    Args:
        results (list): The list to add the results to.
        attributed_element (element): The element from the xib that contains, to extract the fragments from.
        comment_prefix (str): The prefix of the comment to use for extracted string
            (will be appended "Part X" suffices)

    Returns:
        bool: Whether or not an attributed string was found.
    """
    attributed_strings = ui_element.getElementsByTagName('attributedString')
    if attributed_strings.length == 0:
        return False

    attributed_element = attributed_strings[0]

    fragment_index = 1
    for fragment in attributed_element.getElementsByTagName('fragment'):
        # The fragment text is either as an attribute <fragment content="TEXT">
        # or a child in the format <string key='content'>TEXT</string>
        try:
            label_entry_key = fragment.attributes['content'].value
        except KeyError:
            label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue

        comment = "%s Part %d" % (comment_prefix, fragment_index)
        results.append((label_entry_key, comment))
        fragment_index += 1

    return fragment_index > 1
[ "def", "add_string_pairs_from_attributed_ui_element", "(", "results", ",", "ui_element", ",", "comment_prefix", ")", ":", "attributed_strings", "=", "ui_element", ".", "getElementsByTagName", "(", "'attributedString'", ")", "if", "attributed_strings", ".", "length", "==", "0", ":", "return", "False", "attributed_element", "=", "attributed_strings", "[", "0", "]", "fragment_index", "=", "1", "for", "fragment", "in", "attributed_element", ".", "getElementsByTagName", "(", "'fragment'", ")", ":", "# The fragment text is either as an attribute <fragment content=\"TEXT\">", "# or a child in the format <string key='content'>TEXT</string>", "try", ":", "label_entry_key", "=", "fragment", ".", "attributes", "[", "'content'", "]", ".", "value", "except", "KeyError", ":", "label_entry_key", "=", "fragment", ".", "getElementsByTagName", "(", "'string'", ")", "[", "0", "]", ".", "firstChild", ".", "nodeValue", "comment", "=", "\"%s Part %d\"", "%", "(", "comment_prefix", ",", "fragment_index", ")", "results", ".", "append", "(", "(", "label_entry_key", ",", "comment", ")", ")", "fragment_index", "+=", "1", "return", "fragment_index", ">", "1" ]
Adds string pairs from a UI element with attributed text Args: results (list): The list to add the results to. attributed_element (element): The element from the xib that contains, to extract the fragments from. comment_prefix (str): The prefix of the comment to use for extracted string (will be appended "Part X" suffices) Returns: bool: Whether or not an attributed string was found.
[ "Adds", "string", "pairs", "from", "a", "UI", "element", "with", "attributed", "text" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L764-L778
def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
    """Calculate pearson correlation coefficient.

    Args:
        predictions: The raw predictions.
        labels: The actual labels.
        weights_fn: Weighting function.

    Returns:
        The pearson correlation coefficient.
    """
    del weights_fn
    _, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions,
                                                                  labels)
    return pearson, tf.constant(1.0)
[ "def", "pearson_correlation_coefficient", "(", "predictions", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "del", "weights_fn", "_", ",", "pearson", "=", "tf", ".", "contrib", ".", "metrics", ".", "streaming_pearson_correlation", "(", "predictions", ",", "labels", ")", "return", "pearson", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The pearson correlation coefficient.
[ "Calculate", "pearson", "correlation", "coefficient", "." ]
python
train
tritemio/PyBroMo
pybromo/psflib.py
https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/psflib.py#L120-L129
def hash(self):
    """Return a hash string computed on the PSF data."""
    hash_list = []
    for key, value in sorted(self.__dict__.items()):
        if not callable(value):
            if isinstance(value, np.ndarray):
                hash_list.append(value.tostring())
            else:
                hash_list.append(str(value))
    return hashlib.md5(repr(hash_list).encode()).hexdigest()
[ "def", "hash", "(", "self", ")", ":", "hash_list", "=", "[", "]", "for", "key", ",", "value", "in", "sorted", "(", "self", ".", "__dict__", ".", "items", "(", ")", ")", ":", "if", "not", "callable", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", ":", "hash_list", ".", "append", "(", "value", ".", "tostring", "(", ")", ")", "else", ":", "hash_list", ".", "append", "(", "str", "(", "value", ")", ")", "return", "hashlib", ".", "md5", "(", "repr", "(", "hash_list", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")" ]
Return a hash string computed on the PSF data.
[ "Return", "an", "hash", "string", "computed", "on", "the", "PSF", "data", "." ]
python
valid
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/coreBurkert.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/coreBurkert.py#L113-L132
def coreBurkAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):
    """
    deflection angle

    :param R:
    :param Rs:
    :param rho0:
    :param r_core:
    :param ax_x:
    :param ax_y:
    :return:
    """
    x = R * Rs ** -1
    p = Rs * r_core ** -1

    gx = self._G(x, p)

    a = 2 * rho0 * Rs ** 2 * gx / x

    return a * ax_x / R, a * ax_y / R
[ "def", "coreBurkAlpha", "(", "self", ",", "R", ",", "Rs", ",", "rho0", ",", "r_core", ",", "ax_x", ",", "ax_y", ")", ":", "x", "=", "R", "*", "Rs", "**", "-", "1", "p", "=", "Rs", "*", "r_core", "**", "-", "1", "gx", "=", "self", ".", "_G", "(", "x", ",", "p", ")", "a", "=", "2", "*", "rho0", "*", "Rs", "**", "2", "*", "gx", "/", "x", "return", "a", "*", "ax_x", "/", "R", ",", "a", "*", "ax_y", "/", "R" ]
deflection angle :param R: :param Rs: :param rho0: :param r_core: :param ax_x: :param ax_y: :return:
[ "deflection", "angle" ]
python
train
secdev/scapy
scapy/arch/bpf/supersocket.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/bpf/supersocket.py#L289-L306
def recv(self, x=BPF_BUFFER_LENGTH):
    """Receive a frame from the network"""

    if self.buffered_frames():
        # Get a frame from the buffer
        return self.get_frame()

    # Get data from BPF
    try:
        bpf_buffer = os.read(self.ins, x)
    except EnvironmentError as exc:
        if exc.errno != errno.EAGAIN:
            warning("BPF recv()", exc_info=True)
        return

    # Extract all frames from the BPF buffer
    self.extract_frames(bpf_buffer)
    return self.get_frame()
[ "def", "recv", "(", "self", ",", "x", "=", "BPF_BUFFER_LENGTH", ")", ":", "if", "self", ".", "buffered_frames", "(", ")", ":", "# Get a frame from the buffer", "return", "self", ".", "get_frame", "(", ")", "# Get data from BPF", "try", ":", "bpf_buffer", "=", "os", ".", "read", "(", "self", ".", "ins", ",", "x", ")", "except", "EnvironmentError", "as", "exc", ":", "if", "exc", ".", "errno", "!=", "errno", ".", "EAGAIN", ":", "warning", "(", "\"BPF recv()\"", ",", "exc_info", "=", "True", ")", "return", "# Extract all frames from the BPF buffer", "self", ".", "extract_frames", "(", "bpf_buffer", ")", "return", "self", ".", "get_frame", "(", ")" ]
Receive a frame from the network
[ "Receive", "a", "frame", "from", "the", "network" ]
python
train
Autodesk/pyccc
pyccc/python.py
https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/python.py#L292-L311
def prepare_namespace(self, func):
    """ Prepares the function to be run after deserializing it.
    Re-associates any previously bound variables and modules from the closure

    Returns:
        callable: ready-to-call function
    """
    if self.is_imethod:
        to_run = getattr(self.obj, self.imethod_name)
    else:
        to_run = func
    for varname, modulename in self.global_modules.items():
        to_run.__globals__[varname] = __import__(modulename)
    if self.global_closure:
        to_run.__globals__.update(self.global_closure)
    if self.global_functions:
        to_run.__globals__.update(self.global_functions)
    return to_run
[ "def", "prepare_namespace", "(", "self", ",", "func", ")", ":", "if", "self", ".", "is_imethod", ":", "to_run", "=", "getattr", "(", "self", ".", "obj", ",", "self", ".", "imethod_name", ")", "else", ":", "to_run", "=", "func", "for", "varname", ",", "modulename", "in", "self", ".", "global_modules", ".", "items", "(", ")", ":", "to_run", ".", "__globals__", "[", "varname", "]", "=", "__import__", "(", "modulename", ")", "if", "self", ".", "global_closure", ":", "to_run", ".", "__globals__", ".", "update", "(", "self", ".", "global_closure", ")", "if", "self", ".", "global_functions", ":", "to_run", ".", "__globals__", ".", "update", "(", "self", ".", "global_functions", ")", "return", "to_run" ]
Prepares the function to be run after deserializing it.
Re-associates any previously bound variables and modules from the closure

Returns:
    callable: ready-to-call function
[ "Prepares", "the", "function", "to", "be", "run", "after", "deserializing", "it", ".", "Re", "-", "associates", "any", "previously", "bound", "variables", "and", "modules", "from", "the", "closure" ]
python
train
Chilipp/docrep
docrep/__init__.py
https://github.com/Chilipp/docrep/blob/637971f76e1a6e1c70e36dcd1b02bbc37ba02487/docrep/__init__.py#L45-L105
def safe_modulo(s, meta, checked='', print_warning=True, stacklevel=2):
    """Safe version of the modulo operation (%) of strings

    Parameters
    ----------
    s: str
        string to apply the modulo operation with
    meta: dict or tuple
        meta informations to insert (usually via ``s % meta``)
    checked: {'KEY', 'VALUE'}, optional
        Security parameter for the recursive structure of this function. It
        can be set to 'VALUE' if an error shall be raised when facing a
        TypeError or ValueError or to 'KEY' if an error shall be raised when
        facing a KeyError. This parameter is mainly for internal processes.
    print_warning: bool
        If True and a key is not existent in `s`, a warning is raised
    stacklevel: int
        The stacklevel for the :func:`warnings.warn` function

    Examples
    --------
    The effects are demonstrated by this example::

        >>> from docrep import safe_modulo
        >>> s = "That's %(one)s string %(with)s missing 'with' and %s key"
        >>> s % {'one': 1}  # raises KeyError because of missing 'with'
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        KeyError: 'with'
        >>> s % {'one': 1, 'with': 2}  # raises TypeError because of '%s'
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        TypeError: not enough arguments for format string
        >>> safe_modulo(s, {'one': 1})
        "That's 1 string %(with)s missing 'with' and %s key"
    """
    try:
        return s % meta
    except (ValueError, TypeError, KeyError):
        # replace the missing fields by %%
        keys = substitution_pattern.finditer(s)
        for m in keys:
            key = m.group('key')
            if not isinstance(meta, dict) or key not in meta:
                if print_warning:
                    warn("%r is not a valid key!" % key, SyntaxWarning,
                         stacklevel)
                full = m.group()
                s = s.replace(full, '%' + full)
        if 'KEY' not in checked:
            return safe_modulo(s, meta, checked=checked + 'KEY',
                               print_warning=print_warning,
                               stacklevel=stacklevel)
        if not isinstance(meta, dict) or 'VALUE' in checked:
            raise
        s = re.sub(r"""(?<!%)(%%)*%(?!%)  # uneven number of %
                   \s*(\w|$)              # format strings""",
                   '%\g<0>', s, flags=re.VERBOSE)
        return safe_modulo(s, meta, checked=checked + 'VALUE',
                           print_warning=print_warning, stacklevel=stacklevel)
[ "def", "safe_modulo", "(", "s", ",", "meta", ",", "checked", "=", "''", ",", "print_warning", "=", "True", ",", "stacklevel", "=", "2", ")", ":", "try", ":", "return", "s", "%", "meta", "except", "(", "ValueError", ",", "TypeError", ",", "KeyError", ")", ":", "# replace the missing fields by %%", "keys", "=", "substitution_pattern", ".", "finditer", "(", "s", ")", "for", "m", "in", "keys", ":", "key", "=", "m", ".", "group", "(", "'key'", ")", "if", "not", "isinstance", "(", "meta", ",", "dict", ")", "or", "key", "not", "in", "meta", ":", "if", "print_warning", ":", "warn", "(", "\"%r is not a valid key!\"", "%", "key", ",", "SyntaxWarning", ",", "stacklevel", ")", "full", "=", "m", ".", "group", "(", ")", "s", "=", "s", ".", "replace", "(", "full", ",", "'%'", "+", "full", ")", "if", "'KEY'", "not", "in", "checked", ":", "return", "safe_modulo", "(", "s", ",", "meta", ",", "checked", "=", "checked", "+", "'KEY'", ",", "print_warning", "=", "print_warning", ",", "stacklevel", "=", "stacklevel", ")", "if", "not", "isinstance", "(", "meta", ",", "dict", ")", "or", "'VALUE'", "in", "checked", ":", "raise", "s", "=", "re", ".", "sub", "(", "r\"\"\"(?<!%)(%%)*%(?!%) # uneven number of %\n \\s*(\\w|$) # format strings\"\"\"", ",", "'%\\g<0>'", ",", "s", ",", "flags", "=", "re", ".", "VERBOSE", ")", "return", "safe_modulo", "(", "s", ",", "meta", ",", "checked", "=", "checked", "+", "'VALUE'", ",", "print_warning", "=", "print_warning", ",", "stacklevel", "=", "stacklevel", ")" ]
Safe version of the modulo operation (%) of strings

Parameters
----------
s: str
    string to apply the modulo operation with
meta: dict or tuple
    meta informations to insert (usually via ``s % meta``)
checked: {'KEY', 'VALUE'}, optional
    Security parameter for the recursive structure of this function. It can
    be set to 'VALUE' if an error shall be raised when facing a TypeError or
    ValueError or to 'KEY' if an error shall be raised when facing a
    KeyError. This parameter is mainly for internal processes.
print_warning: bool
    If True and a key is not existent in `s`, a warning is raised
stacklevel: int
    The stacklevel for the :func:`warnings.warn` function

Examples
--------
The effects are demonstrated by this example::

    >>> from docrep import safe_modulo
    >>> s = "That's %(one)s string %(with)s missing 'with' and %s key"
    >>> s % {'one': 1}  # raises KeyError because of missing 'with'
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    KeyError: 'with'
    >>> s % {'one': 1, 'with': 2}  # raises TypeError because of '%s'
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    TypeError: not enough arguments for format string
    >>> safe_modulo(s, {'one': 1})
    "That's 1 string %(with)s missing 'with' and %s key"
[ "Safe", "version", "of", "the", "modulo", "operation", "(", "%", ")", "of", "strings" ]
python
train
cltk/cltk
cltk/prosody/old_norse/verse.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/old_norse/verse.py#L357-L372
def from_short_lines_text(self, text: str):
    """
    Example from Völsupá 28

    >>> stanza = "Ein sat hon úti,\\nþá er inn aldni kom\\nyggjungr ása\\nok í augu leit.\\nHvers fregnið mik?\\nHví freistið mín?\\nAllt veit ek, Óðinn,\\nhvar þú auga falt,\\ní inum mæra\\nMímisbrunni.\\nDrekkr mjöð Mímir\\nmorgun hverjan\\naf veði Valföðrs.\\nVituð ér enn - eða hvat?"
    >>> us = UnspecifiedStanza()
    >>> us.from_short_lines_text(stanza)
    >>> [sl.text for sl in us.short_lines]
    ['Ein sat hon úti,', 'þá er inn aldni kom', 'yggjungr ása', 'ok í augu leit.', 'Hvers fregnið mik?', 'Hví freistið mín?', 'Allt veit ek, Óðinn,', 'hvar þú auga falt,', 'í inum mæra', 'Mímisbrunni.', 'Drekkr mjöð Mímir', 'morgun hverjan', 'af veði Valföðrs.', 'Vituð ér enn - eða hvat?']
    >>> us.long_lines

    :param text:
    :return:
    """
    Metre.from_short_lines_text(self, text)
    self.short_lines = [ShortLine(line) for line in text.split("\n") if line]
    self.long_lines = None
[ "def", "from_short_lines_text", "(", "self", ",", "text", ":", "str", ")", ":", "Metre", ".", "from_short_lines_text", "(", "self", ",", "text", ")", "self", ".", "short_lines", "=", "[", "ShortLine", "(", "line", ")", "for", "line", "in", "text", ".", "split", "(", "\"\\n\"", ")", "if", "line", "]", "self", ".", "long_lines", "=", "None" ]
Example from Völsupá 28

>>> stanza = "Ein sat hon úti,\\nþá er inn aldni kom\\nyggjungr ása\\nok í augu leit.\\nHvers fregnið mik?\\nHví freistið mín?\\nAllt veit ek, Óðinn,\\nhvar þú auga falt,\\ní inum mæra\\nMímisbrunni.\\nDrekkr mjöð Mímir\\nmorgun hverjan\\naf veði Valföðrs.\\nVituð ér enn - eða hvat?"
>>> us = UnspecifiedStanza()
>>> us.from_short_lines_text(stanza)
>>> [sl.text for sl in us.short_lines]
['Ein sat hon úti,', 'þá er inn aldni kom', 'yggjungr ása', 'ok í augu leit.', 'Hvers fregnið mik?', 'Hví freistið mín?', 'Allt veit ek, Óðinn,', 'hvar þú auga falt,', 'í inum mæra', 'Mímisbrunni.', 'Drekkr mjöð Mímir', 'morgun hverjan', 'af veði Valföðrs.', 'Vituð ér enn - eða hvat?']
>>> us.long_lines

:param text:
:return:
[ "Example", "from", "Völsupá", "28", ">>>", "stanza", "=", "Ein", "sat", "hon", "úti", "\\\\", "nþá", "er", "inn", "aldni", "kom", "\\\\", "nyggjungr", "ása", "\\\\", "nok", "í", "augu", "leit", ".", "\\\\", "nHvers", "fregnið", "mik?", "\\\\", "nHví", "freistið", "mín?", "\\\\", "nAllt", "veit", "ek", "Óðinn", "\\\\", "nhvar", "þú", "auga", "falt", "\\\\", "ní", "inum", "mæra", "\\\\", "nMímisbrunni", ".", "\\\\", "nDrekkr", "mjöð", "Mímir", "\\\\", "nmorgun", "hverjan", "\\\\", "naf", "veði", "Valföðrs", ".", "\\\\", "nVituð", "ér", "enn", "-", "eða", "hvat?", ">>>", "us", "=", "UnspecifiedStanza", "()", ">>>", "us", ".", "from_short_lines_text", "(", "stanza", ")", ">>>", "[", "sl", ".", "text", "for", "sl", "in", "us", ".", "short_lines", "]", "[", "Ein", "sat", "hon", "úti", "þá", "er", "inn", "aldni", "kom", "yggjungr", "ása", "ok", "í", "augu", "leit", ".", "Hvers", "fregnið", "mik?", "Hví", "freistið", "mín?", "Allt", "veit", "ek", "Óðinn", "hvar", "þú", "auga", "falt", "í", "inum", "mæra", "Mímisbrunni", ".", "Drekkr", "mjöð", "Mímir", "morgun", "hverjan", "af", "veði", "Valföðrs", ".", "Vituð", "ér", "enn", "-", "eða", "hvat?", "]", ">>>", "us", ".", "long_lines" ]
python
train
sassoftware/saspy
saspy/sasViyaML.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasViyaML.py#L303-L337
def svmachine(self, data: ['SASdata', str] = None,
              autotune: str = None,
              code: str = None,
              id: str = None,
              input: [str, list, dict] = None,
              kernel: str = None,
              output: [str, bool, 'SASdata'] = None,
              partition: str = None,
              savestate: str = None,
              solver: str = None,
              target: [str, list, dict] = None,
              procopts: str = None,
              stmtpassthrough: str = None,
              **kwargs: dict) -> 'SASresults':
    """
    Python method to call the SVMACHINE procedure

    Documentation link:
    https://go.documentation.sas.com/?docsetId=casml&docsetTarget=casml_svmachine_toc.htm&docsetVersion=8.3&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param autotune: The autotune variable can only be a string type.
    :param code: The code variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
    :param kernel: The kernel variable can only be a string type.
    :param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
    :param partition: The partition variable can only be a string type.
    :param savestate: The savestate variable can only be a string type.
    :param solver: The solver variable can only be a string type.
    :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
[ "def", "svmachine", "(", "self", ",", "data", ":", "[", "'SASdata'", ",", "str", "]", "=", "None", ",", "autotune", ":", "str", "=", "None", ",", "code", ":", "str", "=", "None", ",", "id", ":", "str", "=", "None", ",", "input", ":", "[", "str", ",", "list", ",", "dict", "]", "=", "None", ",", "kernel", ":", "str", "=", "None", ",", "output", ":", "[", "str", ",", "bool", ",", "'SASdata'", "]", "=", "None", ",", "partition", ":", "str", "=", "None", ",", "savestate", ":", "str", "=", "None", ",", "solver", ":", "str", "=", "None", ",", "target", ":", "[", "str", ",", "list", ",", "dict", "]", "=", "None", ",", "procopts", ":", "str", "=", "None", ",", "stmtpassthrough", ":", "str", "=", "None", ",", "*", "*", "kwargs", ":", "dict", ")", "->", "'SASresults'", ":" ]
Python method to call the SVMACHINE procedure

Documentation link:
https://go.documentation.sas.com/?docsetId=casml&docsetTarget=casml_svmachine_toc.htm&docsetVersion=8.3&locale=en

:param data: SASdata object or string. This parameter is required.
:param autotune: The autotune variable can only be a string type.
:param code: The code variable can only be a string type.
:param id: The id variable can only be a string type.
:param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:param kernel: The kernel variable can only be a string type.
:param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:param partition: The partition variable can only be a string type.
:param savestate: The savestate variable can only be a string type.
:param solver: The solver variable can only be a string type.
:param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
[ "Python", "method", "to", "call", "the", "SVMACHINE", "procedure" ]
python
train
sibirrer/lenstronomy
lenstronomy/Workflow/psf_fitting.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Workflow/psf_fitting.py#L146-L169
def image_single_point_source(self, image_model_class, kwargs_lens,
                              kwargs_source, kwargs_lens_light, kwargs_ps):
    """
    return model without including the point source contributions as a list
    (for each point source individually)

    :param image_model_class: ImageModel class instance
    :param kwargs_lens: lens model kwargs list
    :param kwargs_source: source model kwargs list
    :param kwargs_lens_light: lens light model kwargs list
    :param kwargs_ps: point source model kwargs list
    :return: list of images with point source isolated
    """
    # reconstructed model with given psf
    model, error_map, cov_param, param = image_model_class.image_linear_solve(
        kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
    #model = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
    data = image_model_class.Data.data
    mask = image_model_class.ImageNumerics.mask
    point_source_list = image_model_class.point_sources_list(kwargs_ps, kwargs_lens)
    n = len(point_source_list)
    model_single_source_list = []
    for i in range(n):
        model_single_source = (data - model + point_source_list[i]) * mask
        model_single_source_list.append(model_single_source)
    return model_single_source_list
[ "def", "image_single_point_source", "(", "self", ",", "image_model_class", ",", "kwargs_lens", ",", "kwargs_source", ",", "kwargs_lens_light", ",", "kwargs_ps", ")", ":", "# reconstructed model with given psf", "model", ",", "error_map", ",", "cov_param", ",", "param", "=", "image_model_class", ".", "image_linear_solve", "(", "kwargs_lens", ",", "kwargs_source", ",", "kwargs_lens_light", ",", "kwargs_ps", ")", "#model = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)", "data", "=", "image_model_class", ".", "Data", ".", "data", "mask", "=", "image_model_class", ".", "ImageNumerics", ".", "mask", "point_source_list", "=", "image_model_class", ".", "point_sources_list", "(", "kwargs_ps", ",", "kwargs_lens", ")", "n", "=", "len", "(", "point_source_list", ")", "model_single_source_list", "=", "[", "]", "for", "i", "in", "range", "(", "n", ")", ":", "model_single_source", "=", "(", "data", "-", "model", "+", "point_source_list", "[", "i", "]", ")", "*", "mask", "model_single_source_list", ".", "append", "(", "model_single_source", ")", "return", "model_single_source_list" ]
return model without including the point source contributions as a list
(for each point source individually)

:param image_model_class: ImageModel class instance
:param kwargs_lens: lens model kwargs list
:param kwargs_source: source model kwargs list
:param kwargs_lens_light: lens light model kwargs list
:param kwargs_ps: point source model kwargs list
:return: list of images with point source isolated
[ "return", "model", "without", "including", "the", "point", "source", "contributions", "as", "a", "list", "(", "for", "each", "point", "source", "individually", ")", ":", "param", "image_model_class", ":", "ImageModel", "class", "instance", ":", "param", "kwargs_lens", ":", "lens", "model", "kwargs", "list", ":", "param", "kwargs_source", ":", "source", "model", "kwargs", "list", ":", "param", "kwargs_lens_light", ":", "lens", "light", "model", "kwargs", "list", ":", "param", "kwargs_ps", ":", "point", "source", "model", "kwargs", "list", ":", "return", ":", "list", "of", "images", "with", "point", "source", "isolated" ]
python
train
paylogic/pip-accel
pip_accel/config.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/config.py#L123-L143
def load_configuration_file(self, configuration_file):
    """
    Load configuration defaults from a configuration file.

    :param configuration_file: The pathname of a configuration file (a string).
    :raises: :exc:`Exception` when the configuration file cannot be loaded.
    """
    configuration_file = parse_path(configuration_file)
    logger.debug("Loading configuration file: %s", configuration_file)
    parser = configparser.RawConfigParser()
    files_loaded = parser.read(configuration_file)
    if len(files_loaded) != 1:
        msg = "Failed to load configuration file! (%s)"
        raise Exception(msg % configuration_file)
    elif not parser.has_section('pip-accel'):
        msg = "Missing 'pip-accel' section in configuration file! (%s)"
        raise Exception(msg % configuration_file)
    else:
        self.configuration.update(parser.items('pip-accel'))
[ "def", "load_configuration_file", "(", "self", ",", "configuration_file", ")", ":", "configuration_file", "=", "parse_path", "(", "configuration_file", ")", "logger", ".", "debug", "(", "\"Loading configuration file: %s\"", ",", "configuration_file", ")", "parser", "=", "configparser", ".", "RawConfigParser", "(", ")", "files_loaded", "=", "parser", ".", "read", "(", "configuration_file", ")", "if", "len", "(", "files_loaded", ")", "!=", "1", ":", "msg", "=", "\"Failed to load configuration file! (%s)\"", "raise", "Exception", "(", "msg", "%", "configuration_file", ")", "elif", "not", "parser", ".", "has_section", "(", "'pip-accel'", ")", ":", "msg", "=", "\"Missing 'pip-accel' section in configuration file! (%s)\"", "raise", "Exception", "(", "msg", "%", "configuration_file", ")", "else", ":", "self", ".", "configuration", ".", "update", "(", "parser", ".", "items", "(", "'pip-accel'", ")", ")" ]
Load configuration defaults from a configuration file.

:param configuration_file: The pathname of a configuration file (a string).
:raises: :exc:`Exception` when the configuration file cannot be loaded.
[ "Load", "configuration", "defaults", "from", "a", "configuration", "file", "." ]
python
train
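The method above is plain configparser plumbing: read one file, require a 'pip-accel' section, merge its items. A minimal standalone sketch of the same pattern; the file name 'pip-accel.ini' and the sample section contents are illustrative assumptions, not anything pip-accel ships.

import configparser

# Assumed sample file 'pip-accel.ini':
#   [pip-accel]
#   data-directory = /tmp/pip-accel
parser = configparser.RawConfigParser()
files_loaded = parser.read('pip-accel.ini')
if len(files_loaded) != 1:
    raise Exception("Failed to load configuration file! (pip-accel.ini)")
if not parser.has_section('pip-accel'):
    raise Exception("Missing 'pip-accel' section in configuration file!")
configuration = dict(parser.items('pip-accel'))
print(configuration)  # e.g. {'data-directory': '/tmp/pip-accel'}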
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/bindepend.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/bindepend.py#L499-L542
def findLibrary(name):
    """
    Look for a library in the system.

    Emulate the algorithm used by dlopen. `name` must include the prefix,
    e.g. ``libpython2.4.so``
    """
    assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)"

    lib = None

    # Look in the LD_LIBRARY_PATH
    lp = compat.getenv('LD_LIBRARY_PATH', '')
    for path in lp.split(os.pathsep):
        libs = glob(os.path.join(path, name + '*'))
        if libs:
            lib = libs[0]
            break

    # Look in /etc/ld.so.cache
    if lib is None:
        expr = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name)
        m = re.search(expr, compat.exec_command('/sbin/ldconfig', '-p'))
        if m:
            lib = m.group(0)

    # Look in the known safe paths
    if lib is None:
        paths = ['/lib', '/usr/lib']
        if is_aix:
            paths.append('/opt/freeware/lib')
        for path in paths:
            libs = glob(os.path.join(path, name + '*'))
            if libs:
                lib = libs[0]
                break

    # give up :(
    if lib is None:
        return None

    # Resolve the file name into the soname
    dir, file = os.path.split(lib)
    return os.path.join(dir, getSoname(lib))
[ "def", "findLibrary", "(", "name", ")", ":", "assert", "is_unix", ",", "\"Current implementation for Unix only (Linux, Solaris, AIX)\"", "lib", "=", "None", "# Look in the LD_LIBRARY_PATH", "lp", "=", "compat", ".", "getenv", "(", "'LD_LIBRARY_PATH'", ",", "''", ")", "for", "path", "in", "lp", ".", "split", "(", "os", ".", "pathsep", ")", ":", "libs", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", "+", "'*'", ")", ")", "if", "libs", ":", "lib", "=", "libs", "[", "0", "]", "break", "# Look in /etc/ld.so.cache", "if", "lib", "is", "None", ":", "expr", "=", "r'/[^\\(\\)\\s]*%s\\.[^\\(\\)\\s]*'", "%", "re", ".", "escape", "(", "name", ")", "m", "=", "re", ".", "search", "(", "expr", ",", "compat", ".", "exec_command", "(", "'/sbin/ldconfig'", ",", "'-p'", ")", ")", "if", "m", ":", "lib", "=", "m", ".", "group", "(", "0", ")", "# Look in the known safe paths", "if", "lib", "is", "None", ":", "paths", "=", "[", "'/lib'", ",", "'/usr/lib'", "]", "if", "is_aix", ":", "paths", ".", "append", "(", "'/opt/freeware/lib'", ")", "for", "path", "in", "paths", ":", "libs", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", "+", "'*'", ")", ")", "if", "libs", ":", "lib", "=", "libs", "[", "0", "]", "break", "# give up :(", "if", "lib", "is", "None", ":", "return", "None", "# Resolve the file name into the soname", "dir", ",", "file", "=", "os", ".", "path", ".", "split", "(", "lib", ")", "return", "os", ".", "path", ".", "join", "(", "dir", ",", "getSoname", "(", "lib", ")", ")" ]
Look for a library in the system.

Emulate the algorithm used by dlopen. `name` must include the prefix,
e.g. ``libpython2.4.so``
[ "Look", "for", "a", "library", "in", "the", "system", "." ]
python
train
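A hedged, self-contained sketch of the same three-stage lookup (LD_LIBRARY_PATH, the ldconfig cache, then well-known directories); it swaps PyInstaller's compat helpers for os.environ and subprocess, and 'libz' is just an example name.

import os
import re
import subprocess
from glob import glob

def find_library(name):
    # 1) any directory listed in LD_LIBRARY_PATH
    for path in os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep):
        libs = glob(os.path.join(path, name + '*'))
        if libs:
            return libs[0]
    # 2) the ldconfig cache
    out = subprocess.run(['/sbin/ldconfig', '-p'],
                         capture_output=True, text=True).stdout
    m = re.search(r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name), out)
    if m:
        return m.group(0)
    # 3) the usual system directories
    for path in ('/lib', '/usr/lib'):
        libs = glob(os.path.join(path, name + '*'))
        if libs:
            return libs[0]
    return None

print(find_library('libz'))  # e.g. /usr/lib/x86_64-linux-gnu/libz.so.1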
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L44-L58
def hide_routemap_holder_route_map_instance(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder",
                                         xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance = ET.SubElement(route_map, "instance")
    instance.text = kwargs.pop('instance')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "hide_routemap_holder_route_map_instance", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-routemap-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ip-policy\"", ")", "route_map", "=", "ET", ".", "SubElement", "(", "hide_routemap_holder", ",", "\"route-map\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "action_rm_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"action-rm\"", ")", "action_rm_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'action_rm'", ")", "instance", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"instance\"", ")", "instance", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ladybug-tools/uwg
uwg/element.py
https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/element.py#L233-L254
def qsat(self, temp, pres, parameter):
    """
    Calculate (qsat_lst) vector of saturation humidity from:
    temp = vector of element layer temperatures
    pres = pressure (at current timestep).
    """
    gamw = (parameter.cl - parameter.cpv) / parameter.rv
    betaw = (parameter.lvtt / parameter.rv) + (gamw * parameter.tt)
    alpw = math.log(parameter.estt) + (betaw / parameter.tt) + (gamw * math.log(parameter.tt))
    work2 = parameter.r / parameter.rv
    foes_lst = [0 for i in range(len(temp))]
    work1_lst = [0 for i in range(len(temp))]
    qsat_lst = [0 for i in range(len(temp))]
    for i in range(len(temp)):
        # saturation vapor pressure
        foes_lst[i] = math.exp(alpw - betaw / temp[i] - gamw * math.log(temp[i]))
        work1_lst[i] = foes_lst[i] / pres[i]
        # saturation humidity
        qsat_lst[i] = work2 * work1_lst[i] / (1. + (work2 - 1.) * work1_lst[i])

    return qsat_lst
[ "def", "qsat", "(", "self", ",", "temp", ",", "pres", ",", "parameter", ")", ":", "gamw", "=", "(", "parameter", ".", "cl", "-", "parameter", ".", "cpv", ")", "/", "parameter", ".", "rv", "betaw", "=", "(", "parameter", ".", "lvtt", "/", "parameter", ".", "rv", ")", "+", "(", "gamw", "*", "parameter", ".", "tt", ")", "alpw", "=", "math", ".", "log", "(", "parameter", ".", "estt", ")", "+", "(", "betaw", "/", "parameter", ".", "tt", ")", "+", "(", "gamw", "*", "math", ".", "log", "(", "parameter", ".", "tt", ")", ")", "work2", "=", "parameter", ".", "r", "/", "parameter", ".", "rv", "foes_lst", "=", "[", "0", "for", "i", "in", "range", "(", "len", "(", "temp", ")", ")", "]", "work1_lst", "=", "[", "0", "for", "i", "in", "range", "(", "len", "(", "temp", ")", ")", "]", "qsat_lst", "=", "[", "0", "for", "i", "in", "range", "(", "len", "(", "temp", ")", ")", "]", "for", "i", "in", "range", "(", "len", "(", "temp", ")", ")", ":", "# saturation vapor pressure", "foes_lst", "[", "i", "]", "=", "math", ".", "exp", "(", "alpw", "-", "betaw", "/", "temp", "[", "i", "]", "-", "gamw", "*", "math", ".", "log", "(", "temp", "[", "i", "]", ")", ")", "work1_lst", "[", "i", "]", "=", "foes_lst", "[", "i", "]", "/", "pres", "[", "i", "]", "# saturation humidity", "qsat_lst", "[", "i", "]", "=", "work2", "*", "work1_lst", "[", "i", "]", "/", "(", "1.", "+", "(", "work2", "-", "1.", ")", "*", "work1_lst", "[", "i", "]", ")", "return", "qsat_lst" ]
Calculate (qsat_lst) vector of saturation humidity from:
temp = vector of element layer temperatures
pres = pressure (at current timestep).
[ "Calculate", "(", "qsat_lst", ")", "vector", "of", "saturation", "humidity", "from", ":", "temp", "=", "vector", "of", "element", "layer", "temperatures", "pres", "=", "pressure", "(", "at", "current", "timestep", ")", "." ]
python
train
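The formula is a standard saturation-humidity calculation; a standalone sketch follows, where the physical constants are typical textbook values rather than the uwg Param object's own fields.

import math

R, RV = 287.05, 461.5       # gas constants, dry air / water vapour [J/(kg K)]
CL, CPV = 4218.0, 1846.0    # heat capacities, liquid water / vapour [J/(kg K)]
LVTT, TT, ESTT = 2.5008e6, 273.16, 611.14  # latent heat, triple-point T, e_s

gamw = (CL - CPV) / RV
betaw = LVTT / RV + gamw * TT
alpw = math.log(ESTT) + betaw / TT + gamw * math.log(TT)

def qsat_scalar(temp_k, pres_pa):
    foes = math.exp(alpw - betaw / temp_k - gamw * math.log(temp_k))  # e_s [Pa]
    work1 = foes / pres_pa
    work2 = R / RV
    return work2 * work1 / (1. + (work2 - 1.) * work1)

print(qsat_scalar(293.15, 101325.0))  # ~0.0145 kg/kg at 20 C, sea level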
Erotemic/utool
utool/util_hash.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L829-L897
def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1,
                  hexdigest=False):
    r"""
    For better hashes use hasher=hashlib.sha256, and keep stride=1

    Args:
        fpath (str):  file path string
        blocksize (int): 2 ** 16. Affects speed of reading file
        hasher (None):  defaults to sha1 for fast (but insecure) hashing
        stride (int): strides > 1 skip data to hash, useful for faster
            hashing, but less accurate, also makes hash dependant on
            blocksize.

    References:
        http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
        http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2

    CommandLine:
        python -m utool.util_hash --test-get_file_hash
        python -m utool.util_hash --test-get_file_hash:0
        python -m utool.util_hash --test-get_file_hash:1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> fpath = ut.grab_test_imgpath('patsy.jpg')
        >>> #blocksize = 65536  # 2 ** 16
        >>> blocksize = 2 ** 16
        >>> hasher = None
        >>> stride = 1
        >>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
        >>> result = repr(hashbytes_20)
        >>> print(result)
        '7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9'
        '\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4'
        '\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5'

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type')
        >>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt')
        >>> ut.write_to(fpath, ut.lorium_ipsum())
        >>> blocksize = 2 ** 3
        >>> hasher = None
        >>> stride = 2
        >>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
        >>> result = repr(hashbytes_20)
        >>> print(result)
        '5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n'

    Ignore:
        file_ = open(fpath, 'rb')
    """
    if hasher is None:
        hasher = hashlib.sha1()
    with open(fpath, 'rb') as file_:
        buf = file_.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            if stride > 1:
                file_.seek(blocksize * (stride - 1), 1)  # skip blocks
            buf = file_.read(blocksize)
        if hexdigest:
            return hasher.hexdigest()
        else:
            return hasher.digest()
[ "def", "get_file_hash", "(", "fpath", ",", "blocksize", "=", "65536", ",", "hasher", "=", "None", ",", "stride", "=", "1", ",", "hexdigest", "=", "False", ")", ":", "if", "hasher", "is", "None", ":", "hasher", "=", "hashlib", ".", "sha1", "(", ")", "with", "open", "(", "fpath", ",", "'rb'", ")", "as", "file_", ":", "buf", "=", "file_", ".", "read", "(", "blocksize", ")", "while", "len", "(", "buf", ")", ">", "0", ":", "hasher", ".", "update", "(", "buf", ")", "if", "stride", ">", "1", ":", "file_", ".", "seek", "(", "blocksize", "*", "(", "stride", "-", "1", ")", ",", "1", ")", "# skip blocks", "buf", "=", "file_", ".", "read", "(", "blocksize", ")", "if", "hexdigest", ":", "return", "hasher", ".", "hexdigest", "(", ")", "else", ":", "return", "hasher", ".", "digest", "(", ")" ]
r""" For better hashes use hasher=hashlib.sha256, and keep stride=1 Args: fpath (str): file path string blocksize (int): 2 ** 16. Affects speed of reading file hasher (None): defaults to sha1 for fast (but insecure) hashing stride (int): strides > 1 skip data to hash, useful for faster hashing, but less accurate, also makes hash dependant on blocksize. References: http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2 CommandLine: python -m utool.util_hash --test-get_file_hash python -m utool.util_hash --test-get_file_hash:0 python -m utool.util_hash --test-get_file_hash:1 Example: >>> # DISABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> fpath = ut.grab_test_imgpath('patsy.jpg') >>> #blocksize = 65536 # 2 ** 16 >>> blocksize = 2 ** 16 >>> hasher = None >>> stride = 1 >>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride) >>> result = repr(hashbytes_20) >>> print(result) '7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9' '\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4' '\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5' Example: >>> # DISABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type') >>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt') >>> ut.write_to(fpath, ut.lorium_ipsum()) >>> blocksize = 2 ** 3 >>> hasher = None >>> stride = 2 >>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride) >>> result = repr(hashbytes_20) >>> print(result) '5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n' Ignore: file_ = open(fpath, 'rb')
[ "r", "For", "better", "hashes", "use", "hasher", "=", "hashlib", ".", "sha256", "and", "keep", "stride", "=", "1" ]
python
train
python-openxml/python-docx
docx/text/parfmt.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/text/parfmt.py#L114-L128
def line_spacing(self):
    """
    |float| or |Length| value specifying the space between baselines in
    successive lines of the paragraph. A value of |None| indicates line
    spacing is inherited from the style hierarchy. A float value, e.g.
    ``2.0`` or ``1.75``, indicates spacing is applied in multiples of
    line heights. A |Length| value such as ``Pt(12)`` indicates spacing
    is a fixed height. The |Pt| value class is a convenient way to apply
    line spacing in units of points. Assigning |None| resets line
    spacing to inherit from the style hierarchy.
    """
    pPr = self._element.pPr
    if pPr is None:
        return None
    return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule)
[ "def", "line_spacing", "(", "self", ")", ":", "pPr", "=", "self", ".", "_element", ".", "pPr", "if", "pPr", "is", "None", ":", "return", "None", "return", "self", ".", "_line_spacing", "(", "pPr", ".", "spacing_line", ",", "pPr", ".", "spacing_lineRule", ")" ]
|float| or |Length| value specifying the space between baselines in successive lines of the paragraph. A value of |None| indicates line spacing is inherited from the style hierarchy. A float value, e.g. ``2.0`` or ``1.75``, indicates spacing is applied in multiples of line heights. A |Length| value such as ``Pt(12)`` indicates spacing is a fixed height. The |Pt| value class is a convenient way to apply line spacing in units of points. Assigning |None| resets line spacing to inherit from the style hierarchy.
[ "|float|", "or", "|Length|", "value", "specifying", "the", "space", "between", "baselines", "in", "successive", "lines", "of", "the", "paragraph", ".", "A", "value", "of", "|None|", "indicates", "line", "spacing", "is", "inherited", "from", "the", "style", "hierarchy", ".", "A", "float", "value", "e", ".", "g", ".", "2", ".", "0", "or", "1", ".", "75", "indicates", "spacing", "is", "applied", "in", "multiples", "of", "line", "heights", ".", "A", "|Length|", "value", "such", "as", "Pt", "(", "12", ")", "indicates", "spacing", "is", "a", "fixed", "height", ".", "The", "|Pt|", "value", "class", "is", "a", "convenient", "way", "to", "apply", "line", "spacing", "in", "units", "of", "points", ".", "Assigning", "|None|", "resets", "line", "spacing", "to", "inherit", "from", "the", "style", "hierarchy", "." ]
python
train
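The property above backs python-docx's public paragraph-format API, which can be exercised directly; only the output file name is illustrative.

from docx import Document
from docx.shared import Pt

doc = Document()
para = doc.add_paragraph('spaced text')
fmt = para.paragraph_format
print(fmt.line_spacing)     # None: inherited from the style hierarchy
fmt.line_spacing = 1.75     # a float means multiples of line heights
fmt.line_spacing = Pt(12)   # a Length means a fixed height in points
doc.save('spacing_demo.docx')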
DataDog/integrations-core
datadog_checks_dev/datadog_checks/dev/tooling/requirements.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/requirements.py#L163-L174
def read_packages(reqs_file):
    """
    Generator yielding one Package instance for every corresponding line in a
    requirements file
    """
    for line in stream_file_lines(reqs_file):
        line = line.strip()
        if not line.startswith(('#', '--hash')):
            match = DEP_PATTERN.match(line)
            if match:
                package, version, marker = match.groups()
                yield Package(package.lower(), version, marker)
[ "def", "read_packages", "(", "reqs_file", ")", ":", "for", "line", "in", "stream_file_lines", "(", "reqs_file", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ".", "startswith", "(", "(", "'#'", ",", "'--hash'", ")", ")", ":", "match", "=", "DEP_PATTERN", ".", "match", "(", "line", ")", "if", "match", ":", "package", ",", "version", ",", "marker", "=", "match", ".", "groups", "(", ")", "yield", "Package", "(", "package", ".", "lower", "(", ")", ",", "version", ",", "marker", ")" ]
Generator yielding one Package instance for every corresponding line in a requirements file
[ "Generator", "yielding", "one", "Package", "instance", "for", "every", "corresponing", "line", "in", "a", "requirements", "file" ]
python
train
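A standalone sketch of the same parse loop; DEP_PATTERN and stream_file_lines are defined elsewhere in the module, so the regex and the in-memory lines below are assumed stand-ins covering the common 'name==version; marker' shape.

import re
from collections import namedtuple

Package = namedtuple('Package', 'name version marker')
# Assumed stand-in for DEP_PATTERN: name==version with an optional ; marker
DEP_PATTERN = re.compile(r'^([A-Za-z0-9._-]+)==([^;\s]+)(?:\s*;\s*(.*))?$')

lines = [
    'requests==2.22.0',
    "enum34==1.1.6; python_version < '3.4'",
    '# a comment',
    '--hash=sha256:deadbeef',
]
for line in lines:
    line = line.strip()
    if not line.startswith(('#', '--hash')):
        match = DEP_PATTERN.match(line)
        if match:
            package, version, marker = match.groups()
            print(Package(package.lower(), version, marker))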
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L1081-L1090
def certificate_rabbitmq(self):
    """
    Gets the Certificate RabbitMQ API client.

    Returns:
        CertificateRabbitMQ:
    """
    if not self.__certificate_rabbitmq:
        self.__certificate_rabbitmq = CertificateRabbitMQ(self.__connection)
    return self.__certificate_rabbitmq
[ "def", "certificate_rabbitmq", "(", "self", ")", ":", "if", "not", "self", ".", "__certificate_rabbitmq", ":", "self", ".", "__certificate_rabbitmq", "=", "CertificateRabbitMQ", "(", "self", ".", "__connection", ")", "return", "self", ".", "__certificate_rabbitmq" ]
Gets the Certificate RabbitMQ API client.

Returns:
    CertificateRabbitMQ:
[ "Gets", "the", "Certificate", "RabbitMQ", "API", "client", "." ]
python
train
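The accessor is a lazy-initialization property: build the sub-client on first use, then reuse it. A generic, framework-free sketch of that pattern, with toy class names standing in for OneView's.

class ApiClient:                       # toy stand-in for CertificateRabbitMQ
    def __init__(self, connection):
        self.connection = connection

class Client:                          # toy stand-in for OneViewClient
    def __init__(self, connection):
        self.__connection = connection
        self.__certificate_rabbitmq = None

    @property
    def certificate_rabbitmq(self):
        # construct once, cache on the instance, return the cached object
        if not self.__certificate_rabbitmq:
            self.__certificate_rabbitmq = ApiClient(self.__connection)
        return self.__certificate_rabbitmq

client = Client(connection='conn')
assert client.certificate_rabbitmq is client.certificate_rabbitmq  # cached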
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L500-L526
def add_substitution(self, substitution):
    """Add a substitution to the email

    :param substitution: The substitution to add to the email
    :type substitution: Substitution
    """
    if substitution.personalization:
        try:
            personalization = \
                self._personalizations[substitution.personalization]
            has_internal_personalization = True
        except IndexError:
            personalization = Personalization()
            has_internal_personalization = False
        personalization.add_substitution(substitution)

        if not has_internal_personalization:
            self.add_personalization(
                personalization, index=substitution.personalization)
    else:
        if isinstance(substitution, list):
            for s in substitution:
                for p in self.personalizations:
                    p.add_substitution(s)
        else:
            for p in self.personalizations:
                p.add_substitution(substitution)
[ "def", "add_substitution", "(", "self", ",", "substitution", ")", ":", "if", "substitution", ".", "personalization", ":", "try", ":", "personalization", "=", "self", ".", "_personalizations", "[", "substitution", ".", "personalization", "]", "has_internal_personalization", "=", "True", "except", "IndexError", ":", "personalization", "=", "Personalization", "(", ")", "has_internal_personalization", "=", "False", "personalization", ".", "add_substitution", "(", "substitution", ")", "if", "not", "has_internal_personalization", ":", "self", ".", "add_personalization", "(", "personalization", ",", "index", "=", "substitution", ".", "personalization", ")", "else", ":", "if", "isinstance", "(", "substitution", ",", "list", ")", ":", "for", "s", "in", "substitution", ":", "for", "p", "in", "self", ".", "personalizations", ":", "p", ".", "add_substitution", "(", "s", ")", "else", ":", "for", "p", "in", "self", ".", "personalizations", ":", "p", ".", "add_substitution", "(", "substitution", ")" ]
Add a substitution to the email

:param substitution: The substitution to add to the email
:type substitution: Substitution
[ "Add", "a", "substitution", "to", "the", "email" ]
python
train
mattjj/pybasicbayes
pybasicbayes/util/stats.py
https://github.com/mattjj/pybasicbayes/blob/76aef00f011415cc5c858cd1a101f3aab971a62d/pybasicbayes/util/stats.py#L124-L150
def sample_truncated_gaussian(mu=0, sigma=1, lb=-np.Inf, ub=np.Inf):
    """
    Sample a truncated normal with the specified params. This is not the most
    stable way but it works as long as the truncation region is not too far
    from the mean.
    """
    # Broadcast arrays to be of the same shape
    mu, sigma, lb, ub = np.broadcast_arrays(mu, sigma, lb, ub)
    shp = mu.shape
    if np.allclose(sigma, 0.0):
        return mu

    cdflb = normal_cdf(lb, mu, sigma)
    cdfub = normal_cdf(ub, mu, sigma)

    # Sample uniformly from the CDF
    cdfsamples = cdflb + np.random.rand(*shp) * (cdfub - cdflb)

    # Clip the CDF samples so that we can invert them
    cdfsamples = np.clip(cdfsamples, 1e-15, 1 - 1e-15)

    zs = -np.sqrt(2) * special.erfcinv(2 * cdfsamples)

    # Transform the standard normal samples
    xs = sigma * zs + mu
    xs = np.clip(xs, lb, ub)
    return xs
[ "def", "sample_truncated_gaussian", "(", "mu", "=", "0", ",", "sigma", "=", "1", ",", "lb", "=", "-", "np", ".", "Inf", ",", "ub", "=", "np", ".", "Inf", ")", ":", "# Broadcast arrays to be of the same shape", "mu", ",", "sigma", ",", "lb", ",", "ub", "=", "np", ".", "broadcast_arrays", "(", "mu", ",", "sigma", ",", "lb", ",", "ub", ")", "shp", "=", "mu", ".", "shape", "if", "np", ".", "allclose", "(", "sigma", ",", "0.0", ")", ":", "return", "mu", "cdflb", "=", "normal_cdf", "(", "lb", ",", "mu", ",", "sigma", ")", "cdfub", "=", "normal_cdf", "(", "ub", ",", "mu", ",", "sigma", ")", "# Sample uniformly from the CDF", "cdfsamples", "=", "cdflb", "+", "np", ".", "random", ".", "rand", "(", "*", "shp", ")", "*", "(", "cdfub", "-", "cdflb", ")", "# Clip the CDF samples so that we can invert them", "cdfsamples", "=", "np", ".", "clip", "(", "cdfsamples", ",", "1e-15", ",", "1", "-", "1e-15", ")", "zs", "=", "-", "np", ".", "sqrt", "(", "2", ")", "*", "special", ".", "erfcinv", "(", "2", "*", "cdfsamples", ")", "# Transform the standard normal samples", "xs", "=", "sigma", "*", "zs", "+", "mu", "xs", "=", "np", ".", "clip", "(", "xs", ",", "lb", ",", "ub", ")", "return", "xs" ]
Sample a truncated normal with the specified params. This is not the most stable way but it works as long as the truncation region is not too far from the mean.
[ "Sample", "a", "truncated", "normal", "with", "the", "specified", "params", ".", "This", "is", "not", "the", "most", "stable", "way", "but", "it", "works", "as", "long", "as", "the", "truncation", "region", "is", "not", "too", "far", "from", "the", "mean", "." ]
python
train
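A self-contained demo of the inverse-CDF sampling trick used above, with scipy's normal CDF standing in for pybasicbayes' own normal_cdf helper.

import numpy as np
from scipy import special
from scipy.stats import norm

rng = np.random.default_rng(0)
mu, sigma, lb, ub = 0.0, 1.0, 0.5, 2.0

cdflb, cdfub = norm.cdf(lb, mu, sigma), norm.cdf(ub, mu, sigma)
u = cdflb + rng.random(10000) * (cdfub - cdflb)  # uniform on [F(lb), F(ub)]
u = np.clip(u, 1e-15, 1 - 1e-15)
z = -np.sqrt(2) * special.erfcinv(2 * u)         # invert the standard normal CDF
x = np.clip(sigma * z + mu, lb, ub)

print(x.min() >= lb, x.max() <= ub)  # True True
print(x.mean())                      # ~1.04, the truncated-normal mean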
janpipek/physt
physt/histogram1d.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram1d.py#L208-L220
def mean(self) -> Optional[float]:
    """Statistical mean of all values entered into histogram.

    This number is precise, because we keep the necessary data
    separate from bin contents.
    """
    if self._stats:    # TODO: should be true always?
        if self.total > 0:
            return self._stats["sum"] / self.total
        else:
            return np.nan
    else:
        return None
[ "def", "mean", "(", "self", ")", "->", "Optional", "[", "float", "]", ":", "if", "self", ".", "_stats", ":", "# TODO: should be true always?", "if", "self", ".", "total", ">", "0", ":", "return", "self", ".", "_stats", "[", "\"sum\"", "]", "/", "self", ".", "total", "else", ":", "return", "np", ".", "nan", "else", ":", "return", "None" ]
Statistical mean of all values entered into histogram. This number is precise, because we keep the necessary data separate from bin contents.
[ "Statistical", "mean", "of", "all", "values", "entered", "into", "histogram", "." ]
python
train
mandiant/ioc_writer
ioc_writer/ioc_common.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_common.py#L951-L963
def make_serviceitem_servicedllmd5sum(servicedll_md5, condition='is', negate=False):
    """
    Create a node for ServiceItem/serviceDLLmd5sum

    :return: An IndicatorItem represented as an Element node
    """
    document = 'ServiceItem'
    search = 'ServiceItem/serviceDLLmd5sum'
    content_type = 'md5'
    content = servicedll_md5
    ii_node = ioc_api.make_indicatoritem_node(condition, document, search,
                                              content_type, content, negate=negate)
    return ii_node
[ "def", "make_serviceitem_servicedllmd5sum", "(", "servicedll_md5", ",", "condition", "=", "'is'", ",", "negate", "=", "False", ")", ":", "document", "=", "'ServiceItem'", "search", "=", "'ServiceItem/serviceDLLmd5sum'", "content_type", "=", "'md5'", "content", "=", "servicedll_md5", "ii_node", "=", "ioc_api", ".", "make_indicatoritem_node", "(", "condition", ",", "document", ",", "search", ",", "content_type", ",", "content", ",", "negate", "=", "negate", ")", "return", "ii_node" ]
Create a node for ServiceItem/serviceDLLmd5sum

:return: An IndicatorItem represented as an Element node
[ "Create", "a", "node", "for", "ServiceItem", "/", "serviceDLLmd5sum", ":", "return", ":", "A", "IndicatorItem", "represented", "as", "an", "Element", "node" ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/files.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/files.py#L288-L309
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one.  The
    assumption is that `dirname` was specified directly, so the user knows
    best, but subdirectories are checked for a __init__.py to be sure we only
    find the importable files.
    """
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if i > 0 and '__init__.py' not in filenames:
            # If a directory doesn't have __init__.py, then it isn't
            # importable and neither are its files
            del dirnames[:]
            continue
        for filename in filenames:
            # We're only interested in files that look like reasonable Python
            # files: Must end with .py or .pyw, and must not have certain
            # funny characters that probably mean they are editor junk.
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)
[ "def", "find_python_files", "(", "dirname", ")", ":", "for", "i", ",", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "enumerate", "(", "os", ".", "walk", "(", "dirname", ")", ")", ":", "if", "i", ">", "0", "and", "'__init__.py'", "not", "in", "filenames", ":", "# If a directory doesn't have __init__.py, then it isn't", "# importable and neither are its files", "del", "dirnames", "[", ":", "]", "continue", "for", "filename", "in", "filenames", ":", "# We're only interested in files that look like reasonable Python", "# files: Must end with .py or .pyw, and must not have certain funny", "# characters that probably mean they are editor junk.", "if", "re", ".", "match", "(", "r\"^[^.#~!$@%^&*()+=,]+\\.pyw?$\"", ",", "filename", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")" ]
Yield all of the importable Python files in `dirname`, recursively.

To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one.  The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
[ "Yield", "all", "of", "the", "importable", "Python", "files", "in", "dirname", "recursively", "." ]
python
test
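A quick demonstration of the walk-and-prune behaviour on a throwaway directory tree; the helper is reproduced inline so the demo runs without the coverage package, and all paths are illustrative.

import os
import re
import tempfile

def find_python_files(dirname):
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if i > 0 and '__init__.py' not in filenames:
            del dirnames[:]  # prune: nothing below here is importable
            continue
        for filename in filenames:
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'pkg'))
os.makedirs(os.path.join(root, 'notpkg'))
for rel in ('a.py', os.path.join('pkg', '__init__.py'),
            os.path.join('pkg', 'b.py'), os.path.join('notpkg', 'c.py')):
    open(os.path.join(root, rel), 'w').close()

print(sorted(find_python_files(root)))
# a.py, pkg/__init__.py and pkg/b.py are yielded; notpkg/c.py is pruned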
Alignak-monitoring/alignak
alignak/objects/satellitelink.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L937-L949
def get_broks(self, broker_name):
    """Send a HTTP request to the satellite (GET /_broks)
    Get broks from the satellite.
    Un-serialize data received.

    :param broker_name: the concerned broker link
    :type broker_name: BrokerLink
    :return: Broks list on success, [] on failure
    :rtype: list
    """
    res = self.con.get('_broks', {'broker_name': broker_name}, wait=False)
    logger.debug("Got broks from %s: %s", self.name, res)
    return unserialize(res, True)
[ "def", "get_broks", "(", "self", ",", "broker_name", ")", ":", "res", "=", "self", ".", "con", ".", "get", "(", "'_broks'", ",", "{", "'broker_name'", ":", "broker_name", "}", ",", "wait", "=", "False", ")", "logger", ".", "debug", "(", "\"Got broks from %s: %s\"", ",", "self", ".", "name", ",", "res", ")", "return", "unserialize", "(", "res", ",", "True", ")" ]
Send a HTTP request to the satellite (GET /_broks)
Get broks from the satellite.
Un-serialize data received.

:param broker_name: the concerned broker link
:type broker_name: BrokerLink
:return: Broks list on success, [] on failure
:rtype: list
[ "Send", "a", "HTTP", "request", "to", "the", "satellite", "(", "GET", "/", "_broks", ")", "Get", "broks", "from", "the", "satellite", ".", "Un", "-", "serialize", "data", "received", "." ]
python
train
saltstack/salt
salt/renderers/cheetah.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/cheetah.py#L19-L36
def render(cheetah_data, saltenv='base', sls='', method='xml', **kws):
    '''
    Render a Cheetah template.

    :rtype: A Python data structure
    '''
    if not HAS_LIBS:
        return {}

    if not isinstance(cheetah_data, six.string_types):
        cheetah_data = cheetah_data.read()

    if cheetah_data.startswith('#!'):
        cheetah_data = cheetah_data[(cheetah_data.find('\n') + 1):]
    if not cheetah_data.strip():
        return {}

    return six.text_type(Template(cheetah_data, searchList=[kws]))
[ "def", "render", "(", "cheetah_data", ",", "saltenv", "=", "'base'", ",", "sls", "=", "''", ",", "method", "=", "'xml'", ",", "*", "*", "kws", ")", ":", "if", "not", "HAS_LIBS", ":", "return", "{", "}", "if", "not", "isinstance", "(", "cheetah_data", ",", "six", ".", "string_types", ")", ":", "cheetah_data", "=", "cheetah_data", ".", "read", "(", ")", "if", "cheetah_data", ".", "startswith", "(", "'#!'", ")", ":", "cheetah_data", "=", "cheetah_data", "[", "(", "cheetah_data", ".", "find", "(", "'\\n'", ")", "+", "1", ")", ":", "]", "if", "not", "cheetah_data", ".", "strip", "(", ")", ":", "return", "{", "}", "return", "six", ".", "text_type", "(", "Template", "(", "cheetah_data", ",", "searchList", "=", "[", "kws", "]", ")", ")" ]
Render a Cheetah template.

:rtype: A Python data structure
[ "Render", "a", "Cheetah", "template", "." ]
python
train
samuelcolvin/pydantic
pydantic/utils.py
https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/utils.py#L120-L134
def import_string(dotted_path: str) -> Any:
    """
    Stolen approximately from django. Import a dotted module path and return
    the attribute/class designated by the last name in the path. Raise
    ImportError if the import fails.
    """
    try:
        module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
    except ValueError as e:
        raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e

    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError as e:
        raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e
[ "def", "import_string", "(", "dotted_path", ":", "str", ")", "->", "Any", ":", "try", ":", "module_path", ",", "class_name", "=", "dotted_path", ".", "strip", "(", "' '", ")", ".", "rsplit", "(", "'.'", ",", "1", ")", "except", "ValueError", "as", "e", ":", "raise", "ImportError", "(", "f'\"{dotted_path}\" doesn\\'t look like a module path'", ")", "from", "e", "module", "=", "import_module", "(", "module_path", ")", "try", ":", "return", "getattr", "(", "module", ",", "class_name", ")", "except", "AttributeError", "as", "e", ":", "raise", "ImportError", "(", "f'Module \"{module_path}\" does not define a \"{class_name}\" attribute'", ")", "from", "e" ]
Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import fails.
[ "Stolen", "approximately", "from", "django", ".", "Import", "a", "dotted", "module", "path", "and", "return", "the", "attribute", "/", "class", "designated", "by", "the", "last", "name", "in", "the", "path", ".", "Raise", "ImportError", "if", "the", "import", "fails", "." ]
python
train
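Usage is straightforward; a tiny demo with the helper reproduced inline so it runs without pydantic installed, resolving a couple of standard-library attributes.

from importlib import import_module
from typing import Any

def import_string(dotted_path: str) -> Any:
    # split "pkg.mod.attr" into a module path and an attribute name
    try:
        module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
    except ValueError as e:
        raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError as e:
        raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e

sqrt = import_string('math.sqrt')
print(sqrt(2))                                  # 1.4142135623730951
print(import_string('os.path.join')('a', 'b'))  # a/b on POSIX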
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L560-L570
def search(**kw):
    """Search the catalog adapter

    :returns: Catalog search results
    :rtype: iterable
    """
    portal = get_portal()
    catalog = ICatalog(portal)
    catalog_query = ICatalogQuery(catalog)
    query = catalog_query.make_query(**kw)
    return catalog(query)
[ "def", "search", "(", "*", "*", "kw", ")", ":", "portal", "=", "get_portal", "(", ")", "catalog", "=", "ICatalog", "(", "portal", ")", "catalog_query", "=", "ICatalogQuery", "(", "catalog", ")", "query", "=", "catalog_query", ".", "make_query", "(", "*", "*", "kw", ")", "return", "catalog", "(", "query", ")" ]
Search the catalog adapter

:returns: Catalog search results
:rtype: iterable
[ "Search", "the", "catalog", "adapter" ]
python
train
oceanprotocol/squid-py
squid_py/agreements/service_agreement.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/service_agreement.py#L226-L245
def get_service_agreement_hash(
        self, agreement_id, asset_id, consumer_address, publisher_address,
        keeper):
    """Return the hash of the service agreement values to be signed by a consumer.

    :param agreement_id: id of the agreement, hex str
    :param asset_id:
    :param consumer_address: ethereum account address of consumer, hex str
    :param publisher_address: ethereum account address of publisher, hex str
    :param keeper:
    :return:
    """
    agreement_hash = ServiceAgreement.generate_service_agreement_hash(
        self.template_id,
        self.generate_agreement_condition_ids(
            agreement_id, asset_id, consumer_address, publisher_address, keeper),
        self.conditions_timelocks,
        self.conditions_timeouts,
        agreement_id
    )
    return agreement_hash
[ "def", "get_service_agreement_hash", "(", "self", ",", "agreement_id", ",", "asset_id", ",", "consumer_address", ",", "publisher_address", ",", "keeper", ")", ":", "agreement_hash", "=", "ServiceAgreement", ".", "generate_service_agreement_hash", "(", "self", ".", "template_id", ",", "self", ".", "generate_agreement_condition_ids", "(", "agreement_id", ",", "asset_id", ",", "consumer_address", ",", "publisher_address", ",", "keeper", ")", ",", "self", ".", "conditions_timelocks", ",", "self", ".", "conditions_timeouts", ",", "agreement_id", ")", "return", "agreement_hash" ]
Return the hash of the service agreement values to be signed by a consumer.

:param agreement_id: id of the agreement, hex str
:param asset_id:
:param consumer_address: ethereum account address of consumer, hex str
:param publisher_address: ethereum account address of publisher, hex str
:param keeper:
:return:
[ "Return", "the", "hash", "of", "the", "service", "agreement", "values", "to", "be", "signed", "by", "a", "consumer", "." ]
python
train
pudo-attic/scrapekit
scrapekit/tasks.py
https://github.com/pudo-attic/scrapekit/blob/cfd258120922fcd571430cdf00ba50f3cf18dc15/scrapekit/tasks.py#L176-L188
def pipe(self, other_task):
    """ Add a pipe listener to the execution of this task. The
    output of this task is required to be an iterable. Each item in
    the iterable will be queued as the sole argument to an execution
    of the listener task.

    Can also be written as::

        pipeline = task1 | task2
    """
    other_task._source = self
    self._listeners.append(PipeListener(other_task))
    return other_task
[ "def", "pipe", "(", "self", ",", "other_task", ")", ":", "other_task", ".", "_source", "=", "self", "self", ".", "_listeners", ".", "append", "(", "PipeListener", "(", "other_task", ")", ")", "return", "other_task" ]
Add a pipe listener to the execution of this task. The output of
this task is required to be an iterable. Each item in the iterable
will be queued as the sole argument to an execution of the listener
task.

Can also be written as::

    pipeline = task1 | task2
[ "Add", "a", "pipe", "listener", "to", "the", "execution", "of", "this", "task", ".", "The", "output", "of", "this", "task", "is", "required", "to", "be", "an", "iterable", ".", "Each", "item", "in", "the", "iterable", "will", "be", "queued", "as", "the", "sole", "argument", "to", "an", "execution", "of", "the", "listener", "task", "." ]
python
train
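A toy, framework-free sketch of the same pipe idea, with the | operator delegating to pipe; this Task is a stand-in for scrapekit's threaded implementation, which queues items rather than calling listeners synchronously.

class Task:
    def __init__(self, fn):
        self.fn = fn
        self._listeners = []

    def pipe(self, other_task):
        self._listeners.append(other_task)
        return other_task

    __or__ = pipe                     # lets pipelines read as task1 | task2

    def run(self, *args):
        for item in self.fn(*args):   # this task's output must be iterable
            for listener in self._listeners:
                listener.run(item)    # each item is the listener's sole argument

def double(n):
    print(n * 2)
    return []                         # leaf task: no further output

numbers = Task(lambda: range(3))
pipeline = numbers | Task(double)
numbers.run()                         # prints 0, 2, 4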
gem/oq-engine
openquake/hazardlib/gsim/rietbrock_2013.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/rietbrock_2013.py#L109-L120
def _get_distance_scaling_term(self, C, rjb, mag):
    """
    Returns the distance scaling component of the model
    Equation 10, Page 63
    """
    # Depth adjusted distance, equation 11 (Page 63)
    rval = np.sqrt(rjb ** 2.0 + C["c11"] ** 2.0)
    f_0, f_1, f_2 = self._get_distance_segment_coefficients(rval)
    return ((C["c4"] + C["c5"] * mag) * f_0 +
            (C["c6"] + C["c7"] * mag) * f_1 +
            (C["c8"] + C["c9"] * mag) * f_2 +
            (C["c10"] * rval))
[ "def", "_get_distance_scaling_term", "(", "self", ",", "C", ",", "rjb", ",", "mag", ")", ":", "# Depth adjusted distance, equation 11 (Page 63)", "rval", "=", "np", ".", "sqrt", "(", "rjb", "**", "2.0", "+", "C", "[", "\"c11\"", "]", "**", "2.0", ")", "f_0", ",", "f_1", ",", "f_2", "=", "self", ".", "_get_distance_segment_coefficients", "(", "rval", ")", "return", "(", "(", "C", "[", "\"c4\"", "]", "+", "C", "[", "\"c5\"", "]", "*", "mag", ")", "*", "f_0", "+", "(", "C", "[", "\"c6\"", "]", "+", "C", "[", "\"c7\"", "]", "*", "mag", ")", "*", "f_1", "+", "(", "C", "[", "\"c8\"", "]", "+", "C", "[", "\"c9\"", "]", "*", "mag", ")", "*", "f_2", "+", "(", "C", "[", "\"c10\"", "]", "*", "rval", ")", ")" ]
Returns the distance scaling component of the model
Equation 10, Page 63
[ "Returns", "the", "distance", "scaling", "component", "of", "the", "model", "Equation", "10", "Page", "63" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L558-L569
def prepare_encoder(inputs, hparams, attention_type="local_1d"): """Prepare encoder for images.""" x = prepare_image(inputs, hparams, name="enc_channels") # Add position signals. x = add_pos_signals(x, hparams, "enc_pos") x_shape = common_layers.shape_list(x) if attention_type == "local_1d": x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x.set_shape([None, None, hparams.hidden_size]) elif attention_type == "local_2d": x.set_shape([None, None, None, hparams.hidden_size]) return x
[ "def", "prepare_encoder", "(", "inputs", ",", "hparams", ",", "attention_type", "=", "\"local_1d\"", ")", ":", "x", "=", "prepare_image", "(", "inputs", ",", "hparams", ",", "name", "=", "\"enc_channels\"", ")", "# Add position signals.", "x", "=", "add_pos_signals", "(", "x", ",", "hparams", ",", "\"enc_pos\"", ")", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "if", "attention_type", "==", "\"local_1d\"", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", "*", "x_shape", "[", "2", "]", ",", "hparams", ".", "hidden_size", "]", ")", "x", ".", "set_shape", "(", "[", "None", ",", "None", ",", "hparams", ".", "hidden_size", "]", ")", "elif", "attention_type", "==", "\"local_2d\"", ":", "x", ".", "set_shape", "(", "[", "None", ",", "None", ",", "None", ",", "hparams", ".", "hidden_size", "]", ")", "return", "x" ]
Prepare encoder for images.
[ "Prepare", "encoder", "for", "images", "." ]
python
train
piotr-rusin/spam-lists
spam_lists/validation.py
https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/validation.py#L15-L22
def is_valid_host(value): """Check if given value is a valid host string. :param value: a value to test :returns: True if the value is valid """ host_validators = validators.ipv4, validators.ipv6, validators.domain return any(f(value) for f in host_validators)
[ "def", "is_valid_host", "(", "value", ")", ":", "host_validators", "=", "validators", ".", "ipv4", ",", "validators", ".", "ipv6", ",", "validators", ".", "domain", "return", "any", "(", "f", "(", "value", ")", "for", "f", "in", "host_validators", ")" ]
Check if given value is a valid host string. :param value: a value to test :returns: True if the value is valid
[ "Check", "if", "given", "value", "is", "a", "valid", "host", "string", "." ]
python
train
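Assuming the validators attribute calls in the record come from the PyPI validators package (which does expose ipv4, ipv6, and domain), usage looks like:

import validators  # pip install validators

def is_valid_host(value):
    host_validators = validators.ipv4, validators.ipv6, validators.domain
    return any(f(value) for f in host_validators)

print(is_valid_host("127.0.0.1"))    # True (IPv4)
print(is_valid_host("example.com"))  # True (domain)
print(is_valid_host("not a host"))   # False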
plivo/sharq
sharq/utils.py
https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/utils.py#L9-L28
def is_valid_identifier(identifier): """Checks if the given identifier is valid or not. A valid identifier may consists of the following characters with a maximum length of 100 characters, minimum of 1 character. Valid characters for an identifier, - A to Z - a to z - 0 to 9 - _ (underscore) - - (hypen) """ if not isinstance(identifier, basestring): return False if len(identifier) > 100 or len(identifier) < 1: return False condensed_form = set(list(identifier.lower())) return condensed_form.issubset(VALID_IDENTIFIER_SET)
[ "def", "is_valid_identifier", "(", "identifier", ")", ":", "if", "not", "isinstance", "(", "identifier", ",", "basestring", ")", ":", "return", "False", "if", "len", "(", "identifier", ")", ">", "100", "or", "len", "(", "identifier", ")", "<", "1", ":", "return", "False", "condensed_form", "=", "set", "(", "list", "(", "identifier", ".", "lower", "(", ")", ")", ")", "return", "condensed_form", ".", "issubset", "(", "VALID_IDENTIFIER_SET", ")" ]
Checks if the given identifier is valid or not. A valid identifier may consist of the following characters with a maximum length of 100 characters, minimum of 1 character. Valid characters for an identifier, - A to Z - a to z - 0 to 9 - _ (underscore) - - (hyphen)
[ "Checks", "if", "the", "given", "identifier", "is", "valid", "or", "not", ".", "A", "valid", "identifier", "may", "consists", "of", "the", "following", "characters", "with", "a", "maximum", "length", "of", "100", "characters", "minimum", "of", "1", "character", "." ]
python
train
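A Python 3 port of the record (the original targets Python 2's basestring). VALID_IDENTIFIER_SET is not shown in the record, so it is reconstructed here from the characters the docstring lists.

import string

# Reconstructed from the docstring: letters, digits, underscore, hyphen.
VALID_IDENTIFIER_SET = set(string.ascii_lowercase + string.digits + "_-")

def is_valid_identifier(identifier):
    if not isinstance(identifier, str):   # Py3: str instead of basestring
        return False
    if len(identifier) > 100 or len(identifier) < 1:
        return False
    return set(identifier.lower()) <= VALID_IDENTIFIER_SET

print(is_valid_identifier("queue_1-a"))   # True
print(is_valid_identifier("bad queue"))   # False (space not allowed)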
ModisWorks/modis
modis/discord_modis/modules/music/_musicplayer.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/music/_musicplayer.py#L892-L909
def update_queue(self): """Updates the queue in the music player """ self.logger.debug("Updating queue display") queue_display = [] for i in range(self.queue_display): try: if len(self.queue[i][1]) > 40: songname = self.queue[i][1][:37] + "..." else: songname = self.queue[i][1] except IndexError: songname = "---" queue_display.append("{}. {}\n".format(str(i + 1), songname)) self.queuelog.debug(''.join(queue_display)) self.queuelenlog.debug(str(len(self.queue)))
[ "def", "update_queue", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Updating queue display\"", ")", "queue_display", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "queue_display", ")", ":", "try", ":", "if", "len", "(", "self", ".", "queue", "[", "i", "]", "[", "1", "]", ")", ">", "40", ":", "songname", "=", "self", ".", "queue", "[", "i", "]", "[", "1", "]", "[", ":", "37", "]", "+", "\"...\"", "else", ":", "songname", "=", "self", ".", "queue", "[", "i", "]", "[", "1", "]", "except", "IndexError", ":", "songname", "=", "\"---\"", "queue_display", ".", "append", "(", "\"{}. {}\\n\"", ".", "format", "(", "str", "(", "i", "+", "1", ")", ",", "songname", ")", ")", "self", ".", "queuelog", ".", "debug", "(", "''", ".", "join", "(", "queue_display", ")", ")", "self", ".", "queuelenlog", ".", "debug", "(", "str", "(", "len", "(", "self", ".", "queue", ")", ")", ")" ]
Updates the queue in the music player
[ "Updates", "the", "queue", "in", "the", "music", "player" ]
python
train
pantsbuild/pants
src/python/pants/option/parser.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/option/parser.py#L267-L283
def _unnormalized_option_registrations_iter(self): """Returns an iterator over the raw registration arguments of each option in this parser. Each yielded item is an (args, kwargs) pair, exactly as passed to register(), except for substituting list and dict types with list_option/dict_option. Note that recursive options we inherit from a parent will also be yielded here. """ # First yield any recursive options we inherit from our parent. if self._parent_parser: for args, kwargs in self._parent_parser._recursive_option_registration_args(): yield args, kwargs # Then yield our directly-registered options. for args, kwargs in self._option_registrations: if 'recursive' in kwargs and self._scope_info.category == ScopeInfo.SUBSYSTEM: raise RecursiveSubsystemOption(self.scope, args[0]) yield args, kwargs
[ "def", "_unnormalized_option_registrations_iter", "(", "self", ")", ":", "# First yield any recursive options we inherit from our parent.", "if", "self", ".", "_parent_parser", ":", "for", "args", ",", "kwargs", "in", "self", ".", "_parent_parser", ".", "_recursive_option_registration_args", "(", ")", ":", "yield", "args", ",", "kwargs", "# Then yield our directly-registered options.", "for", "args", ",", "kwargs", "in", "self", ".", "_option_registrations", ":", "if", "'recursive'", "in", "kwargs", "and", "self", ".", "_scope_info", ".", "category", "==", "ScopeInfo", ".", "SUBSYSTEM", ":", "raise", "RecursiveSubsystemOption", "(", "self", ".", "scope", ",", "args", "[", "0", "]", ")", "yield", "args", ",", "kwargs" ]
Returns an iterator over the raw registration arguments of each option in this parser. Each yielded item is an (args, kwargs) pair, exactly as passed to register(), except for substituting list and dict types with list_option/dict_option. Note that recursive options we inherit from a parent will also be yielded here.
[ "Returns", "an", "iterator", "over", "the", "raw", "registration", "arguments", "of", "each", "option", "in", "this", "parser", "." ]
python
train
gabstopper/smc-python
smc/vpn/elements.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/vpn/elements.py#L436-L455
def create(self, name, site_element): """ Create a VPN site for an internal or external gateway :param str name: name of site :param list site_element: list of protected networks/hosts :type site_element: list[str,Element] :raises CreateElementFailed: create element failed with reason :return: href of new element :rtype: str """ site_element = element_resolver(site_element) json = { 'name': name, 'site_element': site_element} return ElementCreator( self.__class__, href=self.href, json=json)
[ "def", "create", "(", "self", ",", "name", ",", "site_element", ")", ":", "site_element", "=", "element_resolver", "(", "site_element", ")", "json", "=", "{", "'name'", ":", "name", ",", "'site_element'", ":", "site_element", "}", "return", "ElementCreator", "(", "self", ".", "__class__", ",", "href", "=", "self", ".", "href", ",", "json", "=", "json", ")" ]
Create a VPN site for an internal or external gateway :param str name: name of site :param list site_element: list of protected networks/hosts :type site_element: list[str,Element] :raises CreateElementFailed: create element failed with reason :return: href of new element :rtype: str
[ "Create", "a", "VPN", "site", "for", "an", "internal", "or", "external", "gateway" ]
python
train
pytest-dev/pytest-xprocess
xprocess.py
https://github.com/pytest-dev/pytest-xprocess/blob/c3ee760b02dce2d0eed960b3ab0e28379853c3ef/xprocess.py#L78-L135
def ensure(self, name, preparefunc, restart=False): """ returns (PID, logfile) from a newly started or already running process. @param name: name of the external process, used for caching info across test runs. @param preparefunc: A subclass of ProcessStarter. @param restart: force restarting the process if it is running. @return: (PID, logfile) logfile will be seeked to the end if the server was running, otherwise seeked to the line after where the waitpattern matched. """ from subprocess import Popen, STDOUT info = self.getinfo(name) if not restart and not info.isrunning(): restart = True if restart: if info.pid is not None: info.terminate() controldir = info.controldir.ensure(dir=1) #controldir.remove() preparefunc = CompatStarter.wrap(preparefunc) starter = preparefunc(controldir, self) args = [str(x) for x in starter.args] self.log.debug("%s$ %s", controldir, " ".join(args)) stdout = open(str(info.logpath), "wb", 0) kwargs = {'env': starter.env} if sys.platform == "win32": kwargs["startupinfo"] = sinfo = std.subprocess.STARTUPINFO() if sys.version_info >= (2,7): sinfo.dwFlags |= std.subprocess.STARTF_USESHOWWINDOW sinfo.wShowWindow |= std.subprocess.SW_HIDE else: kwargs["close_fds"] = True kwargs["preexec_fn"] = os.setpgrp # no CONTROL-C popen = Popen(args, cwd=str(controldir), stdout=stdout, stderr=STDOUT, **kwargs) info.pid = pid = popen.pid info.pidpath.write(str(pid)) self.log.debug("process %r started pid=%s", name, pid) stdout.close() f = info.logpath.open() if not restart: f.seek(0, 2) else: if not starter.wait(f): raise RuntimeError("Could not start process %s" % name) self.log.debug("%s process startup detected", name) logfiles = self.config.__dict__.setdefault("_extlogfiles", {}) logfiles[name] = f self.getinfo(name) return info.pid, info.logpath
[ "def", "ensure", "(", "self", ",", "name", ",", "preparefunc", ",", "restart", "=", "False", ")", ":", "from", "subprocess", "import", "Popen", ",", "STDOUT", "info", "=", "self", ".", "getinfo", "(", "name", ")", "if", "not", "restart", "and", "not", "info", ".", "isrunning", "(", ")", ":", "restart", "=", "True", "if", "restart", ":", "if", "info", ".", "pid", "is", "not", "None", ":", "info", ".", "terminate", "(", ")", "controldir", "=", "info", ".", "controldir", ".", "ensure", "(", "dir", "=", "1", ")", "#controldir.remove()", "preparefunc", "=", "CompatStarter", ".", "wrap", "(", "preparefunc", ")", "starter", "=", "preparefunc", "(", "controldir", ",", "self", ")", "args", "=", "[", "str", "(", "x", ")", "for", "x", "in", "starter", ".", "args", "]", "self", ".", "log", ".", "debug", "(", "\"%s$ %s\"", ",", "controldir", ",", "\" \"", ".", "join", "(", "args", ")", ")", "stdout", "=", "open", "(", "str", "(", "info", ".", "logpath", ")", ",", "\"wb\"", ",", "0", ")", "kwargs", "=", "{", "'env'", ":", "starter", ".", "env", "}", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "kwargs", "[", "\"startupinfo\"", "]", "=", "sinfo", "=", "std", ".", "subprocess", ".", "STARTUPINFO", "(", ")", "if", "sys", ".", "version_info", ">=", "(", "2", ",", "7", ")", ":", "sinfo", ".", "dwFlags", "|=", "std", ".", "subprocess", ".", "STARTF_USESHOWWINDOW", "sinfo", ".", "wShowWindow", "|=", "std", ".", "subprocess", ".", "SW_HIDE", "else", ":", "kwargs", "[", "\"close_fds\"", "]", "=", "True", "kwargs", "[", "\"preexec_fn\"", "]", "=", "os", ".", "setpgrp", "# no CONTROL-C", "popen", "=", "Popen", "(", "args", ",", "cwd", "=", "str", "(", "controldir", ")", ",", "stdout", "=", "stdout", ",", "stderr", "=", "STDOUT", ",", "*", "*", "kwargs", ")", "info", ".", "pid", "=", "pid", "=", "popen", ".", "pid", "info", ".", "pidpath", ".", "write", "(", "str", "(", "pid", ")", ")", "self", ".", "log", ".", "debug", "(", "\"process %r started pid=%s\"", ",", "name", ",", "pid", ")", "stdout", ".", "close", "(", ")", "f", "=", "info", ".", "logpath", ".", "open", "(", ")", "if", "not", "restart", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "else", ":", "if", "not", "starter", ".", "wait", "(", "f", ")", ":", "raise", "RuntimeError", "(", "\"Could not start process %s\"", "%", "name", ")", "self", ".", "log", ".", "debug", "(", "\"%s process startup detected\"", ",", "name", ")", "logfiles", "=", "self", ".", "config", ".", "__dict__", ".", "setdefault", "(", "\"_extlogfiles\"", ",", "{", "}", ")", "logfiles", "[", "name", "]", "=", "f", "self", ".", "getinfo", "(", "name", ")", "return", "info", ".", "pid", ",", "info", ".", "logpath" ]
returns (PID, logfile) from a newly started or already running process. @param name: name of the external process, used for caching info across test runs. @param preparefunc: A subclass of ProcessStarter. @param restart: force restarting the process if it is running. @return: (PID, logfile) the logfile is positioned at the end if the server was already running, otherwise at the line after where the wait pattern matched.
[ "returns", "(", "PID", "logfile", ")", "from", "a", "newly", "started", "or", "already", "running", "process", "." ]
python
train
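Typical test-side usage, assuming pytest-xprocess's documented ProcessStarter interface (a pattern waited for in the logfile and an args command line); server.py and the "listening on" pattern are placeholders.

import sys
from xprocess import ProcessStarter

def test_server(xprocess):
    class Starter(ProcessStarter):
        pattern = "listening on"                    # line awaited in the log
        args = [sys.executable, "-u", "server.py"]  # placeholder script

    pid, logfile = xprocess.ensure("myserver", Starter)
    # ... interact with the running server here ...
    xprocess.getinfo("myserver").terminate()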
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L779-L786
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128(): """separate rgb embeddings.""" hparams = imagetransformer_sep_channels_12l_16h_imagenet_large() hparams.num_hidden_layers = 16 hparams.local_attention = True hparams.batch_size = 1 hparams.block_length = 128 return hparams
[ "def", "imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128", "(", ")", ":", "hparams", "=", "imagetransformer_sep_channels_12l_16h_imagenet_large", "(", ")", "hparams", ".", "num_hidden_layers", "=", "16", "hparams", ".", "local_attention", "=", "True", "hparams", ".", "batch_size", "=", "1", "hparams", ".", "block_length", "=", "128", "return", "hparams" ]
separate rgb embeddings.
[ "separate", "rgb", "embeddings", "." ]
python
train
openvax/topiary
topiary/rna/cufflinks.py
https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/cufflinks.py#L234-L243
def load_cufflinks_fpkm_dict(*args, **kwargs): """ Returns dictionary mapping feature identifier (either transcript or gene ID) to FPKM expression value. """ return { row.id: row.fpkm for (_, row) in load_cufflinks_dataframe(*args, **kwargs).iterrows() }
[ "def", "load_cufflinks_fpkm_dict", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "{", "row", ".", "id", ":", "row", ".", "fpkm", "for", "(", "_", ",", "row", ")", "in", "load_cufflinks_dataframe", "(", "*", "args", ",", "*", "*", "kwargs", ")", ".", "iterrows", "(", ")", "}" ]
Returns dictionary mapping feature identifier (either transcript or gene ID) to FPKM expression value.
[ "Returns", "dictionary", "mapping", "feature", "identifier", "(", "either", "transcript", "or", "gene", "ID", ")", "to", "FPKM", "expression", "value", "." ]
python
train
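The id-to-FPKM mapping pattern in isolation, using a toy pandas frame in place of real Cufflinks output:

import pandas as pd

df = pd.DataFrame({"id": ["ENSG01", "ENSG02"], "fpkm": [12.5, 0.8]})

fpkm_dict = {row.id: row.fpkm for _, row in df.iterrows()}
print(fpkm_dict)  # {'ENSG01': 12.5, 'ENSG02': 0.8}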
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/database.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/database.py#L242-L255
def reload(self): """Reload this database. Refresh any configured schema into :attr:`ddl_statements`. See https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL :raises NotFound: if the database does not exist """ api = self._instance._client.database_admin_api metadata = _metadata_with_prefix(self.name) response = api.get_database_ddl(self.name, metadata=metadata) self._ddl_statements = tuple(response.statements)
[ "def", "reload", "(", "self", ")", ":", "api", "=", "self", ".", "_instance", ".", "_client", ".", "database_admin_api", "metadata", "=", "_metadata_with_prefix", "(", "self", ".", "name", ")", "response", "=", "api", ".", "get_database_ddl", "(", "self", ".", "name", ",", "metadata", "=", "metadata", ")", "self", ".", "_ddl_statements", "=", "tuple", "(", "response", ".", "statements", ")" ]
Reload this database. Refresh any configured schema into :attr:`ddl_statements`. See https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL :raises NotFound: if the database does not exist
[ "Reload", "this", "database", "." ]
python
train
mongolab/mongoctl
mongoctl/utils.py
https://github.com/mongolab/mongoctl/blob/fab15216127ad4bf8ea9aa8a95d75504c0ef01a2/mongoctl/utils.py#L253-L268
def is_same_host(host1, host2): """ Returns true if host1 == host2 OR map to the same host (using DNS) """ try: if host1 == host2: return True else: ips1 = get_host_ips(host1) ips2 = get_host_ips(host2) return len(set(ips1) & set(ips2)) > 0 except Exception, ex: log_exception(ex) return False
[ "def", "is_same_host", "(", "host1", ",", "host2", ")", ":", "try", ":", "if", "host1", "==", "host2", ":", "return", "True", "else", ":", "ips1", "=", "get_host_ips", "(", "host1", ")", "ips2", "=", "get_host_ips", "(", "host2", ")", "return", "len", "(", "set", "(", "ips1", ")", "&", "set", "(", "ips2", ")", ")", ">", "0", "except", "Exception", ",", "ex", ":", "log_exception", "(", "ex", ")", "return", "False" ]
Returns true if host1 == host2 OR they map to the same host (using DNS)
[ "Returns", "true", "if", "host1", "==", "host2", "OR", "map", "to", "the", "same", "host", "(", "using", "DNS", ")" ]
python
train
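A Python 3 sketch of the same check; the record's get_host_ips helper is approximated with socket.gethostbyname_ex, which may resolve differently from mongoctl's implementation.

import socket

def get_host_ips(host):
    # Approximation of the helper used in the record.
    return socket.gethostbyname_ex(host)[2]

def is_same_host(host1, host2):
    try:
        if host1 == host2:
            return True
        return bool(set(get_host_ips(host1)) & set(get_host_ips(host2)))
    except OSError:
        return False

print(is_same_host("localhost", "localhost"))  # True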
rocky/python-xdis
xdis/verify.py
https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/verify.py#L101-L151
def verify_file(real_source_filename, real_bytecode_filename): """Compile *real_source_filename* using the running Python interpreter. Then write bytecode out to a new place again using Python's routines. Next load it in using two of our routines. Compare that the code objects there are equal. Next write out the bytecode (using the same Python bytecode writin routine as in step 1. Finally compare the bytecode files. """ tempdir = tempfile.gettempdir() source_filename = os.path.join(tempdir, "testing.py") if not os.path.exists(real_source_filename): return try: f = open(real_source_filename, 'U') except: return codestring = f.read() f.close() codeobject1 = compile(codestring, source_filename,'exec') (version, timestamp, magic_int, codeobject2, is_pypy, source_size) = load_module(real_bytecode_filename) # A hack for PyPy 3.2 if magic_int == 3180+7: magic_int = 48 assert MAGIC == magics.int2magic(magic_int), \ ("magic_int %d vs %d in %s/%s" % (magic_int, magics.magic2int(MAGIC), os.getcwd(), real_bytecode_filename)) bytecode_filename1 = os.path.join(tempdir, "testing1.pyc") dump_compile(codeobject1, bytecode_filename1, timestamp, MAGIC) (version, timestamp, magic_int, codeobject3, is_pypy, source_size) = load_module(real_bytecode_filename, fast_load=not is_pypy) # compare_code(codeobject1, codeobject2) # compare_code(codeobject2, codeobject3) bytecode_filename2 = os.path.join(tempdir, "testing2.pyc") dump_compile(codeobject1, bytecode_filename2, timestamp, magics.int2magic(magic_int)) compare_bytecode_files(bytecode_filename1, bytecode_filename2) return
[ "def", "verify_file", "(", "real_source_filename", ",", "real_bytecode_filename", ")", ":", "tempdir", "=", "tempfile", ".", "gettempdir", "(", ")", "source_filename", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "\"testing.py\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "real_source_filename", ")", ":", "return", "try", ":", "f", "=", "open", "(", "real_source_filename", ",", "'U'", ")", "except", ":", "return", "codestring", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "codeobject1", "=", "compile", "(", "codestring", ",", "source_filename", ",", "'exec'", ")", "(", "version", ",", "timestamp", ",", "magic_int", ",", "codeobject2", ",", "is_pypy", ",", "source_size", ")", "=", "load_module", "(", "real_bytecode_filename", ")", "# A hack for PyPy 3.2", "if", "magic_int", "==", "3180", "+", "7", ":", "magic_int", "=", "48", "assert", "MAGIC", "==", "magics", ".", "int2magic", "(", "magic_int", ")", ",", "(", "\"magic_int %d vs %d in %s/%s\"", "%", "(", "magic_int", ",", "magics", ".", "magic2int", "(", "MAGIC", ")", ",", "os", ".", "getcwd", "(", ")", ",", "real_bytecode_filename", ")", ")", "bytecode_filename1", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "\"testing1.pyc\"", ")", "dump_compile", "(", "codeobject1", ",", "bytecode_filename1", ",", "timestamp", ",", "MAGIC", ")", "(", "version", ",", "timestamp", ",", "magic_int", ",", "codeobject3", ",", "is_pypy", ",", "source_size", ")", "=", "load_module", "(", "real_bytecode_filename", ",", "fast_load", "=", "not", "is_pypy", ")", "# compare_code(codeobject1, codeobject2)", "# compare_code(codeobject2, codeobject3)", "bytecode_filename2", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "\"testing2.pyc\"", ")", "dump_compile", "(", "codeobject1", ",", "bytecode_filename2", ",", "timestamp", ",", "magics", ".", "int2magic", "(", "magic_int", ")", ")", "compare_bytecode_files", "(", "bytecode_filename1", ",", "bytecode_filename2", ")", "return" ]
Compile *real_source_filename* using the running Python interpreter. Then write bytecode out to a new place again using Python's routines. Next load it in using two of our routines. Compare that the code objects there are equal. Next write out the bytecode (using the same Python bytecode writing routine as in step 1). Finally compare the bytecode files.
[ "Compile", "*", "real_source_filename", "*", "using", "the", "running", "Python", "interpreter", ".", "Then", "write", "bytecode", "out", "to", "a", "new", "place", "again", "using", "Python", "s", "routines", "." ]
python
train
inasafe/inasafe
safe/gui/tools/shake_grid/shake_grid.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/shake_grid/shake_grid.py#L453-L491
def mmi_to_vrt(self, force_flag=True): """Save the mmi_data to an ogr vrt text file. :param force_flag: Whether to force the regeneration of the output file. Defaults to False. :type force_flag: bool :returns: The absolute file system path to the .vrt text file. :rtype: str :raises: None """ # Ensure the delimited mmi file exists LOGGER.debug('mmi_to_vrt requested.') vrt_path = os.path.join( self.output_dir, self.output_basename + '.vrt') # short circuit if the vrt is already created. if os.path.exists(vrt_path) and force_flag is not True: return vrt_path csv_path = self.mmi_to_delimited_file(True) vrt_string = ( '<OGRVRTDataSource>' ' <OGRVRTLayer name="mmi">' ' <SrcDataSource>%s</SrcDataSource>' ' <GeometryType>wkbPoint</GeometryType>' ' <GeometryField encoding="PointFromColumns"' ' x="lon" y="lat" z="mmi"/>' ' </OGRVRTLayer>' '</OGRVRTDataSource>' % csv_path) with codecs.open(vrt_path, 'w', encoding='utf-8') as f: f.write(vrt_string) return vrt_path
[ "def", "mmi_to_vrt", "(", "self", ",", "force_flag", "=", "True", ")", ":", "# Ensure the delimited mmi file exists", "LOGGER", ".", "debug", "(", "'mmi_to_vrt requested.'", ")", "vrt_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_dir", ",", "self", ".", "output_basename", "+", "'.vrt'", ")", "# short circuit if the vrt is already created.", "if", "os", ".", "path", ".", "exists", "(", "vrt_path", ")", "and", "force_flag", "is", "not", "True", ":", "return", "vrt_path", "csv_path", "=", "self", ".", "mmi_to_delimited_file", "(", "True", ")", "vrt_string", "=", "(", "'<OGRVRTDataSource>'", "' <OGRVRTLayer name=\"mmi\">'", "' <SrcDataSource>%s</SrcDataSource>'", "' <GeometryType>wkbPoint</GeometryType>'", "' <GeometryField encoding=\"PointFromColumns\"'", "' x=\"lon\" y=\"lat\" z=\"mmi\"/>'", "' </OGRVRTLayer>'", "'</OGRVRTDataSource>'", "%", "csv_path", ")", "with", "codecs", ".", "open", "(", "vrt_path", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "vrt_string", ")", "return", "vrt_path" ]
Save the mmi_data to an ogr vrt text file. :param force_flag: Whether to force the regeneration of the output file. Defaults to False. :type force_flag: bool :returns: The absolute file system path to the .vrt text file. :rtype: str :raises: None
[ "Save", "the", "mmi_data", "to", "an", "ogr", "vrt", "text", "file", "." ]
python
train
matiasb/python-unrar
unrar/rarfile.py
https://github.com/matiasb/python-unrar/blob/b1ac46cbcf42f3d3c5c69ab971fe97369a4da617/unrar/rarfile.py#L315-L323
def extractall(self, path=None, members=None, pwd=None): """Extract all members from the archive to the current working directory. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by namelist(). """ if members is None: members = self.namelist() self._extract_members(members, path, pwd)
[ "def", "extractall", "(", "self", ",", "path", "=", "None", ",", "members", "=", "None", ",", "pwd", "=", "None", ")", ":", "if", "members", "is", "None", ":", "members", "=", "self", ".", "namelist", "(", ")", "self", ".", "_extract_members", "(", "members", ",", "path", ",", "pwd", ")" ]
Extract all members from the archive to the current working directory. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by namelist().
[ "Extract", "all", "members", "from", "the", "archive", "to", "the", "current", "working", "directory", ".", "path", "specifies", "a", "different", "directory", "to", "extract", "to", ".", "members", "is", "optional", "and", "must", "be", "a", "subset", "of", "the", "list", "returned", "by", "namelist", "()", "." ]
python
valid
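python-unrar mirrors zipfile's API, so usage plausibly looks like the sketch below; archive.rar and the out directory are placeholders, and the system unrar library must be installed for the import to succeed.

from unrar import rarfile

rf = rarfile.RarFile("archive.rar")                   # placeholder path
print(rf.namelist())                                  # members in the archive
rf.extractall(path="out", members=rf.namelist()[:2])  # extract a subset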
Meseira/subordinate
subordinate/idmap.py
https://github.com/Meseira/subordinate/blob/3438df304af3dccc5bd1515231402afa708f1cc3/subordinate/idmap.py#L177-L185
def who_has(self, subid): """Return a list of names who own subid in their id range set.""" answer = [] for name in self.__map: if subid in self.__map[name] and not name in answer: answer.append(name) return answer
[ "def", "who_has", "(", "self", ",", "subid", ")", ":", "answer", "=", "[", "]", "for", "name", "in", "self", ".", "__map", ":", "if", "subid", "in", "self", ".", "__map", "[", "name", "]", "and", "not", "name", "in", "answer", ":", "answer", ".", "append", "(", "name", ")", "return", "answer" ]
Return a list of names who own subid in their id range set.
[ "Return", "a", "list", "of", "names", "who", "own", "subid", "in", "their", "id", "range", "set", "." ]
python
train
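The reverse lookup above, shown standalone with a plain dict of range objects standing in for the id range sets:

# Toy stand-in: name -> subordinate id range.
id_map = {"alice": range(100000, 165536), "bob": range(150000, 215536)}

def who_has(subid):
    """Names whose id range contains subid."""
    return [name for name, ids in id_map.items() if subid in ids]

print(who_has(160000))  # ['alice', 'bob'] -- ranges overlap here
print(who_has(50000))   # []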
jstitch/MambuPy
MambuPy/rest/mambustruct.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambustruct.py#L299-L355
def init(self, attrs={}, *args, **kwargs): """Default initialization from a dictionary responded by Mambu in to the elements of the Mambu object. It assings the response to attrs attribute and converts each of its elements from a string to an adequate python object: number, datetime, etc. Basically it stores the response on the attrs attribute, then runs some customizable preprocess method, then runs convertDict2Attrs method to convert the string elements to an adequate python object, then a customizable postprocess method. It also executes each method on the 'methods' attribute given on instantiation time, and sets new customizable 'properties' to the object. Why not on __init__? two reasons: * __init__ optionally connects to Mambu, if you don't connect to Mambu, the Mambu object will be configured but it won't have any Mambu info on it. Only when connected, the Mambu object will be initialized, here. Useful to POST several times the same Mambu object. You make a POST request over and over again by calling it's connect() method every time you wish. This init method will configure the response in to the attrs attribute each time. You may also wish to update the info on a previously initialized Mambu object and refresh it with what Mambu now has. Instead of building a new object, you just connect() again and it will be refreshed. * Iterable Mambu objects (lists) do not initialize here, the iterable Mambu object __init__ goes through each of its elements and then initializes with this code one by one. Please look at some Mambu iterable object code and pydoc for more details. """ self.attrs = attrs self.preprocess() self.convertDict2Attrs(*args, **kwargs) self.postprocess() try: for meth in kwargs['methods']: try: getattr(self,meth)() except Exception: pass except Exception: pass try: for propname,propval in kwargs['properties'].items(): setattr(self,propname,propval) except Exception: pass
[ "def", "init", "(", "self", ",", "attrs", "=", "{", "}", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "attrs", "=", "attrs", "self", ".", "preprocess", "(", ")", "self", ".", "convertDict2Attrs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "postprocess", "(", ")", "try", ":", "for", "meth", "in", "kwargs", "[", "'methods'", "]", ":", "try", ":", "getattr", "(", "self", ",", "meth", ")", "(", ")", "except", "Exception", ":", "pass", "except", "Exception", ":", "pass", "try", ":", "for", "propname", ",", "propval", "in", "kwargs", "[", "'properties'", "]", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "propname", ",", "propval", ")", "except", "Exception", ":", "pass" ]
Default initialization from a dictionary responded by Mambu into the elements of the Mambu object. It assigns the response to the attrs attribute and converts each of its elements from a string to an adequate python object: number, datetime, etc. Basically it stores the response on the attrs attribute, then runs some customizable preprocess method, then runs convertDict2Attrs method to convert the string elements to an adequate python object, then a customizable postprocess method. It also executes each method on the 'methods' attribute given at instantiation time, and sets new customizable 'properties' to the object. Why not on __init__? Two reasons: * __init__ optionally connects to Mambu, if you don't connect to Mambu, the Mambu object will be configured but it won't have any Mambu info on it. Only when connected, the Mambu object will be initialized, here. Useful to POST several times the same Mambu object. You make a POST request over and over again by calling its connect() method every time you wish. This init method will configure the response into the attrs attribute each time. You may also wish to update the info on a previously initialized Mambu object and refresh it with what Mambu now has. Instead of building a new object, you just connect() again and it will be refreshed. * Iterable Mambu objects (lists) do not initialize here, the iterable Mambu object __init__ goes through each of its elements and then initializes with this code one by one. Please look at some Mambu iterable object code and pydoc for more details.
[ "Default", "initialization", "from", "a", "dictionary", "responded", "by", "Mambu" ]
python
train
disqus/nydus
nydus/contrib/ketama.py
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L109-L118
def add_node(self, node, weight=1): """ Adds node to circle and rebuild it. """ self._nodes.add(node) self._weights[node] = weight self._hashring = dict() self._sorted_keys = [] self._build_circle()
[ "def", "add_node", "(", "self", ",", "node", ",", "weight", "=", "1", ")", ":", "self", ".", "_nodes", ".", "add", "(", "node", ")", "self", ".", "_weights", "[", "node", "]", "=", "weight", "self", ".", "_hashring", "=", "dict", "(", ")", "self", ".", "_sorted_keys", "=", "[", "]", "self", ".", "_build_circle", "(", ")" ]
Adds node to circle and rebuilds it.
[ "Adds", "node", "to", "circle", "and", "rebuild", "it", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/tooltip.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/tooltip.py#L235-L250
def add_widget(self, widget): """Add the given widget to the tooltip :param widget: the widget to add :type widget: QtGui.QWidget :returns: None :rtype: None :raises: None """ if self._buttons.get(widget): return btn = self.create_button(widget) cb = partial(self.focus_widget, w=widget) btn.clicked.connect(cb) self.layout().addWidget(btn) self._buttons[widget] = btn
[ "def", "add_widget", "(", "self", ",", "widget", ")", ":", "if", "self", ".", "_buttons", ".", "get", "(", "widget", ")", ":", "return", "btn", "=", "self", ".", "create_button", "(", "widget", ")", "cb", "=", "partial", "(", "self", ".", "focus_widget", ",", "w", "=", "widget", ")", "btn", ".", "clicked", ".", "connect", "(", "cb", ")", "self", ".", "layout", "(", ")", ".", "addWidget", "(", "btn", ")", "self", ".", "_buttons", "[", "widget", "]", "=", "btn" ]
Add the given widget to the tooltip :param widget: the widget to add :type widget: QtGui.QWidget :returns: None :rtype: None :raises: None
[ "Add", "the", "given", "widget", "to", "the", "tooltip" ]
python
train
walkr/nanoservice
nanoservice/core.py
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/core.py#L67-L77
def initialize(self, timeouts): """ Bind or connect the nanomsg socket to some address """ # Bind or connect to address if self.bind is True: self.socket.bind(self.address) else: self.socket.connect(self.address) # Set send and recv timeouts self._set_timeouts(timeouts)
[ "def", "initialize", "(", "self", ",", "timeouts", ")", ":", "# Bind or connect to address", "if", "self", ".", "bind", "is", "True", ":", "self", ".", "socket", ".", "bind", "(", "self", ".", "address", ")", "else", ":", "self", ".", "socket", ".", "connect", "(", "self", ".", "address", ")", "# Set send and recv timeouts", "self", ".", "_set_timeouts", "(", "timeouts", ")" ]
Bind or connect the nanomsg socket to some address
[ "Bind", "or", "connect", "the", "nanomsg", "socket", "to", "some", "address" ]
python
train
frmdstryr/enamlx
enamlx/core/looper.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/looper.py#L157-L186
def _prefetch_items(self,change): """ When the current_row in the model changes (whether from scrolling) or set by the application. Make sure the results are loaded! """ if self.is_initialized: view = self.item_view upper_limit = view.iterable_index+view.iterable_fetch_size-view.iterable_prefetch lower_limit = max(0,view.iterable_index+view.iterable_prefetch) offset = int(view.iterable_fetch_size/2.0) upper_visible_row = view.visible_rect[2] lower_visible_row = view.visible_rect[0] print("Visible rect = %s"%view.visible_rect) if upper_visible_row >= upper_limit: next_index = max(0,upper_visible_row-offset) # Center on current row # Going up works... if next_index>view.iterable_index: print("Auto prefetch upper limit %s!"%upper_limit) view.iterable_index = next_index #view.model().reset() # But doewn doesnt? elif view.iterable_index>0 and lower_visible_row < lower_limit: next_index = max(0,lower_visible_row-offset) # Center on current row # Going down works if next_index<view.iterable_index: print("Auto prefetch lower limit=%s, iterable=%s, setting next=%s!"%(lower_limit,view.iterable_index,next_index)) view.iterable_index = next_index
[ "def", "_prefetch_items", "(", "self", ",", "change", ")", ":", "if", "self", ".", "is_initialized", ":", "view", "=", "self", ".", "item_view", "upper_limit", "=", "view", ".", "iterable_index", "+", "view", ".", "iterable_fetch_size", "-", "view", ".", "iterable_prefetch", "lower_limit", "=", "max", "(", "0", ",", "view", ".", "iterable_index", "+", "view", ".", "iterable_prefetch", ")", "offset", "=", "int", "(", "view", ".", "iterable_fetch_size", "/", "2.0", ")", "upper_visible_row", "=", "view", ".", "visible_rect", "[", "2", "]", "lower_visible_row", "=", "view", ".", "visible_rect", "[", "0", "]", "print", "(", "\"Visible rect = %s\"", "%", "view", ".", "visible_rect", ")", "if", "upper_visible_row", ">=", "upper_limit", ":", "next_index", "=", "max", "(", "0", ",", "upper_visible_row", "-", "offset", ")", "# Center on current row", "# Going up works... ", "if", "next_index", ">", "view", ".", "iterable_index", ":", "print", "(", "\"Auto prefetch upper limit %s!\"", "%", "upper_limit", ")", "view", ".", "iterable_index", "=", "next_index", "#view.model().reset()", "# But doewn doesnt?", "elif", "view", ".", "iterable_index", ">", "0", "and", "lower_visible_row", "<", "lower_limit", ":", "next_index", "=", "max", "(", "0", ",", "lower_visible_row", "-", "offset", ")", "# Center on current row", "# Going down works", "if", "next_index", "<", "view", ".", "iterable_index", ":", "print", "(", "\"Auto prefetch lower limit=%s, iterable=%s, setting next=%s!\"", "%", "(", "lower_limit", ",", "view", ".", "iterable_index", ",", "next_index", ")", ")", "view", ".", "iterable_index", "=", "next_index" ]
When the current_row in the model changes (whether from scrolling or set by the application), make sure the results are loaded.
[ "When", "the", "current_row", "in", "the", "model", "changes", "(", "whether", "from", "scrolling", ")", "or", "set", "by", "the", "application", ".", "Make", "sure", "the", "results", "are", "loaded!" ]
python
train
inasafe/inasafe
scripts/create_api_docs.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/scripts/create_api_docs.py#L88-L104
def create_module_rst_file(module_name): """Function for creating content in each .rst file for a module. :param module_name: name of the module. :type module_name: str :returns: A content for auto module. :rtype: str """ return_text = 'Module: ' + module_name dash = '=' * len(return_text) return_text += '\n' + dash + '\n\n' return_text += '.. automodule:: ' + module_name + '\n' return_text += ' :members:\n\n' return return_text
[ "def", "create_module_rst_file", "(", "module_name", ")", ":", "return_text", "=", "'Module: '", "+", "module_name", "dash", "=", "'='", "*", "len", "(", "return_text", ")", "return_text", "+=", "'\\n'", "+", "dash", "+", "'\\n\\n'", "return_text", "+=", "'.. automodule:: '", "+", "module_name", "+", "'\\n'", "return_text", "+=", "' :members:\\n\\n'", "return", "return_text" ]
Function for creating content in each .rst file for a module. :param module_name: name of the module. :type module_name: str :returns: A content for auto module. :rtype: str
[ "Function", "for", "creating", "content", "in", "each", ".", "rst", "file", "for", "a", "module", "." ]
python
train
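The function is self-contained, so its output is easy to show directly; the four-space indent on :members: is an assumption (the record's whitespace is flattened).

def create_module_rst_file(module_name):
    return_text = 'Module: ' + module_name
    dash = '=' * len(return_text)
    return_text += '\n' + dash + '\n\n'
    return_text += '.. automodule:: ' + module_name + '\n'
    return_text += '    :members:\n\n'
    return return_text

# Prints a 'Module: safe.processors' title, an '=' underline of matching
# width, and an automodule directive with a :members: option.
print(create_module_rst_file('safe.processors'))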
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L399-L413
def get_meta(self, key=None): """Get metadata value for collection.""" if self.is_fake: return {} if key == "tag": return self.tag elif key is None: ret = {} for key in self.journal.info.keys(): ret[key] = self.meta_mappings.map_get(self.journal.info, key)[1] return ret else: key, value = self.meta_mappings.map_get(self.journal.info, key) return value
[ "def", "get_meta", "(", "self", ",", "key", "=", "None", ")", ":", "if", "self", ".", "is_fake", ":", "return", "{", "}", "if", "key", "==", "\"tag\"", ":", "return", "self", ".", "tag", "elif", "key", "is", "None", ":", "ret", "=", "{", "}", "for", "key", "in", "self", ".", "journal", ".", "info", ".", "keys", "(", ")", ":", "ret", "[", "key", "]", "=", "self", ".", "meta_mappings", ".", "map_get", "(", "self", ".", "journal", ".", "info", ",", "key", ")", "[", "1", "]", "return", "ret", "else", ":", "key", ",", "value", "=", "self", ".", "meta_mappings", ".", "map_get", "(", "self", ".", "journal", ".", "info", ",", "key", ")", "return", "value" ]
Get metadata value for collection.
[ "Get", "metadata", "value", "for", "collection", "." ]
python
train
Azure/msrest-for-python
msrest/universal_http/async_requests.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/universal_http/async_requests.py#L87-L91
async def send(self, request: ClientRequest, **kwargs: Any) -> AsyncClientResponse: # type: ignore """Send the request using this HTTP sender. """ requests_kwargs = self._configure_send(request, **kwargs) return await super(AsyncRequestsHTTPSender, self).send(request, **requests_kwargs)
[ "async", "def", "send", "(", "self", ",", "request", ":", "ClientRequest", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "AsyncClientResponse", ":", "# type: ignore", "requests_kwargs", "=", "self", ".", "_configure_send", "(", "request", ",", "*", "*", "kwargs", ")", "return", "await", "super", "(", "AsyncRequestsHTTPSender", ",", "self", ")", ".", "send", "(", "request", ",", "*", "*", "requests_kwargs", ")" ]
Send the request using this HTTP sender.
[ "Send", "the", "request", "using", "this", "HTTP", "sender", "." ]
python
train
zsims/dic
dic/container.py
https://github.com/zsims/dic/blob/bb4e615c236e6cfe804bd7286a5af081007325ce/dic/container.py#L211-L219
def register_instance(self, class_type, instance, register_as=None): """ Registers the given instance (already created). :param class_type: The class type. :param instance: The instance to register. :param register_as: The types to register the class as, defaults to the given class_type. """ registration = _InstanceRegistration(instance) self._register(class_type, registration, register_as)
[ "def", "register_instance", "(", "self", ",", "class_type", ",", "instance", ",", "register_as", "=", "None", ")", ":", "registration", "=", "_InstanceRegistration", "(", "instance", ")", "self", ".", "_register", "(", "class_type", ",", "registration", ",", "register_as", ")" ]
Registers the given instance (already created). :param class_type: The class type. :param instance: The instance to register. :param register_as: The types to register the class as, defaults to the given class_type.
[ "Registers", "the", "given", "instance", "(", "already", "created", ")", ".", ":", "param", "class_type", ":", "The", "class", "type", ".", ":", "param", "instance", ":", "The", "instance", "to", "register", ".", ":", "param", "register_as", ":", "The", "types", "to", "register", "the", "class", "as", "defaults", "to", "the", "given", "class_type", "." ]
python
train
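What instance registration buys you, as a toy container; this sketches the pattern only and is not dic's real API (register_as is treated here as an optional list of extra keys).

class Container:
    """Minimal instance-only container (illustrative, not dic itself)."""
    def __init__(self):
        self._registrations = {}

    def register_instance(self, class_type, instance, register_as=None):
        for key in (register_as or [class_type]):
            self._registrations[key] = instance  # always resolves to this object

    def resolve(self, class_type):
        return self._registrations[class_type]

class Config:
    pass

container = Container()
cfg = Config()
container.register_instance(Config, cfg)
assert container.resolve(Config) is cfg  # same object every time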
samghelms/mathviz
mathviz_hopper/src/indices.py
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/indices.py#L110-L118
def _convert_query(self, query): """ Convert query into an indexable string. """ query = self.dictionary.doc2bow(self._tokenize_latex(query)) sims = self.index[query] neighbors = sorted(sims, key=lambda item: -item[1]) neighbors = {"neighbors":[{self.columns[0]: {"data": self.docs[n[0]], "fmt": "math"}, self.columns[1]: {"data": float(n[1])}} for n in neighbors]} if neighbors else {"neighbors": []} return neighbors
[ "def", "_convert_query", "(", "self", ",", "query", ")", ":", "query", "=", "self", ".", "dictionary", ".", "doc2bow", "(", "self", ".", "_tokenize_latex", "(", "query", ")", ")", "sims", "=", "self", ".", "index", "[", "query", "]", "neighbors", "=", "sorted", "(", "sims", ",", "key", "=", "lambda", "item", ":", "-", "item", "[", "1", "]", ")", "neighbors", "=", "{", "\"neighbors\"", ":", "[", "{", "self", ".", "columns", "[", "0", "]", ":", "{", "\"data\"", ":", "self", ".", "docs", "[", "n", "[", "0", "]", "]", ",", "\"fmt\"", ":", "\"math\"", "}", ",", "self", ".", "columns", "[", "1", "]", ":", "{", "\"data\"", ":", "float", "(", "n", "[", "1", "]", ")", "}", "}", "for", "n", "in", "neighbors", "]", "}", "if", "neighbors", "else", "{", "\"neighbors\"", ":", "[", "]", "}", "return", "neighbors" ]
Convert query into an indexable string.
[ "Convert", "query", "into", "an", "indexable", "string", "." ]
python
train
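The gensim round trip the record relies on (doc2bow into a similarity index), shown standalone with toy token lists; setting num_best makes index[query] return (doc_id, score) pairs like the record's sims.

from gensim import corpora, similarities

docs = [["x", "+", "y"], ["x", "^", "2"], ["\\frac", "x", "y"]]
dictionary = corpora.Dictionary(docs)
corpus = [dictionary.doc2bow(d) for d in docs]

index = similarities.MatrixSimilarity(corpus, num_features=len(dictionary))
index.num_best = 3                     # -> list of (doc_id, score) pairs

query = dictionary.doc2bow(["x", "y"])
neighbors = sorted(index[query], key=lambda item: -item[1])
print(neighbors)                       # best-matching doc ids first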
waqasbhatti/astrobase
astrobase/lcproc/tfa.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/tfa.py#L1335-L1456
def parallel_tfa_lclist(lclist, templateinfo, timecols=None, magcols=None, errcols=None, lcformat='hat-sql', lcformatdir=None, interp='nearest', sigclip=5.0, mintemplatedist_arcmin=10.0, nworkers=NCPUS, maxworkertasks=1000): '''This applies TFA in parallel to all LCs in the given list of file names. Parameters ---------- lclist : str This is a list of light curve files to apply TFA correction to. templateinfo : dict or str This is either the dict produced by `tfa_templates_lclist` or the pickle produced by the same function. timecols : list of str or None The timecol keys to use from the lcdict in applying TFA corrections. magcols : list of str or None The magcol keys to use from the lcdict in applying TFA corrections. errcols : list of str or None The errcol keys to use from the lcdict in applying TFA corrections. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. interp : str This is passed to scipy.interpolate.interp1d as the kind of interpolation to use when reforming the light curves to the timebase of the TFA templates. sigclip : float or sequence of two floats or None This is the sigma clip to apply to the light curves before running TFA on it. mintemplatedist_arcmin : float This sets the minimum distance required from the target object for objects in the TFA template ensemble. Objects closer than this distance will be removed from the ensemble. nworkers : int The number of parallel workers to launch maxworkertasks : int The maximum number of tasks per worker allowed before it's replaced by a fresh one. Returns ------- dict Contains the input file names and output TFA light curve filenames per input file organized by each `magcol` in `magcols`. ''' # open the templateinfo first if isinstance(templateinfo,str) and os.path.exists(templateinfo): with open(templateinfo,'rb') as infd: templateinfo = pickle.load(infd) try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # override the default timecols, magcols, and errcols # using the ones provided to the function # we'll get the defaults from the templateinfo object if timecols is None: timecols = templateinfo['timecols'] if magcols is None: magcols = templateinfo['magcols'] if errcols is None: errcols = templateinfo['errcols'] outdict = {} # run by magcol for t, m, e in zip(timecols, magcols, errcols): tasks = [(x, t, m, e, templateinfo, lcformat, lcformatdir, interp, sigclip) for x in lclist] pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks) results = pool.map(_parallel_tfa_worker, tasks) pool.close() pool.join() outdict[m] = results return outdict
[ "def", "parallel_tfa_lclist", "(", "lclist", ",", "templateinfo", ",", "timecols", "=", "None", ",", "magcols", "=", "None", ",", "errcols", "=", "None", ",", "lcformat", "=", "'hat-sql'", ",", "lcformatdir", "=", "None", ",", "interp", "=", "'nearest'", ",", "sigclip", "=", "5.0", ",", "mintemplatedist_arcmin", "=", "10.0", ",", "nworkers", "=", "NCPUS", ",", "maxworkertasks", "=", "1000", ")", ":", "# open the templateinfo first", "if", "isinstance", "(", "templateinfo", ",", "str", ")", "and", "os", ".", "path", ".", "exists", "(", "templateinfo", ")", ":", "with", "open", "(", "templateinfo", ",", "'rb'", ")", "as", "infd", ":", "templateinfo", "=", "pickle", ".", "load", "(", "infd", ")", "try", ":", "formatinfo", "=", "get_lcformat", "(", "lcformat", ",", "use_lcformat_dir", "=", "lcformatdir", ")", "if", "formatinfo", ":", "(", "dfileglob", ",", "readerfunc", ",", "dtimecols", ",", "dmagcols", ",", "derrcols", ",", "magsarefluxes", ",", "normfunc", ")", "=", "formatinfo", "else", ":", "LOGERROR", "(", "\"can't figure out the light curve format\"", ")", "return", "None", "except", "Exception", "as", "e", ":", "LOGEXCEPTION", "(", "\"can't figure out the light curve format\"", ")", "return", "None", "# override the default timecols, magcols, and errcols", "# using the ones provided to the function", "# we'll get the defaults from the templateinfo object", "if", "timecols", "is", "None", ":", "timecols", "=", "templateinfo", "[", "'timecols'", "]", "if", "magcols", "is", "None", ":", "magcols", "=", "templateinfo", "[", "'magcols'", "]", "if", "errcols", "is", "None", ":", "errcols", "=", "templateinfo", "[", "'errcols'", "]", "outdict", "=", "{", "}", "# run by magcol", "for", "t", ",", "m", ",", "e", "in", "zip", "(", "timecols", ",", "magcols", ",", "errcols", ")", ":", "tasks", "=", "[", "(", "x", ",", "t", ",", "m", ",", "e", ",", "templateinfo", ",", "lcformat", ",", "lcformatdir", ",", "interp", ",", "sigclip", ")", "for", "x", "in", "lclist", "]", "pool", "=", "mp", ".", "Pool", "(", "nworkers", ",", "maxtasksperchild", "=", "maxworkertasks", ")", "results", "=", "pool", ".", "map", "(", "_parallel_tfa_worker", ",", "tasks", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "outdict", "[", "m", "]", "=", "results", "return", "outdict" ]
This applies TFA in parallel to all LCs in the given list of file names.

Parameters
----------
lclist : list of str
    This is a list of light curve files to apply TFA correction to.

templateinfo : dict or str
    This is either the dict produced by `tfa_templates_lclist` or the pickle
    produced by the same function.

timecols : list of str or None
    The timecol keys to use from the lcdict in applying TFA corrections.

magcols : list of str or None
    The magcol keys to use from the lcdict in applying TFA corrections.

errcols : list of str or None
    The errcol keys to use from the lcdict in applying TFA corrections.

lcformat : str
    This is the `formatkey` associated with your light curve format, which
    you previously passed in to the `lcproc.register_lcformat` function. This
    will be used to look up how to find and read the light curves specified
    in `basedir` or `use_list_of_filenames`.

lcformatdir : str or None
    If this is provided, gives the path to a directory where you've stored
    your lcformat description JSONs, other than the usual directories lcproc
    knows to search for them in. Use this along with `lcformat` to specify an
    LC format JSON file that's not currently registered with lcproc.

interp : str
    This is passed to scipy.interpolate.interp1d as the kind of interpolation
    to use when reforming the light curves to the timebase of the TFA
    templates.

sigclip : float or sequence of two floats or None
    This is the sigma clip to apply to the light curves before running TFA
    on it.

mintemplatedist_arcmin : float
    This sets the minimum distance required from the target object for
    objects in the TFA template ensemble. Objects closer than this distance
    will be removed from the ensemble.

nworkers : int
    The number of parallel workers to launch

maxworkertasks : int
    The maximum number of tasks per worker allowed before it's replaced by a
    fresh one.

Returns
-------
dict
    Contains the input file names and output TFA light curve filenames per
    input file organized by each `magcol` in `magcols`.
[ "This", "applies", "TFA", "in", "parallel", "to", "all", "LCs", "in", "the", "given", "list", "of", "file", "names", "." ]
python
valid
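The fan-out skeleton in isolation: one task tuple per light curve, one pool pass per magcol. The worker below is a stub standing in for _parallel_tfa_worker, and the file names and magcols are placeholders.

import multiprocessing as mp

def _worker(task):
    lcfile, magcol = task              # stub for _parallel_tfa_worker
    return (lcfile, magcol, "ok")

if __name__ == "__main__":
    lclist = ["lc1.pkl", "lc2.pkl", "lc3.pkl"]    # placeholder paths
    outdict = {}
    for magcol in ("aep_000", "atf_000"):         # placeholder magcols
        tasks = [(x, magcol) for x in lclist]
        with mp.Pool(4, maxtasksperchild=1000) as pool:
            outdict[magcol] = pool.map(_worker, tasks)
    print(outdict["aep_000"])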
yfpeng/bioc
bioc/utils.py
https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/utils.py#L74-L80
def shorten_text(text: str): """Return a short repr of text if it is longer than 40""" if len(text) <= 40: text = text else: text = text[:17] + ' ... ' + text[-17:] return repr(text)
[ "def", "shorten_text", "(", "text", ":", "str", ")", ":", "if", "len", "(", "text", ")", "<=", "40", ":", "text", "=", "text", "else", ":", "text", "=", "text", "[", ":", "17", "]", "+", "' ... '", "+", "text", "[", "-", "17", ":", "]", "return", "repr", "(", "text", ")" ]
Return a short repr of text if it is longer than 40 characters
[ "Return", "a", "short", "repr", "of", "text", "if", "it", "is", "longer", "than", "40" ]
python
train
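The truncation is symmetric -- 17 characters from each end around an ellipsis. A lightly tidied copy (the redundant text = text branch dropped) plus a demo:

def shorten_text(text: str):
    if len(text) > 40:
        text = text[:17] + ' ... ' + text[-17:]
    return repr(text)

print(shorten_text("short"))    # 'short'
print(shorten_text("a" * 60))   # 17 a's, ' ... ', 17 a's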
habnabit/panglery
panglery/pangler.py
https://github.com/habnabit/panglery/blob/4d62e408c4bfaae126c93a6151ded1e8dc75bcc8/panglery/pangler.py#L141-L156
def stored_bind(self, instance): """Bind an instance to this Pangler, using the bound Pangler store. This method functions identically to `bind`, except that it might return a Pangler which was previously bound to the provided instance. """ if self.id is None: return self.bind(instance) store = self._bound_pangler_store.setdefault(instance, {}) p = store.get(self.id) if p is None: p = store[self.id] = self.bind(instance) return p
[ "def", "stored_bind", "(", "self", ",", "instance", ")", ":", "if", "self", ".", "id", "is", "None", ":", "return", "self", ".", "bind", "(", "instance", ")", "store", "=", "self", ".", "_bound_pangler_store", ".", "setdefault", "(", "instance", ",", "{", "}", ")", "p", "=", "store", ".", "get", "(", "self", ".", "id", ")", "if", "p", "is", "None", ":", "p", "=", "store", "[", "self", ".", "id", "]", "=", "self", ".", "bind", "(", "instance", ")", "return", "p" ]
Bind an instance to this Pangler, using the bound Pangler store. This method functions identically to `bind`, except that it might return a Pangler which was previously bound to the provided instance.
[ "Bind", "an", "instance", "to", "this", "Pangler", "using", "the", "bound", "Pangler", "store", "." ]
python
train
omtinez/pddb
pddb/pddb.py
https://github.com/omtinez/pddb/blob/a24cee0702c8286c5c466c51ca65cf8dbc2c183c/pddb/pddb.py#L473-L525
def find_one(self, tname, where=None, where_not=None, columns=None, astype=None): ''' Find a single record in the provided table from the database. If multiple match, return the first one based on the internal order of the records. If no records are found, return empty dictionary, string or series depending on the value of `astype`. Parameters ---------- tname : str Table to search records from. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the found records, if any. astype: str, type or None (default `None`) Type to cast the output to. Possible values are: `nonetype`, `series`, `str`, `dict`, `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- records : str, dict or series Output type depends on `astype` parameter. Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object >>> db.find_one("test", astype="dict") {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'} >>> db.find_one("test", astype="series") __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 Name John Name: 0, dtype: object >>> db.find_one("test", astype=None) __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 Name John Name: 0, dtype: object >>> db.find_one("test", where={"Name": "John"}, astype="dict") {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'} >>> db.find_one("test", where_not={"Name": "John"}, astype="dict") {} ''' records = self.find(tname, where=where, where_not=where_not, columns=columns, astype='dataframe') return self._output(records, single=True, astype=astype)
[ "def", "find_one", "(", "self", ",", "tname", ",", "where", "=", "None", ",", "where_not", "=", "None", ",", "columns", "=", "None", ",", "astype", "=", "None", ")", ":", "records", "=", "self", ".", "find", "(", "tname", ",", "where", "=", "where", ",", "where_not", "=", "where_not", ",", "columns", "=", "columns", ",", "astype", "=", "'dataframe'", ")", "return", "self", ".", "_output", "(", "records", ",", "single", "=", "True", ",", "astype", "=", "astype", ")" ]
Find a single record in the provided table from the database. If multiple match, return the first one based on the internal order of the records. If no records are found, return empty dictionary, string or series depending on the value of `astype`. Parameters ---------- tname : str Table to search records from. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the found records, if any. astype: str, type or None (default `None`) Type to cast the output to. Possible values are: `nonetype`, `series`, `str`, `dict`, `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- records : str, dict or series Output type depends on `astype` parameter. Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object >>> db.find_one("test", astype="dict") {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'} >>> db.find_one("test", astype="series") __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 Name John Name: 0, dtype: object >>> db.find_one("test", astype=None) __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 Name John Name: 0, dtype: object >>> db.find_one("test", where={"Name": "John"}, astype="dict") {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'} >>> db.find_one("test", where_not={"Name": "John"}, astype="dict") {}
[ "Find", "a", "single", "record", "in", "the", "provided", "table", "from", "the", "database", ".", "If", "multiple", "match", "return", "the", "first", "one", "based", "on", "the", "internal", "order", "of", "the", "records", ".", "If", "no", "records", "are", "found", "return", "empty", "dictionary", "string", "or", "series", "depending", "on", "the", "value", "of", "astype", "." ]
python
train
inasafe/inasafe
safe/processors/post_processor_functions.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/processors/post_processor_functions.py#L29-L44
def multiply(**kwargs): """Simple postprocessor where we multiply the input values. :param kwargs: Dictionary of values to multiply :type kwargs: dict :return: The result. :rtype: float """ result = 1 for i in list(kwargs.values()): if not i: # If one value is null, we return null. return i result *= i return result
[ "def", "multiply", "(", "*", "*", "kwargs", ")", ":", "result", "=", "1", "for", "i", "in", "list", "(", "kwargs", ".", "values", "(", ")", ")", ":", "if", "not", "i", ":", "# If one value is null, we return null.", "return", "i", "result", "*=", "i", "return", "result" ]
Simple postprocessor where we multiply the input values. :param kwargs: Dictionary of values to multiply :type kwargs: dict :return: The result. :rtype: float
[ "Simple", "postprocessor", "where", "we", "multiply", "the", "input", "values", "." ]
python
train
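Since the full body of multiply() is shown above, its semantics are easy to demonstrate. Assuming the module path in this record is importable (an assumption about the package layout):

from safe.processors.post_processor_functions import multiply

multiply(population=1000, ratio=0.5)   # -> 500.0
multiply(population=1000, ratio=None)  # -> None: a null input short-circuits
multiply(population=1000, ratio=0)     # -> 0: any falsy value is returned as-is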
ask/redish
redish/types.py
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/types.py#L167-L178
def union(self, other): """Return the union of sets as a new set. (i.e. all elements that are in either set.) Operates on either redish.types.Set or __builtins__.set. """ if isinstance(other, self.__class__): return self.client.sunion([self.name, other.name]) else: return self._as_set().union(other)
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "self", ".", "client", ".", "sunion", "(", "[", "self", ".", "name", ",", "other", ".", "name", "]", ")", "else", ":", "return", "self", ".", "_as_set", "(", ")", ".", "union", "(", "other", ")" ]
Return the union of sets as a new set. (i.e. all elements that are in either set.) Operates on either redish.types.Set or __builtins__.set.
[ "Return", "the", "union", "of", "sets", "as", "a", "new", "set", "." ]
python
train
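A self-contained illustration of the type dispatch above. The stub classes are hypothetical stand-ins, not redish's real client; they mirror the two code paths: a redish-style Set operand triggers a server-side SUNION, while a builtin set is merged client-side.

class StubClient:
    """Mimics the one Redis call that union() relies on."""
    def __init__(self, data):
        self.data = data                      # key name -> builtin set

    def sunion(self, names):                  # server-side union over keys
        out = set()
        for name in names:
            out |= self.data[name]
        return out

class StubSet:
    def __init__(self, client, name):
        self.client, self.name = client, name

    def _as_set(self):
        return set(self.client.data[self.name])

    def union(self, other):                   # same branching as redish
        if isinstance(other, self.__class__):
            return self.client.sunion([self.name, other.name])
        return self._as_set().union(other)

client = StubClient({'a': {1, 2}, 'b': {2, 3}})
x, y = StubSet(client, 'a'), StubSet(client, 'b')
assert x.union(y) == {1, 2, 3}        # Set operand: computed by "Redis"
assert x.union({4}) == {1, 2, 4}      # builtin set operand: computed locally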
icgood/pymap
pymap/parsing/response/code.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/parsing/response/code.py#L29-L35
def string(self) -> bytes: """The capabilities string without the enclosing square brackets.""" if self._raw is not None: return self._raw self._raw = raw = BytesFormat(b' ').join( [b'CAPABILITY', b'IMAP4rev1'] + self.capabilities) return raw
[ "def", "string", "(", "self", ")", "->", "bytes", ":", "if", "self", ".", "_raw", "is", "not", "None", ":", "return", "self", ".", "_raw", "self", ".", "_raw", "=", "raw", "=", "BytesFormat", "(", "b' '", ")", ".", "join", "(", "[", "b'CAPABILITY'", ",", "b'IMAP4rev1'", "]", "+", "self", ".", "capabilities", ")", "return", "raw" ]
The capabilities string without the enclosing square brackets.
[ "The", "capabilities", "string", "without", "the", "enclosing", "square", "brackets", "." ]
python
train
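The join itself is ordinary bytes work. Assuming BytesFormat(b' ').join behaves like bytes.join here (an inference from its use above, not a documented guarantee), the produced value looks like:

capabilities = [b'STARTTLS', b'AUTH=PLAIN']   # hypothetical extra capabilities
b' '.join([b'CAPABILITY', b'IMAP4rev1'] + capabilities)
# -> b'CAPABILITY IMAP4rev1 STARTTLS AUTH=PLAIN'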
mkoura/dump2polarion
dump2polarion/csv_unicode.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/csv_unicode.py#L8-L15
def get_csv_reader(csvfile, dialect=csv.excel, encoding="utf-8", **kwds): """Returns csv reader.""" try: # pylint: disable=pointless-statement unicode return UnicodeReader(csvfile, dialect=dialect, encoding=encoding, **kwds) except NameError: return csv.reader(csvfile, dialect=dialect, **kwds)
[ "def", "get_csv_reader", "(", "csvfile", ",", "dialect", "=", "csv", ".", "excel", ",", "encoding", "=", "\"utf-8\"", ",", "*", "*", "kwds", ")", ":", "try", ":", "# pylint: disable=pointless-statement", "unicode", "return", "UnicodeReader", "(", "csvfile", ",", "dialect", "=", "dialect", ",", "encoding", "=", "encoding", ",", "*", "*", "kwds", ")", "except", "NameError", ":", "return", "csv", ".", "reader", "(", "csvfile", ",", "dialect", "=", "dialect", ",", "*", "*", "kwds", ")" ]
Returns csv reader.
[ "Returns", "csv", "reader", "." ]
python
train
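Assuming the module path in this record is importable, usage is identical on both Python major versions; on Python 3 the bare `unicode` statement raises NameError and the stdlib reader is returned:

import io
from dump2polarion.csv_unicode import get_csv_reader

buf = io.StringIO(u"title,verdict\nlogin works,passed\n")
reader = get_csv_reader(buf)
assert list(reader) == [['title', 'verdict'], ['login works', 'passed']]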
ejeschke/ginga
ginga/rv/plugins/Pan.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Pan.py#L420-L429
def pan_pan_cb(self, fitsimage, event): """Pan event in the pan window. Just pan the channel viewer. """ chviewer = self.fv.getfocus_viewer() bd = chviewer.get_bindings() if hasattr(bd, 'pa_pan'): return bd.pa_pan(chviewer, event) return False
[ "def", "pan_pan_cb", "(", "self", ",", "fitsimage", ",", "event", ")", ":", "chviewer", "=", "self", ".", "fv", ".", "getfocus_viewer", "(", ")", "bd", "=", "chviewer", ".", "get_bindings", "(", ")", "if", "hasattr", "(", "bd", ",", "'pa_pan'", ")", ":", "return", "bd", ".", "pa_pan", "(", "chviewer", ",", "event", ")", "return", "False" ]
Pan event in the pan window. Just pan the channel viewer.
[ "Pan", "event", "in", "the", "pan", "window", ".", "Just", "pan", "the", "channel", "viewer", "." ]
python
train
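The guarded delegation is the only logic here; stripped of the GUI plumbing it looks like this (stub classes with hypothetical names, standing in for the real viewer and bindings objects):

class StubBindings:
    def pa_pan(self, viewer, event):      # handler present -> event forwarded
        return True

class StubViewer:
    def get_bindings(self):
        return StubBindings()

def pan_pan_cb(viewer, event):
    bd = viewer.get_bindings()
    if hasattr(bd, 'pa_pan'):             # delegate only if supported
        return bd.pa_pan(viewer, event)
    return False

assert pan_pan_cb(StubViewer(), event=None) is True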
dade-ai/snipy
snipy/io/fileutil.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L32-L42
def readlines(filepath): """ read lines from a textfile :param filepath: :return: list[line] """ with open(filepath, 'rt') as f: lines = f.readlines() lines = map(str.strip, lines) lines = [l for l in lines if l] return lines
[ "def", "readlines", "(", "filepath", ")", ":", "with", "open", "(", "filepath", ",", "'rt'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "lines", "=", "map", "(", "str", ".", "strip", ",", "lines", ")", "lines", "=", "[", "l", "for", "l", "in", "lines", "if", "l", "]", "return", "lines" ]
read lines from a textfile :param filepath: :return: list[line]
[ "read", "lines", "from", "a", "textfile", ":", "param", "filepath", ":", ":", "return", ":", "list", "[", "line", "]" ]
python
valid
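A quick round-trip showing the stripping and blank-line filtering, assuming the module path in this record is importable:

import os
import tempfile
from snipy.io.fileutil import readlines

with tempfile.NamedTemporaryFile('wt', suffix='.txt', delete=False) as f:
    f.write("  alpha \n\n beta\n")
try:
    assert readlines(f.name) == ['alpha', 'beta']   # stripped, blanks dropped
finally:
    os.remove(f.name)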
PmagPy/PmagPy
pmagpy/ipmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7630-L7733
def iplot_hys(fignum, B, M, s): """ function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name """ if fignum != 0: plt.figure(num=fignum) plt.clf() hpars = {} # close up loop Npts = len(M) B70 = 0.7 * B[0] # 70 percent of maximum field for b in B: if b < B70: break Nint = B.index(b) - 1 if Nint > 30: Nint = 30 if Nint < 10: Nint = 10 Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], [] Mazero = "" m_init = 0.5 * (M[0] + M[1]) m_fin = 0.5 * (M[-1] + M[-2]) diff = m_fin - m_init Bmin = 0. for k in range(Npts): frac = old_div(float(k), float(Npts - 1)) Mfix.append((M[k] - diff * frac)) if Bzero == "" and B[k] < 0: Bzero = k if B[k] < Bmin: Bmin = B[k] kmin = k # adjust slope with first 30 data points (throwing out first 3) Bslop = B[2:Nint + 2] Mslop = Mfix[2:Nint + 2] polyU = polyfit(Bslop, Mslop, 1) # best fit line to high field points # adjust slope with first 30 points of ascending branch Bslop = B[kmin:kmin + (Nint + 1)] Mslop = Mfix[kmin:kmin + (Nint + 1)] polyL = polyfit(Bslop, Mslop, 1) # best fit line to high field points xhf = 0.5 * (polyU[0] + polyL[0]) # mean of two slopes # convert B to A/m, high field slope in m^3 hpars['hysteresis_xhf'] = '%8.2e' % (xhf * 4 * np.pi * 1e-7) meanint = 0.5 * (polyU[1] + polyL[1]) # mean of two intercepts Msat = 0.5 * (polyU[1] - polyL[1]) # mean of saturation remanence Moff = [] for k in range(Npts): # take out linear slope and offset (makes symmetric about origin) Moff.append((Mfix[k] - xhf * B[k] - meanint)) if Mzero == "" and Moff[k] < 0: Mzero = k if Mzero != "" and Mazero == "" and Moff[k] > 0: Mazero = k hpars['hysteresis_ms_moment'] = '%8.3e' % (Msat) # Ms in Am^2 # # split into upper and lower loops for splining Mupper, Bupper, Mlower, Blower = [], [], [], [] deltaM, Bdm = [], [] # diff between upper and lower curves at Bdm for k in range(kmin - 2, 0, -2): Mupper.append(old_div(Moff[k], Msat)) Bupper.append(B[k]) for k in range(kmin + 2, len(B)-1): Mlower.append(Moff[k] / Msat) Blower.append(B[k]) Iupper = spline.Spline(Bupper, Mupper) # get splines for upper up and down Ilower = spline.Spline(Blower, Mlower) # get splines for lower for b in np.arange(B[0]): # get range of field values Mpos = ((Iupper(b) - Ilower(b))) # evaluate on both sides of B Mneg = ((Iupper(-b) - Ilower(-b))) Bdm.append(b) deltaM.append(0.5 * (Mpos + Mneg)) # take average delta M print('whew') for k in range(Npts): MadjN.append(old_div(Moff[k], Msat)) Mnorm.append(old_div(M[k], Msat)) # find Mr : average of two spline fits evaluted at B=0 (times Msat) Mr = Msat * 0.5 * (Iupper(0.) - Ilower(0.)) hpars['hysteresis_mr_moment'] = '%8.3e' % (Mr) # find Bc (x intercept), interpolate between two bounding points Bz = B[Mzero - 1:Mzero + 1] Mz = Moff[Mzero - 1:Mzero + 1] Baz = B[Mazero - 1:Mazero + 1] Maz = Moff[Mazero - 1:Mazero + 1] try: poly = polyfit(Bz, Mz, 1) # best fit line through two bounding points Bc = old_div(-poly[1], poly[0]) # x intercept # best fit line through two bounding points poly = polyfit(Baz, Maz, 1) Bac = old_div(-poly[1], poly[0]) # x intercept hpars['hysteresis_bc'] = '%8.3e' % (0.5 * (abs(Bc) + abs(Bac))) except: hpars['hysteresis_bc'] = '0' return hpars, deltaM, Bdm, B, Mnorm, MadjN
[ "def", "iplot_hys", "(", "fignum", ",", "B", ",", "M", ",", "s", ")", ":", "if", "fignum", "!=", "0", ":", "plt", ".", "figure", "(", "num", "=", "fignum", ")", "plt", ".", "clf", "(", ")", "hpars", "=", "{", "}", "# close up loop", "Npts", "=", "len", "(", "M", ")", "B70", "=", "0.7", "*", "B", "[", "0", "]", "# 70 percent of maximum field", "for", "b", "in", "B", ":", "if", "b", "<", "B70", ":", "break", "Nint", "=", "B", ".", "index", "(", "b", ")", "-", "1", "if", "Nint", ">", "30", ":", "Nint", "=", "30", "if", "Nint", "<", "10", ":", "Nint", "=", "10", "Bzero", ",", "Mzero", ",", "Mfix", ",", "Mnorm", ",", "Madj", ",", "MadjN", "=", "\"\"", ",", "\"\"", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "Mazero", "=", "\"\"", "m_init", "=", "0.5", "*", "(", "M", "[", "0", "]", "+", "M", "[", "1", "]", ")", "m_fin", "=", "0.5", "*", "(", "M", "[", "-", "1", "]", "+", "M", "[", "-", "2", "]", ")", "diff", "=", "m_fin", "-", "m_init", "Bmin", "=", "0.", "for", "k", "in", "range", "(", "Npts", ")", ":", "frac", "=", "old_div", "(", "float", "(", "k", ")", ",", "float", "(", "Npts", "-", "1", ")", ")", "Mfix", ".", "append", "(", "(", "M", "[", "k", "]", "-", "diff", "*", "frac", ")", ")", "if", "Bzero", "==", "\"\"", "and", "B", "[", "k", "]", "<", "0", ":", "Bzero", "=", "k", "if", "B", "[", "k", "]", "<", "Bmin", ":", "Bmin", "=", "B", "[", "k", "]", "kmin", "=", "k", "# adjust slope with first 30 data points (throwing out first 3)", "Bslop", "=", "B", "[", "2", ":", "Nint", "+", "2", "]", "Mslop", "=", "Mfix", "[", "2", ":", "Nint", "+", "2", "]", "polyU", "=", "polyfit", "(", "Bslop", ",", "Mslop", ",", "1", ")", "# best fit line to high field points", "# adjust slope with first 30 points of ascending branch", "Bslop", "=", "B", "[", "kmin", ":", "kmin", "+", "(", "Nint", "+", "1", ")", "]", "Mslop", "=", "Mfix", "[", "kmin", ":", "kmin", "+", "(", "Nint", "+", "1", ")", "]", "polyL", "=", "polyfit", "(", "Bslop", ",", "Mslop", ",", "1", ")", "# best fit line to high field points", "xhf", "=", "0.5", "*", "(", "polyU", "[", "0", "]", "+", "polyL", "[", "0", "]", ")", "# mean of two slopes", "# convert B to A/m, high field slope in m^3", "hpars", "[", "'hysteresis_xhf'", "]", "=", "'%8.2e'", "%", "(", "xhf", "*", "4", "*", "np", ".", "pi", "*", "1e-7", ")", "meanint", "=", "0.5", "*", "(", "polyU", "[", "1", "]", "+", "polyL", "[", "1", "]", ")", "# mean of two intercepts", "Msat", "=", "0.5", "*", "(", "polyU", "[", "1", "]", "-", "polyL", "[", "1", "]", ")", "# mean of saturation remanence", "Moff", "=", "[", "]", "for", "k", "in", "range", "(", "Npts", ")", ":", "# take out linear slope and offset (makes symmetric about origin)", "Moff", ".", "append", "(", "(", "Mfix", "[", "k", "]", "-", "xhf", "*", "B", "[", "k", "]", "-", "meanint", ")", ")", "if", "Mzero", "==", "\"\"", "and", "Moff", "[", "k", "]", "<", "0", ":", "Mzero", "=", "k", "if", "Mzero", "!=", "\"\"", "and", "Mazero", "==", "\"\"", "and", "Moff", "[", "k", "]", ">", "0", ":", "Mazero", "=", "k", "hpars", "[", "'hysteresis_ms_moment'", "]", "=", "'%8.3e'", "%", "(", "Msat", ")", "# Ms in Am^2", "#", "# split into upper and lower loops for splining", "Mupper", ",", "Bupper", ",", "Mlower", ",", "Blower", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "deltaM", ",", "Bdm", "=", "[", "]", ",", "[", "]", "# diff between upper and lower curves at Bdm", "for", "k", "in", "range", "(", "kmin", "-", "2", ",", "0", ",", "-", "2", ")", ":", "Mupper", ".", "append", "(", "old_div", "(", "Moff", "[", "k", 
"]", ",", "Msat", ")", ")", "Bupper", ".", "append", "(", "B", "[", "k", "]", ")", "for", "k", "in", "range", "(", "kmin", "+", "2", ",", "len", "(", "B", ")", "-", "1", ")", ":", "Mlower", ".", "append", "(", "Moff", "[", "k", "]", "/", "Msat", ")", "Blower", ".", "append", "(", "B", "[", "k", "]", ")", "Iupper", "=", "spline", ".", "Spline", "(", "Bupper", ",", "Mupper", ")", "# get splines for upper up and down", "Ilower", "=", "spline", ".", "Spline", "(", "Blower", ",", "Mlower", ")", "# get splines for lower", "for", "b", "in", "np", ".", "arange", "(", "B", "[", "0", "]", ")", ":", "# get range of field values", "Mpos", "=", "(", "(", "Iupper", "(", "b", ")", "-", "Ilower", "(", "b", ")", ")", ")", "# evaluate on both sides of B", "Mneg", "=", "(", "(", "Iupper", "(", "-", "b", ")", "-", "Ilower", "(", "-", "b", ")", ")", ")", "Bdm", ".", "append", "(", "b", ")", "deltaM", ".", "append", "(", "0.5", "*", "(", "Mpos", "+", "Mneg", ")", ")", "# take average delta M", "print", "(", "'whew'", ")", "for", "k", "in", "range", "(", "Npts", ")", ":", "MadjN", ".", "append", "(", "old_div", "(", "Moff", "[", "k", "]", ",", "Msat", ")", ")", "Mnorm", ".", "append", "(", "old_div", "(", "M", "[", "k", "]", ",", "Msat", ")", ")", "# find Mr : average of two spline fits evaluted at B=0 (times Msat)", "Mr", "=", "Msat", "*", "0.5", "*", "(", "Iupper", "(", "0.", ")", "-", "Ilower", "(", "0.", ")", ")", "hpars", "[", "'hysteresis_mr_moment'", "]", "=", "'%8.3e'", "%", "(", "Mr", ")", "# find Bc (x intercept), interpolate between two bounding points", "Bz", "=", "B", "[", "Mzero", "-", "1", ":", "Mzero", "+", "1", "]", "Mz", "=", "Moff", "[", "Mzero", "-", "1", ":", "Mzero", "+", "1", "]", "Baz", "=", "B", "[", "Mazero", "-", "1", ":", "Mazero", "+", "1", "]", "Maz", "=", "Moff", "[", "Mazero", "-", "1", ":", "Mazero", "+", "1", "]", "try", ":", "poly", "=", "polyfit", "(", "Bz", ",", "Mz", ",", "1", ")", "# best fit line through two bounding points", "Bc", "=", "old_div", "(", "-", "poly", "[", "1", "]", ",", "poly", "[", "0", "]", ")", "# x intercept", "# best fit line through two bounding points", "poly", "=", "polyfit", "(", "Baz", ",", "Maz", ",", "1", ")", "Bac", "=", "old_div", "(", "-", "poly", "[", "1", "]", ",", "poly", "[", "0", "]", ")", "# x intercept", "hpars", "[", "'hysteresis_bc'", "]", "=", "'%8.3e'", "%", "(", "0.5", "*", "(", "abs", "(", "Bc", ")", "+", "abs", "(", "Bac", ")", ")", ")", "except", ":", "hpars", "[", "'hysteresis_bc'", "]", "=", "'0'", "return", "hpars", ",", "deltaM", ",", "Bdm", ",", "B", ",", "Mnorm", ",", "MadjN" ]
function to plot hysteresis data This function has been adapted from pmagplotlib.iplot_hys for specific use within a Jupyter notebook. Parameters ----------- fignum : reference number for matplotlib figure being created B : list of B (flux density) values of hysteresis experiment M : list of M (magnetization) values of hysteresis experiment s : specimen name
[ "function", "to", "plot", "hysteresis", "data" ]
python
train
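The high-field slope correction at the heart of the routine can be shown standalone with synthetic branches (numpy only; the data are made up for illustration):

import numpy as np

# Two saturated branches sharing the same paramagnetic (linear) term.
B = np.array([0.7, 0.8, 0.9, 1.0])
M_upper = 1.0 + 0.2 * B
M_lower = -1.0 + 0.2 * B

slope_u, int_u = np.polyfit(B, M_upper, 1)   # best-fit line, upper branch
slope_l, int_l = np.polyfit(B, M_lower, 1)   # best-fit line, lower branch

xhf = 0.5 * (slope_u + slope_l)     # high-field susceptibility -> 0.2
Msat = 0.5 * (int_u - int_l)        # saturation magnetization  -> 1.0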
tradenity/python-sdk
tradenity/resources/measurement_settings.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/measurement_settings.py#L55-L69
def weight_unit(self, weight_unit): """Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str """ allowed_values = ["pound", "kilogram"] # noqa: E501 if weight_unit is not None and weight_unit not in allowed_values: raise ValueError( "Invalid value for `weight_unit` ({0}), must be one of {1}" # noqa: E501 .format(weight_unit, allowed_values) ) self._weight_unit = weight_unit
[ "def", "weight_unit", "(", "self", ",", "weight_unit", ")", ":", "allowed_values", "=", "[", "\"pound\"", ",", "\"kilogram\"", "]", "# noqa: E501", "if", "weight_unit", "is", "not", "None", "and", "weight_unit", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `weight_unit` ({0}), must be one of {1}\"", "# noqa: E501", ".", "format", "(", "weight_unit", ",", "allowed_values", ")", ")", "self", ".", "_weight_unit", "=", "weight_unit" ]
Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str
[ "Sets", "the", "weight_unit", "of", "this", "MeasurementSettings", "." ]
python
train
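The setter is plain validate-then-assign. A hypothetical usage sketch, assuming the generated model class can be constructed without required arguments:

from tradenity.resources.measurement_settings import MeasurementSettings

m = MeasurementSettings()
m.weight_unit = "kilogram"        # accepted
try:
    m.weight_unit = "gram"        # not in ["pound", "kilogram"]
except ValueError as exc:
    print(exc)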
readbeyond/aeneas
aeneas/audiofile.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/audiofile.py#L607-L630
def write(self, file_path):
        """
        Write the audio data to file.
        Raise ``OSError`` if the file cannot be written.

        :param string file_path: the path of the output file to be written
        :raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet

        .. versionadded:: 1.2.0
        """
        if self.__samples is None:
            if self.file_path is None:
                self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
            else:
                self.read_samples_from_file()
        self.log([u"Writing audio file '%s'...", file_path])
        try:
            # our value is a float64 in [-1, 1]
            # scipy writes the sample as an int16_t, that is, a number in [-32768, 32767]
            data = (self.audio_samples * 32768).astype("int16")
            scipywavwrite(file_path, self.audio_sample_rate, data)
        except Exception as exc:
            self.log_exc(u"Error writing audio file to '%s'" % (file_path), exc, True, OSError)
        self.log([u"Writing audio file '%s'... done", file_path])
[ "def", "write", "(", "self", ",", "file_path", ")", ":", "if", "self", ".", "__samples", "is", "None", ":", "if", "self", ".", "file_path", "is", "None", ":", "self", ".", "log_exc", "(", "u\"AudioFile object not initialized\"", ",", "None", ",", "True", ",", "AudioFileNotInitializedError", ")", "else", ":", "self", ".", "read_samples_from_file", "(", ")", "self", ".", "log", "(", "[", "u\"Writing audio file '%s'...\"", ",", "file_path", "]", ")", "try", ":", "# our value is a float64 in [-1, 1]", "# scipy writes the sample as an int16_t, that is, a number in [-32768, 32767]", "data", "=", "(", "self", ".", "audio_samples", "*", "32768", ")", ".", "astype", "(", "\"int16\"", ")", "scipywavwrite", "(", "file_path", ",", "self", ".", "audio_sample_rate", ",", "data", ")", "except", "Exception", "as", "exc", ":", "self", ".", "log_exc", "(", "u\"Error writing audio file to '%s'\"", "%", "(", "file_path", ")", ",", "exc", ",", "True", ",", "OSError", ")", "self", ".", "log", "(", "[", "u\"Writing audio file '%s'... done\"", ",", "file_path", "]", ")" ]
Write the audio data to file.
    Raise ``OSError`` if the file cannot be written.

    :param string file_path: the path of the output file to be written
    :raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet

    .. versionadded:: 1.2.0
[ "Write", "the", "audio", "data", "to", "file", ".", "Return", "True", "on", "success", "or", "False", "otherwise", "." ]
python
train
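The float-to-int16 rescaling is the essential step; a standalone rendition using the same scipy call (the file name and signal are made up):

import numpy as np
from scipy.io.wavfile import write as scipywavwrite

sample_rate = 16000
t = np.linspace(0, 1, sample_rate, endpoint=False)
samples = 0.5 * np.sin(2 * np.pi * 440 * t)      # float64 in [-1, 1]

data = (samples * 32768).astype("int16")         # same scaling as in write()
scipywavwrite("tone.wav", sample_rate, data)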
seleniumbase/SeleniumBase
seleniumbase/core/log_helper.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/core/log_helper.py#L107-L132
def log_folder_setup(log_path, archive_logs=False): """ Handle Logging """ if log_path.endswith("/"): log_path = log_path[:-1] if not os.path.exists(log_path): try: os.makedirs(log_path) except Exception: pass # Should only be reachable during multi-threaded runs else: archived_folder = "%s/../archived_logs/" % log_path if not os.path.exists(archived_folder): try: os.makedirs(archived_folder) except Exception: pass # Should only be reachable during multi-threaded runs if not "".join(sys.argv) == "-c": # Only move log files if the test run is not multi-threaded. # (Running tests with "-n NUM" will create threads that only # have "-c" in the sys.argv list. Easy to catch.) archived_logs = "%slogs_%s" % ( archived_folder, int(time.time())) shutil.move(log_path, archived_logs) os.makedirs(log_path) if not settings.ARCHIVE_EXISTING_LOGS and not archive_logs: shutil.rmtree(archived_logs)
[ "def", "log_folder_setup", "(", "log_path", ",", "archive_logs", "=", "False", ")", ":", "if", "log_path", ".", "endswith", "(", "\"/\"", ")", ":", "log_path", "=", "log_path", "[", ":", "-", "1", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "log_path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "log_path", ")", "except", "Exception", ":", "pass", "# Should only be reachable during multi-threaded runs", "else", ":", "archived_folder", "=", "\"%s/../archived_logs/\"", "%", "log_path", "if", "not", "os", ".", "path", ".", "exists", "(", "archived_folder", ")", ":", "try", ":", "os", ".", "makedirs", "(", "archived_folder", ")", "except", "Exception", ":", "pass", "# Should only be reachable during multi-threaded runs", "if", "not", "\"\"", ".", "join", "(", "sys", ".", "argv", ")", "==", "\"-c\"", ":", "# Only move log files if the test run is not multi-threaded.", "# (Running tests with \"-n NUM\" will create threads that only", "# have \"-c\" in the sys.argv list. Easy to catch.)", "archived_logs", "=", "\"%slogs_%s\"", "%", "(", "archived_folder", ",", "int", "(", "time", ".", "time", "(", ")", ")", ")", "shutil", ".", "move", "(", "log_path", ",", "archived_logs", ")", "os", ".", "makedirs", "(", "log_path", ")", "if", "not", "settings", ".", "ARCHIVE_EXISTING_LOGS", "and", "not", "archive_logs", ":", "shutil", ".", "rmtree", "(", "archived_logs", ")" ]
Handle Logging
[ "Handle", "Logging" ]
python
train
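Assuming the module path in this record is importable, a first call creates the folder and a later call archives what the previous run left behind (the folder name is an arbitrary example):

from seleniumbase.core.log_helper import log_folder_setup

log_folder_setup("latest_logs/", archive_logs=True)
# ... a test run writes files into latest_logs/ ...
log_folder_setup("latest_logs/", archive_logs=True)
# existing contents were moved to ../archived_logs/logs_<timestamp>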