Dataset schema (column, type, value range):

repo               string         lengths 7-55
path               string         lengths 4-223
url                string         lengths 87-315
code               string         lengths 75-104k
code_tokens        list
docstring          string         lengths 1-46.9k
docstring_tokens   list
language           stringclasses  1 value
partition          stringclasses  3 values
avg_line_len       float64        7.91-980
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/compare.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/compare.py#L7-L21
def compare_files(path1, path2):
    # type: (str, str) -> List[str]
    """Returns the delta between two files using -, ?, + format excluding
    lines that are the same

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        List[str]: Delta between the two files
    """
    diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())
    return [x for x in diff if x[0] in ['-', '+', '?']]
[ "def", "compare_files", "(", "path1", ",", "path2", ")", ":", "# type: (str, str) -> List[str]", "diff", "=", "difflib", ".", "ndiff", "(", "open", "(", "path1", ")", ".", "readlines", "(", ")", ",", "open", "(", "path2", ")", ".", "readlines", "(", ")", ")", "return", "[", "x", "for", "x", "in", "diff", "if", "x", "[", "0", "]", "in", "[", "'-'", ",", "'+'", ",", "'?'", "]", "]" ]
Returns the delta between two files using -, ?, + format excluding lines that are the same Args: path1 (str): Path to first file path2 (str): Path to second file Returns: List[str]: Delta between the two files
[ "Returns", "the", "delta", "between", "two", "files", "using", "-", "?", "+", "format", "excluding", "lines", "that", "are", "the", "same" ]
python
train
29.933333
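For illustration, a minimal usage sketch of compare_files, assuming it is importable from hdx.utilities.compare (the import path mirrors this record's path field); the two sample files are hypothetical:

# Illustrative only: assumes the import path below and two throwaway files.
from hdx.utilities.compare import compare_files

with open('a.txt', 'w') as f:
    f.write('alpha\nbeta\n')
with open('b.txt', 'w') as f:
    f.write('alpha\ngamma\n')

# Only lines prefixed with '-', '+' or '?' are returned;
# identical lines are filtered out by the function.
for line in compare_files('a.txt', 'b.txt'):
    print(line.rstrip())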
dropbox/stone
stone/backends/python_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_types.py#L197-L223
def _docf(self, tag, val):
    """
    Callback used as the handler argument to process_docs(). This converts
    Stone doc references to Sphinx-friendly annotations.
    """
    if tag == 'type':
        return ':class:`{}`'.format(val)
    elif tag == 'route':
        if self.args.route_method:
            return ':meth:`%s`' % self.args.route_method.format(
                ns=self.cur_namespace.name, route=fmt_func(val))
        else:
            return val
    elif tag == 'link':
        anchor, link = val.rsplit(' ', 1)
        return '`{} <{}>`_'.format(anchor, link)
    elif tag == 'val':
        if val == 'null':
            return 'None'
        elif val == 'true' or val == 'false':
            return '``{}``'.format(val.capitalize())
        else:
            return val
    elif tag == 'field':
        return '``{}``'.format(val)
    else:
        raise RuntimeError('Unknown doc ref tag %r' % tag)
[ "def", "_docf", "(", "self", ",", "tag", ",", "val", ")", ":", "if", "tag", "==", "'type'", ":", "return", "':class:`{}`'", ".", "format", "(", "val", ")", "elif", "tag", "==", "'route'", ":", "if", "self", ".", "args", ".", "route_method", ":", "return", "':meth:`%s`'", "%", "self", ".", "args", ".", "route_method", ".", "format", "(", "ns", "=", "self", ".", "cur_namespace", ".", "name", ",", "route", "=", "fmt_func", "(", "val", ")", ")", "else", ":", "return", "val", "elif", "tag", "==", "'link'", ":", "anchor", ",", "link", "=", "val", ".", "rsplit", "(", "' '", ",", "1", ")", "return", "'`{} <{}>`_'", ".", "format", "(", "anchor", ",", "link", ")", "elif", "tag", "==", "'val'", ":", "if", "val", "==", "'null'", ":", "return", "'None'", "elif", "val", "==", "'true'", "or", "val", "==", "'false'", ":", "return", "'``{}``'", ".", "format", "(", "val", ".", "capitalize", "(", ")", ")", "else", ":", "return", "val", "elif", "tag", "==", "'field'", ":", "return", "'``{}``'", ".", "format", "(", "val", ")", "else", ":", "raise", "RuntimeError", "(", "'Unknown doc ref tag %r'", "%", "tag", ")" ]
Callback used as the handler argument to process_docs(). This converts Stone doc references to Sphinx-friendly annotations.
[ "Callback", "used", "as", "the", "handler", "argument", "to", "process_docs", "()", ".", "This", "converts", "Stone", "doc", "references", "to", "Sphinx", "-", "friendly", "annotations", "." ]
python
train
36.962963
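A standalone sketch of the tag-to-Sphinx mapping this callback performs (trimmed: the 'route' branch needs backend state, so it is omitted; the sample tags and values are invented):

# Same tag -> Sphinx markup rules, outside the Stone backend class.
def to_sphinx(tag, val):
    if tag == 'type':
        return ':class:`{}`'.format(val)
    if tag == 'link':
        anchor, link = val.rsplit(' ', 1)
        return '`{} <{}>`_'.format(anchor, link)
    if tag == 'val':
        if val == 'null':
            return 'None'
        if val in ('true', 'false'):
            return '``{}``'.format(val.capitalize())
        return val
    if tag == 'field':
        return '``{}``'.format(val)
    raise RuntimeError('Unknown doc ref tag %r' % tag)

print(to_sphinx('type', 'FileMetadata'))               # :class:`FileMetadata`
print(to_sphinx('link', 'docs https://example.test'))  # `docs <https://example.test>`_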
quasipedia/swaggery
swaggery/utils.py
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/utils.py#L98-L101
def map_exception_codes():
    '''Helper function to intialise CODES_TO_EXCEPTIONS.'''
    werkex = inspect.getmembers(exceptions,
                                lambda x: getattr(x, 'code', None))
    return {e.code: e for _, e in werkex}
[ "def", "map_exception_codes", "(", ")", ":", "werkex", "=", "inspect", ".", "getmembers", "(", "exceptions", ",", "lambda", "x", ":", "getattr", "(", "x", ",", "'code'", ",", "None", ")", ")", "return", "{", "e", ".", "code", ":", "e", "for", "_", ",", "e", "in", "werkex", "}" ]
Helper function to intialise CODES_TO_EXCEPTIONS.
[ "Helper", "function", "to", "intialise", "CODES_TO_EXCEPTIONS", "." ]
python
train
51.25
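A hedged sketch of what this helper appears to do, assuming exceptions refers to werkzeug.exceptions (the surrounding Swaggery imports are not shown in this record, so that is an assumption):

# Assumption: `exceptions` is werkzeug.exceptions; getmembers keeps any
# member whose `code` attribute is truthy, i.e. the HTTP error classes.
import inspect
from werkzeug import exceptions

werkex = inspect.getmembers(exceptions, lambda x: getattr(x, 'code', None))
codes_to_exceptions = {e.code: e for _, e in werkex}
print(codes_to_exceptions[404])  # e.g. <class 'werkzeug.exceptions.NotFound'>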
miguelgrinberg/python-socketio
socketio/server.py
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/server.py#L405-L443
def session(self, sid, namespace=None):
    """Return the user session for a client with context manager syntax.

    :param sid: The session id of the client.

    This is a context manager that returns the user session dictionary for
    the client. Any changes that are made to this dictionary inside the
    context manager block are saved back to the session. Example usage::

        @sio.on('connect')
        def on_connect(sid, environ):
            username = authenticate_user(environ)
            if not username:
                return False
            with sio.session(sid) as session:
                session['username'] = username

        @sio.on('message')
        def on_message(sid, msg):
            with sio.session(sid) as session:
                print('received message from ', session['username'])
    """
    class _session_context_manager(object):
        def __init__(self, server, sid, namespace):
            self.server = server
            self.sid = sid
            self.namespace = namespace
            self.session = None

        def __enter__(self):
            self.session = self.server.get_session(sid, namespace=namespace)
            return self.session

        def __exit__(self, *args):
            self.server.save_session(sid, self.session,
                                     namespace=namespace)

    return _session_context_manager(self, sid, namespace)
[ "def", "session", "(", "self", ",", "sid", ",", "namespace", "=", "None", ")", ":", "class", "_session_context_manager", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "server", ",", "sid", ",", "namespace", ")", ":", "self", ".", "server", "=", "server", "self", ".", "sid", "=", "sid", "self", ".", "namespace", "=", "namespace", "self", ".", "session", "=", "None", "def", "__enter__", "(", "self", ")", ":", "self", ".", "session", "=", "self", ".", "server", ".", "get_session", "(", "sid", ",", "namespace", "=", "namespace", ")", "return", "self", ".", "session", "def", "__exit__", "(", "self", ",", "*", "args", ")", ":", "self", ".", "server", ".", "save_session", "(", "sid", ",", "self", ".", "session", ",", "namespace", "=", "namespace", ")", "return", "_session_context_manager", "(", "self", ",", "sid", ",", "namespace", ")" ]
Return the user session for a client with context manager syntax. :param sid: The session id of the client. This is a context manager that returns the user session dictionary for the client. Any changes that are made to this dictionary inside the context manager block are saved back to the session. Example usage:: @sio.on('connect') def on_connect(sid, environ): username = authenticate_user(environ) if not username: return False with sio.session(sid) as session: session['username'] = username @sio.on('message') def on_message(sid, msg): with sio.session(sid) as session: print('received message from ', session['username'])
[ "Return", "the", "user", "session", "for", "a", "client", "with", "context", "manager", "syntax", "." ]
python
train
39.589744
rsheftel/raccoon
raccoon/series.py
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/series.py#L674-L681
def reset_index(self):
    """
    Resets the index of the Series to simple integer list and the index name
    to 'index'.

    :return: nothing
    """
    self.index = list(range(self.__len__()))
    self.index_name = 'index'
[ "def", "reset_index", "(", "self", ")", ":", "self", ".", "index", "=", "list", "(", "range", "(", "self", ".", "__len__", "(", ")", ")", ")", "self", ".", "index_name", "=", "'index'" ]
Resets the index of the Series to simple integer list and the index name to 'index'. :return: nothing
[ "Resets", "the", "index", "of", "the", "Series", "to", "simple", "integer", "list", "and", "the", "index", "name", "to", "index", "." ]
python
train
30.125
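A small usage sketch, assuming the raccoon Series constructor accepts data, index and index_name keyword arguments (the constructor signature is not shown in this record, so that is an assumption):

# Illustrative: build a raccoon Series with a custom index, then reset it.
from raccoon import Series

srs = Series(data=[10, 20, 30], index=['a', 'b', 'c'], index_name='letters')
srs.reset_index()
print(srs.index)       # [0, 1, 2]
print(srs.index_name)  # 'index'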
ml4ai/delphi
delphi/analysis/comparison/utils.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/analysis/comparison/utils.py#L18-L20
def get_output_nodes(G: nx.DiGraph) -> List[str]:
    """ Get all output nodes from a network. """
    return [n for n, d in G.out_degree() if d == 0]
[ "def", "get_output_nodes", "(", "G", ":", "nx", ".", "DiGraph", ")", "->", "List", "[", "str", "]", ":", "return", "[", "n", "for", "n", ",", "d", "in", "G", ".", "out_degree", "(", ")", "if", "d", "==", "0", "]" ]
Get all output nodes from a network.
[ "Get", "all", "output", "nodes", "from", "a", "network", "." ]
python
train
49.333333
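A quick runnable check of the sink-node selection using networkx, on a toy graph of my own:

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([('a', 'b'), ('b', 'c'), ('b', 'd')])

# Output nodes are those with out-degree 0 (here: 'c' and 'd').
outputs = [n for n, d in G.out_degree() if d == 0]
print(outputs)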
smarter-travel-media/stac
stac/http.py
https://github.com/smarter-travel-media/stac/blob/cdb29a17ede0924b122b3905a500442c62ae53b7/stac/http.py#L69-L102
def get_most_recent_versions(self, group, artifact, limit, remote=False, integration=False):
    """Get a list of the version numbers of the most recent artifacts
    (integration or non-integration), ordered by the version number,
    for a particular group and artifact combination.

    :param str group: Group of the artifact to get versions of
    :param str artifact: Name of the artifact to get versions of
    :param int limit: Fetch only this many of the most recent releases
    :param bool remote: Should remote repositories be searched to find the
        latest versions? Note this can make the request much slower.
        Default is false.
    :param bool integration: If true, fetch only "integration versions",
        otherwise fetch only non-integration versions.
    :return: Version numbers of the most recent artifacts
    :rtype: list
    :raises requests.exceptions.HTTPError: For any non-success HTTP responses
        from the Artifactory API.
    :raises ValueError: If limit is 0 or negative.
    """
    if limit < 1:
        raise ValueError("Releases limit must be positive")

    url = self._base_url + '/api/search/versions'
    params = {'g': group, 'a': artifact, 'repos': self._repo, 'remote': int(remote)}

    self._logger.debug("Using all version API at %s - params %s", url, params)
    response = self._session.get(url, params=params)
    response.raise_for_status()

    json = response.json()
    versions = [
        item['version'] for item in json['results']
        if item['integration'] is integration]  # pylint: disable=no-member

    versions.sort(key=distutils.version.LooseVersion, reverse=True)
    return versions[:limit]
[ "def", "get_most_recent_versions", "(", "self", ",", "group", ",", "artifact", ",", "limit", ",", "remote", "=", "False", ",", "integration", "=", "False", ")", ":", "if", "limit", "<", "1", ":", "raise", "ValueError", "(", "\"Releases limit must be positive\"", ")", "url", "=", "self", ".", "_base_url", "+", "'/api/search/versions'", "params", "=", "{", "'g'", ":", "group", ",", "'a'", ":", "artifact", ",", "'repos'", ":", "self", ".", "_repo", ",", "'remote'", ":", "int", "(", "remote", ")", "}", "self", ".", "_logger", ".", "debug", "(", "\"Using all version API at %s - params %s\"", ",", "url", ",", "params", ")", "response", "=", "self", ".", "_session", ".", "get", "(", "url", ",", "params", "=", "params", ")", "response", ".", "raise_for_status", "(", ")", "json", "=", "response", ".", "json", "(", ")", "versions", "=", "[", "item", "[", "'version'", "]", "for", "item", "in", "json", "[", "'results'", "]", "if", "item", "[", "'integration'", "]", "is", "integration", "]", "# pylint: disable=no-member", "versions", ".", "sort", "(", "key", "=", "distutils", ".", "version", ".", "LooseVersion", ",", "reverse", "=", "True", ")", "return", "versions", "[", ":", "limit", "]" ]
Get a list of the version numbers of the most recent artifacts (integration or non-integration), ordered by the version number, for a particular group and artifact combination. :param str group: Group of the artifact to get versions of :param str artifact: Name of the artifact to get versions of :param int limit: Fetch only this many of the most recent releases :param bool remote: Should remote repositories be searched to find the latest versions? Note this can make the request much slower. Default is false. :param bool integration: If true, fetch only "integration versions", otherwise fetch only non-integration versions. :return: Version numbers of the most recent artifacts :rtype: list :raises requests.exceptions.HTTPError: For any non-success HTTP responses from the Artifactory API. :raises ValueError: If limit is 0 or negative.
[ "Get", "a", "list", "of", "the", "version", "numbers", "of", "the", "most", "recent", "artifacts", "(", "integration", "or", "non", "-", "integration", ")", "ordered", "by", "the", "version", "number", "for", "a", "particular", "group", "and", "artifact", "combination", "." ]
python
train
51.352941
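The version ordering relies on distutils' LooseVersion; a standalone sketch of that step with made-up version strings (note that distutils is deprecated in recent Python releases):

# Same sort key the method uses, applied to a hypothetical version list.
import distutils.version

versions = ['1.2.10', '1.2.2', '1.10.0', '1.2.9']
versions.sort(key=distutils.version.LooseVersion, reverse=True)
print(versions[:2])  # ['1.10.0', '1.2.10'] - newest first, numerically aware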
tanghaibao/jcvi
jcvi/assembly/kmer.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L491-L516
def logodds(args):
    """
    %prog logodds cnt1 cnt2

    Compute log likelihood between two db.
    """
    from math import log
    from jcvi.formats.base import DictFile

    p = OptionParser(logodds.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    cnt1, cnt2 = args
    d = DictFile(cnt2)
    fp = open(cnt1)
    for row in fp:
        scf, c1 = row.split()
        c2 = d[scf]
        c1, c2 = float(c1), float(c2)
        c1 += 1
        c2 += 1
        score = int(100 * (log(c1) - log(c2)))
        print("{0}\t{1}".format(scf, score))
[ "def", "logodds", "(", "args", ")", ":", "from", "math", "import", "log", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "logodds", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "cnt1", ",", "cnt2", "=", "args", "d", "=", "DictFile", "(", "cnt2", ")", "fp", "=", "open", "(", "cnt1", ")", "for", "row", "in", "fp", ":", "scf", ",", "c1", "=", "row", ".", "split", "(", ")", "c2", "=", "d", "[", "scf", "]", "c1", ",", "c2", "=", "float", "(", "c1", ")", ",", "float", "(", "c2", ")", "c1", "+=", "1", "c2", "+=", "1", "score", "=", "int", "(", "100", "*", "(", "log", "(", "c1", ")", "-", "log", "(", "c2", ")", ")", ")", "print", "(", "\"{0}\\t{1}\"", ".", "format", "(", "scf", ",", "score", ")", ")" ]
%prog logodds cnt1 cnt2 Compute log likelihood between two db.
[ "%prog", "logodds", "cnt1", "cnt2" ]
python
train
22.384615
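The per-scaffold score reduces to a scaled difference of logs with add-one smoothing; a small standalone check with invented counts:

from math import log

def logodds_score(c1, c2):
    # Add-one smoothing, then 100 * (ln c1 - ln c2) truncated to int,
    # mirroring the scoring line inside logodds().
    c1, c2 = float(c1) + 1, float(c2) + 1
    return int(100 * (log(c1) - log(c2)))

print(logodds_score(50, 5))   # positive: first db has higher counts
print(logodds_score(5, 50))   # negative: second db dominates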
aequitas/python-rflink
rflink/protocol.py
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L304-L325
def create_rflink_connection(port=None, host=None, baud=57600,
                             protocol=RflinkProtocol,
                             packet_callback=None, event_callback=None,
                             disconnect_callback=None, ignore=None,
                             loop=None):
    """Create Rflink manager class, returns transport coroutine."""
    # use default protocol if not specified
    protocol = partial(
        protocol,
        loop=loop if loop else asyncio.get_event_loop(),
        packet_callback=packet_callback,
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        ignore=ignore if ignore else [],
    )

    # setup serial connection if no transport specified
    if host:
        conn = loop.create_connection(protocol, host, port)
    else:
        baud = baud
        conn = create_serial_connection(loop, protocol, port, baud)

    return conn
[ "def", "create_rflink_connection", "(", "port", "=", "None", ",", "host", "=", "None", ",", "baud", "=", "57600", ",", "protocol", "=", "RflinkProtocol", ",", "packet_callback", "=", "None", ",", "event_callback", "=", "None", ",", "disconnect_callback", "=", "None", ",", "ignore", "=", "None", ",", "loop", "=", "None", ")", ":", "# use default protocol if not specified", "protocol", "=", "partial", "(", "protocol", ",", "loop", "=", "loop", "if", "loop", "else", "asyncio", ".", "get_event_loop", "(", ")", ",", "packet_callback", "=", "packet_callback", ",", "event_callback", "=", "event_callback", ",", "disconnect_callback", "=", "disconnect_callback", ",", "ignore", "=", "ignore", "if", "ignore", "else", "[", "]", ",", ")", "# setup serial connection if no transport specified", "if", "host", ":", "conn", "=", "loop", ".", "create_connection", "(", "protocol", ",", "host", ",", "port", ")", "else", ":", "baud", "=", "baud", "conn", "=", "create_serial_connection", "(", "loop", ",", "protocol", ",", "port", ",", "baud", ")", "return", "conn" ]
Create Rflink manager class, returns transport coroutine.
[ "Create", "Rflink", "manager", "class", "returns", "transport", "coroutine", "." ]
python
train
38.636364
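A hedged usage sketch: the function returns a coroutine for the transport/protocol pair, so a caller would typically drive it from an asyncio event loop (the serial port path below is hypothetical and requires real hardware):

import asyncio
from rflink.protocol import create_rflink_connection

loop = asyncio.get_event_loop()

# No host given, so a serial-connection coroutine is returned; it must be
# awaited (or run to completion on the loop) to obtain transport/protocol.
conn = create_rflink_connection(port='/dev/ttyACM0', loop=loop)
transport, protocol = loop.run_until_complete(conn)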
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/__init__.py#L352-L361
def model_builds(self):
    """
    Access the model_builds

    :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
    """
    if self._model_builds is None:
        self._model_builds = ModelBuildList(self._version, assistant_sid=self._solution['sid'], )
    return self._model_builds
[ "def", "model_builds", "(", "self", ")", ":", "if", "self", ".", "_model_builds", "is", "None", ":", "self", ".", "_model_builds", "=", "ModelBuildList", "(", "self", ".", "_version", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_model_builds" ]
Access the model_builds :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
[ "Access", "the", "model_builds" ]
python
train
40.4
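This accessor is a lazy initialiser: the list object is built on first access and cached on the instance. A generic sketch of the same pattern outside Twilio (all names here are invented):

# Generic lazy-initialisation pattern mirrored by model_builds().
class Assistant:
    def __init__(self, sid):
        self._solution = {'sid': sid}
        self._model_builds = None   # not built until first access

    @property
    def model_builds(self):
        if self._model_builds is None:
            # stand-in for ModelBuildList(self._version, assistant_sid=...)
            self._model_builds = ['build for {}'.format(self._solution['sid'])]
        return self._model_builds

a = Assistant('UA123')
print(a.model_builds is a.model_builds)  # True - same cached object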
sentinel-hub/eo-learn
core/eolearn/core/eoworkflow.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/eoworkflow.py#L294-L315
def _get_dep_to_dot_name_mapping(dependencies):
    """ Creates mapping between Dependency classes and names used in DOT graph """
    dot_name_to_deps = {}
    for dep in dependencies:
        dot_name = dep.name

        if dot_name not in dot_name_to_deps:
            dot_name_to_deps[dot_name] = [dep]
        else:
            dot_name_to_deps[dot_name].append(dep)

    dep_to_dot_name = {}
    for dot_name, deps in dot_name_to_deps.items():

        if len(deps) == 1:
            dep_to_dot_name[deps[0]] = dot_name
            continue

        for idx, dep in enumerate(deps):
            dep_to_dot_name[dep] = dot_name + str(idx)

    return dep_to_dot_name
[ "def", "_get_dep_to_dot_name_mapping", "(", "dependencies", ")", ":", "dot_name_to_deps", "=", "{", "}", "for", "dep", "in", "dependencies", ":", "dot_name", "=", "dep", ".", "name", "if", "dot_name", "not", "in", "dot_name_to_deps", ":", "dot_name_to_deps", "[", "dot_name", "]", "=", "[", "dep", "]", "else", ":", "dot_name_to_deps", "[", "dot_name", "]", ".", "append", "(", "dep", ")", "dep_to_dot_name", "=", "{", "}", "for", "dot_name", ",", "deps", "in", "dot_name_to_deps", ".", "items", "(", ")", ":", "if", "len", "(", "deps", ")", "==", "1", ":", "dep_to_dot_name", "[", "deps", "[", "0", "]", "]", "=", "dot_name", "continue", "for", "idx", ",", "dep", "in", "enumerate", "(", "deps", ")", ":", "dep_to_dot_name", "[", "dep", "]", "=", "dot_name", "+", "str", "(", "idx", ")", "return", "dep_to_dot_name" ]
Creates mapping between Dependency classes and names used in DOT graph
[ "Creates", "mapping", "between", "Dependency", "classes", "and", "names", "used", "in", "DOT", "graph" ]
python
train
33.727273
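The disambiguation rule (append an index only when a name repeats) can be checked with plain objects; a tiny sketch using a stand-in class in place of eo-learn's Dependency:

class Dependency:                      # stand-in for eo-learn's class
    def __init__(self, name):
        self.name = name

deps = [Dependency('load'), Dependency('ndvi'), Dependency('load')]

dot_name_to_deps = {}
for dep in deps:
    dot_name_to_deps.setdefault(dep.name, []).append(dep)

dep_to_dot_name = {}
for dot_name, group in dot_name_to_deps.items():
    if len(group) == 1:
        dep_to_dot_name[group[0]] = dot_name
    else:
        # duplicate names get a numeric suffix so DOT nodes stay unique
        for idx, dep in enumerate(group):
            dep_to_dot_name[dep] = dot_name + str(idx)

print(sorted(dep_to_dot_name.values()))  # ['load0', 'load1', 'ndvi']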
MacHu-GWU/single_file_module-project
sfm/iterable.py
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L65-L82
def nth(iterable, n, default=None):
    """Returns the nth item or a default value.

    Example::

        >>> nth([0, 1, 2], 1)
        1

        >>> nth([0, 1, 2], 100)
        None

    **中文文档**

    取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。
    因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于
    那些取index操作被改写了的可循环对象。
    """
    return next(itertools.islice(iterable, n, None), default)
[ "def", "nth", "(", "iterable", ",", "n", ",", "default", "=", "None", ")", ":", "return", "next", "(", "itertools", ".", "islice", "(", "iterable", ",", "n", ",", "None", ")", ",", "default", ")" ]
Returns the nth item or a default value. Example:: >>> nth([0, 1, 2], 1) 1 >>> nth([0, 1, 2], 100) None **中文文档** 取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。 因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于 那些取index操作被改写了的可循环对象。
[ "Returns", "the", "nth", "item", "or", "a", "default", "value", "." ]
python
train
21.166667
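The islice trick is easy to check directly; a couple of runnable calls, including a generator to show that nothing is materialised:

import itertools

def nth(iterable, n, default=None):
    return next(itertools.islice(iterable, n, None), default)

print(nth([0, 1, 2], 1))                      # 1
print(nth([0, 1, 2], 100))                    # None
print(nth((x * x for x in range(10**9)), 4))  # 16, without building a list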
mlperf/training
reinforcement/tensorflow/minigo/ratings/ratings.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/ratings/ratings.py#L233-L268
def suggest_pairs(top_n=10, per_n=3, ignore_before=300):
    """ Find the maximally interesting pairs of players to match up
    First, sort the ratings by uncertainty.
    Then, take the ten highest players with the highest uncertainty
    For each of them, call them `p1`
    Sort all the models by their distance from p1's rating and take the 20
    nearest rated models. ('candidate_p2s')
    Choose pairings, (p1, p2), randomly from this list.

    `top_n` will pair the top n models by uncertainty.
    `per_n` will give each of the top_n models this many opponents
    `ignore_before` is the model number to `filter` off, i.e., the early models.

    Returns a list of *model numbers*, not model ids.
    """
    db = sqlite3.connect("ratings.db")
    data = db.execute("select model_winner, model_loser from wins").fetchall()
    bucket_ids = [id[0] for id in db.execute(
        "select id from models where bucket = ?", (fsdb.models_dir(),)).fetchall()]
    bucket_ids.sort()
    data = [d for d in data if d[0] in bucket_ids and d[1] in bucket_ids]
    ratings = [(model_num_for(k), v[0], v[1])
               for k, v in compute_ratings(data).items()]
    ratings.sort()
    ratings = ratings[ignore_before:]  # Filter off the first 100 models, which improve too fast.
    ratings.sort(key=lambda r: r[2], reverse=True)

    res = []
    for p1 in ratings[:top_n]:
        candidate_p2s = sorted(ratings, key=lambda p2_tup: abs(p1[1] - p2_tup[1]))[1:20]
        choices = random.sample(candidate_p2s, per_n)
        print("Pairing {}, sigma {:.2f} (Rating {:.2f})".format(p1[0], p1[2], p1[1]))
        for p2 in choices:
            res.append([p1[0], p2[0]])
            print("  {}, ratings delta {:.2f}".format(p2[0], abs(p1[1] - p2[1])))
    return res
[ "def", "suggest_pairs", "(", "top_n", "=", "10", ",", "per_n", "=", "3", ",", "ignore_before", "=", "300", ")", ":", "db", "=", "sqlite3", ".", "connect", "(", "\"ratings.db\"", ")", "data", "=", "db", ".", "execute", "(", "\"select model_winner, model_loser from wins\"", ")", ".", "fetchall", "(", ")", "bucket_ids", "=", "[", "id", "[", "0", "]", "for", "id", "in", "db", ".", "execute", "(", "\"select id from models where bucket = ?\"", ",", "(", "fsdb", ".", "models_dir", "(", ")", ",", ")", ")", ".", "fetchall", "(", ")", "]", "bucket_ids", ".", "sort", "(", ")", "data", "=", "[", "d", "for", "d", "in", "data", "if", "d", "[", "0", "]", "in", "bucket_ids", "and", "d", "[", "1", "]", "in", "bucket_ids", "]", "ratings", "=", "[", "(", "model_num_for", "(", "k", ")", ",", "v", "[", "0", "]", ",", "v", "[", "1", "]", ")", "for", "k", ",", "v", "in", "compute_ratings", "(", "data", ")", ".", "items", "(", ")", "]", "ratings", ".", "sort", "(", ")", "ratings", "=", "ratings", "[", "ignore_before", ":", "]", "# Filter off the first 100 models, which improve too fast.", "ratings", ".", "sort", "(", "key", "=", "lambda", "r", ":", "r", "[", "2", "]", ",", "reverse", "=", "True", ")", "res", "=", "[", "]", "for", "p1", "in", "ratings", "[", ":", "top_n", "]", ":", "candidate_p2s", "=", "sorted", "(", "ratings", ",", "key", "=", "lambda", "p2_tup", ":", "abs", "(", "p1", "[", "1", "]", "-", "p2_tup", "[", "1", "]", ")", ")", "[", "1", ":", "20", "]", "choices", "=", "random", ".", "sample", "(", "candidate_p2s", ",", "per_n", ")", "print", "(", "\"Pairing {}, sigma {:.2f} (Rating {:.2f})\"", ".", "format", "(", "p1", "[", "0", "]", ",", "p1", "[", "2", "]", ",", "p1", "[", "1", "]", ")", ")", "for", "p2", "in", "choices", ":", "res", ".", "append", "(", "[", "p1", "[", "0", "]", ",", "p2", "[", "0", "]", "]", ")", "print", "(", "\" {}, ratings delta {:.2f}\"", ".", "format", "(", "p2", "[", "0", "]", ",", "abs", "(", "p1", "[", "1", "]", "-", "p2", "[", "1", "]", ")", ")", ")", "return", "res" ]
Find the maximally interesting pairs of players to match up First, sort the ratings by uncertainty. Then, take the ten highest players with the highest uncertainty For each of them, call them `p1` Sort all the models by their distance from p1's rating and take the 20 nearest rated models. ('candidate_p2s') Choose pairings, (p1, p2), randomly from this list. `top_n` will pair the top n models by uncertainty. `per_n` will give each of the top_n models this many opponents `ignore_before` is the model number to `filter` off, i.e., the early models. Returns a list of *model numbers*, not model ids.
[ "Find", "the", "maximally", "interesting", "pairs", "of", "players", "to", "match", "up", "First", "sort", "the", "ratings", "by", "uncertainty", ".", "Then", "take", "the", "ten", "highest", "players", "with", "the", "highest", "uncertainty", "For", "each", "of", "them", "call", "them", "p1", "Sort", "all", "the", "models", "by", "their", "distance", "from", "p1", "s", "rating", "and", "take", "the", "20", "nearest", "rated", "models", ".", "(", "candidate_p2s", ")", "Choose", "pairings", "(", "p1", "p2", ")", "randomly", "from", "this", "list", "." ]
python
train
47.777778
rstoneback/pysat
pysat/_instrument.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_instrument.py#L709-L916
def load(self, yr=None, doy=None, date=None, fname=None, fid=None, verifyPad=False): """Load instrument data into Instrument object .data. Parameters ---------- yr : integer year for desired data doy : integer day of year date : datetime object date to load fname : 'string' filename to be loaded verifyPad : boolean if True, padding data not removed (debug purposes) Returns -------- Void. Data is added to self.data Note ---- Loads data for a chosen instrument into .data. Any functions chosen by the user and added to the custom processing queue (.custom.add) are automatically applied to the data before it is available to user in .data. """ # set options used by loading routine based upon user input if date is not None: self._set_load_parameters(date=date, fid=None) # increment inc = pds.DateOffset(days=1) curr = date elif (yr is not None) & (doy is not None): date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1)) self._set_load_parameters(date=date, fid=None) # increment inc = pds.DateOffset(days=1) curr = self.date elif fname is not None: # date will have to be set later by looking at the data self._set_load_parameters(date=None, fid=self.files.get_index(fname)) # increment one file at a time inc = 1 curr = self._fid.copy() elif fid is not None: self._set_load_parameters(date=None, fid=fid) # increment one file at a time inc = 1 curr = fid else: estr = 'Must supply a yr,doy pair, or datetime object, or filename' estr = '{:s} to load data from.'.format(estr) raise TypeError(estr) self.orbits._reset() # if pad or multi_file_day is true, need to have a three day/file load loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0) if (self.pad is not None) | self.multi_file_day: if self._next_data.empty & self._prev_data.empty: # data has not already been loaded for previous and next days # load data for all three print('Initializing three day/file window') # using current date or fid self._prev_data, self._prev_meta = self._load_prev() self._curr_data, self._curr_meta = \ self._load_data(date=self.date, fid=self._fid) self._next_data, self._next_meta = self._load_next() else: # moving forward in time if self._next_data_track == curr: del self._prev_data self._prev_data = self._curr_data self._prev_meta = self._curr_meta self._curr_data = self._next_data self._curr_meta = self._next_meta self._next_data, self._next_meta = self._load_next() # moving backward in time elif self._prev_data_track == curr: del self._next_data self._next_data = self._curr_data self._next_meta = self._curr_meta self._curr_data = self._prev_data self._curr_meta = self._prev_meta self._prev_data, self._prev_meta = self._load_prev() # jumped in time/or switched from filebased to date based access else: del self._prev_data del self._curr_data del self._next_data self._prev_data, self._prev_meta = self._load_prev() self._curr_data, self._curr_meta = \ self._load_data(date=self.date, fid=self._fid) self._next_data, self._next_meta = self._load_next() # make sure datetime indices for all data is monotonic if not self._prev_data.index.is_monotonic_increasing: self._prev_data.sort_index(inplace=True) if not self._curr_data.index.is_monotonic_increasing: self._curr_data.sort_index(inplace=True) if not self._next_data.index.is_monotonic_increasing: self._next_data.sort_index(inplace=True) # make tracking indexes consistent with new loads self._next_data_track = curr + inc self._prev_data_track = curr - inc # attach data to object if not self._curr_data.empty: self.data = self._curr_data.copy() self.meta = 
self._curr_meta.copy() else: self.data = DataFrame(None) # line below removed as it would delete previous meta, if any # if you end a seasonal analysis with a day with no data, then # no meta: self.meta = _meta.Meta() # multi file days can extend past a single day, only want data from # specific date if loading by day # set up times for the possible data padding coming up if self._load_by_date: #print ('double trouble') first_time = self.date first_pad = self.date - loop_pad last_time = self.date + pds.DateOffset(days=1) last_pad = self.date + pds.DateOffset(days=1) + loop_pad want_last_pad = False # loading by file, can't be a multi_file-day flag situation elif (not self._load_by_date) and (not self.multi_file_day): #print ('single trouble') first_time = self._curr_data.index[0] first_pad = first_time - loop_pad last_time = self._curr_data.index[-1] last_pad = last_time + loop_pad want_last_pad = True else: raise ValueError("multi_file_day and loading by date are " + "effectively equivalent. Can't have " + "multi_file_day and load by file.") #print (first_pad, first_time, last_time, last_pad) # pad data based upon passed parameter if (not self._prev_data.empty) & (not self.data.empty): padLeft = self._prev_data.loc[first_pad : self.data.index[0]] if len(padLeft) > 0: if (padLeft.index[-1] == self.data.index[0]) : padLeft = padLeft.iloc[:-1, :] self.data = pds.concat([padLeft, self.data]) if (not self._next_data.empty) & (not self.data.empty): padRight = self._next_data.loc[self.data.index[-1] : last_pad] if len(padRight) > 0: if (padRight.index[0] == self.data.index[-1]) : padRight = padRight.iloc[1:, :] self.data = pds.concat([self.data, padRight]) self.data = self.data.ix[first_pad : last_pad] # want exclusive end slicing behavior from above if not self.empty: if (self.data.index[-1] == last_pad) & (not want_last_pad): self.data = self.data.iloc[:-1, :] ## drop any possible duplicate index times ##self.data.drop_duplicates(inplace=True) #self.data = self.data[~self.data.index.duplicated()] # if self.pad is False, load single day else: self.data, meta = self._load_data(date=self.date, fid=self._fid) if not self.data.empty: self.meta = meta # check if load routine actually returns meta if self.meta.data.empty: self.meta[self.data.columns] = {self.name_label: self.data.columns, self.units_label: [''] * len(self.data.columns)} # if loading by file set the yr, doy, and date if not self._load_by_date: if self.pad is not None: temp = first_time else: temp = self.data.index[0] self.date = pds.datetime(temp.year, temp.month, temp.day) self.yr, self.doy = utils.getyrdoy(self.date) if not self.data.empty: self._default_rtn(self) # clean if (not self.data.empty) & (self.clean_level != 'none'): self._clean_rtn(self) # apply custom functions if not self.data.empty: self.custom._apply_all(self) # remove the excess padding, if any applied if (self.pad is not None) & (not self.data.empty) & (not verifyPad): self.data = self.data[first_time: last_time] if not self.empty: if (self.data.index[-1] == last_time) & (not want_last_pad): self.data = self.data.iloc[:-1, :] # transfer any extra attributes in meta to the Instrument object self.meta.transfer_attributes_to_instrument(self) sys.stdout.flush() return
[ "def", "load", "(", "self", ",", "yr", "=", "None", ",", "doy", "=", "None", ",", "date", "=", "None", ",", "fname", "=", "None", ",", "fid", "=", "None", ",", "verifyPad", "=", "False", ")", ":", "# set options used by loading routine based upon user input", "if", "date", "is", "not", "None", ":", "self", ".", "_set_load_parameters", "(", "date", "=", "date", ",", "fid", "=", "None", ")", "# increment ", "inc", "=", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "curr", "=", "date", "elif", "(", "yr", "is", "not", "None", ")", "&", "(", "doy", "is", "not", "None", ")", ":", "date", "=", "pds", ".", "datetime", "(", "yr", ",", "1", ",", "1", ")", "+", "pds", ".", "DateOffset", "(", "days", "=", "(", "doy", "-", "1", ")", ")", "self", ".", "_set_load_parameters", "(", "date", "=", "date", ",", "fid", "=", "None", ")", "# increment ", "inc", "=", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "curr", "=", "self", ".", "date", "elif", "fname", "is", "not", "None", ":", "# date will have to be set later by looking at the data", "self", ".", "_set_load_parameters", "(", "date", "=", "None", ",", "fid", "=", "self", ".", "files", ".", "get_index", "(", "fname", ")", ")", "# increment one file at a time", "inc", "=", "1", "curr", "=", "self", ".", "_fid", ".", "copy", "(", ")", "elif", "fid", "is", "not", "None", ":", "self", ".", "_set_load_parameters", "(", "date", "=", "None", ",", "fid", "=", "fid", ")", "# increment one file at a time", "inc", "=", "1", "curr", "=", "fid", "else", ":", "estr", "=", "'Must supply a yr,doy pair, or datetime object, or filename'", "estr", "=", "'{:s} to load data from.'", ".", "format", "(", "estr", ")", "raise", "TypeError", "(", "estr", ")", "self", ".", "orbits", ".", "_reset", "(", ")", "# if pad or multi_file_day is true, need to have a three day/file load", "loop_pad", "=", "self", ".", "pad", "if", "self", ".", "pad", "is", "not", "None", "else", "pds", ".", "DateOffset", "(", "seconds", "=", "0", ")", "if", "(", "self", ".", "pad", "is", "not", "None", ")", "|", "self", ".", "multi_file_day", ":", "if", "self", ".", "_next_data", ".", "empty", "&", "self", ".", "_prev_data", ".", "empty", ":", "# data has not already been loaded for previous and next days", "# load data for all three", "print", "(", "'Initializing three day/file window'", ")", "# using current date or fid", "self", ".", "_prev_data", ",", "self", ".", "_prev_meta", "=", "self", ".", "_load_prev", "(", ")", "self", ".", "_curr_data", ",", "self", ".", "_curr_meta", "=", "self", ".", "_load_data", "(", "date", "=", "self", ".", "date", ",", "fid", "=", "self", ".", "_fid", ")", "self", ".", "_next_data", ",", "self", ".", "_next_meta", "=", "self", ".", "_load_next", "(", ")", "else", ":", "# moving forward in time", "if", "self", ".", "_next_data_track", "==", "curr", ":", "del", "self", ".", "_prev_data", "self", ".", "_prev_data", "=", "self", ".", "_curr_data", "self", ".", "_prev_meta", "=", "self", ".", "_curr_meta", "self", ".", "_curr_data", "=", "self", ".", "_next_data", "self", ".", "_curr_meta", "=", "self", ".", "_next_meta", "self", ".", "_next_data", ",", "self", ".", "_next_meta", "=", "self", ".", "_load_next", "(", ")", "# moving backward in time", "elif", "self", ".", "_prev_data_track", "==", "curr", ":", "del", "self", ".", "_next_data", "self", ".", "_next_data", "=", "self", ".", "_curr_data", "self", ".", "_next_meta", "=", "self", ".", "_curr_meta", "self", ".", "_curr_data", "=", "self", ".", "_prev_data", "self", ".", "_curr_meta", "=", "self", 
".", "_prev_meta", "self", ".", "_prev_data", ",", "self", ".", "_prev_meta", "=", "self", ".", "_load_prev", "(", ")", "# jumped in time/or switched from filebased to date based access", "else", ":", "del", "self", ".", "_prev_data", "del", "self", ".", "_curr_data", "del", "self", ".", "_next_data", "self", ".", "_prev_data", ",", "self", ".", "_prev_meta", "=", "self", ".", "_load_prev", "(", ")", "self", ".", "_curr_data", ",", "self", ".", "_curr_meta", "=", "self", ".", "_load_data", "(", "date", "=", "self", ".", "date", ",", "fid", "=", "self", ".", "_fid", ")", "self", ".", "_next_data", ",", "self", ".", "_next_meta", "=", "self", ".", "_load_next", "(", ")", "# make sure datetime indices for all data is monotonic", "if", "not", "self", ".", "_prev_data", ".", "index", ".", "is_monotonic_increasing", ":", "self", ".", "_prev_data", ".", "sort_index", "(", "inplace", "=", "True", ")", "if", "not", "self", ".", "_curr_data", ".", "index", ".", "is_monotonic_increasing", ":", "self", ".", "_curr_data", ".", "sort_index", "(", "inplace", "=", "True", ")", "if", "not", "self", ".", "_next_data", ".", "index", ".", "is_monotonic_increasing", ":", "self", ".", "_next_data", ".", "sort_index", "(", "inplace", "=", "True", ")", "# make tracking indexes consistent with new loads", "self", ".", "_next_data_track", "=", "curr", "+", "inc", "self", ".", "_prev_data_track", "=", "curr", "-", "inc", "# attach data to object", "if", "not", "self", ".", "_curr_data", ".", "empty", ":", "self", ".", "data", "=", "self", ".", "_curr_data", ".", "copy", "(", ")", "self", ".", "meta", "=", "self", ".", "_curr_meta", ".", "copy", "(", ")", "else", ":", "self", ".", "data", "=", "DataFrame", "(", "None", ")", "# line below removed as it would delete previous meta, if any", "# if you end a seasonal analysis with a day with no data, then", "# no meta: self.meta = _meta.Meta()", "# multi file days can extend past a single day, only want data from ", "# specific date if loading by day", "# set up times for the possible data padding coming up", "if", "self", ".", "_load_by_date", ":", "#print ('double trouble')", "first_time", "=", "self", ".", "date", "first_pad", "=", "self", ".", "date", "-", "loop_pad", "last_time", "=", "self", ".", "date", "+", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "last_pad", "=", "self", ".", "date", "+", "pds", ".", "DateOffset", "(", "days", "=", "1", ")", "+", "loop_pad", "want_last_pad", "=", "False", "# loading by file, can't be a multi_file-day flag situation", "elif", "(", "not", "self", ".", "_load_by_date", ")", "and", "(", "not", "self", ".", "multi_file_day", ")", ":", "#print ('single trouble')", "first_time", "=", "self", ".", "_curr_data", ".", "index", "[", "0", "]", "first_pad", "=", "first_time", "-", "loop_pad", "last_time", "=", "self", ".", "_curr_data", ".", "index", "[", "-", "1", "]", "last_pad", "=", "last_time", "+", "loop_pad", "want_last_pad", "=", "True", "else", ":", "raise", "ValueError", "(", "\"multi_file_day and loading by date are \"", "+", "\"effectively equivalent. 
Can't have \"", "+", "\"multi_file_day and load by file.\"", ")", "#print (first_pad, first_time, last_time, last_pad)", "# pad data based upon passed parameter", "if", "(", "not", "self", ".", "_prev_data", ".", "empty", ")", "&", "(", "not", "self", ".", "data", ".", "empty", ")", ":", "padLeft", "=", "self", ".", "_prev_data", ".", "loc", "[", "first_pad", ":", "self", ".", "data", ".", "index", "[", "0", "]", "]", "if", "len", "(", "padLeft", ")", ">", "0", ":", "if", "(", "padLeft", ".", "index", "[", "-", "1", "]", "==", "self", ".", "data", ".", "index", "[", "0", "]", ")", ":", "padLeft", "=", "padLeft", ".", "iloc", "[", ":", "-", "1", ",", ":", "]", "self", ".", "data", "=", "pds", ".", "concat", "(", "[", "padLeft", ",", "self", ".", "data", "]", ")", "if", "(", "not", "self", ".", "_next_data", ".", "empty", ")", "&", "(", "not", "self", ".", "data", ".", "empty", ")", ":", "padRight", "=", "self", ".", "_next_data", ".", "loc", "[", "self", ".", "data", ".", "index", "[", "-", "1", "]", ":", "last_pad", "]", "if", "len", "(", "padRight", ")", ">", "0", ":", "if", "(", "padRight", ".", "index", "[", "0", "]", "==", "self", ".", "data", ".", "index", "[", "-", "1", "]", ")", ":", "padRight", "=", "padRight", ".", "iloc", "[", "1", ":", ",", ":", "]", "self", ".", "data", "=", "pds", ".", "concat", "(", "[", "self", ".", "data", ",", "padRight", "]", ")", "self", ".", "data", "=", "self", ".", "data", ".", "ix", "[", "first_pad", ":", "last_pad", "]", "# want exclusive end slicing behavior from above", "if", "not", "self", ".", "empty", ":", "if", "(", "self", ".", "data", ".", "index", "[", "-", "1", "]", "==", "last_pad", ")", "&", "(", "not", "want_last_pad", ")", ":", "self", ".", "data", "=", "self", ".", "data", ".", "iloc", "[", ":", "-", "1", ",", ":", "]", "## drop any possible duplicate index times", "##self.data.drop_duplicates(inplace=True)", "#self.data = self.data[~self.data.index.duplicated()]", "# if self.pad is False, load single day", "else", ":", "self", ".", "data", ",", "meta", "=", "self", ".", "_load_data", "(", "date", "=", "self", ".", "date", ",", "fid", "=", "self", ".", "_fid", ")", "if", "not", "self", ".", "data", ".", "empty", ":", "self", ".", "meta", "=", "meta", "# check if load routine actually returns meta", "if", "self", ".", "meta", ".", "data", ".", "empty", ":", "self", ".", "meta", "[", "self", ".", "data", ".", "columns", "]", "=", "{", "self", ".", "name_label", ":", "self", ".", "data", ".", "columns", ",", "self", ".", "units_label", ":", "[", "''", "]", "*", "len", "(", "self", ".", "data", ".", "columns", ")", "}", "# if loading by file set the yr, doy, and date", "if", "not", "self", ".", "_load_by_date", ":", "if", "self", ".", "pad", "is", "not", "None", ":", "temp", "=", "first_time", "else", ":", "temp", "=", "self", ".", "data", ".", "index", "[", "0", "]", "self", ".", "date", "=", "pds", ".", "datetime", "(", "temp", ".", "year", ",", "temp", ".", "month", ",", "temp", ".", "day", ")", "self", ".", "yr", ",", "self", ".", "doy", "=", "utils", ".", "getyrdoy", "(", "self", ".", "date", ")", "if", "not", "self", ".", "data", ".", "empty", ":", "self", ".", "_default_rtn", "(", "self", ")", "# clean", "if", "(", "not", "self", ".", "data", ".", "empty", ")", "&", "(", "self", ".", "clean_level", "!=", "'none'", ")", ":", "self", ".", "_clean_rtn", "(", "self", ")", "# apply custom functions", "if", "not", "self", ".", "data", ".", "empty", ":", "self", ".", "custom", ".", "_apply_all", "(", "self", ")", "# remove the excess 
padding, if any applied", "if", "(", "self", ".", "pad", "is", "not", "None", ")", "&", "(", "not", "self", ".", "data", ".", "empty", ")", "&", "(", "not", "verifyPad", ")", ":", "self", ".", "data", "=", "self", ".", "data", "[", "first_time", ":", "last_time", "]", "if", "not", "self", ".", "empty", ":", "if", "(", "self", ".", "data", ".", "index", "[", "-", "1", "]", "==", "last_time", ")", "&", "(", "not", "want_last_pad", ")", ":", "self", ".", "data", "=", "self", ".", "data", ".", "iloc", "[", ":", "-", "1", ",", ":", "]", "# transfer any extra attributes in meta to the Instrument object", "self", ".", "meta", ".", "transfer_attributes_to_instrument", "(", "self", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return" ]
Load instrument data into Instrument object .data. Parameters ---------- yr : integer year for desired data doy : integer day of year date : datetime object date to load fname : 'string' filename to be loaded verifyPad : boolean if True, padding data not removed (debug purposes) Returns -------- Void. Data is added to self.data Note ---- Loads data for a chosen instrument into .data. Any functions chosen by the user and added to the custom processing queue (.custom.add) are automatically applied to the data before it is available to user in .data.
[ "Load", "instrument", "data", "into", "Instrument", "object", ".", "data", "." ]
python
train
45.105769
Hackerfleet/hfos
hfos/ui/schemamanager.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/schemamanager.py#L211-L223
def get(self, event):
    """Return a single schema"""

    self.log("Schemarequest for", event.data, "from", event.user, lvl=debug)

    if event.data in schemastore:
        response = {
            'component': 'hfos.events.schemamanager',
            'action': 'get',
            'data': l10n_schemastore[event.client.language][event.data]
        }
        self.fireEvent(send(event.client.uuid, response))
    else:
        self.log("Unavailable schema requested!", lvl=warn)
[ "def", "get", "(", "self", ",", "event", ")", ":", "self", ".", "log", "(", "\"Schemarequest for\"", ",", "event", ".", "data", ",", "\"from\"", ",", "event", ".", "user", ",", "lvl", "=", "debug", ")", "if", "event", ".", "data", "in", "schemastore", ":", "response", "=", "{", "'component'", ":", "'hfos.events.schemamanager'", ",", "'action'", ":", "'get'", ",", "'data'", ":", "l10n_schemastore", "[", "event", ".", "client", ".", "language", "]", "[", "event", ".", "data", "]", "}", "self", ".", "fireEvent", "(", "send", "(", "event", ".", "client", ".", "uuid", ",", "response", ")", ")", "else", ":", "self", ".", "log", "(", "\"Unavailable schema requested!\"", ",", "lvl", "=", "warn", ")" ]
Return a single schema
[ "Return", "a", "single", "schema" ]
python
train
40.615385
Robpol86/appveyor-artifacts
appveyor_artifacts.py
https://github.com/Robpol86/appveyor-artifacts/blob/20bc2963b09f4142fd4c0b1f5da04f1105379e36/appveyor_artifacts.py#L530-L559
def mangle_coverage(local_path, log):
    """Edit .coverage file substituting Windows file paths to Linux paths.

    :param str local_path: Destination path to save file to.
    :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
    """
    # Read the file, or return if not a .coverage file.
    with open(local_path, mode='rb') as handle:
        if handle.read(13) != b'!coverage.py:':
            log.debug('File %s not a coverage file.', local_path)
            return
        handle.seek(0)
        # I'm lazy, reading all of this into memory. What could possibly go wrong?
        file_contents = handle.read(52428800).decode('utf-8')  # 50 MiB limit, surely this is enough?

    # Substitute paths.
    for windows_path in set(REGEX_MANGLE.findall(file_contents)):
        unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
        unix_absolute_path = os.path.abspath(unix_relative_path)
        if not os.path.isfile(unix_absolute_path):
            log.debug('Windows path: %s', windows_path)
            log.debug('Unix relative path: %s', unix_relative_path)
            log.error('No such file: %s', unix_absolute_path)
            raise HandledError
        file_contents = file_contents.replace(windows_path, unix_absolute_path)

    # Write.
    with open(local_path, 'w') as handle:
        handle.write(file_contents)
[ "def", "mangle_coverage", "(", "local_path", ",", "log", ")", ":", "# Read the file, or return if not a .coverage file.", "with", "open", "(", "local_path", ",", "mode", "=", "'rb'", ")", "as", "handle", ":", "if", "handle", ".", "read", "(", "13", ")", "!=", "b'!coverage.py:'", ":", "log", ".", "debug", "(", "'File %s not a coverage file.'", ",", "local_path", ")", "return", "handle", ".", "seek", "(", "0", ")", "# I'm lazy, reading all of this into memory. What could possibly go wrong?", "file_contents", "=", "handle", ".", "read", "(", "52428800", ")", ".", "decode", "(", "'utf-8'", ")", "# 50 MiB limit, surely this is enough?", "# Substitute paths.", "for", "windows_path", "in", "set", "(", "REGEX_MANGLE", ".", "findall", "(", "file_contents", ")", ")", ":", "unix_relative_path", "=", "windows_path", ".", "replace", "(", "r'\\\\'", ",", "'/'", ")", ".", "split", "(", "'/'", ",", "3", ")", "[", "-", "1", "]", "unix_absolute_path", "=", "os", ".", "path", ".", "abspath", "(", "unix_relative_path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "unix_absolute_path", ")", ":", "log", ".", "debug", "(", "'Windows path: %s'", ",", "windows_path", ")", "log", ".", "debug", "(", "'Unix relative path: %s'", ",", "unix_relative_path", ")", "log", ".", "error", "(", "'No such file: %s'", ",", "unix_absolute_path", ")", "raise", "HandledError", "file_contents", "=", "file_contents", ".", "replace", "(", "windows_path", ",", "unix_absolute_path", ")", "# Write.", "with", "open", "(", "local_path", ",", "'w'", ")", "as", "handle", ":", "handle", ".", "write", "(", "file_contents", ")" ]
Edit .coverage file substituting Windows file paths to Linux paths. :param str local_path: Destination path to save file to. :param logging.Logger log: Logger for this function. Populated by with_log() decorator.
[ "Edit", ".", "coverage", "file", "substituting", "Windows", "file", "paths", "to", "Linux", "paths", "." ]
python
train
45.566667
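The core substitution (normalise the escaped Windows separators, drop the drive and leading directories, then re-anchor relative to the current directory) can be shown in isolation; the sample path is invented:

import os

# A path as it would appear inside a .coverage file, with escaped backslashes.
windows_path = r'C:\\projects\\appveyor-artifacts\\appveyor_artifacts.py'

# Same transformation as inside mangle_coverage(): replace '\\' with '/',
# drop the drive and first two directories, then resolve locally.
unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
unix_absolute_path = os.path.abspath(unix_relative_path)

print(unix_relative_path)   # appveyor_artifacts.py
print(unix_absolute_path)   # <cwd>/appveyor_artifacts.py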
kennethreitz/omnijson
omnijson/packages/simplejson/encoder.py
https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/packages/simplejson/encoder.py#L34-L42
def encode_basestring(s):
    """Return a JSON representation of a Python string

    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        return ESCAPE_DCT[match.group(0)]
    return u'"' + ESCAPE.sub(replace, s) + u'"'
[ "def", "encode_basestring", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "str", ")", "and", "HAS_UTF8", ".", "search", "(", "s", ")", "is", "not", "None", ":", "s", "=", "s", ".", "decode", "(", "'utf-8'", ")", "def", "replace", "(", "match", ")", ":", "return", "ESCAPE_DCT", "[", "match", ".", "group", "(", "0", ")", "]", "return", "u'\"'", "+", "ESCAPE", ".", "sub", "(", "replace", ",", "s", ")", "+", "u'\"'" ]
Return a JSON representation of a Python string
[ "Return", "a", "JSON", "representation", "of", "a", "Python", "string" ]
python
train
31.888889
Locu/chronology
pykronos/pykronos/utils/cache.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/pykronos/pykronos/utils/cache.py#L120-L130
def _bucket_time(self, event_time):
    """ The seconds since epoch that represent a computed bucket.

    An event bucket is the time of the earliest possible event for that
    `bucket_width`. Example: if `bucket_width = timedelta(minutes=10)`,
    bucket times will be the number of seconds since epoch at 12:00, 12:10,
    ... on each day.
    """
    event_time = kronos_time_to_epoch_time(event_time)
    return event_time - (event_time % self._bucket_width)
[ "def", "_bucket_time", "(", "self", ",", "event_time", ")", ":", "event_time", "=", "kronos_time_to_epoch_time", "(", "event_time", ")", "return", "event_time", "-", "(", "event_time", "%", "self", ".", "_bucket_width", ")" ]
The seconds since epoch that represent a computed bucket. An event bucket is the time of the earliest possible event for that `bucket_width`. Example: if `bucket_width = timedelta(minutes=10)`, bucket times will be the number of seconds since epoch at 12:00, 12:10, ... on each day.
[ "The", "seconds", "since", "epoch", "that", "represent", "a", "computed", "bucket", "." ]
python
train
41.818182
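The bucketing is plain modular arithmetic once the timestamp is in epoch seconds; a standalone check with a 10-minute bucket width (600 s):

# 600-second buckets: every timestamp is floored to the start of its bucket.
bucket_width = 600

for event_time in (0, 599, 600, 1234):
    print(event_time, '->', event_time - (event_time % bucket_width))
# 0 -> 0, 599 -> 0, 600 -> 600, 1234 -> 1200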
siemens/django-mantis-stix-importer
mantis_stix_importer/importer.py
https://github.com/siemens/django-mantis-stix-importer/blob/20f5709e068101dad299f58134513d8873c91ba5/mantis_stix_importer/importer.py#L1235-L1323
def derive_iobject_type(self, embedding_ns, embedded_ns, elt_name): """ Derive type of information object stemming from an embedded element based on namespace information of embedding element, the embedded element itself, and the name of the element. """ # Extract namespace-information ns_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, self.namespace_dict.get(embedding_ns, "")) if not ns_info: ns_info = {} # This should yield the following information: # - For namespace of an Cybox Object such as http://cybox.mitre.org/objects#AddressObject-2: # - iotype_ns = http://cybox.mitre.org/objects#AddressObject # - family = cybox.mitre.org # - family_tag = cybox # - type = AddressObject # - revision = 2 # - For a base namespace such as http://cybox.mitre.org/common-2: # - iotype_ns = http://cybox.mitre.org/common # - family = cybox.mitre.org # - family_tag = cybox # - type = common # - revision = 2 iobject_family_name = ns_info.get('family',None) if not iobject_family_name: iobject_family_name = "" family_info = {} if ns_info.get('family_tag',None) in ['stix', 'cybox']: family_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, self.namespace_dict.get(ns_info['family_tag'], "")) if family_info: iobject_family_revision_name = family_info["revision"] else: iobject_family_revision_name = None else: iobject_family_revision_name = ns_info.get("revision",None) if not iobject_family_revision_name: iobject_family_revision_name = '' # We take the object type from the ``xsi:type`` attribute # given as in the following example:: # <cybox:Properties xsi:type="EmailMessageObj:EmailMessageObjectType"> # <cybox:Properties xsi:type="AddrObj:AddressObjectType" category="ipv4-addr"> # if embedded_ns: namespace_uri = self.namespace_dict.get(embedded_ns, "") type_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, namespace_uri) if not type_info: type_info = {} if type_info and type_info.get('type',None) in ['common', 'cybox', 'stix']: iobject_type_name = elt_name iobject_type_namespace_uri = ns_info['iotype_ns'] iobject_type_revision_name = ns_info['revision'] else: iobject_type_namespace_uri = type_info.get('iotype_ns',"%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,embedded_ns)) iobject_type_name = type_info.get('type',embedded_ns)#.split('Object')[0] iobject_type_revision_name = type_info.get('revision','') else: iobject_type_name = elt_name iobject_type_revision_name = iobject_family_revision_name iobject_type_namespace_uri = ns_info.get("iotype_ns", "%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,elt_name)) if not iobject_type_revision_name: iobject_type_revision_name = '' logger.debug("Results of datatype extraction for ns %s, embedded ns %s and element name %s" % ( embedding_ns, embedded_ns, elt_name)) logger.debug("Family Name: %s" % iobject_family_name) logger.debug("Family Revision %s" % iobject_family_revision_name) logger.debug("Type Name %s" % iobject_type_name) logger.debug("Type NS URI %s" % iobject_type_namespace_uri) logger.debug("Type Revision %s" % iobject_type_revision_name) return {'iobject_type_name': iobject_type_name, 'iobject_type_revision_name': iobject_type_revision_name, 'iobject_type_namespace_uri': iobject_type_namespace_uri, 'iobject_family_name': iobject_family_name, 'iobject_family_revision_name': iobject_family_revision_name}
[ "def", "derive_iobject_type", "(", "self", ",", "embedding_ns", ",", "embedded_ns", ",", "elt_name", ")", ":", "# Extract namespace-information", "ns_info", "=", "search_by_re_list", "(", "self", ".", "RE_LIST_NS_TYPE_FROM_NS_URL", ",", "self", ".", "namespace_dict", ".", "get", "(", "embedding_ns", ",", "\"\"", ")", ")", "if", "not", "ns_info", ":", "ns_info", "=", "{", "}", "# This should yield the following information:", "# - For namespace of an Cybox Object such as http://cybox.mitre.org/objects#AddressObject-2:", "# - iotype_ns = http://cybox.mitre.org/objects#AddressObject", "# - family = cybox.mitre.org", "# - family_tag = cybox", "# - type = AddressObject", "# - revision = 2", "# - For a base namespace such as http://cybox.mitre.org/common-2:", "# - iotype_ns = http://cybox.mitre.org/common", "# - family = cybox.mitre.org", "# - family_tag = cybox", "# - type = common", "# - revision = 2", "iobject_family_name", "=", "ns_info", ".", "get", "(", "'family'", ",", "None", ")", "if", "not", "iobject_family_name", ":", "iobject_family_name", "=", "\"\"", "family_info", "=", "{", "}", "if", "ns_info", ".", "get", "(", "'family_tag'", ",", "None", ")", "in", "[", "'stix'", ",", "'cybox'", "]", ":", "family_info", "=", "search_by_re_list", "(", "self", ".", "RE_LIST_NS_TYPE_FROM_NS_URL", ",", "self", ".", "namespace_dict", ".", "get", "(", "ns_info", "[", "'family_tag'", "]", ",", "\"\"", ")", ")", "if", "family_info", ":", "iobject_family_revision_name", "=", "family_info", "[", "\"revision\"", "]", "else", ":", "iobject_family_revision_name", "=", "None", "else", ":", "iobject_family_revision_name", "=", "ns_info", ".", "get", "(", "\"revision\"", ",", "None", ")", "if", "not", "iobject_family_revision_name", ":", "iobject_family_revision_name", "=", "''", "# We take the object type from the ``xsi:type`` attribute", "# given as in the following example::", "# <cybox:Properties xsi:type=\"EmailMessageObj:EmailMessageObjectType\">", "# <cybox:Properties xsi:type=\"AddrObj:AddressObjectType\" category=\"ipv4-addr\">", "#", "if", "embedded_ns", ":", "namespace_uri", "=", "self", ".", "namespace_dict", ".", "get", "(", "embedded_ns", ",", "\"\"", ")", "type_info", "=", "search_by_re_list", "(", "self", ".", "RE_LIST_NS_TYPE_FROM_NS_URL", ",", "namespace_uri", ")", "if", "not", "type_info", ":", "type_info", "=", "{", "}", "if", "type_info", "and", "type_info", ".", "get", "(", "'type'", ",", "None", ")", "in", "[", "'common'", ",", "'cybox'", ",", "'stix'", "]", ":", "iobject_type_name", "=", "elt_name", "iobject_type_namespace_uri", "=", "ns_info", "[", "'iotype_ns'", "]", "iobject_type_revision_name", "=", "ns_info", "[", "'revision'", "]", "else", ":", "iobject_type_namespace_uri", "=", "type_info", ".", "get", "(", "'iotype_ns'", ",", "\"%s/%s\"", "%", "(", "dingos", ".", "DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX", ",", "embedded_ns", ")", ")", "iobject_type_name", "=", "type_info", ".", "get", "(", "'type'", ",", "embedded_ns", ")", "#.split('Object')[0]", "iobject_type_revision_name", "=", "type_info", ".", "get", "(", "'revision'", ",", "''", ")", "else", ":", "iobject_type_name", "=", "elt_name", "iobject_type_revision_name", "=", "iobject_family_revision_name", "iobject_type_namespace_uri", "=", "ns_info", ".", "get", "(", "\"iotype_ns\"", ",", "\"%s/%s\"", "%", "(", "dingos", ".", "DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX", ",", "elt_name", ")", ")", "if", "not", "iobject_type_revision_name", ":", "iobject_type_revision_name", "=", "''", "logger", ".", "debug", "(", "\"Results of 
datatype extraction for ns %s, embedded ns %s and element name %s\"", "%", "(", "embedding_ns", ",", "embedded_ns", ",", "elt_name", ")", ")", "logger", ".", "debug", "(", "\"Family Name: %s\"", "%", "iobject_family_name", ")", "logger", ".", "debug", "(", "\"Family Revision %s\"", "%", "iobject_family_revision_name", ")", "logger", ".", "debug", "(", "\"Type Name %s\"", "%", "iobject_type_name", ")", "logger", ".", "debug", "(", "\"Type NS URI %s\"", "%", "iobject_type_namespace_uri", ")", "logger", ".", "debug", "(", "\"Type Revision %s\"", "%", "iobject_type_revision_name", ")", "return", "{", "'iobject_type_name'", ":", "iobject_type_name", ",", "'iobject_type_revision_name'", ":", "iobject_type_revision_name", ",", "'iobject_type_namespace_uri'", ":", "iobject_type_namespace_uri", ",", "'iobject_family_name'", ":", "iobject_family_name", ",", "'iobject_family_revision_name'", ":", "iobject_family_revision_name", "}" ]
Derive type of information object stemming from an embedded element based on namespace information of embedding element, the embedded element itself, and the name of the element.
[ "Derive", "type", "of", "information", "object", "stemming", "from", "an", "embedded", "element", "based", "on", "namespace", "information", "of", "embedding", "element", "the", "embedded", "element", "itself", "and", "the", "name", "of", "the", "element", "." ]
python
train
45.797753
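A small illustration of the namespace decomposition described in the comments of derive_iobject_type above. The regex below is a hypothetical stand-in — the real RE_LIST_NS_TYPE_FROM_NS_URL patterns in dingos are not reproduced here — but it yields the family/type/revision split the comments list:

    import re

    # Hypothetical pattern, not the one dingos ships; it only mirrors the
    # decomposition spelled out in the code comments above.
    NS_PATTERN = re.compile(
        r'^(?P<iotype_ns>https?://(?P<family>(?P<family_tag>[^./]+)\.[^/]+)'
        r'/(?:objects#)?(?P<type>[^-]+))-(?P<revision>\d+)$'
    )

    m = NS_PATTERN.match("http://cybox.mitre.org/objects#AddressObject-2")
    print(m.groupdict())
    # {'iotype_ns': 'http://cybox.mitre.org/objects#AddressObject',
    #  'family': 'cybox.mitre.org', 'family_tag': 'cybox',
    #  'type': 'AddressObject', 'revision': '2'}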
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L4059-L4073
def _prm_write_shared_array(self, key, data, hdf5_group, full_name, flag, **kwargs): """Creates and array that can be used with an HDF5 array object""" if flag == HDF5StorageService.ARRAY: self._prm_write_into_array(key, data, hdf5_group, full_name, **kwargs) elif flag in (HDF5StorageService.CARRAY, HDF5StorageService.EARRAY, HDF5StorageService.VLARRAY): self._prm_write_into_other_array(key, data, hdf5_group, full_name, flag=flag, **kwargs) else: raise RuntimeError('Flag `%s` of hdf5 data `%s` of `%s` not understood' % (flag, key, full_name)) self._hdf5file.flush()
[ "def", "_prm_write_shared_array", "(", "self", ",", "key", ",", "data", ",", "hdf5_group", ",", "full_name", ",", "flag", ",", "*", "*", "kwargs", ")", ":", "if", "flag", "==", "HDF5StorageService", ".", "ARRAY", ":", "self", ".", "_prm_write_into_array", "(", "key", ",", "data", ",", "hdf5_group", ",", "full_name", ",", "*", "*", "kwargs", ")", "elif", "flag", "in", "(", "HDF5StorageService", ".", "CARRAY", ",", "HDF5StorageService", ".", "EARRAY", ",", "HDF5StorageService", ".", "VLARRAY", ")", ":", "self", ".", "_prm_write_into_other_array", "(", "key", ",", "data", ",", "hdf5_group", ",", "full_name", ",", "flag", "=", "flag", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "RuntimeError", "(", "'Flag `%s` of hdf5 data `%s` of `%s` not understood'", "%", "(", "flag", ",", "key", ",", "full_name", ")", ")", "self", ".", "_hdf5file", ".", "flush", "(", ")" ]
Creates an array that can be used with an HDF5 array object
[ "Creates", "an", "array", "that", "can", "be", "used", "with", "an", "HDF5", "array", "object" ]
python
test
50.066667
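For context on the ARRAY/CARRAY/EARRAY/VLARRAY flags dispatched above, a minimal PyTables sketch (file and node names are made up; this uses PyTables' own API, not pypet's):

    import numpy as np
    import tables  # PyTables, which the HDF5 storage service builds on

    data = np.arange(10, dtype=np.int64)
    with tables.open_file("example.h5", mode="w") as h5file:
        # ARRAY: plain, non-chunked array
        h5file.create_array(h5file.root, "plain", data)
        # CARRAY: chunked (and optionally compressed) array of fixed shape
        h5file.create_carray(h5file.root, "chunked", obj=data,
                             filters=tables.Filters(complib="zlib", complevel=5))
        # EARRAY: chunked array that can grow along one axis
        earr = h5file.create_earray(h5file.root, "growable",
                                    atom=tables.Int64Atom(), shape=(0,))
        earr.append(data)
        h5file.flush()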
EasyPost/pystalk
pystalk/client.py
https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L300-L324
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120): """Insert a new job into a specific queue. Wrapper around :func:`put_job`. :param tube_name: Tube name :type tube_name: str :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int .. seealso:: :func:`put_job()` Put a job into whatever the current tube is :func:`using()` Insert a job using an external guard """ with self.using(tube_name) as inserter: return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
[ "def", "put_job_into", "(", "self", ",", "tube_name", ",", "data", ",", "pri", "=", "65536", ",", "delay", "=", "0", ",", "ttr", "=", "120", ")", ":", "with", "self", ".", "using", "(", "tube_name", ")", "as", "inserter", ":", "return", "inserter", ".", "put_job", "(", "data", "=", "data", ",", "pri", "=", "pri", ",", "delay", "=", "delay", ",", "ttr", "=", "ttr", ")" ]
Insert a new job into a specific queue. Wrapper around :func:`put_job`. :param tube_name: Tube name :type tube_name: str :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int .. seealso:: :func:`put_job()` Put a job into whatever the current tube is :func:`using()` Insert a job using an external guard
[ "Insert", "a", "new", "job", "into", "a", "specific", "queue", ".", "Wrapper", "around", ":", "func", ":", "put_job", "." ]
python
train
40.68
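A minimal usage sketch for put_job_into, assuming a beanstalkd server on localhost:11300 and that the client class is exposed as pystalk.BeanstalkClient:

    from pystalk import BeanstalkClient

    client = BeanstalkClient('127.0.0.1', 11300)

    # Equivalent to: with client.using('emails') as inserter: inserter.put_job(...)
    client.put_job_into('emails', b'{"to": "[email protected]"}', pri=1024, delay=0, ttr=60)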
gwastro/pycbc
pycbc/sensitivity.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/sensitivity.py#L63-L82
def volume_to_distance_with_errors(vol, vol_err): """ Return the distance and standard deviation upper and lower bounds Parameters ---------- vol: float vol_err: float Returns ------- dist: float ehigh: float elow: float """ dist = (vol * 3.0/4.0/numpy.pi) ** (1.0/3.0) ehigh = ((vol + vol_err) * 3.0/4.0/numpy.pi) ** (1.0/3.0) - dist delta = numpy.where(vol >= vol_err, vol - vol_err, 0) elow = dist - (delta * 3.0/4.0/numpy.pi) ** (1.0/3.0) return dist, ehigh, elow
[ "def", "volume_to_distance_with_errors", "(", "vol", ",", "vol_err", ")", ":", "dist", "=", "(", "vol", "*", "3.0", "/", "4.0", "/", "numpy", ".", "pi", ")", "**", "(", "1.0", "/", "3.0", ")", "ehigh", "=", "(", "(", "vol", "+", "vol_err", ")", "*", "3.0", "/", "4.0", "/", "numpy", ".", "pi", ")", "**", "(", "1.0", "/", "3.0", ")", "-", "dist", "delta", "=", "numpy", ".", "where", "(", "vol", ">=", "vol_err", ",", "vol", "-", "vol_err", ",", "0", ")", "elow", "=", "dist", "-", "(", "delta", "*", "3.0", "/", "4.0", "/", "numpy", ".", "pi", ")", "**", "(", "1.0", "/", "3.0", ")", "return", "dist", ",", "ehigh", ",", "elow" ]
Return the distance and standard deviation upper and lower bounds Parameters ---------- vol: float vol_err: float Returns ------- dist: float ehigh: float elow: float
[ "Return", "the", "distance", "and", "standard", "deviation", "upper", "and", "lower", "bounds" ]
python
train
25.75
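A quick sanity check of volume_to_distance_with_errors: feeding in the volume of a sphere of radius 100 should give a distance of 100 (assumes the function is importable from pycbc.sensitivity, as the path above suggests):

    import numpy
    from pycbc.sensitivity import volume_to_distance_with_errors

    r = 100.0
    vol = 4.0 / 3.0 * numpy.pi * r ** 3
    dist, ehigh, elow = volume_to_distance_with_errors(vol, vol_err=0.1 * vol)
    print(dist)                       # 100.0
    print(dist + ehigh, dist - elow)  # upper/lower distance bounds from vol +/- vol_err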
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py#L694-L697
def _tr_paren(line_info): "Translate lines escaped with: /" return '%s%s(%s)' % (line_info.pre, line_info.ifun, ", ".join(line_info.the_rest.split()))
[ "def", "_tr_paren", "(", "line_info", ")", ":", "return", "'%s%s(%s)'", "%", "(", "line_info", ".", "pre", ",", "line_info", ".", "ifun", ",", "\", \"", ".", "join", "(", "line_info", ".", "the_rest", ".", "split", "(", ")", ")", ")" ]
Translate lines escaped with: /
[ "Translate", "lines", "escaped", "with", ":", "/" ]
python
test
48
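The behaviour of _tr_paren can be seen with a stand-in for IPython's LineInfo object; only the three attributes the function reads are needed (FakeLineInfo is illustrative, not IPython API):

    from collections import namedtuple

    FakeLineInfo = namedtuple('FakeLineInfo', 'pre ifun the_rest')

    # IPython parses "/foo a b c" into pre='', ifun='foo', the_rest='a b c'
    info = FakeLineInfo(pre='', ifun='foo', the_rest='a b c')
    print(_tr_paren(info))   # foo(a, b, c)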
google/transitfeed
kmlwriter.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/kmlwriter.py#L438-L475
def _CreateRoutePatternsFolder(self, parent, route, style_id=None, visible=True): """Create a KML Folder containing placemarks for each pattern in the route. A pattern is a sequence of stops used by one of the trips in the route. If there are not patterns for the route then no folder is created and None is returned. Args: parent: The parent ElementTree.Element instance. route: The transitfeed.Route instance. style_id: The id of a style to use if not None. visible: Whether the folder is initially visible or not. Returns: The Folder ElementTree.Element instance or None if there are no patterns. """ pattern_id_to_trips = route.GetPatternIdTripDict() if not pattern_id_to_trips: return None # sort by number of trips using the pattern pattern_trips = pattern_id_to_trips.values() pattern_trips.sort(lambda a, b: cmp(len(b), len(a))) folder = self._CreateFolder(parent, 'Patterns', visible) for n, trips in enumerate(pattern_trips): trip_ids = [trip.trip_id for trip in trips] name = 'Pattern %d (trips: %d)' % (n+1, len(trips)) description = 'Trips using this pattern (%d in total): %s' % ( len(trips), ', '.join(trip_ids)) placemark = self._CreatePlacemark(folder, name, style_id, visible, description) coordinates = [(stop.stop_lon, stop.stop_lat) for stop in trips[0].GetPattern()] self._CreateLineString(placemark, coordinates) return folder
[ "def", "_CreateRoutePatternsFolder", "(", "self", ",", "parent", ",", "route", ",", "style_id", "=", "None", ",", "visible", "=", "True", ")", ":", "pattern_id_to_trips", "=", "route", ".", "GetPatternIdTripDict", "(", ")", "if", "not", "pattern_id_to_trips", ":", "return", "None", "# sort by number of trips using the pattern", "pattern_trips", "=", "pattern_id_to_trips", ".", "values", "(", ")", "pattern_trips", ".", "sort", "(", "lambda", "a", ",", "b", ":", "cmp", "(", "len", "(", "b", ")", ",", "len", "(", "a", ")", ")", ")", "folder", "=", "self", ".", "_CreateFolder", "(", "parent", ",", "'Patterns'", ",", "visible", ")", "for", "n", ",", "trips", "in", "enumerate", "(", "pattern_trips", ")", ":", "trip_ids", "=", "[", "trip", ".", "trip_id", "for", "trip", "in", "trips", "]", "name", "=", "'Pattern %d (trips: %d)'", "%", "(", "n", "+", "1", ",", "len", "(", "trips", ")", ")", "description", "=", "'Trips using this pattern (%d in total): %s'", "%", "(", "len", "(", "trips", ")", ",", "', '", ".", "join", "(", "trip_ids", ")", ")", "placemark", "=", "self", ".", "_CreatePlacemark", "(", "folder", ",", "name", ",", "style_id", ",", "visible", ",", "description", ")", "coordinates", "=", "[", "(", "stop", ".", "stop_lon", ",", "stop", ".", "stop_lat", ")", "for", "stop", "in", "trips", "[", "0", "]", ".", "GetPattern", "(", ")", "]", "self", ".", "_CreateLineString", "(", "placemark", ",", "coordinates", ")", "return", "folder" ]
Create a KML Folder containing placemarks for each pattern in the route. A pattern is a sequence of stops used by one of the trips in the route. If there are not patterns for the route then no folder is created and None is returned. Args: parent: The parent ElementTree.Element instance. route: The transitfeed.Route instance. style_id: The id of a style to use if not None. visible: Whether the folder is initially visible or not. Returns: The Folder ElementTree.Element instance or None if there are no patterns.
[ "Create", "a", "KML", "Folder", "containing", "placemarks", "for", "each", "pattern", "in", "the", "route", "." ]
python
train
40.947368
mjirik/io3d
io3d/dcmreaddata.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dcmreaddata.py#L491-L586
def get_metaData(self, dcmlist, series_number): """ Get metadata. Voxel size is obtained from PixelSpacing and difference of SliceLocation of two neighboorhoding slices (first have index ifile). Files in are used. """ # if dcmlist is None: # dcmlist = self.files_in_serie # number of slice where to extract metadata inforamtion ifile = 0 if len(dcmlist) == 0: return {} logger.debug("Filename: " + dcmlist[ifile]) data1 = self._read_file(dcmlist[ifile]) try: # try to get difference from the beginning and also from the end voxeldepth = self._get_slice_location_difference(dcmlist, ifile) voxeldepth_end = self._get_slice_location_difference(dcmlist, -2) if voxeldepth != voxeldepth_end: logger.warning("Depth of slices is not the same in beginning and end of the sequence") voxeldepth_1 = self._get_slice_location_difference(dcmlist, 1) voxeldepth = np.median([voxeldepth, voxeldepth_end, voxeldepth_1]) # head1, teil1 = os.path.split(dcmlist[ifile]) # head2, teil2 = os.path.split(dcmlist[ifile + 1]) # # data2 = self._read_file(dcmlist[ifile + 1]) # loc1 = get_slice_location(data1, teil1) # loc2 = get_slice_location(data2, teil2) # voxeldepth = float(np.abs(loc1 - loc2)) except Exception: logger.warning('Problem with voxel depth. Using SliceThickness') logger.debug(traceback.format_exc()) # + ' SeriesNumber: ' + str(data1.SeriesNumber)) try: voxeldepth = float(data1.SliceThickness) except Exception: logger.warning('Probem with SliceThicknes, setting zero. ' + traceback.format_exc()) voxeldepth = 0 try: pixelsize_mm = data1.PixelSpacing except: logger.warning('Problem with PixelSpacing. Using [1,1]') pixelsize_mm = [1, 1] voxelsize_mm = [ voxeldepth, float(pixelsize_mm[0]), float(pixelsize_mm[1]), ] metadata = {'voxelsize_mm': voxelsize_mm, 'Modality': data1.Modality, 'SeriesNumber': series_number } try: metadata['SeriesDescription'] = data1.SeriesDescription except: logger.info( 'Problem with tag SeriesDescription, SeriesNumber: ' + str(data1.SeriesNumber)) try: metadata['ImageComments'] = data1.ImageComments except: logger.info( 'Problem with tag ImageComments') # , SeriesNumber: ' + # str(data1.SeriesNumber)) try: metadata['Modality'] = data1.Modality except: logger.info( 'Problem with tag Modality') # SeriesNumber: ' + # str(data1.SeriesNumber)) metadata = attr_to_dict(data1, "AcquisitionDate", metadata) metadata = attr_to_dict(data1, "StudyDate", metadata) metadata = attr_to_dict(data1, "StudyID", metadata) metadata = attr_to_dict(data1, "StudyDescription", metadata) metadata = attr_to_dict(data1, "RequestedProcedureDescription", metadata) metadata = attr_to_dict(data1, "PatientSex", metadata) metadata = attr_to_dict(data1, "PatientAge", metadata) metadata = attr_to_dict(data1, "PatientID", metadata) metadata = attr_to_dict(data1, "PatientName", metadata) # metadata = attr_to_dict(data1, "AcquisitionTime", metadata) metadata['dcmfilelist'] = dcmlist return metadata
[ "def", "get_metaData", "(", "self", ",", "dcmlist", ",", "series_number", ")", ":", "# if dcmlist is None:", "# dcmlist = self.files_in_serie", "# number of slice where to extract metadata inforamtion", "ifile", "=", "0", "if", "len", "(", "dcmlist", ")", "==", "0", ":", "return", "{", "}", "logger", ".", "debug", "(", "\"Filename: \"", "+", "dcmlist", "[", "ifile", "]", ")", "data1", "=", "self", ".", "_read_file", "(", "dcmlist", "[", "ifile", "]", ")", "try", ":", "# try to get difference from the beginning and also from the end", "voxeldepth", "=", "self", ".", "_get_slice_location_difference", "(", "dcmlist", ",", "ifile", ")", "voxeldepth_end", "=", "self", ".", "_get_slice_location_difference", "(", "dcmlist", ",", "-", "2", ")", "if", "voxeldepth", "!=", "voxeldepth_end", ":", "logger", ".", "warning", "(", "\"Depth of slices is not the same in beginning and end of the sequence\"", ")", "voxeldepth_1", "=", "self", ".", "_get_slice_location_difference", "(", "dcmlist", ",", "1", ")", "voxeldepth", "=", "np", ".", "median", "(", "[", "voxeldepth", ",", "voxeldepth_end", ",", "voxeldepth_1", "]", ")", "# head1, teil1 = os.path.split(dcmlist[ifile])", "# head2, teil2 = os.path.split(dcmlist[ifile + 1])", "#", "# data2 = self._read_file(dcmlist[ifile + 1])", "# loc1 = get_slice_location(data1, teil1)", "# loc2 = get_slice_location(data2, teil2)", "# voxeldepth = float(np.abs(loc1 - loc2))", "except", "Exception", ":", "logger", ".", "warning", "(", "'Problem with voxel depth. Using SliceThickness'", ")", "logger", ".", "debug", "(", "traceback", ".", "format_exc", "(", ")", ")", "# + ' SeriesNumber: ' + str(data1.SeriesNumber))", "try", ":", "voxeldepth", "=", "float", "(", "data1", ".", "SliceThickness", ")", "except", "Exception", ":", "logger", ".", "warning", "(", "'Probem with SliceThicknes, setting zero. '", "+", "traceback", ".", "format_exc", "(", ")", ")", "voxeldepth", "=", "0", "try", ":", "pixelsize_mm", "=", "data1", ".", "PixelSpacing", "except", ":", "logger", ".", "warning", "(", "'Problem with PixelSpacing. 
Using [1,1]'", ")", "pixelsize_mm", "=", "[", "1", ",", "1", "]", "voxelsize_mm", "=", "[", "voxeldepth", ",", "float", "(", "pixelsize_mm", "[", "0", "]", ")", ",", "float", "(", "pixelsize_mm", "[", "1", "]", ")", ",", "]", "metadata", "=", "{", "'voxelsize_mm'", ":", "voxelsize_mm", ",", "'Modality'", ":", "data1", ".", "Modality", ",", "'SeriesNumber'", ":", "series_number", "}", "try", ":", "metadata", "[", "'SeriesDescription'", "]", "=", "data1", ".", "SeriesDescription", "except", ":", "logger", ".", "info", "(", "'Problem with tag SeriesDescription, SeriesNumber: '", "+", "str", "(", "data1", ".", "SeriesNumber", ")", ")", "try", ":", "metadata", "[", "'ImageComments'", "]", "=", "data1", ".", "ImageComments", "except", ":", "logger", ".", "info", "(", "'Problem with tag ImageComments'", ")", "# , SeriesNumber: ' +", "# str(data1.SeriesNumber))", "try", ":", "metadata", "[", "'Modality'", "]", "=", "data1", ".", "Modality", "except", ":", "logger", ".", "info", "(", "'Problem with tag Modality'", ")", "# SeriesNumber: ' +", "# str(data1.SeriesNumber))", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"AcquisitionDate\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"StudyDate\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"StudyID\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"StudyDescription\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"RequestedProcedureDescription\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"PatientSex\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"PatientAge\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"PatientID\"", ",", "metadata", ")", "metadata", "=", "attr_to_dict", "(", "data1", ",", "\"PatientName\"", ",", "metadata", ")", "# metadata = attr_to_dict(data1, \"AcquisitionTime\", metadata)", "metadata", "[", "'dcmfilelist'", "]", "=", "dcmlist", "return", "metadata" ]
Get metadata. Voxel size is obtained from PixelSpacing and the difference of SliceLocation between two neighboring slices (the first has index ifile). Files in the given list are used.
[ "Get", "metadata", ".", "Voxel", "size", "is", "obtained", "from", "PixelSpacing", "and", "the", "difference", "of", "SliceLocation", "between", "two", "neighboring", "slices", "(", "the", "first", "has", "index", "ifile", ")", ".", "Files", "in", "the", "given", "list", "are", "used", "." ]
python
train
39.395833
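The voxel-depth fallback in get_metaData (median of the spacing measured at the start, the second slice and the end) can be sketched on plain slice locations; this helper is illustrative only and ignores the DICOM plumbing:

    import numpy as np

    def robust_voxel_depth(slice_locations):
        # Median of three spacing estimates, mirroring the ifile=0, 1 and -2 probes above.
        locs = np.asarray(slice_locations, dtype=float)
        diffs = [abs(locs[1] - locs[0]), abs(locs[2] - locs[1]), abs(locs[-1] - locs[-2])]
        return float(np.median(diffs))

    print(robust_voxel_depth([0.0, 2.5, 5.0, 7.5, 10.0]))   # 2.5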
openstack/horizon
horizon/templatetags/horizon.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/templatetags/horizon.py#L95-L110
def horizon_main_nav(context): """Generates top-level dashboard navigation entries.""" if 'request' not in context: return {} current_dashboard = context['request'].horizon.get('dashboard', None) dashboards = [] for dash in Horizon.get_dashboards(): if dash.can_access(context): if callable(dash.nav) and dash.nav(context): dashboards.append(dash) elif dash.nav: dashboards.append(dash) return {'components': dashboards, 'user': context['request'].user, 'current': current_dashboard, 'request': context['request']}
[ "def", "horizon_main_nav", "(", "context", ")", ":", "if", "'request'", "not", "in", "context", ":", "return", "{", "}", "current_dashboard", "=", "context", "[", "'request'", "]", ".", "horizon", ".", "get", "(", "'dashboard'", ",", "None", ")", "dashboards", "=", "[", "]", "for", "dash", "in", "Horizon", ".", "get_dashboards", "(", ")", ":", "if", "dash", ".", "can_access", "(", "context", ")", ":", "if", "callable", "(", "dash", ".", "nav", ")", "and", "dash", ".", "nav", "(", "context", ")", ":", "dashboards", ".", "append", "(", "dash", ")", "elif", "dash", ".", "nav", ":", "dashboards", ".", "append", "(", "dash", ")", "return", "{", "'components'", ":", "dashboards", ",", "'user'", ":", "context", "[", "'request'", "]", ".", "user", ",", "'current'", ":", "current_dashboard", ",", "'request'", ":", "context", "[", "'request'", "]", "}" ]
Generates top-level dashboard navigation entries.
[ "Generates", "top", "-", "level", "dashboard", "navigation", "entries", "." ]
python
train
39.4375
pazz/alot
alot/helper.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L570-L576
def mailto_to_envelope(mailto_str): """ Interpret mailto-string into a :class:`alot.db.envelope.Envelope` """ from alot.db.envelope import Envelope headers, body = parse_mailto(mailto_str) return Envelope(bodytext=body, headers=headers)
[ "def", "mailto_to_envelope", "(", "mailto_str", ")", ":", "from", "alot", ".", "db", ".", "envelope", "import", "Envelope", "headers", ",", "body", "=", "parse_mailto", "(", "mailto_str", ")", "return", "Envelope", "(", "bodytext", "=", "body", ",", "headers", "=", "headers", ")" ]
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
[ "Interpret", "mailto", "-", "string", "into", "a", ":", "class", ":", "alot", ".", "db", ".", "envelope", ".", "Envelope" ]
python
train
36.285714
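mailto_to_envelope delegates the string handling to parse_mailto; a rough stand-alone approximation of that parsing step with the standard library looks like this (alot's real parser covers more corner cases):

    from urllib.parse import urlsplit, parse_qs, unquote

    def naive_parse_mailto(mailto_str):
        parts = urlsplit(mailto_str)
        headers = {'To': unquote(parts.path)}
        for key, values in parse_qs(parts.query).items():
            headers[key.capitalize()] = values[0]
        body = headers.pop('Body', '')
        return headers, body

    print(naive_parse_mailto('mailto:[email protected]?subject=Hi&body=Hello%20there'))
    # ({'To': '[email protected]', 'Subject': 'Hi'}, 'Hello there')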
limix/scipy-sugar
scipy_sugar/stats/_normalize.py
https://github.com/limix/scipy-sugar/blob/8109685b14f61cf4c7fc66e6a98f10f35cbd086c/scipy_sugar/stats/_normalize.py#L6-L32
def quantile_gaussianize(x): """Normalize a sequence of values via rank and Normal c.d.f. Args: x (array_like): sequence of values. Returns: Gaussian-normalized values. Example: .. doctest:: >>> from scipy_sugar.stats import quantile_gaussianize >>> print(quantile_gaussianize([-1, 0, 2])) [-0.67448975 0. 0.67448975] """ from scipy.stats import norm, rankdata x = asarray(x, float).copy() ok = isfinite(x) x[ok] *= -1 y = empty_like(x) y[ok] = rankdata(x[ok]) y[ok] = norm.isf(y[ok] / (sum(ok) + 1)) y[~ok] = x[~ok] return y
[ "def", "quantile_gaussianize", "(", "x", ")", ":", "from", "scipy", ".", "stats", "import", "norm", ",", "rankdata", "x", "=", "asarray", "(", "x", ",", "float", ")", ".", "copy", "(", ")", "ok", "=", "isfinite", "(", "x", ")", "x", "[", "ok", "]", "*=", "-", "1", "y", "=", "empty_like", "(", "x", ")", "y", "[", "ok", "]", "=", "rankdata", "(", "x", "[", "ok", "]", ")", "y", "[", "ok", "]", "=", "norm", ".", "isf", "(", "y", "[", "ok", "]", "/", "(", "sum", "(", "ok", ")", "+", "1", ")", ")", "y", "[", "~", "ok", "]", "=", "x", "[", "~", "ok", "]", "return", "y" ]
Normalize a sequence of values via rank and Normal c.d.f. Args: x (array_like): sequence of values. Returns: Gaussian-normalized values. Example: .. doctest:: >>> from scipy_sugar.stats import quantile_gaussianize >>> print(quantile_gaussianize([-1, 0, 2])) [-0.67448975 0. 0.67448975]
[ "Normalize", "a", "sequence", "of", "values", "via", "rank", "and", "Normal", "c", ".", "d", ".", "f", "." ]
python
train
22.777778
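Beyond the doctest in the record, the isfinite mask means non-finite entries pass through untouched while the finite values are rank-transformed among themselves:

    import numpy as np
    from scipy_sugar.stats import quantile_gaussianize

    print(quantile_gaussianize([1.0, np.nan, 3.0, 2.0]))
    # [-0.67448975  nan  0.67448975  0.]  -- the NaN stays in place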
ninuxorg/nodeshot
nodeshot/community/notifications/management/commands/purge_notifications.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/notifications/management/commands/purge_notifications.py#L24-L35
def handle(self, *args, **options): """ Purge notifications """ # retrieve layers notifications = self.retrieve_old_notifications() count = len(notifications) if count > 0: self.output('found %d notifications to purge...' % count) notifications.delete() self.output('%d notifications deleted successfully.' % count) else: self.output('there are no old notifications to purge')
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "# retrieve layers", "notifications", "=", "self", ".", "retrieve_old_notifications", "(", ")", "count", "=", "len", "(", "notifications", ")", "if", "count", ">", "0", ":", "self", ".", "output", "(", "'found %d notifications to purge...'", "%", "count", ")", "notifications", ".", "delete", "(", ")", "self", ".", "output", "(", "'%d notifications deleted successfully.'", "%", "count", ")", "else", ":", "self", ".", "output", "(", "'there are no old notifications to purge'", ")" ]
Purge notifications
[ "Purge", "notifications" ]
python
train
38.5
rapidpro/expressions
python/temba_expressions/dates.py
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/dates.py#L178-L239
def _get_token_possibilities(cls, token, mode): """ Returns all possible component types of a token without regard to its context. For example "26" could be year, date or minute, but can't be a month or an hour. :param token: the token to classify :param mode: the parse mode :return: the dict of possible types and values if token was of that type """ token = token.lower().strip() possibilities = {} try: as_int = int(token) if mode != Mode.TIME: if 1 <= as_int <= 9999 and (len(token) == 2 or len(token) == 4): possibilities[Component.YEAR] = as_int if 1 <= as_int <= 12: possibilities[Component.MONTH] = as_int if 1 <= as_int <= 31: possibilities[Component.DAY] = as_int if mode != Mode.DATE: if 0 <= as_int <= 23: possibilities[Component.HOUR] = as_int if 0 <= as_int <= 59: possibilities[Component.MINUTE] = as_int if 0 <= as_int <= 59: possibilities[Component.SECOND] = as_int if len(token) == 3 or len(token) == 6 or len(token) == 9: nano = 0 if len(token) == 3: # millisecond precision nano = as_int * 1000000 elif len(token) == 6: # microsecond precision nano = as_int * 1000 elif len(token) == 9: nano = as_int possibilities[Component.NANO] = nano if len(token) == 4: hour = as_int // 100 minute = as_int - (hour * 100) if 1 <= hour <= 24 and 1 <= minute <= 59: possibilities[Component.HOUR_AND_MINUTE] = as_int except ValueError: if mode != Mode.TIME: # could it be a month alias? month = MONTHS_BY_ALIAS.get(token, None) if month is not None: possibilities[Component.MONTH] = month if mode != Mode.DATE: # could it be an AM/PM marker? is_am_marker = token == "am" is_pm_marker = token == "pm" if is_am_marker or is_pm_marker: possibilities[Component.AM_PM] = cls.AM if is_am_marker else cls.PM # offset parsing is limited to Z meaning UTC for now if token == "z": possibilities[Component.OFFSET] = 0 return possibilities
[ "def", "_get_token_possibilities", "(", "cls", ",", "token", ",", "mode", ")", ":", "token", "=", "token", ".", "lower", "(", ")", ".", "strip", "(", ")", "possibilities", "=", "{", "}", "try", ":", "as_int", "=", "int", "(", "token", ")", "if", "mode", "!=", "Mode", ".", "TIME", ":", "if", "1", "<=", "as_int", "<=", "9999", "and", "(", "len", "(", "token", ")", "==", "2", "or", "len", "(", "token", ")", "==", "4", ")", ":", "possibilities", "[", "Component", ".", "YEAR", "]", "=", "as_int", "if", "1", "<=", "as_int", "<=", "12", ":", "possibilities", "[", "Component", ".", "MONTH", "]", "=", "as_int", "if", "1", "<=", "as_int", "<=", "31", ":", "possibilities", "[", "Component", ".", "DAY", "]", "=", "as_int", "if", "mode", "!=", "Mode", ".", "DATE", ":", "if", "0", "<=", "as_int", "<=", "23", ":", "possibilities", "[", "Component", ".", "HOUR", "]", "=", "as_int", "if", "0", "<=", "as_int", "<=", "59", ":", "possibilities", "[", "Component", ".", "MINUTE", "]", "=", "as_int", "if", "0", "<=", "as_int", "<=", "59", ":", "possibilities", "[", "Component", ".", "SECOND", "]", "=", "as_int", "if", "len", "(", "token", ")", "==", "3", "or", "len", "(", "token", ")", "==", "6", "or", "len", "(", "token", ")", "==", "9", ":", "nano", "=", "0", "if", "len", "(", "token", ")", "==", "3", ":", "# millisecond precision", "nano", "=", "as_int", "*", "1000000", "elif", "len", "(", "token", ")", "==", "6", ":", "# microsecond precision", "nano", "=", "as_int", "*", "1000", "elif", "len", "(", "token", ")", "==", "9", ":", "nano", "=", "as_int", "possibilities", "[", "Component", ".", "NANO", "]", "=", "nano", "if", "len", "(", "token", ")", "==", "4", ":", "hour", "=", "as_int", "//", "100", "minute", "=", "as_int", "-", "(", "hour", "*", "100", ")", "if", "1", "<=", "hour", "<=", "24", "and", "1", "<=", "minute", "<=", "59", ":", "possibilities", "[", "Component", ".", "HOUR_AND_MINUTE", "]", "=", "as_int", "except", "ValueError", ":", "if", "mode", "!=", "Mode", ".", "TIME", ":", "# could it be a month alias?", "month", "=", "MONTHS_BY_ALIAS", ".", "get", "(", "token", ",", "None", ")", "if", "month", "is", "not", "None", ":", "possibilities", "[", "Component", ".", "MONTH", "]", "=", "month", "if", "mode", "!=", "Mode", ".", "DATE", ":", "# could it be an AM/PM marker?", "is_am_marker", "=", "token", "==", "\"am\"", "is_pm_marker", "=", "token", "==", "\"pm\"", "if", "is_am_marker", "or", "is_pm_marker", ":", "possibilities", "[", "Component", ".", "AM_PM", "]", "=", "cls", ".", "AM", "if", "is_am_marker", "else", "cls", ".", "PM", "# offset parsing is limited to Z meaning UTC for now", "if", "token", "==", "\"z\"", ":", "possibilities", "[", "Component", ".", "OFFSET", "]", "=", "0", "return", "possibilities" ]
Returns all possible component types of a token without regard to its context. For example "26" could be year, date or minute, but can't be a month or an hour. :param token: the token to classify :param mode: the parse mode :return: the dict of possible types and values if token was of that type
[ "Returns", "all", "possible", "component", "types", "of", "a", "token", "without", "regard", "to", "its", "context", ".", "For", "example", "26", "could", "be", "year", "date", "or", "minute", "but", "can", "t", "be", "a", "month", "or", "an", "hour", ".", ":", "param", "token", ":", "the", "token", "to", "classify", ":", "param", "mode", ":", "the", "parse", "mode", ":", "return", ":", "the", "dict", "of", "possible", "types", "and", "values", "if", "token", "was", "of", "that", "type" ]
python
train
42.645161
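A stripped-down sketch of the numeric-range checks above (the real method also handles month aliases, am/pm markers, hhmm pairs, sub-second precision and the "z" offset; the component names here are ad hoc):

    def possible_components(token):
        out = {}
        if token.isdigit():
            n = int(token)
            if 1 <= n <= 9999 and len(token) in (2, 4):
                out['year'] = n
            if 1 <= n <= 12:
                out['month'] = n
            if 1 <= n <= 31:
                out['day'] = n
            if 0 <= n <= 23:
                out['hour'] = n
            if 0 <= n <= 59:
                out['minute'] = out['second'] = n
        return out

    print(possible_components('26'))
    # {'year': 26, 'day': 26, 'minute': 26, 'second': 26}
    # i.e. "26" can be a year, day, minute or second but not a month or an hour,
    # matching the docstring's example.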
bluedazzle/wechat_sender
wechat_sender/utils.py
https://github.com/bluedazzle/wechat_sender/blob/21d861735509153d6b34408157911c25a5d7018b/wechat_sender/utils.py#L22-L28
def _read_config_list(): """ 配置列表读取 """ with codecs.open('conf.ini', 'w+', encoding='utf-8') as f1: conf_list = [conf for conf in f1.read().split('\n') if conf != ''] return conf_list
[ "def", "_read_config_list", "(", ")", ":", "with", "codecs", ".", "open", "(", "'conf.ini'", ",", "'w+'", ",", "encoding", "=", "'utf-8'", ")", "as", "f1", ":", "conf_list", "=", "[", "conf", "for", "conf", "in", "f1", ".", "read", "(", ")", ".", "split", "(", "'\\n'", ")", "if", "conf", "!=", "''", "]", "return", "conf_list" ]
Read the configuration list
[ "Read", "the", "configuration", "list" ]
python
train
29.857143
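Note that codecs.open(..., 'w+') truncates conf.ini before it is read, so the function above returns an empty list for an existing file; if the intent is to read, a sketch with a read-only mode would be:

    import codecs

    def read_config_list(path='conf.ini'):
        # 'r' preserves the file; 'w+' truncates it before .read() sees anything.
        with codecs.open(path, 'r', encoding='utf-8') as f1:
            return [conf for conf in f1.read().split('\n') if conf != '']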
chaoss/grimoirelab-perceval
perceval/backends/core/askbot.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/askbot.py#L393-L462
def parse_answers(html_question): """Parse the answers of a given HTML question. The method parses the answers related with a given HTML question, as well as all the comments related to the answer. :param html_question: raw HTML question element :returns: a list with the answers """ def parse_answer_container(update_info): """Parse the answer info container of a given HTML question. The method parses the information available in the answer information container. The container can have up to 2 elements: the first one contains the information related with the user who generated the question and the date (if any). The second one contains the date of the updated, and the user who updated it (if not the same who generated the question). :param update_info: beautiful soup update_info container element :returns: an object with the parsed information """ container_info = {} created = update_info[0] answered_at = created.abbr.attrs["title"] # Convert date to UNIX timestamp container_info['added_at'] = str(str_to_datetime(answered_at).timestamp()) container_info['answered_by'] = AskbotParser.parse_user_info(created) try: update_info[1] except IndexError: pass else: updated = update_info[1] updated_at = updated.abbr.attrs["title"] # Convert date to UNIX timestamp container_info['updated_at'] = str(str_to_datetime(updated_at).timestamp()) if AskbotParser.parse_user_info(updated): container_info['updated_by'] = AskbotParser.parse_user_info(updated) return container_info answer_list = [] # Select all the answers bs_question = bs4.BeautifulSoup(html_question, "html.parser") bs_answers = bs_question.select("div.answer") for bs_answer in bs_answers: answer_id = bs_answer.attrs["data-post-id"] votes_element = bs_answer.select("div.vote-number")[0].text accepted_answer = bs_answer.select("div.answer-img-accept")[0].get('title').endswith("correct") # Select the body of the answer body = bs_answer.select("div.post-body") # Get the user information container and parse it update_info = body[0].select("div.post-update-info") answer_container = parse_answer_container(update_info) # Remove the update-info-container div to be able to get the body body[0].div.extract().select("div.post-update-info-container") # Override the body with a clean one body = body[0].get_text(strip=True) # Generate the answer object answer = {'id': answer_id, 'score': votes_element, 'summary': body, 'accepted': accepted_answer } # Update the object with the information in the answer container answer.update(answer_container) answer_list.append(answer) return answer_list
[ "def", "parse_answers", "(", "html_question", ")", ":", "def", "parse_answer_container", "(", "update_info", ")", ":", "\"\"\"Parse the answer info container of a given HTML question.\n\n The method parses the information available in the answer information\n container. The container can have up to 2 elements: the first one\n contains the information related with the user who generated the question\n and the date (if any). The second one contains the date of the updated,\n and the user who updated it (if not the same who generated the question).\n\n :param update_info: beautiful soup update_info container element\n\n :returns: an object with the parsed information\n \"\"\"", "container_info", "=", "{", "}", "created", "=", "update_info", "[", "0", "]", "answered_at", "=", "created", ".", "abbr", ".", "attrs", "[", "\"title\"", "]", "# Convert date to UNIX timestamp", "container_info", "[", "'added_at'", "]", "=", "str", "(", "str_to_datetime", "(", "answered_at", ")", ".", "timestamp", "(", ")", ")", "container_info", "[", "'answered_by'", "]", "=", "AskbotParser", ".", "parse_user_info", "(", "created", ")", "try", ":", "update_info", "[", "1", "]", "except", "IndexError", ":", "pass", "else", ":", "updated", "=", "update_info", "[", "1", "]", "updated_at", "=", "updated", ".", "abbr", ".", "attrs", "[", "\"title\"", "]", "# Convert date to UNIX timestamp", "container_info", "[", "'updated_at'", "]", "=", "str", "(", "str_to_datetime", "(", "updated_at", ")", ".", "timestamp", "(", ")", ")", "if", "AskbotParser", ".", "parse_user_info", "(", "updated", ")", ":", "container_info", "[", "'updated_by'", "]", "=", "AskbotParser", ".", "parse_user_info", "(", "updated", ")", "return", "container_info", "answer_list", "=", "[", "]", "# Select all the answers", "bs_question", "=", "bs4", ".", "BeautifulSoup", "(", "html_question", ",", "\"html.parser\"", ")", "bs_answers", "=", "bs_question", ".", "select", "(", "\"div.answer\"", ")", "for", "bs_answer", "in", "bs_answers", ":", "answer_id", "=", "bs_answer", ".", "attrs", "[", "\"data-post-id\"", "]", "votes_element", "=", "bs_answer", ".", "select", "(", "\"div.vote-number\"", ")", "[", "0", "]", ".", "text", "accepted_answer", "=", "bs_answer", ".", "select", "(", "\"div.answer-img-accept\"", ")", "[", "0", "]", ".", "get", "(", "'title'", ")", ".", "endswith", "(", "\"correct\"", ")", "# Select the body of the answer", "body", "=", "bs_answer", ".", "select", "(", "\"div.post-body\"", ")", "# Get the user information container and parse it", "update_info", "=", "body", "[", "0", "]", ".", "select", "(", "\"div.post-update-info\"", ")", "answer_container", "=", "parse_answer_container", "(", "update_info", ")", "# Remove the update-info-container div to be able to get the body", "body", "[", "0", "]", ".", "div", ".", "extract", "(", ")", ".", "select", "(", "\"div.post-update-info-container\"", ")", "# Override the body with a clean one", "body", "=", "body", "[", "0", "]", ".", "get_text", "(", "strip", "=", "True", ")", "# Generate the answer object", "answer", "=", "{", "'id'", ":", "answer_id", ",", "'score'", ":", "votes_element", ",", "'summary'", ":", "body", ",", "'accepted'", ":", "accepted_answer", "}", "# Update the object with the information in the answer container", "answer", ".", "update", "(", "answer_container", ")", "answer_list", ".", "append", "(", "answer", ")", "return", "answer_list" ]
Parse the answers of a given HTML question. The method parses the answers related with a given HTML question, as well as all the comments related to the answer. :param html_question: raw HTML question element :returns: a list with the answers
[ "Parse", "the", "answers", "of", "a", "given", "HTML", "question", "." ]
python
test
46.542857
SeattleTestbed/seash
pyreadline/rlmain.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/rlmain.py#L265-L274
def callback_read_char(self): u'''Reads a character and informs the readline callback interface when a line is received''' if self.keyboard_poll(): line = self.get_line_buffer() + u'\n' # however there is another newline added by # self.mode.readline_setup(prompt) which is called by callback_handler_install # this differs from GNU readline self.add_history(self.mode.l_buffer) # TADA: self.callback(line)
[ "def", "callback_read_char", "(", "self", ")", ":", "if", "self", ".", "keyboard_poll", "(", ")", ":", "line", "=", "self", ".", "get_line_buffer", "(", ")", "+", "u'\\n'", "# however there is another newline added by", "# self.mode.readline_setup(prompt) which is called by callback_handler_install", "# this differs from GNU readline", "self", ".", "add_history", "(", "self", ".", "mode", ".", "l_buffer", ")", "# TADA:", "self", ".", "callback", "(", "line", ")" ]
Reads a character and informs the readline callback interface when a line is received
[ "Reads", "a", "character", "and", "informs", "the", "readline", "callback", "interface", "when", "a", "line", "is", "received" ]
python
train
49.7
saltstack/salt
salt/modules/nspawn.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nspawn.py#L1391-L1420
def pull_raw(url, name, verify=False): ''' Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image, and add it to /var/lib/machines as a new container. .. note:: **Requires systemd >= 219** url URL from which to download the container name Name for the new container verify : False Perform signature or checksum verification on the container. See the ``machinectl(1)`` man page (section titled "Image Transfer Commands") for more information on requirements for image verification. To perform signature verification, use ``verify=signature``. For checksum verification, use ``verify=checksum``. By default, no verification will be performed. CLI Examples: .. code-block:: bash salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21 ''' return _pull_image('raw', url, name, verify=verify)
[ "def", "pull_raw", "(", "url", ",", "name", ",", "verify", "=", "False", ")", ":", "return", "_pull_image", "(", "'raw'", ",", "url", ",", "name", ",", "verify", "=", "verify", ")" ]
Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image, and add it to /var/lib/machines as a new container. .. note:: **Requires systemd >= 219** url URL from which to download the container name Name for the new container verify : False Perform signature or checksum verification on the container. See the ``machinectl(1)`` man page (section titled "Image Transfer Commands") for more information on requirements for image verification. To perform signature verification, use ``verify=signature``. For checksum verification, use ``verify=checksum``. By default, no verification will be performed. CLI Examples: .. code-block:: bash salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21
[ "Execute", "a", "machinectl", "pull", "-", "raw", "to", "download", "a", ".", "qcow2", "or", "raw", "disk", "image", "and", "add", "it", "to", "/", "var", "/", "lib", "/", "machines", "as", "a", "new", "container", "." ]
python
train
33.8
lsst-sqre/sqre-codekit
codekit/progressbar.py
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/progressbar.py#L19-L40
def countdown_timer(seconds=10): """Show a simple countdown progress bar Parameters ---------- seconds Period of time the progress bar takes to reach zero. """ tick = 0.1 # seconds n_ticks = int(seconds / tick) widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()] pbar = progressbar.ProgressBar( widgets=widgets, max_value=n_ticks ).start() for i in range(n_ticks): pbar.update(i) sleep(tick) pbar.finish()
[ "def", "countdown_timer", "(", "seconds", "=", "10", ")", ":", "tick", "=", "0.1", "# seconds", "n_ticks", "=", "int", "(", "seconds", "/", "tick", ")", "widgets", "=", "[", "'Pause for panic: '", ",", "progressbar", ".", "ETA", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", ")", "]", "pbar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "widgets", ",", "max_value", "=", "n_ticks", ")", ".", "start", "(", ")", "for", "i", "in", "range", "(", "n_ticks", ")", ":", "pbar", ".", "update", "(", "i", ")", "sleep", "(", "tick", ")", "pbar", ".", "finish", "(", ")" ]
Show a simple countdown progress bar Parameters ---------- seconds Period of time the progress bar takes to reach zero.
[ "Show", "a", "simple", "countdown", "progress", "bar" ]
python
train
22.409091
chimera0/accel-brain-code
Reinforcement-Learning/pyqlearning/misc/beta_dist.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/misc/beta_dist.py#L95-L109
def variance(self): ''' Compute variance. Returns: variance. ''' alpha = self.__success + self.__default_alpha beta = self.__failure + self.__default_beta try: variance = alpha * beta / ((alpha + beta) ** 2) * (alpha + beta + 1) except ZeroDivisionError: variance = 0.0 return variance
[ "def", "variance", "(", "self", ")", ":", "alpha", "=", "self", ".", "__success", "+", "self", ".", "__default_alpha", "beta", "=", "self", ".", "__failure", "+", "self", ".", "__default_beta", "try", ":", "variance", "=", "alpha", "*", "beta", "/", "(", "(", "alpha", "+", "beta", ")", "**", "2", ")", "*", "(", "alpha", "+", "beta", "+", "1", ")", "except", "ZeroDivisionError", ":", "variance", "=", "0.0", "return", "variance" ]
Compute variance. Returns: variance.
[ "Compute", "variance", "." ]
python
train
25.4
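For comparison, the textbook variance of a Beta(alpha, beta) distribution divides by (alpha + beta + 1) rather than multiplying by it; a standalone sketch of that formula (not pyqlearning's implementation):

    def beta_variance(alpha, beta):
        # Var[X] for X ~ Beta(alpha, beta) = alpha*beta / ((alpha+beta)**2 * (alpha+beta+1))
        denom = (alpha + beta) ** 2 * (alpha + beta + 1)
        return alpha * beta / denom if denom else 0.0

    print(beta_variance(2, 3))   # 6 / 150 = 0.04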
thebigmunch/gmusicapi-wrapper
gmusicapi_wrapper/utils.py
https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/utils.py#L175-L181
def _check_field_value(field_value, pattern): """Check a song metadata field value for a pattern.""" if isinstance(field_value, list): return any(re.search(pattern, str(value), re.I) for value in field_value) else: return re.search(pattern, str(field_value), re.I)
[ "def", "_check_field_value", "(", "field_value", ",", "pattern", ")", ":", "if", "isinstance", "(", "field_value", ",", "list", ")", ":", "return", "any", "(", "re", ".", "search", "(", "pattern", ",", "str", "(", "value", ")", ",", "re", ".", "I", ")", "for", "value", "in", "field_value", ")", "else", ":", "return", "re", ".", "search", "(", "pattern", ",", "str", "(", "field_value", ")", ",", "re", ".", "I", ")" ]
Check a song metadata field value for a pattern.
[ "Check", "a", "song", "metadata", "field", "value", "for", "a", "pattern", "." ]
python
valid
38
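Assuming _check_field_value from the record above is in scope, the list and scalar branches behave like this:

    print(_check_field_value(['Hard Rock', 'Pop'], 'rock'))   # True (any() over the list)
    print(_check_field_value('1973', r'^19'))                 # re.Match object (truthy)
    print(_check_field_value('Jazz', 'rock'))                 # None (falsy)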
klahnakoski/pyLibrary
jx_elasticsearch/es52/util.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_elasticsearch/es52/util.py#L21-L56
def es_query_template(path): """ RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE :param path: THE NESTED PATH (NOT INCLUDING TABLE NAME) :return: (es_query, es_filters) TUPLE """ if not is_text(path): Log.error("expecting path to be a string") if path != ".": f0 = {} f1 = {} output = wrap({ "query": es_and([ f0, {"nested": { "path": path, "query": f1, "inner_hits": {"size": 100000} }} ]), "from": 0, "size": 0, "sort": [] }) return output, wrap([f0, f1]) else: f0 = {} output = wrap({ "query": es_and([f0]), "from": 0, "size": 0, "sort": [] }) return output, wrap([f0])
[ "def", "es_query_template", "(", "path", ")", ":", "if", "not", "is_text", "(", "path", ")", ":", "Log", ".", "error", "(", "\"expecting path to be a string\"", ")", "if", "path", "!=", "\".\"", ":", "f0", "=", "{", "}", "f1", "=", "{", "}", "output", "=", "wrap", "(", "{", "\"query\"", ":", "es_and", "(", "[", "f0", ",", "{", "\"nested\"", ":", "{", "\"path\"", ":", "path", ",", "\"query\"", ":", "f1", ",", "\"inner_hits\"", ":", "{", "\"size\"", ":", "100000", "}", "}", "}", "]", ")", ",", "\"from\"", ":", "0", ",", "\"size\"", ":", "0", ",", "\"sort\"", ":", "[", "]", "}", ")", "return", "output", ",", "wrap", "(", "[", "f0", ",", "f1", "]", ")", "else", ":", "f0", "=", "{", "}", "output", "=", "wrap", "(", "{", "\"query\"", ":", "es_and", "(", "[", "f0", "]", ")", ",", "\"from\"", ":", "0", ",", "\"size\"", ":", "0", ",", "\"sort\"", ":", "[", "]", "}", ")", "return", "output", ",", "wrap", "(", "[", "f0", "]", ")" ]
RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE :param path: THE NESTED PATH (NOT INCLUDING TABLE NAME) :return: (es_query, es_filters) TUPLE
[ "RETURN", "TEMPLATE", "AND", "PATH", "-", "TO", "-", "FILTER", "AS", "A", "2", "-", "TUPLE", ":", "param", "path", ":", "THE", "NESTED", "PATH", "(", "NOT", "INCLUDING", "TABLE", "NAME", ")", ":", "return", ":", "(", "es_query", "es_filters", ")", "TUPLE" ]
python
train
24.083333
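What the nested branch of es_query_template returns, written out (this only restates the code above; es_and's exact output shape is defined elsewhere in the module):

    template, slots = es_query_template("comments")

    # template is roughly:
    #   {"query": es_and([f0, {"nested": {"path": "comments",
    #                                     "query": f1,
    #                                     "inner_hits": {"size": 100000}}}]),
    #    "from": 0, "size": 0, "sort": []}
    # slots is [f0, f1]: callers fill f0 with top-level filters and f1 with the
    # filters that must hold inside the nested "comments" documents.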
OpenTreeOfLife/peyotl
peyotl/amendments/amendments_umbrella.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/amendments/amendments_umbrella.py#L352-L379
def TaxonomicAmendmentStore(repos_dict=None, repos_par=None, with_caching=True, assumed_doc_version=None, git_ssh=None, pkey=None, git_action_class=TaxonomicAmendmentsGitAction, mirror_info=None, infrastructure_commit_author='OpenTree API <[email protected]>'): """Factory function for a _TaxonomicAmendmentStore object. A wrapper around the _TaxonomicAmendmentStore class instantiation for the most common use case: a singleton _TaxonomicAmendmentStore. If you need distinct _TaxonomicAmendmentStore objects, you'll need to call that class directly. """ global _THE_TAXONOMIC_AMENDMENT_STORE if _THE_TAXONOMIC_AMENDMENT_STORE is None: _THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(repos_dict=repos_dict, repos_par=repos_par, with_caching=with_caching, assumed_doc_version=assumed_doc_version, git_ssh=git_ssh, pkey=pkey, git_action_class=git_action_class, mirror_info=mirror_info, infrastructure_commit_author=infrastructure_commit_author) return _THE_TAXONOMIC_AMENDMENT_STORE
[ "def", "TaxonomicAmendmentStore", "(", "repos_dict", "=", "None", ",", "repos_par", "=", "None", ",", "with_caching", "=", "True", ",", "assumed_doc_version", "=", "None", ",", "git_ssh", "=", "None", ",", "pkey", "=", "None", ",", "git_action_class", "=", "TaxonomicAmendmentsGitAction", ",", "mirror_info", "=", "None", ",", "infrastructure_commit_author", "=", "'OpenTree API <[email protected]>'", ")", ":", "global", "_THE_TAXONOMIC_AMENDMENT_STORE", "if", "_THE_TAXONOMIC_AMENDMENT_STORE", "is", "None", ":", "_THE_TAXONOMIC_AMENDMENT_STORE", "=", "_TaxonomicAmendmentStore", "(", "repos_dict", "=", "repos_dict", ",", "repos_par", "=", "repos_par", ",", "with_caching", "=", "with_caching", ",", "assumed_doc_version", "=", "assumed_doc_version", ",", "git_ssh", "=", "git_ssh", ",", "pkey", "=", "pkey", ",", "git_action_class", "=", "git_action_class", ",", "mirror_info", "=", "mirror_info", ",", "infrastructure_commit_author", "=", "infrastructure_commit_author", ")", "return", "_THE_TAXONOMIC_AMENDMENT_STORE" ]
Factory function for a _TaxonomicAmendmentStore object. A wrapper around the _TaxonomicAmendmentStore class instantiation for the most common use case: a singleton _TaxonomicAmendmentStore. If you need distinct _TaxonomicAmendmentStore objects, you'll need to call that class directly.
[ "Factory", "function", "for", "a", "_TaxonomicAmendmentStore", "object", "." ]
python
train
63.035714
chbrown/pi
pi/commands/install.py
https://github.com/chbrown/pi/blob/a3661eccf1c6f0105e34a0ee24328022bf4e6b92/pi/commands/install.py#L14-L23
def cli(parser): ''' Currently a cop-out -- just calls easy_install ''' parser.add_argument('-n', '--dry-run', action='store_true', help='Print uninstall actions without running') parser.add_argument('packages', nargs='+', help='Packages to install') opts = parser.parse_args() for package in opts.packages: install(package, execute=not opts.dry_run)
[ "def", "cli", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-n'", ",", "'--dry-run'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Print uninstall actions without running'", ")", "parser", ".", "add_argument", "(", "'packages'", ",", "nargs", "=", "'+'", ",", "help", "=", "'Packages to install'", ")", "opts", "=", "parser", ".", "parse_args", "(", ")", "for", "package", "in", "opts", ".", "packages", ":", "install", "(", "package", ",", "execute", "=", "not", "opts", ".", "dry_run", ")" ]
Currently a cop-out -- just calls easy_install
[ "Currently", "a", "cop", "-", "out", "--", "just", "calls", "easy_install" ]
python
train
37.8
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3086-L3098
def _get_item_cache(self, item): """Return the cached item, item represents a label indexer.""" cache = self._item_cache res = cache.get(item) if res is None: values = self._data.get(item) res = self._box_item_values(item, values) cache[item] = res res._set_as_cached(item, self) # for a chain res._is_copy = self._is_copy return res
[ "def", "_get_item_cache", "(", "self", ",", "item", ")", ":", "cache", "=", "self", ".", "_item_cache", "res", "=", "cache", ".", "get", "(", "item", ")", "if", "res", "is", "None", ":", "values", "=", "self", ".", "_data", ".", "get", "(", "item", ")", "res", "=", "self", ".", "_box_item_values", "(", "item", ",", "values", ")", "cache", "[", "item", "]", "=", "res", "res", ".", "_set_as_cached", "(", "item", ",", "self", ")", "# for a chain", "res", ".", "_is_copy", "=", "self", ".", "_is_copy", "return", "res" ]
Return the cached item, item represents a label indexer.
[ "Return", "the", "cached", "item", "item", "represents", "a", "label", "indexer", "." ]
python
train
33.384615
ManiacalLabs/PixelWeb
pixelweb/bottle.py
https://github.com/ManiacalLabs/PixelWeb/blob/9eacbfd40a1d35011c2dcea15c303da9636c6b9e/pixelweb/bottle.py#L2158-L2163
def meta_set(self, key, metafield, value): ''' Set the meta field for a key to a new value. This triggers the on-change handler for existing keys. ''' self._meta.setdefault(key, {})[metafield] = value if key in self: self[key] = self[key]
[ "def", "meta_set", "(", "self", ",", "key", ",", "metafield", ",", "value", ")", ":", "self", ".", "_meta", ".", "setdefault", "(", "key", ",", "{", "}", ")", "[", "metafield", "]", "=", "value", "if", "key", "in", "self", ":", "self", "[", "key", "]", "=", "self", "[", "key", "]" ]
Set the meta field for a key to a new value. This triggers the on-change handler for existing keys.
[ "Set", "the", "meta", "field", "for", "a", "key", "to", "a", "new", "value", ".", "This", "triggers", "the", "on", "-", "change", "handler", "for", "existing", "keys", "." ]
python
train
46.833333
apache/incubator-mxnet
example/rcnn/symdata/bbox.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rcnn/symdata/bbox.py#L79-L104
def bbox_transform(ex_rois, gt_rois, box_stds): """ compute bounding box regression targets from ex_rois to gt_rois :param ex_rois: [N, 4] :param gt_rois: [N, 4] :return: [N, 4] """ assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number' ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0 ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0 ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0) ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0) gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0 gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0 gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0) gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0) targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0] targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1] targets_dw = np.log(gt_widths / ex_widths) / box_stds[2] targets_dh = np.log(gt_heights / ex_heights) / box_stds[3] targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose() return targets
[ "def", "bbox_transform", "(", "ex_rois", ",", "gt_rois", ",", "box_stds", ")", ":", "assert", "ex_rois", ".", "shape", "[", "0", "]", "==", "gt_rois", ".", "shape", "[", "0", "]", ",", "'inconsistent rois number'", "ex_widths", "=", "ex_rois", "[", ":", ",", "2", "]", "-", "ex_rois", "[", ":", ",", "0", "]", "+", "1.0", "ex_heights", "=", "ex_rois", "[", ":", ",", "3", "]", "-", "ex_rois", "[", ":", ",", "1", "]", "+", "1.0", "ex_ctr_x", "=", "ex_rois", "[", ":", ",", "0", "]", "+", "0.5", "*", "(", "ex_widths", "-", "1.0", ")", "ex_ctr_y", "=", "ex_rois", "[", ":", ",", "1", "]", "+", "0.5", "*", "(", "ex_heights", "-", "1.0", ")", "gt_widths", "=", "gt_rois", "[", ":", ",", "2", "]", "-", "gt_rois", "[", ":", ",", "0", "]", "+", "1.0", "gt_heights", "=", "gt_rois", "[", ":", ",", "3", "]", "-", "gt_rois", "[", ":", ",", "1", "]", "+", "1.0", "gt_ctr_x", "=", "gt_rois", "[", ":", ",", "0", "]", "+", "0.5", "*", "(", "gt_widths", "-", "1.0", ")", "gt_ctr_y", "=", "gt_rois", "[", ":", ",", "1", "]", "+", "0.5", "*", "(", "gt_heights", "-", "1.0", ")", "targets_dx", "=", "(", "gt_ctr_x", "-", "ex_ctr_x", ")", "/", "(", "ex_widths", "+", "1e-14", ")", "/", "box_stds", "[", "0", "]", "targets_dy", "=", "(", "gt_ctr_y", "-", "ex_ctr_y", ")", "/", "(", "ex_heights", "+", "1e-14", ")", "/", "box_stds", "[", "1", "]", "targets_dw", "=", "np", ".", "log", "(", "gt_widths", "/", "ex_widths", ")", "/", "box_stds", "[", "2", "]", "targets_dh", "=", "np", ".", "log", "(", "gt_heights", "/", "ex_heights", ")", "/", "box_stds", "[", "3", "]", "targets", "=", "np", ".", "vstack", "(", "(", "targets_dx", ",", "targets_dy", ",", "targets_dw", ",", "targets_dh", ")", ")", ".", "transpose", "(", ")", "return", "targets" ]
compute bounding box regression targets from ex_rois to gt_rois :param ex_rois: [N, 4] :param gt_rois: [N, 4] :return: [N, 4]
[ "compute", "bounding", "box", "regression", "targets", "from", "ex_rois", "to", "gt_rois", ":", "param", "ex_rois", ":", "[", "N", "4", "]", ":", "param", "gt_rois", ":", "[", "N", "4", "]", ":", "return", ":", "[", "N", "4", "]" ]
python
train
41.230769
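A quick check of bbox_transform (assuming the function above is in scope): identical proposal and ground-truth boxes give all-zero regression targets.

    import numpy as np

    ex = np.array([[10., 10., 50., 50.]])     # proposal box
    gt = np.array([[10., 10., 50., 50.]])     # ground truth, identical on purpose
    box_stds = (0.1, 0.1, 0.2, 0.2)           # typical normalisation stds; illustrative

    print(bbox_transform(ex, gt, box_stds))   # [[0. 0. 0. 0.]]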
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1995-L1998
def p_sysargs(self, p): 'sysargs : sysargs COMMA sysarg' p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1))
[ "def", "p_sysargs", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "(", "p", "[", "3", "]", ",", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
sysargs : sysargs COMMA sysarg
[ "sysargs", ":", "sysargs", "COMMA", "sysarg" ]
python
train
32
annoviko/pyclustering
pyclustering/nnet/som.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/som.py#L712-L728
def get_winner_number(self): """! @brief Calculates number of winner at the last step of learning process. @return (uint) Number of winner. """ if self.__ccore_som_pointer is not None: self._award = wrapper.som_get_awards(self.__ccore_som_pointer) winner_number = 0 for i in range(self._size): if self._award[i] > 0: winner_number += 1 return winner_number
[ "def", "get_winner_number", "(", "self", ")", ":", "if", "self", ".", "__ccore_som_pointer", "is", "not", "None", ":", "self", ".", "_award", "=", "wrapper", ".", "som_get_awards", "(", "self", ".", "__ccore_som_pointer", ")", "winner_number", "=", "0", "for", "i", "in", "range", "(", "self", ".", "_size", ")", ":", "if", "self", ".", "_award", "[", "i", "]", ">", "0", ":", "winner_number", "+=", "1", "return", "winner_number" ]
! @brief Calculates number of winner at the last step of learning process. @return (uint) Number of winner.
[ "!" ]
python
valid
30.176471
pymacaron/pymacaron-core
pymacaron_core/swagger/spec.py
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L142-L213
def call_on_each_endpoint(self, callback): """Find all server endpoints defined in the swagger spec and calls 'callback' for each, with an instance of EndpointData as argument. """ if 'paths' not in self.swagger_dict: return for path, d in list(self.swagger_dict['paths'].items()): for method, op_spec in list(d.items()): data = EndpointData(path, method) # Which server method handles this endpoint? if 'x-bind-server' not in op_spec: if 'x-no-bind-server' in op_spec: # That route should not be auto-generated log.info("Skipping generation of %s %s" % (method, path)) continue else: raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path)) data.handler_server = op_spec['x-bind-server'] # Make sure that endpoint only produces 'application/json' if 'produces' not in op_spec: raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path)) if len(op_spec['produces']) != 1: raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path)) if op_spec['produces'][0] == 'application/json': data.produces_json = True elif op_spec['produces'][0] == 'text/html': data.produces_html = True else: raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path)) # Which client method handles this endpoint? if 'x-bind-client' in op_spec: data.handler_client = op_spec['x-bind-client'] # Should we decorate the server handler? if 'x-decorate-server' in op_spec: data.decorate_server = op_spec['x-decorate-server'] # Should we manipulate the requests parameters? if 'x-decorate-request' in op_spec: data.decorate_request = op_spec['x-decorate-request'] # Generate a bravado-core operation object data.operation = Operation.from_spec(self.spec, path, method, op_spec) # Figure out how parameters are passed: one json in body? one or # more values in query? if 'parameters' in op_spec: params = op_spec['parameters'] for p in params: if p['in'] == 'body': data.param_in_body = True if p['in'] == 'query': data.param_in_query = True if p['in'] == 'path': data.param_in_path = True if data.param_in_path: # Substitute {...} with <...> in path, to make a Flask friendly path data.path = data.path.replace('{', '<').replace('}', '>') if data.param_in_body and data.param_in_query: raise Exception("Cannot support params in both body and param (%s %s)" % (method, path)) else: data.no_params = True callback(data)
[ "def", "call_on_each_endpoint", "(", "self", ",", "callback", ")", ":", "if", "'paths'", "not", "in", "self", ".", "swagger_dict", ":", "return", "for", "path", ",", "d", "in", "list", "(", "self", ".", "swagger_dict", "[", "'paths'", "]", ".", "items", "(", ")", ")", ":", "for", "method", ",", "op_spec", "in", "list", "(", "d", ".", "items", "(", ")", ")", ":", "data", "=", "EndpointData", "(", "path", ",", "method", ")", "# Which server method handles this endpoint?", "if", "'x-bind-server'", "not", "in", "op_spec", ":", "if", "'x-no-bind-server'", "in", "op_spec", ":", "# That route should not be auto-generated", "log", ".", "info", "(", "\"Skipping generation of %s %s\"", "%", "(", "method", ",", "path", ")", ")", "continue", "else", ":", "raise", "Exception", "(", "\"Swagger api defines no x-bind-server for %s %s\"", "%", "(", "method", ",", "path", ")", ")", "data", ".", "handler_server", "=", "op_spec", "[", "'x-bind-server'", "]", "# Make sure that endpoint only produces 'application/json'", "if", "'produces'", "not", "in", "op_spec", ":", "raise", "Exception", "(", "\"Swagger api has no 'produces' section for %s %s\"", "%", "(", "method", ",", "path", ")", ")", "if", "len", "(", "op_spec", "[", "'produces'", "]", ")", "!=", "1", ":", "raise", "Exception", "(", "\"Expecting only one type under 'produces' for %s %s\"", "%", "(", "method", ",", "path", ")", ")", "if", "op_spec", "[", "'produces'", "]", "[", "0", "]", "==", "'application/json'", ":", "data", ".", "produces_json", "=", "True", "elif", "op_spec", "[", "'produces'", "]", "[", "0", "]", "==", "'text/html'", ":", "data", ".", "produces_html", "=", "True", "else", ":", "raise", "Exception", "(", "\"Only 'application/json' or 'text/html' are supported. See %s %s\"", "%", "(", "method", ",", "path", ")", ")", "# Which client method handles this endpoint?", "if", "'x-bind-client'", "in", "op_spec", ":", "data", ".", "handler_client", "=", "op_spec", "[", "'x-bind-client'", "]", "# Should we decorate the server handler?", "if", "'x-decorate-server'", "in", "op_spec", ":", "data", ".", "decorate_server", "=", "op_spec", "[", "'x-decorate-server'", "]", "# Should we manipulate the requests parameters?", "if", "'x-decorate-request'", "in", "op_spec", ":", "data", ".", "decorate_request", "=", "op_spec", "[", "'x-decorate-request'", "]", "# Generate a bravado-core operation object", "data", ".", "operation", "=", "Operation", ".", "from_spec", "(", "self", ".", "spec", ",", "path", ",", "method", ",", "op_spec", ")", "# Figure out how parameters are passed: one json in body? 
one or", "# more values in query?", "if", "'parameters'", "in", "op_spec", ":", "params", "=", "op_spec", "[", "'parameters'", "]", "for", "p", "in", "params", ":", "if", "p", "[", "'in'", "]", "==", "'body'", ":", "data", ".", "param_in_body", "=", "True", "if", "p", "[", "'in'", "]", "==", "'query'", ":", "data", ".", "param_in_query", "=", "True", "if", "p", "[", "'in'", "]", "==", "'path'", ":", "data", ".", "param_in_path", "=", "True", "if", "data", ".", "param_in_path", ":", "# Substitute {...} with <...> in path, to make a Flask friendly path", "data", ".", "path", "=", "data", ".", "path", ".", "replace", "(", "'{'", ",", "'<'", ")", ".", "replace", "(", "'}'", ",", "'>'", ")", "if", "data", ".", "param_in_body", "and", "data", ".", "param_in_query", ":", "raise", "Exception", "(", "\"Cannot support params in both body and param (%s %s)\"", "%", "(", "method", ",", "path", ")", ")", "else", ":", "data", ".", "no_params", "=", "True", "callback", "(", "data", ")" ]
Find all server endpoints defined in the swagger spec and calls 'callback' for each, with an instance of EndpointData as argument.
[ "Find", "all", "server", "endpoints", "defined", "in", "the", "swagger", "spec", "and", "calls", "callback", "for", "each", "with", "an", "instance", "of", "EndpointData", "as", "argument", "." ]
python
train
47.222222
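A hedged usage sketch for call_on_each_endpoint above: `spec` stands for an already-built instance of the spec class in this record (its constructor is not shown here), and the callback reads only attributes that the code above assigns on EndpointData (method, path, handler_server, param_in_body, param_in_query, no_params).

def register_route(data):
    # data.path already has {param} rewritten to <param> when path parameters are present
    print("bind %s %s -> server handler %s"
          % (data.method.upper(), data.path, data.handler_server))
    if data.param_in_body:
        print("  expects a JSON body")
    elif data.param_in_query:
        print("  expects query parameters")
    elif data.no_params:
        print("  takes no parameters")

# spec.call_on_each_endpoint(register_route)   # uncomment once a spec instance exists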
zhanglab/psamm
psamm/fluxcoupling.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fluxcoupling.py#L94-L121
def solve(self, reaction_1, reaction_2): """Return the flux coupling between two reactions The flux coupling is returned as a tuple indicating the minimum and maximum value of the v1/v2 reaction flux ratio. A value of None as either the minimum or maximum indicates that the interval is unbounded in that direction. """ # Update objective for reaction_1 self._prob.set_objective(self._vbow(reaction_1)) # Update constraint for reaction_2 if self._reaction_constr is not None: self._reaction_constr.delete() self._reaction_constr, = self._prob.add_linear_constraints( self._vbow(reaction_2) == 1) results = [] for sense in (lp.ObjectiveSense.Minimize, lp.ObjectiveSense.Maximize): try: result = self._prob.solve(sense) except lp.SolverError: results.append(None) else: results.append(result.get_value(self._vbow(reaction_1))) return tuple(results)
[ "def", "solve", "(", "self", ",", "reaction_1", ",", "reaction_2", ")", ":", "# Update objective for reaction_1", "self", ".", "_prob", ".", "set_objective", "(", "self", ".", "_vbow", "(", "reaction_1", ")", ")", "# Update constraint for reaction_2", "if", "self", ".", "_reaction_constr", "is", "not", "None", ":", "self", ".", "_reaction_constr", ".", "delete", "(", ")", "self", ".", "_reaction_constr", ",", "=", "self", ".", "_prob", ".", "add_linear_constraints", "(", "self", ".", "_vbow", "(", "reaction_2", ")", "==", "1", ")", "results", "=", "[", "]", "for", "sense", "in", "(", "lp", ".", "ObjectiveSense", ".", "Minimize", ",", "lp", ".", "ObjectiveSense", ".", "Maximize", ")", ":", "try", ":", "result", "=", "self", ".", "_prob", ".", "solve", "(", "sense", ")", "except", "lp", ".", "SolverError", ":", "results", ".", "append", "(", "None", ")", "else", ":", "results", ".", "append", "(", "result", ".", "get_value", "(", "self", ".", "_vbow", "(", "reaction_1", ")", ")", ")", "return", "tuple", "(", "results", ")" ]
Return the flux coupling between two reactions The flux coupling is returned as a tuple indicating the minimum and maximum value of the v1/v2 reaction flux ratio. A value of None as either the minimum or maximum indicates that the interval is unbounded in that direction.
[ "Return", "the", "flux", "coupling", "between", "two", "reactions" ]
python
train
37.285714
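A small helper sketch for reading the tuple returned by solve() above; `problem` is assumed to be an already-configured instance of the flux-coupling class in this record, so the real call is left commented out and an illustrative tuple is used instead.

def describe_ratio(bounds):
    low, high = bounds
    low_s = 'unbounded' if low is None else '%.6g' % low
    high_s = 'unbounded' if high is None else '%.6g' % high
    return 'v1/v2 flux ratio: minimum %s, maximum %s' % (low_s, high_s)

# bounds = problem.solve('rxn_1', 'rxn_2')   # requires a prepared problem instance
bounds = (2.0, None)                          # illustrative value only
print(describe_ratio(bounds))                 # minimum 2, maximum unbounded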
saltstack/salt
salt/modules/trafficserver.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L450-L466
def clear_alarms(alarm): ''' Clear (acknowledge) an alarm event. The arguments are “all” for all current alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier (e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘). .. code-block:: bash salt '*' trafficserver.clear_alarms [all | #event | name] ''' if _TRAFFICCTL: cmd = _traffic_ctl('alarm', 'clear', alarm) else: cmd = _traffic_line('--clear_alarms', alarm) return _subprocess(cmd)
[ "def", "clear_alarms", "(", "alarm", ")", ":", "if", "_TRAFFICCTL", ":", "cmd", "=", "_traffic_ctl", "(", "'alarm'", ",", "'clear'", ",", "alarm", ")", "else", ":", "cmd", "=", "_traffic_line", "(", "'--clear_alarms'", ",", "alarm", ")", "return", "_subprocess", "(", "cmd", ")" ]
Clear (acknowledge) an alarm event. The arguments are “all” for all current alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier (e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘). .. code-block:: bash salt '*' trafficserver.clear_alarms [all | #event | name]
[ "Clear", "(", "acknowledge", ")", "an", "alarm", "event", ".", "The", "arguments", "are", "“all”", "for", "all", "current", "alarms", "a", "specific", "alarm", "number", "(", "e", ".", "g", ".", "‘‘1’‘", ")", "or", "an", "alarm", "string", "identifier", "(", "e", ".", "g", ".", "‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘", ")", "." ]
python
train
28.705882
bwohlberg/sporco
sporco/admm/cbpdn.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/cbpdn.py#L1347-L1357
def uinit(self, ushape): """Return initialiser for working variable U.""" if self.opt['Y0'] is None: return np.zeros(ushape, dtype=self.dtype) else: # If initial Y is non-zero, initial U is chosen so that # the relevant dual optimality criterion (see (3.10) in # boyd-2010-distributed) is satisfied. # NB: still needs to be worked out. return np.zeros(ushape, dtype=self.dtype)
[ "def", "uinit", "(", "self", ",", "ushape", ")", ":", "if", "self", ".", "opt", "[", "'Y0'", "]", "is", "None", ":", "return", "np", ".", "zeros", "(", "ushape", ",", "dtype", "=", "self", ".", "dtype", ")", "else", ":", "# If initial Y is non-zero, initial U is chosen so that", "# the relevant dual optimality criterion (see (3.10) in", "# boyd-2010-distributed) is satisfied.", "# NB: still needs to be worked out.", "return", "np", ".", "zeros", "(", "ushape", ",", "dtype", "=", "self", ".", "dtype", ")" ]
Return initialiser for working variable U.
[ "Return", "initialiser", "for", "working", "variable", "U", "." ]
python
train
42.181818
brechtm/rinohtype
src/rinoh/dimension.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/dimension.py#L49-L61
def _make_operator(method_name): """Return an operator method that takes parameters of type :class:`Dimension`, evaluates them, and delegates to the :class:`float` operator with name `method_name`""" def operator(self, other): """Operator delegating to the :class:`float` method `method_name`""" float_operator = getattr(float, method_name) try: float_other = float(other) except (ValueError, TypeError): return False return float_operator(float(self), float_other) return operator
[ "def", "_make_operator", "(", "method_name", ")", ":", "def", "operator", "(", "self", ",", "other", ")", ":", "\"\"\"Operator delegating to the :class:`float` method `method_name`\"\"\"", "float_operator", "=", "getattr", "(", "float", ",", "method_name", ")", "try", ":", "float_other", "=", "float", "(", "other", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "False", "return", "float_operator", "(", "float", "(", "self", ")", ",", "float_other", ")", "return", "operator" ]
Return an operator method that takes parameters of type :class:`Dimension`, evaluates them, and delegates to the :class:`float` operator with name `method_name`
[ "Return", "an", "operator", "method", "that", "takes", "parameters", "of", "type", ":", "class", ":", "Dimension", "evaluates", "them", "and", "delegates", "to", "the", ":", "class", ":", "float", "operator", "with", "name", "method_name" ]
python
train
46.230769
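A standalone sketch of the factory pattern used by _make_operator above: each generated method coerces both operands to float and delegates to the matching float operator, returning False when the other operand cannot be converted. The Length class is a made-up stand-in for the library's Dimension type, used only to keep the example self-contained.

def _make_operator(method_name):
    def operator(self, other):
        float_operator = getattr(float, method_name)
        try:
            float_other = float(other)
        except (ValueError, TypeError):
            return False
        return float_operator(float(self), float_other)
    return operator

class Length:
    def __init__(self, points):
        self.points = points
    def __float__(self):
        return float(self.points)

# Attach comparison operators without writing each one by hand.
for name in ('__lt__', '__le__', '__gt__', '__ge__', '__eq__'):
    setattr(Length, name, _make_operator(name))

print(Length(10) < Length(12))       # True, compared via float(10) < float(12)
print(Length(10) == 10.0)            # True
print(Length(10) < 'not a number')   # False: float() failed, so the operator returns False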
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L280-L286
def get_out_ip_addr(cls, tenant_id): """Retrieves the 'out' service subnet attributes. """ if tenant_id not in cls.serv_obj_dict: LOG.error("Fabric not prepared for tenant %s", tenant_id) return tenant_obj = cls.serv_obj_dict.get(tenant_id) return tenant_obj.get_out_ip_addr()
[ "def", "get_out_ip_addr", "(", "cls", ",", "tenant_id", ")", ":", "if", "tenant_id", "not", "in", "cls", ".", "serv_obj_dict", ":", "LOG", ".", "error", "(", "\"Fabric not prepared for tenant %s\"", ",", "tenant_id", ")", "return", "tenant_obj", "=", "cls", ".", "serv_obj_dict", ".", "get", "(", "tenant_id", ")", "return", "tenant_obj", ".", "get_out_ip_addr", "(", ")" ]
Retrieves the 'out' service subnet attributes.
[ "Retrieves", "the", "out", "service", "subnet", "attributes", "." ]
python
train
46.571429
mbr/flask-kvsession
flask_kvsession/__init__.py
https://github.com/mbr/flask-kvsession/blob/83238b74d4e4d2ffbdfd65c1c0a00ceb4bdfd9fa/flask_kvsession/__init__.py#L89-L104
def destroy(self): """Destroys a session completely, by deleting all keys and removing it from the internal store immediately. This allows removing a session for security reasons, e.g. a login stored in a session will cease to exist if the session is destroyed. """ for k in list(self.keys()): del self[k] if getattr(self, 'sid_s', None): current_app.kvsession_store.delete(self.sid_s) self.sid_s = None self.modified = False self.new = False
[ "def", "destroy", "(", "self", ")", ":", "for", "k", "in", "list", "(", "self", ".", "keys", "(", ")", ")", ":", "del", "self", "[", "k", "]", "if", "getattr", "(", "self", ",", "'sid_s'", ",", "None", ")", ":", "current_app", ".", "kvsession_store", ".", "delete", "(", "self", ".", "sid_s", ")", "self", ".", "sid_s", "=", "None", "self", ".", "modified", "=", "False", "self", ".", "new", "=", "False" ]
Destroys a session completely, by deleting all keys and removing it from the internal store immediately. This allows removing a session for security reasons, e.g. a login stored in a session will cease to exist if the session is destroyed.
[ "Destroys", "a", "session", "completely", "by", "deleting", "all", "keys", "and", "removing", "it", "from", "the", "internal", "store", "immediately", "." ]
python
train
33.625
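A hedged usage sketch showing where destroy() above is typically called: a logout view that wipes the server-side session so the old session id cannot be reused. It assumes a Flask application already set up with this extension's KVSessionExtension (not shown); the route and secret key below are illustrative.

from flask import Flask, session

app = Flask(__name__)
app.secret_key = 'example-only'   # placeholder value

@app.route('/logout')
def logout():
    # Deletes every key and removes the record from the backing store immediately.
    session.destroy()
    return 'logged out'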
budacom/trading-bots
trading_bots/utils.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L57-L61
def spread_value(value: Decimal, spread_p: Decimal) -> Tuple[Decimal, Decimal]: """Returns a lower and upper value separated by a spread percentage""" upper = value * (1 + spread_p) lower = value / (1 + spread_p) return lower, upper
[ "def", "spread_value", "(", "value", ":", "Decimal", ",", "spread_p", ":", "Decimal", ")", "->", "Tuple", "[", "Decimal", ",", "Decimal", "]", ":", "upper", "=", "value", "*", "(", "1", "+", "spread_p", ")", "lower", "=", "value", "/", "(", "1", "+", "spread_p", ")", "return", "lower", ",", "upper" ]
Returns a lower and upper value separated by a spread percentage
[ "Returns", "a", "lower", "and", "upper", "value", "separated", "by", "a", "spread", "percentage" ]
python
train
48.8
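A worked example of the computation above: with a mid value of 100 and a 5 % spread parameter, the upper side is multiplied by 1.05 while the lower side is divided by 1.05, so the two sides are not placed symmetrically around the input value.

from decimal import Decimal

value = Decimal('100')
spread_p = Decimal('0.05')
upper = value * (1 + spread_p)    # Decimal('105.00')
lower = value / (1 + spread_p)    # approximately Decimal('95.238095...')
print(lower, upper)               # matches the (lower, upper) tuple returned by spread_value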
ramses-tech/ramses
ramses/models.py
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/models.py#L59-L82
def prepare_relationship(config, model_name, raml_resource): """ Create referenced model if it doesn't exist. When preparing a relationship, we check to see if the model that will be referenced already exists. If not, it is created so that it will be possible to use it in a relationship. Thus the first usage of this model in RAML file must provide its schema in POST method resource body schema. :param model_name: Name of model which should be generated. :param raml_resource: Instance of ramlfications.raml.ResourceNode for which :model_name: will be defined. """ if get_existing_model(model_name) is None: plural_route = '/' + pluralize(model_name.lower()) route = '/' + model_name.lower() for res in raml_resource.root.resources: if res.method.upper() != 'POST': continue if res.path.endswith(plural_route) or res.path.endswith(route): break else: raise ValueError('Model `{}` used in relationship is not ' 'defined'.format(model_name)) setup_data_model(config, res, model_name)
[ "def", "prepare_relationship", "(", "config", ",", "model_name", ",", "raml_resource", ")", ":", "if", "get_existing_model", "(", "model_name", ")", "is", "None", ":", "plural_route", "=", "'/'", "+", "pluralize", "(", "model_name", ".", "lower", "(", ")", ")", "route", "=", "'/'", "+", "model_name", ".", "lower", "(", ")", "for", "res", "in", "raml_resource", ".", "root", ".", "resources", ":", "if", "res", ".", "method", ".", "upper", "(", ")", "!=", "'POST'", ":", "continue", "if", "res", ".", "path", ".", "endswith", "(", "plural_route", ")", "or", "res", ".", "path", ".", "endswith", "(", "route", ")", ":", "break", "else", ":", "raise", "ValueError", "(", "'Model `{}` used in relationship is not '", "'defined'", ".", "format", "(", "model_name", ")", ")", "setup_data_model", "(", "config", ",", "res", ",", "model_name", ")" ]
Create referenced model if it doesn't exist. When preparing a relationship, we check to see if the model that will be referenced already exists. If not, it is created so that it will be possible to use it in a relationship. Thus the first usage of this model in RAML file must provide its schema in POST method resource body schema. :param model_name: Name of model which should be generated. :param raml_resource: Instance of ramlfications.raml.ResourceNode for which :model_name: will be defined.
[ "Create", "referenced", "model", "if", "it", "doesn", "t", "exist", "." ]
python
train
47.666667
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/jwk/rsa.py
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/jwk/rsa.py#L503-L523
def new_rsa_key(key_size=2048, kid='', use='', public_exponent=65537): """ Creates a new RSA key pair and wraps it in a :py:class:`cryptojwt.jwk.rsa.RSAKey` instance :param key_size: The size of the key :param kid: The key ID :param use: What the is supposed to be used for. 2 choices 'sig'/'enc' :param public_exponent: The value of the public exponent. :return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance """ _key = rsa.generate_private_key(public_exponent=public_exponent, key_size=key_size, backend=default_backend()) _rk = RSAKey(priv_key=_key, use=use, kid=kid) if not kid: _rk.add_kid() return _rk
[ "def", "new_rsa_key", "(", "key_size", "=", "2048", ",", "kid", "=", "''", ",", "use", "=", "''", ",", "public_exponent", "=", "65537", ")", ":", "_key", "=", "rsa", ".", "generate_private_key", "(", "public_exponent", "=", "public_exponent", ",", "key_size", "=", "key_size", ",", "backend", "=", "default_backend", "(", ")", ")", "_rk", "=", "RSAKey", "(", "priv_key", "=", "_key", ",", "use", "=", "use", ",", "kid", "=", "kid", ")", "if", "not", "kid", ":", "_rk", ".", "add_kid", "(", ")", "return", "_rk" ]
Creates a new RSA key pair and wraps it in a :py:class:`cryptojwt.jwk.rsa.RSAKey` instance :param key_size: The size of the key :param kid: The key ID :param use: What the is supposed to be used for. 2 choices 'sig'/'enc' :param public_exponent: The value of the public exponent. :return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance
[ "Creates", "a", "new", "RSA", "key", "pair", "and", "wraps", "it", "in", "a", ":", "py", ":", "class", ":", "cryptojwt", ".", "jwk", ".", "rsa", ".", "RSAKey", "instance" ]
python
train
34.52381
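A short usage sketch for the factory above; the import path follows this record's module path and requires the cryptojwt package (and its cryptography dependency) to be installed.

from cryptojwt.jwk.rsa import new_rsa_key

key = new_rsa_key(key_size=2048, use='sig')   # generate a 2048-bit signing key pair
print(key.use)    # 'sig'
print(key.kid)    # non-empty: a key ID was derived because none was supplied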
LogicalDash/LiSE
allegedb/allegedb/cache.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/cache.py#L931-L938
def iter_predecessors(self, graph, dest, branch, turn, tick, *, forward=None): """Iterate over predecessors to a given destination node at a given time.""" if self.db._no_kc: yield from self._adds_dels_sucpred(self.predecessors[graph, dest], branch, turn, tick)[0] return if forward is None: forward = self.db._forward yield from self._get_origcache(graph, dest, branch, turn, tick, forward=forward)
[ "def", "iter_predecessors", "(", "self", ",", "graph", ",", "dest", ",", "branch", ",", "turn", ",", "tick", ",", "*", ",", "forward", "=", "None", ")", ":", "if", "self", ".", "db", ".", "_no_kc", ":", "yield", "from", "self", ".", "_adds_dels_sucpred", "(", "self", ".", "predecessors", "[", "graph", ",", "dest", "]", ",", "branch", ",", "turn", ",", "tick", ")", "[", "0", "]", "return", "if", "forward", "is", "None", ":", "forward", "=", "self", ".", "db", ".", "_forward", "yield", "from", "self", ".", "_get_origcache", "(", "graph", ",", "dest", ",", "branch", ",", "turn", ",", "tick", ",", "forward", "=", "forward", ")" ]
Iterate over predecessors to a given destination node at a given time.
[ "Iterate", "over", "predecessors", "to", "a", "given", "destination", "node", "at", "a", "given", "time", "." ]
python
train
57.5
MeirKriheli/django-bidi-utils
bidiutils/context_processors.py
https://github.com/MeirKriheli/django-bidi-utils/blob/48a8c481fe728fbccf486582999e79454195036e/bidiutils/context_processors.py#L3-L33
def bidi(request): """Adds to the context BiDi related variables LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl') LANGUAGE_START -- Start of language layout ('right' for rtl, 'left' for 'ltr') LANGUAGE_END -- End of language layout ('left' for rtl, 'right' for 'ltr') LANGUAGE_MARKER -- Language marker entity ('&rlm;' for rtl, '&lrm' for ltr) """ from django.utils import translation from django.utils.safestring import mark_safe if translation.get_language_bidi(): extra_context = { 'LANGUAGE_DIRECTION':'rtl', 'LANGUAGE_START':'right', 'LANGUAGE_END':'left', 'LANGUAGE_MARKER': mark_safe('&rlm;'), } else: extra_context = { 'LANGUAGE_DIRECTION':'ltr', 'LANGUAGE_START':'left', 'LANGUAGE_END':'right', 'LANGUAGE_MARKER': mark_safe('&lrm;'), } return extra_context
[ "def", "bidi", "(", "request", ")", ":", "from", "django", ".", "utils", "import", "translation", "from", "django", ".", "utils", ".", "safestring", "import", "mark_safe", "if", "translation", ".", "get_language_bidi", "(", ")", ":", "extra_context", "=", "{", "'LANGUAGE_DIRECTION'", ":", "'rtl'", ",", "'LANGUAGE_START'", ":", "'right'", ",", "'LANGUAGE_END'", ":", "'left'", ",", "'LANGUAGE_MARKER'", ":", "mark_safe", "(", "'&rlm;'", ")", ",", "}", "else", ":", "extra_context", "=", "{", "'LANGUAGE_DIRECTION'", ":", "'ltr'", ",", "'LANGUAGE_START'", ":", "'left'", ",", "'LANGUAGE_END'", ":", "'right'", ",", "'LANGUAGE_MARKER'", ":", "mark_safe", "(", "'&lrm;'", ")", ",", "}", "return", "extra_context" ]
Adds to the context BiDi related variables LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl') LANGUAGE_START -- Start of language layout ('right' for rtl, 'left' for 'ltr') LANGUAGE_END -- End of language layout ('left' for rtl, 'right' for 'ltr') LANGUAGE_MARKER -- Language marker entity ('&rlm;' for rtl, '&lrm' for ltr)
[ "Adds", "to", "the", "context", "BiDi", "related", "variables" ]
python
test
32.193548
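A hedged wiring sketch for the context processor above in a Django project: the TEMPLATES block is the standard Django settings layout and the dotted path follows this record's module path; the template snippet in the trailing comment shows how the injected variables are typically consumed.

# settings.py
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'bidiutils.context_processors.bidi',
        ],
    },
}]

# In a template, the injected variables drive direction-aware markup, e.g.:
#   <html dir="{{ LANGUAGE_DIRECTION }}">
#     <div style="text-align: {{ LANGUAGE_START }}">{{ LANGUAGE_MARKER }}...</div>
#   </html>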
tensorforce/tensorforce
tensorforce/contrib/openai_universe.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/contrib/openai_universe.py#L86-L96
def _int_to_pos(self, flat_position): """Returns x, y from flat_position integer. Args: flat_position: flattened position integer Returns: x, y """ return flat_position % self.env.action_space.screen_shape[0],\ flat_position % self.env.action_space.screen_shape[1]
[ "def", "_int_to_pos", "(", "self", ",", "flat_position", ")", ":", "return", "flat_position", "%", "self", ".", "env", ".", "action_space", ".", "screen_shape", "[", "0", "]", ",", "flat_position", "%", "self", ".", "env", ".", "action_space", ".", "screen_shape", "[", "1", "]" ]
Returns x, y from flat_position integer. Args: flat_position: flattened position integer Returns: x, y
[ "Returns", "x", "y", "from", "flat_position", "integer", "." ]
python
valid
29.181818
bluec0re/android-backup-tools
android_backup/android_backup.py
https://github.com/bluec0re/android-backup-tools/blob/e2e0d95e56624c1a99a176df9e307398e837d908/android_backup/android_backup.py#L145-L236
def _decrypt(self, fp, password=None): """ Internal decryption function Uses either the password argument for the decryption, or, if not supplied, the password field of the object :param fp: a file object or similar which supports the readline and read methods :rtype: Proxy """ if AES is None: raise ImportError("PyCrypto required") if password is None: password = self.password if password is None: raise ValueError( "Password need to be provided to extract encrypted archives") # read the PBKDF2 parameters # salt user_salt = fp.readline().strip() user_salt = binascii.a2b_hex(user_salt) # checksum salt ck_salt = fp.readline().strip() ck_salt = binascii.a2b_hex(ck_salt) # hashing rounds rounds = fp.readline().strip() rounds = int(rounds) # encryption IV iv = fp.readline().strip() iv = binascii.a2b_hex(iv) # encrypted master key master_key = fp.readline().strip() master_key = binascii.a2b_hex(master_key) # generate key for decrypting the master key user_key = PBKDF2(password, user_salt, dkLen=256 // 8, count=rounds) # decrypt the master key and iv cipher = AES.new(user_key, mode=AES.MODE_CBC, IV=iv) master_key = bytearray(cipher.decrypt(master_key)) # format: <len IV: 1 byte><IV: n bytes><len key: 1 byte><key: m bytes><len checksum: 1 byte><checksum: k bytes> # get IV l = master_key.pop(0) master_iv = bytes(master_key[:l]) master_key = master_key[l:] # get key l = master_key.pop(0) mk = bytes(master_key[:l]) master_key = master_key[l:] # get checksum l = master_key.pop(0) master_ck = bytes(master_key[:l]) # double encode utf8 utf8mk = self.encode_utf8(mk) # calculate checksum by using PBKDF2 calc_ck = PBKDF2(utf8mk, ck_salt, dkLen=256//8, count=rounds) assert calc_ck == master_ck # install decryption key cipher = AES.new(mk, mode=AES.MODE_CBC, IV=master_iv) off = fp.tell() fp.seek(0, 2) length = fp.tell() - off fp.seek(off) if self.stream: # decryption transformer for Proxy class def decrypt(data): data = bytearray(cipher.decrypt(data)) if fp.tell() - off >= length: # check padding (PKCS#7) pad = data[-1] assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:]) data = data[:-pad] return data return Proxy(decrypt, fp, cipher.block_size) else: data = bytearray(cipher.decrypt(fp.read())) pad = data[-1] assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:]) data = data[:-pad] return io.BytesIO(data)
[ "def", "_decrypt", "(", "self", ",", "fp", ",", "password", "=", "None", ")", ":", "if", "AES", "is", "None", ":", "raise", "ImportError", "(", "\"PyCrypto required\"", ")", "if", "password", "is", "None", ":", "password", "=", "self", ".", "password", "if", "password", "is", "None", ":", "raise", "ValueError", "(", "\"Password need to be provided to extract encrypted archives\"", ")", "# read the PBKDF2 parameters", "# salt", "user_salt", "=", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "user_salt", "=", "binascii", ".", "a2b_hex", "(", "user_salt", ")", "# checksum salt", "ck_salt", "=", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "ck_salt", "=", "binascii", ".", "a2b_hex", "(", "ck_salt", ")", "# hashing rounds", "rounds", "=", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "rounds", "=", "int", "(", "rounds", ")", "# encryption IV", "iv", "=", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "iv", "=", "binascii", ".", "a2b_hex", "(", "iv", ")", "# encrypted master key", "master_key", "=", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "master_key", "=", "binascii", ".", "a2b_hex", "(", "master_key", ")", "# generate key for decrypting the master key", "user_key", "=", "PBKDF2", "(", "password", ",", "user_salt", ",", "dkLen", "=", "256", "//", "8", ",", "count", "=", "rounds", ")", "# decrypt the master key and iv", "cipher", "=", "AES", ".", "new", "(", "user_key", ",", "mode", "=", "AES", ".", "MODE_CBC", ",", "IV", "=", "iv", ")", "master_key", "=", "bytearray", "(", "cipher", ".", "decrypt", "(", "master_key", ")", ")", "# format: <len IV: 1 byte><IV: n bytes><len key: 1 byte><key: m bytes><len checksum: 1 byte><checksum: k bytes>", "# get IV", "l", "=", "master_key", ".", "pop", "(", "0", ")", "master_iv", "=", "bytes", "(", "master_key", "[", ":", "l", "]", ")", "master_key", "=", "master_key", "[", "l", ":", "]", "# get key", "l", "=", "master_key", ".", "pop", "(", "0", ")", "mk", "=", "bytes", "(", "master_key", "[", ":", "l", "]", ")", "master_key", "=", "master_key", "[", "l", ":", "]", "# get checksum", "l", "=", "master_key", ".", "pop", "(", "0", ")", "master_ck", "=", "bytes", "(", "master_key", "[", ":", "l", "]", ")", "# double encode utf8", "utf8mk", "=", "self", ".", "encode_utf8", "(", "mk", ")", "# calculate checksum by using PBKDF2", "calc_ck", "=", "PBKDF2", "(", "utf8mk", ",", "ck_salt", ",", "dkLen", "=", "256", "//", "8", ",", "count", "=", "rounds", ")", "assert", "calc_ck", "==", "master_ck", "# install decryption key", "cipher", "=", "AES", ".", "new", "(", "mk", ",", "mode", "=", "AES", ".", "MODE_CBC", ",", "IV", "=", "master_iv", ")", "off", "=", "fp", ".", "tell", "(", ")", "fp", ".", "seek", "(", "0", ",", "2", ")", "length", "=", "fp", ".", "tell", "(", ")", "-", "off", "fp", ".", "seek", "(", "off", ")", "if", "self", ".", "stream", ":", "# decryption transformer for Proxy class", "def", "decrypt", "(", "data", ")", ":", "data", "=", "bytearray", "(", "cipher", ".", "decrypt", "(", "data", ")", ")", "if", "fp", ".", "tell", "(", ")", "-", "off", ">=", "length", ":", "# check padding (PKCS#7)", "pad", "=", "data", "[", "-", "1", "]", "assert", "data", ".", "endswith", "(", "bytearray", "(", "[", "pad", "]", "*", "pad", ")", ")", ",", "\"Expected {!r} got {!r}\"", ".", "format", "(", "bytearray", "(", "[", "pad", "]", "*", "pad", ")", ",", "data", "[", "-", "pad", ":", "]", ")", "data", "=", "data", "[", ":", "-", "pad", "]", "return", "data", "return", "Proxy", "(", "decrypt", ",", "fp", ",", 
"cipher", ".", "block_size", ")", "else", ":", "data", "=", "bytearray", "(", "cipher", ".", "decrypt", "(", "fp", ".", "read", "(", ")", ")", ")", "pad", "=", "data", "[", "-", "1", "]", "assert", "data", ".", "endswith", "(", "bytearray", "(", "[", "pad", "]", "*", "pad", ")", ")", ",", "\"Expected {!r} got {!r}\"", ".", "format", "(", "bytearray", "(", "[", "pad", "]", "*", "pad", ")", ",", "data", "[", "-", "pad", ":", "]", ")", "data", "=", "data", "[", ":", "-", "pad", "]", "return", "io", ".", "BytesIO", "(", "data", ")" ]
Internal decryption function Uses either the password argument for the decryption, or, if not supplied, the password field of the object :param fp: a file object or similar which supports the readline and read methods :rtype: Proxy
[ "Internal", "decryption", "function" ]
python
train
34.804348
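A standalone sketch of the length-prefixed layout that _decrypt above pulls apart after decrypting the master-key field (<len IV><IV><len key><key><len checksum><checksum>); the helper name and the demo bytes are made up for illustration and are not part of the original module.

def split_master_key_blob(blob):
    blob = bytearray(blob)
    fields = []
    for _ in range(3):                 # IV, key, checksum, in that order
        length = blob.pop(0)           # one-byte length prefix
        fields.append(bytes(blob[:length]))
        blob = blob[length:]
    return tuple(fields)               # (master_iv, master_key, checksum)

demo = bytes([2]) + b'IV' + bytes([3]) + b'KEY' + bytes([2]) + b'CK'
iv, key, ck = split_master_key_blob(demo)
print(iv, key, ck)                     # b'IV' b'KEY' b'CK'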
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L374-L430
def array( item_processor, # type: Processor alias=None, # type: Optional[Text] nested=None, # type: Optional[Text] omit_empty=False, # type: bool hooks=None # type: Optional[Hooks] ): # type: (...) -> RootProcessor """ Create an array processor that can be used to parse and serialize array data. XML arrays may be nested within an array element, or they may be embedded within their parent. A nested array would look like: .. sourcecode:: xml <root-element> <some-element>ABC</some-element> <nested-array> <array-item>0</array-item> <array-item>1</array-item> </nested-array> </root-element> The corresponding embedded array would look like: .. sourcecode:: xml <root-element> <some-element>ABC</some-element> <array-item>0</array-item> <array-item>1</array-item> </root-element> An array is considered required when its item processor is configured as being required. :param item_processor: A declxml processor object for the items of the array. :param alias: If specified, the name given to the array when read from XML. If not specified, then the name of the item processor is used instead. :param nested: If the array is a nested array, then this should be the name of the element under which all array items are located. If not specified, then the array is treated as an embedded array. Can also be specified using supported XPath syntax. :param omit_empty: If True, then nested arrays will be omitted when serializing if they are empty. Only valid when nested is specified. Note that an empty array may only be omitted if it is not itself contained within an array. That is, for an array of arrays, any empty arrays in the outer array will always be serialized to prevent information about the original array from being lost when serializing. :param hooks: A Hooks object. :return: A declxml processor object. """ processor = _Array(item_processor, alias, nested, omit_empty) return _processor_wrap_if_hooks(processor, hooks)
[ "def", "array", "(", "item_processor", ",", "# type: Processor", "alias", "=", "None", ",", "# type: Optional[Text]", "nested", "=", "None", ",", "# type: Optional[Text]", "omit_empty", "=", "False", ",", "# type: bool", "hooks", "=", "None", "# type: Optional[Hooks]", ")", ":", "# type: (...) -> RootProcessor", "processor", "=", "_Array", "(", "item_processor", ",", "alias", ",", "nested", ",", "omit_empty", ")", "return", "_processor_wrap_if_hooks", "(", "processor", ",", "hooks", ")" ]
Create an array processor that can be used to parse and serialize array data. XML arrays may be nested within an array element, or they may be embedded within their parent. A nested array would look like: .. sourcecode:: xml <root-element> <some-element>ABC</some-element> <nested-array> <array-item>0</array-item> <array-item>1</array-item> </nested-array> </root-element> The corresponding embedded array would look like: .. sourcecode:: xml <root-element> <some-element>ABC</some-element> <array-item>0</array-item> <array-item>1</array-item> </root-element> An array is considered required when its item processor is configured as being required. :param item_processor: A declxml processor object for the items of the array. :param alias: If specified, the name given to the array when read from XML. If not specified, then the name of the item processor is used instead. :param nested: If the array is a nested array, then this should be the name of the element under which all array items are located. If not specified, then the array is treated as an embedded array. Can also be specified using supported XPath syntax. :param omit_empty: If True, then nested arrays will be omitted when serializing if they are empty. Only valid when nested is specified. Note that an empty array may only be omitted if it is not itself contained within an array. That is, for an array of arrays, any empty arrays in the outer array will always be serialized to prevent information about the original array from being lost when serializing. :param hooks: A Hooks object. :return: A declxml processor object.
[ "Create", "an", "array", "processor", "that", "can", "be", "used", "to", "parse", "and", "serialize", "array", "data", "." ]
python
train
38.877193
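A hedged usage sketch matching the nested-array XML shown in the docstring above. It assumes the declxml module's dictionary, string, integer and parse_from_string helpers, which belong to the same library as the array() factory in this record; the explicit alias is supplied so the result key is predictable.

import declxml as xml

processor = xml.dictionary('root-element', [
    xml.string('some-element'),
    xml.array(xml.integer('array-item'), alias='values', nested='nested-array'),
])

document = """
<root-element>
    <some-element>ABC</some-element>
    <nested-array>
        <array-item>0</array-item>
        <array-item>1</array-item>
    </nested-array>
</root-element>
"""

print(xml.parse_from_string(processor, document))
# expected shape (roughly): {'some-element': 'ABC', 'values': [0, 1]}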
MacHu-GWU/single_file_module-project
sfm/rnd.py
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rnd.py#L92-L102
def rand_ssn(): """Random SSN. (9 digits) Example:: >>> rand_ssn() 295-50-0178 """ return "%s-%s-%s" % (rand_str(3, string.digits), rand_str(2, string.digits), rand_str(4, string.digits))
[ "def", "rand_ssn", "(", ")", ":", "return", "\"%s-%s-%s\"", "%", "(", "rand_str", "(", "3", ",", "string", ".", "digits", ")", ",", "rand_str", "(", "2", ",", "string", ".", "digits", ")", ",", "rand_str", "(", "4", ",", "string", ".", "digits", ")", ")" ]
Random SSN. (9 digits) Example:: >>> rand_ssn() 295-50-0178
[ "Random", "SSN", ".", "(", "9", "digits", ")" ]
python
train
23.727273
jtwhite79/pyemu
pyemu/prototypes/da.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/prototypes/da.py#L299-L309
def update(self): """update performs the analysis, then runs the forecast using the updated self.parensemble. This can be called repeatedly to iterate...""" parensemble = self.analysis_evensen() obsensemble = self.forecast(parensemble=parensemble) # todo: check for phi improvement if True: self.obsensemble = obsensemble self.parensemble = parensemble self.iter_num += 1
[ "def", "update", "(", "self", ")", ":", "parensemble", "=", "self", ".", "analysis_evensen", "(", ")", "obsensemble", "=", "self", ".", "forecast", "(", "parensemble", "=", "parensemble", ")", "# todo: check for phi improvement", "if", "True", ":", "self", ".", "obsensemble", "=", "obsensemble", "self", ".", "parensemble", "=", "parensemble", "self", ".", "iter_num", "+=", "1" ]
update performs the analysis, then runs the forecast using the updated self.parensemble. This can be called repeatedly to iterate...
[ "update", "performs", "the", "analysis", "then", "runs", "the", "forecast", "using", "the", "updated", "self", ".", "parensemble", ".", "This", "can", "be", "called", "repeatedly", "to", "iterate", "..." ]
python
train
40.272727
ergoithz/browsepy
browsepy/manager.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L302-L339
def create_widget(self, place, type, file=None, **kwargs): ''' Create a widget object based on given arguments. If file object is provided, callable arguments will be resolved: its return value will be used after calling them with file as first parameter. All extra `kwargs` parameters will be passed to widget constructor. :param place: place hint where widget should be shown. :type place: str :param type: widget type name as taken from :attr:`widget_types` dict keys. :type type: str :param file: optional file object for widget attribute resolving :type type: browsepy.files.Node or None :returns: widget instance :rtype: object ''' widget_class = self.widget_types.get(type, self.widget_types['base']) kwargs.update(place=place, type=type) try: element = widget_class(**kwargs) except TypeError as e: message = e.args[0] if e.args else '' if ( 'unexpected keyword argument' in message or 'required positional argument' in message ): raise WidgetParameterException( 'type %s; %s; available: %r' % (type, message, widget_class._fields) ) raise e if file and any(map(callable, element)): return self._resolve_widget(file, element) return element
[ "def", "create_widget", "(", "self", ",", "place", ",", "type", ",", "file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "widget_class", "=", "self", ".", "widget_types", ".", "get", "(", "type", ",", "self", ".", "widget_types", "[", "'base'", "]", ")", "kwargs", ".", "update", "(", "place", "=", "place", ",", "type", "=", "type", ")", "try", ":", "element", "=", "widget_class", "(", "*", "*", "kwargs", ")", "except", "TypeError", "as", "e", ":", "message", "=", "e", ".", "args", "[", "0", "]", "if", "e", ".", "args", "else", "''", "if", "(", "'unexpected keyword argument'", "in", "message", "or", "'required positional argument'", "in", "message", ")", ":", "raise", "WidgetParameterException", "(", "'type %s; %s; available: %r'", "%", "(", "type", ",", "message", ",", "widget_class", ".", "_fields", ")", ")", "raise", "e", "if", "file", "and", "any", "(", "map", "(", "callable", ",", "element", ")", ")", ":", "return", "self", ".", "_resolve_widget", "(", "file", ",", "element", ")", "return", "element" ]
Create a widget object based on given arguments. If file object is provided, callable arguments will be resolved: its return value will be used after calling them with file as first parameter. All extra `kwargs` parameters will be passed to widget constructor. :param place: place hint where widget should be shown. :type place: str :param type: widget type name as taken from :attr:`widget_types` dict keys. :type type: str :param file: optional file object for widget attribute resolving :type type: browsepy.files.Node or None :returns: widget instance :rtype: object
[ "Create", "a", "widget", "object", "based", "on", "given", "arguments", "." ]
python
train
38.973684
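A hedged sketch of how the method above is meant to be called; `manager` stands for an instance of the widget-manager class this record belongs to, and the place names, widget types and keyword fields shown are illustrative guesses rather than a verified list, so the calls are left commented out.

def attribution(file):
    # Callable attributes are resolved per file: this would be called with the file object
    # and its return value used as the widget's text.
    return 'owned by %s' % file.name

# Static widget -- every keyword is passed straight through to the widget constructor:
# manager.create_widget('header', 'html', html='<p>Hello</p>')

# File-bound widget -- callables such as `attribution` are resolved against `some_file`:
# manager.create_widget('entry-actions', 'button', file=some_file, text=attribution)

# Unknown keywords raise WidgetParameterException listing the widget type's valid fields.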
KrzyHonk/bpmn-python
bpmn_python/bpmn_process_csv_export.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_process_csv_export.py#L110-L205
def export_element(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="", who="", add_join=False): """ Export a node with "Element" classification (task, subprocess or gateway) :param bpmn_graph: an instance of BpmnDiagramGraph class, :param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that will be used in exported CSV document, :param node: networkx.Node object, :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels, :param order: the order param of exported node, :param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify the branch :param condition: the condition param of exported node, :param who: the condition param of exported node, :param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV. :return: None or the next node object if the exported node was a gateway join. """ node_type = node[1][consts.Consts.type] node_classification = nodes_classification[node[0]] outgoing_flows = node[1].get(consts.Consts.outgoing_flow) if node_type != consts.Consts.parallel_gateway and consts.Consts.default in node[1] \ and node[1][consts.Consts.default] is not None: default_flow_id = node[1][consts.Consts.default] else: default_flow_id = None if BpmnDiagramGraphCsvExport.classification_join in node_classification and not add_join: # If the node is a join, then retract the recursion back to the split. # In case of activity - return current node. In case of gateway - return outgoing node # (we are making assumption that join has only one outgoing node) if node_type == consts.Consts.task or node_type == consts.Consts.subprocess: return node else: outgoing_flow_id = outgoing_flows[0] outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id) outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref]) return outgoing_node else: if node_type == consts.Consts.task: export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name], "Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""}) elif node_type == consts.Consts.subprocess: export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name], "Condition": condition, "Who": who, "Subprocess": "yes", "Terminated": ""}) if BpmnDiagramGraphCsvExport.classification_split in node_classification: next_node = None alphabet_suffix_index = 0 for outgoing_flow_id in outgoing_flows: outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id) outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref]) # This will work only up to 26 outgoing flows suffix = string.ascii_lowercase[alphabet_suffix_index] next_prefix = prefix + str(order) + suffix alphabet_suffix_index += 1 # parallel gateway does not uses conditions if node_type != consts.Consts.parallel_gateway and consts.Consts.name in outgoing_flow[2] \ and outgoing_flow[2][consts.Consts.name] is not None: condition = outgoing_flow[2][consts.Consts.name] else: condition = "" if BpmnDiagramGraphCsvExport.classification_join in nodes_classification[outgoing_node[0]]: export_elements.append( {"Order": next_prefix + str(1), "Activity": "goto " + prefix + str(order + 1), "Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""}) elif outgoing_flow_id == default_flow_id: tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, 
outgoing_node, nodes_classification, 1, next_prefix, "else", who) if tmp_next_node is not None: next_node = tmp_next_node else: tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node, nodes_classification, 1, next_prefix, condition, who) if tmp_next_node is not None: next_node = tmp_next_node if next_node is not None: return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, next_node, nodes_classification, order=(order + 1), prefix=prefix, who=who, add_join=True) elif len(outgoing_flows) == 1: outgoing_flow_id = outgoing_flows[0] outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id) outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref]) return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node, nodes_classification, order=(order + 1), prefix=prefix, who=who) else: return None
[ "def", "export_element", "(", "bpmn_graph", ",", "export_elements", ",", "node", ",", "nodes_classification", ",", "order", "=", "0", ",", "prefix", "=", "\"\"", ",", "condition", "=", "\"\"", ",", "who", "=", "\"\"", ",", "add_join", "=", "False", ")", ":", "node_type", "=", "node", "[", "1", "]", "[", "consts", ".", "Consts", ".", "type", "]", "node_classification", "=", "nodes_classification", "[", "node", "[", "0", "]", "]", "outgoing_flows", "=", "node", "[", "1", "]", ".", "get", "(", "consts", ".", "Consts", ".", "outgoing_flow", ")", "if", "node_type", "!=", "consts", ".", "Consts", ".", "parallel_gateway", "and", "consts", ".", "Consts", ".", "default", "in", "node", "[", "1", "]", "and", "node", "[", "1", "]", "[", "consts", ".", "Consts", ".", "default", "]", "is", "not", "None", ":", "default_flow_id", "=", "node", "[", "1", "]", "[", "consts", ".", "Consts", ".", "default", "]", "else", ":", "default_flow_id", "=", "None", "if", "BpmnDiagramGraphCsvExport", ".", "classification_join", "in", "node_classification", "and", "not", "add_join", ":", "# If the node is a join, then retract the recursion back to the split.", "# In case of activity - return current node. In case of gateway - return outgoing node", "# (we are making assumption that join has only one outgoing node)", "if", "node_type", "==", "consts", ".", "Consts", ".", "task", "or", "node_type", "==", "consts", ".", "Consts", ".", "subprocess", ":", "return", "node", "else", ":", "outgoing_flow_id", "=", "outgoing_flows", "[", "0", "]", "outgoing_flow", "=", "bpmn_graph", ".", "get_flow_by_id", "(", "outgoing_flow_id", ")", "outgoing_node", "=", "bpmn_graph", ".", "get_node_by_id", "(", "outgoing_flow", "[", "2", "]", "[", "consts", ".", "Consts", ".", "target_ref", "]", ")", "return", "outgoing_node", "else", ":", "if", "node_type", "==", "consts", ".", "Consts", ".", "task", ":", "export_elements", ".", "append", "(", "{", "\"Order\"", ":", "prefix", "+", "str", "(", "order", ")", ",", "\"Activity\"", ":", "node", "[", "1", "]", "[", "consts", ".", "Consts", ".", "node_name", "]", ",", "\"Condition\"", ":", "condition", ",", "\"Who\"", ":", "who", ",", "\"Subprocess\"", ":", "\"\"", ",", "\"Terminated\"", ":", "\"\"", "}", ")", "elif", "node_type", "==", "consts", ".", "Consts", ".", "subprocess", ":", "export_elements", ".", "append", "(", "{", "\"Order\"", ":", "prefix", "+", "str", "(", "order", ")", ",", "\"Activity\"", ":", "node", "[", "1", "]", "[", "consts", ".", "Consts", ".", "node_name", "]", ",", "\"Condition\"", ":", "condition", ",", "\"Who\"", ":", "who", ",", "\"Subprocess\"", ":", "\"yes\"", ",", "\"Terminated\"", ":", "\"\"", "}", ")", "if", "BpmnDiagramGraphCsvExport", ".", "classification_split", "in", "node_classification", ":", "next_node", "=", "None", "alphabet_suffix_index", "=", "0", "for", "outgoing_flow_id", "in", "outgoing_flows", ":", "outgoing_flow", "=", "bpmn_graph", ".", "get_flow_by_id", "(", "outgoing_flow_id", ")", "outgoing_node", "=", "bpmn_graph", ".", "get_node_by_id", "(", "outgoing_flow", "[", "2", "]", "[", "consts", ".", "Consts", ".", "target_ref", "]", ")", "# This will work only up to 26 outgoing flows", "suffix", "=", "string", ".", "ascii_lowercase", "[", "alphabet_suffix_index", "]", "next_prefix", "=", "prefix", "+", "str", "(", "order", ")", "+", "suffix", "alphabet_suffix_index", "+=", "1", "# parallel gateway does not uses conditions", "if", "node_type", "!=", "consts", ".", "Consts", ".", "parallel_gateway", "and", "consts", ".", "Consts", ".", "name", "in", 
"outgoing_flow", "[", "2", "]", "and", "outgoing_flow", "[", "2", "]", "[", "consts", ".", "Consts", ".", "name", "]", "is", "not", "None", ":", "condition", "=", "outgoing_flow", "[", "2", "]", "[", "consts", ".", "Consts", ".", "name", "]", "else", ":", "condition", "=", "\"\"", "if", "BpmnDiagramGraphCsvExport", ".", "classification_join", "in", "nodes_classification", "[", "outgoing_node", "[", "0", "]", "]", ":", "export_elements", ".", "append", "(", "{", "\"Order\"", ":", "next_prefix", "+", "str", "(", "1", ")", ",", "\"Activity\"", ":", "\"goto \"", "+", "prefix", "+", "str", "(", "order", "+", "1", ")", ",", "\"Condition\"", ":", "condition", ",", "\"Who\"", ":", "who", ",", "\"Subprocess\"", ":", "\"\"", ",", "\"Terminated\"", ":", "\"\"", "}", ")", "elif", "outgoing_flow_id", "==", "default_flow_id", ":", "tmp_next_node", "=", "BpmnDiagramGraphCsvExport", ".", "export_node", "(", "bpmn_graph", ",", "export_elements", ",", "outgoing_node", ",", "nodes_classification", ",", "1", ",", "next_prefix", ",", "\"else\"", ",", "who", ")", "if", "tmp_next_node", "is", "not", "None", ":", "next_node", "=", "tmp_next_node", "else", ":", "tmp_next_node", "=", "BpmnDiagramGraphCsvExport", ".", "export_node", "(", "bpmn_graph", ",", "export_elements", ",", "outgoing_node", ",", "nodes_classification", ",", "1", ",", "next_prefix", ",", "condition", ",", "who", ")", "if", "tmp_next_node", "is", "not", "None", ":", "next_node", "=", "tmp_next_node", "if", "next_node", "is", "not", "None", ":", "return", "BpmnDiagramGraphCsvExport", ".", "export_node", "(", "bpmn_graph", ",", "export_elements", ",", "next_node", ",", "nodes_classification", ",", "order", "=", "(", "order", "+", "1", ")", ",", "prefix", "=", "prefix", ",", "who", "=", "who", ",", "add_join", "=", "True", ")", "elif", "len", "(", "outgoing_flows", ")", "==", "1", ":", "outgoing_flow_id", "=", "outgoing_flows", "[", "0", "]", "outgoing_flow", "=", "bpmn_graph", ".", "get_flow_by_id", "(", "outgoing_flow_id", ")", "outgoing_node", "=", "bpmn_graph", ".", "get_node_by_id", "(", "outgoing_flow", "[", "2", "]", "[", "consts", ".", "Consts", ".", "target_ref", "]", ")", "return", "BpmnDiagramGraphCsvExport", ".", "export_node", "(", "bpmn_graph", ",", "export_elements", ",", "outgoing_node", ",", "nodes_classification", ",", "order", "=", "(", "order", "+", "1", ")", ",", "prefix", "=", "prefix", ",", "who", "=", "who", ")", "else", ":", "return", "None" ]
Export a node with "Element" classification (task, subprocess or gateway) :param bpmn_graph: an instance of BpmnDiagramGraph class, :param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that will be used in exported CSV document, :param node: networkx.Node object, :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels, :param order: the order param of exported node, :param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify the branch :param condition: the condition param of exported node, :param who: the condition param of exported node, :param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV. :return: None or the next node object if the exported node was a gateway join.
[ "Export", "a", "node", "with", "Element", "classification", "(", "task", "subprocess", "or", "gateway", ")" ]
python
train
62.833333
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L370-L418
def partial_autocorrelation(x, param): """ Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and :math:`x_{t-k}`, adjusted for the intermediate variables :math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]). Following [2], it can be defined as .. math:: \\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})} {\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}} with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})` being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`. It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p` and zero for `k>p`." With this property, it is used to determine the lag of an AR-Process. .. rubric:: References | [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015). | Time series analysis: forecasting and control. John Wiley & Sons. | [2] https://onlinecourses.science.psu.edu/stat510/node/62 :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned :type param: list :return: the value of this feature :return type: float """ # Check the difference between demanded lags by param and possible lags to calculate (depends on len(x)) max_demanded_lag = max([lag["lag"] for lag in param]) n = len(x) # Check if list is too short to make calculations if n <= 1: pacf_coeffs = [np.nan] * (max_demanded_lag + 1) else: if (n <= max_demanded_lag): max_lag = n - 1 else: max_lag = max_demanded_lag pacf_coeffs = list(pacf(x, method="ld", nlags=max_lag)) pacf_coeffs = pacf_coeffs + [np.nan] * max(0, (max_demanded_lag - max_lag)) return [("lag_{}".format(lag["lag"]), pacf_coeffs[lag["lag"]]) for lag in param]
[ "def", "partial_autocorrelation", "(", "x", ",", "param", ")", ":", "# Check the difference between demanded lags by param and possible lags to calculate (depends on len(x))", "max_demanded_lag", "=", "max", "(", "[", "lag", "[", "\"lag\"", "]", "for", "lag", "in", "param", "]", ")", "n", "=", "len", "(", "x", ")", "# Check if list is too short to make calculations", "if", "n", "<=", "1", ":", "pacf_coeffs", "=", "[", "np", ".", "nan", "]", "*", "(", "max_demanded_lag", "+", "1", ")", "else", ":", "if", "(", "n", "<=", "max_demanded_lag", ")", ":", "max_lag", "=", "n", "-", "1", "else", ":", "max_lag", "=", "max_demanded_lag", "pacf_coeffs", "=", "list", "(", "pacf", "(", "x", ",", "method", "=", "\"ld\"", ",", "nlags", "=", "max_lag", ")", ")", "pacf_coeffs", "=", "pacf_coeffs", "+", "[", "np", ".", "nan", "]", "*", "max", "(", "0", ",", "(", "max_demanded_lag", "-", "max_lag", ")", ")", "return", "[", "(", "\"lag_{}\"", ".", "format", "(", "lag", "[", "\"lag\"", "]", ")", ",", "pacf_coeffs", "[", "lag", "[", "\"lag\"", "]", "]", ")", "for", "lag", "in", "param", "]" ]
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and :math:`x_{t-k}`, adjusted for the intermediate variables :math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]). Following [2], it can be defined as .. math:: \\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})} {\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}} with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})` being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`. It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p` and zero for `k>p`." With this property, it is used to determine the lag of an AR-Process. .. rubric:: References | [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015). | Time series analysis: forecasting and control. John Wiley & Sons. | [2] https://onlinecourses.science.psu.edu/stat510/node/62 :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned :type param: list :return: the value of this feature :return type: float
[ "Calculates", "the", "value", "of", "the", "partial", "autocorrelation", "function", "at", "the", "given", "lag", ".", "The", "lag", "k", "partial", "autocorrelation", "of", "a", "time", "series", ":", "math", ":", "\\\\", "lbrace", "x_t", "t", "=", "1", "\\\\", "ldots", "T", "\\\\", "rbrace", "equals", "the", "partial", "correlation", "of", ":", "math", ":", "x_t", "and", ":", "math", ":", "x_", "{", "t", "-", "k", "}", "adjusted", "for", "the", "intermediate", "variables", ":", "math", ":", "\\\\", "lbrace", "x_", "{", "t", "-", "1", "}", "\\\\", "ldots", "x_", "{", "t", "-", "k", "+", "1", "}", "\\\\", "rbrace", "(", "[", "1", "]", ")", ".", "Following", "[", "2", "]", "it", "can", "be", "defined", "as" ]
python
train
47.897959
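A hedged usage sketch of the feature calculator in the record above; the import path follows the record's file path, and the input series and lags are illustrative only.

import numpy as np
from tsfresh.feature_extraction.feature_calculators import partial_autocorrelation

# A short illustrative series; any 1-D numpy array works.
x = np.array([2.0, 1.0, 3.0, 2.5, 4.0, 3.5, 5.0, 4.5, 6.0, 5.5])

# Request the partial autocorrelation at lags 1 and 2.
param = [{"lag": 1}, {"lag": 2}]

# Returns a list of ("lag_<k>", value) tuples; lags that exceed what the
# series length allows come back as NaN.
for name, value in partial_autocorrelation(x, param):
    print(name, value)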
zetaops/pyoko
pyoko/db/queryset.py
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/db/queryset.py#L356-L382
def get(self, key=None, **kwargs): """ Ensures that only one result is returned from DB and raises an exception otherwise. Can work in 3 different way. - If no argument is given, only does "ensuring about one and only object" job. - If key given as only argument, retrieves the object from DB. - if query filters given, implicitly calls filter() method. Raises: MultipleObjectsReturned: If there is more than one (1) record is returned. """ clone = copy.deepcopy(self) # If we are in a slice, adjust the start and rows if self._start: clone.adapter.set_params(start=self._start) if self._rows: clone.adapter.set_params(rows=self._rows) if key: data, key = clone.adapter.get(key) elif kwargs: data, key = clone.filter(**kwargs).adapter.get() else: data, key = clone.adapter.get() if clone._cfg['rtype'] == ReturnType.Object: return data, key return self._make_model(data, key)
[ "def", "get", "(", "self", ",", "key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "clone", "=", "copy", ".", "deepcopy", "(", "self", ")", "# If we are in a slice, adjust the start and rows", "if", "self", ".", "_start", ":", "clone", ".", "adapter", ".", "set_params", "(", "start", "=", "self", ".", "_start", ")", "if", "self", ".", "_rows", ":", "clone", ".", "adapter", ".", "set_params", "(", "rows", "=", "self", ".", "_rows", ")", "if", "key", ":", "data", ",", "key", "=", "clone", ".", "adapter", ".", "get", "(", "key", ")", "elif", "kwargs", ":", "data", ",", "key", "=", "clone", ".", "filter", "(", "*", "*", "kwargs", ")", ".", "adapter", ".", "get", "(", ")", "else", ":", "data", ",", "key", "=", "clone", ".", "adapter", ".", "get", "(", ")", "if", "clone", ".", "_cfg", "[", "'rtype'", "]", "==", "ReturnType", ".", "Object", ":", "return", "data", ",", "key", "return", "self", ".", "_make_model", "(", "data", ",", "key", ")" ]
Ensures that only one result is returned from the DB and raises an exception otherwise. Can work in 3 different ways. - If no argument is given, it only ensures that one and only one object is returned. - If a key is given as the only argument, it retrieves the object from the DB. - If query filters are given, it implicitly calls the filter() method. Raises: MultipleObjectsReturned: If more than one (1) record is returned.
[ "Ensures", "that", "only", "one", "result", "is", "returned", "from", "DB", "and", "raises", "an", "exception", "otherwise", ".", "Can", "work", "in", "3", "different", "way", "." ]
python
train
40.111111
globocom/GloboNetworkAPI-client-python
networkapiclient/Roteiro.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Roteiro.py#L171-L201
def listar_por_equipamento(self, id_equipment): """List all Script related Equipment. :param id_equipment: Identifier of the Equipment. Integer value and greater than zero. :return: Dictionary with the following structure: :: {script': [{‘id’: < id >, ‘nome’: < nome >, ‘descricao’: < descricao >, ‘id_tipo_roteiro’: < id_tipo_roteiro >, ‘nome_tipo_roteiro’: < nome_tipo_roteiro >, ‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]} :raise InvalidParameterError: The identifier of Equipment is null and invalid. :raise EquipamentoNaoExisteError: Equipment not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_equipment): raise InvalidParameterError( u'The identifier of Equipment is invalid or was not informed.') url = 'script/equipment/' + str(id_equipment) + '/' code, map = self.submit(None, 'GET', url) key = 'script' return get_list_map(self.response(code, map, [key]), key)
[ "def", "listar_por_equipamento", "(", "self", ",", "id_equipment", ")", ":", "if", "not", "is_valid_int_param", "(", "id_equipment", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Equipment is invalid or was not informed.'", ")", "url", "=", "'script/equipment/'", "+", "str", "(", "id_equipment", ")", "+", "'/'", "code", ",", "map", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "key", "=", "'script'", "return", "get_list_map", "(", "self", ".", "response", "(", "code", ",", "map", ",", "[", "key", "]", ")", ",", "key", ")" ]
List all Scripts related to the Equipment. :param id_equipment: Identifier of the Equipment. Integer value greater than zero. :return: Dictionary with the following structure: :: {'script': [{‘id’: < id >, ‘nome’: < nome >, ‘descricao’: < descricao >, ‘id_tipo_roteiro’: < id_tipo_roteiro >, ‘nome_tipo_roteiro’: < nome_tipo_roteiro >, ‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]} :raise InvalidParameterError: The identifier of Equipment is null or invalid. :raise EquipamentoNaoExisteError: Equipment not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "List", "all", "Script", "related", "Equipment", "." ]
python
train
39.322581
coleifer/irc
irc.py
https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/irc.py#L246-L270
def enter_event_loop(self): """\ Main loop of the IRCConnection - reads from the socket and dispatches based on regex matching """ patterns = self.dispatch_patterns() self.logger.debug('entering receive loop') while 1: try: data = self._sock_file.readline() except socket.error: data = None if not data: self.logger.info('server closed connection') self.close() return True data = data.rstrip() for pattern, callback in patterns: match = pattern.match(data) if match: callback(**match.groupdict())
[ "def", "enter_event_loop", "(", "self", ")", ":", "patterns", "=", "self", ".", "dispatch_patterns", "(", ")", "self", ".", "logger", ".", "debug", "(", "'entering receive loop'", ")", "while", "1", ":", "try", ":", "data", "=", "self", ".", "_sock_file", ".", "readline", "(", ")", "except", "socket", ".", "error", ":", "data", "=", "None", "if", "not", "data", ":", "self", ".", "logger", ".", "info", "(", "'server closed connection'", ")", "self", ".", "close", "(", ")", "return", "True", "data", "=", "data", ".", "rstrip", "(", ")", "for", "pattern", ",", "callback", "in", "patterns", ":", "match", "=", "pattern", ".", "match", "(", "data", ")", "if", "match", ":", "callback", "(", "*", "*", "match", ".", "groupdict", "(", ")", ")" ]
\ Main loop of the IRCConnection - reads from the socket and dispatches based on regex matching
[ "\\", "Main", "loop", "of", "the", "IRCConnection", "-", "reads", "from", "the", "socket", "and", "dispatches", "based", "on", "regex", "matching" ]
python
test
29
wiredrive/wtframework
wtframework/wtf/web/page.py
https://github.com/wiredrive/wtframework/blob/ef7f86c4d4cf7fb17745fd627b3cc4a41f4c0216/wtframework/wtf/web/page.py#L114-L130
def create_page(cls, webdriver=None, **kwargs): """Class method short cut to call PageFactory on itself. Use it to instantiate this PageObject using a webdriver. Args: webdriver (Webdriver): Instance of Selenium Webdriver. Returns: PageObject Raises: InvalidPageError """ if not webdriver: webdriver = WTF_WEBDRIVER_MANAGER.get_driver() return PageFactory.create_page(cls, webdriver=webdriver, **kwargs)
[ "def", "create_page", "(", "cls", ",", "webdriver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "webdriver", ":", "webdriver", "=", "WTF_WEBDRIVER_MANAGER", ".", "get_driver", "(", ")", "return", "PageFactory", ".", "create_page", "(", "cls", ",", "webdriver", "=", "webdriver", ",", "*", "*", "kwargs", ")" ]
Class method shortcut to call PageFactory on itself. Use it to instantiate this PageObject using a webdriver. Args: webdriver (Webdriver): Instance of Selenium Webdriver. Returns: PageObject Raises: InvalidPageError
[ "Class", "method", "short", "cut", "to", "call", "PageFactory", "on", "itself", ".", "Use", "it", "to", "instantiate", "this", "PageObject", "using", "a", "webdriver", "." ]
python
train
29.705882
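A hypothetical sketch of how the class method above might be used; the SearchPage class, its _validate_page body, and the exact import names are assumptions rather than part of the record.

from wtframework.wtf.web.page import PageObject, InvalidPageError  # assumed import names

class SearchPage(PageObject):
    # Illustrative page object, not part of the record above.
    def _validate_page(self, webdriver):
        # Raise InvalidPageError if the driver is not on the expected page
        # (this hook name is an assumption about the framework's convention).
        if "search" not in webdriver.current_url:
            raise InvalidPageError("Not on the search page.")

# With no webdriver argument, the default WTF-managed driver is used.
page = SearchPage.create_page()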
zhmcclient/python-zhmcclient
zhmcclient/_storage_volume.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_storage_volume.py#L496-L554
def indicate_fulfillment_ficon(self, control_unit, unit_address): """ TODO: Add ControlUnit objects etc for FICON support. Indicate completion of :term:`fulfillment` for this ECKD (=FICON) storage volume and provide identifying information (control unit and unit address) about the actual storage volume on the storage subsystem. Manually indicating fulfillment is required for all ECKD volumes, because they are not auto-discovered by the CPC. This method performs the "Fulfill FICON Storage Volume" HMC operation. Upon successful completion of this operation, the "fulfillment-state" property of this storage volume object will have been set to "complete". That is necessary for the CPC to be able to address and connect to the volume. If the "fulfillment-state" properties of all storage volumes in the owning storage group are "complete", the owning storage group's "fulfillment-state" property will also be set to "complete". Parameters: control_unit (:class:`~zhmcclient.ControlUnit`): Logical control unit (LCU) in which the backing ECKD volume is defined. unit_address (:term:`string`): Unit address of the backing ECKD volume within its logical control unit, as a hexadecimal number of up to 2 characters in any lexical case. Authorization requirements: * Object-access permission to the storage group owning this storage volume. * Task permission to the "Configure Storage - Storage Administrator" task. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ # The operation requires exactly 2 characters in lower case unit_address_2 = format(int(unit_address, 16), '02x') body = { 'control-unit-uri': control_unit.uri, 'unit-address': unit_address_2, } self.manager.session.post( self.uri + '/operations/fulfill-ficon-storage-volume', body=body)
[ "def", "indicate_fulfillment_ficon", "(", "self", ",", "control_unit", ",", "unit_address", ")", ":", "# The operation requires exactly 2 characters in lower case", "unit_address_2", "=", "format", "(", "int", "(", "unit_address", ",", "16", ")", ",", "'02x'", ")", "body", "=", "{", "'control-unit-uri'", ":", "control_unit", ".", "uri", ",", "'unit-address'", ":", "unit_address_2", ",", "}", "self", ".", "manager", ".", "session", ".", "post", "(", "self", ".", "uri", "+", "'/operations/fulfill-ficon-storage-volume'", ",", "body", "=", "body", ")" ]
TODO: Add ControlUnit objects etc for FICON support. Indicate completion of :term:`fulfillment` for this ECKD (=FICON) storage volume and provide identifying information (control unit and unit address) about the actual storage volume on the storage subsystem. Manually indicating fulfillment is required for all ECKD volumes, because they are not auto-discovered by the CPC. This method performs the "Fulfill FICON Storage Volume" HMC operation. Upon successful completion of this operation, the "fulfillment-state" property of this storage volume object will have been set to "complete". That is necessary for the CPC to be able to address and connect to the volume. If the "fulfillment-state" properties of all storage volumes in the owning storage group are "complete", the owning storage group's "fulfillment-state" property will also be set to "complete". Parameters: control_unit (:class:`~zhmcclient.ControlUnit`): Logical control unit (LCU) in which the backing ECKD volume is defined. unit_address (:term:`string`): Unit address of the backing ECKD volume within its logical control unit, as a hexadecimal number of up to 2 characters in any lexical case. Authorization requirements: * Object-access permission to the storage group owning this storage volume. * Task permission to the "Configure Storage - Storage Administrator" task. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
[ "TODO", ":", "Add", "ControlUnit", "objects", "etc", "for", "FICON", "support", "." ]
python
train
37.220339
pyroscope/pyrobase
src/pyrobase/bencode.py
https://github.com/pyroscope/pyrobase/blob/7a2591baa492c3d8997ab4801b97c7b1f2ebc6b1/src/pyrobase/bencode.py#L170-L180
def bread(stream): """ Decode a file or stream to an object. """ if hasattr(stream, "read"): return bdecode(stream.read()) else: handle = open(stream, "rb") try: return bdecode(handle.read()) finally: handle.close()
[ "def", "bread", "(", "stream", ")", ":", "if", "hasattr", "(", "stream", ",", "\"read\"", ")", ":", "return", "bdecode", "(", "stream", ".", "read", "(", ")", ")", "else", ":", "handle", "=", "open", "(", "stream", ",", "\"rb\"", ")", "try", ":", "return", "bdecode", "(", "handle", ".", "read", "(", ")", ")", "finally", ":", "handle", ".", "close", "(", ")" ]
Decode a file or stream to an object.
[ "Decode", "a", "file", "or", "stream", "to", "an", "object", "." ]
python
train
25.181818
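A minimal, self-contained sketch of the helper above; the file name and payload are illustrative.

from pyrobase.bencode import bread

# Write a tiny bencoded payload so the example is self-contained
# ("d3:fooi42ee" encodes the dict {"foo": 42}).
with open("sample.bencode", "wb") as handle:
    handle.write(b"d3:fooi42ee")

# Works with a path...
print(bread("sample.bencode"))

# ...or with anything that has a read() method.
with open("sample.bencode", "rb") as handle:
    print(bread(handle))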
loli/medpy
medpy/metric/histogram.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L34-L93
def minowski(h1, h2, p = 2): # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \w 100 bins r""" Minowski distance. With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance, and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`. The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p \right)^{\frac{1}{p}} *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \sqrt[p]{2}]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. p : float The :math:`p` value in the Minowksi distance formula. Returns ------- minowski : float Minowski distance. Raises ------ ValueError If ``p`` is zero. """ h1, h2 = __prepare_histogram(h1, h2) if 0 == p: raise ValueError('p can not be zero') elif int == type(p): if p > 0 and p < 25: return __minowski_low_positive_integer_p(h1, h2, p) elif p < 0 and p > -25: return __minowski_low_negative_integer_p(h1, h2, p) return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1./p)
[ "def", "minowski", "(", "h1", ",", "h2", ",", "p", "=", "2", ")", ":", "# 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "if", "0", "==", "p", ":", "raise", "ValueError", "(", "'p can not be zero'", ")", "elif", "int", "==", "type", "(", "p", ")", ":", "if", "p", ">", "0", "and", "p", "<", "25", ":", "return", "__minowski_low_positive_integer_p", "(", "h1", ",", "h2", ",", "p", ")", "elif", "p", "<", "0", "and", "p", ">", "-", "25", ":", "return", "__minowski_low_negative_integer_p", "(", "h1", ",", "h2", ",", "p", ")", "return", "math", ".", "pow", "(", "scipy", ".", "sum", "(", "scipy", ".", "power", "(", "scipy", ".", "absolute", "(", "h1", "-", "h2", ")", ",", "p", ")", ")", ",", "1.", "/", "p", ")" ]
r""" Minowski distance. With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance, and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`. The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p \right)^{\frac{1}{p}} *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \sqrt[p]{2}]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. p : float The :math:`p` value in the Minowksi distance formula. Returns ------- minowski : float Minowski distance. Raises ------ ValueError If ``p`` is zero.
[ "r", "Minowski", "distance", ".", "With", ":", "math", ":", "p", "=", "2", "equal", "to", "the", "Euclidean", "distance", "with", ":", "math", ":", "p", "=", "1", "equal", "to", "the", "Manhattan", "distance", "and", "the", "Chebyshev", "distance", "implementation", "represents", "the", "case", "of", ":", "math", ":", "p", "=", "\\", "pm", "inf", ".", "The", "Minowksi", "distance", "between", "two", "histograms", ":", "math", ":", "H", "and", ":", "math", ":", "H", "of", "size", ":", "math", ":", "m", "is", "defined", "as", ":", "..", "math", "::", "d_p", "(", "H", "H", ")", "=", "\\", "left", "(", "\\", "sum_", "{", "m", "=", "1", "}", "^M|H_m", "-", "H", "_m|^p", "\\", "right", ")", "^", "{", "\\", "frac", "{", "1", "}", "{", "p", "}}" ]
python
train
28.116667
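An illustrative call of the distance above on two small normalized histograms; the values are made up.

import numpy as np
from medpy.metric.histogram import minowski

h1 = np.array([0.1, 0.4, 0.3, 0.2])
h2 = np.array([0.2, 0.3, 0.3, 0.2])

print(minowski(h1, h2))        # default p=2: Euclidean distance between the histograms
print(minowski(h1, h2, p=1))   # p=1: Manhattan distance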
abw333/dominoes
dominoes/game.py
https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/game.py#L375-L400
def missing_values(self): ''' Computes the values that must be missing from each player's hand, based on when they have passed. :return: a list of sets, each one containing the values that must be missing from the corresponding player's hand ''' missing = [set() for _ in self.hands] # replay the game from the beginning board = dominoes.SkinnyBoard() player = self.starting_player for move in self.moves: if move is None: # pass - update the missing values missing[player].update([board.left_end(), board.right_end()]) else: # not a pass - update the board board.add(*move) # move on to the next player player = next_player(player) return missing
[ "def", "missing_values", "(", "self", ")", ":", "missing", "=", "[", "set", "(", ")", "for", "_", "in", "self", ".", "hands", "]", "# replay the game from the beginning", "board", "=", "dominoes", ".", "SkinnyBoard", "(", ")", "player", "=", "self", ".", "starting_player", "for", "move", "in", "self", ".", "moves", ":", "if", "move", "is", "None", ":", "# pass - update the missing values", "missing", "[", "player", "]", ".", "update", "(", "[", "board", ".", "left_end", "(", ")", ",", "board", ".", "right_end", "(", ")", "]", ")", "else", ":", "# not a pass - update the board", "board", ".", "add", "(", "*", "move", ")", "# move on to the next player", "player", "=", "next_player", "(", "player", ")", "return", "missing" ]
Computes the values that must be missing from each player's hand, based on when they have passed. :return: a list of sets, each one containing the values that must be missing from the corresponding player's hand
[ "Computes", "the", "values", "that", "must", "be", "missing", "from", "each", "player", "s", "hand", "based", "on", "when", "they", "have", "passed", "." ]
python
train
33.076923
Erotemic/utool
utool/util_inspect.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_inspect.py#L1052-L1101
def is_defined_by_module(item, module, parent=None): """ Check if item is directly defined by a module. This check may be prone to errors. """ flag = False if isinstance(item, types.ModuleType): if not hasattr(item, '__file__'): try: # hack for cv2 and xfeatures2d import utool as ut name = ut.get_modname_from_modpath(module.__file__) flag = name in str(item) except: flag = False else: item_modpath = os.path.realpath(dirname(item.__file__)) mod_fpath = module.__file__.replace('.pyc', '.py') if not mod_fpath.endswith('__init__.py'): flag = False else: modpath = os.path.realpath(dirname(mod_fpath)) modpath = modpath.replace('.pyc', '.py') flag = item_modpath.startswith(modpath) elif hasattr(item, '_utinfo'): # Capture case where there is a utool wrapper orig_func = item._utinfo['orig_func'] flag = is_defined_by_module(orig_func, module, parent) else: if isinstance(item, staticmethod): # static methods are a wrapper around a function item = item.__func__ try: func_globals = meta_util_six.get_funcglobals(item) func_module_name = func_globals['__name__'] if func_module_name == 'line_profiler': valid_names = dir(module) if parent is not None: valid_names += dir(parent) if item.func_name in valid_names: # hack to prevent small names #if len(item.func_name) > 8: if len(item.func_name) > 6: flag = True elif func_module_name == module.__name__: flag = True except AttributeError: if hasattr(item, '__module__'): flag = item.__module__ == module.__name__ return flag
[ "def", "is_defined_by_module", "(", "item", ",", "module", ",", "parent", "=", "None", ")", ":", "flag", "=", "False", "if", "isinstance", "(", "item", ",", "types", ".", "ModuleType", ")", ":", "if", "not", "hasattr", "(", "item", ",", "'__file__'", ")", ":", "try", ":", "# hack for cv2 and xfeatures2d", "import", "utool", "as", "ut", "name", "=", "ut", ".", "get_modname_from_modpath", "(", "module", ".", "__file__", ")", "flag", "=", "name", "in", "str", "(", "item", ")", "except", ":", "flag", "=", "False", "else", ":", "item_modpath", "=", "os", ".", "path", ".", "realpath", "(", "dirname", "(", "item", ".", "__file__", ")", ")", "mod_fpath", "=", "module", ".", "__file__", ".", "replace", "(", "'.pyc'", ",", "'.py'", ")", "if", "not", "mod_fpath", ".", "endswith", "(", "'__init__.py'", ")", ":", "flag", "=", "False", "else", ":", "modpath", "=", "os", ".", "path", ".", "realpath", "(", "dirname", "(", "mod_fpath", ")", ")", "modpath", "=", "modpath", ".", "replace", "(", "'.pyc'", ",", "'.py'", ")", "flag", "=", "item_modpath", ".", "startswith", "(", "modpath", ")", "elif", "hasattr", "(", "item", ",", "'_utinfo'", ")", ":", "# Capture case where there is a utool wrapper", "orig_func", "=", "item", ".", "_utinfo", "[", "'orig_func'", "]", "flag", "=", "is_defined_by_module", "(", "orig_func", ",", "module", ",", "parent", ")", "else", ":", "if", "isinstance", "(", "item", ",", "staticmethod", ")", ":", "# static methods are a wrapper around a function", "item", "=", "item", ".", "__func__", "try", ":", "func_globals", "=", "meta_util_six", ".", "get_funcglobals", "(", "item", ")", "func_module_name", "=", "func_globals", "[", "'__name__'", "]", "if", "func_module_name", "==", "'line_profiler'", ":", "valid_names", "=", "dir", "(", "module", ")", "if", "parent", "is", "not", "None", ":", "valid_names", "+=", "dir", "(", "parent", ")", "if", "item", ".", "func_name", "in", "valid_names", ":", "# hack to prevent small names", "#if len(item.func_name) > 8:", "if", "len", "(", "item", ".", "func_name", ")", ">", "6", ":", "flag", "=", "True", "elif", "func_module_name", "==", "module", ".", "__name__", ":", "flag", "=", "True", "except", "AttributeError", ":", "if", "hasattr", "(", "item", ",", "'__module__'", ")", ":", "flag", "=", "item", ".", "__module__", "==", "module", ".", "__name__", "return", "flag" ]
Check if item is directly defined by a module. This check may be prone to errors.
[ "Check", "if", "item", "is", "directly", "defined", "by", "a", "module", ".", "This", "check", "may", "be", "prone", "to", "errors", "." ]
python
train
40.24
joeferraro/mm
mm/sforce/base.py
https://github.com/joeferraro/mm/blob/43dce48a2249faab4d872c228ada9fbdbeec147b/mm/sforce/base.py#L562-L568
def describeSObject(self, sObjectsType): ''' Describes metadata (field list and object properties) for the specified object. ''' self._setHeaders('describeSObject') return self._sforce.service.describeSObject(sObjectsType)
[ "def", "describeSObject", "(", "self", ",", "sObjectsType", ")", ":", "self", ".", "_setHeaders", "(", "'describeSObject'", ")", "return", "self", ".", "_sforce", ".", "service", ".", "describeSObject", "(", "sObjectsType", ")" ]
Describes metadata (field list and object properties) for the specified object.
[ "Describes", "metadata", "(", "field", "list", "and", "object", "properties", ")", "for", "the", "specified", "object", "." ]
python
train
34.285714
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/g5k_api_utils.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/g5k_api_utils.py#L455-L490
def can_start_on_cluster(nodes_status, nodes, start, walltime): """Check if #nodes can be started on a given cluster. This is intended to give a good enough approximation. This can be use to prefiltered possible reservation dates before submitting them on oar. """ candidates = [] for node, status in nodes_status.items(): reservations = status.get("reservations", []) # we search for the overlapping reservations overlapping_reservations = [] for reservation in reservations: queue = reservation.get("queue") if queue == "besteffort": # ignoring any besteffort reservation continue r_start = reservation.get("started_at", reservation.get("scheduled_at")) if r_start is None: break r_start = int(r_start) r_end = r_start + int(reservation["walltime"]) # compute segment intersection _intersect = min(r_end, start + walltime) - max(r_start, start) if _intersect > 0: overlapping_reservations.append(reservation) if len(overlapping_reservations) == 0: # this node can be accounted for a potential reservation candidates.append(node) if len(candidates) >= nodes: return True return False
[ "def", "can_start_on_cluster", "(", "nodes_status", ",", "nodes", ",", "start", ",", "walltime", ")", ":", "candidates", "=", "[", "]", "for", "node", ",", "status", "in", "nodes_status", ".", "items", "(", ")", ":", "reservations", "=", "status", ".", "get", "(", "\"reservations\"", ",", "[", "]", ")", "# we search for the overlapping reservations", "overlapping_reservations", "=", "[", "]", "for", "reservation", "in", "reservations", ":", "queue", "=", "reservation", ".", "get", "(", "\"queue\"", ")", "if", "queue", "==", "\"besteffort\"", ":", "# ignoring any besteffort reservation", "continue", "r_start", "=", "reservation", ".", "get", "(", "\"started_at\"", ",", "reservation", ".", "get", "(", "\"scheduled_at\"", ")", ")", "if", "r_start", "is", "None", ":", "break", "r_start", "=", "int", "(", "r_start", ")", "r_end", "=", "r_start", "+", "int", "(", "reservation", "[", "\"walltime\"", "]", ")", "# compute segment intersection", "_intersect", "=", "min", "(", "r_end", ",", "start", "+", "walltime", ")", "-", "max", "(", "r_start", ",", "start", ")", "if", "_intersect", ">", "0", ":", "overlapping_reservations", ".", "append", "(", "reservation", ")", "if", "len", "(", "overlapping_reservations", ")", "==", "0", ":", "# this node can be accounted for a potential reservation", "candidates", ".", "append", "(", "node", ")", "if", "len", "(", "candidates", ")", ">=", "nodes", ":", "return", "True", "return", "False" ]
Check if #nodes can be started on a given cluster. This is intended to give a good enough approximation. This can be used to prefilter possible reservation dates before submitting them on OAR.
[ "Check", "if", "#nodes", "can", "be", "started", "on", "a", "given", "cluster", "." ]
python
train
40.083333
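A sketch of the nodes_status structure the function above expects, inferred from its body; node names and timestamps are illustrative.

from enoslib.infra.enos_g5k.g5k_api_utils import can_start_on_cluster

# Per-node reservation lists as the function reads them: each reservation
# carries a queue, a start time and a walltime (in seconds).
nodes_status = {
    "node-1": {"reservations": []},
    "node-2": {"reservations": [
        {"queue": "default", "started_at": 1000, "walltime": 3600},
    ]},
}

# Two nodes requested at t=5000 for one hour: the existing reservation
# spans 1000..4600 and does not overlap [5000, 8600), so the request fits.
print(can_start_on_cluster(nodes_status, nodes=2, start=5000, walltime=3600))  # True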
sunlightlabs/name-cleaver
name_cleaver/names.py
https://github.com/sunlightlabs/name-cleaver/blob/48d3838fd9521235bd1586017fa4b31236ffc88e/name_cleaver/names.py#L176-L242
def new_from_tokens(self, *args, **kwargs): """ Takes in a name that has been split by spaces. Names which are in [last, first] format need to be preprocessed. The nickname must be in double quotes to be recognized as such. This can take name parts in in these orders: first, middle, last, nick, suffix, honorific first, middle, last, nick, suffix first, middle, last, suffix, honorific first, middle, last, honorific first, middle, last, suffix first, middle, last, nick first, last, honorific first, last, suffix first, last, nick first, middle, last first, last last """ if kwargs.get('allow_quoted_nicknames'): args = [ x.strip() for x in args if not re.match(r'^[(]', x) ] else: args = [ x.strip() for x in args if not re.match(r'^[("]', x) ] if len(args) > 2: self.detect_and_fix_two_part_surname(args) # set defaults self.first = '' self.last = '' # the final few tokens should always be detectable, otherwise a last name if len(args): if self.is_an_honorific(args[-1]): self.honorific = args.pop() if not self.honorific[-1] == '.': self.honorific += '.' if self.is_a_suffix(args[-1]): self.suffix = args.pop() if re.match(r'[js]r(?!\.)', self.suffix, re.IGNORECASE): self.suffix += '.' if self.is_a_nickname(args[-1]): self.nick = args.pop() self.last = args.pop() num_remaining_parts = len(args) if num_remaining_parts == 3: # if we've still got this many parts, we'll consider what's left as first name # plus multi-part middle name self.first = args[0] self.middle = ' '.join(args[1:3]) elif num_remaining_parts == 2: self.first, self.middle = args if len(self.middle) == 1: self.middle += '.' elif num_remaining_parts == 1: self.first = ' '.join(args) if self.first and len(self.first) == 1: self.first += '.' return self
[ "def", "new_from_tokens", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'allow_quoted_nicknames'", ")", ":", "args", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "args", "if", "not", "re", ".", "match", "(", "r'^[(]'", ",", "x", ")", "]", "else", ":", "args", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "args", "if", "not", "re", ".", "match", "(", "r'^[(\"]'", ",", "x", ")", "]", "if", "len", "(", "args", ")", ">", "2", ":", "self", ".", "detect_and_fix_two_part_surname", "(", "args", ")", "# set defaults", "self", ".", "first", "=", "''", "self", ".", "last", "=", "''", "# the final few tokens should always be detectable, otherwise a last name", "if", "len", "(", "args", ")", ":", "if", "self", ".", "is_an_honorific", "(", "args", "[", "-", "1", "]", ")", ":", "self", ".", "honorific", "=", "args", ".", "pop", "(", ")", "if", "not", "self", ".", "honorific", "[", "-", "1", "]", "==", "'.'", ":", "self", ".", "honorific", "+=", "'.'", "if", "self", ".", "is_a_suffix", "(", "args", "[", "-", "1", "]", ")", ":", "self", ".", "suffix", "=", "args", ".", "pop", "(", ")", "if", "re", ".", "match", "(", "r'[js]r(?!\\.)'", ",", "self", ".", "suffix", ",", "re", ".", "IGNORECASE", ")", ":", "self", ".", "suffix", "+=", "'.'", "if", "self", ".", "is_a_nickname", "(", "args", "[", "-", "1", "]", ")", ":", "self", ".", "nick", "=", "args", ".", "pop", "(", ")", "self", ".", "last", "=", "args", ".", "pop", "(", ")", "num_remaining_parts", "=", "len", "(", "args", ")", "if", "num_remaining_parts", "==", "3", ":", "# if we've still got this many parts, we'll consider what's left as first name", "# plus multi-part middle name", "self", ".", "first", "=", "args", "[", "0", "]", "self", ".", "middle", "=", "' '", ".", "join", "(", "args", "[", "1", ":", "3", "]", ")", "elif", "num_remaining_parts", "==", "2", ":", "self", ".", "first", ",", "self", ".", "middle", "=", "args", "if", "len", "(", "self", ".", "middle", ")", "==", "1", ":", "self", ".", "middle", "+=", "'.'", "elif", "num_remaining_parts", "==", "1", ":", "self", ".", "first", "=", "' '", ".", "join", "(", "args", ")", "if", "self", ".", "first", "and", "len", "(", "self", ".", "first", ")", "==", "1", ":", "self", ".", "first", "+=", "'.'", "return", "self" ]
Takes in a name that has been split by spaces. Names which are in [last, first] format need to be preprocessed. The nickname must be in double quotes to be recognized as such. This can take name parts in these orders: first, middle, last, nick, suffix, honorific first, middle, last, nick, suffix first, middle, last, suffix, honorific first, middle, last, honorific first, middle, last, suffix first, middle, last, nick first, last, honorific first, last, suffix first, last, nick first, middle, last first, last last
[ "Takes", "in", "a", "name", "that", "has", "been", "split", "by", "spaces", ".", "Names", "which", "are", "in", "[", "last", "first", "]", "format", "need", "to", "be", "preprocessed", ".", "The", "nickname", "must", "be", "in", "double", "quotes", "to", "be", "recognized", "as", "such", "." ]
python
train
34.537313
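A hedged sketch of the token-based constructor above; PersonName is assumed to be the enclosing class in name_cleaver/names.py, and its no-argument construction is also an assumption.

from name_cleaver.names import PersonName  # class name is an assumption

name = PersonName().new_from_tokens("John", "Quincy", "Adams", "Jr.")
# Per the parsing rules above: the suffix is detected first, then the last
# name, and the remaining two tokens become first and middle names.
print(name.first, name.middle, name.last, name.suffix)  # John Quincy Adams Jr.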
rocky/python-uncompyle6
uncompyle6/semantics/linemap.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/semantics/linemap.py#L25-L35
def write(self, *data): """Augment write routine to keep track of current line""" for l in data: ## print("XXX write: '%s'" % l) for i in str(l): if i == '\n': self.current_line_number += 1 pass pass pass return super(LineMapWalker, self).write(*data)
[ "def", "write", "(", "self", ",", "*", "data", ")", ":", "for", "l", "in", "data", ":", "## print(\"XXX write: '%s'\" % l)", "for", "i", "in", "str", "(", "l", ")", ":", "if", "i", "==", "'\\n'", ":", "self", ".", "current_line_number", "+=", "1", "pass", "pass", "pass", "return", "super", "(", "LineMapWalker", ",", "self", ")", ".", "write", "(", "*", "data", ")" ]
Augment write routine to keep track of current line
[ "Augment", "write", "routine", "to", "keep", "track", "of", "current", "line" ]
python
train
33.909091
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L3935-L3940
def remove_not_valid_selections(self): """ update the value to remove any that are (no longer) valid """ value = [v for v in self.get_value() if self.valid_selection(v)] self.set_value(value)
[ "def", "remove_not_valid_selections", "(", "self", ")", ":", "value", "=", "[", "v", "for", "v", "in", "self", ".", "get_value", "(", ")", "if", "self", ".", "valid_selection", "(", "v", ")", "]", "self", ".", "set_value", "(", "value", ")" ]
update the value to remove any that are (no longer) valid
[ "update", "the", "value", "to", "remove", "any", "that", "are", "(", "no", "longer", ")", "valid" ]
python
train
37.666667
mjirik/io3d
io3d/datawriter.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L208-L237
def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays): """ Function make 3D data from dicom file slices :dcmfilelist list of sorted .dcm files :overlays dictionary of binary overlays. {1:np.array([...]), 3:...} :out_dir output directory """ dcmlist = dcmfilelist # data3d = [] for i in range(len(dcmlist)): onefile = dcmlist[i] logger.info(onefile) data = dicom.read_file(onefile) for i_overlay in overlays.keys(): overlay3d = overlays[i_overlay] data = self.encode_overlay_slice(data, overlay3d[-1 - i, :, :], i_overlay) # construct output path head, tail = os.path.split(os.path.normpath(onefile)) filename_out = os.path.join(out_dir, tail) # save data.save_as(filename_out)
[ "def", "DataCopyWithOverlay", "(", "self", ",", "dcmfilelist", ",", "out_dir", ",", "overlays", ")", ":", "dcmlist", "=", "dcmfilelist", "# data3d = []", "for", "i", "in", "range", "(", "len", "(", "dcmlist", ")", ")", ":", "onefile", "=", "dcmlist", "[", "i", "]", "logger", ".", "info", "(", "onefile", ")", "data", "=", "dicom", ".", "read_file", "(", "onefile", ")", "for", "i_overlay", "in", "overlays", ".", "keys", "(", ")", ":", "overlay3d", "=", "overlays", "[", "i_overlay", "]", "data", "=", "self", ".", "encode_overlay_slice", "(", "data", ",", "overlay3d", "[", "-", "1", "-", "i", ",", ":", ",", ":", "]", ",", "i_overlay", ")", "# construct output path", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "os", ".", "path", ".", "normpath", "(", "onefile", ")", ")", "filename_out", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "tail", ")", "# save", "data", ".", "save_as", "(", "filename_out", ")" ]
Copies DICOM slice files to an output directory, encoding the given binary overlays into each slice. :dcmfilelist list of sorted .dcm files :overlays dictionary of binary overlays. {1:np.array([...]), 3:...} :out_dir output directory
[ "Function", "make", "3D", "data", "from", "dicom", "file", "slices" ]
python
train
32
gwastro/pycbc
pycbc/io/hdf.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L241-L258
def cluster(self, window): """ Cluster the dict array, assuming it has the relevant Coinc colums, time1, time2, stat, and timeslide_id """ # If no events, do nothing pivot_ifo = self.attrs['pivot'] fixed_ifo = self.attrs['fixed'] if len(self.data['%s/time' % pivot_ifo]) == 0 or len(self.data['%s/time' % fixed_ifo]) == 0: return self from pycbc.events import cluster_coincs interval = self.attrs['timeslide_interval'] cid = cluster_coincs(self.stat, self.data['%s/time' % pivot_ifo], self.data['%s/time' % fixed_ifo], self.timeslide_id, interval, window) return self.select(cid)
[ "def", "cluster", "(", "self", ",", "window", ")", ":", "# If no events, do nothing", "pivot_ifo", "=", "self", ".", "attrs", "[", "'pivot'", "]", "fixed_ifo", "=", "self", ".", "attrs", "[", "'fixed'", "]", "if", "len", "(", "self", ".", "data", "[", "'%s/time'", "%", "pivot_ifo", "]", ")", "==", "0", "or", "len", "(", "self", ".", "data", "[", "'%s/time'", "%", "fixed_ifo", "]", ")", "==", "0", ":", "return", "self", "from", "pycbc", ".", "events", "import", "cluster_coincs", "interval", "=", "self", ".", "attrs", "[", "'timeslide_interval'", "]", "cid", "=", "cluster_coincs", "(", "self", ".", "stat", ",", "self", ".", "data", "[", "'%s/time'", "%", "pivot_ifo", "]", ",", "self", ".", "data", "[", "'%s/time'", "%", "fixed_ifo", "]", ",", "self", ".", "timeslide_id", ",", "interval", ",", "window", ")", "return", "self", ".", "select", "(", "cid", ")" ]
Cluster the dict array, assuming it has the relevant Coinc columns: time1, time2, stat, and timeslide_id
[ "Cluster", "the", "dict", "array", "assuming", "it", "has", "the", "relevant", "Coinc", "colums", "time1", "time2", "stat", "and", "timeslide_id" ]
python
train
44.833333
thriftrw/thriftrw-python
thriftrw/idl/parser.py
https://github.com/thriftrw/thriftrw-python/blob/4f2f71acd7a0ac716c9ea5cdcea2162aa561304a/thriftrw/idl/parser.py#L76-L78
def p_namespace(self, p): '''namespace : NAMESPACE namespace_scope IDENTIFIER''' p[0] = ast.Namespace(scope=p[2], name=p[3], lineno=p.lineno(1))
[ "def", "p_namespace", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Namespace", "(", "scope", "=", "p", "[", "2", "]", ",", "name", "=", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
namespace : NAMESPACE namespace_scope IDENTIFIER
[ "namespace", ":", "NAMESPACE", "namespace_scope", "IDENTIFIER" ]
python
train
52.666667
singularityhub/sregistry-cli
sregistry/main/s3/query.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/s3/query.py#L41-L77
def search_all(self, quiet=False): '''a "show all" search that doesn't require a query Parameters ========== quiet: if quiet is True, we only are using the function to return rows of results. ''' results = [] for obj in self.bucket.objects.all(): subsrc = obj.Object() # Metadata bug will capitalize all fields, workaround is to lowercase # https://github.com/boto/boto3/issues/1709 metadata = dict((k.lower(), v) for k, v in subsrc.metadata.items()) size = '' # MM-DD-YYYY datestr = "%s-%s-%s" %(obj.last_modified.month, obj.last_modified.day, obj.last_modified.year) if 'sizemb' in metadata: size = '%sMB' % metadata['sizemb'] results.append([obj.key, datestr, size ]) if len(results) == 0: bot.info("No container collections found.") sys.exit(1) if not quiet: bot.info("Containers") bot.table(results) return results
[ "def", "search_all", "(", "self", ",", "quiet", "=", "False", ")", ":", "results", "=", "[", "]", "for", "obj", "in", "self", ".", "bucket", ".", "objects", ".", "all", "(", ")", ":", "subsrc", "=", "obj", ".", "Object", "(", ")", "# Metadata bug will capitalize all fields, workaround is to lowercase", "# https://github.com/boto/boto3/issues/1709", "metadata", "=", "dict", "(", "(", "k", ".", "lower", "(", ")", ",", "v", ")", "for", "k", ",", "v", "in", "subsrc", ".", "metadata", ".", "items", "(", ")", ")", "size", "=", "''", "# MM-DD-YYYY", "datestr", "=", "\"%s-%s-%s\"", "%", "(", "obj", ".", "last_modified", ".", "month", ",", "obj", ".", "last_modified", ".", "day", ",", "obj", ".", "last_modified", ".", "year", ")", "if", "'sizemb'", "in", "metadata", ":", "size", "=", "'%sMB'", "%", "metadata", "[", "'sizemb'", "]", "results", ".", "append", "(", "[", "obj", ".", "key", ",", "datestr", ",", "size", "]", ")", "if", "len", "(", "results", ")", "==", "0", ":", "bot", ".", "info", "(", "\"No container collections found.\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "quiet", ":", "bot", ".", "info", "(", "\"Containers\"", ")", "bot", ".", "table", "(", "results", ")", "return", "results" ]
a "show all" search that doesn't require a query Parameters ========== quiet: if quiet is True, we only are using the function to return rows of results.
[ "a", "show", "all", "search", "that", "doesn", "t", "require", "a", "query", "Parameters", "==========", "quiet", ":", "if", "quiet", "is", "True", "we", "only", "are", "using", "the", "function", "to", "return", "rows", "of", "results", "." ]
python
test
27.864865
gwastro/pycbc
docs/_include/inference_io_inheritance_diagrams.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/docs/_include/inference_io_inheritance_diagrams.py#L8-L12
def get_topclasses(cls): """Gets the base classes that are in pycbc.""" bases = [c for c in inspect.getmro(cls) if c.__module__.startswith('pycbc') and c != cls] return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases])
[ "def", "get_topclasses", "(", "cls", ")", ":", "bases", "=", "[", "c", "for", "c", "in", "inspect", ".", "getmro", "(", "cls", ")", "if", "c", ".", "__module__", ".", "startswith", "(", "'pycbc'", ")", "and", "c", "!=", "cls", "]", "return", "', '", ".", "join", "(", "[", "'{}.{}'", ".", "format", "(", "c", ".", "__module__", ",", "c", ".", "__name__", ")", "for", "c", "in", "bases", "]", ")" ]
Gets the base classes that are in pycbc.
[ "Gets", "the", "base", "classes", "that", "are", "in", "pycbc", "." ]
python
train
51.6
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L2389-L2440
def linear_set_layer(layer_size, inputs, context=None, activation_fn=tf.nn.relu, dropout=0.0, name=None): """Basic layer type for doing funky things with sets. Applies a linear transformation to each element in the input set. If a context is supplied, it is concatenated with the inputs. e.g. One can use global_pool_1d to get a representation of the set which can then be used as the context for the next layer. TODO: Add bias add (or control the biases used). Args: layer_size: Dimension to transform the input vectors to. inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. context: A tensor of shape [batch_size, context_dims] containing a global statistic about the set. activation_fn: The activation function to use. dropout: Dropout probability. name: name. Returns: Tensor of shape [batch_size, sequence_length, output_dims] containing the sequences of transformed vectors. """ with tf.variable_scope( name, default_name="linear_set_layer", values=[inputs]): # Apply 1D convolution to apply linear filter to each element # along the 2nd dimension. outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv") # Apply the context if it exists. if context is not None: # Unfortunately tf doesn't support broadcasting via concat, but we can # simply add the transformed context to get the same effect. if len(context.get_shape().as_list()) == 2: context = tf.expand_dims(context, axis=1) cont_tfm = conv1d( context, layer_size, 1, activation=None, name="cont_conv") outputs += cont_tfm if activation_fn is not None: outputs = activation_fn(outputs) if dropout != 0.0: outputs = tf.nn.dropout(outputs, 1.0 - dropout) return outputs
[ "def", "linear_set_layer", "(", "layer_size", ",", "inputs", ",", "context", "=", "None", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "dropout", "=", "0.0", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"linear_set_layer\"", ",", "values", "=", "[", "inputs", "]", ")", ":", "# Apply 1D convolution to apply linear filter to each element", "# along the 2nd dimension.", "outputs", "=", "conv1d", "(", "inputs", ",", "layer_size", ",", "1", ",", "activation", "=", "None", ",", "name", "=", "\"set_conv\"", ")", "# Apply the context if it exists.", "if", "context", "is", "not", "None", ":", "# Unfortunately tf doesn't support broadcasting via concat, but we can", "# simply add the transformed context to get the same effect.", "if", "len", "(", "context", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", "==", "2", ":", "context", "=", "tf", ".", "expand_dims", "(", "context", ",", "axis", "=", "1", ")", "cont_tfm", "=", "conv1d", "(", "context", ",", "layer_size", ",", "1", ",", "activation", "=", "None", ",", "name", "=", "\"cont_conv\"", ")", "outputs", "+=", "cont_tfm", "if", "activation_fn", "is", "not", "None", ":", "outputs", "=", "activation_fn", "(", "outputs", ")", "if", "dropout", "!=", "0.0", ":", "outputs", "=", "tf", ".", "nn", ".", "dropout", "(", "outputs", ",", "1.0", "-", "dropout", ")", "return", "outputs" ]
Basic layer type for doing funky things with sets. Applies a linear transformation to each element in the input set. If a context is supplied, it is concatenated with the inputs. e.g. One can use global_pool_1d to get a representation of the set which can then be used as the context for the next layer. TODO: Add bias add (or control the biases used). Args: layer_size: Dimension to transform the input vectors to. inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. context: A tensor of shape [batch_size, context_dims] containing a global statistic about the set. activation_fn: The activation function to use. dropout: Dropout probability. name: name. Returns: Tensor of shape [batch_size, sequence_length, output_dims] containing the sequences of transformed vectors.
[ "Basic", "layer", "type", "for", "doing", "funky", "things", "with", "sets", "." ]
python
train
37.211538
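A shape-level usage sketch of the layer above, assuming a TF1-style graph as in the surrounding library; tensor contents are random and purely illustrative.

import tensorflow as tf
from tensor2tensor.layers.common_layers import linear_set_layer

# A set of 7 element vectors per example, plus one global context vector.
inputs = tf.random_uniform([4, 7, 16])   # [batch_size, sequence_length, input_dims]
context = tf.random_uniform([4, 8])      # [batch_size, context_dims]

# The same linear map is applied to every set element; the transformed context
# is broadcast-added to each position. Result shape: [4, 7, 32].
outputs = linear_set_layer(32, inputs, context=context, dropout=0.1)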
JarryShaw/PyPCAPKit
src/protocols/internet/hip.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L505-L553
def _read_para_puzzle(self, code, cbit, clen, *, desc, length, version): """Read HIP PUZZLE parameter. Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | #K, 1 byte | Lifetime | Opaque, 2 bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Random #I, RHASH_len / 8 bytes | / / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 puzzle.type Parameter Type 1 15 puzzle.critical Critical Bit 2 16 puzzle.length Length of Contents 4 32 puzzle.number Number of Verified Bits 5 40 puzzle.lifetime Lifetime 6 48 puzzle.opaque Opaque 8 64 puzzle.random Random Number """ if version == 1 and clen != 12: raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format') _numk = self._read_unpack(1) _time = self._read_unpack(1) _opak = self._read_fileng(2) _rand = self._read_unpack(clen-4) puzzle = dict( type=desc, critical=cbit, length=clen, number=_numk, lifetime=2 ** (_time - 32), opaque=_opak, random=_rand, ) _plen = length - clen if _plen: self._read_fileng(_plen) return puzzle
[ "def", "_read_para_puzzle", "(", "self", ",", "code", ",", "cbit", ",", "clen", ",", "*", ",", "desc", ",", "length", ",", "version", ")", ":", "if", "version", "==", "1", "and", "clen", "!=", "12", ":", "raise", "ProtocolError", "(", "f'HIPv{version}: [Parano {code}] invalid format'", ")", "_numk", "=", "self", ".", "_read_unpack", "(", "1", ")", "_time", "=", "self", ".", "_read_unpack", "(", "1", ")", "_opak", "=", "self", ".", "_read_fileng", "(", "2", ")", "_rand", "=", "self", ".", "_read_unpack", "(", "clen", "-", "4", ")", "puzzle", "=", "dict", "(", "type", "=", "desc", ",", "critical", "=", "cbit", ",", "length", "=", "clen", ",", "number", "=", "_numk", ",", "lifetime", "=", "2", "**", "(", "_time", "-", "32", ")", ",", "opaque", "=", "_opak", ",", "random", "=", "_rand", ",", ")", "_plen", "=", "length", "-", "clen", "if", "_plen", ":", "self", ".", "_read_fileng", "(", "_plen", ")", "return", "puzzle" ]
Read HIP PUZZLE parameter. Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | #K, 1 byte | Lifetime | Opaque, 2 bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Random #I, RHASH_len / 8 bytes | / / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 puzzle.type Parameter Type 1 15 puzzle.critical Critical Bit 2 16 puzzle.length Length of Contents 4 32 puzzle.number Number of Verified Bits 5 40 puzzle.lifetime Lifetime 6 48 puzzle.opaque Opaque 8 64 puzzle.random Random Number
[ "Read", "HIP", "PUZZLE", "parameter", "." ]
python
train
42.755102
equinor/segyio
python/segyio/segy.py
https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/segy.py#L573-L603
def xline(self): """ Interact with segy in crossline mode Returns ------- xline : Line or None Raises ------ ValueError If the file is unstructured Notes ----- .. versionadded:: 1.1 """ if self.unstructured: raise ValueError(self._unstructured_errmsg) if self._xline is not None: return self._xline self._xline = Line(self, self.xlines, self._xline_length, self._xline_stride, self.offsets, 'crossline', ) return self._xline
[ "def", "xline", "(", "self", ")", ":", "if", "self", ".", "unstructured", ":", "raise", "ValueError", "(", "self", ".", "_unstructured_errmsg", ")", "if", "self", ".", "_xline", "is", "not", "None", ":", "return", "self", ".", "_xline", "self", ".", "_xline", "=", "Line", "(", "self", ",", "self", ".", "xlines", ",", "self", ".", "_xline_length", ",", "self", ".", "_xline_stride", ",", "self", ".", "offsets", ",", "'crossline'", ",", ")", "return", "self", ".", "_xline" ]
Interact with segy in crossline mode Returns ------- xline : Line or None Raises ------ ValueError If the file is unstructured Notes ----- .. versionadded:: 1.1
[ "Interact", "with", "segy", "in", "crossline", "mode" ]
python
train
23.354839
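A hedged usage sketch of crossline access; the file name is a placeholder and the file must be a structured (post-stack) SEG-Y volume.

import segyio

with segyio.open("volume.sgy") as f:        # "volume.sgy" is illustrative
    first_xl = f.xlines[0]                  # available crossline numbers
    data = f.xline[first_xl]                # 2-D slice: one trace per inline
    print(data.shape)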
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L569-L591
def makeelement(E, tagname, **kwargs): """Internal function""" if sys.version < '3': try: kwargs2 = {} for k,v in kwargs.items(): kwargs2[k.encode('utf-8')] = v.encode('utf-8') #return E._makeelement(tagname.encode('utf-8'), **{ k.encode('utf-8'): v.encode('utf-8') for k,v in kwargs.items() } ) #In one go fails on some older Python 2.6s return E._makeelement(tagname.encode('utf-8'), **kwargs2 ) #pylint: disable=protected-access except ValueError as e: try: #older versions of lxml may misbehave, compensate: e = E._makeelement(tagname.encode('utf-8')) #pylint: disable=protected-access for k,v in kwargs.items(): e.attrib[k.encode('utf-8')] = v return e except ValueError: print(e,file=stderr) print("tagname=",tagname,file=stderr) print("kwargs=",kwargs,file=stderr) raise e else: return E._makeelement(tagname,**kwargs)
[ "def", "makeelement", "(", "E", ",", "tagname", ",", "*", "*", "kwargs", ")", ":", "if", "sys", ".", "version", "<", "'3'", ":", "try", ":", "kwargs2", "=", "{", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "kwargs2", "[", "k", ".", "encode", "(", "'utf-8'", ")", "]", "=", "v", ".", "encode", "(", "'utf-8'", ")", "#return E._makeelement(tagname.encode('utf-8'), **{ k.encode('utf-8'): v.encode('utf-8') for k,v in kwargs.items() } ) #In one go fails on some older Python 2.6s", "return", "E", ".", "_makeelement", "(", "tagname", ".", "encode", "(", "'utf-8'", ")", ",", "*", "*", "kwargs2", ")", "#pylint: disable=protected-access", "except", "ValueError", "as", "e", ":", "try", ":", "#older versions of lxml may misbehave, compensate:", "e", "=", "E", ".", "_makeelement", "(", "tagname", ".", "encode", "(", "'utf-8'", ")", ")", "#pylint: disable=protected-access", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "e", ".", "attrib", "[", "k", ".", "encode", "(", "'utf-8'", ")", "]", "=", "v", "return", "e", "except", "ValueError", ":", "print", "(", "e", ",", "file", "=", "stderr", ")", "print", "(", "\"tagname=\"", ",", "tagname", ",", "file", "=", "stderr", ")", "print", "(", "\"kwargs=\"", ",", "kwargs", ",", "file", "=", "stderr", ")", "raise", "e", "else", ":", "return", "E", ".", "_makeelement", "(", "tagname", ",", "*", "*", "kwargs", ")" ]
Internal function
[ "Internal", "function" ]
python
train
47.043478
awslabs/sockeye
sockeye/image_captioning/utils.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/image_captioning/utils.py#L170-L198
def zero_pad_features(features: List[np.ndarray], target_shape: tuple) -> List[np.ndarray]: """ Zero pad to numpy array. :param features: List of numpy arrays. :param target_shape: Target shape of each numpy array in the list feat. Note: target_shape should be greater that the largest shapes in feat. :return: A list of padded numpy arrays. """ pad_features = [] for feature in features: feature_shape = feature.shape if len(feature_shape) < len(target_shape): # add extra dimensions for i in range(len(target_shape) - len(feature_shape)): feature = np.expand_dims(feature, axis=len(feature.shape) + 1) feature_shape = feature.shape elif len(feature_shape) > len(target_shape): raise ValueError("Provided target shape must be bigger then the original " "shape. (provided: {}, original {})".format(len(target_shape), len(feature_shape))) diff_shape = np.subtract(target_shape, feature_shape) # pylint: disable=assignment-from-no-return if np.any(diff_shape < 0): raise ValueError("Provided target values must be bigger then the original " "values for each dimension. (provided: {}, original {})".format(target_shape, feature_shape)) # pad format: ((before_1, after_1), ... (before_N, after_N)) diff_shape = [[0, d] for d in diff_shape] # pylint: disable=not-an-iterable p = np.pad(feature, diff_shape, 'constant', constant_values=0) pad_features.append(p) return pad_features
[ "def", "zero_pad_features", "(", "features", ":", "List", "[", "np", ".", "ndarray", "]", ",", "target_shape", ":", "tuple", ")", "->", "List", "[", "np", ".", "ndarray", "]", ":", "pad_features", "=", "[", "]", "for", "feature", "in", "features", ":", "feature_shape", "=", "feature", ".", "shape", "if", "len", "(", "feature_shape", ")", "<", "len", "(", "target_shape", ")", ":", "# add extra dimensions", "for", "i", "in", "range", "(", "len", "(", "target_shape", ")", "-", "len", "(", "feature_shape", ")", ")", ":", "feature", "=", "np", ".", "expand_dims", "(", "feature", ",", "axis", "=", "len", "(", "feature", ".", "shape", ")", "+", "1", ")", "feature_shape", "=", "feature", ".", "shape", "elif", "len", "(", "feature_shape", ")", ">", "len", "(", "target_shape", ")", ":", "raise", "ValueError", "(", "\"Provided target shape must be bigger then the original \"", "\"shape. (provided: {}, original {})\"", ".", "format", "(", "len", "(", "target_shape", ")", ",", "len", "(", "feature_shape", ")", ")", ")", "diff_shape", "=", "np", ".", "subtract", "(", "target_shape", ",", "feature_shape", ")", "# pylint: disable=assignment-from-no-return", "if", "np", ".", "any", "(", "diff_shape", "<", "0", ")", ":", "raise", "ValueError", "(", "\"Provided target values must be bigger then the original \"", "\"values for each dimension. (provided: {}, original {})\"", ".", "format", "(", "target_shape", ",", "feature_shape", ")", ")", "# pad format: ((before_1, after_1), ... (before_N, after_N))", "diff_shape", "=", "[", "[", "0", ",", "d", "]", "for", "d", "in", "diff_shape", "]", "# pylint: disable=not-an-iterable", "p", "=", "np", ".", "pad", "(", "feature", ",", "diff_shape", ",", "'constant'", ",", "constant_values", "=", "0", ")", "pad_features", ".", "append", "(", "p", ")", "return", "pad_features" ]
Zero pad to numpy array. :param features: List of numpy arrays. :param target_shape: Target shape of each numpy array in the list feat. Note: target_shape should be greater that the largest shapes in feat. :return: A list of padded numpy arrays.
[ "Zero", "pad", "to", "numpy", "array", "." ]
python
train
56.068966
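A minimal usage sketch for the zero_pad_features helper above, assuming NumPy is available and the function is imported from the module shown in the record (sockeye.image_captioning.utils); the arrays and target shape are illustrative.

import numpy as np
from sockeye.image_captioning.utils import zero_pad_features

# two feature maps of different spatial sizes
features = [np.ones((2, 3)), np.ones((4, 5))]

# pad both up to the largest shape; smaller arrays are zero-padded at the end of each axis
padded = zero_pad_features(features, target_shape=(4, 5))

print([p.shape for p in padded])  # [(4, 5), (4, 5)]
print(padded[0][2:, :].sum())     # 0.0 -- the appended region is all zeros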
not-na/peng3d
peng3d/gui/widgets.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/widgets.py#L246-L257
def clickable(self): """ Property used for determining if the widget should be clickable by the user. This is only true if the submenu of this widget is active and this widget is enabled. The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute. """ if not isinstance(self.submenu,Container): return self.submenu.name == self.submenu.menu.activeSubMenu and self.submenu.menu.name == self.window.activeMenu and self.enabled else: return self.submenu.clickable and self.enabled
[ "def", "clickable", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "submenu", ",", "Container", ")", ":", "return", "self", ".", "submenu", ".", "name", "==", "self", ".", "submenu", ".", "menu", ".", "activeSubMenu", "and", "self", ".", "submenu", ".", "menu", ".", "name", "==", "self", ".", "window", ".", "activeMenu", "and", "self", ".", "enabled", "else", ":", "return", "self", ".", "submenu", ".", "clickable", "and", "self", ".", "enabled" ]
Property used for determining if the widget should be clickable by the user. This is only true if the submenu of this widget is active and this widget is enabled. The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute.
[ "Property", "used", "for", "determining", "if", "the", "widget", "should", "be", "clickable", "by", "the", "user", ".", "This", "is", "only", "true", "if", "the", "submenu", "of", "this", "widget", "is", "active", "and", "this", "widget", "is", "enabled", ".", "The", "widget", "may", "be", "either", "disabled", "by", "setting", "this", "property", "or", "the", ":", "py", ":", "attr", ":", "enabled", "attribute", "." ]
python
test
50
chinapnr/fishbase
fishbase/fish_random.py
https://github.com/chinapnr/fishbase/blob/23c5147a6bc0d8ed36409e55352ffb2c5b0edc82/fishbase/fish_random.py#L528-L619
def gen_random_company_name(): """ 随机生成一个公司名称 :returns: * company_name: (string) 银行名称 举例如下:: print('--- gen_random_company_name demo ---') print(gen_random_company_name()) print('---') 输出结果:: --- gen_random_company_name demo --- 上海大升旅游质询有限责任公司 --- """ region_info = ("北京,上海,广州,深圳,天津,成都,杭州,苏州,重庆,武汉,南京,大连,沈阳,长沙,郑州,西安,青岛," "无锡,济南,宁波,佛山,南通,哈尔滨,东莞,福州,长春,石家庄,烟台,合肥,唐山,常州,太原,昆明," "潍坊,南昌,泉州,温州,绍兴,嘉兴,厦门,贵阳,淄博,徐州,南宁,扬州,呼和浩特,鄂尔多斯,乌鲁木齐," "金华,台州,镇江,威海,珠海,东营,大庆,中山,盐城,包头,保定,济宁,泰州,廊坊,兰州,洛阳,宜昌," "沧州,临沂,泰安,鞍山,邯郸,惠州,江门,襄阳,湖州,吉林,芜湖,德州,聊城,漳州,株洲,淮安,榆林," "常德,咸阳,衡阳,滨州,柳州,遵义,菏泽,南阳,新乡,湛江,岳阳,郴州,许昌,连云港,枣庄,茂名,周口," "宿迁") middle_word = ("泰宏本晶辉昌昌本同永康洪皇贵久圣正裕如恒长佳协义晶合优荣汇洪千东祥复昌皇久丰兴昌国裕亚大" "荣康通仁元裕厚瑞如弘升久隆旺吉德谦长贵百久汇百伟升隆复飞佳隆浩发丰亨公荣复光福美禄欣丰大" "祥晶宏中仁宏华隆盈旺仁顺春满美中谦瑞和圣多信合盛千亚晶祥鑫隆飞鑫优合本旺发久国汇百恒佳东" "洪通恒大公中优广宝盈泰如合丰捷本伟华春元亚广中晶如浩仁汇亚永凯富富裕茂华中飞浩台美佳圣仁" "成全润金庆百贵康仁茂皇东广荣宏荣新元康公升亨洪福伟永义巨国升进合耀巨润巨元发洪源寿仁发光" "顺升凯全全辉欣成公裕康合禄兴汇顺浩贵晶捷东飞益福宏国禄元昌弘和满发巨宝生耀隆大欣昌佳本兴" "吉生宝凯润新高和元亨巨久光益旺春巨鑫进东晶中飞兴中美丰同晶复耀进洪全兴汇宝捷伟仁安宏多庆" "益生和干干福亚新复吉亚恒亚春德飞伟利庆华丰宏合德瑞进顺祥信合康富益全巨茂台谦厚台成福捷浩" "信长飞长金利美昌满丰干佳美金洪昌富千和美旺旺晶春仁华中凯浩鼎泰辉新干高进辉同欣广庆吉益德" "浩中润和春元生高进皇茂利同盈复复晶多巨圣弘捷公宝汇鑫成高新正和和巨祥光宏大丰欣恒昌昌厚合" "庆泰丰干益和金洪复元顺捷金万辉全吉庆德瑞优长鼎顺汇顺欣飞浩荣祥光泰多春凯信进公优飞昌协美" "多发中盈协成祥益昌汇泰春满千鼎东光优谦仁中飞生恒伟福晶宝信辉金皇升飞亨鑫安伟华元旺益大寿" "皇元康耀久荣满协信凯谦宝巨丰正光发康康捷中源国多多康公利顺光辉如茂晶永大高成生裕裕和万干" "飞全洪伟同发禄升欣盈高欣谦亨裕康宝复庆光皇源凯凯圣发东本辉寿捷茂和庆丰多宏亚万益公福捷升" "福茂宝捷同复合隆中汇禄鑫中新德昌新大皇安东信瑞元皇皇洪瑞弘捷本鑫中亨亚广昌永宏润同成高利" "台中生如百康旺巨福德春元通国成浩永康泰盛泰利生茂巨久昌佳复富隆通盈同庆皇顺如辉全旺捷皇长" "全富广源恒鼎顺汇本百洪鼎进欣吉凯汇欣义东长禄捷浩益旺复弘昌生发伟荣高亨元聚广新复多富千中" "兴佳升康成同贵宝生捷晶全泰全永旺发富康仁兴谦利茂亨洪佳洪元鼎全国本丰亨鑫弘富干寿春贵国成" "盛大发久弘国大金生久高久益浩晶盈益瑞正丰百浩泰台合德昌昌美皇合隆裕东广亚国升益福旺高贵信" "生汇多泰元厚瑞飞千顺盛如大德润新新顺润飞瑞优源宏千盛吉高大耀进信欣信利瑞荣升亨盈盛千合复" "隆贵丰义公优荣宏广福华洪洪捷吉进盛盛裕国洪浩祥晶弘吉欣鼎德佳成和满台光复汇佳通浩昌欣康瑞" "万亚谦兴福利千元皇瑞润禄信合长润捷中旺成金益公隆宏康亚禄隆通光广国义中优多富复盛庆千长永" "国源安永千中正康发复协利皇亚协鑫义巨源中润旺高进巨新高协兴生福恒富国协捷盛同复巨千益长洪" "亚欣美复康洪全高安进千汇通益美耀美台耀万康合洪禄中宏百凯华优鑫协泰兴裕欣进安茂丰光飞全飞" "高康进同大洪永祥飞美满兴丰谦和鑫贵百洪通裕升干永升亨光德盛永金东鼎永裕佳和德仁荣辉同瑞恒" "聚谦长广鑫金久庆国吉禄弘顺汇恒汇瑞隆洪光鼎复公鼎泰盛佳恒鼎中飞聚亚宏盈光安谦成合巨洪飞庆" "久瑞正茂信协百生盛合国圣盛同同盈信宏禄仁大中皇宝德金台优长成成亚盛公美荣成昌久禄泰亚进台" "辉佳凯安久本荣飞晶隆晶弘同丰辉华高光兴庆贵如耀飞仁宏欣皇洪宏金满鼎耀巨义德昌源中洪裕祥晶" "本国金洪昌金源恒福万义久多谦高佳欣和凯本泰春贵大浩永寿昌禄金弘仁美久升亨辉久茂皇弘泰德成" "宏美辉辉禄仁华晶春干圣长同耀光庆华晶生新辉鑫金满中千谦瑞祥昌茂复长新祥祥福同优佳恒千如兴" "裕华凯康全贵巨旺祥捷厚贵富宏义盛谦同盛同益谦润东广千进辉升复昌聚吉飞飞元公台本华升美久长" "庆亚升东正高弘亚庆和寿宏满万优伟浩新合聚庆万广寿东恒光圣润同高谦昌兴义仁安本捷公进康益金" "庆正进正千辉和升本益高广中百新庆金同如鼎寿茂鼎庆茂瑞全禄辉美贵优丰益同信兴聚浩新协宝耀圣" "晶盈飞安荣富千祥成源裕合兴佳裕旺金长禄亨本大德成亨皇通全华贵弘成福聚信福光盛丰满宏福益国" "弘生弘源新万泰成生伟兴兴辉和大元和协通千宝协伟荣长禄晶盛欣隆新本复正盛和皇升万益高盈义裕" "成仁巨弘千亚耀吉庆厚国新高利和润中捷亚信百合亨佳佳多信鑫永复公千佳捷元东宝协大贵本满泰长" "协耀圣仁旺生干盛恒义多宏益协润长皇伟晶茂大辉谦多台高恒巨兴辉台华升满公升成元利利厚隆裕厚" "高公通浩凯金皇庆新发宏大本谦升欣升华益巨益百辉亨辉成欣庆同晶瑞义久成佳利优进满康信盈东盛" "华义公贵美宝信丰正谦旺华皇吉如鑫泰协全优福寿中生厚成生亚公弘顺千信祥和圣金华康德台顺全厚" "协亨美万瑞美东飞万飞如长仁高全汇升宏利吉泰益发谦亚汇亚恒耀恒飞浩益通捷亨新恒百佳中成公圣" "宏满鑫成旺禄元福凯百永东源庆耀万鼎公春昌广润全聚德旺洪隆宝伟亨合满隆进升盛东正新多进浩康" "长合大耀和美厚如寿鑫禄德仁发庆光通义荣盈昌升荣优华国成欣大宏丰光亚复万光春鼎汇旺和辉辉伟" "捷汇通寿耀益皇盛晶隆义同合益春通万飞弘如安信本利安复协庆吉新永久公鑫广同富源公宏台长辉耀" "光千佳宝康祥盛富升顺亚吉皇美润仁广仁台瑞干隆美信优伟安生如成耀盛润升正升新公荣宏恒洪圣泰" "弘升美益顺隆大生新茂复丰亚华恒仁弘富公美昌干永满汇如洪昌荣飞新谦万百丰进宝禄贵千生进大润" "禄祥公金祥聚兴和旺盈晶百义协巨顺裕中发千辉亨美本元丰金盈盛新全国源和协富谦发万耀福大发浩" "隆正宏升弘旺长德百发鼎金满春新成新台正弘润晶大盈茂厚富泰通厚协百源复广恒欣合圣本巨复多正" "伟润高满凯仁凯高禄万本复信满德升茂金如富谦旺佳美盈千发宝禄进兴鼎丰圣广公进昌东润进优祥生" "辉茂安顺正伟圣宝优庆厚新益亚鑫皇浩兴顺多生寿金益千丰旺义东光庆泰全协吉兴千瑞丰兴茂泰庆捷" "丰升弘茂鼎润复永发多成美聚福贵合光亚聚庆大大万顺贵进光国顺飞耀佳合巨洪源祥聚百汇兴本洪荣" "利春庆协成昌瑞同厚春百光国如升同仁佳合成复凯佳汇升鼎宝宝进洪和信昌康润源圣巨康同欣浩辉正" "永汇泰禄弘鼎多厚和佳进荣如茂全贵祥飞祥祥汇禄合源盈如和庆利寿旺汇春盈荣洪宏凯宝润如洪金鼎" "聚安和吉宏捷亚伟美洪元吉厚谦吉凯汇晶中义升协吉大益祥中鑫成正盛福满辉成亨福富益洪厚禄佳益" "亨巨圣辉厚皇") service_type = ("咨询,中介,科技服务,文化交流服务,技术服务,信息服务,零售贸易,制造,批发贸易,集团,餐饮服务," "餐饮管理,旅游质询,人事服务") company_type = "股份有限公司,有限责任公司" company_pattern = '{region_info}{middle_word}{service_type}{company_type}' return company_pattern.format(region_info=random.choice(region_info.split(',')), middle_word=''.join([random.choice(middle_word) for _ in range(random.randint(2, 5))]), service_type=random.choice(service_type.split(',')), 
company_type=random.choice(company_type.split(',')))
[ "def", "gen_random_company_name", "(", ")", ":", "region_info", "=", "(", "\"北京,上海,广州,深圳,天津,成都,杭州,苏州,重庆,武汉,南京,大连,沈阳,长沙,郑州,西安,青岛,\"", "\"无锡,济南,宁波,佛山,南通,哈尔滨,东莞,福州,长春,石家庄,烟台,合肥,唐山,常州,太原,昆明,\"", "\"潍坊,南昌,泉州,温州,绍兴,嘉兴,厦门,贵阳,淄博,徐州,南宁,扬州,呼和浩特,鄂尔多斯,乌鲁木齐,\"", "\"金华,台州,镇江,威海,珠海,东营,大庆,中山,盐城,包头,保定,济宁,泰州,廊坊,兰州,洛阳,宜昌,\"", "\"沧州,临沂,泰安,鞍山,邯郸,惠州,江门,襄阳,湖州,吉林,芜湖,德州,聊城,漳州,株洲,淮安,榆林,\"", "\"常德,咸阳,衡阳,滨州,柳州,遵义,菏泽,南阳,新乡,湛江,岳阳,郴州,许昌,连云港,枣庄,茂名,周口,\"", "\"宿迁\")", "", "middle_word", "=", "(", "\"泰宏本晶辉昌昌本同永康洪皇贵久圣正裕如恒长佳协义晶合优荣汇洪千东祥复昌皇久丰兴昌国裕亚大\"", "\"荣康通仁元裕厚瑞如弘升久隆旺吉德谦长贵百久汇百伟升隆复飞佳隆浩发丰亨公荣复光福美禄欣丰大\"", "\"祥晶宏中仁宏华隆盈旺仁顺春满美中谦瑞和圣多信合盛千亚晶祥鑫隆飞鑫优合本旺发久国汇百恒佳东\"", "\"洪通恒大公中优广宝盈泰如合丰捷本伟华春元亚广中晶如浩仁汇亚永凯富富裕茂华中飞浩台美佳圣仁\"", "\"成全润金庆百贵康仁茂皇东广荣宏荣新元康公升亨洪福伟永义巨国升进合耀巨润巨元发洪源寿仁发光\"", "\"顺升凯全全辉欣成公裕康合禄兴汇顺浩贵晶捷东飞益福宏国禄元昌弘和满发巨宝生耀隆大欣昌佳本兴\"", "\"吉生宝凯润新高和元亨巨久光益旺春巨鑫进东晶中飞兴中美丰同晶复耀进洪全兴汇宝捷伟仁安宏多庆\"", "\"益生和干干福亚新复吉亚恒亚春德飞伟利庆华丰宏合德瑞进顺祥信合康富益全巨茂台谦厚台成福捷浩\"", "\"信长飞长金利美昌满丰干佳美金洪昌富千和美旺旺晶春仁华中凯浩鼎泰辉新干高进辉同欣广庆吉益德\"", "\"浩中润和春元生高进皇茂利同盈复复晶多巨圣弘捷公宝汇鑫成高新正和和巨祥光宏大丰欣恒昌昌厚合\"", "\"庆泰丰干益和金洪复元顺捷金万辉全吉庆德瑞优长鼎顺汇顺欣飞浩荣祥光泰多春凯信进公优飞昌协美\"", "\"多发中盈协成祥益昌汇泰春满千鼎东光优谦仁中飞生恒伟福晶宝信辉金皇升飞亨鑫安伟华元旺益大寿\"", "\"皇元康耀久荣满协信凯谦宝巨丰正光发康康捷中源国多多康公利顺光辉如茂晶永大高成生裕裕和万干\"", "\"飞全洪伟同发禄升欣盈高欣谦亨裕康宝复庆光皇源凯凯圣发东本辉寿捷茂和庆丰多宏亚万益公福捷升\"", "\"福茂宝捷同复合隆中汇禄鑫中新德昌新大皇安东信瑞元皇皇洪瑞弘捷本鑫中亨亚广昌永宏润同成高利\"", "\"台中生如百康旺巨福德春元通国成浩永康泰盛泰利生茂巨久昌佳复富隆通盈同庆皇顺如辉全旺捷皇长\"", "\"全富广源恒鼎顺汇本百洪鼎进欣吉凯汇欣义东长禄捷浩益旺复弘昌生发伟荣高亨元聚广新复多富千中\"", "\"兴佳升康成同贵宝生捷晶全泰全永旺发富康仁兴谦利茂亨洪佳洪元鼎全国本丰亨鑫弘富干寿春贵国成\"", "\"盛大发久弘国大金生久高久益浩晶盈益瑞正丰百浩泰台合德昌昌美皇合隆裕东广亚国升益福旺高贵信\"", "\"生汇多泰元厚瑞飞千顺盛如大德润新新顺润飞瑞优源宏千盛吉高大耀进信欣信利瑞荣升亨盈盛千合复\"", "\"隆贵丰义公优荣宏广福华洪洪捷吉进盛盛裕国洪浩祥晶弘吉欣鼎德佳成和满台光复汇佳通浩昌欣康瑞\"", "\"万亚谦兴福利千元皇瑞润禄信合长润捷中旺成金益公隆宏康亚禄隆通光广国义中优多富复盛庆千长永\"", "\"国源安永千中正康发复协利皇亚协鑫义巨源中润旺高进巨新高协兴生福恒富国协捷盛同复巨千益长洪\"", "\"亚欣美复康洪全高安进千汇通益美耀美台耀万康合洪禄中宏百凯华优鑫协泰兴裕欣进安茂丰光飞全飞\"", "\"高康进同大洪永祥飞美满兴丰谦和鑫贵百洪通裕升干永升亨光德盛永金东鼎永裕佳和德仁荣辉同瑞恒\"", "\"聚谦长广鑫金久庆国吉禄弘顺汇恒汇瑞隆洪光鼎复公鼎泰盛佳恒鼎中飞聚亚宏盈光安谦成合巨洪飞庆\"", "\"久瑞正茂信协百生盛合国圣盛同同盈信宏禄仁大中皇宝德金台优长成成亚盛公美荣成昌久禄泰亚进台\"", "\"辉佳凯安久本荣飞晶隆晶弘同丰辉华高光兴庆贵如耀飞仁宏欣皇洪宏金满鼎耀巨义德昌源中洪裕祥晶\"", "\"本国金洪昌金源恒福万义久多谦高佳欣和凯本泰春贵大浩永寿昌禄金弘仁美久升亨辉久茂皇弘泰德成\"", "\"宏美辉辉禄仁华晶春干圣长同耀光庆华晶生新辉鑫金满中千谦瑞祥昌茂复长新祥祥福同优佳恒千如兴\"", "\"裕华凯康全贵巨旺祥捷厚贵富宏义盛谦同盛同益谦润东广千进辉升复昌聚吉飞飞元公台本华升美久长\"", "\"庆亚升东正高弘亚庆和寿宏满万优伟浩新合聚庆万广寿东恒光圣润同高谦昌兴义仁安本捷公进康益金\"", "\"庆正进正千辉和升本益高广中百新庆金同如鼎寿茂鼎庆茂瑞全禄辉美贵优丰益同信兴聚浩新协宝耀圣\"", "\"晶盈飞安荣富千祥成源裕合兴佳裕旺金长禄亨本大德成亨皇通全华贵弘成福聚信福光盛丰满宏福益国\"", "\"弘生弘源新万泰成生伟兴兴辉和大元和协通千宝协伟荣长禄晶盛欣隆新本复正盛和皇升万益高盈义裕\"", "\"成仁巨弘千亚耀吉庆厚国新高利和润中捷亚信百合亨佳佳多信鑫永复公千佳捷元东宝协大贵本满泰长\"", "\"协耀圣仁旺生干盛恒义多宏益协润长皇伟晶茂大辉谦多台高恒巨兴辉台华升满公升成元利利厚隆裕厚\"", "\"高公通浩凯金皇庆新发宏大本谦升欣升华益巨益百辉亨辉成欣庆同晶瑞义久成佳利优进满康信盈东盛\"", "\"华义公贵美宝信丰正谦旺华皇吉如鑫泰协全优福寿中生厚成生亚公弘顺千信祥和圣金华康德台顺全厚\"", "\"协亨美万瑞美东飞万飞如长仁高全汇升宏利吉泰益发谦亚汇亚恒耀恒飞浩益通捷亨新恒百佳中成公圣\"", "\"宏满鑫成旺禄元福凯百永东源庆耀万鼎公春昌广润全聚德旺洪隆宝伟亨合满隆进升盛东正新多进浩康\"", "\"长合大耀和美厚如寿鑫禄德仁发庆光通义荣盈昌升荣优华国成欣大宏丰光亚复万光春鼎汇旺和辉辉伟\"", "\"捷汇通寿耀益皇盛晶隆义同合益春通万飞弘如安信本利安复协庆吉新永久公鑫广同富源公宏台长辉耀\"", "\"光千佳宝康祥盛富升顺亚吉皇美润仁广仁台瑞干隆美信优伟安生如成耀盛润升正升新公荣宏恒洪圣泰\"", "\"弘升美益顺隆大生新茂复丰亚华恒仁弘富公美昌干永满汇如洪昌荣飞新谦万百丰进宝禄贵千生进大润\"", "\"禄祥公金祥聚兴和旺盈晶百义协巨顺裕中发千辉亨美本元丰金盈盛新全国源和协富谦发万耀福大发浩\"", "\"隆正宏升弘旺长德百发鼎金满春新成新台正弘润晶大盈茂厚富泰通厚协百源复广恒欣合圣本巨复多正\"", "\"伟润高满凯仁凯高禄万本复信满德升茂金如富谦旺佳美盈千发宝禄进兴鼎丰圣广公进昌东润进优祥生\"", "\"辉茂安顺正伟圣宝优庆厚新益亚鑫皇浩兴顺多生寿金益千丰旺义东光庆泰全协吉兴千瑞丰兴茂泰庆捷\"", "\"丰升弘茂鼎润复永发多成美聚福贵合光亚聚庆大大万顺贵进光国顺飞耀佳合巨洪源祥聚百汇兴本洪荣\"", "\"利春庆协成昌瑞同厚春百光国如升同仁佳合成复凯佳汇升鼎宝宝进洪和信昌康润源圣巨康同欣浩辉正\"", "\"永汇泰禄弘鼎多厚和佳进荣如茂全贵祥飞祥祥汇禄合源盈如和庆利寿旺汇春盈荣洪宏凯宝润如洪金鼎\"", "\"聚安和吉宏捷亚伟美洪元吉厚谦吉凯汇晶中义升协吉大益祥中鑫成正盛福满辉成亨福富益洪厚禄佳益\"", "\"亨巨圣辉厚皇\")", "", "service_type", "=", "(", "\"咨询,中介,科技服务,文化交流服务,技术服务,信息服务,零售贸易,制造,批发贸易,集团,餐饮服务,\"", "\"餐饮管理,旅游质询,人事服务\")", "", "company_type", "=", "\"股份有限公司,有限责任公司\"", "company_pattern", "=", "'{region_info}{middle_word}{service_type}{company_type}'", "return", "company_pattern", ".", "format", "(", "region_info", "=", 
"random", ".", "choice", "(", "region_info", ".", "split", "(", "','", ")", ")", ",", "middle_word", "=", "''", ".", "join", "(", "[", "random", ".", "choice", "(", "middle_word", ")", "for", "_", "in", "range", "(", "random", ".", "randint", "(", "2", ",", "5", ")", ")", "]", ")", ",", "service_type", "=", "random", ".", "choice", "(", "service_type", ".", "split", "(", "','", ")", ")", ",", "company_type", "=", "random", ".", "choice", "(", "company_type", ".", "split", "(", "','", ")", ")", ")" ]
随机生成一个公司名称 :returns: * company_name: (string) 银行名称 举例如下:: print('--- gen_random_company_name demo ---') print(gen_random_company_name()) print('---') 输出结果:: --- gen_random_company_name demo --- 上海大升旅游质询有限责任公司 ---
[ "随机生成一个公司名称" ]
python
train
53.23913
log2timeline/dfwinreg
dfwinreg/regf.py
https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/regf.py#L248-L259
def data(self): """bytes: value data as a byte string. Raises: WinRegistryValueError: if the value data cannot be read. """ try: return self._pyregf_value.data except IOError as exception: raise errors.WinRegistryValueError( 'Unable to read data from value: {0:s} with error: {1!s}'.format( self._pyregf_value.name, exception))
[ "def", "data", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_pyregf_value", ".", "data", "except", "IOError", "as", "exception", ":", "raise", "errors", ".", "WinRegistryValueError", "(", "'Unable to read data from value: {0:s} with error: {1!s}'", ".", "format", "(", "self", ".", "_pyregf_value", ".", "name", ",", "exception", ")", ")" ]
bytes: value data as a byte string. Raises: WinRegistryValueError: if the value data cannot be read.
[ "bytes", ":", "value", "data", "as", "a", "byte", "string", "." ]
python
train
31.583333
openvax/mhcflurry
mhcflurry/class1_neural_network.py
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L274-L288
def get_config(self): """ serialize to a dict all attributes except model weights Returns ------- dict """ self.update_network_description() result = dict(self.__dict__) result['_network'] = None result['network_weights'] = None result['network_weights_loader'] = None result['prediction_cache'] = None return result
[ "def", "get_config", "(", "self", ")", ":", "self", ".", "update_network_description", "(", ")", "result", "=", "dict", "(", "self", ".", "__dict__", ")", "result", "[", "'_network'", "]", "=", "None", "result", "[", "'network_weights'", "]", "=", "None", "result", "[", "'network_weights_loader'", "]", "=", "None", "result", "[", "'prediction_cache'", "]", "=", "None", "return", "result" ]
serialize to a dict all attributes except model weights Returns ------- dict
[ "serialize", "to", "a", "dict", "all", "attributes", "except", "model", "weights", "Returns", "-------", "dict" ]
python
train
27.666667
mitsei/dlkit
dlkit/aws_adapter/osid/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/osid/managers.py#L24-L63
def _initialize(self, runtime): """Common initializer for OsidManager and OsidProxyManager""" if runtime is None: raise NullArgument() if self._my_runtime is not None: raise IllegalState('this manager has already been initialized.') self._my_runtime = runtime config = runtime.get_configuration() cf_public_key_param_id = Id('parameter:cloudFrontPublicKey@aws_adapter') cf_private_key_param_id = Id('parameter:cloudFrontPrivateKey@aws_adapter') cf_keypair_id_param_id = Id('parameter:cloudFrontSigningKeypairId@aws_adapter') cf_private_key_file_param_id = Id('parameter:cloudFrontSigningPrivateKeyFile@aws_adapter') cf_distro_param_id = Id('parameter:cloudFrontDistro@aws_adapter') cf_distro_id_param_id = Id('parameter:cloudFrontDistroId@aws_adapter') s3_public_key_param_id = Id('parameter:S3PublicKey@aws_adapter') s3_private_key_param_id = Id('parameter:S3PrivateKey@aws_adapter') s3_bucket_param_id = Id('parameter:S3Bucket@aws_adapter') cf_public_key = config.get_value_by_parameter(cf_public_key_param_id).get_string_value() cf_private_key = config.get_value_by_parameter(cf_private_key_param_id).get_string_value() cf_keypair_id = config.get_value_by_parameter(cf_keypair_id_param_id).get_string_value() cf_private_key_file = config.get_value_by_parameter( cf_private_key_file_param_id).get_string_value() cf_distro = config.get_value_by_parameter(cf_distro_param_id).get_string_value() cf_distro_id = config.get_value_by_parameter(cf_distro_id_param_id).get_string_value() s3_public_key = config.get_value_by_parameter(s3_public_key_param_id).get_string_value() s3_private_key = config.get_value_by_parameter(s3_private_key_param_id).get_string_value() s3_bucket = config.get_value_by_parameter(s3_bucket_param_id).get_string_value() self._config_map['cloudfront_public_key'] = cf_public_key self._config_map['cloudfront_private_key'] = cf_private_key self._config_map['cloudfront_keypair_id'] = cf_keypair_id self._config_map['cloudfront_private_key_file'] = cf_private_key_file self._config_map['cloudfront_distro'] = cf_distro self._config_map['cloudfront_distro_id'] = cf_distro_id self._config_map['put_public_key'] = s3_public_key self._config_map['put_private_key'] = s3_private_key self._config_map['s3_bucket'] = s3_bucket
[ "def", "_initialize", "(", "self", ",", "runtime", ")", ":", "if", "runtime", "is", "None", ":", "raise", "NullArgument", "(", ")", "if", "self", ".", "_my_runtime", "is", "not", "None", ":", "raise", "IllegalState", "(", "'this manager has already been initialized.'", ")", "self", ".", "_my_runtime", "=", "runtime", "config", "=", "runtime", ".", "get_configuration", "(", ")", "cf_public_key_param_id", "=", "Id", "(", "'parameter:cloudFrontPublicKey@aws_adapter'", ")", "cf_private_key_param_id", "=", "Id", "(", "'parameter:cloudFrontPrivateKey@aws_adapter'", ")", "cf_keypair_id_param_id", "=", "Id", "(", "'parameter:cloudFrontSigningKeypairId@aws_adapter'", ")", "cf_private_key_file_param_id", "=", "Id", "(", "'parameter:cloudFrontSigningPrivateKeyFile@aws_adapter'", ")", "cf_distro_param_id", "=", "Id", "(", "'parameter:cloudFrontDistro@aws_adapter'", ")", "cf_distro_id_param_id", "=", "Id", "(", "'parameter:cloudFrontDistroId@aws_adapter'", ")", "s3_public_key_param_id", "=", "Id", "(", "'parameter:S3PublicKey@aws_adapter'", ")", "s3_private_key_param_id", "=", "Id", "(", "'parameter:S3PrivateKey@aws_adapter'", ")", "s3_bucket_param_id", "=", "Id", "(", "'parameter:S3Bucket@aws_adapter'", ")", "cf_public_key", "=", "config", ".", "get_value_by_parameter", "(", "cf_public_key_param_id", ")", ".", "get_string_value", "(", ")", "cf_private_key", "=", "config", ".", "get_value_by_parameter", "(", "cf_private_key_param_id", ")", ".", "get_string_value", "(", ")", "cf_keypair_id", "=", "config", ".", "get_value_by_parameter", "(", "cf_keypair_id_param_id", ")", ".", "get_string_value", "(", ")", "cf_private_key_file", "=", "config", ".", "get_value_by_parameter", "(", "cf_private_key_file_param_id", ")", ".", "get_string_value", "(", ")", "cf_distro", "=", "config", ".", "get_value_by_parameter", "(", "cf_distro_param_id", ")", ".", "get_string_value", "(", ")", "cf_distro_id", "=", "config", ".", "get_value_by_parameter", "(", "cf_distro_id_param_id", ")", ".", "get_string_value", "(", ")", "s3_public_key", "=", "config", ".", "get_value_by_parameter", "(", "s3_public_key_param_id", ")", ".", "get_string_value", "(", ")", "s3_private_key", "=", "config", ".", "get_value_by_parameter", "(", "s3_private_key_param_id", ")", ".", "get_string_value", "(", ")", "s3_bucket", "=", "config", ".", "get_value_by_parameter", "(", "s3_bucket_param_id", ")", ".", "get_string_value", "(", ")", "self", ".", "_config_map", "[", "'cloudfront_public_key'", "]", "=", "cf_public_key", "self", ".", "_config_map", "[", "'cloudfront_private_key'", "]", "=", "cf_private_key", "self", ".", "_config_map", "[", "'cloudfront_keypair_id'", "]", "=", "cf_keypair_id", "self", ".", "_config_map", "[", "'cloudfront_private_key_file'", "]", "=", "cf_private_key_file", "self", ".", "_config_map", "[", "'cloudfront_distro'", "]", "=", "cf_distro", "self", ".", "_config_map", "[", "'cloudfront_distro_id'", "]", "=", "cf_distro_id", "self", ".", "_config_map", "[", "'put_public_key'", "]", "=", "s3_public_key", "self", ".", "_config_map", "[", "'put_private_key'", "]", "=", "s3_private_key", "self", ".", "_config_map", "[", "'s3_bucket'", "]", "=", "s3_bucket" ]
Common initializer for OsidManager and OsidProxyManager
[ "Common", "initializer", "for", "OsidManager", "and", "OsidProxyManager" ]
python
train
62.425
basho/riak-python-client
riak/table.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/table.py#L51-L64
def new(self, rows, columns=None): """ A shortcut for manually instantiating a new :class:`~riak.ts_object.TsObject` :param rows: An list of lists with timeseries data :type rows: list :param columns: An list of Column names and types. Optional. :type columns: list :rtype: :class:`~riak.ts_object.TsObject` """ from riak.ts_object import TsObject return TsObject(self._client, self, rows, columns)
[ "def", "new", "(", "self", ",", "rows", ",", "columns", "=", "None", ")", ":", "from", "riak", ".", "ts_object", "import", "TsObject", "return", "TsObject", "(", "self", ".", "_client", ",", "self", ",", "rows", ",", "columns", ")" ]
A shortcut for manually instantiating a new :class:`~riak.ts_object.TsObject` :param rows: An list of lists with timeseries data :type rows: list :param columns: An list of Column names and types. Optional. :type columns: list :rtype: :class:`~riak.ts_object.TsObject`
[ "A", "shortcut", "for", "manually", "instantiating", "a", "new", ":", "class", ":", "~riak", ".", "ts_object", ".", "TsObject" ]
python
train
33.928571
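A usage sketch for Table.new above, assuming a reachable Riak TS node and an existing table whose schema matches the row below; the table name and values are hypothetical.

from riak import RiakClient

client = RiakClient()               # defaults to a local node
table = client.table('GeoCheckin')  # hypothetical table name

# one row of timeseries data; column order must match the table's schema
rows = [['region1', 'site1', 1420113600000, 'cloudy', 79.0]]

ts_obj = table.new(rows)            # returns a TsObject wrapping client, table and rows
ts_obj.store()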
rlabbe/filterpy
filterpy/kalman/square_root.py
https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/kalman/square_root.py#L227-L249
def predict(self, u=0): """ Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- u : np.array, optional Optional control vector. If non-zero, it is multiplied by B to create the control input into the system. """ # x = Fx + Bu self.x = dot(self.F, self.x) + dot(self.B, u) # P = FPF' + Q _, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T) self._P1_2 = P2[:self.dim_x, :self.dim_x].T # copy prior self.x_prior = np.copy(self.x) self._P1_2_prior = np.copy(self._P1_2)
[ "def", "predict", "(", "self", ",", "u", "=", "0", ")", ":", "# x = Fx + Bu", "self", ".", "x", "=", "dot", "(", "self", ".", "F", ",", "self", ".", "x", ")", "+", "dot", "(", "self", ".", "B", ",", "u", ")", "# P = FPF' + Q", "_", ",", "P2", "=", "qr", "(", "np", ".", "hstack", "(", "[", "dot", "(", "self", ".", "F", ",", "self", ".", "_P1_2", ")", ",", "self", ".", "_Q1_2", "]", ")", ".", "T", ")", "self", ".", "_P1_2", "=", "P2", "[", ":", "self", ".", "dim_x", ",", ":", "self", ".", "dim_x", "]", ".", "T", "# copy prior", "self", ".", "x_prior", "=", "np", ".", "copy", "(", "self", ".", "x", ")", "self", ".", "_P1_2_prior", "=", "np", ".", "copy", "(", "self", ".", "_P1_2", ")" ]
Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- u : np.array, optional Optional control vector. If non-zero, it is multiplied by B to create the control input into the system.
[ "Predict", "next", "state", "(", "prior", ")", "using", "the", "Kalman", "filter", "state", "propagation", "equations", "." ]
python
train
28.391304
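The QR step in predict() above is the square-root form of the covariance prediction: stacking F·P^(1/2) beside Q^(1/2) and taking the R factor of the transposed stack yields a square root of FPF' + Q. A small NumPy check of that identity, with arbitrary illustrative matrices:

import numpy as np

F = np.array([[1.0, 1.0],
              [0.0, 1.0]])
P = np.array([[2.0, 0.5],
              [0.5, 1.0]])
Q = np.array([[0.1, 0.0],
              [0.0, 0.1]])

S = np.linalg.cholesky(P)        # P = S S'
Q_sqrt = np.linalg.cholesky(Q)   # Q = Q_sqrt Q_sqrt'

# same operation as in predict(): QR over the stacked square roots
_, R = np.linalg.qr(np.hstack([F @ S, Q_sqrt]).T)
S_new = R[:2, :2].T              # new square root of the predicted covariance

print(np.allclose(S_new @ S_new.T, F @ P @ F.T + Q))  # True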
saltstack/salt
salt/modules/smf_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smf_service.py#L155-L176
def start(name): ''' Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name> ''' cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) if not retcode: return True if retcode == 3: # Return code 3 means there was a problem with the service # A common case is being in the 'maintenance' state # Attempt a clear and try one more time clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name) __salt__['cmd.retcode'](clear_cmd, python_shell=False) return not __salt__['cmd.retcode'](cmd, python_shell=False) return False
[ "def", "start", "(", "name", ")", ":", "cmd", "=", "'/usr/sbin/svcadm enable -s -t {0}'", ".", "format", "(", "name", ")", "retcode", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "not", "retcode", ":", "return", "True", "if", "retcode", "==", "3", ":", "# Return code 3 means there was a problem with the service", "# A common case is being in the 'maintenance' state", "# Attempt a clear and try one more time", "clear_cmd", "=", "'/usr/sbin/svcadm clear {0}'", ".", "format", "(", "name", ")", "__salt__", "[", "'cmd.retcode'", "]", "(", "clear_cmd", ",", "python_shell", "=", "False", ")", "return", "not", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "return", "False" ]
Start the specified service CLI Example: .. code-block:: bash salt '*' service.start <service name>
[ "Start", "the", "specified", "service" ]
python
train
31.909091
shoebot/shoebot
shoebot/core/canvas.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/core/canvas.py#L106-L111
def settings(self, **kwargs): ''' Pass a load of settings into the canvas ''' for k, v in kwargs.items(): setattr(self, k, v)
[ "def", "settings", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
Pass a load of settings into the canvas
[ "Pass", "a", "load", "of", "settings", "into", "the", "canvas" ]
python
valid
27.333333
tenforce/docker-py-aiohttp
aiodockerpy/utils/json_stream.py
https://github.com/tenforce/docker-py-aiohttp/blob/ab997f18bdbeb6d83abc6e5281934493552015f3/aiodockerpy/utils/json_stream.py#L11-L21
async def stream_as_text(stream): """ Given a stream of bytes or text, if any of the items in the stream are bytes convert them to text. This function can be removed once we return text streams instead of byte streams. """ async for data in stream: if not isinstance(data, six.text_type): data = data.decode('utf-8', 'replace') yield data
[ "async", "def", "stream_as_text", "(", "stream", ")", ":", "async", "for", "data", "in", "stream", ":", "if", "not", "isinstance", "(", "data", ",", "six", ".", "text_type", ")", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", "yield", "data" ]
Given a stream of bytes or text, if any of the items in the stream are bytes convert them to text. This function can be removed once we return text streams instead of byte streams.
[ "Given", "a", "stream", "of", "bytes", "or", "text", "if", "any", "of", "the", "items", "in", "the", "stream", "are", "bytes", "convert", "them", "to", "text", ".", "This", "function", "can", "be", "removed", "once", "we", "return", "text", "streams", "instead", "of", "byte", "streams", "." ]
python
train
34.909091
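A small driver for stream_as_text above, showing that mixed bytes/str chunks come out as text; it assumes the generator as defined above is in scope (and that six is installed, as the original module requires).

import asyncio

async def fake_stream():
    # mimics a stream that yields both bytes and text chunks
    yield b'{"status": "Pulling"}'
    yield '{"status": "Done"}'

async def main():
    async for chunk in stream_as_text(fake_stream()):
        print(type(chunk).__name__, chunk)  # both chunks are printed as str

asyncio.run(main())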
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9423-L9440
def pcpool(name, cvals): """ This entry point provides toolkit programmers a method for programmatically inserting character data into the kernel pool. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html :param name: The kernel pool name to associate with cvals. :type name: str :param cvals: An array of strings to insert into the kernel pool. :type cvals: Array of str """ name = stypes.stringToCharP(name) lenvals = ctypes.c_int(len(max(cvals, key=len)) + 1) n = ctypes.c_int(len(cvals)) cvals = stypes.listToCharArray(cvals, lenvals, n) libspice.pcpool_c(name, n, lenvals, cvals)
[ "def", "pcpool", "(", "name", ",", "cvals", ")", ":", "name", "=", "stypes", ".", "stringToCharP", "(", "name", ")", "lenvals", "=", "ctypes", ".", "c_int", "(", "len", "(", "max", "(", "cvals", ",", "key", "=", "len", ")", ")", "+", "1", ")", "n", "=", "ctypes", ".", "c_int", "(", "len", "(", "cvals", ")", ")", "cvals", "=", "stypes", ".", "listToCharArray", "(", "cvals", ",", "lenvals", ",", "n", ")", "libspice", ".", "pcpool_c", "(", "name", ",", "n", ",", "lenvals", ",", "cvals", ")" ]
This entry point provides toolkit programmers a method for programmatically inserting character data into the kernel pool. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html :param name: The kernel pool name to associate with cvals. :type name: str :param cvals: An array of strings to insert into the kernel pool. :type cvals: Array of str
[ "This", "entry", "point", "provides", "toolkit", "programmers", "a", "method", "for", "programmatically", "inserting", "character", "data", "into", "the", "kernel", "pool", "." ]
python
train
35.888889
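A quick round trip for pcpool above, assuming SpiceyPy is installed; the pool variable name and values are illustrative, and gcpool is used here only to read the inserted strings back out of the kernel pool.

import spiceypy as spice

# insert three string values under a kernel pool variable name
spice.pcpool('EXAMPLE_STRINGS', ['ab', 'cd', 'efgh'])

# read them back: start at index 0, with room for up to 10 values
values = spice.gcpool('EXAMPLE_STRINGS', 0, 10)
print(values)  # ['ab', 'cd', 'efgh']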