Dataset schema (one record per function):

  column            dtype    range
  ----------------  -------  ---------------
  repo              string   length 7-55
  path              string   length 4-223
  url               string   length 87-315
  code              string   length 75-104k
  code_tokens       list     -
  docstring         string   length 1-46.9k
  docstring_tokens  list     -
  language          string   1 class
  partition         string   3 classes
  avg_line_len      float64  7.91-980
repo: ansible/tacacs_plus
path: tacacs_plus/client.py
url: https://github.com/ansible/tacacs_plus/blob/de0d01372169c8849fa284d75097e57367c8930f/tacacs_plus/client.py#L155-L224
code:

def authenticate(self, username, password, priv_lvl=TAC_PLUS_PRIV_LVL_MIN,
                 authen_type=TAC_PLUS_AUTHEN_TYPE_ASCII, chap_ppp_id=None,
                 chap_challenge=None, rem_addr=TAC_PLUS_VIRTUAL_REM_ADDR,
                 port=TAC_PLUS_VIRTUAL_PORT):
    """
    Authenticate to a TACACS+ server with a username and password.

    :param username:
    :param password:
    :param priv_lvl:
    :param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII, TAC_PLUS_AUTHEN_TYPE_PAP,
                        TAC_PLUS_AUTHEN_TYPE_CHAP
    :param chap_ppp_id: PPP ID when authen_type == 'chap'
    :param chap_challenge: challenge value when authen_type == 'chap'
    :param rem_addr: AAA request source, default to TAC_PLUS_VIRTUAL_REM_ADDR
    :param port: AAA port, default to TAC_PLUS_VIRTUAL_PORT
    :return: TACACSAuthenticationReply
    :raises: socket.timeout, socket.error
    """
    start_data = six.b('')
    if authen_type in (TAC_PLUS_AUTHEN_TYPE_PAP,
                       TAC_PLUS_AUTHEN_TYPE_CHAP):
        self.version_min = TAC_PLUS_MINOR_VER_ONE

        if authen_type == TAC_PLUS_AUTHEN_TYPE_PAP:
            start_data = six.b(password)

        if authen_type == TAC_PLUS_AUTHEN_TYPE_CHAP:
            if not isinstance(chap_ppp_id, six.string_types):
                raise ValueError('chap_ppp_id must be a string')
            if len(chap_ppp_id) != 1:
                raise ValueError('chap_ppp_id must be a 1-byte string')
            if not isinstance(chap_challenge, six.string_types):
                raise ValueError('chap_challenge must be a string')
            if len(chap_challenge) > 255:
                raise ValueError('chap_challenge may not be more than 255 bytes')
            start_data = (
                six.b(chap_ppp_id) +
                six.b(chap_challenge) +
                md5(six.b(chap_ppp_id + password + chap_challenge)).digest()
            )
    with self.closing():
        packet = self.send(
            TACACSAuthenticationStart(username, authen_type, priv_lvl,
                                      start_data, rem_addr=rem_addr,
                                      port=port),
            TAC_PLUS_AUTHEN
        )
        reply = TACACSAuthenticationReply.unpacked(packet.body)
        logger.debug('\n'.join([
            reply.__class__.__name__,
            'recv header <%s>' % packet.header,
            'recv body <%s>' % reply
        ]))
        if authen_type == TAC_PLUS_AUTHEN_TYPE_ASCII and reply.getpass:
            packet = self.send(TACACSAuthenticationContinue(password),
                               TAC_PLUS_AUTHEN, packet.seq_no + 1)
            reply = TACACSAuthenticationReply.unpacked(packet.body)
            logger.debug('\n'.join([
                reply.__class__.__name__,
                'recv header <%s>' % packet.header,
                'recv body <%s>' % reply
            ]))
            if reply.flags == TAC_PLUS_CONTINUE_FLAG_ABORT:
                reply.status = TAC_PLUS_AUTHEN_STATUS_FAIL
    return reply
language: python | partition: train | avg_line_len: 47.4
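A minimal usage sketch for this method (hedged: the host, port, shared secret, and credentials are placeholders; TACACSClient is the client class this method belongs to):

from tacacs_plus.client import TACACSClient

client = TACACSClient('10.0.0.1', 49, 'secret_key', timeout=10)
# The default ASCII flow sends the username, then answers the server's
# password prompt; PAP/CHAP would pass authen_type and the CHAP fields.
auth_reply = client.authenticate('username', 'password')
print(auth_reply.valid)  # True when the server returned AUTHEN_STATUS_PASS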
repo: shaypal5/utilitime
path: utilitime/timestamp/timestamp.py
url: https://github.com/shaypal5/utilitime/blob/554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609/utilitime/timestamp/timestamp.py#L58-L63
code:

def get_timestamp(timezone_name, year, month, day, hour=0, minute=0):
    """Epoch timestamp from timezone, year, month, day, hour and minute."""
    tz = pytz.timezone(timezone_name)
    tz_datetime = tz.localize(datetime(year, month, day, hour, minute))
    timestamp = calendar.timegm(tz_datetime.utctimetuple())
    return timestamp
language: python | partition: train | avg_line_len: 55.166667
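A quick worked example (assuming get_timestamp and its module imports, pytz, calendar, and datetime, are in scope):

# 2020-01-01 00:00 in Jerusalem (UTC+2 in winter) is 2019-12-31 22:00 UTC,
# i.e. epoch second 1577829600.
print(get_timestamp('Asia/Jerusalem', 2020, 1, 1))  # 1577829600
print(get_timestamp('UTC', 2020, 1, 1))             # 1577836800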
repo: FlorianRhiem/pyGLFW
path: glfw/__init__.py
url: https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L1578-L1598
code:

def set_scroll_callback(window, cbfun):
    """
    Sets the scroll callback.

    Wrapper for:
        GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun);
    """
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _scroll_callback_repository:
        previous_callback = _scroll_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWscrollfun(cbfun)
    _scroll_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetScrollCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
language: python | partition: train | avg_line_len: 36.809524
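A minimal sketch of using it through the public glfw module (requires a display; window size and title are placeholders):

import glfw

def on_scroll(window, x_offset, y_offset):
    print('scrolled:', x_offset, y_offset)

if glfw.init():
    window = glfw.create_window(640, 480, 'scroll demo', None, None)
    glfw.set_scroll_callback(window, on_scroll)
    while not glfw.window_should_close(window):
        glfw.poll_events()
    glfw.terminate()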
repo: shoebot/shoebot
path: shoebot/kgp.py
url: https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/kgp.py#L38-L84
code:

def openAnything(source, searchpaths=None):
    """URI, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    Examples:
    >>> from xml.dom import minidom
    >>> sock = openAnything("http://localhost/kant.xml")
    >>> doc = minidom.parse(sock)
    >>> sock.close()
    >>> sock = openAnything("c:\\inetpub\\wwwroot\\kant.xml")
    >>> doc = minidom.parse(sock)
    >>> sock.close()
    >>> sock = openAnything("<ref id='conjunction'><text>and</text><text>or</text></ref>")
    >>> doc = minidom.parse(sock)
    >>> sock.close()
    """
    if hasattr(source, "read"):
        return source

    if source == "-":
        import sys
        return sys.stdin

    # try to open with urllib (if source is http, ftp, or file URL)
    import urllib
    try:
        return urllib.urlopen(source)
    except (IOError, OSError):
        pass

    # try to open with native open function (if source is pathname)
    for path in searchpaths or ['.']:
        try:
            return open(os.path.join(path, source))
        except (IOError, OSError):
            pass

    # treat source as string
    import StringIO
    return StringIO.StringIO(str(source))
language: python | partition: valid | avg_line_len: 31.787234
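A small sketch of the string fallback, the branch the docstring's last example exercises (Python 2 code, matching the module; under Python 3 the equivalents would be urllib.request.urlopen and io.StringIO):

from xml.dom import minidom

# Inline XML is neither a readable object, "-", a URL, nor a path,
# so openAnything wraps it in a StringIO and minidom can parse it.
sock = openAnything("<grammar><ref id='bit'><text>0</text></ref></grammar>")
doc = minidom.parse(sock)
print(doc.documentElement.tagName)  # grammar
sock.close()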
repo: senaite/senaite.core
path: bika/lims/upgrade/v01_01_006.py
url: https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_01_006.py#L348-L368
code:

def touidref(src, dst, src_relation, src_portal_type, fieldname):
    """Convert an archetypes reference in src/src_relation to a UIDReference
    in dst/fieldname.
    """
    field = dst.getField(fieldname)
    if not field:
        raise RuntimeError('Cannot find field %s/%s' % (fieldname, src))
    refs = src.getRefs(relationship=src_relation)
    if len(refs) == 1:
        value = get_uid(refs[0])
    elif len(refs) > 1:
        value = filter(lambda x: x, [get_uid(ref) for ref in refs])
    else:
        value = field.get(src)
    if not value:
        value = ''
    if field.required and not value:
        logger.exception('Required %s field %s/%s has no value' %
                         (src.portal_type, src, fieldname))
    field.set(src, value)
language: python | partition: train | avg_line_len: 35.47619
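A hedged call sketch (hypothetical upgrade-step context; the object, relationship, and field names are illustrative, not taken from the migration itself):

# src and dst are Archetypes content objects; here both are the same object,
# so the old reference on `instance` is rewritten into its UIDReferenceField.
touidref(instance, instance, 'AnalysisAnalysisService', 'Analysis', 'Service')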
repo: harvard-nrg/yaxil
path: yaxil/__init__.py
url: https://github.com/harvard-nrg/yaxil/blob/af594082258e62d1904d6e6841fce0bb5c0bf309/yaxil/__init__.py#L80-L138
code:

def auth(alias=None, url=None, cfg="~/.xnat_auth"):
    '''
    Read connection details from an xnat_auth XML file

    Example:
        >>> import yaxil
        >>> auth = yaxil.auth('xnatastic')
        >>> auth.url, auth.username, auth.password
        ('https://www.xnatastic.org/', 'username', '********')

    :param alias: XNAT alias
    :type alias: str
    :param url: XNAT URL
    :type url: str
    :param cfg: Configuration file
    :type cfg: str
    :returns: Named tuple of (url, username, password)
    :rtype: :mod:`yaxil.XnatAuth`
    '''
    if not alias and not url:
        raise ValueError('you must provide an alias or url argument')
    if alias and url:
        raise ValueError('cannot provide both alias and url arguments')
    # check and parse config file
    cfg = os.path.expanduser(cfg)
    if not os.path.exists(cfg):
        raise AuthError("could not locate auth file %s" % cfg)
    tree = etree.parse(os.path.expanduser(cfg))
    # search by alias or url
    res = None
    if alias:
        res = tree.findall("./%s" % alias)
    if url:
        res = tree.findall("./*/[url='%s']" % url)
    if not res:
        raise AuthError("failed to locate xnat credentials within %s" % cfg)
    elif len(res) > 1:
        raise AuthError("found too many sets of credentials within %s" % cfg)
    res = res.pop()
    # get url
    url = res.findall("url")
    if not url:
        raise AuthError("no url for %s in %s" % (alias, cfg))
    elif len(url) > 1:
        raise AuthError("too many urls for %s in %s" % (alias, cfg))
    # get username
    username = res.findall("username")
    if not username:
        raise AuthError("no username for %s in %s" % (alias, cfg))
    elif len(username) > 1:
        raise AuthError("too many usernames for %s in %s" % (alias, cfg))
    # get password
    password = res.findall("password")
    if not password:
        raise AuthError("no password for %s in %s" % (alias, cfg))
    elif len(password) > 1:
        raise AuthError("too many passwords for %s in %s" % (alias, cfg))
    return XnatAuth(url=url.pop().text, username=username.pop().text,
                    password=password.pop().text)
language: python | partition: train | avg_line_len: 35.847458
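The docstring shows the call; the layout of ~/.xnat_auth can be read off the findall() calls: the alias is the tag of a child element of the root (whose own tag is arbitrary), with url/username/password children. A sketch of such a file, using the docstring's values:

<xnat>
  <xnatastic>
    <url>https://www.xnatastic.org/</url>
    <username>username</username>
    <password>********</password>
  </xnatastic>
</xnat>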
repo: shopkick/flawless
path: flawless/client/decorators.py
url: https://github.com/shopkick/flawless/blob/c54b63ca1991c153e6f75080536f6df445aacc64/flawless/client/decorators.py#L44-L58
code:

def wrap_class(cls, error_threshold=None):
    ''' Wraps a class with reporting to errors backend by decorating each
        function of the class. Decorators are injected under the classmethod
        decorator if they exist.
    '''
    methods = inspect.getmembers(cls, inspect.ismethod) + \
        inspect.getmembers(cls, inspect.isfunction)
    for method_name, method in methods:
        wrapped_method = flawless.client.client._wrap_function_with_error_decorator(
            method if not im_self(method) else im_func(method),
            save_current_stack_trace=False,
            error_threshold=error_threshold,
        )
        if im_self(method):
            wrapped_method = classmethod(wrapped_method)
        setattr(cls, method_name, wrapped_method)
    return cls
language: python | partition: test | avg_line_len: 50.4
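A hedged usage sketch (the service class and its backend configuration are illustrative; since the function takes the class and returns it, it works directly as a class decorator):

import flawless.client.decorators

@flawless.client.decorators.wrap_class
class OrderService(object):
    def place_order(self, order_id):
        # Uncaught exceptions raised here are reported to the flawless backend.
        ...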
repo: Workiva/furious
path: furious/extras/appengine/ndb_persistence.py
url: https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/appengine/ndb_persistence.py#L156-L184
code:

def _completion_checker(async_id, context_id):
    """Check if all Async jobs within a Context have been run."""
    if not context_id:
        logging.debug("Context for async %s does not exist", async_id)
        return

    context = FuriousContext.from_id(context_id)
    marker = FuriousCompletionMarker.get_by_id(context_id)

    if marker and marker.complete:
        logging.info("Context %s already complete" % context_id)
        return True

    task_ids = context.task_ids
    if async_id in task_ids:
        task_ids.remove(async_id)

    logging.debug("Loaded context.")
    logging.debug(task_ids)

    done, has_errors = _check_markers(task_ids)

    if not done:
        return False

    _mark_context_complete(marker, context, has_errors)

    return True
language: python | partition: train | avg_line_len: 25.793103
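A hedged sketch of the call site (async_obj stands in for a completed Async; on App Engine this would run inside a completion-check task, not inline):

import logging

if _completion_checker(async_obj.id, async_obj.context_id):
    logging.info("All tasks in context %s have completed", async_obj.context_id)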
repo: PredixDev/predixpy
path: predix/admin/cf/orgs.py
url: https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/cf/orgs.py#L68-L74
code:

def add_user(self, user_name, role='user'):
    """
    Calls CF's associate user with org. Valid roles include `user`,
    `auditor`, `manager`, `billing_manager`.
    """
    role_uri = self._get_role_uri(role=role)
    return self.api.put(path=role_uri, data={'username': user_name})
language: python | partition: train | avg_line_len: 43.142857
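A hedged sketch (assumes an authenticated Cloud Foundry session; the org helper's construction and the e-mail are assumptions for illustration):

import predix.admin.cf.orgs

org = predix.admin.cf.orgs.Org()
# Associates the user with the current org under the manager role.
org.add_user('teammate@example.com', role='manager')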
repo: guzzle/guzzle_sphinx_theme
path: guzzle_sphinx_theme/__init__.py
url: https://github.com/guzzle/guzzle_sphinx_theme/blob/eefd45b79383b1b4aab1607444e41366fd1348a6/guzzle_sphinx_theme/__init__.py#L23-L27
code:

def add_html_link(app, pagename, templatename, context, doctree):
    """As each page is built, collect page names for the sitemap"""
    base_url = app.config['html_theme_options'].get('base_url', '')
    if base_url:
        app.sitemap_links.append(base_url + pagename + ".html")
language: python | partition: train | avg_line_len: 55.6
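The signature matches Sphinx's 'html-page-context' event, so a sketch of how an extension's setup() would typically register it (the sitemap_links attribute is initialized here for the handler to append to):

def setup(app):
    app.sitemap_links = []
    app.connect('html-page-context', add_html_link)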
repo: ioos/cc-plugin-ncei
path: cc_plugin_ncei/util.py
url: https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L111-L122
code:

def get_lat_variable(nc):
    '''
    Returns the variable for latitude

    :param netCDF4.Dataset nc: an open netCDF dataset object
    '''
    if 'latitude' in nc.variables:
        return 'latitude'
    latitudes = nc.get_variables_by_attributes(standard_name="latitude")
    if latitudes:
        return latitudes[0].name
    return None
language: python | partition: train | avg_line_len: 27.583333
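A hedged sketch ('profile.nc' is a placeholder path to a CF-style file):

from netCDF4 import Dataset

nc = Dataset('profile.nc')
lat_name = get_lat_variable(nc)  # 'latitude', another variable's name, or None
if lat_name:
    print(nc.variables[lat_name][:])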
repo: GoogleCloudPlatform/appengine-mapreduce
path: python/src/mapreduce/model.py
url: https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/model.py#L973-L985
code:

def reset_for_retry(self):
    """Reset self for shard retry."""
    self.retries += 1
    self.last_work_item = ""
    self.active = True
    self.result_status = None
    self.input_finished = False
    self.counters_map = CountersMap()
    self.slice_id = 0
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
language: python | partition: train | avg_line_len: 28.615385
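A hedged sketch (shard_state stands in for the ShardState entity the framework loads when a shard fails and is re-dispatched):

shard_state.reset_for_retry()
# Retry counter is bumped; all per-slice bookkeeping starts over.
assert shard_state.active
assert shard_state.slice_id == 0 and shard_state.slice_retries == 0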
repo: myyang/django-unixtimestampfield
path: unixtimestampfield/fields.py
url: https://github.com/myyang/django-unixtimestampfield/blob/d647681cd628d1a5cdde8dcbb025bcb9612e9b24/unixtimestampfield/fields.py#L347-L364
code:

def to_utc_datetime(self, value):
    """
    from value to datetime with tzinfo format (datetime.datetime instance)
    """
    if isinstance(value, (six.integer_types, float, six.string_types)):
        value = self.to_naive_datetime(value)

    if isinstance(value, datetime.datetime):
        if timezone.is_naive(value):
            value = timezone.make_aware(value, timezone.utc)
        else:
            value = timezone.localtime(value, timezone.utc)
        return value

    raise exceptions.ValidationError(
        "Unable to convert value: '%s' to python data type" % value,
        code="invalid_datetime"
    )
language: python | partition: train | avg_line_len: 37.222222
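A hedged sketch (assumes a configured Django settings module, since the field relies on django.utils.timezone; the no-argument field construction is an assumption):

from unixtimestampfield.fields import UnixTimeStampField

field = UnixTimeStampField()
# Raw epoch seconds come back as an aware UTC datetime.
print(field.to_utc_datetime(1577836800))  # 2020-01-01 00:00:00+00:00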
repo: python-rope/rope
path: rope/refactor/inline.py
url: https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/refactor/inline.py#L113-L151
code:

def get_changes(self, remove=True, only_current=False, resources=None,
                task_handle=taskhandle.NullTaskHandle()):
    """Get the changes this refactoring makes

    If `remove` is `False` the definition will not be removed.  If
    `only_current` is `True`, the current occurrence will be inlined, only.
    """
    changes = ChangeSet('Inline method <%s>' % self.name)
    if resources is None:
        resources = self.project.get_python_files()
    if only_current:
        resources = [self.original]
        if remove:
            resources.append(self.resource)
    job_set = task_handle.create_jobset('Collecting Changes', len(resources))
    for file in resources:
        job_set.started_job(file.path)
        if file == self.resource:
            changes.add_change(self._defining_file_changes(
                changes, remove=remove, only_current=only_current))
        else:
            aim = None
            if only_current and self.original == file:
                aim = self.offset
            handle = _InlineFunctionCallsForModuleHandle(
                self.project, file, self.others_generator, aim)
            result = move.ModuleSkipRenamer(
                self.occurrence_finder, file, handle).get_changed_module()
            if result is not None:
                result = _add_imports(self.project, result, file, self.imports)
                if remove:
                    result = _remove_from(self.project, self.pyname,
                                          result, file)
                changes.add_change(ChangeContents(file, result))
        job_set.finished_job()
    return changes
language: python | partition: train | avg_line_len: 47.025641
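A hedged sketch of driving this refactoring through rope's factory helper (the project path, module name, and offset are placeholders):

from rope.base.project import Project
from rope.refactor.inline import create_inline

project = Project('.')
resource = project.get_resource('mymodule.py')
inliner = create_inline(project, resource, offset=120)  # offset of the name to inline
project.do(inliner.get_changes())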
repo: marshmallow-code/webargs
path: src/webargs/pyramidparser.py
url: https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/pyramidparser.py#L79-L82
code:

def parse_files(self, req, name, field):
    """Pull a file from the request."""
    files = ((k, v) for k, v in req.POST.items() if hasattr(v, "file"))
    return core.get_value(MultiDict(files), name, field)
language: python | partition: train | avg_line_len: 54.5
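A hedged sketch of how this hook would be reached (webargs 5.x-era API, where per-location parse_* methods are dispatched via a field's location; the view wiring and field name are illustrative):

from webargs import fields
from webargs.pyramidparser import use_args

@use_args({'upload': fields.Field(location='files')})
def upload_view(request, args):
    # args['upload'] is the multipart upload object pulled by parse_files.
    return args['upload'].filename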
repo: AdvancedClimateSystems/uModbus
path: umodbus/client/serial/rtu.py
url: https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/client/serial/rtu.py#L161-L171
code:

def write_multiple_registers(slave_id, starting_address, values):
    """ Return ADU for Modbus function code 16: Write Multiple Registers.

    :param slave_id: Number of slave.
    :return: Byte array with ADU.
    """
    function = WriteMultipleRegisters()
    function.starting_address = starting_address
    function.values = values

    return _create_request_adu(slave_id, function.request_pdu)
language: python | partition: train | avg_line_len: 35.636364
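A hedged round-trip sketch following uModbus' client pattern (the serial port name, slave id, and register values are placeholders):

from serial import Serial
from umodbus.client.serial import rtu

with Serial('/dev/ttyS1', baudrate=9600, timeout=1) as port:
    # Build the ADU for function code 16, then ship it over the wire.
    adu = rtu.write_multiple_registers(slave_id=1, starting_address=100,
                                       values=[3, 8, 7])
    response = rtu.send_message(adu, port)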
repo: googlefonts/fontbakery
path: Lib/fontbakery/checkrunner.py
url: https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/checkrunner.py#L622-L630
code:

def check_order(self, order):
    """
    order must be a subset of self.order
    """
    own_order = self.order
    for item in order:
        if item not in own_order:
            raise ValueError(f'Order item {item} not found.')
    return order
language: python | partition: train | avg_line_len: 26.333333
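A hedged, self-contained illustration of the contract (Profile is a stand-in for the real checker profile class):

class Profile:
    def __init__(self, order):
        self.order = order
    check_order = check_order  # bind the function above as a method

profile = Profile(('check/a', 'check/b'))
profile.check_order(('check/b',))  # returns ('check/b',)
profile.check_order(('check/x',))  # ValueError: Order item check/x not found.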
repo: tcalmant/ipopo
path: pelix/ipopo/handlers/temporal.py
url: https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/temporal.py#L59-L111
code:

def _prepare_configs(configs, requires_filters, temporal_timeouts):
    """
    Overrides the filters specified in the decorator with the given ones

    :param configs: Field → (Requirement, key, allow_none) dictionary
    :param requires_filters: Content of the 'requires.filter' component
                             property (field → string)
    :param temporal_timeouts: Content of the 'temporal.timeouts' component
                              property (field → float)
    :return: The new configuration dictionary
    """
    if not isinstance(requires_filters, dict):
        requires_filters = {}
    if not isinstance(temporal_timeouts, dict):
        temporal_timeouts = {}

    if not requires_filters and not temporal_timeouts:
        # No explicit configuration given
        return configs

    # We need to change a part of the requirements
    new_configs = {}
    for field, config in configs.items():
        # Extract values from tuple
        requirement, timeout = config
        explicit_filter = requires_filters.get(field)
        explicit_timeout = temporal_timeouts.get(field)

        # Convert the timeout value
        try:
            explicit_timeout = int(explicit_timeout)
            if explicit_timeout <= 0:
                explicit_timeout = timeout
        except (ValueError, TypeError):
            explicit_timeout = timeout

        if not explicit_filter and not explicit_timeout:
            # Nothing to do
            new_configs[field] = config
        else:
            try:
                # Store an updated copy of the requirement
                requirement_copy = requirement.copy()
                if explicit_filter:
                    requirement_copy.set_filter(explicit_filter)
                new_configs[field] = (requirement_copy, explicit_timeout)
            except (TypeError, ValueError):
                # No information for this one, or invalid filter:
                # keep the factory requirement
                new_configs[field] = config

    return new_configs
language: python | partition: train | avg_line_len: 40.90566
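A hedged, self-contained demonstration with a stand-in Requirement (the real class, with copy() and set_filter(), lives in iPOPO):

class FakeRequirement:
    def __init__(self, ldap_filter=None):
        self.filter = ldap_filter
    def copy(self):
        return FakeRequirement(self.filter)
    def set_filter(self, ldap_filter):
        self.filter = ldap_filter

configs = {'_svc': (FakeRequirement(), 10)}
new = _prepare_configs(configs,
                       {'_svc': '(vendor=acme)'},  # requires.filter property
                       {'_svc': 30})               # temporal.timeouts property
print(new['_svc'][0].filter, new['_svc'][1])       # (vendor=acme) 30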
repo: lrq3000/pyFileFixity
path: pyFileFixity/lib/profilers/visual/pympler/mprofile.py
url: https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/mprofile.py#L49-L61
code:

def codepoint_included(self, codepoint):
    """Check if codepoint matches any of the defined codepoints."""
    if self.codepoints is None:
        return True
    for cp in self.codepoints:
        mismatch = False
        for i in range(len(cp)):
            if (cp[i] is not None) and (cp[i] != codepoint[i]):
                mismatch = True
                break
        if not mismatch:
            return True
    return False
language: python | partition: train | avg_line_len: 36.076923
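A hedged illustration of the matching rule: within a pattern, None acts as a wildcard for that position (Matcher is a stand-in for the profiler class):

class Matcher:
    def __init__(self, codepoints):
        self.codepoints = codepoints
    codepoint_included = codepoint_included  # bind the function above

m = Matcher([('module.py', None)])            # any codepoint in module.py
print(m.codepoint_included(('module.py', 42)))  # True
print(m.codepoint_included(('other.py', 42)))   # False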
repo: googledatalab/pydatalab
path: datalab/utils/commands/_commands.py
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_commands.py#L49-L64
code:

def create_args(line, namespace):
    """ Expand any meta-variable references in the argument list. """
    args = []
    # Using shlex.split handles quoted args and escape characters.
    for arg in shlex.split(line):
        if not arg:
            continue
        if arg[0] == '$':
            var_name = arg[1:]
            if var_name in namespace:
                args.append(namespace[var_name])
            else:
                raise Exception('Undefined variable referenced in command line: %s' % arg)
        else:
            args.append(arg)
    return args
language: python | partition: train | avg_line_len: 32.5625
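A quick demonstration (assuming the function above is in scope; a plain dict stands in for the notebook namespace):

ns = {'project': 'my-gcp-project'}
print(create_args('--project $project --verbose', ns))
# ['--project', 'my-gcp-project', '--verbose']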
repo: echinopsii/net.echinopsii.ariane.community.cli.python3
path: ariane_clip3/directory.py
url: https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L2533-L2731
code:

def save(self):
    """
    :return: save this OS instance on Ariane server (create or update)
    """
    LOGGER.debug("OSInstance.save")
    post_payload = {}
    consolidated_osi_id = []
    consolidated_ipa_id = []
    consolidated_nic_id = []
    consolidated_app_id = []
    consolidated_env_id = []
    consolidated_snet_id = []
    consolidated_team_id = []

    if self.id is not None:
        post_payload['osInstanceID'] = self.id

    if self.name is not None:
        post_payload['osInstanceName'] = self.name

    if self.description is not None:
        post_payload['osInstanceDescription'] = self.description

    if self.admin_gate_uri is not None:
        post_payload['osInstanceAdminGateURI'] = self.admin_gate_uri

    if self.embedding_osi_id is not None:
        post_payload['osInstanceEmbeddingOSInstanceID'] = self.embedding_osi_id

    if self.ost_id is not None:
        post_payload['osInstanceOSTypeID'] = self.ost_id

    if self.embedded_osi_ids is not None:
        consolidated_osi_id = copy.deepcopy(self.embedded_osi_ids)
        if self.embedded_osi_2_rm is not None:
            for osi_2_rm in self.embedded_osi_2_rm:
                if osi_2_rm.id is None:
                    osi_2_rm.sync()
                consolidated_osi_id.remove(osi_2_rm.id)
        if self.embedded_osi_2_add is not None:
            for osi_id_2_add in self.embedded_osi_2_add:
                if osi_id_2_add.id is None:
                    osi_id_2_add.save()
                consolidated_osi_id.append(osi_id_2_add.id)
        post_payload['osInstanceEmbeddedOSInstancesID'] = consolidated_osi_id

    if self.ip_address_ids is not None:
        consolidated_ipa_id = copy.deepcopy(self.ip_address_ids)
        if self.ip_address_2_rm is not None:
            for ipa_2_rm in self.ip_address_2_rm:
                if ipa_2_rm.id is None:
                    ipa_2_rm.sync()
                consolidated_ipa_id.remove(ipa_2_rm.id)
        if self.ip_address_2_add is not None:
            for ipa_2_add in self.ip_address_2_add:
                if ipa_2_add.id is None:
                    ipa_2_add.save()
                consolidated_ipa_id.append(ipa_2_add.id)
        post_payload['osInstanceIPAddressesID'] = consolidated_ipa_id

    if self.nic_ids is not None:
        consolidated_nic_id = copy.deepcopy(self.nic_ids)
        if self.nic_2_rm is not None:
            for nic_2_rm in self.nic_2_rm:
                if nic_2_rm.id is None:
                    nic_2_rm.sync()
                consolidated_nic_id.remove(nic_2_rm.id)
        if self.nic_2_add is not None:
            for nic_2_add in self.nic_2_add:
                if nic_2_add.id is None:
                    nic_2_add.save()
                consolidated_nic_id.append(nic_2_add.id)
        post_payload['osInstanceNICsID'] = consolidated_nic_id

    if self.subnet_ids is not None:
        consolidated_snet_id = copy.deepcopy(self.subnet_ids)
        if self.subnets_2_rm is not None:
            for snet_2_rm in self.subnets_2_rm:
                if snet_2_rm.id is None:
                    snet_2_rm.sync()
                consolidated_snet_id.remove(snet_2_rm.id)
        if self.subnets_2_add is not None:
            for snet_2_add in self.subnets_2_add:
                if snet_2_add.id is None:
                    snet_2_add.save()
                consolidated_snet_id.append(snet_2_add.id)
        post_payload['osInstanceSubnetsID'] = consolidated_snet_id

    if self.application_ids is not None:
        consolidated_app_id = copy.deepcopy(self.application_ids)
        if self.application_2_rm is not None:
            for app_2_rm in self.application_2_rm:
                if app_2_rm.id is None:
                    app_2_rm.sync()
                consolidated_app_id.remove(app_2_rm.id)
        if self.application_2_add is not None:
            for app_2_add in self.application_2_add:
                if app_2_add.id is None:
                    app_2_add.save()
                consolidated_app_id.append(app_2_add.id)
        post_payload['osInstanceApplicationsID'] = consolidated_app_id

    if self.environment_ids is not None:
        consolidated_env_id = copy.deepcopy(self.environment_ids)
        if self.environment_2_rm is not None:
            for env_2_rm in self.environment_2_rm:
                if env_2_rm.id is None:
                    env_2_rm.sync()
                consolidated_env_id.remove(env_2_rm.id)
        if self.environment_2_add is not None:
            for env_2_add in self.environment_2_add:
                if env_2_add.id is None:
                    env_2_add.save()
                consolidated_env_id.append(env_2_add.id)
        post_payload['osInstanceEnvironmentsID'] = consolidated_env_id

    if self.team_ids is not None:
        consolidated_team_id = copy.deepcopy(self.team_ids)
        if self.team_2_rm is not None:
            for team_2_rm in self.team_2_rm:
                if team_2_rm.id is None:
                    team_2_rm.sync()
                consolidated_team_id.remove(team_2_rm.id)
        if self.team_2_add is not None:
            for team_2_add in self.team_2_add:
                if team_2_add.id is None:
                    team_2_add.save()
                consolidated_team_id.append(team_2_add.id)
        post_payload['osInstanceTeamsID'] = consolidated_team_id

    args = {'http_operation': 'POST', 'operation_path': '',
            'parameters': {'payload': json.dumps(post_payload)}}
    response = OSInstanceService.requester.call(args)
    if response.rc != 0:
        LOGGER.warning(
            'OSInstance.save - Problem while saving OS instance ' + self.name +
            '. Reason: ' + str(response.response_content) + '-' +
            str(response.error_message) + " (" + str(response.rc) + ")"
        )
    else:
        self.id = response.response_content['osInstanceID']
        if self.embedded_osi_2_add is not None:
            for osi_2_add in self.embedded_osi_2_add:
                osi_2_add.sync()
        if self.embedded_osi_2_rm is not None:
            for osi_2_rm in self.embedded_osi_2_rm:
                osi_2_rm.sync()
        if self.ip_address_2_add is not None:
            for ipa_2_add in self.ip_address_2_add:
                ipa_2_add.sync()
        if self.ip_address_2_rm is not None:
            for ipa_2_rm in self.ip_address_2_rm:
                ipa_2_rm.sync()
        if self.nic_2_add is not None:
            for nic_2_add in self.nic_2_add:
                nic_2_add.sync()
        if self.nic_2_rm is not None:
            for nic_2_rm in self.nic_2_rm:
                nic_2_rm.sync()
        if self.subnets_2_add is not None:
            for snet_2_add in self.subnets_2_add:
                snet_2_add.sync()
        if self.subnets_2_rm is not None:
            for snet_2_rm in self.subnets_2_rm:
                snet_2_rm.sync()
        if self.application_2_add is not None:
            for app_2_add in self.application_2_add:
                app_2_add.sync()
        if self.application_2_rm is not None:
            for app_2_rm in self.application_2_rm:
                app_2_rm.sync()
        if self.environment_2_add is not None:
            for env_2_add in self.environment_2_add:
                env_2_add.sync()
        if self.environment_2_rm is not None:
            for env_2_rm in self.environment_2_rm:
                env_2_rm.sync()
        if self.team_2_add is not None:
            for team_2_add in self.team_2_add:
                team_2_add.sync()
        if self.team_2_rm is not None:
            for team_2_rm in self.team_2_rm:
                team_2_rm.sync()
        self.embedded_osi_2_add.clear()
        self.embedded_osi_2_rm.clear()
        self.ip_address_2_add.clear()
        self.ip_address_2_rm.clear()
        self.nic_2_add.clear()
        self.nic_2_rm.clear()
        self.subnets_2_add.clear()
        self.subnets_2_rm.clear()
        self.application_2_add.clear()
        self.application_2_rm.clear()
        self.environment_2_add.clear()
        self.environment_2_rm.clear()
        self.team_2_add.clear()
        self.team_2_rm.clear()
        self.sync()
    return self
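A heavily hedged usage sketch of the queue-then-save pattern the method implements (the constructor arguments and the team object are assumptions for illustration, not verified API):

osi = OSInstance(name='web01', description='front web server',
                 admin_gate_uri='ssh://admin@web01')
osi.team_2_add.append(team)  # queue a link; save() consolidates the IDs
osi.save()                   # POSTs the payload, then syncs the queued objects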
:return: save this OS instance on Ariane server (create or update)
[ ":", "return", ":", "save", "this", "OS", "instance", "on", "Ariane", "server", "(", "create", "or", "update", ")" ]
python
train
41.914573
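The save method above applies the same consolidation pattern to seven relationship lists (embedded OS instances, IP addresses, NICs, subnets, applications, environments, teams). A minimal standalone sketch of that pattern, assuming related objects expose ``id``, ``sync()`` and ``save()`` exactly as in the snippet:

import copy

def consolidate_ids(current_ids, to_remove, to_add):
    # Start from a copy of the ids currently linked on the server side,
    # then apply the removals and additions queued on the local object.
    ids = copy.deepcopy(current_ids)
    for obj in to_remove:
        if obj.id is None:
            obj.sync()      # fetch the server-assigned id first
        ids.remove(obj.id)
    for obj in to_add:
        if obj.id is None:
            obj.save()      # persist so the server assigns an id
        ids.append(obj.id)
    return ids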
googleapis/google-cloud-python
websecurityscanner/google/cloud/websecurityscanner_v1alpha/gapic/web_security_scanner_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/websecurityscanner/google/cloud/websecurityscanner_v1alpha/gapic/web_security_scanner_client.py#L87-L95
def finding_path(cls, project, scan_config, scan_run, finding):
    """Return a fully-qualified finding string."""
    return google.api_core.path_template.expand(
        "projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}",
        project=project,
        scan_config=scan_config,
        scan_run=scan_run,
        finding=finding,
    )
[ "def", "finding_path", "(", "cls", ",", "project", ",", "scan_config", ",", "scan_run", ",", "finding", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}\"", ",", "project", "=", "project", ",", "scan_config", "=", "scan_config", ",", "scan_run", "=", "scan_run", ",", "finding", "=", "finding", ",", ")" ]
Return a fully-qualified finding string.
[ "Return", "a", "fully", "-", "qualified", "finding", "string", "." ]
python
train
44.222222
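The helper above is a thin wrapper over ``google.api_core.path_template.expand``, which substitutes each named placeholder into the URI template. A short illustration with hypothetical values:

import google.api_core.path_template as path_template

path = path_template.expand(
    "projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}",
    project="my-project", scan_config="cfg-1", scan_run="run-2", finding="f-3")
# path == "projects/my-project/scanConfigs/cfg-1/scanRuns/run-2/findings/f-3"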
freakboy3742/pyxero
xero/auth.py
https://github.com/freakboy3742/pyxero/blob/5566f17fa06ed1f2fb9426c112951a72276b0f9a/xero/auth.py#L165-L177
def _init_oauth(self, oauth_token, oauth_token_secret):
    "Store and initialize a verified set of OAuth credentials"
    self.oauth_token = oauth_token
    self.oauth_token_secret = oauth_token_secret

    self._oauth = OAuth1(
        self.consumer_key,
        client_secret=self.consumer_secret,
        resource_owner_key=self.oauth_token,
        resource_owner_secret=self.oauth_token_secret,
        rsa_key=self.rsa_key,
        signature_method=self._signature_method
    )
[ "def", "_init_oauth", "(", "self", ",", "oauth_token", ",", "oauth_token_secret", ")", ":", "self", ".", "oauth_token", "=", "oauth_token", "self", ".", "oauth_token_secret", "=", "oauth_token_secret", "self", ".", "_oauth", "=", "OAuth1", "(", "self", ".", "consumer_key", ",", "client_secret", "=", "self", ".", "consumer_secret", ",", "resource_owner_key", "=", "self", ".", "oauth_token", ",", "resource_owner_secret", "=", "self", ".", "oauth_token_secret", ",", "rsa_key", "=", "self", ".", "rsa_key", ",", "signature_method", "=", "self", ".", "_signature_method", ")" ]
Store and initialize a verified set of OAuth credentials
[ "Store", "and", "initialize", "a", "verified", "set", "of", "OAuth", "credentials" ]
python
train
39.692308
taskcluster/taskcluster-client.py
taskcluster/queue.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L241-L263
def cancelTask(self, *args, **kwargs):
    """
    Cancel Task

    This method will cancel a task that is either `unscheduled`, `pending` or
    `running`. It will resolve the current run as `exception` with
    `reasonResolved` set to `canceled`. If the task isn't scheduled yet,
    i.e. it doesn't have any runs, an initial run will be added and resolved
    as described above. Hence, after canceling a task, it cannot be scheduled
    with `queue.scheduleTask`, but a new run can be created with `queue.rerun`.
    These semantics are equivalent to calling `queue.scheduleTask` immediately
    followed by `queue.cancelTask`.

    **Remark** this operation is idempotent; if you try to cancel a task that
    isn't `unscheduled`, `pending` or `running`, this operation will just
    return the current task status.

    This method gives output: ``v1/task-status-response.json#``

    This method is ``stable``
    """
    return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
[ "def", "cancelTask", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"cancelTask\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Cancel Task This method will cancel a task that is either `unscheduled`, `pending` or `running`. It will resolve the current run as `exception` with `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e. it doesn't have any runs, an initial run will be added and resolved as described above. Hence, after canceling a task, it cannot be scheduled with `queue.scheduleTask`, but a new run can be created with `queue.rerun`. These semantics are equivalent to calling `queue.scheduleTask` immediately followed by `queue.cancelTask`. **Remark** this operation is idempotent; if you try to cancel a task that isn't `unscheduled`, `pending` or `running`, this operation will just return the current task status. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
[ "Cancel", "Task" ]
python
train
45.391304
NetEaseGame/ATX
atx/drivers/mixin.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/drivers/mixin.py#L246-L332
def match(self, pattern, screen=None, rect=None, offset=None, threshold=None, method=None):
    """Check if an image pattern appears on the screen

    Args:
        - pattern: Image file name or opencv image object
        - screen (PIL.Image): optional, if not None, screenshot method will be called
        - threshold (float): it depends on the image match method
        - method (string): choices on <template | sift>

    Returns:
        None or FindPoint, For example:

        FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)

        Only when confidence > self.image_match_threshold, matched will be True

    Raises:
        TypeError: when image_match_method is invalid
    """
    pattern = self.pattern_open(pattern)
    search_img = pattern.image

    pattern_scale = self._cal_scale(pattern)
    if pattern_scale != 1.0:
        search_img = cv2.resize(search_img, (0, 0),
                                fx=pattern_scale, fy=pattern_scale,
                                interpolation=cv2.INTER_CUBIC)

    screen = screen or self.region_screenshot()
    threshold = threshold or pattern.threshold or self.image_match_threshold

    # handle offset if percent, ex (0.2, 0.8)
    dx, dy = offset or pattern.offset or (0, 0)
    dx = pattern.image.shape[1] * dx  # opencv object width
    dy = pattern.image.shape[0] * dy  # opencv object height
    dx, dy = int(dx * pattern_scale), int(dy * pattern_scale)

    # image match
    screen = imutils.from_pillow(screen)  # convert to opencv image
    if rect and isinstance(rect, tuple) and len(rect) == 4:
        (x0, y0, x1, y1) = [int(v * pattern_scale) for v in rect]
        (dx, dy) = dx + x0, dy + y0
        screen = imutils.crop(screen, x0, y0, x1, y1)
        # cv2.imwrite('cc.png', screen)

    match_method = method or self.image_match_method

    ret = None
    confidence = None
    matched = False
    position = None
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL:  # IMG_METHOD_TMPL
        ret = ac.find_template(screen, search_img)
        if ret is None:
            return None
        confidence = ret['confidence']
        if confidence > threshold:
            matched = True
        (x, y) = ret['result']
        position = (x + dx, y + dy)  # fix by offset
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        ret = ac.find_sift(screen, search_img, min_match_count=10)
        if ret is None:
            return None
        confidence = ret['confidence']
        matches, total = confidence
        if 1.0 * matches / total > 0.5:
            # FIXME(ssx): sift just write here
            matched = True
        (x, y) = ret['result']
        position = (x + dx, y + dy)  # fix by offset
    elif match_method == consts.IMAGE_MATCH_METHOD_AUTO:
        fp = self._match_auto(screen, search_img, threshold)
        if fp is None:
            return None
        (x, y) = fp.pos
        position = (x + dx, y + dy)
        return FindPoint(position, fp.confidence, fp.method, fp.matched)
    else:
        raise TypeError("Invalid image match method: %s" % (match_method,))

    (x, y) = ret['result']
    position = (x + dx, y + dy)  # fix by offset
    if self.bounds:
        x, y = position
        position = (x + self.bounds.left, y + self.bounds.top)
    return FindPoint(position, confidence, match_method, matched=matched)
[ "def", "match", "(", "self", ",", "pattern", ",", "screen", "=", "None", ",", "rect", "=", "None", ",", "offset", "=", "None", ",", "threshold", "=", "None", ",", "method", "=", "None", ")", ":", "pattern", "=", "self", ".", "pattern_open", "(", "pattern", ")", "search_img", "=", "pattern", ".", "image", "pattern_scale", "=", "self", ".", "_cal_scale", "(", "pattern", ")", "if", "pattern_scale", "!=", "1.0", ":", "search_img", "=", "cv2", ".", "resize", "(", "search_img", ",", "(", "0", ",", "0", ")", ",", "fx", "=", "pattern_scale", ",", "fy", "=", "pattern_scale", ",", "interpolation", "=", "cv2", ".", "INTER_CUBIC", ")", "screen", "=", "screen", "or", "self", ".", "region_screenshot", "(", ")", "threshold", "=", "threshold", "or", "pattern", ".", "threshold", "or", "self", ".", "image_match_threshold", "# handle offset if percent, ex (0.2, 0.8)", "dx", ",", "dy", "=", "offset", "or", "pattern", ".", "offset", "or", "(", "0", ",", "0", ")", "dx", "=", "pattern", ".", "image", ".", "shape", "[", "1", "]", "*", "dx", "# opencv object width", "dy", "=", "pattern", ".", "image", ".", "shape", "[", "0", "]", "*", "dy", "# opencv object height", "dx", ",", "dy", "=", "int", "(", "dx", "*", "pattern_scale", ")", ",", "int", "(", "dy", "*", "pattern_scale", ")", "# image match", "screen", "=", "imutils", ".", "from_pillow", "(", "screen", ")", "# convert to opencv image", "if", "rect", "and", "isinstance", "(", "rect", ",", "tuple", ")", "and", "len", "(", "rect", ")", "==", "4", ":", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ")", "=", "[", "int", "(", "v", "*", "pattern_scale", ")", "for", "v", "in", "rect", "]", "(", "dx", ",", "dy", ")", "=", "dx", "+", "x0", ",", "dy", "+", "y0", "screen", "=", "imutils", ".", "crop", "(", "screen", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ")", "#cv2.imwrite('cc.png', screen)", "match_method", "=", "method", "or", "self", ".", "image_match_method", "ret", "=", "None", "confidence", "=", "None", "matched", "=", "False", "position", "=", "None", "if", "match_method", "==", "consts", ".", "IMAGE_MATCH_METHOD_TMPL", ":", "#IMG_METHOD_TMPL", "ret", "=", "ac", ".", "find_template", "(", "screen", ",", "search_img", ")", "if", "ret", "is", "None", ":", "return", "None", "confidence", "=", "ret", "[", "'confidence'", "]", "if", "confidence", ">", "threshold", ":", "matched", "=", "True", "(", "x", ",", "y", ")", "=", "ret", "[", "'result'", "]", "position", "=", "(", "x", "+", "dx", ",", "y", "+", "dy", ")", "# fix by offset", "elif", "match_method", "==", "consts", ".", "IMAGE_MATCH_METHOD_SIFT", ":", "ret", "=", "ac", ".", "find_sift", "(", "screen", ",", "search_img", ",", "min_match_count", "=", "10", ")", "if", "ret", "is", "None", ":", "return", "None", "confidence", "=", "ret", "[", "'confidence'", "]", "matches", ",", "total", "=", "confidence", "if", "1.0", "*", "matches", "/", "total", ">", "0.5", ":", "# FIXME(ssx): sift just write here", "matched", "=", "True", "(", "x", ",", "y", ")", "=", "ret", "[", "'result'", "]", "position", "=", "(", "x", "+", "dx", ",", "y", "+", "dy", ")", "# fix by offset", "elif", "match_method", "==", "consts", ".", "IMAGE_MATCH_METHOD_AUTO", ":", "fp", "=", "self", ".", "_match_auto", "(", "screen", ",", "search_img", ",", "threshold", ")", "if", "fp", "is", "None", ":", "return", "None", "(", "x", ",", "y", ")", "=", "fp", ".", "pos", "position", "=", "(", "x", "+", "dx", ",", "y", "+", "dy", ")", "return", "FindPoint", "(", "position", ",", "fp", ".", "confidence", ",", "fp", ".", "method", ",", "fp", ".", 
"matched", ")", "else", ":", "raise", "TypeError", "(", "\"Invalid image match method: %s\"", "%", "(", "match_method", ",", ")", ")", "(", "x", ",", "y", ")", "=", "ret", "[", "'result'", "]", "position", "=", "(", "x", "+", "dx", ",", "y", "+", "dy", ")", "# fix by offset", "if", "self", ".", "bounds", ":", "x", ",", "y", "=", "position", "position", "=", "(", "x", "+", "self", ".", "bounds", ".", "left", ",", "y", "+", "self", ".", "bounds", ".", "top", ")", "return", "FindPoint", "(", "position", ",", "confidence", ",", "match_method", ",", "matched", "=", "matched", ")" ]
Check if an image pattern appears on the screen Args: - pattern: Image file name or opencv image object - screen (PIL.Image): optional, if not None, screenshot method will be called - threshold (float): it depends on the image match method - method (string): choices on <template | sift> Returns: None or FindPoint, For example: FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True) Only when confidence > self.image_match_threshold, matched will be True Raises: TypeError: when image_match_method is invalid
[ "Check", "if", "image", "position", "in", "screen" ]
python
train
40.011494
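One detail worth pinning down in the match method is the offset handling: percent offsets are measured against the pattern size (OpenCV shapes are (height, width, ...)) and then scaled. A worked example with hypothetical numbers:

# pattern.image.shape[:2] == (40, 100), i.e. height 40, width 100
h, w = 40, 100
dx, dy = 0.2, 0.8             # offset given as fractions of the pattern
pattern_scale = 1.5

dx_px = int(w * dx * pattern_scale)   # 100 * 0.2 * 1.5 -> 30
dy_px = int(h * dy * pattern_scale)   # 40 * 0.8 * 1.5 -> 48
# (dx_px, dy_px) is then added to the raw match position.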
shin-/dockerpy-creds
dockerpycreds/store.py
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/store.py#L29-L47
def get(self, server):
    """ Retrieve credentials for `server`. If no credentials are found,
    a `StoreError` will be raised.
    """
    if not isinstance(server, six.binary_type):
        server = server.encode('utf-8')
    data = self._execute('get', server)
    result = json.loads(data.decode('utf-8'))

    # docker-credential-pass will return an object for inexistent servers
    # whereas other helpers will exit with returncode != 0. For
    # consistency, if no significant data is returned,
    # raise CredentialsNotFound
    if result['Username'] == '' and result['Secret'] == '':
        raise errors.CredentialsNotFound(
            'No matching credentials in {}'.format(self.program)
        )

    return result
[ "def", "get", "(", "self", ",", "server", ")", ":", "if", "not", "isinstance", "(", "server", ",", "six", ".", "binary_type", ")", ":", "server", "=", "server", ".", "encode", "(", "'utf-8'", ")", "data", "=", "self", ".", "_execute", "(", "'get'", ",", "server", ")", "result", "=", "json", ".", "loads", "(", "data", ".", "decode", "(", "'utf-8'", ")", ")", "# docker-credential-pass will return an object for inexistent servers", "# whereas other helpers will exit with returncode != 0. For", "# consistency, if no significant data is returned,", "# raise CredentialsNotFound", "if", "result", "[", "'Username'", "]", "==", "''", "and", "result", "[", "'Secret'", "]", "==", "''", ":", "raise", "errors", ".", "CredentialsNotFound", "(", "'No matching credentials in {}'", ".", "format", "(", "self", ".", "program", ")", ")", "return", "result" ]
Retrieve credentials for `server`. If no credentials are found, a `StoreError` will be raised.
[ "Retrieve", "credentials", "for", "server", ".", "If", "no", "credentials", "are", "found", "a", "StoreError", "will", "be", "raised", "." ]
python
train
41.210526
jtwhite79/pyemu
pyemu/mat/mat_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/mat/mat_handler.py#L1100-L1114
def full_s(self):
    """ Get the full singular value matrix of self

    Returns
    -------
    Matrix : Matrix

    """
    x = np.zeros((self.shape), dtype=np.float32)

    x[:self.s.shape[0], :self.s.shape[0]] = self.s.as_2d
    s = Matrix(x=x, row_names=self.row_names,
               col_names=self.col_names, isdiagonal=False,
               autoalign=False)
    return s
[ "def", "full_s", "(", "self", ")", ":", "x", "=", "np", ".", "zeros", "(", "(", "self", ".", "shape", ")", ",", "dtype", "=", "np", ".", "float32", ")", "x", "[", ":", "self", ".", "s", ".", "shape", "[", "0", "]", ",", ":", "self", ".", "s", ".", "shape", "[", "0", "]", "]", "=", "self", ".", "s", ".", "as_2d", "s", "=", "Matrix", "(", "x", "=", "x", ",", "row_names", "=", "self", ".", "row_names", ",", "col_names", "=", "self", ".", "col_names", ",", "isdiagonal", "=", "False", ",", "autoalign", "=", "False", ")", "return", "s" ]
Get the full singular value matrix of self Returns ------- Matrix : Matrix
[ "Get", "the", "full", "singular", "value", "matrix", "of", "self" ]
python
train
28.066667
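Stripped of the Matrix wrapper, full_s just embeds the diagonal of singular values into a zero matrix of the original shape. A pure-numpy sketch:

import numpy as np

s = np.array([3.0, 1.0], dtype=np.float32)    # singular values of a 2x3 matrix
full = np.zeros((2, 3), dtype=np.float32)
full[:s.shape[0], :s.shape[0]] = np.diag(s)
# full == [[3., 0., 0.],
#          [0., 1., 0.]]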
lsbardel/python-stdnet
stdnet/odm/struct.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L661-L663
def push_front(self, value):
    '''Appends a copy of ``value`` to the beginning of the list.'''
    self.cache.push_front(self.value_pickler.dumps(value))
[ "def", "push_front", "(", "self", ",", "value", ")", ":", "self", ".", "cache", ".", "push_front", "(", "self", ".", "value_pickler", ".", "dumps", "(", "value", ")", ")" ]
Appends a copy of ``value`` to the beginning of the list.
[ "Appends", "a", "copy", "of", "value", "to", "the", "beginning", "of", "the", "list", "." ]
python
train
54.333333
maweigert/gputools
gputools/denoise/tv2.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/denoise/tv2.py#L80-L113
def tv2(data, weight, Niter=50):
    """
    Chambolle's TV-regularized denoising

    weight should be around  2+1.5*noise_sigma
    """
    prog = OCLProgram(abspath("kernels/tv2.cl"))

    # NOTE: the original read ``data.astype(np,float32, copy=False)`` and
    # bound the image to ``data_im`` while using ``inImg`` below;
    # ``np.float32`` and a single ``inImg`` name are clearly intended.
    inImg = OCLImage.from_array(data.astype(np.float32, copy=False))

    # NOTE: ``dev`` is never defined in this snippet; it is assumed to be
    # the module's OpenCL device handle providing createImage/writeImage/
    # readImage.
    pImgs = [dev.createImage(data.shape[::-1],
                             mem_flags=cl.mem_flags.READ_WRITE,
                             dtype=np.float32,
                             channel_order=cl.channel_order.RGBA)
             for i in range(2)]

    outImg = dev.createImage(data.shape[::-1],
                             dtype=np.float32,
                             mem_flags=cl.mem_flags.READ_WRITE)

    dev.writeImage(inImg, data.astype(np.float32))
    dev.writeImage(pImgs[0], np.zeros((4,) + data.shape, dtype=np.float32))
    dev.writeImage(pImgs[1], np.zeros((4,) + data.shape, dtype=np.float32))

    for i in range(Niter):
        # NOTE: the original called ``proc.runKernel``; ``prog`` (created
        # above and otherwise unused) is assumed to be the intended object.
        prog.runKernel("div_step", inImg.shape, None,
                       inImg, pImgs[i % 2], outImg)
        prog.runKernel("grad_step", inImg.shape, None,
                       outImg, pImgs[i % 2], pImgs[1 - i % 2],
                       np.float32(weight))

    return dev.readImage(outImg, dtype=np.float32)
[ "def", "tv2", "(", "data", ",", "weight", ",", "Niter", "=", "50", ")", ":", "prog", "=", "OCLProgram", "(", "abspath", "(", "\"kernels/tv2.cl\"", ")", ")", "data_im", "=", "OCLImage", ".", "from_array", "(", "data", ".", "astype", "(", "np", ",", "float32", ",", "copy", "=", "False", ")", ")", "pImgs", "=", "[", "dev", ".", "createImage", "(", "data", ".", "shape", "[", ":", ":", "-", "1", "]", ",", "mem_flags", "=", "cl", ".", "mem_flags", ".", "READ_WRITE", ",", "dtype", "=", "np", ".", "float32", ",", "channel_order", "=", "cl", ".", "channel_order", ".", "RGBA", ")", "for", "i", "in", "range", "(", "2", ")", "]", "outImg", "=", "dev", ".", "createImage", "(", "data", ".", "shape", "[", ":", ":", "-", "1", "]", ",", "dtype", "=", "np", ".", "float32", ",", "mem_flags", "=", "cl", ".", "mem_flags", ".", "READ_WRITE", ")", "dev", ".", "writeImage", "(", "inImg", ",", "data", ".", "astype", "(", "np", ".", "float32", ")", ")", "dev", ".", "writeImage", "(", "pImgs", "[", "0", "]", ",", "np", ".", "zeros", "(", "(", "4", ",", ")", "+", "data", ".", "shape", ",", "dtype", "=", "np", ".", "float32", ")", ")", "dev", ".", "writeImage", "(", "pImgs", "[", "1", "]", ",", "np", ".", "zeros", "(", "(", "4", ",", ")", "+", "data", ".", "shape", ",", "dtype", "=", "np", ".", "float32", ")", ")", "for", "i", "in", "range", "(", "Niter", ")", ":", "proc", ".", "runKernel", "(", "\"div_step\"", ",", "inImg", ".", "shape", ",", "None", ",", "inImg", ",", "pImgs", "[", "i", "%", "2", "]", ",", "outImg", ")", "proc", ".", "runKernel", "(", "\"grad_step\"", ",", "inImg", ".", "shape", ",", "None", ",", "outImg", ",", "pImgs", "[", "i", "%", "2", "]", ",", "pImgs", "[", "1", "-", "i", "%", "2", "]", ",", "np", ".", "float32", "(", "weight", ")", ")", "return", "dev", ".", "readImage", "(", "outImg", ",", "dtype", "=", "np", ".", "float32", ")" ]
Chambolle's TV-regularized denoising. weight should be around 2+1.5*noise_sigma
[ "chambolles", "tv", "regularized", "denoising" ]
python
train
36.029412
pandas-dev/pandas
pandas/core/ops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L2422-L2447
def _arith_method_SPARSE_SERIES(cls, op, special):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    op_name = _get_op_name(op, special)

    def wrapper(self, other):
        if isinstance(other, ABCDataFrame):
            return NotImplemented
        elif isinstance(other, ABCSeries):
            if not isinstance(other, ABCSparseSeries):
                other = other.to_sparse(fill_value=self.fill_value)
            return _sparse_series_op(self, other, op, op_name)
        elif is_scalar(other):
            with np.errstate(all='ignore'):
                new_values = op(self.values, other)
            return self._constructor(new_values,
                                     index=self.index,
                                     name=self.name)
        else:  # pragma: no cover
            raise TypeError('operation with {other} not supported'
                            .format(other=type(other)))

    wrapper.__name__ = op_name
    return wrapper
[ "def", "_arith_method_SPARSE_SERIES", "(", "cls", ",", "op", ",", "special", ")", ":", "op_name", "=", "_get_op_name", "(", "op", ",", "special", ")", "def", "wrapper", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "ABCDataFrame", ")", ":", "return", "NotImplemented", "elif", "isinstance", "(", "other", ",", "ABCSeries", ")", ":", "if", "not", "isinstance", "(", "other", ",", "ABCSparseSeries", ")", ":", "other", "=", "other", ".", "to_sparse", "(", "fill_value", "=", "self", ".", "fill_value", ")", "return", "_sparse_series_op", "(", "self", ",", "other", ",", "op", ",", "op_name", ")", "elif", "is_scalar", "(", "other", ")", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "new_values", "=", "op", "(", "self", ".", "values", ",", "other", ")", "return", "self", ".", "_constructor", "(", "new_values", ",", "index", "=", "self", ".", "index", ",", "name", "=", "self", ".", "name", ")", "else", ":", "# pragma: no cover", "raise", "TypeError", "(", "'operation with {other} not supported'", ".", "format", "(", "other", "=", "type", "(", "other", ")", ")", ")", "wrapper", ".", "__name__", "=", "op_name", "return", "wrapper" ]
Wrapper function for Series arithmetic operations, to avoid code duplication.
[ "Wrapper", "function", "for", "Series", "arithmetic", "operations", "to", "avoid", "code", "duplication", "." ]
python
train
38.346154
rafaelmartins/dnsimple-dyndns
dnsimple_dyndns/dnsimple.py
https://github.com/rafaelmartins/dnsimple-dyndns/blob/36d9ec7508673b5354d324cf7c59128440d5bfd1/dnsimple_dyndns/dnsimple.py#L62-L77
def _update_record(self, record_id, name, address, ttl):
    """Updates an existing record."""
    data = json.dumps({'record': {'name': name,
                                  'content': address,
                                  'ttl': ttl}})
    headers = {'Content-Type': 'application/json'}
    request = self._session.put(self._baseurl + '/%d' % record_id,
                                data=data, headers=headers)
    if not request.ok:
        raise RuntimeError('Failed to update record: %s - %s' %
                           (self._format_hostname(name), request.json()))
    record = request.json()
    if 'record' not in record or 'id' not in record['record']:
        raise RuntimeError('Invalid record JSON format: %s - %s' %
                           (self._format_hostname(name), request.json()))
    return record['record']
[ "def", "_update_record", "(", "self", ",", "record_id", ",", "name", ",", "address", ",", "ttl", ")", ":", "data", "=", "json", ".", "dumps", "(", "{", "'record'", ":", "{", "'name'", ":", "name", ",", "'content'", ":", "address", ",", "'ttl'", ":", "ttl", "}", "}", ")", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "request", "=", "self", ".", "_session", ".", "put", "(", "self", ".", "_baseurl", "+", "'/%d'", "%", "record_id", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", "if", "not", "request", ".", "ok", ":", "raise", "RuntimeError", "(", "'Failed to update record: %s - %s'", "%", "(", "self", ".", "_format_hostname", "(", "name", ")", ",", "request", ".", "json", "(", ")", ")", ")", "record", "=", "request", ".", "json", "(", ")", "if", "'record'", "not", "in", "record", "or", "'id'", "not", "in", "record", "[", "'record'", "]", ":", "raise", "RuntimeError", "(", "'Invalid record JSON format: %s - %s'", "%", "(", "self", ".", "_format_hostname", "(", "name", ")", ",", "request", ".", "json", "(", ")", ")", ")", "return", "record", "[", "'record'", "]" ]
Updates an existing record.
[ "Updates", "an", "existing", "record", "." ]
python
train
55.5
emencia/emencia-django-forum
forum/forms/category.py
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/category.py#L29-L38
def clean_description(self):
    """
    Text content validation
    """
    description = self.cleaned_data.get("description")

    validation_helper = safe_import_module(settings.FORUM_TEXT_VALIDATOR_HELPER_PATH)
    if validation_helper is not None:
        return validation_helper(self, description)
    else:
        return description
[ "def", "clean_description", "(", "self", ")", ":", "description", "=", "self", ".", "cleaned_data", ".", "get", "(", "\"description\"", ")", "validation_helper", "=", "safe_import_module", "(", "settings", ".", "FORUM_TEXT_VALIDATOR_HELPER_PATH", ")", "if", "validation_helper", "is", "not", "None", ":", "return", "validation_helper", "(", "self", ",", "description", ")", "else", ":", "return", "description" ]
Text content validation
[ "Text", "content", "validation" ]
python
train
36.7
mitsei/dlkit
dlkit/json_/assessment/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L2128-L2144
def get_item_admin_session(self, proxy):
    """Gets the ``OsidSession`` associated with the item administration service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.ItemAdminSession) - an
            ``ItemAdminSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_item_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_item_admin()`` is ``true``.*

    """
    if not self.supports_item_admin():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.ItemAdminSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_item_admin_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_item_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ItemAdminSession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the ``OsidSession`` associated with the item administration service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.ItemAdminSession) - an ``ItemAdminSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_item_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_item_admin()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "item", "administration", "service", "." ]
python
train
44.941176
QInfer/python-qinfer
src/qinfer/utils.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/utils.py#L235-L287
def particle_covariance_mtx(weights, locations):
    """
    Returns an estimate of the covariance of a distribution
    represented by a given set of SMC particles.

    :param weights: An array containing the weights of each
        particle.
    :param locations: An array containing the locations of
        each particle.
    :rtype: :class:`numpy.ndarray`, shape
        ``(n_modelparams, n_modelparams)``.
    :returns: An array containing the estimated covariance matrix.
    """
    # TODO: add shapes to docstring.

    warnings.warn('particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',
                  DeprecationWarning)

    # Find the mean model vector, shape (n_modelparams, ).
    mu = particle_meanfn(weights, locations)

    # Transpose the particle locations to have shape
    # (n_modelparams, n_particles).
    xs = locations.transpose([1, 0])
    # Give a shorter name to the particle weights, shape (n_particles, ).
    ws = weights

    cov = (
        # This sum is a reduction over the particle index, chosen to be
        # axis=2. Thus, the sum represents an expectation value over the
        # outer product $x . x^T$.
        #
        # All three factors have the particle index as the rightmost
        # index, axis=2. Using the Einstein summation convention (ESC),
        # we can reduce over the particle index easily while leaving
        # the model parameter index to vary between the two factors
        # of xs.
        #
        # This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}
        # using the ESC, where A_{m,n} is the temporary array created.
        np.einsum('i,mi,ni', ws, xs, xs)
        # We finish by subtracting from the above expectation value
        # the outer product $mu . mu^T$.
        - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
    )

    # The SMC approximation is not guaranteed to produce a
    # positive-semidefinite covariance matrix. If a negative eigenvalue
    # is produced, we should warn the caller of this.
    assert np.all(np.isfinite(cov))
    if not np.all(la.eig(cov)[0] >= 0):
        warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.',
                      ApproximationWarning)

    return cov
[ "def", "particle_covariance_mtx", "(", "weights", ",", "locations", ")", ":", "# TODO: add shapes to docstring.", "warnings", ".", "warn", "(", "'particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution'", ",", "DeprecationWarning", ")", "# Find the mean model vector, shape (n_modelparams, ).", "mu", "=", "particle_meanfn", "(", "weights", ",", "locations", ")", "# Transpose the particle locations to have shape", "# (n_modelparams, n_particles).", "xs", "=", "locations", ".", "transpose", "(", "[", "1", ",", "0", "]", ")", "# Give a shorter name to the particle weights, shape (n_particles, ).", "ws", "=", "weights", "cov", "=", "(", "# This sum is a reduction over the particle index, chosen to be", "# axis=2. Thus, the sum represents an expectation value over the", "# outer product $x . x^T$.", "#", "# All three factors have the particle index as the rightmost", "# index, axis=2. Using the Einstein summation convention (ESC),", "# we can reduce over the particle index easily while leaving", "# the model parameter index to vary between the two factors", "# of xs.", "#", "# This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}", "# using the ESC, where A_{m,n} is the temporary array created.", "np", ".", "einsum", "(", "'i,mi,ni'", ",", "ws", ",", "xs", ",", "xs", ")", "# We finish by subracting from the above expectation value", "# the outer product $mu . mu^T$.", "-", "np", ".", "dot", "(", "mu", "[", "...", ",", "np", ".", "newaxis", "]", ",", "mu", "[", "np", ".", "newaxis", ",", "...", "]", ")", ")", "# The SMC approximation is not guaranteed to produce a", "# positive-semidefinite covariance matrix. If a negative eigenvalue", "# is produced, we should warn the caller of this.", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "cov", ")", ")", "if", "not", "np", ".", "all", "(", "la", ".", "eig", "(", "cov", ")", "[", "0", "]", ">=", "0", ")", ":", "warnings", ".", "warn", "(", "'Numerical error in covariance estimation causing positive semidefinite violation.'", ",", "ApproximationWarning", ")", "return", "cov" ]
Returns an estimate of the covariance of a distribution represented by a given set of SMC particles. :param weights: An array containing the weights of each particle. :param locations: An array containing the locations of each particle. :rtype: :class:`numpy.ndarray`, shape ``(n_modelparams, n_modelparams)``. :returns: An array containing the estimated covariance matrix.
[ "Returns", "an", "estimate", "of", "the", "covariance", "of", "a", "distribution", "represented", "by", "a", "given", "set", "of", "SMC", "particle", "." ]
python
train
41.679245
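The einsum trick in the comments can be checked directly against numpy's own weighted covariance; a small self-contained verification (uniform weights for simplicity):

import numpy as np

rng = np.random.default_rng(0)
xs = rng.normal(size=(3, 500))     # (n_modelparams, n_particles)
ws = np.full(500, 1.0 / 500)       # particle weights, summing to 1

mu = xs @ ws                       # weighted mean, shape (3,)
cov = np.einsum('i,mi,ni', ws, xs, xs) - np.outer(mu, mu)

# Same estimator as numpy's weighted, biased (ddof=0) covariance:
assert np.allclose(cov, np.cov(xs, aweights=ws, bias=True))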
tjcsl/ion
intranet/apps/notifications/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/notifications/views.py#L22-L49
def android_setup_view(request):
    """Set up a GCM session.

    This does *not* require a valid login session. Instead, a token from the
    client session is sent to the Android backend, which sends a POST request
    to this view. The "android_gcm_rand" is randomly set when the Android app
    is detected through the user agent. If it has the same value, it is
    assumed to be correct.
    """
    logger.debug(request.POST)
    if request.method == "POST":
        if "user_token" in request.POST and "gcm_token" in request.POST:
            user_token = request.POST.get("user_token")
            gcm_token = request.POST.get("gcm_token")

            logger.debug(user_token)
            logger.debug(gcm_token)

            try:
                ncfg = NotificationConfig.objects.get(android_gcm_rand=user_token)
            except NotificationConfig.DoesNotExist:
                logger.debug("No pair")
                return HttpResponse('{"error":"Invalid data."}', content_type="text/json")

            ncfg.gcm_token = gcm_token
            ncfg.android_gcm_rand = None
            ncfg.android_gcm_date = None
            ncfg.save()

            return HttpResponse('{"success":"Now registered."}', content_type="text/json")

    return HttpResponse('{"error":"Invalid arguments."}', content_type="text/json")
[ "def", "android_setup_view", "(", "request", ")", ":", "logger", ".", "debug", "(", "request", ".", "POST", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "if", "\"user_token\"", "in", "request", ".", "POST", "and", "\"gcm_token\"", "in", "request", ".", "POST", ":", "user_token", "=", "request", ".", "POST", ".", "get", "(", "\"user_token\"", ")", "gcm_token", "=", "request", ".", "POST", ".", "get", "(", "\"gcm_token\"", ")", "logger", ".", "debug", "(", "user_token", ")", "logger", ".", "debug", "(", "gcm_token", ")", "try", ":", "ncfg", "=", "NotificationConfig", ".", "objects", ".", "get", "(", "android_gcm_rand", "=", "user_token", ")", "except", "NotificationConfig", ".", "DoesNotExist", ":", "logger", ".", "debug", "(", "\"No pair\"", ")", "return", "HttpResponse", "(", "'{\"error\":\"Invalid data.\"}'", ",", "content_type", "=", "\"text/json\"", ")", "ncfg", ".", "gcm_token", "=", "gcm_token", "ncfg", ".", "android_gcm_rand", "=", "None", "ncfg", ".", "android_gcm_date", "=", "None", "ncfg", ".", "save", "(", ")", "return", "HttpResponse", "(", "'{\"success\":\"Now registered.\"}'", ",", "content_type", "=", "\"text/json\"", ")", "return", "HttpResponse", "(", "'{\"error\":\"Invalid arguments.\"}'", ",", "content_type", "=", "\"text/json\"", ")" ]
Set up a GCM session. This does *not* require a valid login session. Instead, a token from the client session is sent to the Android backend, which sends a POST request to this view. The "android_gcm_rand" is randomly set when the Android app is detected through the user agent. If it has the same value, it is assumed to be correct.
[ "Set", "up", "a", "GCM", "session", ".", "This", "does", "*", "not", "*", "require", "a", "valid", "login", "session", ".", "Instead", "a", "token", "from", "the", "client", "session", "is", "sent", "to", "the", "Android", "backend", "which", "queries", "a", "POST", "request", "to", "this", "view", "." ]
python
train
46.357143
manrajgrover/halo
halo/halo.py
https://github.com/manrajgrover/halo/blob/0ac5149dea965b27b09f0776df9095ebf013fb4d/halo/halo.py#L321-L334
def clear(self):
    """Clears the line and returns the cursor to the start of the line.

    Returns
    -------
    self
    """
    if not self._enabled:
        return self

    self._stream.write('\r')
    self._stream.write(self.CLEAR_LINE)
    return self
[ "def", "clear", "(", "self", ")", ":", "if", "not", "self", ".", "_enabled", ":", "return", "self", "self", ".", "_stream", ".", "write", "(", "'\\r'", ")", "self", ".", "_stream", ".", "write", "(", "self", ".", "CLEAR_LINE", ")", "return", "self" ]
Clears the line and returns the cursor to the start of the line. Returns ------- self
[ "Clears", "the", "line", "and", "returns", "cursor", "to", "the", "start", ".", "of", "line", "Returns", "-------", "self" ]
python
train
20.642857
trevisanj/a99
a99/search.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/search.py#L15-L29
def index_nearest(array, value):
    """
    Finds index of nearest value in array.

    Args:
        array: numpy array
        value:

    Returns: int

    http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    idx = (np.abs(array - value)).argmin()
    return idx
[ "def", "index_nearest", "(", "array", ",", "value", ")", ":", "idx", "=", "(", "np", ".", "abs", "(", "array", "-", "value", ")", ")", ".", "argmin", "(", ")", "return", "idx" ]
Finds index of nearest value in array. Args: array: numpy array value: Returns: int http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
[ "Finds", "index", "of", "nearest", "value", "in", "array", ".", "Args", ":", "array", ":", "numpy", "array", "value", ":", "Returns", ":", "int", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "2566412", "/", "find", "-", "nearest", "-", "value", "-", "in", "-", "numpy", "-", "array" ]
python
train
20.533333
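Usage is as simple as the docstring suggests; given the function above:

import numpy as np

grid = np.array([0.0, 0.5, 1.0, 1.5])
index_nearest(grid, 1.2)   # -> 2, since |1.0 - 1.2| is the smallest distance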
google/python-gflags
gflags/flagvalues.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L490-L511
def _IsUnparsedFlagAccessAllowed(self, name):
    """Determine whether to allow unparsed flag access or not."""
    if _UNPARSED_FLAG_ACCESS_ENV_NAME in os.environ:
        # We've been told explicitly what to do.
        allow_unparsed_flag_access = (
            os.getenv(_UNPARSED_FLAG_ACCESS_ENV_NAME) == '1')
    elif self.__dict__['__reset_called']:
        # Raise exception if .Reset() was called. This mostly happens in tests.
        allow_unparsed_flag_access = False
    elif _helpers.IsRunningTest():
        # Staged "rollout", based on name of the flag so that we don't break
        # everyone. Hashing the flag is a way of choosing a random but
        # consistent subset of flags to lock down which we can make larger
        # over time.
        name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
        flag_percentile = (
            struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100)
        allow_unparsed_flag_access = (
            _UNPARSED_ACCESS_DISABLED_PERCENT <= flag_percentile)
    else:
        allow_unparsed_flag_access = True
    return allow_unparsed_flag_access
[ "def", "_IsUnparsedFlagAccessAllowed", "(", "self", ",", "name", ")", ":", "if", "_UNPARSED_FLAG_ACCESS_ENV_NAME", "in", "os", ".", "environ", ":", "# We've been told explicitly what to do.", "allow_unparsed_flag_access", "=", "(", "os", ".", "getenv", "(", "_UNPARSED_FLAG_ACCESS_ENV_NAME", ")", "==", "'1'", ")", "elif", "self", ".", "__dict__", "[", "'__reset_called'", "]", ":", "# Raise exception if .Reset() was called. This mostly happens in tests.", "allow_unparsed_flag_access", "=", "False", "elif", "_helpers", ".", "IsRunningTest", "(", ")", ":", "# Staged \"rollout\", based on name of the flag so that we don't break", "# everyone. Hashing the flag is a way of choosing a random but", "# consistent subset of flags to lock down which we can make larger", "# over time.", "name_bytes", "=", "name", ".", "encode", "(", "'utf8'", ")", "if", "not", "isinstance", "(", "name", ",", "bytes", ")", "else", "name", "flag_percentile", "=", "(", "struct", ".", "unpack", "(", "'<I'", ",", "hashlib", ".", "md5", "(", "name_bytes", ")", ".", "digest", "(", ")", "[", ":", "4", "]", ")", "[", "0", "]", "%", "100", ")", "allow_unparsed_flag_access", "=", "(", "_UNPARSED_ACCESS_DISABLED_PERCENT", "<=", "flag_percentile", ")", "else", ":", "allow_unparsed_flag_access", "=", "True", "return", "allow_unparsed_flag_access" ]
Determine whether to allow unparsed flag access or not.
[ "Determine", "whether", "to", "allow", "unparsed", "flag", "access", "or", "not", "." ]
python
train
49.681818
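The percentile bucketing above is deterministic per flag name, which is what makes the staged rollout consistent across runs. Isolated as a sketch:

import hashlib
import struct

def flag_percentile(name):
    # First 4 bytes of the MD5 digest, read as a little-endian unsigned
    # int, reduced mod 100 -- the same scheme as the method above.
    name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
    return struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100

flag_percentile('verbosity')   # stable value in range(100)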
dsoprea/NsqSpinner
nsq/connection.py
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L423-L443
def __read_frame(self):
    """*Attempt* to read a frame. If we get an EAGAIN on the frame header,
    it'll raise to our caller. If we get it *after* we already got the
    header, wait-out the rest of the frame.
    """
    if self.__frame_header_cache is None:
        _logger.debug("Reading frame header.")
        (length, frame_type) = struct.unpack('!II', self.__read(8))
        self.__frame_header_cache = (length, frame_type)
    else:
        (length, frame_type) = self.__frame_header_cache

    try:
        data = self.__read(length - 4)
    except OSError as exc:
        # NOTE: the original read ``except errno.EAGAIN:``, which is not
        # valid Python (errno.EAGAIN is an integer constant, not an
        # exception type). The assumed intent: cache the header and
        # re-raise when the read would block, so the frame can be resumed.
        if exc.errno != errno.EAGAIN:
            raise
        self.__frame_header_cache = (length, frame_type)
        raise

    self.__frame_header_cache = None
    self.__process_message(frame_type, data)
[ "def", "__read_frame", "(", "self", ")", ":", "if", "self", ".", "__frame_header_cache", "is", "None", ":", "_logger", ".", "debug", "(", "\"Reading frame header.\"", ")", "(", "length", ",", "frame_type", ")", "=", "struct", ".", "unpack", "(", "'!II'", ",", "self", ".", "__read", "(", "8", ")", ")", "self", ".", "__frame_header_cache", "=", "(", "length", ",", "frame_type", ")", "else", ":", "(", "length", ",", "frame_type", ")", "=", "self", ".", "__frame_header_cache", "try", ":", "data", "=", "self", ".", "__read", "(", "length", "-", "4", ")", "except", "errno", ".", "EAGAIN", ":", "self", ".", "__frame_header_cache", "=", "(", "length", ",", "frame_type", ")", "raise", "self", ".", "__frame_header_cache", "=", "None", "self", ".", "__process_message", "(", "frame_type", ",", "data", ")" ]
*Attempt* to read a frame. If we get an EAGAIN on the frame header, it'll raise to our caller. If we get it *after* we already got the header, wait-out the rest of the frame.
[ "*", "Attempt", "*", "to", "read", "a", "frame", ".", "If", "we", "get", "an", "EAGAIN", "on", "the", "frame", "header", "it", "ll", "raise", "to", "our", "caller", ".", "If", "we", "get", "it", "*", "after", "*", "we", "already", "got", "the", "header", "wait", "-", "out", "the", "rest", "of", "the", "frame", "." ]
python
train
37.190476
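For reference, the '!II' header layout the reader above expects is two big-endian unsigned 32-bit ints, where length counts the frame-type word plus the payload (hence the ``length - 4`` read). A round-trip sketch:

import struct

payload = b'{"ok": true}'
frame = struct.pack('!II', len(payload) + 4, 0) + payload   # frame_type 0

length, frame_type = struct.unpack('!II', frame[:8])
data = frame[8:8 + (length - 4)]   # the remaining payload bytes
assert data == payload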
JNRowe/upoints
upoints/gpx.py
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/gpx.py#L245-L256
def sunset(self, date=None, zenith=None):
    """Calculate sunset times for locations.

    Args:
        date (datetime.date): Calculate rise or set for given date
        zenith (str): Calculate sunset events, or start of twilight times

    Returns:
        list of list of datetime.datetime: The time for the sunset for
            each point in each segment
    """
    return (segment.sunset(date, zenith) for segment in self)
[ "def", "sunset", "(", "self", ",", "date", "=", "None", ",", "zenith", "=", "None", ")", ":", "return", "(", "segment", ".", "sunset", "(", "date", ",", "zenith", ")", "for", "segment", "in", "self", ")" ]
Calculate sunset times for locations. Args: date (datetime.date): Calculate rise or set for given date zenith (str): Calculate sunset events, or start of twilight times Returns: list of list of datetime.datetime: The time for the sunset for each point in each segment
[ "Calculate", "sunset", "times", "for", "locations", "." ]
python
train
38.083333
bradmontgomery/django-redis-metrics
redis_metrics/templatetags/redis_metric_tags.py
https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/templatetags/redis_metric_tags.py#L116-L159
def metric_history(slug, granularity="daily", since=None, to=None,
                   with_data_table=False):
    """Template Tag to display a metric's history.

    * ``slug`` -- the metric's unique slug
    * ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
    * ``since`` -- a datetime object or a string matching one of the
      following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS"
      for a date & time.
    * ``to`` -- the date up to which metrics are pulled
    * ``with_data_table`` -- if True, prints the raw data in a table.

    """
    r = get_r()
    try:
        if since and len(since) == 10:  # yyyy-mm-dd
            since = datetime.strptime(since, "%Y-%m-%d")
        elif since and len(since) == 19:  # yyyy-mm-dd HH:MM:ss
            since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")

        # NOTE: the original parsed ``since`` in the 10-character branch
        # below; ``to`` is clearly the intended argument.
        if to and len(to) == 10:  # yyyy-mm-dd
            to = datetime.strptime(to, "%Y-%m-%d")
        elif to and len(to) == 19:  # yyyy-mm-dd HH:MM:ss
            to = datetime.strptime(to, "%Y-%m-%d %H:%M:%S")
    except (TypeError, ValueError):
        # assume we got a datetime object or leave since = None
        pass

    metric_history = r.get_metric_history(
        slugs=slug,
        since=since,
        to=to,
        granularity=granularity
    )

    return {
        'since': since,
        'to': to,
        'slug': slug,
        'granularity': granularity,
        'metric_history': metric_history,
        'with_data_table': with_data_table,
    }
[ "def", "metric_history", "(", "slug", ",", "granularity", "=", "\"daily\"", ",", "since", "=", "None", ",", "to", "=", "None", ",", "with_data_table", "=", "False", ")", ":", "r", "=", "get_r", "(", ")", "try", ":", "if", "since", "and", "len", "(", "since", ")", "==", "10", ":", "# yyyy-mm-dd", "since", "=", "datetime", ".", "strptime", "(", "since", ",", "\"%Y-%m-%d\"", ")", "elif", "since", "and", "len", "(", "since", ")", "==", "19", ":", "# yyyy-mm-dd HH:MM:ss", "since", "=", "datetime", ".", "strptime", "(", "since", ",", "\"%Y-%m-%d %H:%M:%S\"", ")", "if", "to", "and", "len", "(", "to", ")", "==", "10", ":", "# yyyy-mm-dd", "to", "=", "datetime", ".", "strptime", "(", "since", ",", "\"%Y-%m-%d\"", ")", "elif", "to", "and", "len", "(", "to", ")", "==", "19", ":", "# yyyy-mm-dd HH:MM:ss", "to", "=", "datetime", ".", "strptime", "(", "to", ",", "\"%Y-%m-%d %H:%M:%S\"", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# assume we got a datetime object or leave since = None", "pass", "metric_history", "=", "r", ".", "get_metric_history", "(", "slugs", "=", "slug", ",", "since", "=", "since", ",", "to", "=", "to", ",", "granularity", "=", "granularity", ")", "return", "{", "'since'", ":", "since", ",", "'to'", ":", "to", ",", "'slug'", ":", "slug", ",", "'granularity'", ":", "granularity", ",", "'metric_history'", ":", "metric_history", ",", "'with_data_table'", ":", "with_data_table", ",", "}" ]
Template Tag to display a metric's history. * ``slug`` -- the metric's unique slug * ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly * ``since`` -- a datetime object or a string matching one of the following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for a date & time. * ``to`` -- the date up to which metrics are pulled * ``with_data_table`` -- if True, prints the raw data in a table.
[ "Template", "Tag", "to", "display", "a", "metric", "s", "history", "." ]
python
train
34.136364
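The try block dispatches on string length to pick a strptime format, falling back to treating the value as a datetime (or None). The same logic as a small standalone helper:

from datetime import datetime

def parse_when(value):
    # Mirrors the tag's length-based dispatch for the two accepted formats.
    if value and len(value) == 10:        # "YYYY-mm-dd"
        return datetime.strptime(value, "%Y-%m-%d")
    if value and len(value) == 19:        # "YYYY-mm-dd HH:MM:SS"
        return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
    return value                          # assume a datetime or None

parse_when("2024-01-31")            # datetime(2024, 1, 31, 0, 0)
parse_when("2024-01-31 12:30:00")   # datetime(2024, 1, 31, 12, 30)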
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L975-L990
def crossJoin(self, other):
    """Returns the cartesian product with another :class:`DataFrame`.

    :param other: Right side of the cartesian product.

    >>> df.select("age", "name").collect()
    [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
    >>> df2.select("name", "height").collect()
    [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
    >>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
    [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
     Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
    """

    jdf = self._jdf.crossJoin(other._jdf)
    return DataFrame(jdf, self.sql_ctx)
[ "def", "crossJoin", "(", "self", ",", "other", ")", ":", "jdf", "=", "self", ".", "_jdf", ".", "crossJoin", "(", "other", ".", "_jdf", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Returns the cartesian product with another :class:`DataFrame`. :param other: Right side of the cartesian product. >>> df.select("age", "name").collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df2.select("name", "height").collect() [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)] >>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect() [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85), Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
[ "Returns", "the", "cartesian", "product", "with", "another", ":", "class", ":", "DataFrame", "." ]
python
train
45.6875
twosigma/marbles
marbles/mixins/marbles/mixins/mixins.py
https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L884-L909
def assertFileSizeAlmostEqual(
        self, filename, size, places=None, msg=None, delta=None):
    '''Fail if ``filename`` does not have the given ``size`` as
    determined by their difference rounded to the given number of
    decimal ``places`` (default 7) and comparing to zero, or if
    their difference is greater than a given ``delta``.

    Parameters
    ----------
    filename : str, bytes, file-like
    size : int, float
    places : int
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.
    delta : int, float

    Raises
    ------
    TypeError
        If ``filename`` is not a str or bytes object and is not
        file-like.
    '''
    fsize = self._get_file_size(filename)
    self.assertAlmostEqual(
        fsize, size, places=places, msg=msg, delta=delta)
[ "def", "assertFileSizeAlmostEqual", "(", "self", ",", "filename", ",", "size", ",", "places", "=", "None", ",", "msg", "=", "None", ",", "delta", "=", "None", ")", ":", "fsize", "=", "self", ".", "_get_file_size", "(", "filename", ")", "self", ".", "assertAlmostEqual", "(", "fsize", ",", "size", ",", "places", "=", "places", ",", "msg", "=", "msg", ",", "delta", "=", "delta", ")" ]
Fail if ``filename`` does not have the given ``size`` as determined by their difference rounded to the given number of decimal ``places`` (default 7) and comparing to zero, or if their difference is greater than a given ``delta``. Parameters ---------- filename : str, bytes, file-like size : int, float places : int msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. delta : int, float Raises ------ TypeError If ``filename`` is not a str or bytes object and is not file-like.
[ "Fail", "if", "filename", "does", "not", "have", "the", "given", "size", "as", "determined", "by", "their", "difference", "rounded", "to", "the", "given", "number", "of", "decimal", "places", "(", "default", "7", ")", "and", "comparing", "to", "zero", "or", "if", "their", "difference", "is", "greater", "than", "a", "given", "delta", "." ]
python
train
35.615385
saltstack/salt
salt/template.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/template.py#L35-L139
def compile_template(template,
                     renderers,
                     default,
                     blacklist,
                     whitelist,
                     saltenv='base',
                     sls='',
                     input_data='',
                     **kwargs):
    '''
    Take the path to a template and return the high data structure
    derived from the template.

    Helpers:

    :param mask_value:
        Mask value for debugging purposes (prevent sensitive information etc)
        example: "mask_value="pass*". All "passwd", "password", "pass" will
        be masked (as text).
    '''
    # if any error occurs, we return an empty dictionary
    ret = {}

    log.debug('compile template: %s', template)

    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')

    if template != ':string:':
        # Template was specified incorrectly
        if not isinstance(template, six.string_types):
            log.error('Template was specified incorrectly: %s', template)
            return ret
        # Template does not exist
        if not os.path.isfile(template):
            log.error('Template does not exist: %s', template)
            return ret
        # Template is an empty file
        if salt.utils.files.is_empty(template):
            log.debug('Template is an empty file: %s', template)
            return ret

        with codecs.open(template, encoding=SLS_ENCODING) as ifile:
            # data input to the first render function in the pipe
            input_data = ifile.read()
            if not input_data.strip():
                # Template is nothing but whitespace
                log.error('Template is nothing but whitespace: %s', template)
                return ret

    # Get the list of render funcs in the render pipe line.
    render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)

    windows_newline = '\r\n' in input_data

    input_data = StringIO(input_data)
    for render, argline in render_pipe:
        if salt.utils.stringio.is_readable(input_data):
            input_data.seek(0)  # pylint: disable=no-member
        render_kwargs = dict(renderers=renderers, tmplpath=template)
        render_kwargs.update(kwargs)
        if argline:
            render_kwargs['argline'] = argline
        start = time.time()
        ret = render(input_data, saltenv, sls, **render_kwargs)
        log.profile(
            'Time (in seconds) to render \'%s\' using \'%s\' renderer: %s',
            template,
            render.__module__.split('.')[-1],
            time.time() - start
        )
        if ret is None:
            # The file is empty or is being written elsewhere
            time.sleep(0.01)
            ret = render(input_data, saltenv, sls, **render_kwargs)
        input_data = ret
        if log.isEnabledFor(logging.GARBAGE):  # pylint: disable=no-member
            # If ret is not a StringIO (which means it was rendered using
            # yaml, mako, or another engine which renders to a data
            # structure) we don't want to log this.
            if salt.utils.stringio.is_readable(ret):
                log.debug('Rendered data from file: %s:\n%s',
                          template,
                          salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),
                                                                kwargs.get('mask_value')))  # pylint: disable=no-member
                ret.seek(0)  # pylint: disable=no-member

    # Preserve newlines from original template
    if windows_newline:
        if salt.utils.stringio.is_readable(ret):
            is_stringio = True
            contents = ret.read()
        else:
            is_stringio = False
            contents = ret

        if isinstance(contents, six.string_types):
            if '\r\n' not in contents:
                contents = contents.replace('\n', '\r\n')
                ret = StringIO(contents) if is_stringio else contents
            else:
                if is_stringio:
                    ret.seek(0)
    return ret
[ "def", "compile_template", "(", "template", ",", "renderers", ",", "default", ",", "blacklist", ",", "whitelist", ",", "saltenv", "=", "'base'", ",", "sls", "=", "''", ",", "input_data", "=", "''", ",", "*", "*", "kwargs", ")", ":", "# if any error occurs, we return an empty dictionary", "ret", "=", "{", "}", "log", ".", "debug", "(", "'compile template: %s'", ",", "template", ")", "if", "'env'", "in", "kwargs", ":", "# \"env\" is not supported; Use \"saltenv\".", "kwargs", ".", "pop", "(", "'env'", ")", "if", "template", "!=", "':string:'", ":", "# Template was specified incorrectly", "if", "not", "isinstance", "(", "template", ",", "six", ".", "string_types", ")", ":", "log", ".", "error", "(", "'Template was specified incorrectly: %s'", ",", "template", ")", "return", "ret", "# Template does not exist", "if", "not", "os", ".", "path", ".", "isfile", "(", "template", ")", ":", "log", ".", "error", "(", "'Template does not exist: %s'", ",", "template", ")", "return", "ret", "# Template is an empty file", "if", "salt", ".", "utils", ".", "files", ".", "is_empty", "(", "template", ")", ":", "log", ".", "debug", "(", "'Template is an empty file: %s'", ",", "template", ")", "return", "ret", "with", "codecs", ".", "open", "(", "template", ",", "encoding", "=", "SLS_ENCODING", ")", "as", "ifile", ":", "# data input to the first render function in the pipe", "input_data", "=", "ifile", ".", "read", "(", ")", "if", "not", "input_data", ".", "strip", "(", ")", ":", "# Template is nothing but whitespace", "log", ".", "error", "(", "'Template is nothing but whitespace: %s'", ",", "template", ")", "return", "ret", "# Get the list of render funcs in the render pipe line.", "render_pipe", "=", "template_shebang", "(", "template", ",", "renderers", ",", "default", ",", "blacklist", ",", "whitelist", ",", "input_data", ")", "windows_newline", "=", "'\\r\\n'", "in", "input_data", "input_data", "=", "StringIO", "(", "input_data", ")", "for", "render", ",", "argline", "in", "render_pipe", ":", "if", "salt", ".", "utils", ".", "stringio", ".", "is_readable", "(", "input_data", ")", ":", "input_data", ".", "seek", "(", "0", ")", "# pylint: disable=no-member", "render_kwargs", "=", "dict", "(", "renderers", "=", "renderers", ",", "tmplpath", "=", "template", ")", "render_kwargs", ".", "update", "(", "kwargs", ")", "if", "argline", ":", "render_kwargs", "[", "'argline'", "]", "=", "argline", "start", "=", "time", ".", "time", "(", ")", "ret", "=", "render", "(", "input_data", ",", "saltenv", ",", "sls", ",", "*", "*", "render_kwargs", ")", "log", ".", "profile", "(", "'Time (in seconds) to render \\'%s\\' using \\'%s\\' renderer: %s'", ",", "template", ",", "render", ".", "__module__", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ",", "time", ".", "time", "(", ")", "-", "start", ")", "if", "ret", "is", "None", ":", "# The file is empty or is being written elsewhere", "time", ".", "sleep", "(", "0.01", ")", "ret", "=", "render", "(", "input_data", ",", "saltenv", ",", "sls", ",", "*", "*", "render_kwargs", ")", "input_data", "=", "ret", "if", "log", ".", "isEnabledFor", "(", "logging", ".", "GARBAGE", ")", ":", "# pylint: disable=no-member", "# If ret is not a StringIO (which means it was rendered using", "# yaml, mako, or another engine which renders to a data", "# structure) we don't want to log this.", "if", "salt", ".", "utils", ".", "stringio", ".", "is_readable", "(", "ret", ")", ":", "log", ".", "debug", "(", "'Rendered data from file: %s:\\n%s'", ",", "template", ",", "salt", ".", "utils", ".", 
"sanitizers", ".", "mask_args_value", "(", "salt", ".", "utils", ".", "data", ".", "decode", "(", "ret", ".", "read", "(", ")", ")", ",", "kwargs", ".", "get", "(", "'mask_value'", ")", ")", ")", "# pylint: disable=no-member", "ret", ".", "seek", "(", "0", ")", "# pylint: disable=no-member", "# Preserve newlines from original template", "if", "windows_newline", ":", "if", "salt", ".", "utils", ".", "stringio", ".", "is_readable", "(", "ret", ")", ":", "is_stringio", "=", "True", "contents", "=", "ret", ".", "read", "(", ")", "else", ":", "is_stringio", "=", "False", "contents", "=", "ret", "if", "isinstance", "(", "contents", ",", "six", ".", "string_types", ")", ":", "if", "'\\r\\n'", "not", "in", "contents", ":", "contents", "=", "contents", ".", "replace", "(", "'\\n'", ",", "'\\r\\n'", ")", "ret", "=", "StringIO", "(", "contents", ")", "if", "is_stringio", "else", "contents", "else", ":", "if", "is_stringio", ":", "ret", ".", "seek", "(", "0", ")", "return", "ret" ]
Take the path to a template and return the high data structure derived from the template. Helpers: :param mask_value: Mask value for debugging purposes (prevent sensitive information etc) example: "mask_value="pass*". All "passwd", "password", "pass" will be masked (as text).
[ "Take", "the", "path", "to", "a", "template", "and", "return", "the", "high", "data", "structure", "derived", "from", "the", "template", "." ]
python
train
38.114286
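The render-pipe loop is the heart of compile_template: each renderer reads the previous stage's output (rewinding StringIO inputs first) and hands back either new text for the next stage or a final data structure. A minimal standalone sketch of that pattern, with made-up renderer names and no Salt dependencies:

from io import StringIO

def upper_renderer(src, saltenv, sls, **kwargs):
    # text-to-text stage: returns a new stream for the next renderer
    return StringIO(src.read().upper())

def line_list_renderer(src, saltenv, sls, **kwargs):
    # final stage: renders to a data structure instead of text
    return [line for line in src.read().splitlines() if line]

render_pipe = [(upper_renderer, ''), (line_list_renderer, '')]
data = StringIO('alpha\nbeta\n')
for render, argline in render_pipe:
    if hasattr(data, 'seek'):
        data.seek(0)  # rewind so each stage reads from the start
    data = render(data, 'base', '')
print(data)  # ['ALPHA', 'BETA']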
aio-libs/aioodbc
aioodbc/connection.py
https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/connection.py#L203-L212
def set_attr(self, attr_id, value): """Calls SQLSetConnectAttr with the given values. :param attr_id: the attribute ID (integer) to set. These are ODBC or driver constants. :parm value: the connection attribute value to set. At this time only integer values are supported. """ fut = self._execute(self._conn.set_attr, attr_id, value) return fut
[ "def", "set_attr", "(", "self", ",", "attr_id", ",", "value", ")", ":", "fut", "=", "self", ".", "_execute", "(", "self", ".", "_conn", ".", "set_attr", ",", "attr_id", ",", "value", ")", "return", "fut" ]
Calls SQLSetConnectAttr with the given values. :param attr_id: the attribute ID (integer) to set. These are ODBC or driver constants. :parm value: the connection attribute value to set. At this time only integer values are supported.
[ "Calls", "SQLSetConnectAttr", "with", "the", "given", "values", "." ]
python
train
40.8
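A minimal usage sketch; the connection string is illustrative, and 102 is SQL_ATTR_AUTOCOMMIT in the ODBC specification (only integer values are supported, per the docstring):

import asyncio
import aioodbc

async def main():
    conn = await aioodbc.connect(dsn='Driver=SQLite3;Database=example.db')
    await conn.set_attr(102, 1)  # 102 == SQL_ATTR_AUTOCOMMIT in the ODBC spec
    await conn.close()

asyncio.run(main())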
micha030201/aionationstates
aionationstates/world_.py
https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/world_.py#L222-L245
def poll(self, id): """Poll with a given id. Parameters ---------- id : int Poll id. Returns ------- an :class:`ApiQuery` of :class:`Poll` Raises ------ :class:`NotFound` If a poll with the requested id doesn't exist. """ @api_query('poll', pollid=str(id)) async def result(_, root): elem = root.find('POLL') if not elem: raise NotFound(f'No poll found with id {id}') return Poll(elem) return result(self)
[ "def", "poll", "(", "self", ",", "id", ")", ":", "@", "api_query", "(", "'poll'", ",", "pollid", "=", "str", "(", "id", ")", ")", "async", "def", "result", "(", "_", ",", "root", ")", ":", "elem", "=", "root", ".", "find", "(", "'POLL'", ")", "if", "not", "elem", ":", "raise", "NotFound", "(", "f'No poll found with id {id}'", ")", "return", "Poll", "(", "elem", ")", "return", "result", "(", "self", ")" ]
Poll with a given id. Parameters ---------- id : int Poll id. Returns ------- an :class:`ApiQuery` of :class:`Poll` Raises ------ :class:`NotFound` If a poll with the requested id doesn't exist.
[ "Poll", "with", "a", "given", "id", "." ]
python
train
23.875
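A usage sketch covering the NotFound path the docstring promises; it assumes the package exposes a module-level World object named `world` (treat that name, and the poll id, as assumptions):

import asyncio
import aionationstates

async def main():
    world = aionationstates.world  # assumed module-level World instance
    try:
        poll = await world.poll(106040)  # illustrative poll id
    except aionationstates.NotFound:
        print('no poll with that id')
    else:
        print(poll)

asyncio.run(main())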
PythonCharmers/python-future
src/future/backports/http/cookiejar.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1367-L1462
def _normalized_cookie_tuples(self, attrs_set): """Return list of tuples containing normalised cookie information. attrs_set is the list of lists of key,value pairs extracted from the Set-Cookie or Set-Cookie2 headers. Tuples are name, value, standard, rest, where name and value are the cookie name and value, standard is a dictionary containing the standard cookie-attributes (discard, secure, version, expires or max-age, domain, path and port) and rest is a dictionary containing the rest of the cookie-attributes. """ cookie_tuples = [] boolean_attrs = "discard", "secure" value_attrs = ("version", "expires", "max-age", "domain", "path", "port", "comment", "commenturl") for cookie_attrs in attrs_set: name, value = cookie_attrs[0] # Build dictionary of standard cookie-attributes (standard) and # dictionary of other cookie-attributes (rest). # Note: expiry time is normalised to seconds since epoch. V0 # cookies should have the Expires cookie-attribute, and V1 cookies # should have Max-Age, but since V1 includes RFC 2109 cookies (and # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we # accept either (but prefer Max-Age). max_age_set = False bad_cookie = False standard = {} rest = {} for k, v in cookie_attrs[1:]: lc = k.lower() # don't lose case distinction for unknown fields if lc in value_attrs or lc in boolean_attrs: k = lc if k in boolean_attrs and v is None: # boolean cookie-attribute is present, but has no value # (like "discard", rather than "port=80") v = True if k in standard: # only first value is significant continue if k == "domain": if v is None: _debug(" missing value for domain attribute") bad_cookie = True break # RFC 2965 section 3.3.3 v = v.lower() if k == "expires": if max_age_set: # Prefer max-age to expires (like Mozilla) continue if v is None: _debug(" missing or invalid value for expires " "attribute: treating as session cookie") continue if k == "max-age": max_age_set = True try: v = int(v) except ValueError: _debug(" missing or invalid (non-numeric) value for " "max-age attribute") bad_cookie = True break # convert RFC 2965 Max-Age to seconds since epoch # XXX Strictly you're supposed to follow RFC 2616 # age-calculation rules. Remember that zero Max-Age is a # is a request to discard (old and new) cookie, though. k = "expires" v = self._now + v if (k in value_attrs) or (k in boolean_attrs): if (v is None and k not in ("port", "comment", "commenturl")): _debug(" missing value for %s attribute" % k) bad_cookie = True break standard[k] = v else: rest[k] = v if bad_cookie: continue cookie_tuples.append((name, value, standard, rest)) return cookie_tuples
[ "def", "_normalized_cookie_tuples", "(", "self", ",", "attrs_set", ")", ":", "cookie_tuples", "=", "[", "]", "boolean_attrs", "=", "\"discard\"", ",", "\"secure\"", "value_attrs", "=", "(", "\"version\"", ",", "\"expires\"", ",", "\"max-age\"", ",", "\"domain\"", ",", "\"path\"", ",", "\"port\"", ",", "\"comment\"", ",", "\"commenturl\"", ")", "for", "cookie_attrs", "in", "attrs_set", ":", "name", ",", "value", "=", "cookie_attrs", "[", "0", "]", "# Build dictionary of standard cookie-attributes (standard) and", "# dictionary of other cookie-attributes (rest).", "# Note: expiry time is normalised to seconds since epoch. V0", "# cookies should have the Expires cookie-attribute, and V1 cookies", "# should have Max-Age, but since V1 includes RFC 2109 cookies (and", "# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we", "# accept either (but prefer Max-Age).", "max_age_set", "=", "False", "bad_cookie", "=", "False", "standard", "=", "{", "}", "rest", "=", "{", "}", "for", "k", ",", "v", "in", "cookie_attrs", "[", "1", ":", "]", ":", "lc", "=", "k", ".", "lower", "(", ")", "# don't lose case distinction for unknown fields", "if", "lc", "in", "value_attrs", "or", "lc", "in", "boolean_attrs", ":", "k", "=", "lc", "if", "k", "in", "boolean_attrs", "and", "v", "is", "None", ":", "# boolean cookie-attribute is present, but has no value", "# (like \"discard\", rather than \"port=80\")", "v", "=", "True", "if", "k", "in", "standard", ":", "# only first value is significant", "continue", "if", "k", "==", "\"domain\"", ":", "if", "v", "is", "None", ":", "_debug", "(", "\" missing value for domain attribute\"", ")", "bad_cookie", "=", "True", "break", "# RFC 2965 section 3.3.3", "v", "=", "v", ".", "lower", "(", ")", "if", "k", "==", "\"expires\"", ":", "if", "max_age_set", ":", "# Prefer max-age to expires (like Mozilla)", "continue", "if", "v", "is", "None", ":", "_debug", "(", "\" missing or invalid value for expires \"", "\"attribute: treating as session cookie\"", ")", "continue", "if", "k", "==", "\"max-age\"", ":", "max_age_set", "=", "True", "try", ":", "v", "=", "int", "(", "v", ")", "except", "ValueError", ":", "_debug", "(", "\" missing or invalid (non-numeric) value for \"", "\"max-age attribute\"", ")", "bad_cookie", "=", "True", "break", "# convert RFC 2965 Max-Age to seconds since epoch", "# XXX Strictly you're supposed to follow RFC 2616", "# age-calculation rules. Remember that zero Max-Age is a", "# is a request to discard (old and new) cookie, though.", "k", "=", "\"expires\"", "v", "=", "self", ".", "_now", "+", "v", "if", "(", "k", "in", "value_attrs", ")", "or", "(", "k", "in", "boolean_attrs", ")", ":", "if", "(", "v", "is", "None", "and", "k", "not", "in", "(", "\"port\"", ",", "\"comment\"", ",", "\"commenturl\"", ")", ")", ":", "_debug", "(", "\" missing value for %s attribute\"", "%", "k", ")", "bad_cookie", "=", "True", "break", "standard", "[", "k", "]", "=", "v", "else", ":", "rest", "[", "k", "]", "=", "v", "if", "bad_cookie", ":", "continue", "cookie_tuples", ".", "append", "(", "(", "name", ",", "value", ",", "standard", ",", "rest", ")", ")", "return", "cookie_tuples" ]
Return list of tuples containing normalised cookie information. attrs_set is the list of lists of key,value pairs extracted from the Set-Cookie or Set-Cookie2 headers. Tuples are name, value, standard, rest, where name and value are the cookie name and value, standard is a dictionary containing the standard cookie-attributes (discard, secure, version, expires or max-age, domain, path and port) and rest is a dictionary containing the rest of the cookie-attributes.
[ "Return", "list", "of", "tuples", "containing", "normalised", "cookie", "information", "." ]
python
train
41.458333
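A hand-worked illustration of the normalization for a single header (expected values written out by hand rather than by calling the private method; assume self._now == 1000):

# One parsed Set-Cookie2 header: sid=abc; Domain=.Example.COM; Max-Age=60; flavor=new
attrs_set = [[('sid', 'abc'),
              ('Domain', '.Example.COM'),
              ('Max-Age', '60'),
              ('flavor', 'new')]]
# Expected result, one tuple per cookie:
# [('sid', 'abc',
#   {'domain': '.example.com',  # domain lowercased per RFC 2965 section 3.3.3
#    'expires': 1060},          # Max-Age 60 folded into self._now + 60
#   {'flavor': 'new'})]         # unrecognized attribute lands in `rest`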
apple/turicreate
src/unity/python/turicreate/meta/asttools/mutators/prune_mutator.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/mutators/prune_mutator.py#L52-L62
def reduce(self, body): ''' remove nodes from a list ''' i = 0 while i < len(body): stmnt = body[i] if self.visit(stmnt): body.pop(i) else: i += 1
[ "def", "reduce", "(", "self", ",", "body", ")", ":", "i", "=", "0", "while", "i", "<", "len", "(", "body", ")", ":", "stmnt", "=", "body", "[", "i", "]", "if", "self", ".", "visit", "(", "stmnt", ")", ":", "body", ".", "pop", "(", "i", ")", "else", ":", "i", "+=", "1" ]
remove nodes from a list
[ "remove", "nodes", "from", "a", "list" ]
python
train
22.181818
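The while/pop index walk avoids the classic bug of removing items during a plain `for` loop, which skips the element that slides into the freed slot. A standalone version of the same pattern:

def prune(body, should_remove):
    i = 0
    while i < len(body):
        if should_remove(body[i]):
            body.pop(i)  # next element shifts into index i; don't advance
        else:
            i += 1

stmts = [1, 2, 2, 3, 2]
prune(stmts, lambda s: s == 2)
print(stmts)  # [1, 3]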
bradleyfay/py-Goldsberry
goldsberry/apiconvertor.py
https://github.com/bradleyfay/py-Goldsberry/blob/828179f8e4aad910d7a8c58faa12d3ae2c354503/goldsberry/apiconvertor.py#L38-L52
def season_id(x): """takes in 4-digit years and returns API formatted seasonID Input Values: YYYY Used in: """ if len(str(x)) == 4: try: return "".join(["2", str(x)]) except ValueError: raise ValueError("Enter the four digit year for the first half of the desired season") else: raise ValueError("Enter the four digit year for the first half of the desired season")
[ "def", "season_id", "(", "x", ")", ":", "if", "len", "(", "str", "(", "x", ")", ")", "==", "4", ":", "try", ":", "return", "\"\"", ".", "join", "(", "[", "\"2\"", ",", "str", "(", "x", ")", "]", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Enter the four digit year for the first half of the desired season\"", ")", "else", ":", "raise", "ValueError", "(", "\"Enter the four digit year for the first half of the desired season\"", ")" ]
takes in 4-digit years and returns API formatted seasonID Input Values: YYYY Used in:
[ "takes", "in", "4", "-", "digit", "years", "and", "returns", "API", "formatted", "seasonID" ]
python
train
28.466667
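Worked examples of the id construction; note that the meaningful validation is the length check, since "".join of two strings cannot raise ValueError, so the inner try/except never fires:

print(season_id(2015))    # '22015' -- the API id for the 2015-16 season
print(season_id('1999'))  # '21999' -- strings work too, via str(x)
season_id(15)             # ValueError: not a four-digit year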
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L312-L360
def scopes(self, name=None, pk=None, status=ScopeStatus.ACTIVE, **kwargs): # type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Scope] """Return all scopes visible / accessible for the logged in user. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: if provided, filter the search for a scope/project by name :type name: basestring or None :param pk: if provided, filter the search by scope_id :type pk: basestring or None :param status: if provided, filter the search for the status. eg. 'ACTIVE', 'TEMPLATE', 'LIBRARY' :type status: basestring or None :param kwargs: optional additional search arguments :type kwargs: dict or None :return: list of `Scopes` :rtype: list(:class:`models.Scope`) :raises NotFoundError: if no scopes are not found. Example ------- >>> client = Client(url='https://default.localhost:9443', verify=False) >>> client.login('admin','pass') >>> client.scopes() # doctest: Ellipsis ... >>> client.scopes(name="Bike Project") # doctest: Ellipsis ... >>> last_request = client.last_request # doctest: Ellipsis ... """ request_params = { 'name': name, 'id': pk, 'status': status, } if kwargs: request_params.update(**kwargs) response = self._request('GET', self._build_url('scopes'), params=request_params) if response.status_code != requests.codes.ok: # pragma: no cover raise NotFoundError("Could not retrieve scopes") data = response.json() return [Scope(s, client=self) for s in data['results']]
[ "def", "scopes", "(", "self", ",", "name", "=", "None", ",", "pk", "=", "None", ",", "status", "=", "ScopeStatus", ".", "ACTIVE", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Scope]", "request_params", "=", "{", "'name'", ":", "name", ",", "'id'", ":", "pk", ",", "'status'", ":", "status", ",", "}", "if", "kwargs", ":", "request_params", ".", "update", "(", "*", "*", "kwargs", ")", "response", "=", "self", ".", "_request", "(", "'GET'", ",", "self", ".", "_build_url", "(", "'scopes'", ")", ",", "params", "=", "request_params", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "# pragma: no cover", "raise", "NotFoundError", "(", "\"Could not retrieve scopes\"", ")", "data", "=", "response", ".", "json", "(", ")", "return", "[", "Scope", "(", "s", ",", "client", "=", "self", ")", "for", "s", "in", "data", "[", "'results'", "]", "]" ]
Return all scopes visible / accessible for the logged in user. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: if provided, filter the search for a scope/project by name :type name: basestring or None :param pk: if provided, filter the search by scope_id :type pk: basestring or None :param status: if provided, filter the search for the status. eg. 'ACTIVE', 'TEMPLATE', 'LIBRARY' :type status: basestring or None :param kwargs: optional additional search arguments :type kwargs: dict or None :return: list of `Scopes` :rtype: list(:class:`models.Scope`) :raises NotFoundError: if no scopes are not found. Example ------- >>> client = Client(url='https://default.localhost:9443', verify=False) >>> client.login('admin','pass') >>> client.scopes() # doctest: Ellipsis ... >>> client.scopes(name="Bike Project") # doctest: Ellipsis ... >>> last_request = client.last_request # doctest: Ellipsis ...
[ "Return", "all", "scopes", "visible", "/", "accessible", "for", "the", "logged", "in", "user", "." ]
python
train
38.265306
nfcpy/nfcpy
src/nfc/tag/tt1.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt1.py#L387-L392
def read_all(self): """Returns the 2 byte Header ROM and all 120 byte static memory. """ log.debug("read all static memory") cmd = "\x00\x00\x00" + self.uid return self.transceive(cmd)
[ "def", "read_all", "(", "self", ")", ":", "log", ".", "debug", "(", "\"read all static memory\"", ")", "cmd", "=", "\"\\x00\\x00\\x00\"", "+", "self", ".", "uid", "return", "self", ".", "transceive", "(", "cmd", ")" ]
Returns the 2 byte Header ROM and all 120 byte static memory.
[ "Returns", "the", "2", "byte", "Header", "ROM", "and", "all", "120", "byte", "static", "memory", "." ]
python
train
36.5
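The frame the method builds is the Type 1 Tag RALL command: opcode 0x00, one address byte and one data byte (both zero for RALL), then the 4-byte tag UID. A Python 3 sketch of the same framing (the UID is illustrative):

uid = b"\x01\x02\x03\x04"       # illustrative 4-byte UID
cmd = b"\x00\x00\x00" + uid     # opcode, address, data, UID
print(cmd.hex())                # 00000001020304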
elkiwy/paynter
paynter/paynter.py
https://github.com/elkiwy/paynter/blob/f73cb5bb010a6b32ee41640a50396ed0bae8d496/paynter/paynter.py#L127-L147
def drawPoint(self, x, y, silent=True): """ Draws a point on the current :py:class:`Layer` with the current :py:class:`Brush`. Coordinates are relative to the original layer size WITHOUT downsampling applied. :param x1: Point X coordinate. :param y1: Point Y coordinate. :rtype: Nothing. """ start = time.time() #Downsample the coordinates x = int(x/config.DOWNSAMPLING) y = int(y/config.DOWNSAMPLING) #Apply the dab with or without source caching if self.brush.usesSourceCaching: applyMirroredDab_jit(self.mirrorMode, self.image.getActiveLayer().data, int(x-self.brush.brushSize*0.5), int(y-self.brush.brushSize*0.5), self.brush.coloredBrushSource.copy(), config.CANVAS_SIZE, self.brush.brushMask) else: self.brush.makeDab(self.image.getActiveLayer(), int(x), int(y), self.color, self.secondColor, mirror=self.mirrorMode) config.AVGTIME.append(time.time()-start)
[ "def", "drawPoint", "(", "self", ",", "x", ",", "y", ",", "silent", "=", "True", ")", ":", "start", "=", "time", ".", "time", "(", ")", "#Downsample the coordinates", "x", "=", "int", "(", "x", "/", "config", ".", "DOWNSAMPLING", ")", "y", "=", "int", "(", "y", "/", "config", ".", "DOWNSAMPLING", ")", "#Apply the dab with or without source caching", "if", "self", ".", "brush", ".", "usesSourceCaching", ":", "applyMirroredDab_jit", "(", "self", ".", "mirrorMode", ",", "self", ".", "image", ".", "getActiveLayer", "(", ")", ".", "data", ",", "int", "(", "x", "-", "self", ".", "brush", ".", "brushSize", "*", "0.5", ")", ",", "int", "(", "y", "-", "self", ".", "brush", ".", "brushSize", "*", "0.5", ")", ",", "self", ".", "brush", ".", "coloredBrushSource", ".", "copy", "(", ")", ",", "config", ".", "CANVAS_SIZE", ",", "self", ".", "brush", ".", "brushMask", ")", "else", ":", "self", ".", "brush", ".", "makeDab", "(", "self", ".", "image", ".", "getActiveLayer", "(", ")", ",", "int", "(", "x", ")", ",", "int", "(", "y", ")", ",", "self", ".", "color", ",", "self", ".", "secondColor", ",", "mirror", "=", "self", ".", "mirrorMode", ")", "config", ".", "AVGTIME", ".", "append", "(", "time", ".", "time", "(", ")", "-", "start", ")" ]
Draws a point on the current :py:class:`Layer` with the current :py:class:`Brush`. Coordinates are relative to the original layer size WITHOUT downsampling applied. :param x1: Point X coordinate. :param y1: Point Y coordinate. :rtype: Nothing.
[ "Draws", "a", "point", "on", "the", "current", ":", "py", ":", "class", ":", "Layer", "with", "the", "current", ":", "py", ":", "class", ":", "Brush", ".", "Coordinates", "are", "relative", "to", "the", "original", "layer", "size", "WITHOUT", "downsampling", "applied", "." ]
python
train
42
zsethna/OLGA
olga/generation_probability.py
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/generation_probability.py#L1679-L1746
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask): """Compute Pi_V conditioned on J. This function returns the Pi array from the model factors of the V genomic contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}. For clarity in parsing the algorithm implementation, we include which instance attributes are used in the method as 'parameters.' Parameters ---------- CDR3_seq : str CDR3 sequence composed of 'amino acids' (single character symbols each corresponding to a collection of codons as given by codons_dict). V_usage_mask : list Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list Indices of the J alleles to be considered in the Pgen computation self.cutV_genomic_CDR3_segs : list of strings List of all the V genomic nucleotide sequences trimmed to begin at the conserved C residue and with the maximum number of palindromic insertions appended. self.PVdelV_nt_pos_vec : list of ndarrays For each V allele, format P(delV|V) into the correct form for a Pi array or V(J)_{x_1}. This is only done for the first and last position in each codon. self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts For each V allele, and each 'amino acid', format P(V)*P(delV|V) for positions in the middle of a codon into the correct form for a Pi array or V(J)_{x_1} given the 'amino acid'. self.PVJ : ndarray Joint probability distribution of V and J, P(V, J). Returns ------- Pi_V_given_J : list List of (4, 3L) ndarrays corresponding to V(J)_{x_1}. max_V_align: int Maximum alignment of the CDR3_seq to any genomic V allele allowed by V_usage_mask. """ #Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template. #furthermore, the genomic sequence should be pruned to start at the conserved C Pi_V_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in J_usage_mask] #Holds the aggregate weight for each nt possiblity and position alignment_lengths = [] for V_in in V_usage_mask: try: cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in] except IndexError: print 'Check provided V usage mask. Contains indicies out of allowed range.' continue current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg) alignment_lengths += [current_alignment_length] current_Pi_V = np.zeros((4, len(CDR3_seq)*3)) if current_alignment_length > 0: #For first and last nt in a codon use PVdelV_nt_pos_vec current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length] for pos in range(1, current_alignment_length, 3): #for middle nt use PVdelV_2nd_nt_pos_per_aa_vec current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos/3]][:, pos] for j, J_in in enumerate(J_usage_mask): Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length] return Pi_V_given_J, max(alignment_lengths)
[ "def", "compute_Pi_V_given_J", "(", "self", ",", "CDR3_seq", ",", "V_usage_mask", ",", "J_usage_mask", ")", ":", "#Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.", "#furthermore, the genomic sequence should be pruned to start at the conserved C", "Pi_V_given_J", "=", "[", "np", ".", "zeros", "(", "(", "4", ",", "len", "(", "CDR3_seq", ")", "*", "3", ")", ")", "for", "i", "in", "J_usage_mask", "]", "#Holds the aggregate weight for each nt possiblity and position", "alignment_lengths", "=", "[", "]", "for", "V_in", "in", "V_usage_mask", ":", "try", ":", "cutV_gen_seg", "=", "self", ".", "cutV_genomic_CDR3_segs", "[", "V_in", "]", "except", "IndexError", ":", "print", "'Check provided V usage mask. Contains indicies out of allowed range.'", "continue", "current_alignment_length", "=", "self", ".", "max_nt_to_aa_alignment_left", "(", "CDR3_seq", ",", "cutV_gen_seg", ")", "alignment_lengths", "+=", "[", "current_alignment_length", "]", "current_Pi_V", "=", "np", ".", "zeros", "(", "(", "4", ",", "len", "(", "CDR3_seq", ")", "*", "3", ")", ")", "if", "current_alignment_length", ">", "0", ":", "#For first and last nt in a codon use PVdelV_nt_pos_vec", "current_Pi_V", "[", ":", ",", ":", "current_alignment_length", "]", "=", "self", ".", "PVdelV_nt_pos_vec", "[", "V_in", "]", "[", ":", ",", ":", "current_alignment_length", "]", "for", "pos", "in", "range", "(", "1", ",", "current_alignment_length", ",", "3", ")", ":", "#for middle nt use PVdelV_2nd_nt_pos_per_aa_vec", "current_Pi_V", "[", ":", ",", "pos", "]", "=", "self", ".", "PVdelV_2nd_nt_pos_per_aa_vec", "[", "V_in", "]", "[", "CDR3_seq", "[", "pos", "/", "3", "]", "]", "[", ":", ",", "pos", "]", "for", "j", ",", "J_in", "in", "enumerate", "(", "J_usage_mask", ")", ":", "Pi_V_given_J", "[", "j", "]", "[", ":", ",", ":", "current_alignment_length", "]", "+=", "self", ".", "PVJ", "[", "V_in", ",", "J_in", "]", "*", "current_Pi_V", "[", ":", ",", ":", "current_alignment_length", "]", "return", "Pi_V_given_J", ",", "max", "(", "alignment_lengths", ")" ]
Compute Pi_V conditioned on J. This function returns the Pi array from the model factors of the V genomic contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}. For clarity in parsing the algorithm implementation, we include which instance attributes are used in the method as 'parameters.' Parameters ---------- CDR3_seq : str CDR3 sequence composed of 'amino acids' (single character symbols each corresponding to a collection of codons as given by codons_dict). V_usage_mask : list Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list Indices of the J alleles to be considered in the Pgen computation self.cutV_genomic_CDR3_segs : list of strings List of all the V genomic nucleotide sequences trimmed to begin at the conserved C residue and with the maximum number of palindromic insertions appended. self.PVdelV_nt_pos_vec : list of ndarrays For each V allele, format P(delV|V) into the correct form for a Pi array or V(J)_{x_1}. This is only done for the first and last position in each codon. self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts For each V allele, and each 'amino acid', format P(V)*P(delV|V) for positions in the middle of a codon into the correct form for a Pi array or V(J)_{x_1} given the 'amino acid'. self.PVJ : ndarray Joint probability distribution of V and J, P(V, J). Returns ------- Pi_V_given_J : list List of (4, 3L) ndarrays corresponding to V(J)_{x_1}. max_V_align: int Maximum alignment of the CDR3_seq to any genomic V allele allowed by V_usage_mask.
[ "Compute", "Pi_V", "conditioned", "on", "J", ".", "This", "function", "returns", "the", "Pi", "array", "from", "the", "model", "factors", "of", "the", "V", "genomic", "contributions", "P", "(", "V", "J", ")", "*", "P", "(", "delV|V", ")", ".", "This", "corresponds", "to", "V", "(", "J", ")", "_", "{", "x_1", "}", ".", "For", "clarity", "in", "parsing", "the", "algorithm", "implementation", "we", "include", "which", "instance", "attributes", "are", "used", "in", "the", "method", "as", "parameters", ".", "Parameters", "----------", "CDR3_seq", ":", "str", "CDR3", "sequence", "composed", "of", "amino", "acids", "(", "single", "character", "symbols", "each", "corresponding", "to", "a", "collection", "of", "codons", "as", "given", "by", "codons_dict", ")", ".", "V_usage_mask", ":", "list", "Indices", "of", "the", "V", "alleles", "to", "be", "considered", "in", "the", "Pgen", "computation", "J_usage_mask", ":", "list", "Indices", "of", "the", "J", "alleles", "to", "be", "considered", "in", "the", "Pgen", "computation", "self", ".", "cutV_genomic_CDR3_segs", ":", "list", "of", "strings", "List", "of", "all", "the", "V", "genomic", "nucleotide", "sequences", "trimmed", "to", "begin", "at", "the", "conserved", "C", "residue", "and", "with", "the", "maximum", "number", "of", "palindromic", "insertions", "appended", ".", "self", ".", "PVdelV_nt_pos_vec", ":", "list", "of", "ndarrays", "For", "each", "V", "allele", "format", "P", "(", "delV|V", ")", "into", "the", "correct", "form", "for", "a", "Pi", "array", "or", "V", "(", "J", ")", "_", "{", "x_1", "}", ".", "This", "is", "only", "done", "for", "the", "first", "and", "last", "position", "in", "each", "codon", ".", "self", ".", "PVdelV_2nd_nt_pos_per_aa_vec", ":", "list", "of", "dicts", "For", "each", "V", "allele", "and", "each", "amino", "acid", "format", "P", "(", "V", ")", "*", "P", "(", "delV|V", ")", "for", "positions", "in", "the", "middle", "of", "a", "codon", "into", "the", "correct", "form", "for", "a", "Pi", "array", "or", "V", "(", "J", ")", "_", "{", "x_1", "}", "given", "the", "amino", "acid", ".", "self", ".", "PVJ", ":", "ndarray", "Joint", "probability", "distribution", "of", "V", "and", "J", "P", "(", "V", "J", ")", ".", "Returns", "-------", "Pi_V_given_J", ":", "list", "List", "of", "(", "4", "3L", ")", "ndarrays", "corresponding", "to", "V", "(", "J", ")", "_", "{", "x_1", "}", ".", "max_V_align", ":", "int", "Maximum", "alignment", "of", "the", "CDR3_seq", "to", "any", "genomic", "V", "allele", "allowed", "by", "V_usage_mask", "." ]
python
train
52.573529
twilio/twilio-python
twilio/rest/api/v2010/account/usage/record/this_month.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/record/this_month.py#L185-L194
def get_instance(self, payload): """ Build an instance of ThisMonthInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance :rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance """ return ThisMonthInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "ThisMonthInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of ThisMonthInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance :rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
[ "Build", "an", "instance", "of", "ThisMonthInstance" ]
python
train
43.5
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L572-L581
def copy(self, *, shallow=False): """Return a copy of a table.""" table = type(self)() for label in self.labels: if shallow: column = self[label] else: column = np.copy(self[label]) self._add_column_and_format(table, label, column) return table
[ "def", "copy", "(", "self", ",", "*", ",", "shallow", "=", "False", ")", ":", "table", "=", "type", "(", "self", ")", "(", ")", "for", "label", "in", "self", ".", "labels", ":", "if", "shallow", ":", "column", "=", "self", "[", "label", "]", "else", ":", "column", "=", "np", ".", "copy", "(", "self", "[", "label", "]", ")", "self", ".", "_add_column_and_format", "(", "table", ",", "label", ",", "column", ")", "return", "table" ]
Return a copy of a table.
[ "Return", "a", "copy", "of", "a", "table", "." ]
python
train
33.5
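What the shallow flag buys you, sketched with the datascience package (assuming it is installed and that `column` returns the stored array): the deep copy snapshots each column via np.copy, while the shallow copy shares the original arrays.

import numpy as np
from datascience import Table

t = Table().with_columns('x', np.array([1, 2, 3]))
deep = t.copy()
shallow = t.copy(shallow=True)
t.column('x')[0] = 99          # mutate the original column in place
print(deep.column('x')[0])     # 1  -- unaffected, column was np.copy'd
print(shallow.column('x')[0])  # 99 -- shares the same underlying array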
apple/turicreate
src/unity/python/turicreate/toolkits/sound_classifier/vggish_input.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/vggish_input.py#L24-L71
def waveform_to_examples(data, sample_rate): """Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS. """ import resampy # Convert to mono. if len(data.shape) > 1: data = np.mean(data, axis=1) # Resample to the rate assumed by VGGish. if sample_rate != vggish_params.SAMPLE_RATE: data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE) # Compute log mel spectrogram features. log_mel = mel_features.log_mel_spectrogram( data, audio_sample_rate=vggish_params.SAMPLE_RATE, log_offset=vggish_params.LOG_OFFSET, window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, num_mel_bins=vggish_params.NUM_MEL_BINS, lower_edge_hertz=vggish_params.MEL_MIN_HZ, upper_edge_hertz=vggish_params.MEL_MAX_HZ) # Frame features into examples. features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS example_window_length = int(round( vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)) example_hop_length = int(round( vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)) log_mel_examples = mel_features.frame( log_mel, window_length=example_window_length, hop_length=example_hop_length) return log_mel_examples
[ "def", "waveform_to_examples", "(", "data", ",", "sample_rate", ")", ":", "import", "resampy", "# Convert to mono.", "if", "len", "(", "data", ".", "shape", ")", ">", "1", ":", "data", "=", "np", ".", "mean", "(", "data", ",", "axis", "=", "1", ")", "# Resample to the rate assumed by VGGish.", "if", "sample_rate", "!=", "vggish_params", ".", "SAMPLE_RATE", ":", "data", "=", "resampy", ".", "resample", "(", "data", ",", "sample_rate", ",", "vggish_params", ".", "SAMPLE_RATE", ")", "# Compute log mel spectrogram features.", "log_mel", "=", "mel_features", ".", "log_mel_spectrogram", "(", "data", ",", "audio_sample_rate", "=", "vggish_params", ".", "SAMPLE_RATE", ",", "log_offset", "=", "vggish_params", ".", "LOG_OFFSET", ",", "window_length_secs", "=", "vggish_params", ".", "STFT_WINDOW_LENGTH_SECONDS", ",", "hop_length_secs", "=", "vggish_params", ".", "STFT_HOP_LENGTH_SECONDS", ",", "num_mel_bins", "=", "vggish_params", ".", "NUM_MEL_BINS", ",", "lower_edge_hertz", "=", "vggish_params", ".", "MEL_MIN_HZ", ",", "upper_edge_hertz", "=", "vggish_params", ".", "MEL_MAX_HZ", ")", "# Frame features into examples.", "features_sample_rate", "=", "1.0", "/", "vggish_params", ".", "STFT_HOP_LENGTH_SECONDS", "example_window_length", "=", "int", "(", "round", "(", "vggish_params", ".", "EXAMPLE_WINDOW_SECONDS", "*", "features_sample_rate", ")", ")", "example_hop_length", "=", "int", "(", "round", "(", "vggish_params", ".", "EXAMPLE_HOP_SECONDS", "*", "features_sample_rate", ")", ")", "log_mel_examples", "=", "mel_features", ".", "frame", "(", "log_mel", ",", "window_length", "=", "example_window_length", ",", "hop_length", "=", "example_hop_length", ")", "return", "log_mel_examples" ]
Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
[ "Converts", "audio", "waveform", "into", "an", "array", "of", "examples", "for", "VGGish", "." ]
python
train
39.395833
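Shape arithmetic for the returned array, using the commonly published VGGish defaults (10 ms STFT hop, 0.96 s example window and hop, 64 mel bins; treat these constants as assumptions, since vggish_params is not shown here):

STFT_HOP_LENGTH_SECONDS = 0.010   # assumed VGGish defaults
EXAMPLE_WINDOW_SECONDS = 0.96
NUM_MEL_BINS = 64

seconds = 3.0
num_frames = int(round(seconds / STFT_HOP_LENGTH_SECONDS))  # ~300 frames, ignoring STFT edge effects
features_sample_rate = 1.0 / STFT_HOP_LENGTH_SECONDS        # 100 frames per second
example_window_length = int(round(EXAMPLE_WINDOW_SECONDS * features_sample_rate))  # 96
num_examples = 1 + (num_frames - example_window_length) // example_window_length   # 3
print((num_examples, example_window_length, NUM_MEL_BINS))  # (3, 96, 64) output shape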
20tab/twentytab-tree
tree/menu.py
https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L68-L74
def as_ul(self, current_linkable=False, class_current="active_link", before_1="", after_1="", before_all="", after_all=""): """ It returns menu as ul """ return self.__do_menu("as_ul", current_linkable, class_current, before_1=before_1, after_1=after_1, before_all=before_all, after_all=after_all)
[ "def", "as_ul", "(", "self", ",", "current_linkable", "=", "False", ",", "class_current", "=", "\"active_link\"", ",", "before_1", "=", "\"\"", ",", "after_1", "=", "\"\"", ",", "before_all", "=", "\"\"", ",", "after_all", "=", "\"\"", ")", ":", "return", "self", ".", "__do_menu", "(", "\"as_ul\"", ",", "current_linkable", ",", "class_current", ",", "before_1", "=", "before_1", ",", "after_1", "=", "after_1", ",", "before_all", "=", "before_all", ",", "after_all", "=", "after_all", ")" ]
It returns menu as ul
[ "It", "returns", "menu", "as", "ul" ]
python
train
52.428571
Titan-C/slaveparticles
slaveparticles/spins.py
https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/slaveparticles/spins.py#L127-L143
def set_filling(self, populations): """Sets the orbital enenergies for on the reference of the free case. By setting the desired local populations on every orbital. Then generate the necesary operators to respect such configuraion""" populations = np.asarray(populations) # # self.param['orbital_e'] -= bethe_findfill_zeroT( \ # self.param['avg_particles'], # self.param['orbital_e'], # self.param['hopping']) efermi = - bethe_find_crystalfield( populations, self.param['hopping']) self.param['populations'] = populations # fermion_avg(efermi, self.param['hopping'], 'ocupation') self.param['ekin'] = fermion_avg(efermi, self.param['hopping'], 'ekin') spin_gen_op(self.oper, estimate_gauge(populations))
[ "def", "set_filling", "(", "self", ",", "populations", ")", ":", "populations", "=", "np", ".", "asarray", "(", "populations", ")", "#", "# self.param['orbital_e'] -= bethe_findfill_zeroT( \\", "# self.param['avg_particles'],", "# self.param['orbital_e'],", "# self.param['hopping'])", "efermi", "=", "-", "bethe_find_crystalfield", "(", "populations", ",", "self", ".", "param", "[", "'hopping'", "]", ")", "self", ".", "param", "[", "'populations'", "]", "=", "populations", "# fermion_avg(efermi, self.param['hopping'], 'ocupation')", "self", ".", "param", "[", "'ekin'", "]", "=", "fermion_avg", "(", "efermi", ",", "self", ".", "param", "[", "'hopping'", "]", ",", "'ekin'", ")", "spin_gen_op", "(", "self", ".", "oper", ",", "estimate_gauge", "(", "populations", ")", ")" ]
Sets the orbital enenergies for on the reference of the free case. By setting the desired local populations on every orbital. Then generate the necesary operators to respect such configuraion
[ "Sets", "the", "orbital", "enenergies", "for", "on", "the", "reference", "of", "the", "free", "case", ".", "By", "setting", "the", "desired", "local", "populations", "on", "every", "orbital", ".", "Then", "generate", "the", "necesary", "operators", "to", "respect", "such", "configuraion" ]
python
train
53.823529
RudolfCardinal/pythonlib
cardinal_pythonlib/interval.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/interval.py#L771-L777
def start_date(self) -> Optional[datetime.date]: """ Returns the start date of the set of intervals, or ``None`` if empty. """ if not self.intervals: return None return self.start_datetime().date()
[ "def", "start_date", "(", "self", ")", "->", "Optional", "[", "datetime", ".", "date", "]", ":", "if", "not", "self", ".", "intervals", ":", "return", "None", "return", "self", ".", "start_datetime", "(", ")", ".", "date", "(", ")" ]
Returns the start date of the set of intervals, or ``None`` if empty.
[ "Returns", "the", "start", "date", "of", "the", "set", "of", "intervals", "or", "None", "if", "empty", "." ]
python
train
34.714286
llazzaro/analyzerstrategies
analyzerstrategies/sma_portfolio_strategy.py
https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L131-L150
def __placeSellShortOrder(self, tick): ''' place short sell order''' share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close)) sellShortOrder=Order(accountId=self.__strategy.accountId, action=Action.SELL_SHORT, is_market=True, security=self.__security, share=share) if self.__strategy.placeOrder(sellShortOrder): self.__buyOrder=sellShortOrder # place stop order stopOrder=Order(accountId=self.__strategy.accountId, action=Action.BUY_TO_COVER, is_stop=True, security=self.__security, price=tick.close * 1.05, share=0 - share) self.__placeStopOrder(stopOrder)
[ "def", "__placeSellShortOrder", "(", "self", ",", "tick", ")", ":", "share", "=", "math", ".", "floor", "(", "self", ".", "__strategy", ".", "getAccountCopy", "(", ")", ".", "getCash", "(", ")", "/", "float", "(", "tick", ".", "close", ")", ")", "sellShortOrder", "=", "Order", "(", "accountId", "=", "self", ".", "__strategy", ".", "accountId", ",", "action", "=", "Action", ".", "SELL_SHORT", ",", "is_market", "=", "True", ",", "security", "=", "self", ".", "__security", ",", "share", "=", "share", ")", "if", "self", ".", "__strategy", ".", "placeOrder", "(", "sellShortOrder", ")", ":", "self", ".", "__buyOrder", "=", "sellShortOrder", "# place stop order", "stopOrder", "=", "Order", "(", "accountId", "=", "self", ".", "__strategy", ".", "accountId", ",", "action", "=", "Action", ".", "BUY_TO_COVER", ",", "is_stop", "=", "True", ",", "security", "=", "self", ".", "__security", ",", "price", "=", "tick", ".", "close", "*", "1.05", ",", "share", "=", "0", "-", "share", ")", "self", ".", "__placeStopOrder", "(", "stopOrder", ")" ]
place short sell order
[ "place", "short", "sell", "order" ]
python
train
45.5
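The position sizing and stop placement reduce to three lines of arithmetic; illustrative numbers below (the negative share count flags the covering side of the short):

import math

cash, close = 10000.0, 42.50
share = math.floor(cash / close)      # 235 shares shorted at market
stop_price = close * 1.05             # 44.625: buy-to-cover ~5% above entry
stop_share = 0 - share                # -235: covering order offsets the short
print(share, stop_price, stop_share)  # 235 44.625 -235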
pydata/xarray
xarray/core/groupby.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/groupby.py#L482-L488
def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without metadata """ var = self._obj.variable for indices in self._group_indices: yield var[{self._group_dim: indices}]
[ "def", "_iter_grouped_shortcut", "(", "self", ")", ":", "var", "=", "self", ".", "_obj", ".", "variable", "for", "indices", "in", "self", ".", "_group_indices", ":", "yield", "var", "[", "{", "self", ".", "_group_dim", ":", "indices", "}", "]" ]
Fast version of `_iter_grouped` that yields Variables without metadata
[ "Fast", "version", "of", "_iter_grouped", "that", "yields", "Variables", "without", "metadata" ]
python
train
36.571429
KelSolaar/Foundations
foundations/io.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/io.py#L122-L132
def content(self, value): """ Setter for **self.__content** attribute. :param value: Attribute value. :type value: list """ if value is not None: assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("content", value) self.__content = value
[ "def", "content", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "list", ",", "\"'{0}' attribute: '{1}' type is not 'list'!\"", ".", "format", "(", "\"content\"", ",", "value", ")", "self", ".", "__content", "=", "value" ]
Setter for **self.__content** attribute. :param value: Attribute value. :type value: list
[ "Setter", "for", "**", "self", ".", "__content", "**", "attribute", "." ]
python
train
29.636364
project-rig/rig
rig/place_and_route/route/ner.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L413-L433
def route_has_dead_links(root, machine): """Quickly determine if a route uses any dead links. Parameters ---------- root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree` The root of the RoutingTree which contains nothing but RoutingTrees (i.e. no vertices and links). machine : :py:class:`~rig.place_and_route.Machine` The machine in which the routes exist. Returns ------- bool True if the route uses any dead/missing links, False otherwise. """ for direction, (x, y), routes in root.traverse(): for route in routes: if (x, y, route) not in machine: return True return False
[ "def", "route_has_dead_links", "(", "root", ",", "machine", ")", ":", "for", "direction", ",", "(", "x", ",", "y", ")", ",", "routes", "in", "root", ".", "traverse", "(", ")", ":", "for", "route", "in", "routes", ":", "if", "(", "x", ",", "y", ",", "route", ")", "not", "in", "machine", ":", "return", "True", "return", "False" ]
Quickly determine if a route uses any dead links. Parameters ---------- root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree` The root of the RoutingTree which contains nothing but RoutingTrees (i.e. no vertices and links). machine : :py:class:`~rig.place_and_route.Machine` The machine in which the routes exist. Returns ------- bool True if the route uses any dead/missing links, False otherwise.
[ "Quickly", "determine", "if", "a", "route", "uses", "any", "dead", "links", "." ]
python
train
32.380952
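The shape of the check, with a toy set standing in for the machine's (x, y, link) membership test (the real Machine object must implement `in` for such triples, as the source's `(x, y, route) not in machine` shows):

live_links = {(0, 0, 'east'), (1, 0, 'east')}  # toy stand-in for a Machine

def has_dead_links(hops, live=live_links):
    return any(hop not in live for hop in hops)

print(has_dead_links([(0, 0, 'east'), (1, 0, 'east')]))  # False
print(has_dead_links([(0, 0, 'north')]))                 # True: missing link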
radjkarl/appBase
appbase/Session.py
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L483-L503
def _save(self, stateName, path): """save into 'stateName' to pyz-path""" print('saving...') state = {'session': dict(self.opts), 'dialogs': self.dialogs.saveState()} self.sigSave.emit(state) self.saveThread.prepare(stateName, path, self.tmp_dir_session, state) self.saveThread.start() self.current_session = stateName r = self.opts['recent sessions'] try: # is this session already exists: remove it r.pop(r.index(path)) except ValueError: pass # add this session at the beginning r.insert(0, path)
[ "def", "_save", "(", "self", ",", "stateName", ",", "path", ")", ":", "print", "(", "'saving...'", ")", "state", "=", "{", "'session'", ":", "dict", "(", "self", ".", "opts", ")", ",", "'dialogs'", ":", "self", ".", "dialogs", ".", "saveState", "(", ")", "}", "self", ".", "sigSave", ".", "emit", "(", "state", ")", "self", ".", "saveThread", ".", "prepare", "(", "stateName", ",", "path", ",", "self", ".", "tmp_dir_session", ",", "state", ")", "self", ".", "saveThread", ".", "start", "(", ")", "self", ".", "current_session", "=", "stateName", "r", "=", "self", ".", "opts", "[", "'recent sessions'", "]", "try", ":", "# is this session already exists: remove it\r", "r", ".", "pop", "(", "r", ".", "index", "(", "path", ")", ")", "except", "ValueError", ":", "pass", "# add this session at the beginning\r", "r", ".", "insert", "(", "0", ",", "path", ")" ]
save into 'stateName' to pyz-path
[ "save", "into", "stateName", "to", "pyz", "-", "path" ]
python
train
31.047619
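The recent-sessions bookkeeping at the end of _save is a small move-to-front (MRU) update; extracted as a standalone helper:

def touch(recent, path):
    try:
        recent.pop(recent.index(path))  # drop an existing entry, if any
    except ValueError:
        pass
    recent.insert(0, path)              # newest session first

r = ['a', 'b', 'c']
touch(r, 'c'); touch(r, 'd')
print(r)  # ['d', 'c', 'a', 'b']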
googlefonts/glyphsLib
Lib/glyphsLib/glyphdata.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/glyphdata.py#L282-L384
def _construct_production_name(glyph_name, data=None):
    """Return the production name for a glyph name from the GlyphData.xml
    database according to the AGL specification.

    This should be run only if there is no official entry with a production
    name in it.

    Handles single glyphs (e.g. "brevecomb") and ligatures (e.g.
    "brevecomb_acutecomb"). Returns None when a valid and semantically
    meaningful production name can't be constructed or when the AGL
    specification would be violated, get_glyph() will use the bare glyph
    name then.

    Note:
    - Glyph name is the full name, e.g. "brevecomb_acutecomb.case".
    - Base name is the base part, e.g. "brevecomb_acutecomb"
    - Suffix is e.g. "case".
    """
    # At this point, we have already checked the data for the full glyph name, so
    # directly go to the base name here (e.g. when looking at "fi.alt").
    base_name, dot, suffix = glyph_name.partition(".")
    glyphinfo = _lookup_attributes(base_name, data)
    if glyphinfo and glyphinfo.get("production"):
        # Found the base glyph.
        return glyphinfo["production"] + dot + suffix

    if glyph_name in fontTools.agl.AGL2UV or base_name in fontTools.agl.AGL2UV:
        # Glyph name is actually an AGLFN name.
        return glyph_name

    if "_" not in base_name:
        # Nothing found so far and the glyph name isn't a ligature ("_"
        # somewhere in it). The name does not carry any discernable Unicode
        # semantics, so just return something sanitized.
        return _agl_compliant_name(glyph_name)

    # So we have a ligature that is not mapped in the data. Split it up and
    # look up the individual parts.
    base_name_parts = base_name.split("_")

    # If all parts are in the AGLFN list, the glyph name is our production
    # name already.
    if all(part in fontTools.agl.AGL2UV for part in base_name_parts):
        return _agl_compliant_name(glyph_name)

    # Turn all parts of the ligature into production names.
    _character_outside_BMP = False
    production_names = []
    for part in base_name_parts:
        if part in fontTools.agl.AGL2UV:
            # A name present in the AGLFN is a production name already.
            production_names.append(part)
        else:
            part_entry = data.names.get(part) or {}
            part_production_name = part_entry.get("production")
            if part_production_name:
                production_names.append(part_production_name)

                # Take note if there are any characters outside the Unicode
                # BMP, e.g. "u10FFF" or "u10FFFF". Do not catch e.g. "u013B"
                # though.
                if len(part_production_name) > 5 and _is_unicode_u_value(
                    part_production_name
                ):
                    _character_outside_BMP = True
            else:
                # We hit a part that does not seem to be a valid glyph name known to us,
                # so the entire glyph name can't carry Unicode meaning. Return it
                # sanitized.
                return _agl_compliant_name(glyph_name)

    # Some names Glyphs uses resolve to other names that are not uniXXXX names and may
    # contain dots (e.g. idotaccent -> i.loclTRK). If there is any name with a "." in
    # it before the last element, punt. We'd have to introduce a "." into the ligature
    # midway, which is invalid according to the AGL. Example: "a_i.loclTRK" is valid,
    # but "a_i.loclTRK_a" isn't.
    if any("." in part for part in production_names[:-1]):
        return _agl_compliant_name(glyph_name)

    # If any production name starts with a "uni" and there are none of the
    # "uXXXXX" format, try to turn all parts into "uni" names and concatenate
    # them.
    if not _character_outside_BMP and any(
        part.startswith("uni") for part in production_names
    ):
        uni_names = []
        for part in production_names:
            if part.startswith("uni"):
                uni_names.append(part[3:])
            elif len(part) == 5 and _is_unicode_u_value(part):
                uni_names.append(part[1:])
            elif part in fontTools.agl.AGL2UV:
                uni_names.append("{:04X}".format(fontTools.agl.AGL2UV[part]))
            else:
                return None
        final_production_name = "uni" + "".join(uni_names) + dot + suffix
    else:
        final_production_name = "_".join(production_names) + dot + suffix

    return _agl_compliant_name(final_production_name)
[ "def", "_construct_production_name", "(", "glyph_name", ",", "data", "=", "None", ")", ":", "# At this point, we have already checked the data for the full glyph name, so", "# directly go to the base name here (e.g. when looking at \"fi.alt\").", "base_name", ",", "dot", ",", "suffix", "=", "glyph_name", ".", "partition", "(", "\".\"", ")", "glyphinfo", "=", "_lookup_attributes", "(", "base_name", ",", "data", ")", "if", "glyphinfo", "and", "glyphinfo", ".", "get", "(", "\"production\"", ")", ":", "# Found the base glyph.", "return", "glyphinfo", "[", "\"production\"", "]", "+", "dot", "+", "suffix", "if", "glyph_name", "in", "fontTools", ".", "agl", ".", "AGL2UV", "or", "base_name", "in", "fontTools", ".", "agl", ".", "AGL2UV", ":", "# Glyph name is actually an AGLFN name.", "return", "glyph_name", "if", "\"_\"", "not", "in", "base_name", ":", "# Nothing found so far and the glyph name isn't a ligature (\"_\"", "# somewhere in it). The name does not carry any discernable Unicode", "# semantics, so just return something sanitized.", "return", "_agl_compliant_name", "(", "glyph_name", ")", "# So we have a ligature that is not mapped in the data. Split it up and", "# look up the individual parts.", "base_name_parts", "=", "base_name", ".", "split", "(", "\"_\"", ")", "# If all parts are in the AGLFN list, the glyph name is our production", "# name already.", "if", "all", "(", "part", "in", "fontTools", ".", "agl", ".", "AGL2UV", "for", "part", "in", "base_name_parts", ")", ":", "return", "_agl_compliant_name", "(", "glyph_name", ")", "# Turn all parts of the ligature into production names.", "_character_outside_BMP", "=", "False", "production_names", "=", "[", "]", "for", "part", "in", "base_name_parts", ":", "if", "part", "in", "fontTools", ".", "agl", ".", "AGL2UV", ":", "# A name present in the AGLFN is a production name already.", "production_names", ".", "append", "(", "part", ")", "else", ":", "part_entry", "=", "data", ".", "names", ".", "get", "(", "part", ")", "or", "{", "}", "part_production_name", "=", "part_entry", ".", "get", "(", "\"production\"", ")", "if", "part_production_name", ":", "production_names", ".", "append", "(", "part_production_name", ")", "# Take note if there are any characters outside the Unicode", "# BMP, e.g. \"u10FFF\" or \"u10FFFF\". Do not catch e.g. \"u013B\"", "# though.", "if", "len", "(", "part_production_name", ")", ">", "5", "and", "_is_unicode_u_value", "(", "part_production_name", ")", ":", "_character_outside_BMP", "=", "True", "else", ":", "# We hit a part that does not seem to be a valid glyph name known to us,", "# so the entire glyph name can't carry Unicode meaning. Return it", "# sanitized.", "return", "_agl_compliant_name", "(", "glyph_name", ")", "# Some names Glyphs uses resolve to other names that are not uniXXXX names and may", "# contain dots (e.g. idotaccent -> i.loclTRK). If there is any name with a \".\" in", "# it before the last element, punt. We'd have to introduce a \".\" into the ligature", "# midway, which is invalid according to the AGL. 
Example: \"a_i.loclTRK\" is valid,", "# but \"a_i.loclTRK_a\" isn't.", "if", "any", "(", "\".\"", "in", "part", "for", "part", "in", "production_names", "[", ":", "-", "1", "]", ")", ":", "return", "_agl_compliant_name", "(", "glyph_name", ")", "# If any production name starts with a \"uni\" and there are none of the", "# \"uXXXXX\" format, try to turn all parts into \"uni\" names and concatenate", "# them.", "if", "not", "_character_outside_BMP", "and", "any", "(", "part", ".", "startswith", "(", "\"uni\"", ")", "for", "part", "in", "production_names", ")", ":", "uni_names", "=", "[", "]", "for", "part", "in", "production_names", ":", "if", "part", ".", "startswith", "(", "\"uni\"", ")", ":", "uni_names", ".", "append", "(", "part", "[", "3", ":", "]", ")", "elif", "len", "(", "part", ")", "==", "5", "and", "_is_unicode_u_value", "(", "part", ")", ":", "uni_names", ".", "append", "(", "part", "[", "1", ":", "]", ")", "elif", "part", "in", "fontTools", ".", "agl", ".", "AGL2UV", ":", "uni_names", ".", "append", "(", "\"{:04X}\"", ".", "format", "(", "fontTools", ".", "agl", ".", "AGL2UV", "[", "part", "]", ")", ")", "else", ":", "return", "None", "final_production_name", "=", "\"uni\"", "+", "\"\"", ".", "join", "(", "uni_names", ")", "+", "dot", "+", "suffix", "else", ":", "final_production_name", "=", "\"_\"", ".", "join", "(", "production_names", ")", "+", "dot", "+", "suffix", "return", "_agl_compliant_name", "(", "final_production_name", ")" ]
Return the production name for a glyph name from the GlyphData.xml database according to the AGL specification. This should be run only if there is no official entry with a production name in it. Handles single glyphs (e.g. "brevecomb") and ligatures (e.g. "brevecomb_acutecomb"). Returns None when a valid and semantically meaningful production name can't be constructed or when the AGL specification would be violated, get_glyph() will use the bare glyph name then. Note: - Glyph name is the full name, e.g. "brevecomb_acutecomb.case". - Base name is the base part, e.g. "brevecomb_acutecomb" - Suffix is e.g. "case".
[ "Return", "the", "production", "name", "for", "a", "glyph", "name", "from", "the", "GlyphData", ".", "xml", "database", "according", "to", "the", "AGL", "specification", "." ]
python
train
42.873786
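The "uni" concatenation branch, worked by hand for brevecomb_acutecomb.case; the two production names below are the well-known values for these combining marks, but treat the lookup table as an assumption standing in for the GlyphData query:

parts = ['brevecomb', 'acutecomb']
prod = {'brevecomb': 'uni0306', 'acutecomb': 'uni0301'}  # assumed lookups
uni_names = [prod[p][3:] for p in parts]                 # ['0306', '0301']
print('uni' + ''.join(uni_names) + '.' + 'case')         # uni03060301.case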
pandas-dev/pandas
pandas/core/missing.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L314-L355
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): """ Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order: None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This numberincludes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to ouf-of-bounds points based on first and last intervals, or to return NaNs. Default: True. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array_like The result, of length R or length M or M by R. """ from scipy import interpolate # return the method for compat with scipy version & backwards compat method = interpolate.BPoly.from_derivatives m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) return m(x)
[ "def", "_from_derivatives", "(", "xi", ",", "yi", ",", "x", ",", "order", "=", "None", ",", "der", "=", "0", ",", "extrapolate", "=", "False", ")", ":", "from", "scipy", "import", "interpolate", "# return the method for compat with scipy version & backwards compat", "method", "=", "interpolate", ".", "BPoly", ".", "from_derivatives", "m", "=", "method", "(", "xi", ",", "yi", ".", "reshape", "(", "-", "1", ",", "1", ")", ",", "orders", "=", "order", ",", "extrapolate", "=", "extrapolate", ")", "return", "m", "(", "x", ")" ]
Convenience function for interpolate.BPoly.from_derivatives.

    Construct a piecewise polynomial in the Bernstein basis, compatible
    with the specified values and derivatives at breakpoints.

    Parameters
    ----------
    xi : array_like
        sorted 1D array of x-coordinates
    yi : array_like or list of array-likes
        yi[i][j] is the j-th derivative known at xi[i]
    order : None or int or array_like of ints. Default: None.
        Specifies the degree of local polynomials. If not None, some
        derivatives are ignored.
    der : int or list
        How many derivatives to extract; None for all potentially
        nonzero derivatives (that is a number equal to the number of
        points), or a list of derivatives to extract. This number includes
        the function value as 0th derivative.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.

    See Also
    --------
    scipy.interpolate.BPoly.from_derivatives

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R.
[ "Convenience", "function", "for", "interpolate", ".", "BPoly", ".", "from_derivatives", "." ]
python
train
35.428571
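A minimal usage sketch of the scipy call this pandas helper wraps; the breakpoints and values below are made-up illustration data.

import numpy as np
from scipy import interpolate

xi = np.array([0.0, 1.0, 2.0])      # sorted breakpoints
yi = np.array([0.0, 1.0, 4.0])      # one known value (the 0th derivative) per breakpoint
poly = interpolate.BPoly.from_derivatives(xi, yi.reshape(-1, 1))
print(poly(np.array([0.5, 1.5])))   # evaluates the piecewise polynomial between breakpoints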
holtjma/msbwt
MUS/MultiStringBWT.py
https://github.com/holtjma/msbwt/blob/7503346ec072ddb89520db86fef85569a9ba093a/MUS/MultiStringBWT.py#L169-L209
def recoverString(self, strIndex, withIndex=False): ''' This will return the string that starts at the given index @param strIndex - the index of the string we want to recover @return - string that we found starting at the specified '$' index ''' retNums = [] indices = [] #figure out the first hop backwards currIndex = strIndex prevChar = self.getCharAtIndex(currIndex) currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex) #while we haven't looped back to the start while currIndex != strIndex: #update the string retNums.append(prevChar) if withIndex: indices.append(currIndex) #figure out where to go from here prevChar = self.getCharAtIndex(currIndex) currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex) for i in xrange(0, self.vcLen): if strIndex < self.endIndex[i]: retNums.append(i) break if withIndex: indices.append(strIndex) #reverse the numbers, convert to characters, and join them in to a single sequence ret = ''.join(self.numToChar[retNums[::-1]]) #return what we found if withIndex: return (ret, indices[::-1]) else: return ret
[ "def", "recoverString", "(", "self", ",", "strIndex", ",", "withIndex", "=", "False", ")", ":", "retNums", "=", "[", "]", "indices", "=", "[", "]", "#figure out the first hop backwards", "currIndex", "=", "strIndex", "prevChar", "=", "self", ".", "getCharAtIndex", "(", "currIndex", ")", "currIndex", "=", "self", ".", "getOccurrenceOfCharAtIndex", "(", "prevChar", ",", "currIndex", ")", "#while we haven't looped back to the start", "while", "currIndex", "!=", "strIndex", ":", "#update the string", "retNums", ".", "append", "(", "prevChar", ")", "if", "withIndex", ":", "indices", ".", "append", "(", "currIndex", ")", "#figure out where to go from here", "prevChar", "=", "self", ".", "getCharAtIndex", "(", "currIndex", ")", "currIndex", "=", "self", ".", "getOccurrenceOfCharAtIndex", "(", "prevChar", ",", "currIndex", ")", "for", "i", "in", "xrange", "(", "0", ",", "self", ".", "vcLen", ")", ":", "if", "strIndex", "<", "self", ".", "endIndex", "[", "i", "]", ":", "retNums", ".", "append", "(", "i", ")", "break", "if", "withIndex", ":", "indices", ".", "append", "(", "strIndex", ")", "#reverse the numbers, convert to characters, and join them in to a single sequence", "ret", "=", "''", ".", "join", "(", "self", ".", "numToChar", "[", "retNums", "[", ":", ":", "-", "1", "]", "]", ")", "#return what we found", "if", "withIndex", ":", "return", "(", "ret", ",", "indices", "[", ":", ":", "-", "1", "]", ")", "else", ":", "return", "ret" ]
This will return the string that starts at the given index @param strIndex - the index of the string we want to recover @return - string that we found starting at the specified '$' index
[ "This", "will", "return", "the", "string", "that", "starts", "at", "the", "given", "index" ]
python
train
34.829268
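recoverString walks the BWT backwards through precomputed occurrence tables; as a self-contained illustration of the same idea (a sketch, not the library's method), here is a deliberately naive O(n^2) BWT inversion, with '$' as the end-of-string sentinel:

def naive_inverse_bwt(bwt):
    # Repeatedly prepend the BWT column to the sorted table; after len(bwt)
    # rounds the table holds every rotation of the original string in order.
    table = [''] * len(bwt)
    for _ in range(len(bwt)):
        table = sorted(bwt[i] + table[i] for i in range(len(bwt)))
    # The rotation ending in the sentinel is the original string.
    return next(row for row in table if row.endswith('$'))

print(naive_inverse_bwt('annb$aa'))  # 'banana$'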
edwards-lab/libGWAS
libgwas/impute_parser.py
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/impute_parser.py#L178-L200
def load_genotypes(self): """Prepares the files for genotype parsing. :return: None """ if self.file_index < len(self.archives): self.current_file = self.archives[self.file_index] info_filename = self.current_file.replace(Parser.gen_ext, Parser.info_ext) if len(self.info_files) > 0: info_filename = self.info_files[self.file_index] self.info_file = open(info_filename) self.info_file.readline() # Dump the header if DataParser.compressed_pedigree: self.freq_file = gzip.open("%s" % (self.current_file), 'rb') else: self.freq_file = open(self.current_file) self.current_chrom = self.chroms[self.file_index] self.file_index += 1 else: raise StopIteration
[ "def", "load_genotypes", "(", "self", ")", ":", "if", "self", ".", "file_index", "<", "len", "(", "self", ".", "archives", ")", ":", "self", ".", "current_file", "=", "self", ".", "archives", "[", "self", ".", "file_index", "]", "info_filename", "=", "self", ".", "current_file", ".", "replace", "(", "Parser", ".", "gen_ext", ",", "Parser", ".", "info_ext", ")", "if", "len", "(", "self", ".", "info_files", ")", ">", "0", ":", "info_filename", "=", "self", ".", "info_files", "[", "self", ".", "file_index", "]", "self", ".", "info_file", "=", "open", "(", "info_filename", ")", "self", ".", "info_file", ".", "readline", "(", ")", "# Dump the header", "if", "DataParser", ".", "compressed_pedigree", ":", "self", ".", "freq_file", "=", "gzip", ".", "open", "(", "\"%s\"", "%", "(", "self", ".", "current_file", ")", ",", "'rb'", ")", "else", ":", "self", ".", "freq_file", "=", "open", "(", "self", ".", "current_file", ")", "self", ".", "current_chrom", "=", "self", ".", "chroms", "[", "self", ".", "file_index", "]", "self", ".", "file_index", "+=", "1", "else", ":", "raise", "StopIteration" ]
Prepares the files for genotype parsing. :return: None
[ "Prepares", "the", "files", "for", "genotype", "parsing", "." ]
python
train
36.73913
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L737-L787
def setup_editorstack(self, parent, layout): """Setup editorstack's layout""" layout.setSpacing(1) self.fname_label = QLabel() self.fname_label.setStyleSheet( "QLabel {margin: 0px; padding: 3px;}") layout.addWidget(self.fname_label) menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'), tip=_('Options')) # Don't show menu arrow and remove padding if is_dark_interface(): menu_btn.setStyleSheet( ("QToolButton::menu-indicator{image: none;}\n" "QToolButton{margin: 1px; padding: 3px;}")) else: menu_btn.setStyleSheet( "QToolButton::menu-indicator{image: none;}") self.menu = QMenu(self) menu_btn.setMenu(self.menu) menu_btn.setPopupMode(menu_btn.InstantPopup) self.menu.aboutToShow.connect(self.__setup_menu) corner_widgets = {Qt.TopRightCorner: [menu_btn]} self.tabs = BaseTabs(self, menu=self.menu, menu_use_tooltips=True, corner_widgets=corner_widgets) self.tabs.tabBar().setObjectName('plugin-tab') self.tabs.set_close_function(self.close_file) self.tabs.tabBar().tabMoved.connect(self.move_editorstack_data) self.tabs.setMovable(True) self.stack_history.refresh() if hasattr(self.tabs, 'setDocumentMode') \ and not sys.platform == 'darwin': # Don't set document mode to true on OSX because it generates # a crash when the editor is detached from the main window # Fixes Issue 561 self.tabs.setDocumentMode(True) self.tabs.currentChanged.connect(self.current_changed) if sys.platform == 'darwin': tab_container = QWidget() tab_container.setObjectName('tab-container') tab_layout = QHBoxLayout(tab_container) tab_layout.setContentsMargins(0, 0, 0, 0) tab_layout.addWidget(self.tabs) layout.addWidget(tab_container) else: layout.addWidget(self.tabs)
[ "def", "setup_editorstack", "(", "self", ",", "parent", ",", "layout", ")", ":", "layout", ".", "setSpacing", "(", "1", ")", "self", ".", "fname_label", "=", "QLabel", "(", ")", "self", ".", "fname_label", ".", "setStyleSheet", "(", "\"QLabel {margin: 0px; padding: 3px;}\"", ")", "layout", ".", "addWidget", "(", "self", ".", "fname_label", ")", "menu_btn", "=", "create_toolbutton", "(", "self", ",", "icon", "=", "ima", ".", "icon", "(", "'tooloptions'", ")", ",", "tip", "=", "_", "(", "'Options'", ")", ")", "# Don't show menu arrow and remove padding\r", "if", "is_dark_interface", "(", ")", ":", "menu_btn", ".", "setStyleSheet", "(", "(", "\"QToolButton::menu-indicator{image: none;}\\n\"", "\"QToolButton{margin: 1px; padding: 3px;}\"", ")", ")", "else", ":", "menu_btn", ".", "setStyleSheet", "(", "\"QToolButton::menu-indicator{image: none;}\"", ")", "self", ".", "menu", "=", "QMenu", "(", "self", ")", "menu_btn", ".", "setMenu", "(", "self", ".", "menu", ")", "menu_btn", ".", "setPopupMode", "(", "menu_btn", ".", "InstantPopup", ")", "self", ".", "menu", ".", "aboutToShow", ".", "connect", "(", "self", ".", "__setup_menu", ")", "corner_widgets", "=", "{", "Qt", ".", "TopRightCorner", ":", "[", "menu_btn", "]", "}", "self", ".", "tabs", "=", "BaseTabs", "(", "self", ",", "menu", "=", "self", ".", "menu", ",", "menu_use_tooltips", "=", "True", ",", "corner_widgets", "=", "corner_widgets", ")", "self", ".", "tabs", ".", "tabBar", "(", ")", ".", "setObjectName", "(", "'plugin-tab'", ")", "self", ".", "tabs", ".", "set_close_function", "(", "self", ".", "close_file", ")", "self", ".", "tabs", ".", "tabBar", "(", ")", ".", "tabMoved", ".", "connect", "(", "self", ".", "move_editorstack_data", ")", "self", ".", "tabs", ".", "setMovable", "(", "True", ")", "self", ".", "stack_history", ".", "refresh", "(", ")", "if", "hasattr", "(", "self", ".", "tabs", ",", "'setDocumentMode'", ")", "and", "not", "sys", ".", "platform", "==", "'darwin'", ":", "# Don't set document mode to true on OSX because it generates\r", "# a crash when the editor is detached from the main window\r", "# Fixes Issue 561\r", "self", ".", "tabs", ".", "setDocumentMode", "(", "True", ")", "self", ".", "tabs", ".", "currentChanged", ".", "connect", "(", "self", ".", "current_changed", ")", "if", "sys", ".", "platform", "==", "'darwin'", ":", "tab_container", "=", "QWidget", "(", ")", "tab_container", ".", "setObjectName", "(", "'tab-container'", ")", "tab_layout", "=", "QHBoxLayout", "(", "tab_container", ")", "tab_layout", ".", "setContentsMargins", "(", "0", ",", "0", ",", "0", ",", "0", ")", "tab_layout", ".", "addWidget", "(", "self", ".", "tabs", ")", "layout", ".", "addWidget", "(", "tab_container", ")", "else", ":", "layout", ".", "addWidget", "(", "self", ".", "tabs", ")" ]
Setup editorstack's layout
[ "Setup", "editorstack", "s", "layout" ]
python
train
42.215686
twisted/txaws
txaws/client/discover/entry_point.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/client/discover/entry_point.py#L63-L114
def parse_options(arguments): """Parse command line arguments. The parsing logic is fairly simple. It can only parse long-style parameters of the form:: --key value Several parameters can be defined in the environment and will be used unless explicitly overridden with command-line arguments. The access key, secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID}, C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables. @param arguments: A list of command-line arguments. The first item is expected to be the name of the program being run. @raises OptionError: Raised if incorrectly formed command-line arguments are specified, or if required command-line arguments are not present. @raises UsageError: Raised if C{--help} is present in command-line arguments. @return: A C{dict} with key/value pairs extracted from the argument list. """ arguments = arguments[1:] options = {} while arguments: key = arguments.pop(0) if key in ("-h", "--help"): raise UsageError("Help requested.") if key.startswith("--"): key = key[2:] try: value = arguments.pop(0) except IndexError: raise OptionError("'--%s' is missing a value." % key) options[key] = value else: raise OptionError("Encountered unexpected value '%s'." % key) default_key = os.environ.get("AWS_ACCESS_KEY_ID") if "key" not in options and default_key: options["key"] = default_key default_secret = os.environ.get("AWS_SECRET_ACCESS_KEY") if "secret" not in options and default_secret: options["secret"] = default_secret default_endpoint = os.environ.get("AWS_ENDPOINT") if "endpoint" not in options and default_endpoint: options["endpoint"] = default_endpoint for name in ("key", "secret", "endpoint", "action"): if name not in options: raise OptionError( "The '--%s' command-line argument is required." % name) return options
[ "def", "parse_options", "(", "arguments", ")", ":", "arguments", "=", "arguments", "[", "1", ":", "]", "options", "=", "{", "}", "while", "arguments", ":", "key", "=", "arguments", ".", "pop", "(", "0", ")", "if", "key", "in", "(", "\"-h\"", ",", "\"--help\"", ")", ":", "raise", "UsageError", "(", "\"Help requested.\"", ")", "if", "key", ".", "startswith", "(", "\"--\"", ")", ":", "key", "=", "key", "[", "2", ":", "]", "try", ":", "value", "=", "arguments", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "raise", "OptionError", "(", "\"'--%s' is missing a value.\"", "%", "key", ")", "options", "[", "key", "]", "=", "value", "else", ":", "raise", "OptionError", "(", "\"Encountered unexpected value '%s'.\"", "%", "key", ")", "default_key", "=", "os", ".", "environ", ".", "get", "(", "\"AWS_ACCESS_KEY_ID\"", ")", "if", "\"key\"", "not", "in", "options", "and", "default_key", ":", "options", "[", "\"key\"", "]", "=", "default_key", "default_secret", "=", "os", ".", "environ", ".", "get", "(", "\"AWS_SECRET_ACCESS_KEY\"", ")", "if", "\"secret\"", "not", "in", "options", "and", "default_secret", ":", "options", "[", "\"secret\"", "]", "=", "default_secret", "default_endpoint", "=", "os", ".", "environ", ".", "get", "(", "\"AWS_ENDPOINT\"", ")", "if", "\"endpoint\"", "not", "in", "options", "and", "default_endpoint", ":", "options", "[", "\"endpoint\"", "]", "=", "default_endpoint", "for", "name", "in", "(", "\"key\"", ",", "\"secret\"", ",", "\"endpoint\"", ",", "\"action\"", ")", ":", "if", "name", "not", "in", "options", ":", "raise", "OptionError", "(", "\"The '--%s' command-line argument is required.\"", "%", "name", ")", "return", "options" ]
Parse command line arguments. The parsing logic is fairly simple. It can only parse long-style parameters of the form:: --key value Several parameters can be defined in the environment and will be used unless explicitly overridden with command-line arguments. The access key, secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID}, C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables. @param arguments: A list of command-line arguments. The first item is expected to be the name of the program being run. @raises OptionError: Raised if incorrectly formed command-line arguments are specified, or if required command-line arguments are not present. @raises UsageError: Raised if C{--help} is present in command-line arguments. @return: A C{dict} with key/value pairs extracted from the argument list.
[ "Parse", "command", "line", "arguments", "." ]
python
train
40
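A usage sketch for the parser above; the credentials and endpoint are placeholders. Note that the first argv item is treated as the program name and discarded.

argv = ['txaws-discover',
        '--key', 'access-key', '--secret', 'secret-key',
        '--endpoint', 'https://ec2.us-east-1.amazonaws.com',
        '--action', 'DescribeRegions']
options = parse_options(argv)
print(options['action'])   # 'DescribeRegions'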
miguelgrinberg/Flask-Migrate
flask_migrate/templates/flask-multidb/env.py
https://github.com/miguelgrinberg/Flask-Migrate/blob/65fbd978681bdf2eddf8940edd04ed7272a94480/flask_migrate/templates/flask-multidb/env.py#L55-L91
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ # for the --sql use case, run migrations for each URL into # individual files. engines = { '': { 'url': context.config.get_main_option('sqlalchemy.url') } } for name in bind_names: engines[name] = rec = {} rec['url'] = context.config.get_section_option(name, "sqlalchemy.url") for name, rec in engines.items(): logger.info("Migrating database %s" % (name or '<default>')) file_ = "%s.sql" % name logger.info("Writing output to %s" % file_) with open(file_, 'w') as buffer: context.configure( url=rec['url'], output_buffer=buffer, target_metadata=get_metadata(name), literal_binds=True, ) with context.begin_transaction(): context.run_migrations(engine_name=name)
[ "def", "run_migrations_offline", "(", ")", ":", "# for the --sql use case, run migrations for each URL into", "# individual files.", "engines", "=", "{", "''", ":", "{", "'url'", ":", "context", ".", "config", ".", "get_main_option", "(", "'sqlalchemy.url'", ")", "}", "}", "for", "name", "in", "bind_names", ":", "engines", "[", "name", "]", "=", "rec", "=", "{", "}", "rec", "[", "'url'", "]", "=", "context", ".", "config", ".", "get_section_option", "(", "name", ",", "\"sqlalchemy.url\"", ")", "for", "name", ",", "rec", "in", "engines", ".", "items", "(", ")", ":", "logger", ".", "info", "(", "\"Migrating database %s\"", "%", "(", "name", "or", "'<default>'", ")", ")", "file_", "=", "\"%s.sql\"", "%", "name", "logger", ".", "info", "(", "\"Writing output to %s\"", "%", "file_", ")", "with", "open", "(", "file_", ",", "'w'", ")", "as", "buffer", ":", "context", ".", "configure", "(", "url", "=", "rec", "[", "'url'", "]", ",", "output_buffer", "=", "buffer", ",", "target_metadata", "=", "get_metadata", "(", "name", ")", ",", "literal_binds", "=", "True", ",", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", "engine_name", "=", "name", ")" ]
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
[ "Run", "migrations", "in", "offline", "mode", "." ]
python
train
32.72973
palantir/conjure-python-client
conjure_python_client/_serde/decoder.py
https://github.com/palantir/conjure-python-client/blob/e6814a80bae3ec01fa147d5fd445538a677b1349/conjure_python_client/_serde/decoder.py#L166-L181
def decode_list(cls, obj, element_type): # type: (List[Any], ConjureTypeType) -> List[Any] """Decodes json into a list, handling conversion of the elements. Args: obj: the json object to decode element_type: a class object which is the conjure type of the elements in this list. Returns: A python list where the elements are instances of type element_type. """ if not isinstance(obj, list): raise Exception("expected a python list") return list(map(lambda x: cls.do_decode(x, element_type), obj))
[ "def", "decode_list", "(", "cls", ",", "obj", ",", "element_type", ")", ":", "# type: (List[Any], ConjureTypeType) -> List[Any]", "if", "not", "isinstance", "(", "obj", ",", "list", ")", ":", "raise", "Exception", "(", "\"expected a python list\"", ")", "return", "list", "(", "map", "(", "lambda", "x", ":", "cls", ".", "do_decode", "(", "x", ",", "element_type", ")", ",", "obj", ")", ")" ]
Decodes json into a list, handling conversion of the elements. Args: obj: the json object to decode element_type: a class object which is the conjure type of the elements in this list. Returns: A python list where the elements are instances of type element_type.
[ "Decodes", "json", "into", "a", "list", "handling", "conversion", "of", "the", "elements", "." ]
python
train
38.6875
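A usage sketch, assuming the enclosing class is the package's ConjureDecoder and that the public import path below is correct (both are assumptions to check against the library):

from conjure_python_client import ConjureDecoder   # assumed import path

print(ConjureDecoder.decode_list([1, 2, 3], int))  # [1, 2, 3], each element run through do_decode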
eecs-autograder/autograder-sandbox
autograder_sandbox/autograder_sandbox.py
https://github.com/eecs-autograder/autograder-sandbox/blob/230e806e3740e2aaf5f5568dd6a265558f165c63/autograder_sandbox/autograder_sandbox.py#L347-L356
def add_and_rename_file(self, filename: str, new_filename: str) -> None: """ Copies the specified file into the working directory of this sandbox and renames it to new_filename. """ dest = os.path.join( self.name + ':' + SANDBOX_WORKING_DIR_NAME, new_filename) subprocess.check_call(['docker', 'cp', filename, dest]) self._chown_files([new_filename])
[ "def", "add_and_rename_file", "(", "self", ",", "filename", ":", "str", ",", "new_filename", ":", "str", ")", "->", "None", ":", "dest", "=", "os", ".", "path", ".", "join", "(", "self", ".", "name", "+", "':'", "+", "SANDBOX_WORKING_DIR_NAME", ",", "new_filename", ")", "subprocess", ".", "check_call", "(", "[", "'docker'", ",", "'cp'", ",", "filename", ",", "dest", "]", ")", "self", ".", "_chown_files", "(", "[", "new_filename", "]", ")" ]
Copies the specified file into the working directory of this sandbox and renames it to new_filename.
[ "Copies", "the", "specified", "file", "into", "the", "working", "directory", "of", "this", "sandbox", "and", "renames", "it", "to", "new_filename", "." ]
python
test
42.1
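A hedged usage sketch; it assumes the class can be used as a context manager that starts and stops the container, and the filenames are placeholders:

from autograder_sandbox import AutograderSandbox   # assumed import path

with AutograderSandbox() as sandbox:               # assumed to start/stop the docker container
    sandbox.add_and_rename_file('local_tests.py', 'run_tests.py')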
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L513-L522
def get_suppliers_per_page(self, per_page=1000, page=1, params=None): """ Get suppliers per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=SUPPLIERS, per_page=per_page, page=page, params=params)
[ "def", "get_suppliers_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "SUPPLIERS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "params", ")" ]
Get suppliers per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
[ "Get", "suppliers", "per", "page" ]
python
train
41
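A usage sketch; the import path, constructor signature, and credentials below are assumptions to be verified against the library:

from billomapy import Billomapy                    # assumed import path

client = Billomapy('billomat_id', 'api_key', 'app_id', 'app_secret')  # placeholder credentials
second_page = client.get_suppliers_per_page(per_page=100, page=2)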
wonambi-python/wonambi
wonambi/detect/slowwave.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/detect/slowwave.py#L124-L187
def detect_Massimini2004(dat_orig, s_freq, time, opts): """Slow wave detection based on Massimini et al., 2004. Parameters ---------- dat_orig : ndarray (dtype='float') vector with the data for one channel s_freq : float sampling frequency time : ndarray (dtype='float') vector with the time points for each sample opts : instance of 'DetectSlowWave' 'det_filt' : dict parameters for 'butter', 'duration' : tuple of float min and max duration of SW 'min_ptp' : float min peak-to-peak amplitude 'trough_duration' : tuple of float min and max duration of first half-wave (trough) Returns ------- list of dict list of detected SWs float SW density, per 30-s epoch References ---------- Massimini, M. et al. J Neurosci 24(31) 6862-70 (2004). """ if opts.invert: dat_orig = -dat_orig dat_det = transform_signal(dat_orig, s_freq, 'double_butter', opts.det_filt) above_zero = detect_events(dat_det, 'above_thresh', value=0.) sw_in_chan = [] if above_zero is not None: troughs = within_duration(above_zero, time, opts.trough_duration) #lg.info('troughs within duration: ' + str(troughs.shape)) if troughs is not None: troughs = select_peaks(dat_det, troughs, opts.max_trough_amp) #lg.info('troughs deep enough: ' + str(troughs.shape)) if troughs is not None: events = _add_halfwave(dat_det, troughs, s_freq, opts) #lg.info('SWs high enough: ' + str(events.shape)) if len(events): events = within_duration(events, time, opts.duration) events = remove_straddlers(events, time, s_freq) #lg.info('SWs within duration: ' + str(events.shape)) sw_in_chan = make_slow_waves(events, dat_det, time, s_freq) if len(sw_in_chan) == 0: lg.info('No slow wave found') return sw_in_chan
[ "def", "detect_Massimini2004", "(", "dat_orig", ",", "s_freq", ",", "time", ",", "opts", ")", ":", "if", "opts", ".", "invert", ":", "dat_orig", "=", "-", "dat_orig", "dat_det", "=", "transform_signal", "(", "dat_orig", ",", "s_freq", ",", "'double_butter'", ",", "opts", ".", "det_filt", ")", "above_zero", "=", "detect_events", "(", "dat_det", ",", "'above_thresh'", ",", "value", "=", "0.", ")", "sw_in_chan", "=", "[", "]", "if", "above_zero", "is", "not", "None", ":", "troughs", "=", "within_duration", "(", "above_zero", ",", "time", ",", "opts", ".", "trough_duration", ")", "#lg.info('troughs within duration: ' + str(troughs.shape))", "if", "troughs", "is", "not", "None", ":", "troughs", "=", "select_peaks", "(", "dat_det", ",", "troughs", ",", "opts", ".", "max_trough_amp", ")", "#lg.info('troughs deep enough: ' + str(troughs.shape))", "if", "troughs", "is", "not", "None", ":", "events", "=", "_add_halfwave", "(", "dat_det", ",", "troughs", ",", "s_freq", ",", "opts", ")", "#lg.info('SWs high enough: ' + str(events.shape))", "if", "len", "(", "events", ")", ":", "events", "=", "within_duration", "(", "events", ",", "time", ",", "opts", ".", "duration", ")", "events", "=", "remove_straddlers", "(", "events", ",", "time", ",", "s_freq", ")", "#lg.info('SWs within duration: ' + str(events.shape))", "sw_in_chan", "=", "make_slow_waves", "(", "events", ",", "dat_det", ",", "time", ",", "s_freq", ")", "if", "len", "(", "sw_in_chan", ")", "==", "0", ":", "lg", ".", "info", "(", "'No slow wave found'", ")", "return", "sw_in_chan" ]
Slow wave detection based on Massimini et al., 2004. Parameters ---------- dat_orig : ndarray (dtype='float') vector with the data for one channel s_freq : float sampling frequency time : ndarray (dtype='float') vector with the time points for each sample opts : instance of 'DetectSlowWave' 'det_filt' : dict parameters for 'butter', 'duration' : tuple of float min and max duration of SW 'min_ptp' : float min peak-to-peak amplitude 'trough_duration' : tuple of float min and max duration of first half-wave (trough) Returns ------- list of dict list of detected SWs float SW density, per 30-s epoch References ---------- Massimini, M. et al. J Neurosci 24(31) 6862-70 (2004).
[ "Slow", "wave", "detection", "based", "on", "Massimini", "et", "al", ".", "2004", "." ]
python
train
32.09375
assamite/creamas
creamas/nx.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/nx.py#L119-L143
def _edges2conns(G, edge_data=False): """Create a mapping from graph edges to agent connections to be created. :param G: NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each node. :param bool edge_data: If ``True``, stores also edge data to the returned dictionary. :returns: A dictionary where keys are agent addresses and values are lists of addresses to which key-agent should create connections in order to recreate the graph structure in an agent society. :rtype: dict """ cm = {} for n in G.nodes(data=True): if edge_data: cm[n[1]['addr']] = [(G.node[nb]['addr'], G[n[0]][nb]) for nb in G[n[0]]] else: cm[n[1]['addr']] = [(G.node[nb]['addr'], {}) for nb in G[n[0]]] return cm
[ "def", "_edges2conns", "(", "G", ",", "edge_data", "=", "False", ")", ":", "cm", "=", "{", "}", "for", "n", "in", "G", ".", "nodes", "(", "data", "=", "True", ")", ":", "if", "edge_data", ":", "cm", "[", "n", "[", "1", "]", "[", "'addr'", "]", "]", "=", "[", "(", "G", ".", "node", "[", "nb", "]", "[", "'addr'", "]", ",", "G", "[", "n", "[", "0", "]", "]", "[", "nb", "]", ")", "for", "nb", "in", "G", "[", "n", "[", "0", "]", "]", "]", "else", ":", "cm", "[", "n", "[", "1", "]", "[", "'addr'", "]", "]", "=", "[", "(", "G", ".", "node", "[", "nb", "]", "[", "'addr'", "]", ",", "{", "}", ")", "for", "nb", "in", "G", "[", "n", "[", "0", "]", "]", "]", "return", "cm" ]
Create a mapping from graph edges to agent connections to be created. :param G: NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each node. :param bool edge_data: If ``True``, stores also edge data to the returned dictionary. :returns: A dictionary where keys are agent addresses and values are lists of addresses to which key-agent should create connections in order to recreate the graph structure in an agent society. :rtype: dict
[ "Create", "a", "mapping", "from", "graph", "edges", "to", "agent", "connections", "to", "be", "created", "." ]
python
train
33.36
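A small illustration of the mapping on a two-node graph. Since the function reads G.node, this assumes a NetworkX version where that attribute still exists (it was removed in NetworkX 2.4):

import networkx as nx

G = nx.Graph()
G.add_node(0, addr='tcp://host:5555/0')
G.add_node(1, addr='tcp://host:5555/1')
G.add_edge(0, 1, weight=1.0)

print(_edges2conns(G, edge_data=True))
# {'tcp://host:5555/0': [('tcp://host:5555/1', {'weight': 1.0})],
#  'tcp://host:5555/1': [('tcp://host:5555/0', {'weight': 1.0})]}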
materialsvirtuallab/monty
monty/operator.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/operator.py#L9-L36
def operator_from_str(op): """ Return the operator associated to the given string `op`. raises: `KeyError` if invalid string. >>> assert operator_from_str("==")(1, 1) and operator_from_str("+")(1,1) == 2 """ d = {"==": operator.eq, "!=": operator.ne, ">": operator.gt, ">=": operator.ge, "<": operator.lt, "<=": operator.le, '+': operator.add, '-': operator.sub, '*': operator.mul, '%': operator.mod, '^': operator.xor, } try: d['/'] = operator.truediv except AttributeError: pass return d[op]
[ "def", "operator_from_str", "(", "op", ")", ":", "d", "=", "{", "\"==\"", ":", "operator", ".", "eq", ",", "\"!=\"", ":", "operator", ".", "ne", ",", "\">\"", ":", "operator", ".", "gt", ",", "\">=\"", ":", "operator", ".", "ge", ",", "\"<\"", ":", "operator", ".", "lt", ",", "\"<=\"", ":", "operator", ".", "le", ",", "'+'", ":", "operator", ".", "add", ",", "'-'", ":", "operator", ".", "sub", ",", "'*'", ":", "operator", ".", "mul", ",", "'%'", ":", "operator", ".", "mod", ",", "'^'", ":", "operator", ".", "xor", ",", "}", "try", ":", "d", "[", "'/'", "]", "=", "operator", ".", "truediv", "except", "AttributeError", ":", "pass", "return", "d", "[", "op", "]" ]
Return the operator associated to the given string `op`. raises: `KeyError` if invalid string. >>> assert operator_from_str("==")(1, 1) and operator_from_str("+")(1,1) == 2
[ "Return", "the", "operator", "associated", "to", "the", "given", "string", "op", "." ]
python
train
22.464286
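The doctest in the docstring already covers the basics; a few more lookups for illustration:

add = operator_from_str('+')
assert add(2, 3) == 5
assert operator_from_str('<=')(1, 2)
assert operator_from_str('/')(3, 2) == 1.5   # true division, where operator.truediv is available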
erans/fabric-gce-tools
fabric_gce_tools/__init__.py
https://github.com/erans/fabric-gce-tools/blob/0c9af7a683db47e203d4e487fa8610da6459ca83/fabric_gce_tools/__init__.py#L205-L228
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None):
    """
    Dynamically update fabric's roles by assigning the tags associated with
    each machine in Google Compute Engine.

    use_cache - will store a local cache in ~/.gcetools/
    cache_expiration - cache expiration in seconds (default: 1 day)
    cache_path - the path to store instances data (default: ~/.gcetools/instances)
    group_name - optional managed instance group to use instead of the global instance pool
    region - gce region name (such as `us-central1`) for a regional managed instance group
    zone - gce zone name (such as `us-central1-a`) for a zone managed instance group

    How to use:
    - Call 'update_roles_gce' at the end of your fabfile.py (it will run each time you run fabric).
    - On each function use the regular @roles decorator and set the role to the name of one of the tags associated with the instances you wish to work with
    """
    data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
    roles = _get_roles(data)
    env.roledefs.update(roles)

    _data_loaded = True

    return INSTANCES_CACHE
[ "def", "update_roles_gce", "(", "use_cache", "=", "True", ",", "cache_expiration", "=", "86400", ",", "cache_path", "=", "\"~/.gcetools/instances\"", ",", "group_name", "=", "None", ",", "region", "=", "None", ",", "zone", "=", "None", ")", ":", "data", "=", "_get_data", "(", "use_cache", ",", "cache_expiration", ",", "group_name", "=", "group_name", ",", "region", "=", "region", ",", "zone", "=", "zone", ")", "roles", "=", "_get_roles", "(", "data", ")", "env", ".", "roledefs", ".", "update", "(", "roles", ")", "_data_loaded", "=", "True", "return", "INSTANCES_CACHE" ]
Dynamically update fabric's roles by assigning the tags associated with
    each machine in Google Compute Engine.

    use_cache - will store a local cache in ~/.gcetools/
    cache_expiration - cache expiration in seconds (default: 1 day)
    cache_path - the path to store instances data (default: ~/.gcetools/instances)
    group_name - optional managed instance group to use instead of the global instance pool
    region - gce region name (such as `us-central1`) for a regional managed instance group
    zone - gce zone name (such as `us-central1-a`) for a zone managed instance group

    How to use:
    - Call 'update_roles_gce' at the end of your fabfile.py (it will run each time you run fabric).
    - On each function use the regular @roles decorator and set the role to the name of one of the tags associated with the instances you wish to work with
[ "Dynamically", "update", "fabric", "s", "roles", "by", "using", "assigning", "the", "tags", "associated", "with", "each", "machine", "in", "Google", "Compute", "Engine", "." ]
python
train
51.208333
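A fabfile sketch following the 'How to use' notes in the docstring; 'web' stands in for whatever tag your instances actually carry, and the Fabric 1.x API is assumed:

# fabfile.py
from fabric.api import roles, run
from fabric_gce_tools import update_roles_gce

@roles('web')                  # 'web' is assumed to be a tag on your GCE instances
def uptime():
    run('uptime')

update_roles_gce()             # called at the end of the fabfile, per the docstring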
iotile/coretools
iotilecore/iotile/core/utilities/kvstore_json.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/kvstore_json.py#L48-L58
def _load_file(self): """Load all entries from json backing file """ if not os.path.exists(self.file): return {} with open(self.file, "r") as infile: data = json.load(infile) return data
[ "def", "_load_file", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "file", ")", ":", "return", "{", "}", "with", "open", "(", "self", ".", "file", ",", "\"r\"", ")", "as", "infile", ":", "data", "=", "json", ".", "load", "(", "infile", ")", "return", "data" ]
Load all entries from json backing file
[ "Load", "all", "entries", "from", "json", "backing", "file" ]
python
train
22.090909
RudolfCardinal/pythonlib
cardinal_pythonlib/sphinxtools.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sphinxtools.py#L825-L847
def write_index_and_rst_files(self, overwrite: bool = False, mock: bool = False) -> None: """ Writes both the individual RST files and the index. Args: overwrite: allow existing files to be overwritten? mock: pretend to write, but don't """ for f in self.files_to_index: if isinstance(f, FileToAutodocument): f.write_rst( prefix=self.rst_prefix, suffix=self.rst_suffix, heading_underline_char=self.source_rst_heading_underline_char, # noqa overwrite=overwrite, mock=mock, ) elif isinstance(f, AutodocIndex): f.write_index_and_rst_files(overwrite=overwrite, mock=mock) else: fail("Unknown thing in files_to_index: {!r}".format(f)) self.write_index(overwrite=overwrite, mock=mock)
[ "def", "write_index_and_rst_files", "(", "self", ",", "overwrite", ":", "bool", "=", "False", ",", "mock", ":", "bool", "=", "False", ")", "->", "None", ":", "for", "f", "in", "self", ".", "files_to_index", ":", "if", "isinstance", "(", "f", ",", "FileToAutodocument", ")", ":", "f", ".", "write_rst", "(", "prefix", "=", "self", ".", "rst_prefix", ",", "suffix", "=", "self", ".", "rst_suffix", ",", "heading_underline_char", "=", "self", ".", "source_rst_heading_underline_char", ",", "# noqa", "overwrite", "=", "overwrite", ",", "mock", "=", "mock", ",", ")", "elif", "isinstance", "(", "f", ",", "AutodocIndex", ")", ":", "f", ".", "write_index_and_rst_files", "(", "overwrite", "=", "overwrite", ",", "mock", "=", "mock", ")", "else", ":", "fail", "(", "\"Unknown thing in files_to_index: {!r}\"", ".", "format", "(", "f", ")", ")", "self", ".", "write_index", "(", "overwrite", "=", "overwrite", ",", "mock", "=", "mock", ")" ]
Writes both the individual RST files and the index. Args: overwrite: allow existing files to be overwritten? mock: pretend to write, but don't
[ "Writes", "both", "the", "individual", "RST", "files", "and", "the", "index", "." ]
python
train
41.913043
williamFalcon/test-tube
examples/pytorch_hpc_example.py
https://github.com/williamFalcon/test-tube/blob/db5a47067a854f76d89f8066582023c1e184bccb/examples/pytorch_hpc_example.py#L8-L30
def train(hparams, *args): """Train your awesome model. :param hparams: The arguments to run the model with. """ # Initialize experiments and track all the hyperparameters exp = Experiment( name=hparams.test_tube_exp_name, # Location to save the metrics. save_dir=hparams.log_path, autosave=False, ) exp.argparse(hparams) # Pretend to train. x = torch.rand((1, hparams.x_val)) for train_step in range(0, 100): y = torch.rand((hparams.x_val, 1)) out = x.mm(y) exp.log({'fake_err': out.item()}) # Save exp when . exp.save()
[ "def", "train", "(", "hparams", ",", "*", "args", ")", ":", "# Initialize experiments and track all the hyperparameters", "exp", "=", "Experiment", "(", "name", "=", "hparams", ".", "test_tube_exp_name", ",", "# Location to save the metrics.", "save_dir", "=", "hparams", ".", "log_path", ",", "autosave", "=", "False", ",", ")", "exp", ".", "argparse", "(", "hparams", ")", "# Pretend to train.", "x", "=", "torch", ".", "rand", "(", "(", "1", ",", "hparams", ".", "x_val", ")", ")", "for", "train_step", "in", "range", "(", "0", ",", "100", ")", ":", "y", "=", "torch", ".", "rand", "(", "(", "hparams", ".", "x_val", ",", "1", ")", ")", "out", "=", "x", ".", "mm", "(", "y", ")", "exp", ".", "log", "(", "{", "'fake_err'", ":", "out", ".", "item", "(", ")", "}", ")", "# Save exp when .", "exp", ".", "save", "(", ")" ]
Train your awesome model. :param hparams: The arguments to run the model with.
[ "Train", "your", "awesome", "model", "." ]
python
test
26.347826
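A hedged driver sketch for the training function above; it assumes test_tube's HyperOptArgumentParser supplies the three hyperparameters the function reads (argument names taken from the code):

from test_tube import HyperOptArgumentParser

parser = HyperOptArgumentParser(strategy='grid_search')
parser.add_argument('--test_tube_exp_name', default='demo')
parser.add_argument('--log_path', default='/tmp/test_tube')
parser.opt_list('--x_val', default=12, options=[12, 24, 48], type=int, tunable=True)

hparams = parser.parse_args([])   # empty argv, so the defaults are used
train(hparams)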
chimera0/accel-brain-code
Reinforcement-Learning/pyqlearning/deep_q_learning.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/pyqlearning/deep_q_learning.py#L72-L123
def learn(self, state_arr, limit=1000):
        '''
        Learning and searching the optimal solution.

        Args:
            state_arr:    `np.ndarray` of initial state.
            limit:        The maximum number of iterative updates based on value iteration algorithms.
        '''
        while self.t <= limit:
            # Draw samples of next possible actions from any distribution.
            next_action_arr = self.extract_possible_actions(state_arr)
            # Inference Q-Values.
            predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
            # Set `np.ndarray` of rewards and next Q-Values.
            reward_value_arr = np.empty((next_action_arr.shape[0], 1))
            next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
            for i in range(reward_value_arr.shape[0]):
                # Observe reward values.
                reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
                # Inference the Max-Q-Value in next action time.
                next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
                next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()

            # Select action.
            action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
            # Update real Q-Values.
            real_q_arr = self.update_q(
                predicted_q_arr,
                reward_value_arr,
                next_max_q_arr
            )

            # Maximum of predicted and real Q-Values.
            real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
            if self.__q_logs_arr.shape[0] > 0:
                self.__q_logs_arr = np.r_[
                    self.__q_logs_arr,
                    np.array([predicted_q, real_q]).reshape(1, 2)
                ]
            else:
                self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)

            # Learn Q-Values.
            self.learn_q(predicted_q_arr, real_q_arr)

            # Update State.
            state_arr = self.update_state(state_arr, action_arr)

            # Episode.
            self.t += 1

            # Check.
            end_flag = self.check_the_end_flag(state_arr)
            if end_flag is True:
                break
[ "def", "learn", "(", "self", ",", "state_arr", ",", "limit", "=", "1000", ")", ":", "while", "self", ".", "t", "<=", "limit", ":", "# Draw samples of next possible actions from any distribution.", "next_action_arr", "=", "self", ".", "extract_possible_actions", "(", "state_arr", ")", "# Inference Q-Values.", "predicted_q_arr", "=", "self", ".", "__function_approximator", ".", "inference_q", "(", "next_action_arr", ")", "# Set `np.ndarray` of rewards and next Q-Values.", "reward_value_arr", "=", "np", ".", "empty", "(", "(", "next_action_arr", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "next_max_q_arr", "=", "np", ".", "empty", "(", "(", "next_action_arr", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "for", "i", "in", "range", "(", "reward_value_arr", ".", "shape", "[", "0", "]", ")", ":", "# Observe reward values.", "reward_value_arr", "[", "i", "]", "=", "self", ".", "observe_reward_value", "(", "state_arr", ",", "next_action_arr", "[", "i", "]", ")", "# Inference the Max-Q-Value in next action time.", "next_next_action_arr", "=", "self", ".", "extract_possible_actions", "(", "next_action_arr", "[", "i", "]", ")", "next_max_q_arr", "[", "i", "]", "=", "self", ".", "__function_approximator", ".", "inference_q", "(", "next_next_action_arr", ")", ".", "max", "(", ")", "# Select action.", "action_arr", ",", "predicted_q", "=", "self", ".", "select_action", "(", "next_action_arr", ",", "predicted_q_arr", ")", "# Update real Q-Values.", "real_q_arr", "=", "self", ".", "update_q", "(", "predicted_q_arr", ",", "reward_value_arr", ",", "next_max_q_arr", ")", "# Maximum of predicted and real Q-Values.", "real_q", "=", "real_q_arr", "[", "np", ".", "where", "(", "predicted_q_arr", "==", "predicted_q", ")", "[", "0", "]", "[", "0", "]", "]", "if", "self", ".", "__q_logs_arr", ".", "shape", "[", "0", "]", ">", "0", ":", "self", ".", "__q_logs_arr", "=", "np", ".", "r_", "[", "self", ".", "__q_logs_arr", ",", "np", ".", "array", "(", "[", "predicted_q", ",", "real_q", "]", ")", ".", "reshape", "(", "1", ",", "2", ")", "]", "else", ":", "self", ".", "__q_logs_arr", "=", "np", ".", "array", "(", "[", "predicted_q", ",", "real_q", "]", ")", ".", "reshape", "(", "1", ",", "2", ")", "# Learn Q-Values.", "self", ".", "learn_q", "(", "predicted_q_arr", ",", "real_q_arr", ")", "# Update State.", "state_arr", "=", "self", ".", "update_state", "(", "state_arr", ",", "action_arr", ")", "# Epsode.", "self", ".", "t", "+=", "1", "# Check.", "end_flag", "=", "self", ".", "check_the_end_flag", "(", "state_arr", ")", "if", "end_flag", "is", "True", ":", "break" ]
Learning and searching the optimal solution. Args: state_arr: `np.ndarray` of initial state. limit: The maximum number of iterative updates based on value iteration algorithms.
[ "Learning", "and", "searching", "the", "optimal", "solution", ".", "Args", ":", "state_arr", ":", "np", ".", "ndarray", "of", "initial", "state", ".", "limit", ":", "The", "maximum", "number", "of", "iterative", "updates", "based", "on", "value", "iteration", "algorithms", "." ]
python
train
44.557692
SheffieldML/GPyOpt
GPyOpt/util/general.py
https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/util/general.py#L86-L96
def get_moments(model,x): ''' Moments (mean and sdev.) of a GP model at x ''' input_dim = model.X.shape[1] x = reshape(x,input_dim) fmin = min(model.predict(model.X)[0]) m, v = model.predict(x) s = np.sqrt(np.clip(v, 0, np.inf)) return (m,s, fmin)
[ "def", "get_moments", "(", "model", ",", "x", ")", ":", "input_dim", "=", "model", ".", "X", ".", "shape", "[", "1", "]", "x", "=", "reshape", "(", "x", ",", "input_dim", ")", "fmin", "=", "min", "(", "model", ".", "predict", "(", "model", ".", "X", ")", "[", "0", "]", ")", "m", ",", "v", "=", "model", ".", "predict", "(", "x", ")", "s", "=", "np", ".", "sqrt", "(", "np", ".", "clip", "(", "v", ",", "0", ",", "np", ".", "inf", ")", ")", "return", "(", "m", ",", "s", ",", "fmin", ")" ]
Moments (mean and sdev.) of a GP model at x
[ "Moments", "(", "mean", "and", "sdev", ".", ")", "of", "a", "GP", "model", "at", "x" ]
python
train
24.909091
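A sketch with a toy GPy regression model; it assumes the module-level reshape helper that get_moments calls is in scope alongside it:

import numpy as np
import GPy

X = np.random.rand(10, 1)
Y = np.sin(6 * X) + 0.05 * np.random.randn(10, 1)
model = GPy.models.GPRegression(X, Y)

m, s, fmin = get_moments(model, np.array([[0.5]]))  # posterior mean, sdev, best observed mean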
slarse/pdfebc-core
pdfebc_core/compress.py
https://github.com/slarse/pdfebc-core/blob/fc40857bc42365b7434714333e37d7a3487603a0/pdfebc_core/compress.py#L87-L105
def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
    """Compress all PDF files in the current directory and place the output in the
    given output directory. This is a generator function that first yields the number
    of files to be compressed, and then yields the output path of each file.

    Args:
        source_directory (str): Filepath to the source directory.
        output_directory (str): Filepath to the output directory.
        ghostscript_binary (str): Name of the Ghostscript binary.

    Returns:
        list(str): paths to outputs.
    """
    source_paths = _get_pdf_filenames_at(source_directory)
    yield len(source_paths)
    for source_path in source_paths:
        output = os.path.join(output_directory, os.path.basename(source_path))
        compress_pdf(source_path, output, ghostscript_binary)
        yield output
[ "def", "compress_multiple_pdfs", "(", "source_directory", ",", "output_directory", ",", "ghostscript_binary", ")", ":", "source_paths", "=", "_get_pdf_filenames_at", "(", "source_directory", ")", "yield", "len", "(", "source_paths", ")", "for", "source_path", "in", "source_paths", ":", "output", "=", "os", ".", "path", ".", "join", "(", "output_directory", ",", "os", ".", "path", ".", "basename", "(", "source_path", ")", ")", "compress_pdf", "(", "source_path", ",", "output", ",", "ghostscript_binary", ")", "yield", "output" ]
Compress all PDF files in the current directory and place the output in the
    given output directory. This is a generator function that first yields the number
    of files to be compressed, and then yields the output path of each file.

    Args:
        source_directory (str): Filepath to the source directory.
        output_directory (str): Filepath to the output directory.
        ghostscript_binary (str): Name of the Ghostscript binary.

    Returns:
        list(str): paths to outputs.
[ "Compress", "all", "PDF", "files", "in", "the", "current", "directory", "and", "place", "the", "output", "in", "the", "given", "output", "directory", ".", "This", "is", "a", "generator", "function", "that", "first", "yields", "the", "amount", "of", "files", "to", "be", "compressed", "and", "then", "yields", "the", "output", "path", "of", "each", "file", "." ]
python
train
45.526316
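Because the first yield is the file count, callers consume it separately, which makes progress reporting straightforward ('gs' is assumed to be the Ghostscript binary on the PATH):

gen = compress_multiple_pdfs('in_dir', 'out_dir', 'gs')
total = next(gen)                                # first yield: number of files
for done, path in enumerate(gen, start=1):       # remaining yields: output paths
    print('{}/{}: {}'.format(done, total, path))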
google/grr
grr/server/grr_response_server/hunts/implementation.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/hunts/implementation.py#L892-L897
def _Complete(self): """Marks the hunt as completed.""" self._RemoveForemanRule() if "w" in self.hunt_obj.mode: self.hunt_obj.Set(self.hunt_obj.Schema.STATE("COMPLETED")) self.hunt_obj.Flush()
[ "def", "_Complete", "(", "self", ")", ":", "self", ".", "_RemoveForemanRule", "(", ")", "if", "\"w\"", "in", "self", ".", "hunt_obj", ".", "mode", ":", "self", ".", "hunt_obj", ".", "Set", "(", "self", ".", "hunt_obj", ".", "Schema", ".", "STATE", "(", "\"COMPLETED\"", ")", ")", "self", ".", "hunt_obj", ".", "Flush", "(", ")" ]
Marks the hunt as completed.
[ "Marks", "the", "hunt", "as", "completed", "." ]
python
train
35.166667
larryng/narwal
narwal/reddit.py
https://github.com/larryng/narwal/blob/58c409a475c8ed865579a61d7010162ed8cef597/narwal/reddit.py#L645-L658
def compose(self, to, subject, text): """Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response. URL: ``http://www.reddit.com/api/compose/`` :param to: username or :class`things.Account` of user to send to :param subject: subject of message :param text: message body text """ if isinstance(to, Account): to = to.name data = dict(to=to, subject=subject, text=text) j = self.post('api', 'compose', data=data) return assert_truthy(j)
[ "def", "compose", "(", "self", ",", "to", ",", "subject", ",", "text", ")", ":", "if", "isinstance", "(", "to", ",", "Account", ")", ":", "to", "=", "to", ".", "name", "data", "=", "dict", "(", "to", "=", "to", ",", "subject", "=", "subject", ",", "text", "=", "text", ")", "j", "=", "self", ".", "post", "(", "'api'", ",", "'compose'", ",", "data", "=", "data", ")", "return", "assert_truthy", "(", "j", ")" ]
Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response. URL: ``http://www.reddit.com/api/compose/`` :param to: username or :class`things.Account` of user to send to :param subject: subject of message :param text: message body text
[ "Login", "required", ".", "Sends", "POST", "to", "send", "a", "message", "to", "a", "user", ".", "Returns", "True", "or", "raises", ":", "class", ":", "exceptions", ".", "UnexpectedResponse", "if", "non", "-", "truthy", "value", "in", "response", ".", "URL", ":", "http", ":", "//", "www", ".", "reddit", ".", "com", "/", "api", "/", "compose", "/", ":", "param", "to", ":", "username", "or", ":", "class", "things", ".", "Account", "of", "user", "to", "send", "to", ":", "param", "subject", ":", "subject", "of", "message", ":", "param", "text", ":", "message", "body", "text" ]
python
train
44.714286
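A usage sketch assuming reddit is an already-authenticated narwal session exposing this method (login is required, per the docstring); the names are placeholders:

ok = reddit.compose('someuser', 'hello', 'message body sent via the API')
assert ok is True   # assert_truthy raises UnexpectedResponse on a non-truthy reply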
lowandrew/OLCTools
databasesetup/database_setup.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L136-L144
def mob_suite_targets(self, database_name='mob_suite'): """ Download MOB-suite databases :param database_name: name of current database """ logging.info('Download MOB-suite databases') # NOTE: This requires mob_suite >=1.4.9.1. Versions before that don't have the -d option. cmd = 'mob_init -d {}'.format(os.path.join(self.databasepath, database_name)) out, err = run_subprocess(cmd)
[ "def", "mob_suite_targets", "(", "self", ",", "database_name", "=", "'mob_suite'", ")", ":", "logging", ".", "info", "(", "'Download MOB-suite databases'", ")", "# NOTE: This requires mob_suite >=1.4.9.1. Versions before that don't have the -d option.", "cmd", "=", "'mob_init -d {}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "self", ".", "databasepath", ",", "database_name", ")", ")", "out", ",", "err", "=", "run_subprocess", "(", "cmd", ")" ]
Download MOB-suite databases :param database_name: name of current database
[ "Download", "MOB", "-", "suite", "databases", ":", "param", "database_name", ":", "name", "of", "current", "database" ]
python
train
48.777778
spacetelescope/pysynphot
commissioning/genscience.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/commissioning/genscience.py#L28-L65
def genstis(outname): """ Generate TestCases from cmdfile according to the pattern in patternfile""" pattern="""class stisS%d(countrateCase): def setUp(self): self.obsmode="%s" self.spectrum="%s" self.setglobal(__file__) self.runpy()\n""" speclist=['/grp/hst/cdbs/calspec/gd71_mod_005.fits', '/grp/hst/cdbs/calspec/gd153_mod_004.fits', '/grp/hst/cdbs/calspec/g191b2b_mod_004.fits'] glist={'g140l':'fuvmama','g230l':'nuvmama','g430l':'ccd','g750l':'ccd', 'g230lb':'ccd'} out=open(outname,'a') out.write("""from pytools import testutil import sys from basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\n """) count=0 for g in glist: for sp in speclist: obsmode='stis,%s,fuvmama,s52x2'%g defn=pattern%(count,obsmode,sp) out.write(defn) count+=1 out.write("""\n\n if __name__ == '__main__': if 'debug' in sys.argv: testutil.debug(__name__) else: testutil.testall(__name__,2) """) out.close()
[ "def", "genstis", "(", "outname", ")", ":", "pattern", "=", "\"\"\"class stisS%d(countrateCase):\n def setUp(self):\n self.obsmode=\"%s\"\n self.spectrum=\"%s\"\n self.setglobal(__file__)\n self.runpy()\\n\"\"\"", "speclist", "=", "[", "'/grp/hst/cdbs/calspec/gd71_mod_005.fits'", ",", "'/grp/hst/cdbs/calspec/gd153_mod_004.fits'", ",", "'/grp/hst/cdbs/calspec/g191b2b_mod_004.fits'", "]", "glist", "=", "{", "'g140l'", ":", "'fuvmama'", ",", "'g230l'", ":", "'nuvmama'", ",", "'g430l'", ":", "'ccd'", ",", "'g750l'", ":", "'ccd'", ",", "'g230lb'", ":", "'ccd'", "}", "out", "=", "open", "(", "outname", ",", "'a'", ")", "out", ".", "write", "(", "\"\"\"from pytools import testutil\nimport sys\nfrom basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\\n\n\"\"\"", ")", "count", "=", "0", "for", "g", "in", "glist", ":", "for", "sp", "in", "speclist", ":", "obsmode", "=", "'stis,%s,fuvmama,s52x2'", "%", "g", "defn", "=", "pattern", "%", "(", "count", ",", "obsmode", ",", "sp", ")", "out", ".", "write", "(", "defn", ")", "count", "+=", "1", "out", ".", "write", "(", "\"\"\"\\n\\n\nif __name__ == '__main__':\n if 'debug' in sys.argv:\n testutil.debug(__name__)\n else:\n testutil.testall(__name__,2)\n\"\"\"", ")", "out", ".", "close", "(", ")" ]
Generate TestCases from cmdfile according to the pattern in patternfile
[ "Generate", "TestCases", "from", "cmdfile", "according", "to", "the", "pattern", "in", "patternfile" ]
python
train
28.263158
TestInABox/stackInABox
stackinabox/stack.py
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/stack.py#L45-L54
def register_service(cls, service): """Add a service to the thread's StackInABox instance. :param service: StackInABoxService instance to add to the test For return value and errors see StackInABox.register() """ logger.debug('Registering service {0}'.format(service.name)) return local_store.instance.register(service)
[ "def", "register_service", "(", "cls", ",", "service", ")", ":", "logger", ".", "debug", "(", "'Registering service {0}'", ".", "format", "(", "service", ".", "name", ")", ")", "return", "local_store", ".", "instance", ".", "register", "(", "service", ")" ]
Add a service to the thread's StackInABox instance. :param service: StackInABoxService instance to add to the test For return value and errors see StackInABox.register()
[ "Add", "a", "service", "to", "the", "thread", "s", "StackInABox", "instance", "." ]
python
train
36.1
frawau/aiolifx
aiolifx/aiolifx.py
https://github.com/frawau/aiolifx/blob/9bd8c5e6d291f4c79314989402f7e2c6476d5851/aiolifx/aiolifx.py#L861-L892
def set_color(self, value, callb=None, duration=0, rapid=False):
        """Convenience method to set the colour status of the device

        This method will send a LightSetColor message to the device, and request callb be executed
        when an ACK is received. The default callback will simply cache the value.

            :param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
            :type value: dict
            :param duration: The duration, in seconds, of the power state transition.
            :type duration: int
            :param callb: Callable to be used when the response is received. If not set,
                          self.resp_set_light will be used.
            :type callb: callable
            :param rapid: Whether to ask for ack (False) or not (True). Default False
            :type rapid: bool
            :returns: None
            :rtype: None
        """
        if len(value) == 4:
            mypartial=partial(self.resp_set_light,color=value)
            if callb:
                mycallb=lambda x,y:(mypartial(y),callb(x,y))
            else:
                mycallb=lambda x,y:mypartial(y)
            #try:
            if rapid:
                self.fire_and_forget(LightSetColor, {"color": value, "duration": duration}, num_repeats=1)
                self.resp_set_light(None,color=value)
                if callb:
                    callb(self,None)
            else:
                self.req_with_ack(LightSetColor, {"color": value, "duration": duration},callb=mycallb)
[ "def", "set_color", "(", "self", ",", "value", ",", "callb", "=", "None", ",", "duration", "=", "0", ",", "rapid", "=", "False", ")", ":", "if", "len", "(", "value", ")", "==", "4", ":", "mypartial", "=", "partial", "(", "self", ".", "resp_set_light", ",", "color", "=", "value", ")", "if", "callb", ":", "mycallb", "=", "lambda", "x", ",", "y", ":", "(", "mypartial", "(", "y", ")", ",", "callb", "(", "x", ",", "y", ")", ")", "else", ":", "mycallb", "=", "lambda", "x", ",", "y", ":", "mypartial", "(", "y", ")", "#try:", "if", "rapid", ":", "self", ".", "fire_and_forget", "(", "LightSetColor", ",", "{", "\"color\"", ":", "value", ",", "\"duration\"", ":", "duration", "}", ",", "num_repeats", "=", "1", ")", "self", ".", "resp_set_light", "(", "None", ",", "color", "=", "value", ")", "if", "callb", ":", "callb", "(", "self", ",", "None", ")", "else", ":", "self", ".", "req_with_ack", "(", "LightSetColor", ",", "{", "\"color\"", ":", "value", ",", "\"duration\"", ":", "duration", "}", ",", "callb", "=", "mycallb", ")" ]
Convenience method to set the colour status of the device

        This method will send a LightSetColor message to the device, and request callb be executed
        when an ACK is received. The default callback will simply cache the value.

            :param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
            :type value: dict
            :param duration: The duration, in seconds, of the power state transition.
            :type duration: int
            :param callb: Callable to be used when the response is received. If not set,
                          self.resp_set_light will be used.
            :type callb: callable
            :param rapid: Whether to ask for ack (False) or not (True). Default False
            :type rapid: bool
            :returns: None
            :rtype: None
[ "Convenience", "method", "to", "set", "the", "colour", "status", "of", "the", "device" ]
python
train
47.8125
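A hedged usage sketch for set_color. The bulb object, key casing, and value ranges are assumptions: the docstring above only says value is a dict of ints with Hue, Saturation, Brightness and Kelvin keys, and 16-bit integer HSBK fields are the usual LIFX convention rather than something stated in this record.

# Hedged sketch: `bulb` stands in for an already-discovered aiolifx Light;
# key names follow the docstring, and the 0-65535 ranges are the common LIFX
# HSBK convention, not guaranteed by this record.
new_color = {
    "Hue": 21845,          # roughly 120 degrees (green) on a 0-65535 scale
    "Saturation": 65535,   # fully saturated
    "Brightness": 32768,   # about half brightness
    "Kelvin": 3500,
}
bulb.set_color(new_color, duration=2, rapid=False)  # fade over 2 s, wait for ACK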
mediawiki-utilities/python-mwreverts
mwreverts/api.py
https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/api.py#L210-L286
def check_deleted(session, rev_id, title=None, timestamp=None, radius=defaults.RADIUS, before=None, window=None, rvprop=None): """ Checks the revert status of a deleted revision. With this method, you can determine whether an edit is a 'reverting' edit, was 'reverted' by another edit and/or was 'reverted_to' by another edit. :Parameters: session : :class:`mwapi.Session` An API session to make use of rev_id : int the ID of the revision to check title : str the title of the page the revision occupies (slower if not provided) Note that the MediaWiki API expects the title to include the namespace prefix (e.g. "User_talk:EpochFail") radius : int a positive integer indicating the maximum number of revisions that can be reverted before : :class:`mwtypes.Timestamp` if set, limits the search for *reverting* revisions to those which were saved before this timestamp window : int if set, limits the search for *reverting* revisions to those which were saved within `window` seconds after the reverted edit rvprop : set( str ) a set of properties to include in revisions :Returns: A triple :class:`mwreverts.Revert` | `None` * reverting -- If this edit reverted other edit(s) * reverted -- If this edit was reverted by another edit * reverted_to -- If this edit was reverted to by another edit """ rev_id = int(rev_id) radius = int(radius) if radius < 1: raise TypeError("invalid radius. Expected a positive integer.") title = str(title) if title is not None else None before = Timestamp(before) if before is not None else None rvprop = set(rvprop) if rvprop is not None else set() # If we don't have the title, we're going to need to look it up if title is None or timestamp is None: title, timestamp = get_deleted_title_and_timestamp(session, rev_id) # Load history and current rev current_and_past_revs = list(n_deleted_edits_before( session, rev_id, title, timestamp, n=radius + 1, rvprop={'ids', 'timestamp', 'sha1'} | rvprop )) if len(current_and_past_revs) < 1: raise KeyError("Revision {0} not found in page {1}." .format(rev_id, title)) current_rev, past_revs = ( current_and_past_revs[-1], # Current current_and_past_revs[:-1] # Past revisions ) if window is not None and before is None: before = Timestamp(current_rev['timestamp']) + window # Load future revisions future_revs = list(n_deleted_edits_after( session, rev_id + 1, title, timestamp, n=radius, before=before, rvprop={'ids', 'timestamp', 'sha1'} | rvprop )) return build_revert_tuple( rev_id, past_revs, current_rev, future_revs, radius)
[ "def", "check_deleted", "(", "session", ",", "rev_id", ",", "title", "=", "None", ",", "timestamp", "=", "None", ",", "radius", "=", "defaults", ".", "RADIUS", ",", "before", "=", "None", ",", "window", "=", "None", ",", "rvprop", "=", "None", ")", ":", "rev_id", "=", "int", "(", "rev_id", ")", "radius", "=", "int", "(", "radius", ")", "if", "radius", "<", "1", ":", "raise", "TypeError", "(", "\"invalid radius. Expected a positive integer.\"", ")", "title", "=", "str", "(", "title", ")", "if", "title", "is", "not", "None", "else", "None", "before", "=", "Timestamp", "(", "before", ")", "if", "before", "is", "not", "None", "else", "None", "rvprop", "=", "set", "(", "rvprop", ")", "if", "rvprop", "is", "not", "None", "else", "set", "(", ")", "# If we don't have the title, we're going to need to look it up", "if", "title", "is", "None", "or", "timestamp", "is", "None", ":", "title", ",", "timestamp", "=", "get_deleted_title_and_timestamp", "(", "session", ",", "rev_id", ")", "# Load history and current rev", "current_and_past_revs", "=", "list", "(", "n_deleted_edits_before", "(", "session", ",", "rev_id", ",", "title", ",", "timestamp", ",", "n", "=", "radius", "+", "1", ",", "rvprop", "=", "{", "'ids'", ",", "'timestamp'", ",", "'sha1'", "}", "|", "rvprop", ")", ")", "if", "len", "(", "current_and_past_revs", ")", "<", "1", ":", "raise", "KeyError", "(", "\"Revision {0} not found in page {1}.\"", ".", "format", "(", "rev_id", ",", "title", ")", ")", "current_rev", ",", "past_revs", "=", "(", "current_and_past_revs", "[", "-", "1", "]", ",", "# Current", "current_and_past_revs", "[", ":", "-", "1", "]", "# Past revisions", ")", "if", "window", "is", "not", "None", "and", "before", "is", "None", ":", "before", "=", "Timestamp", "(", "current_rev", "[", "'timestamp'", "]", ")", "+", "window", "# Load future revisions", "future_revs", "=", "list", "(", "n_deleted_edits_after", "(", "session", ",", "rev_id", "+", "1", ",", "title", ",", "timestamp", ",", "n", "=", "radius", ",", "before", "=", "before", ",", "rvprop", "=", "{", "'ids'", ",", "'timestamp'", ",", "'sha1'", "}", "|", "rvprop", ")", ")", "return", "build_revert_tuple", "(", "rev_id", ",", "past_revs", ",", "current_rev", ",", "future_revs", ",", "radius", ")" ]
Checks the revert status of a deleted revision. With this method, you can determine whether an edit is a 'reverting' edit, was 'reverted' by another edit and/or was 'reverted_to' by another edit. :Parameters: session : :class:`mwapi.Session` An API session to make use of rev_id : int the ID of the revision to check title : str the title of the page the revision occupies (slower if not provided) Note that the MediaWiki API expects the title to include the namespace prefix (e.g. "User_talk:EpochFail") radius : int a positive integer indicating the maximum number of revisions that can be reverted before : :class:`mwtypes.Timestamp` if set, limits the search for *reverting* revisions to those which were saved before this timestamp window : int if set, limits the search for *reverting* revisions to those which were saved within `window` seconds after the reverted edit rvprop : set( str ) a set of properties to include in revisions :Returns: A triple :class:`mwreverts.Revert` | `None` * reverting -- If this edit reverted other edit(s) * reverted -- If this edit was reverted by another edit * reverted_to -- If this edit was reverted to by another edit
[ "Checks", "the", "revert", "status", "of", "a", "deleted", "revision", ".", "With", "this", "method", "you", "can", "determine", "whether", "an", "edit", "is", "a", "reverting", "edit", "was", "reverted", "by", "another", "edit", "and", "/", "or", "was", "reverted_to", "by", "another", "edit", "." ]
python
train
38.090909
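A hedged sketch of calling check_deleted. The wiki endpoint, user agent, and revision id are placeholders, and reading deleted revisions requires a session with the right permissions. The triple unpacking follows the docstring's (reverting, reverted, reverted_to) return contract.

# Hedged sketch: endpoint, user_agent, and rev_id are illustrative; the
# session must be authorized to query deleted revisions.
import mwapi
from mwreverts.api import check_deleted

session = mwapi.Session('https://en.wikipedia.org',
                        user_agent='mwreverts demo')
reverting, reverted, reverted_to = check_deleted(session, 123456, radius=5)
if reverted is not None:
    print('revision 123456 was reverted')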
althonos/pronto
pronto/utils.py
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/utils.py#L55-L63
def nowarnings(func): """Create a function wrapped in a context that ignores warnings. """ @functools.wraps(func) def new_func(*args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter('ignore') return func(*args, **kwargs) return new_func
[ "def", "nowarnings", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "new_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_func" ]
Create a function wrapped in a context that ignores warnings.
[ "Create", "a", "function", "wrapped", "in", "a", "context", "that", "ignores", "warnings", "." ]
python
train
33
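The nowarnings decorator is self-contained, so a usage example can be stated with confidence: any warning raised inside the wrapped call is filtered out by the context manager.

import warnings

from pronto.utils import nowarnings

@nowarnings
def noisy():
    # This DeprecationWarning is swallowed by the decorator's context.
    warnings.warn('legacy code path', DeprecationWarning)
    return 42

assert noisy() == 42  # runs cleanly, no warning is emitted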
rodluger/everest
everest/gp.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/gp.py#L92-L192
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[], giter=3, gmaxf=200, guess=None): ''' Optimizes the GP by training it on the current de-trended light curve. Returns the white noise amplitude, red noise amplitude, and red noise timescale. :param array_like time: The time array :param array_like flux: The flux array :param array_like errors: The flux errors array :param array_like mask: The indices to be masked when training the GP. \ Default `[]` :param int giter: The number of iterations. Default 3 :param int gmaxf: The maximum number of function evaluations. Default 200 :param tuple guess: The guess to initialize the minimization with. \ Default :py:obj:`None` ''' log.info("Optimizing the GP...") # Save a copy of time and errors for later time_copy = np.array(time) errors_copy = np.array(errors) # Apply the mask time = np.delete(time, mask) flux = np.delete(flux, mask) errors = np.delete(errors, mask) # Remove 5-sigma outliers to be safe f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) errors = np.delete(errors, mask) # Initial guesses and bounds white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)]) amp = np.nanstd(flux) tau = 30.0 if kernel == 'Basic': if guess is None: guess = [white, amp, tau] bounds = [[0.1 * white, 10. * white], [1., 10000. * amp], [0.5, 100.]] elif kernel == 'QuasiPeriodic': if guess is None: guess = [white, amp, tau, 1., 20.] bounds = [[0.1 * white, 10. * white], [1., 10000. * amp], [1e-5, 1e2], [0.02, 100.]] else: raise ValueError('Invalid value for `kernel`.') # Loop llbest = -np.inf xbest = np.array(guess) for i in range(giter): # Randomize an initial guess iguess = [np.inf for g in guess] for j, b in enumerate(bounds): tries = 0 while (iguess[j] < b[0]) or (iguess[j] > b[1]): iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j] tries += 1 if tries > 100: iguess[j] = b[0] + np.random.random() * (b[1] - b[0]) break # Optimize x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False, bounds=bounds, args=(time, flux, errors, kernel), maxfun=gmaxf) log.info('Iteration #%d/%d:' % (i + 1, giter)) log.info(' ' + x[2]['task'].decode('utf-8')) log.info(' ' + 'Function calls: %d' % x[2]['funcalls']) log.info(' ' + 'Log-likelihood: %.3e' % -x[1]) if kernel == 'Basic': log.info(' ' + 'White noise : %.3e (%.1f x error bars)' % (x[0][0], x[0][0] / np.nanmedian(errors))) log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' % (x[0][1], x[0][1] / np.nanstd(flux))) log.info(' ' + 'Red timescale : %.2f days' % x[0][2]) elif kernel == 'QuasiPeriodic': log.info(' ' + 'White noise : %.3e (%.1f x error bars)' % (x[0][0], x[0][0] / np.nanmedian(errors))) log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' % (x[0][1], x[0][1] / np.nanstd(flux))) log.info(' ' + 'Gamma : %.3e' % x[0][2]) log.info(' ' + 'Period : %.2f days' % x[0][3]) if -x[1] > llbest: llbest = -x[1] xbest = np.array(x[0]) return xbest
[ "def", "GetKernelParams", "(", "time", ",", "flux", ",", "errors", ",", "kernel", "=", "'Basic'", ",", "mask", "=", "[", "]", ",", "giter", "=", "3", ",", "gmaxf", "=", "200", ",", "guess", "=", "None", ")", ":", "log", ".", "info", "(", "\"Optimizing the GP...\"", ")", "# Save a copy of time and errors for later", "time_copy", "=", "np", ".", "array", "(", "time", ")", "errors_copy", "=", "np", ".", "array", "(", "errors", ")", "# Apply the mask", "time", "=", "np", ".", "delete", "(", "time", ",", "mask", ")", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "errors", "=", "np", ".", "delete", "(", "errors", ",", "mask", ")", "# Remove 5-sigma outliers to be safe", "f", "=", "flux", "-", "savgol_filter", "(", "flux", ",", "49", ",", "2", ")", "+", "np", ".", "nanmedian", "(", "flux", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "mask", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "5", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "5", "*", "MAD", ")", ")", "[", "0", "]", "time", "=", "np", ".", "delete", "(", "time", ",", "mask", ")", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "errors", "=", "np", ".", "delete", "(", "errors", ",", "mask", ")", "# Initial guesses and bounds", "white", "=", "np", ".", "nanmedian", "(", "[", "np", ".", "nanstd", "(", "c", ")", "for", "c", "in", "Chunks", "(", "flux", ",", "13", ")", "]", ")", "amp", "=", "np", ".", "nanstd", "(", "flux", ")", "tau", "=", "30.0", "if", "kernel", "==", "'Basic'", ":", "if", "guess", "is", "None", ":", "guess", "=", "[", "white", ",", "amp", ",", "tau", "]", "bounds", "=", "[", "[", "0.1", "*", "white", ",", "10.", "*", "white", "]", ",", "[", "1.", ",", "10000.", "*", "amp", "]", ",", "[", "0.5", ",", "100.", "]", "]", "elif", "kernel", "==", "'QuasiPeriodic'", ":", "if", "guess", "is", "None", ":", "guess", "=", "[", "white", ",", "amp", ",", "tau", ",", "1.", ",", "20.", "]", "bounds", "=", "[", "[", "0.1", "*", "white", ",", "10.", "*", "white", "]", ",", "[", "1.", ",", "10000.", "*", "amp", "]", ",", "[", "1e-5", ",", "1e2", "]", ",", "[", "0.02", ",", "100.", "]", "]", "else", ":", "raise", "ValueError", "(", "'Invalid value for `kernel`.'", ")", "# Loop", "llbest", "=", "-", "np", ".", "inf", "xbest", "=", "np", ".", "array", "(", "guess", ")", "for", "i", "in", "range", "(", "giter", ")", ":", "# Randomize an initial guess", "iguess", "=", "[", "np", ".", "inf", "for", "g", "in", "guess", "]", "for", "j", ",", "b", "in", "enumerate", "(", "bounds", ")", ":", "tries", "=", "0", "while", "(", "iguess", "[", "j", "]", "<", "b", "[", "0", "]", ")", "or", "(", "iguess", "[", "j", "]", ">", "b", "[", "1", "]", ")", ":", "iguess", "[", "j", "]", "=", "(", "1", "+", "0.5", "*", "np", ".", "random", ".", "randn", "(", ")", ")", "*", "guess", "[", "j", "]", "tries", "+=", "1", "if", "tries", ">", "100", ":", "iguess", "[", "j", "]", "=", "b", "[", "0", "]", "+", "np", ".", "random", ".", "random", "(", ")", "*", "(", "b", "[", "1", "]", "-", "b", "[", "0", "]", ")", "break", "# Optimize", "x", "=", "fmin_l_bfgs_b", "(", "NegLnLike", ",", "iguess", ",", "approx_grad", "=", "False", ",", "bounds", "=", "bounds", ",", "args", "=", "(", "time", ",", "flux", ",", "errors", ",", "kernel", ")", ",", "maxfun", "=", "gmaxf", ")", "log", ".", "info", "(", "'Iteration #%d/%d:'", "%", "(", "i", "+", "1", ",", "giter", ")", ")", "log", ".", "info", "(", "' '", "+", "x", 
"[", "2", "]", "[", "'task'", "]", ".", "decode", "(", "'utf-8'", ")", ")", "log", ".", "info", "(", "' '", "+", "'Function calls: %d'", "%", "x", "[", "2", "]", "[", "'funcalls'", "]", ")", "log", ".", "info", "(", "' '", "+", "'Log-likelihood: %.3e'", "%", "-", "x", "[", "1", "]", ")", "if", "kernel", "==", "'Basic'", ":", "log", ".", "info", "(", "' '", "+", "'White noise : %.3e (%.1f x error bars)'", "%", "(", "x", "[", "0", "]", "[", "0", "]", ",", "x", "[", "0", "]", "[", "0", "]", "/", "np", ".", "nanmedian", "(", "errors", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Red amplitude : %.3e (%.1f x stand dev)'", "%", "(", "x", "[", "0", "]", "[", "1", "]", ",", "x", "[", "0", "]", "[", "1", "]", "/", "np", ".", "nanstd", "(", "flux", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Red timescale : %.2f days'", "%", "x", "[", "0", "]", "[", "2", "]", ")", "elif", "kernel", "==", "'QuasiPeriodic'", ":", "log", ".", "info", "(", "' '", "+", "'White noise : %.3e (%.1f x error bars)'", "%", "(", "x", "[", "0", "]", "[", "0", "]", ",", "x", "[", "0", "]", "[", "0", "]", "/", "np", ".", "nanmedian", "(", "errors", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Red amplitude : %.3e (%.1f x stand dev)'", "%", "(", "x", "[", "0", "]", "[", "1", "]", ",", "x", "[", "0", "]", "[", "1", "]", "/", "np", ".", "nanstd", "(", "flux", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Gamma : %.3e'", "%", "x", "[", "0", "]", "[", "2", "]", ")", "log", ".", "info", "(", "' '", "+", "'Period : %.2f days'", "%", "x", "[", "0", "]", "[", "3", "]", ")", "if", "-", "x", "[", "1", "]", ">", "llbest", ":", "llbest", "=", "-", "x", "[", "1", "]", "xbest", "=", "np", ".", "array", "(", "x", "[", "0", "]", ")", "return", "xbest" ]
Optimizes the GP by training it on the current de-trended light curve. Returns the white noise amplitude, red noise amplitude, and red noise timescale. :param array_like time: The time array :param array_like flux: The flux array :param array_like errors: The flux errors array :param array_like mask: The indices to be masked when training the GP. \ Default `[]` :param int giter: The number of iterations. Default 3 :param int gmaxf: The maximum number of function evaluations. Default 200 :param tuple guess: The guess to initialize the minimization with. \ Default :py:obj:`None`
[ "Optimizes", "the", "GP", "by", "training", "it", "on", "the", "current", "de", "-", "trended", "light", "curve", ".", "Returns", "the", "white", "noise", "amplitude", "red", "noise", "amplitude", "and", "red", "noise", "timescale", "." ]
python
train
38.029703
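One apparent bug in the GetKernelParams record: in the 'QuasiPeriodic' branch the default guess has five entries (white, amp, tau, gamma, period) while bounds has only four pairs, and scipy's fmin_l_bfgs_b requires one (low, high) pair per parameter, so that branch fails with the defaults. A hedged sketch of a corrected bounds list follows; the tau bound is borrowed from the 'Basic' branch as a guess about intent, not a documented fix.

# Hedged fix: the [0.5, 100.] tau bound is assumed from the 'Basic' branch;
# the original supplies 4 bounds for a 5-parameter QuasiPeriodic guess.
white, amp, tau = 1e-4, 1e-3, 30.0       # placeholder values for illustration
guess = [white, amp, tau, 1., 20.]       # white, red amp, tau, gamma, period
bounds = [[0.1 * white, 10. * white],    # white noise
          [1., 10000. * amp],            # red amplitude
          [0.5, 100.],                   # tau in days (assumed)
          [1e-5, 1e2],                   # gamma
          [0.02, 100.]]                  # period in days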
tensorflow/datasets
tensorflow_datasets/core/download/extractor.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/extractor.py#L84-L100
def _sync_extract(self, from_path, method, to_path): """Returns `to_path` once resource has been extracted there.""" to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex) try: for path, handle in iter_archive(from_path, method): _copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp) except BaseException as err: msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err) raise ExtractError(msg) # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty # directories, so delete destination first, if it already exists. if tf.io.gfile.exists(to_path): tf.io.gfile.rmtree(to_path) tf.io.gfile.rename(to_path_tmp, to_path) self._pbar_path.update(1) return to_path
[ "def", "_sync_extract", "(", "self", ",", "from_path", ",", "method", ",", "to_path", ")", ":", "to_path_tmp", "=", "'%s%s_%s'", "%", "(", "to_path", ",", "constants", ".", "INCOMPLETE_SUFFIX", ",", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "try", ":", "for", "path", ",", "handle", "in", "iter_archive", "(", "from_path", ",", "method", ")", ":", "_copy", "(", "handle", ",", "path", "and", "os", ".", "path", ".", "join", "(", "to_path_tmp", ",", "path", ")", "or", "to_path_tmp", ")", "except", "BaseException", "as", "err", ":", "msg", "=", "'Error while extracting %s to %s : %s'", "%", "(", "from_path", ",", "to_path", ",", "err", ")", "raise", "ExtractError", "(", "msg", ")", "# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty", "# directories, so delete destination first, if it already exists.", "if", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "to_path", ")", ":", "tf", ".", "io", ".", "gfile", ".", "rmtree", "(", "to_path", ")", "tf", ".", "io", ".", "gfile", ".", "rename", "(", "to_path_tmp", ",", "to_path", ")", "self", ".", "_pbar_path", ".", "update", "(", "1", ")", "return", "to_path" ]
Returns `to_path` once resource has been extracted there.
[ "Returns", "to_path", "once", "resource", "has", "been", "extracted", "there", "." ]
python
train
47.941176
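_sync_extract uses the extract-to-temp-then-swap pattern so a crash mid-extraction never leaves a half-written destination. A library-agnostic sketch of the same idea using only the standard library (the TFDS version additionally streams archive members one by one and updates a progress bar):

import os
import shutil
import uuid

def extract_atomically(archive_path, to_path):
    # Extract into a uniquely named sibling first ...
    tmp = '%s.incomplete_%s' % (to_path, uuid.uuid4().hex)
    shutil.unpack_archive(archive_path, tmp)
    # ... then swap it into place; renaming over a non-empty directory fails,
    # so any stale destination is removed first, mirroring the record above.
    if os.path.exists(to_path):
        shutil.rmtree(to_path)
    os.rename(tmp, to_path)
    return to_path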
VasilyStepanov/pywidl
pywidl/grammar.py
https://github.com/VasilyStepanov/pywidl/blob/8d84b2e53157bfe276bf16301c19e8b6b32e861e/pywidl/grammar.py#L291-L293
def p_ConstValue_float(p): """ConstValue : FLOAT""" p[0] = model.Value(type=model.Value.FLOAT, value=p[1])
[ "def", "p_ConstValue_float", "(", "p", ")", ":", "p", "[", "0", "]", "=", "model", ".", "Value", "(", "type", "=", "model", ".", "Value", ".", "FLOAT", ",", "value", "=", "p", "[", "1", "]", ")" ]
ConstValue : FLOAT
[ "ConstValue", ":", "FLOAT" ]
python
train
36
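In PLY, the rule's docstring is the grammar production, which is why this three-line function is complete as written. A hedged sibling rule mirroring it; the INTEGER token and model.Value.INTEGER constant are assumptions by analogy, not part of the pywidl source shown here.

# Hedged sketch: INTEGER and model.Value.INTEGER are assumed to exist,
# mirroring the FLOAT rule in the record above.
def p_ConstValue_integer(p):
    """ConstValue : INTEGER"""
    p[0] = model.Value(type=model.Value.INTEGER, value=p[1])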
Clinical-Genomics/scout
scout/commands/view/transcripts.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/commands/view/transcripts.py#L13-L32
def transcripts(context, build, hgnc_id, json): """Show all transcripts in the database""" LOG.info("Running scout view transcripts") adapter = context.obj['adapter'] if not json: click.echo("Chromosome\tstart\tend\ttranscript_id\thgnc_id\trefseq\tis_primary") for tx_obj in adapter.transcripts(build=build, hgnc_id=hgnc_id): if json: pp(tx_obj) continue click.echo("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format( tx_obj['chrom'], tx_obj['start'], tx_obj['end'], tx_obj['ensembl_transcript_id'], tx_obj['hgnc_id'], tx_obj.get('refseq_id', ''), tx_obj.get('is_primary') or '', ))
[ "def", "transcripts", "(", "context", ",", "build", ",", "hgnc_id", ",", "json", ")", ":", "LOG", ".", "info", "(", "\"Running scout view transcripts\"", ")", "adapter", "=", "context", ".", "obj", "[", "'adapter'", "]", "if", "not", "json", ":", "click", ".", "echo", "(", "\"Chromosome\\tstart\\tend\\ttranscript_id\\thgnc_id\\trefseq\\tis_primary\"", ")", "for", "tx_obj", "in", "adapter", ".", "transcripts", "(", "build", "=", "build", ",", "hgnc_id", "=", "hgnc_id", ")", ":", "if", "json", ":", "pp", "(", "tx_obj", ")", "continue", "click", ".", "echo", "(", "\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\"", ".", "format", "(", "tx_obj", "[", "'chrom'", "]", ",", "tx_obj", "[", "'start'", "]", ",", "tx_obj", "[", "'end'", "]", ",", "tx_obj", "[", "'ensembl_transcript_id'", "]", ",", "tx_obj", "[", "'hgnc_id'", "]", ",", "tx_obj", ".", "get", "(", "'refseq_id'", ",", "''", ")", ",", "tx_obj", ".", "get", "(", "'is_primary'", ")", "or", "''", ",", ")", ")" ]
Show all transcripts in the database
[ "Show", "all", "transcripts", "in", "the", "database" ]
python
test
35.8
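A hedged way to exercise a Click command like transcripts is click.testing.CliRunner. The stub adapter below is hypothetical and only mimics the two things the command touches (context.obj['adapter'] and its transcripts() iterator); the command's option decorators are not shown in the record, so no flags are passed and the defaults apply.

# Hedged sketch: StubAdapter is invented for illustration; with no
# transcripts to iterate, the command prints only the header line.
from click.testing import CliRunner

from scout.commands.view.transcripts import transcripts

class StubAdapter(object):
    def transcripts(self, build=None, hgnc_id=None):
        return iter([])

runner = CliRunner()
result = runner.invoke(transcripts, [], obj={'adapter': StubAdapter()})
print(result.output)  # expected: the tab-separated header row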
kwikteam/phy
phy/gui/actions.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/actions.py#L136-L146
def _show_shortcuts(shortcuts, name=None): """Display shortcuts.""" name = name or '' print('') if name: name = ' for ' + name print('Keyboard shortcuts' + name) for name in sorted(shortcuts): shortcut = _get_shortcut_string(shortcuts[name]) if not name.startswith('_'): print('- {0:<40}: {1:s}'.format(name, shortcut))
[ "def", "_show_shortcuts", "(", "shortcuts", ",", "name", "=", "None", ")", ":", "name", "=", "name", "or", "''", "print", "(", "''", ")", "if", "name", ":", "name", "=", "' for '", "+", "name", "print", "(", "'Keyboard shortcuts'", "+", "name", ")", "for", "name", "in", "sorted", "(", "shortcuts", ")", ":", "shortcut", "=", "_get_shortcut_string", "(", "shortcuts", "[", "name", "]", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", ":", "print", "(", "'- {0:<40}: {1:s}'", ".", "format", "(", "name", ",", "shortcut", ")", ")" ]
Display shortcuts.
[ "Display", "shortcuts", "." ]
python
train
33.545455
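A small sketch of the expected output shape. One assumption: the companion helper _get_shortcut_string (not shown in this record) is taken to accept a plain string; names with a leading underscore are skipped by the loop.

from phy.gui.actions import _show_shortcuts  # private helper, shown above

shortcuts = {'save': 'ctrl+s', 'undo': 'ctrl+z', '_internal': 'f12'}
_show_shortcuts(shortcuts, name='file menu')
# Keyboard shortcuts for file menu
# - save                                    : ctrl+s
# - undo                                    : ctrl+z
# ('_internal' is filtered out by the leading-underscore check)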
openid/python-openid
openid/store/sqlstore.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/store/sqlstore.py#L469-L485
def db_set_assoc(self, server_url, handle, secret, issued, lifetime, assoc_type): """ Set an association. This is implemented as a method because REPLACE INTO is not supported by PostgreSQL (and is not standard SQL). """ result = self.db_get_assoc(server_url, handle) rows = self.cur.fetchall() if len(rows): # Update the table since this association already exists. return self.db_update_assoc(secret, issued, lifetime, assoc_type, server_url, handle) else: # Insert a new record because this association wasn't # found. return self.db_new_assoc(server_url, handle, secret, issued, lifetime, assoc_type)
[ "def", "db_set_assoc", "(", "self", ",", "server_url", ",", "handle", ",", "secret", ",", "issued", ",", "lifetime", ",", "assoc_type", ")", ":", "result", "=", "self", ".", "db_get_assoc", "(", "server_url", ",", "handle", ")", "rows", "=", "self", ".", "cur", ".", "fetchall", "(", ")", "if", "len", "(", "rows", ")", ":", "# Update the table since this associations already exists.", "return", "self", ".", "db_update_assoc", "(", "secret", ",", "issued", ",", "lifetime", ",", "assoc_type", ",", "server_url", ",", "handle", ")", "else", ":", "# Insert a new record because this association wasn't", "# found.", "return", "self", ".", "db_new_assoc", "(", "server_url", ",", "handle", ",", "secret", ",", "issued", ",", "lifetime", ",", "assoc_type", ")" ]
Set an association. This is implemented as a method because REPLACE INTO is not supported by PostgreSQL (and is not standard SQL).
[ "Set", "an", "association", ".", "This", "is", "implemented", "as", "a", "method", "because", "REPLACE", "INTO", "is", "not", "supported", "by", "PostgreSQL", "(", "and", "is", "not", "standard", "SQL", ")", "." ]
python
train
47
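The probe-then-UPDATE-or-INSERT above is the portable stand-in for REPLACE INTO. A self-contained sqlite3 sketch of the same pattern; the table name and columns are illustrative, not the store's real schema:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE assoc (server_url TEXT, handle TEXT, secret BLOB, '
             'PRIMARY KEY (server_url, handle))')

def set_assoc(conn, server_url, handle, secret):
    # Probe for an existing row first, then UPDATE or INSERT accordingly,
    # mirroring db_set_assoc's two branches.
    cur = conn.execute('SELECT 1 FROM assoc WHERE server_url=? AND handle=?',
                       (server_url, handle))
    if cur.fetchone():
        conn.execute('UPDATE assoc SET secret=? WHERE server_url=? AND handle=?',
                     (secret, server_url, handle))
    else:
        conn.execute('INSERT INTO assoc VALUES (?, ?, ?)',
                     (server_url, handle, secret))

set_assoc(conn, 'https://example.com/openid', 'h1', b's3cret')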
jrxFive/python-nomad
nomad/api/client.py
https://github.com/jrxFive/python-nomad/blob/37df37e4de21e6f8ac41c6154e7f1f44f1800020/nomad/api/client.py#L43-L59
def list_files(self, id=None, path="/"): """ List files in an allocation directory. https://www.nomadproject.io/docs/http/client-fs-ls.html arguments: - id - path returns: list raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException """ if id: return self.request(id, params={"path": path}, method="get").json() else: return self.request(params={"path": path}, method="get").json()
[ "def", "list_files", "(", "self", ",", "id", "=", "None", ",", "path", "=", "\"/\"", ")", ":", "if", "id", ":", "return", "self", ".", "request", "(", "id", ",", "params", "=", "{", "\"path\"", ":", "path", "}", ",", "method", "=", "\"get\"", ")", ".", "json", "(", ")", "else", ":", "return", "self", ".", "request", "(", "params", "=", "{", "\"path\"", ":", "path", "}", ",", "method", "=", "\"get\"", ")", ".", "json", "(", ")" ]
List files in an allocation directory. https://www.nomadproject.io/docs/http/client-fs-ls.html arguments: - id - path returns: list raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException
[ "List", "files", "in", "an", "allocation", "directory", "." ]
python
test
34.058824
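A hedged usage sketch for list_files. The attribute path from the top-level Nomad object to this client endpoint is an assumption inferred from the record's location in nomad/api/client.py (it has moved between python-nomad versions), and the allocation id is a placeholder.

# Hedged sketch: `n.client.list_files` is an assumed attribute path; the
# allocation id below is made up, and a reachable Nomad agent is required.
import nomad

n = nomad.Nomad(host='127.0.0.1', timeout=5)
alloc_id = 'a8198d79-cfdb-6593-a999-1e9adabcba2e'
for entry in n.client.list_files(alloc_id, path='/alloc/logs'):
    print(entry)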
log2timeline/dfvfs
dfvfs/vfs/tsk_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tsk_file_entry.py#L671-L705
def _TSKFileTimeCopyToStatTimeTuple(self, tsk_file, time_value): """Copies a SleuthKit file object time value to a stat timestamp tuple. Args: tsk_file (pytsk3.File): TSK file. time_value (str): name of the time value. Returns: tuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction of second in 100 nano seconds intervals. The number of seconds is None on error, or if the file system does not include the requested timestamp. The fraction of second is None on error, or if the file system does not support sub-second precision. Raises: BackEndError: if the TSK File .info, .info.meta or info.fs_info attribute is missing. """ if (not tsk_file or not tsk_file.info or not tsk_file.info.meta or not tsk_file.info.fs_info): raise errors.BackEndError( 'Missing TSK File .info, .info.meta. or .info.fs_info') stat_time = getattr(tsk_file.info.meta, time_value, None) stat_time_nano = None if self._file_system_type in self._TSK_HAS_NANO_FS_TYPES: time_value_nano = '{0:s}_nano'.format(time_value) stat_time_nano = getattr(tsk_file.info.meta, time_value_nano, None) # Sleuthkit 4.2.0 switched from 100 nano seconds precision to # 1 nano seconds precision. if stat_time_nano is not None and pytsk3.TSK_VERSION_NUM >= 0x040200ff: stat_time_nano /= 100 return stat_time, stat_time_nano
[ "def", "_TSKFileTimeCopyToStatTimeTuple", "(", "self", ",", "tsk_file", ",", "time_value", ")", ":", "if", "(", "not", "tsk_file", "or", "not", "tsk_file", ".", "info", "or", "not", "tsk_file", ".", "info", ".", "meta", "or", "not", "tsk_file", ".", "info", ".", "fs_info", ")", ":", "raise", "errors", ".", "BackEndError", "(", "'Missing TSK File .info, .info.meta. or .info.fs_info'", ")", "stat_time", "=", "getattr", "(", "tsk_file", ".", "info", ".", "meta", ",", "time_value", ",", "None", ")", "stat_time_nano", "=", "None", "if", "self", ".", "_file_system_type", "in", "self", ".", "_TSK_HAS_NANO_FS_TYPES", ":", "time_value_nano", "=", "'{0:s}_nano'", ".", "format", "(", "time_value", ")", "stat_time_nano", "=", "getattr", "(", "tsk_file", ".", "info", ".", "meta", ",", "time_value_nano", ",", "None", ")", "# Sleuthkit 4.2.0 switched from 100 nano seconds precision to", "# 1 nano seconds precision.", "if", "stat_time_nano", "is", "not", "None", "and", "pytsk3", ".", "TSK_VERSION_NUM", ">=", "0x040200ff", ":", "stat_time_nano", "/=", "100", "return", "stat_time", ",", "stat_time_nano" ]
Copies a SleuthKit file object time value to a stat timestamp tuple. Args: tsk_file (pytsk3.File): TSK file. time_value (str): name of the time value. Returns: tuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction of second in 100 nano seconds intervals. The number of seconds is None on error, or if the file system does not include the requested timestamp. The fraction of second is None on error, or if the file system does not support sub-second precision. Raises: BackEndError: if the TSK File .info, .info.meta or info.fs_info attribute is missing.
[ "Copies", "a", "SleuthKit", "file", "object", "time", "value", "to", "a", "stat", "timestamp", "tuple", "." ]
python
train
41
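The returned tuple is (seconds since the epoch, fraction in 100-nanosecond intervals), so converting to a float timestamp divides the fraction by 10**7 (100 ns = 1/10,000,000 s). A small sketch with placeholder values:

import datetime

def stat_time_tuple_to_datetime(stat_time, stat_time_nano):
    # stat_time_nano counts 100 ns intervals, i.e. 1/10**7 of a second.
    seconds = stat_time + (stat_time_nano or 0) / 10000000.0
    return datetime.datetime.utcfromtimestamp(seconds)

print(stat_time_tuple_to_datetime(1281643591, 5468750))  # fraction = 0.546875 s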