Dataset preview: one record per Python function. Columns and value ranges:

  repo          string   (lengths 7 to 55)
  path          string   (lengths 4 to 223)
  url           string   (lengths 87 to 315)
  code          string   (lengths 75 to 104k)
  docstring     string   (lengths 1 to 46.9k)
  language      string   (1 distinct value)
  partition     string   (3 distinct values)
  avg_line_len  float64  (7.91 to 980)
emory-libraries/eulfedora
eulfedora/api.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/api.py#L293-L308
def listMethods(self, pid, sdefpid=None):
    '''List available service methods.

    :param pid: object pid
    :param sdefpid: service definition pid
    :rtype: :class:`requests.models.Response`
    '''
    # /objects/{pid}/methods ? [format, datetime]
    # /objects/{pid}/methods/{sdefpid} ? [format, datetime]
    ## NOTE: getting an error when sdefpid is specified; fedora issue?
    uri = 'objects/%(pid)s/methods' % {'pid': pid}
    if sdefpid:
        uri += '/' + sdefpid
    return self.get(uri, params=self.format_xml)
List available service methods. :param pid: object pid :param sdefpid: service definition pid :rtype: :class:`requests.models.Response`
python
train
35.375
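A minimal usage sketch for the method above (hypothetical: `api` is an instantiated eulfedora REST API client and 'demo:1' is a placeholder pid):

# Hypothetical client and pid, for illustration only.
response = api.listMethods('demo:1')
print(response.status_code)  # HTTP status of the underlying GET
print(response.text)         # XML listing of the object's service methods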
mgaitan/waliki
waliki/acl.py
https://github.com/mgaitan/waliki/blob/5baaf6f043275920a1174ff233726f7ff4bfb5cf/waliki/acl.py#L48-L87
def permission_required(perms, login_url=None, raise_exception=False,
                        redirect_field_name=REDIRECT_FIELD_NAME):
    """
    This is analogous to Django's built-in ``permission_required`` decorator,
    but improved to check per-slug ACLRules and default permissions for
    anonymous and logged-in users.

    If there is a rule affecting a slug, the user needs to be part of the
    rule's allowed users. If there isn't a matching rule, default
    permissions apply.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if check_perms(perms, request.user, kwargs['slug'],
                           raise_exception=raise_exception):
                return view_func(request, *args, **kwargs)

            if is_authenticated(request.user):
                if WALIKI_RENDER_403:
                    return render(request, 'waliki/403.html', kwargs, status=403)
                else:
                    raise PermissionDenied

            path = request.build_absolute_uri()
            # urlparse chokes on lazy objects in Python 3, force to str
            resolved_login_url = force_str(
                resolve_url(login_url or settings.LOGIN_URL))
            # If the login url is the same scheme and net location then just
            # use the path as the "next" url.
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                    (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(path, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
This is analogous to Django's built-in ``permission_required`` decorator, but improved to check per-slug ACLRules and default permissions for anonymous and logged-in users. If there is a rule affecting a slug, the user needs to be part of the rule's allowed users. If there isn't a matching rule, default permissions apply.
python
train
46.825
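A minimal sketch of applying the decorator above to a view; the permission name 'change_page' and the view body are illustrative assumptions:

# The decorated view must receive a `slug` kwarg, since the decorator
# checks per-slug rules via kwargs['slug'].
@permission_required('change_page', raise_exception=True)
def edit(request, slug):
    ...  # only reached when check_perms() passes for this slug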
daniellawrence/graphitesend
graphitesend/graphitesend.py
https://github.com/daniellawrence/graphitesend/blob/02281263e642f9b6e146886d4544e1d7aebd7753/graphitesend/graphitesend.py#L455-L490
def str2listtuple(self, string_message):
    "Convert a string that is ready to be sent to graphite into a tuple"

    if type(string_message).__name__ not in ('str', 'unicode'):
        raise TypeError("Must provide a string or unicode")

    if not string_message.endswith('\n'):
        string_message += "\n"

    tpl_list = []
    for line in string_message.split('\n'):
        line = line.strip()
        if not line:
            continue
        path, metric, timestamp = (None, None, None)
        try:
            (path, metric, timestamp) = line.split()
        except ValueError:
            raise ValueError(
                "message must contain - metric_name, value and timestamp '%s'"
                % line)
        try:
            timestamp = float(timestamp)
        except ValueError:
            raise ValueError("Timestamp must be float or int")

        tpl_list.append((path, (timestamp, metric)))

    if len(tpl_list) == 0:
        raise GraphiteSendException("No messages to send")

    payload = pickle.dumps(tpl_list)
    header = struct.pack("!L", len(payload))
    message = header + payload

    return message
Convert a string that is ready to be sent to graphite into a tuple
python
train
33.861111
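A sketch of the wire format produced above, assuming `g` is an instantiated graphitesend client (hypothetical name):

# Each input line is "<metric path> <value> <unix timestamp>".
msg = g.str2listtuple("servers.web1.load 1.5 1420070400\n")
# msg is a 4-byte big-endian length header (struct "!L") followed by a
# pickled [(path, (timestamp, value))] list, the layout Graphite's
# pickle receiver expects.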
jasonrbriggs/stomp.py
stomp/utils.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/utils.py#L190-L208
def calculate_heartbeats(shb, chb):
    """
    Given a heartbeat string from the server, and a heartbeat tuple from
    the client, calculate what the actual heartbeat settings should be.

    :param (str,str) shb: server heartbeat numbers
    :param (int,int) chb: client heartbeat numbers

    :rtype: (int,int)
    """
    (sx, sy) = shb
    (cx, cy) = chb
    x = 0
    y = 0
    if cx != 0 and sy != '0':
        x = max(cx, int(sy))
    if cy != 0 and sx != '0':
        y = max(cy, int(sx))
    return x, y
Given a heartbeat string from the server, and a heartbeat tuple from the client, calculate what the actual heartbeat settings should be. :param (str,str) shb: server heartbeat numbers :param (int,int) chb: client heartbeat numbers :rtype: (int,int)
python
train
26.105263
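A worked example of the negotiation above: a direction is enabled only when both sides ask for it, and the larger (slower) interval wins:

# Server values arrive as strings, client values as ints.
print(calculate_heartbeats(('15000', '45000'), (30000, 30000)))  # -> (45000, 30000)
print(calculate_heartbeats(('0', '0'), (30000, 30000)))          # -> (0, 0)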
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L52-L64
def get_unique_figname(dirname, root, ext):
    """
    Append a number to "root" to form a filename that does not already exist
    in "dirname".
    """
    i = 1
    figname = root + '_%d' % i + ext
    while True:
        if osp.exists(osp.join(dirname, figname)):
            i += 1
            figname = root + '_%d' % i + ext
        else:
            return osp.join(dirname, figname)
Append a number to "root" to form a filename that does not already exist in "dirname".
python
train
29.230769
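A usage sketch for the helper above ('/tmp' and 'plot' are illustrative):

# If /tmp/plot_1.png already exists, this returns '/tmp/plot_2.png',
# and so on until an unused name is found.
fname = get_unique_figname('/tmp', 'plot', '.png')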
blockstack/virtualchain
virtualchain/virtualchain.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/virtualchain.py#L67-L85
def virtualchain_set_opfields(op, **fields):
    """
    Pass along virtualchain-reserved fields to a virtualchain operation.
    This layer of indirection is meant to help with future compatibility,
    so virtualchain implementations do not try to set operation fields
    directly.
    """
    # warn about unsupported fields
    for f in fields.keys():
        if f not in indexer.RESERVED_KEYS:
            log.warning("Unsupported virtualchain field '%s'" % f)

    # propagate reserved fields
    for f in fields.keys():
        if f in indexer.RESERVED_KEYS:
            op[f] = fields[f]

    return op
Pass along virtualchain-reserved fields to a virtualchain operation. This layer of indirection is meant to help with future compatibility, so virtualchain implementations do not try to set operation fields directly.
python
train
31.368421
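A sketch of the filtering above, under the assumption (for illustration only) that 'virtualchain_opcode' is one of indexer.RESERVED_KEYS:

op = virtualchain_set_opfields({}, virtualchain_opcode='+', bogus_field=1)
# op == {'virtualchain_opcode': '+'}; 'bogus_field' is dropped with a
# logged warning rather than an error.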
coderanger/depot
depot/storage.py
https://github.com/coderanger/depot/blob/d1a96f13204ad7028432096d25718e611d4d3d9d/depot/storage.py#L102-L119
def file(cls, uri_or_path):
    """
    Given either a URI like s3://bucket/path.txt or a path like /path.txt,
    return a file object for it.
    """
    uri = urlparse(uri_or_path)
    if not uri.scheme:
        # Just a normal path
        return open(uri_or_path, 'rb')
    else:
        it = cls(uri_or_path).download_iter(uri.path.lstrip('/'), skip_hash=True)
        if not it:
            raise ValueError('{0} not found'.format(uri_or_path))
        tmp = tempfile.TemporaryFile()
        for chunk in it:
            tmp.write(chunk)
        tmp.seek(0, 0)
        return tmp
Given either a URI like s3://bucket/path.txt or a path like /path.txt, return a file object for it.
python
train
35.388889
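A usage sketch, assuming `Storage` is the class that defines this classmethod (the class name is not shown in the snippet):

fp = Storage.file('/etc/hostname')         # plain path: open(..., 'rb')
fp = Storage.file('s3://bucket/path.txt')  # URI: spooled into a TemporaryFile
data = fp.read()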
DataBiosphere/toil
src/toil/common.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/common.py#L737-L775
def start(self, rootJob):
    """
    Invoke a Toil workflow with the given job as the root for an initial run.
    This method must be called in the body of a ``with Toil(...) as toil:``
    statement. This method should not be called more than once for a workflow
    that has not finished.

    :param toil.job.Job rootJob: The root job of the workflow
    :return: The root job's return value
    """
    self._assertContextManagerUsed()
    self.writePIDFile()
    if self.config.restart:
        raise ToilRestartException('A Toil workflow can only be started once. Use '
                                   'Toil.restart() to resume it.')

    self._batchSystem = self.createBatchSystem(self.config)
    self._setupAutoDeployment(rootJob.getUserScript())
    try:
        self._setBatchSystemEnvVars()
        self._serialiseEnv()
        self._cacheAllJobs()

        # Pickle the promised return value of the root job, then write the pickled promise to
        # a shared file, where we can find and unpickle it at the end of the workflow.
        # Unpickling the promise will automatically substitute the promise for the actual
        # return value.
        with self._jobStore.writeSharedFileStream('rootJobReturnValue') as fH:
            rootJob.prepareForPromiseRegistration(self._jobStore)
            promise = rootJob.rv()
            pickle.dump(promise, fH, protocol=pickle.HIGHEST_PROTOCOL)

        # Setup the first wrapper and cache it
        rootJobGraph = rootJob._serialiseFirstJob(self._jobStore)
        self._cacheJob(rootJobGraph)

        self._setProvisioner()
        return self._runMainLoop(rootJobGraph)
    finally:
        self._shutdownBatchSystem()
Invoke a Toil workflow with the given job as the root for an initial run. This method must be called in the body of a ``with Toil(...) as toil:`` statement. This method should not be called more than once for a workflow that has not finished. :param toil.job.Job rootJob: The root job of the workflow :return: The root job's return value
python
train
45.666667
relekang/python-semantic-release
semantic_release/vcs_helpers.py
https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/vcs_helpers.py#L38-L59
def get_last_version(skip_tags=None) -> Optional[str]:
    """
    Return the last version from repo tags.

    :return: A string containing the version number.
    """
    debug('get_last_version skip_tags=', skip_tags)
    check_repo()

    skip_tags = skip_tags or []

    def version_finder(tag):
        if isinstance(tag.commit, TagObject):
            return tag.tag.tagged_date
        return tag.commit.committed_date

    for i in sorted(repo.tags, reverse=True, key=version_finder):
        if re.match(r'v\d+\.\d+\.\d+', i.name):
            if i.name in skip_tags:
                continue
            return i.name[1:]

    return None
Return the last version from repo tags. :return: A string containing the version number.
python
train
28.045455
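A sketch of the selection rule above, assuming the repository is tagged v1.0.0, v1.1.0 and v1.1.1 (illustrative tags):

# Tags are ordered by tag/commit date, newest first; the leading 'v'
# is stripped from the returned value.
get_last_version()                      # -> '1.1.1'
get_last_version(skip_tags=['v1.1.1'])  # -> '1.1.0'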
EnigmaBridge/jbossply
jbossply/jbossparser.py
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L185-L189
def t_escaped_CARRIAGE_RETURN_CHAR(self, t):
    r'\x72'  # 'r'
    t.lexer.pop_state()
    t.value = unichr(0x000d)
    return t
r'\x72'
python
train
28.2
EnigmaBridge/client.py
ebclient/eb_request.py
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/eb_request.py#L68-L99
def call(self, request=None, *args, **kwargs):
    """
    Calls multiple times, with retry.

    :param request:
    :return: response
    """
    if request is not None:
        self.request = request

    retry = self.request.configuration.retry
    if not isinstance(retry, SimpleRetry):
        raise Error('Currently only the fast retry strategy is supported')

    last_exception = None
    for i in range(0, retry.max_retry):
        try:
            if i > 0:
                retry.sleep_jitter()

            self.call_once()
            return self.response

        except Exception as ex:
            last_exception = RequestFailed(message='Request failed', cause=ex)
            logger.debug("Request %d failed, exception: %s" % (i, ex))

            # Last exception - throw it here to have a stack
            if i + 1 == retry.max_retry:
                raise last_exception

    raise last_exception
Calls multiple times, with retry. :param request: :return: response
python
train
30.625
ThreatConnect-Inc/tcex
tcex/tcex_resources.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L2566-L2576
def read(self, domain, type_name, search_command, body=None):
    """Read entry in ThreatConnect Data Store

    Args:
        domain (string): One of 'local', 'organization', or 'system'.
        type_name (string): This is a free form index type name. The
            ThreatConnect API will use this resource verbatim.
        search_command (string): Search command to pass to ES.
        body (str): JSON body
    """
    return self._request(domain, type_name, search_command, 'GET', body)
Read entry in ThreatConnect Data Store Args: domain (string): One of 'local', 'organization', or 'system'. type_name (string): This is a free form index type name. The ThreatConnect API will use this resource verbatim. search_command (string): Search command to pass to ES. body (str): JSON body
python
train
47.272727
consbio/gis-metadata-parser
gis_metadata/utils.py
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L592-L618
def validate_complex_list(prop, value, xpath_map=None):
    """ Default validation for Attribute Details data structure """

    if value is not None:
        validate_type(prop, value, (dict, list))

        if prop in _complex_definitions:
            complex_keys = _complex_definitions[prop]
        else:
            complex_keys = {} if xpath_map is None else xpath_map

        for idx, complex_struct in enumerate(wrap_value(value)):
            cs_idx = prop + '[' + str(idx) + ']'
            validate_type(cs_idx, complex_struct, dict)

            for cs_prop, cs_val in iteritems(complex_struct):
                cs_key = '.'.join((cs_idx, cs_prop))

                if cs_prop not in complex_keys:
                    _validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))

                if not isinstance(cs_val, list):
                    validate_type(cs_key, cs_val, (string_types, list))
                else:
                    for list_idx, list_val in enumerate(cs_val):
                        list_prop = cs_key + '[' + str(list_idx) + ']'
                        validate_type(list_prop, list_val, string_types)
Default validation for Attribute Details data structure
python
train
42.259259
juju-solutions/charms.reactive
charms/reactive/bus.py
https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/bus.py#L142-L151
def add_predicate(self, predicate):
    """
    Add a new predicate callback to this handler.
    """
    _predicate = predicate
    if isinstance(predicate, partial):
        _predicate = 'partial(%s, %s, %s)' % (predicate.func,
                                              predicate.args,
                                              predicate.keywords)
    if LOG_OPTS['register']:
        hookenv.log('  Adding predicate for %s: %s' % (self.id(), _predicate),
                    level=hookenv.DEBUG)
    self._predicates.append(predicate)
Add a new predicate callback to this handler.
python
train
46
klmitch/turnstile
turnstile/control.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/control.py#L308-L340
def ping(daemon, channel, data=None):
    """
    Process the 'ping' control message.

    :param daemon: The control daemon; used to get at the
                   configuration and the database.
    :param channel: The publish channel to which to send the
                    response.
    :param data: Optional extra data.  Will be returned as the
                 second argument of the response.

    Responds to the named channel with a command of 'pong' and
    with the node_name (if configured) and provided data as
    arguments.
    """

    if not channel:
        # No place to reply to
        return

    # Get our configured node name
    node_name = daemon.config['control'].get('node_name')

    # Format the response
    reply = ['pong']
    if node_name or data:
        reply.append(node_name or '')
        if data:
            reply.append(data)

    # And send it
    with utils.ignore_except():
        daemon.db.publish(channel, ':'.join(reply))
Process the 'ping' control message. :param daemon: The control daemon; used to get at the configuration and the database. :param channel: The publish channel to which to send the response. :param data: Optional extra data. Will be returned as the second argument of the response. Responds to the named channel with a command of 'pong' and with the node_name (if configured) and provided data as arguments.
python
train
28.181818
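A self-contained sketch of the reply wire format assembled above (no daemon needed):

node_name, data = 'node1', 'abc'   # illustrative values
reply = ['pong']
if node_name or data:
    reply.append(node_name or '')
    if data:
        reply.append(data)
print(':'.join(reply))   # -> 'pong:node1:abc'; with neither set, just 'pong'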
pallets/werkzeug
src/werkzeug/http.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/http.py#L812-L843
def _dump_date(d, delim):
    """Used for `http_date` and `cookie_date`."""
    if d is None:
        d = gmtime()
    elif isinstance(d, datetime):
        d = d.utctimetuple()
    elif isinstance(d, (integer_types, float)):
        d = gmtime(d)
    return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
        ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
        d.tm_mday,
        delim,
        (
            "Jan", "Feb", "Mar", "Apr", "May", "Jun",
            "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
        )[d.tm_mon - 1],
        delim,
        str(d.tm_year),
        d.tm_hour,
        d.tm_min,
        d.tm_sec,
    )
Used for `http_date` and `cookie_date`.
python
train
23
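A worked example of the formatter above:

# Epoch 0 is Thu, 01 Jan 1970 UTC; the delimiter distinguishes the
# RFC 1123 form (http_date) from the dashed cookie form.
print(_dump_date(0, ' '))   # -> 'Thu, 01 Jan 1970 00:00:00 GMT'
print(_dump_date(0, '-'))   # -> 'Thu, 01-Jan-1970 00:00:00 GMT'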
ytjia/utils-py
utils_py/time_seg_util.py
https://github.com/ytjia/utils-py/blob/68039b367e2e38fdecf234ecc625406b9e203ec0/utils_py/time_seg_util.py#L95-L103
def index_to_time_seg(time_seg_idx, slide_step):
    """
    Convert a time-segment index value into a time-segment string.
    :param time_seg_idx:
    :param slide_step:
    :return:
    """
    assert (time_seg_idx * slide_step < const.MINUTES_IN_A_DAY)
    return time_util.minutes_to_time_str(time_seg_idx * slide_step)
Convert a time-segment index value into a time-segment string. :param time_seg_idx: :param slide_step: :return:
python
train
30
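A usage sketch, assuming time_util.minutes_to_time_str renders minutes past midnight as 'HH:MM' (an assumption; that helper is not shown):

# With a 30-minute slide step, index 3 starts at minute 90.
index_to_time_seg(3, 30)   # -> '01:30' under the HH:MM assumption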
annoviko/pyclustering
pyclustering/cluster/clarans.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/clarans.py#L66-L93
def process(self):
    """!
    @brief Performs cluster analysis in line with rules of CLARANS algorithm.

    @see get_clusters()
    @see get_medoids()

    """

    random.seed()

    for _ in range(0, self.__numlocal):
        # set (current) random medoids
        self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)

        # update clusters in line with random allocated medoids
        self.__update_clusters(self.__current)

        # optimize configuration
        self.__optimize_configuration()

        # obtain cost of current cluster configuration and compare it with the best obtained
        estimation = self.__calculate_estimation()
        if estimation < self.__optimal_estimation:
            self.__optimal_medoids = self.__current[:]
            self.__optimal_estimation = estimation

    self.__update_clusters(self.__optimal_medoids)
! @brief Performs cluster analysis in line with rules of CLARANS algorithm. @see get_clusters() @see get_medoids()
python
valid
37.571429
skggm/skggm
inverse_covariance/inverse_covariance.py
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L315-L345
def ebic_select(self, gamma=0):
    """Uses Extended Bayesian Information Criteria for model selection.
    Can only be used in path mode (doesn't really make sense otherwise).

    See:
        Extended Bayesian Information Criteria for Gaussian Graphical Models
        R. Foygel and M. Drton
        NIPS 2010

    Parameters
    ----------
    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.

    Returns
    -------
    Lambda index with best ebic score.  When multiple ebic scores are the
    same, returns the smallest lambda (largest index) with minimum score.
    """
    if not isinstance(self.precision_, list):
        raise ValueError("EBIC requires multiple models to select from.")

    if not self.is_fitted_:
        return

    ebic_scores = self.ebic(gamma=gamma)
    min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
    return np.max(min_indices)
Uses Extended Bayesian Information Criteria for model selection. Can only be used in path mode (doesn't really make sense otherwise). See: Extended Bayesian Information Criteria for Gaussian Graphical Models R. Foygel and M. Drton NIPS 2010 Parameters ---------- gamma : (float) \in (0, 1) Choice of gamma=0 leads to classical BIC Positive gamma leads to stronger penalization of large graphs. Returns ------- Lambda index with best ebic score. When multiple ebic scores are the same, returns the smallest lambda (largest index) with minimum score.
python
train
34.129032
nchopin/particles
particles/smc_samplers.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L134-L155
def all_distinct(l, idx):
    """
    Returns the list [l[i] for i in idx]
    When needed, objects l[i] are replaced by a copy, to make sure that
    the elements of the list are all distinct

    Parameters
    ---------
    l: iterable
    idx: iterable that generates ints (e.g. ndarray of ints)

    Returns
    -------
    a list
    """
    out = []
    deja_vu = [False for _ in l]
    for i in idx:
        to_add = cp.deepcopy(l[i]) if deja_vu[i] else l[i]
        out.append(to_add)
        deja_vu[i] = True
    return out
Returns the list [l[i] for i in idx]. When needed, objects l[i] are replaced by a copy, to make sure that the elements of the list are all distinct Parameters --------- l: iterable idx: iterable that generates ints (e.g. ndarray of ints) Returns ------- a list
python
train
23.409091
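A worked example of the copy-on-repeat behaviour above:

a, b, c = all_distinct([[1], [2]], [0, 0, 1])
# Index 0 is drawn twice: the second draw is a deep copy, so the three
# returned objects are distinct even though two compare equal.
print(a == b, a is b)   # -> True False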
Jajcus/pyxmpp2
pyxmpp2/cert.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/cert.py#L497-L534
def _decode_alt_names(self, alt_names):
    """Load SubjectAltName from an ASN.1 GeneralNames value.

    :Values:
        - `alt_names`: the SubjectAltName extension value
    :Types:
        - `alt_name`: `GeneralNames`
    """
    for alt_name in alt_names:
        tname = alt_name.getName()
        comp = alt_name.getComponent()
        if tname == "dNSName":
            key = "DNS"
            value = _decode_asn1_string(comp)
        elif tname == "uniformResourceIdentifier":
            key = "URI"
            value = _decode_asn1_string(comp)
        elif tname == "otherName":
            oid = comp.getComponentByName("type-id")
            value = comp.getComponentByName("value")
            if oid == XMPPADDR_OID:
                key = "XmppAddr"
                value = der_decoder.decode(value, asn1Spec=UTF8String())[0]
                value = _decode_asn1_string(value)
            elif oid == SRVNAME_OID:
                key = "SRVName"
                value = der_decoder.decode(value, asn1Spec=IA5String())[0]
                value = _decode_asn1_string(value)
            else:
                logger.debug("Unknown other name: {0}".format(oid))
                continue
        else:
            logger.debug("Unsupported general name: {0}".format(tname))
            continue
        self.alt_names[key].append(value)
Load SubjectAltName from an ASN.1 GeneralNames value. :Values: - `alt_names`: the SubjectAltName extension value :Types: - `alt_name`: `GeneralNames`
python
valid
41.868421
inveniosoftware/invenio-indexer
invenio_indexer/api.py
https://github.com/inveniosoftware/invenio-indexer/blob/1460aa8976b449d9a3a99d356322b158e9be6f80/invenio_indexer/api.py#L135-L146
def delete(self, record):
    """Delete a record.

    :param record: Record instance.
    """
    index, doc_type = self.record_to_index(record)
    return self.client.delete(
        id=str(record.id),
        index=index,
        doc_type=doc_type,
    )
Delete a record. :param record: Record instance.
python
train
23.583333
cstockton/py-gensend
gensend/providers/common.py
https://github.com/cstockton/py-gensend/blob/8c8e911f8e8c386bea42967350beb4636fc19240/gensend/providers/common.py#L161-L167
def types(self, *args):
    """Used for debugging, returns type of each arg.
    TYPES,ARG_1,...,ARG_N

    %{TYPES:A,...,10} -> 'str(A), str(B), ..., int(10)'
    """
    return ', '.join(['{0}({1})'.format(type(arg).__name__, arg)
                      for arg in args])
Used for debugging, returns type of each arg. TYPES,ARG_1,...,ARG_N %{TYPES:A,...,10} -> 'str(A), str(B), ..., int(10)'
python
train
37.857143
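A usage sketch, with `p` assumed to be an instance of the provider class that defines this method:

p.types('A', 10, 1.5)   # -> 'str(A), int(10), float(1.5)'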
django-fluent/django-fluent-contents
fluent_contents/plugins/oembeditem/backend.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/plugins/oembeditem/backend.py#L47-L73
def _build_provider_list():
    """
    Construct the provider registry, using the app settings.
    """
    registry = None
    if appsettings.FLUENT_OEMBED_SOURCE == 'basic':
        registry = bootstrap_basic()
    elif appsettings.FLUENT_OEMBED_SOURCE == 'embedly':
        params = {}
        if appsettings.MICAWBER_EMBEDLY_KEY:
            params['key'] = appsettings.MICAWBER_EMBEDLY_KEY
        registry = bootstrap_embedly(**params)
    elif appsettings.FLUENT_OEMBED_SOURCE == 'noembed':
        registry = bootstrap_noembed(nowrap=1)
    elif appsettings.FLUENT_OEMBED_SOURCE == 'list':
        # Fill list manually in the settings, e.g. to have a fixed set of supported secure providers.
        registry = ProviderRegistry()
        for regex, provider in appsettings.FLUENT_OEMBED_PROVIDER_LIST:
            registry.register(regex, Provider(provider))
    else:
        raise ImproperlyConfigured("Invalid value of FLUENT_OEMBED_SOURCE, only 'basic', 'list', 'noembed' or 'embedly' is supported.")

    # Add any extra providers defined in the settings
    for regex, provider in appsettings.FLUENT_OEMBED_EXTRA_PROVIDERS:
        registry.register(regex, Provider(provider))

    return registry
Construct the provider registry, using the app settings.
python
train
43.925926
dbcli/cli_helpers
cli_helpers/config.py
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L119-L122
def system_config_files(self):
    """Get a list of absolute paths to the system config files."""
    return [os.path.join(f, self.filename)
            for f in get_system_config_dirs(self.app_name, self.app_author)]
Get a list of absolute paths to the system config files.
python
test
55.75
ladybug-tools/ladybug
ladybug/designday.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L925-L941
def dew_point(self, db):
    """Get the dew point (C), which is constant throughout the day (except at saturation).

    args:
        db: The maximum dry bulb temperature over the day.
    """
    if self._hum_type == 'Dewpoint':
        return self._hum_value
    elif self._hum_type == 'Wetbulb':
        return dew_point_from_db_wb(
            db, self._hum_value, self._barometric_pressure)
    elif self._hum_type == 'HumidityRatio':
        return dew_point_from_db_hr(
            db, self._hum_value, self._barometric_pressure)
    elif self._hum_type == 'Enthalpy':
        return dew_point_from_db_enth(
            db, self._hum_value / 1000, self._barometric_pressure)
Get the dew point (C), which is constant throughout the day (except at saturation). args: db: The maximum dry bulb temperature over the day.
python
train
42.764706
CivicSpleen/ambry
ambry/orm/file.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/file.py#L80-L87
def update(self, of):
    """Update a file from another file, for copying"""
    # The other values should be set when the file object is created with dataset.bsfile()
    for p in ('mime_type', 'preference', 'state', 'hash', 'modified', 'size',
              'contents', 'source_hash', 'data'):
        setattr(self, p, getattr(of, p))
    return self
Update a file from another file, for copying
python
train
44.125
chaoss/grimoirelab-manuscripts
manuscripts2/metrics/git.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/metrics/git.py#L94-L105
def aggregations(self):
    """
    Override parent method. Obtain list of the terms and their corresponding
    values using "terms" aggregations for the previous time period.

    :returns: a data frame containing terms and their corresponding values
    """
    prev_month_start = get_prev_month(self.end, self.query.interval_)
    self.query.since(prev_month_start)
    self.query.get_terms("author_name")
    return self.query.get_list(dataframe=True)
Override parent method. Obtain list of the terms and their corresponding values using "terms" aggregations for the previous time period. :returns: a data frame containing terms and their corresponding values
python
train
40.166667
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/SConf.py#L623-L629
def TryLink(self, text, extension):
    """Compiles the program given in text to an executable env.Program,
    using extension as file extension (e.g. '.c').  Returns 1, if
    compilation was successful, 0 otherwise. The target is saved in
    self.lastTarget (for further processing).
    """
    return self.TryBuild(self.env.Program, text, extension)
Compiles the program given in text to an executable env.Program, using extension as file extension (e.g. '.c'). Returns 1, if compilation was successful, 0 otherwise. The target is saved in self.lastTarget (for further processing).
python
train
53.571429
weijia/djangoautoconf
djangoautoconf/class_based_views/detail_with_inline_view.py
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/detail_with_inline_view.py#L34-L40
def forms_valid(self, inlines):
    """
    If the form and formsets are valid, save the associated models.
    """
    for formset in inlines:
        formset.save()
    return HttpResponseRedirect(self.get_success_url())
If the form and formsets are valid, save the associated models.
python
train
34.285714
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L292-L303
def _build_url(self, endpoint):
    """ Build complete URL """
    if not issubclass(type(self), ChatApiBase) and not self.subdomain:
        raise ZenpyException("subdomain is required when accessing the Zendesk API!")

    if self.subdomain:
        endpoint.netloc = '{}.{}'.format(self.subdomain, self.domain)
    else:
        endpoint.netloc = self.domain
    endpoint.prefix_path(self.api_prefix)
    return endpoint.build()
Build complete URL
python
train
38.083333
sdss/tree
bin/setup_tree.py
https://github.com/sdss/tree/blob/f61fe0876c138ccb61874912d4b8590dadfa835c/bin/setup_tree.py#L383-L404
def parse_args():
    ''' Parse the arguments '''
    parser = argparse.ArgumentParser(prog='setup_tree_modules', usage='%(prog)s [opts]')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
                        help='Print extra information.', default=False)
    parser.add_argument('-r', '--root', action='store', dest='root',
                        default=os.getenv('SAS_BASE_DIR'),
                        help='Override the value of $SAS_BASE_DIR.',
                        metavar='SAS_BASE_DIR')
    parser.add_argument('-t', '--treedir', action='store', dest='treedir',
                        default=os.getenv('TREE_DIR'),
                        help='Override the value of $TREE_DIR.',
                        metavar='TREE_DIR')
    parser.add_argument('-m', '--modulesdir', action='store', dest='modulesdir',
                        default=os.getenv('MODULES_DIR'),
                        help='Your modules directory', metavar='MODULES_DIR')
    parser.add_argument('-e', '--env', action='store_true', dest='env',
                        help='Create tree environment symlinks.', default=False)
    parser.add_argument('-i', '--mirror', action='store_true', dest='mirror',
                        help='Use the mirror site (SAM) instead.')
    parser.add_argument('-o', '--only', action='store', dest='only',
                        metavar='[xxx].cfg', default=None,
                        help='create links for only the specified tree config.')
    opts = parser.parse_args()
    return opts
Parse the arguments
[ "Parse", "the", "arguments" ]
python
train
62.863636
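An argparse parser like the one above can be exercised without touching sys.argv by passing an explicit argument list to parse_args; a trimmed-down sketch with two of the same flags (the config name is hypothetical):

import argparse

parser = argparse.ArgumentParser(prog='setup_tree_modules', usage='%(prog)s [opts]')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False)
parser.add_argument('-o', '--only', action='store', dest='only', metavar='[xxx].cfg', default=None)

opts = parser.parse_args(['-v', '--only', 'sdsswork.cfg'])
print(opts.verbose, opts.only)
# True sdsswork.cfg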
dshean/pygeotools
pygeotools/lib/timelib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/timelib.py#L68-L75
def strptime_fuzzy(s): """Fuzzy date string parsing Note: this returns current date if not found. If only year is provided, will return current month, day """ import dateutil.parser dt = dateutil.parser.parse(str(s), fuzzy=True) return dt
[ "def", "strptime_fuzzy", "(", "s", ")", ":", "import", "dateutil", ".", "parser", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "str", "(", "s", ")", ",", "fuzzy", "=", "True", ")", "return", "dt" ]
Fuzzy date string parsing Note: this returns current date if not found. If only year is provided, will return current month, day
[ "Fuzzy", "date", "string", "parsing" ]
python
train
32.125
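The whole function is a thin wrapper over python-dateutil's fuzzy mode, which skips unrecognized tokens and parses the date-like ones; a short demonstration (requires the third-party python-dateutil package):

import dateutil.parser

# Fuzzy mode ignores the words around the date and parses what remains.
dt = dateutil.parser.parse('DEM generated on 2015-06-01', fuzzy=True)
print(dt)
# 2015-06-01 00:00:00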
jjjake/giganews
giganews/giganews.py
https://github.com/jjjake/giganews/blob/8cfb26de6c10c482a8da348d438f0ce19e477573/giganews/giganews.py#L434-L467
def compress_and_sort_index(self): """Sort index, add header, and compress. :rtype: bool :returns: True """ idx_fname = '{name}.{date}.mbox.csv'.format(**self.__dict__) try: reader = csv.reader(open(idx_fname), dialect='excel-tab') except IOError: return False index = [x for x in reader if x] sorted_index = sorted(index, key=itemgetter(0)) gzip_idx_fname = idx_fname + '.gz' # Include UTF-8 BOM in header. header = [ '\xef\xbb\xbf#date', 'msg_id', 'from', 'newsgroups', 'subject', 'references', 'start', 'length', ] s = cStringIO.StringIO() writer = csv.writer(s, dialect='excel-tab') writer.writerow(header) for line in sorted_index: writer.writerow(line) compressed_index = inline_compress_chunk(s.getvalue()) s.close() with open(gzip_idx_fname, 'ab') as fp: fp.write(compressed_index) os.remove(idx_fname) return True
[ "def", "compress_and_sort_index", "(", "self", ")", ":", "idx_fname", "=", "'{name}.{date}.mbox.csv'", ".", "format", "(", "*", "*", "self", ".", "__dict__", ")", "try", ":", "reader", "=", "csv", ".", "reader", "(", "open", "(", "idx_fname", ")", ",", "dialect", "=", "'excel-tab'", ")", "except", "IOError", ":", "return", "False", "index", "=", "[", "x", "for", "x", "in", "reader", "if", "x", "]", "sorted_index", "=", "sorted", "(", "index", ",", "key", "=", "itemgetter", "(", "0", ")", ")", "gzip_idx_fname", "=", "idx_fname", "+", "'.gz'", "# Include UTF-8 BOM in header.", "header", "=", "[", "'\\xef\\xbb\\xbf#date'", ",", "'msg_id'", ",", "'from'", ",", "'newsgroups'", ",", "'subject'", ",", "'references'", ",", "'start'", ",", "'length'", ",", "]", "s", "=", "cStringIO", ".", "StringIO", "(", ")", "writer", "=", "csv", ".", "writer", "(", "s", ",", "dialect", "=", "'excel-tab'", ")", "writer", ".", "writerow", "(", "header", ")", "for", "line", "in", "sorted_index", ":", "writer", ".", "writerow", "(", "line", ")", "compressed_index", "=", "inline_compress_chunk", "(", "s", ".", "getvalue", "(", ")", ")", "s", ".", "close", "(", ")", "with", "open", "(", "gzip_idx_fname", ",", "'ab'", ")", "as", "fp", ":", "fp", ".", "write", "(", "compressed_index", ")", "os", ".", "remove", "(", "idx_fname", ")", "return", "True" ]
Sort index, add header, and compress. :rtype: bool :returns: True
[ "Sort", "index", "add", "header", "and", "compress", "." ]
python
train
30.647059
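The inline_compress_chunk helper above is project-specific; a rough stand-in using the standard library's gzip module, offered purely as an assumption about what it does (gzip members can be appended to a .gz file and still decompress as one stream):

import gzip

def inline_compress_chunk(chunk, level=9):
    # Compress one chunk so it can be appended to an existing .gz file.
    if isinstance(chunk, str):
        chunk = chunk.encode('utf-8')
    return gzip.compress(chunk, compresslevel=level)

print(len(inline_compress_chunk('a\tb\tc\n' * 100)))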
happyleavesaoc/aoc-mgz
mgz/util.py
https://github.com/happyleavesaoc/aoc-mgz/blob/13fc379cc062d7640bfa028eed9c0d45d37a7b2b/mgz/util.py#L138-L170
def _parse(self, stream, context, path): """Parse until the end of objects data.""" num_players = context._._._.replay.num_players start = stream.tell() # Have to read everything to be able to use find() read_bytes = stream.read() # Try to find the first marker, a portion of the next player structure marker_up14 = read_bytes.find(b"\x16\xc6\x00\x00\x00\x21") marker_up15 = read_bytes.find(b"\x16\xf0\x00\x00\x00\x21") marker = -1 if marker_up14 > 0 and marker_up15 < 0: marker = marker_up14 elif marker_up15 > 0 and marker_up14 < 0: marker = marker_up15 # If it exists, we're not on the last player yet if marker > 0: # Backtrack through the player name count = 0 while struct.unpack("<H", read_bytes[marker-2:marker])[0] != count: marker -= 1 count += 1 # Backtrack through the rest of the next player structure backtrack = 43 + num_players # Otherwise, this is the last player else: # Search for the scenario header marker = read_bytes.find(b"\xf6\x28\x9c\x3f") # Backtrack through the achievements and initial structure footer backtrack = ((1817 * (num_players - 1)) + 4 + 19) # Seek to the position we found end = start + marker - backtrack stream.seek(end) return end
[ "def", "_parse", "(", "self", ",", "stream", ",", "context", ",", "path", ")", ":", "num_players", "=", "context", ".", "_", ".", "_", ".", "_", ".", "replay", ".", "num_players", "start", "=", "stream", ".", "tell", "(", ")", "# Have to read everything to be able to use find()", "read_bytes", "=", "stream", ".", "read", "(", ")", "# Try to find the first marker, a portion of the next player structure", "marker_up14", "=", "read_bytes", ".", "find", "(", "b\"\\x16\\xc6\\x00\\x00\\x00\\x21\"", ")", "marker_up15", "=", "read_bytes", ".", "find", "(", "b\"\\x16\\xf0\\x00\\x00\\x00\\x21\"", ")", "marker", "=", "-", "1", "if", "marker_up14", ">", "0", "and", "marker_up15", "<", "0", ":", "marker", "=", "marker_up14", "elif", "marker_up15", ">", "0", "and", "marker_up14", "<", "0", ":", "marker", "=", "marker_up15", "# If it exists, we're not on the last player yet", "if", "marker", ">", "0", ":", "# Backtrack through the player name", "count", "=", "0", "while", "struct", ".", "unpack", "(", "\"<H\"", ",", "read_bytes", "[", "marker", "-", "2", ":", "marker", "]", ")", "[", "0", "]", "!=", "count", ":", "marker", "-=", "1", "count", "+=", "1", "# Backtrack through the rest of the next player structure", "backtrack", "=", "43", "+", "num_players", "# Otherwise, this is the last player", "else", ":", "# Search for the scenario header", "marker", "=", "read_bytes", ".", "find", "(", "b\"\\xf6\\x28\\x9c\\x3f\"", ")", "# Backtrack through the achievements and initial structure footer", "backtrack", "=", "(", "(", "1817", "*", "(", "num_players", "-", "1", ")", ")", "+", "4", "+", "19", ")", "# Seek to the position we found", "end", "=", "start", "+", "marker", "-", "backtrack", "stream", ".", "seek", "(", "end", ")", "return", "end" ]
Parse until the end of objects data.
[ "Parse", "until", "the", "end", "of", "objects", "data", "." ]
python
train
44.181818
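The marker hunt in _parse is plain bytes.find plus a little-endian unpack when backtracking through the player name; the two primitives in isolation, on a toy buffer:

import struct

read_bytes = b'\x00\x01\x16\xc6\x00\x00\x00\x21tail'  # toy buffer, not real replay data
marker = read_bytes.find(b'\x16\xc6\x00\x00\x00\x21')
print(marker)                                                 # 2
print(struct.unpack('<H', read_bytes[marker - 2:marker])[0])  # 256 (0x0100 little-endian)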
CalebBell/fluids
fluids/geometry.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L1290-L1333
def a_torispherical(D, f, k):
    r'''Calculates depth of a torispherical head according to [1]_.

    .. math::
        a = a_1 + a_2

    .. math::
        \alpha = \sin^{-1}\frac{1-2k}{2(f-k)}

    .. math::
        a_1 = fD(1-\cos\alpha)

    .. math::
        a_2 = kD\cos\alpha

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    f : float
        Dish-radius parameter; fD = dish radius [1/m]
    k : float
        Knuckle-radius parameter; kD = knuckle radius [1/m]

    Returns
    -------
    a : float
        Depth of head [m]

    Examples
    --------
    Example from [1]_.

    >>> a_torispherical(D=96., f=0.9, k=0.2)
    25.684268924767125

    References
    ----------
    .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
       2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
    alpha = asin((1-2*k)/(2*(f-k)))
    a1 = f*D*(1 - cos(alpha))
    a2 = k*D*cos(alpha)
    return a1 + a2
[ "def", "a_torispherical", "(", "D", ",", "f", ",", "k", ")", ":", "alpha", "=", "asin", "(", "(", "1", "-", "2", "*", "k", ")", "/", "(", "2", "*", "(", "f", "-", "k", ")", ")", ")", "a1", "=", "f", "*", "D", "*", "(", "1", "-", "cos", "(", "alpha", ")", ")", "a2", "=", "k", "*", "D", "*", "cos", "(", "alpha", ")", "return", "a1", "+", "a2" ]
r'''Calculates depth of a torispherical head according to [1]_.

.. math::
    a = a_1 + a_2

.. math::
    \alpha = \sin^{-1}\frac{1-2k}{2(f-k)}

.. math::
    a_1 = fD(1-\cos\alpha)

.. math::
    a_2 = kD\cos\alpha

Parameters
----------
D : float
    Diameter of the main cylindrical section, [m]
f : float
    Dish-radius parameter; fD = dish radius [1/m]
k : float
    Knuckle-radius parameter; kD = knuckle radius [1/m]

Returns
-------
a : float
    Depth of head [m]

Examples
--------
Example from [1]_.

>>> a_torispherical(D=96., f=0.9, k=0.2)
25.684268924767125

References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
   2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
[ "r", "Calculates", "depth", "of", "a", "torispherical", "head", "according", "to", "[", "1", "]", "_", "." ]
python
train
21.5
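The formula is short enough to verify directly against the docstring's worked example:

from math import asin, cos

# Same three steps as above: alpha, then the spherical and knuckle contributions.
D, f, k = 96., 0.9, 0.2
alpha = asin((1 - 2*k) / (2*(f - k)))
a = f*D*(1 - cos(alpha)) + k*D*cos(alpha)
print(a)
# 25.684268924767125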
mitsei/dlkit
dlkit/records/osid/base_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L1783-L1930
def _update_object_map(self, obj_map): """loop through all the keys in self.my_osid_object._my_map, and see if any of them contain text like "AssetContent:<label>" If so, assume it is markup (?), replace the string with asset_content.get_url()""" # TODO: Look for <img> tags to add in alt-text and description # TODO: Look for <video> and <audio> tags to add in description, transcripts and vtt files? try: super(FilesRecord, self)._update_object_map(obj_map) except AttributeError: pass bypass_asset_content_authorization = False acls = None try: config = self.my_osid_object._runtime.get_configuration() parameter_id = Id('parameter:bypassAuthorizationForFilesRecordAssetContentLookup@json') bypass_asset_content_authorization = config.get_value_by_parameter(parameter_id).get_boolean_value() except (AttributeError, KeyError, NotFound): pass def replace_url_in_display_text(potential_display_text, dict_files_map): if ('text' in potential_display_text and potential_display_text['text'] is not None and 'AssetContent' in potential_display_text['text']): # assume markup? Wrap this in case it's not a valid XML doc # with a single parent object wrapped_text = '<wrapper>{0}</wrapper'.format(potential_display_text['text']) soup = BeautifulSoup(wrapped_text, 'xml') media_file_elements = soup.find_all(src=media_regex) media_file_elements += soup.find_all(data=media_regex) for media_file_element in media_file_elements: if 'src' in media_file_element.attrs: media_key = 'src' else: media_key = 'data' if ':' not in media_file_element[media_key]: continue media_label = media_file_element[media_key].split(':')[-1] if media_label in dict_files_map: asset_id = Id(dict_files_map[media_label]['assetId']) ac_id = Id(dict_files_map[media_label]['assetContentId']) if bypass_asset_content_authorization: ac = acls.get_asset_content(ac_id) else: ac = self._get_asset_content(asset_id=asset_id, asset_content_id=ac_id) if media_file_element.name == 'track': try: if not ac.has_files(): continue except AttributeError: # non-multi-language VTT files media_file_element[media_key] = ac.get_url() else: media_file_element[media_key] = ac.get_url() media_file_element['srclang'] = ac.get_vtt_locale_identifier().lower()[0:2] media_file_element['label'] = ac.get_vtt_locale_label() elif media_file_element.name == 'transcript': if not ac.has_files(): continue transcript_template_path = '{0}/osid/transcript_template.xml'.format(ABS_PATH) with codecs.open(transcript_template_path, 'r', encoding='utf-8') as template_file: template = template_file.read().format(media_label, ac.get_transcript_locale_label().lower(), ac.get_transcript_locale_label().title(), ac.get_transcript_text()) new_template_tag = BeautifulSoup(template, 'xml').div # media_file_element.replace_with(new_template_tag) p_parent = None for parent in media_file_element.parents: if parent is not None and parent.name != 'p': # insert the transcript after the top p tag # so that we don't create invalid HTML by nesting # <div> and <aside> inside of a <p> tag p_parent.insert_after(new_template_tag) break p_parent = parent media_file_element.extract() else: media_file_element[media_key] = ac.get_url() # check for alt-tags if 'alt' in media_file_element.attrs: alt_tag_label = media_file_element['alt'].split(':')[-1] if alt_tag_label in dict_files_map: asset_id = Id(dict_files_map[alt_tag_label]['assetId']) ac_id = Id(dict_files_map[alt_tag_label]['assetContentId']) if bypass_asset_content_authorization: ac = acls.get_asset_content(ac_id) else: ac = 
self._get_asset_content(asset_id=asset_id, asset_content_id=ac_id) try: media_file_element['alt'] = ac.get_alt_text().text except AttributeError: pass potential_display_text['text'] = soup.wrapper.renderContents().decode('utf-8') else: for new_key, value in potential_display_text.items(): if isinstance(value, list): new_files_map = dict_files_map if 'fileIds' in potential_display_text: new_files_map = potential_display_text['fileIds'] potential_display_text[new_key] = check_list_children(value, new_files_map) return potential_display_text def check_list_children(potential_text_list, list_files_map): updated_list = [] for child in potential_text_list: if isinstance(child, dict): files_map = list_files_map if 'fileIds' in child: files_map = child['fileIds'] updated_list.append(replace_url_in_display_text(child, files_map)) elif isinstance(child, list): updated_list.append(check_list_children(child, list_files_map)) else: updated_list.append(child) return updated_list if bypass_asset_content_authorization: # One assumption is that the object's catalogId can be used # as the repositoryId manager = self.my_osid_object._get_provider_manager('REPOSITORY') try: if self.my_osid_object._proxy is not None: acls = manager.get_asset_content_lookup_session(proxy=self.my_osid_object._proxy) else: acls = manager.get_asset_content_lookup_session() except AttributeError: pass else: acls.use_federated_repository_view() media_regex = re.compile('(AssetContent:)') original_files_map = {} if 'fileIds' in obj_map: original_files_map = obj_map['fileIds'] for key, data in obj_map.items(): if isinstance(data, dict): obj_map[key] = replace_url_in_display_text(data, original_files_map) elif isinstance(data, list): obj_map[key] = check_list_children(data, original_files_map)
[ "def", "_update_object_map", "(", "self", ",", "obj_map", ")", ":", "# TODO: Look for <img> tags to add in alt-text and description", "# TODO: Look for <video> and <audio> tags to add in description, transcripts and vtt files?", "try", ":", "super", "(", "FilesRecord", ",", "self", ")", ".", "_update_object_map", "(", "obj_map", ")", "except", "AttributeError", ":", "pass", "bypass_asset_content_authorization", "=", "False", "acls", "=", "None", "try", ":", "config", "=", "self", ".", "my_osid_object", ".", "_runtime", ".", "get_configuration", "(", ")", "parameter_id", "=", "Id", "(", "'parameter:bypassAuthorizationForFilesRecordAssetContentLookup@json'", ")", "bypass_asset_content_authorization", "=", "config", ".", "get_value_by_parameter", "(", "parameter_id", ")", ".", "get_boolean_value", "(", ")", "except", "(", "AttributeError", ",", "KeyError", ",", "NotFound", ")", ":", "pass", "def", "replace_url_in_display_text", "(", "potential_display_text", ",", "dict_files_map", ")", ":", "if", "(", "'text'", "in", "potential_display_text", "and", "potential_display_text", "[", "'text'", "]", "is", "not", "None", "and", "'AssetContent'", "in", "potential_display_text", "[", "'text'", "]", ")", ":", "# assume markup? Wrap this in case it's not a valid XML doc", "# with a single parent object", "wrapped_text", "=", "'<wrapper>{0}</wrapper'", ".", "format", "(", "potential_display_text", "[", "'text'", "]", ")", "soup", "=", "BeautifulSoup", "(", "wrapped_text", ",", "'xml'", ")", "media_file_elements", "=", "soup", ".", "find_all", "(", "src", "=", "media_regex", ")", "media_file_elements", "+=", "soup", ".", "find_all", "(", "data", "=", "media_regex", ")", "for", "media_file_element", "in", "media_file_elements", ":", "if", "'src'", "in", "media_file_element", ".", "attrs", ":", "media_key", "=", "'src'", "else", ":", "media_key", "=", "'data'", "if", "':'", "not", "in", "media_file_element", "[", "media_key", "]", ":", "continue", "media_label", "=", "media_file_element", "[", "media_key", "]", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "if", "media_label", "in", "dict_files_map", ":", "asset_id", "=", "Id", "(", "dict_files_map", "[", "media_label", "]", "[", "'assetId'", "]", ")", "ac_id", "=", "Id", "(", "dict_files_map", "[", "media_label", "]", "[", "'assetContentId'", "]", ")", "if", "bypass_asset_content_authorization", ":", "ac", "=", "acls", ".", "get_asset_content", "(", "ac_id", ")", "else", ":", "ac", "=", "self", ".", "_get_asset_content", "(", "asset_id", "=", "asset_id", ",", "asset_content_id", "=", "ac_id", ")", "if", "media_file_element", ".", "name", "==", "'track'", ":", "try", ":", "if", "not", "ac", ".", "has_files", "(", ")", ":", "continue", "except", "AttributeError", ":", "# non-multi-language VTT files", "media_file_element", "[", "media_key", "]", "=", "ac", ".", "get_url", "(", ")", "else", ":", "media_file_element", "[", "media_key", "]", "=", "ac", ".", "get_url", "(", ")", "media_file_element", "[", "'srclang'", "]", "=", "ac", ".", "get_vtt_locale_identifier", "(", ")", ".", "lower", "(", ")", "[", "0", ":", "2", "]", "media_file_element", "[", "'label'", "]", "=", "ac", ".", "get_vtt_locale_label", "(", ")", "elif", "media_file_element", ".", "name", "==", "'transcript'", ":", "if", "not", "ac", ".", "has_files", "(", ")", ":", "continue", "transcript_template_path", "=", "'{0}/osid/transcript_template.xml'", ".", "format", "(", "ABS_PATH", ")", "with", "codecs", ".", "open", "(", "transcript_template_path", ",", "'r'", ",", "encoding", "=", 
"'utf-8'", ")", "as", "template_file", ":", "template", "=", "template_file", ".", "read", "(", ")", ".", "format", "(", "media_label", ",", "ac", ".", "get_transcript_locale_label", "(", ")", ".", "lower", "(", ")", ",", "ac", ".", "get_transcript_locale_label", "(", ")", ".", "title", "(", ")", ",", "ac", ".", "get_transcript_text", "(", ")", ")", "new_template_tag", "=", "BeautifulSoup", "(", "template", ",", "'xml'", ")", ".", "div", "# media_file_element.replace_with(new_template_tag)", "p_parent", "=", "None", "for", "parent", "in", "media_file_element", ".", "parents", ":", "if", "parent", "is", "not", "None", "and", "parent", ".", "name", "!=", "'p'", ":", "# insert the transcript after the top p tag", "# so that we don't create invalid HTML by nesting", "# <div> and <aside> inside of a <p> tag", "p_parent", ".", "insert_after", "(", "new_template_tag", ")", "break", "p_parent", "=", "parent", "media_file_element", ".", "extract", "(", ")", "else", ":", "media_file_element", "[", "media_key", "]", "=", "ac", ".", "get_url", "(", ")", "# check for alt-tags", "if", "'alt'", "in", "media_file_element", ".", "attrs", ":", "alt_tag_label", "=", "media_file_element", "[", "'alt'", "]", ".", "split", "(", "':'", ")", "[", "-", "1", "]", "if", "alt_tag_label", "in", "dict_files_map", ":", "asset_id", "=", "Id", "(", "dict_files_map", "[", "alt_tag_label", "]", "[", "'assetId'", "]", ")", "ac_id", "=", "Id", "(", "dict_files_map", "[", "alt_tag_label", "]", "[", "'assetContentId'", "]", ")", "if", "bypass_asset_content_authorization", ":", "ac", "=", "acls", ".", "get_asset_content", "(", "ac_id", ")", "else", ":", "ac", "=", "self", ".", "_get_asset_content", "(", "asset_id", "=", "asset_id", ",", "asset_content_id", "=", "ac_id", ")", "try", ":", "media_file_element", "[", "'alt'", "]", "=", "ac", ".", "get_alt_text", "(", ")", ".", "text", "except", "AttributeError", ":", "pass", "potential_display_text", "[", "'text'", "]", "=", "soup", ".", "wrapper", ".", "renderContents", "(", ")", ".", "decode", "(", "'utf-8'", ")", "else", ":", "for", "new_key", ",", "value", "in", "potential_display_text", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "new_files_map", "=", "dict_files_map", "if", "'fileIds'", "in", "potential_display_text", ":", "new_files_map", "=", "potential_display_text", "[", "'fileIds'", "]", "potential_display_text", "[", "new_key", "]", "=", "check_list_children", "(", "value", ",", "new_files_map", ")", "return", "potential_display_text", "def", "check_list_children", "(", "potential_text_list", ",", "list_files_map", ")", ":", "updated_list", "=", "[", "]", "for", "child", "in", "potential_text_list", ":", "if", "isinstance", "(", "child", ",", "dict", ")", ":", "files_map", "=", "list_files_map", "if", "'fileIds'", "in", "child", ":", "files_map", "=", "child", "[", "'fileIds'", "]", "updated_list", ".", "append", "(", "replace_url_in_display_text", "(", "child", ",", "files_map", ")", ")", "elif", "isinstance", "(", "child", ",", "list", ")", ":", "updated_list", ".", "append", "(", "check_list_children", "(", "child", ",", "list_files_map", ")", ")", "else", ":", "updated_list", ".", "append", "(", "child", ")", "return", "updated_list", "if", "bypass_asset_content_authorization", ":", "# One assumption is that the object's catalogId can be used", "# as the repositoryId", "manager", "=", "self", ".", "my_osid_object", ".", "_get_provider_manager", "(", "'REPOSITORY'", ")", "try", ":", "if", "self", ".", "my_osid_object", ".", 
"_proxy", "is", "not", "None", ":", "acls", "=", "manager", ".", "get_asset_content_lookup_session", "(", "proxy", "=", "self", ".", "my_osid_object", ".", "_proxy", ")", "else", ":", "acls", "=", "manager", ".", "get_asset_content_lookup_session", "(", ")", "except", "AttributeError", ":", "pass", "else", ":", "acls", ".", "use_federated_repository_view", "(", ")", "media_regex", "=", "re", ".", "compile", "(", "'(AssetContent:)'", ")", "original_files_map", "=", "{", "}", "if", "'fileIds'", "in", "obj_map", ":", "original_files_map", "=", "obj_map", "[", "'fileIds'", "]", "for", "key", ",", "data", "in", "obj_map", ".", "items", "(", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "obj_map", "[", "key", "]", "=", "replace_url_in_display_text", "(", "data", ",", "original_files_map", ")", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "obj_map", "[", "key", "]", "=", "check_list_children", "(", "data", ",", "original_files_map", ")" ]
loop through all the keys in self.my_osid_object._my_map, and see if any of them contain text like "AssetContent:<label>" If so, assume it is markup (?), replace the string with asset_content.get_url()
[ "loop", "through", "all", "the", "keys", "in", "self", ".", "my_osid_object", ".", "_my_map", "and", "see", "if", "any", "of", "them", "contain", "text", "like", "AssetContent", ":", "<label", ">", "If", "so", "assume", "it", "is", "markup", "(", "?", ")", "replace", "the", "string", "with", "asset_content", ".", "get_url", "()" ]
python
train
54.587838
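The core move in _update_object_map is finding src/data attributes that match 'AssetContent:<label>' and swapping in a resolved URL. A stripped-down sketch with a hypothetical label-to-URL map in place of the asset-content lookup session (requires beautifulsoup4 plus lxml for the 'xml' parser):

import re
from bs4 import BeautifulSoup

url_map = {'diagram1': 'https://example.org/assets/diagram1.png'}  # hypothetical lookup
media_regex = re.compile('(AssetContent:)')

text = '<p><img src="AssetContent:diagram1"/></p>'
soup = BeautifulSoup('<wrapper>{0}</wrapper>'.format(text), 'xml')
for element in soup.find_all(src=media_regex):
    label = element['src'].split(':')[-1]
    if label in url_map:
        element['src'] = url_map[label]
print(soup.wrapper.renderContents().decode('utf-8'))
# <p><img src="https://example.org/assets/diagram1.png"/></p>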
trustrachel/Flask-FeatureFlags
flask_featureflags/__init__.py
https://github.com/trustrachel/Flask-FeatureFlags/blob/bf32d07c8ce72adc009619ef04e666c51736f80c/flask_featureflags/__init__.py#L48-L76
def AppConfigFlagHandler(feature=None): """ This is the default handler. It checks for feature flags in the current app's configuration. For example, to have 'unfinished_feature' hidden in production but active in development: config.py class ProductionConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : False, } class DevelopmentConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : True, } """ if not current_app: log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature)) return False try: return current_app.config[FEATURE_FLAGS_CONFIG][feature] except (AttributeError, KeyError): raise NoFeatureFlagFound()
[ "def", "AppConfigFlagHandler", "(", "feature", "=", "None", ")", ":", "if", "not", "current_app", ":", "log", ".", "warn", "(", "u\"Got a request to check for {feature} but we're outside the request context. Returning False\"", ".", "format", "(", "feature", "=", "feature", ")", ")", "return", "False", "try", ":", "return", "current_app", ".", "config", "[", "FEATURE_FLAGS_CONFIG", "]", "[", "feature", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "raise", "NoFeatureFlagFound", "(", ")" ]
This is the default handler. It checks for feature flags in the current app's configuration. For example, to have 'unfinished_feature' hidden in production but active in development: config.py class ProductionConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : False, } class DevelopmentConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : True, }
[ "This", "is", "the", "default", "handler", ".", "It", "checks", "for", "feature", "flags", "in", "the", "current", "app", "s", "configuration", "." ]
python
train
25.310345
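Exercising the handler needs an application context plus the feature-flag mapping; a minimal sketch (the config key mirrors the snippet's FEATURE_FLAGS_CONFIG constant, assumed here to be the string 'FEATURE_FLAGS'):

from flask import Flask, current_app

app = Flask(__name__)
app.config['FEATURE_FLAGS'] = {'unfinished_feature': True}

def is_active(feature):
    # Same lookup path as the handler, minus the logging and custom exception.
    return current_app.config.get('FEATURE_FLAGS', {}).get(feature, False)

with app.app_context():
    print(is_active('unfinished_feature'))  # True
    print(is_active('missing'))             # False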
JarryShaw/PyPCAPKit
src/protocols/internet/ipv4.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/ipv4.py#L576-L658
def _read_mode_ts(self, size, kind): """Read Time Stamp option. Positional arguments: * size - int, length of option * kind - int, 68 (TS) Returns: * dict -- extracted Time Stamp (TS) option Structure of Timestamp (TS) option [RFC 791]: +--------+--------+--------+--------+ |01000100| length | pointer|oflw|flg| +--------+--------+--------+--------+ | internet address | +--------+--------+--------+--------+ | timestamp | +--------+--------+--------+--------+ | . | . . Octets Bits Name Description 0 0 ip.ts.kind Kind (25) 0 0 ip.ts.type.copy Copied Flag (0) 0 1 ip.ts.type.class Option Class (0) 0 3 ip.ts.type.number Option Number (25) 1 8 ip.ts.length Length (≤40) 2 16 ip.ts.pointer Pointer (≥5) 3 24 ip.ts.overflow Overflow Octets 3 28 ip.ts.flag Flag 4 32 ip.ts.ip Internet Address 8 64 ip.ts.timestamp Timestamp """ if size > 40 or size < 4: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') _tptr = self._read_unpack(1) _oflg = self._read_binary(1) _oflw = int(_oflg[:4], base=2) _flag = int(_oflg[4:], base=2) if _tptr < 5: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') data = dict( kind=kind, type=self._read_opt_type(kind), length=size, pointer=_tptr, overflow=_oflw, flag=_flag, ) endpoint = min(_tptr, size) if _flag == 0: if (size - 4) % 4 != 0: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') counter = 5 timestamp = list() while counter < endpoint: counter += 4 time = self._read_unpack(4, lilendian=True) timestamp.append(datetime.datetime.fromtimestamp(time)) data['timestamp'] = timestamp or None elif _flag == 1 or _flag == 3: if (size - 4) % 8 != 0: raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') counter = 5 ipaddress = list() timestamp = list() while counter < endpoint: counter += 8 ipaddress.append(self._read_ipv4_addr()) time = self._read_unpack(4, lilendian=True) timestamp.append(datetime.datetime.fromtimestamp(time)) data['ip'] = ipaddress or None data['timestamp'] = timestamp or None else: data['data'] = self._read_fileng(size - 4) or None return data
[ "def", "_read_mode_ts", "(", "self", ",", "size", ",", "kind", ")", ":", "if", "size", ">", "40", "or", "size", "<", "4", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Optno {kind}] invalid format'", ")", "_tptr", "=", "self", ".", "_read_unpack", "(", "1", ")", "_oflg", "=", "self", ".", "_read_binary", "(", "1", ")", "_oflw", "=", "int", "(", "_oflg", "[", ":", "4", "]", ",", "base", "=", "2", ")", "_flag", "=", "int", "(", "_oflg", "[", "4", ":", "]", ",", "base", "=", "2", ")", "if", "_tptr", "<", "5", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Optno {kind}] invalid format'", ")", "data", "=", "dict", "(", "kind", "=", "kind", ",", "type", "=", "self", ".", "_read_opt_type", "(", "kind", ")", ",", "length", "=", "size", ",", "pointer", "=", "_tptr", ",", "overflow", "=", "_oflw", ",", "flag", "=", "_flag", ",", ")", "endpoint", "=", "min", "(", "_tptr", ",", "size", ")", "if", "_flag", "==", "0", ":", "if", "(", "size", "-", "4", ")", "%", "4", "!=", "0", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Optno {kind}] invalid format'", ")", "counter", "=", "5", "timestamp", "=", "list", "(", ")", "while", "counter", "<", "endpoint", ":", "counter", "+=", "4", "time", "=", "self", ".", "_read_unpack", "(", "4", ",", "lilendian", "=", "True", ")", "timestamp", ".", "append", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "time", ")", ")", "data", "[", "'timestamp'", "]", "=", "timestamp", "or", "None", "elif", "_flag", "==", "1", "or", "_flag", "==", "3", ":", "if", "(", "size", "-", "4", ")", "%", "8", "!=", "0", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Optno {kind}] invalid format'", ")", "counter", "=", "5", "ipaddress", "=", "list", "(", ")", "timestamp", "=", "list", "(", ")", "while", "counter", "<", "endpoint", ":", "counter", "+=", "8", "ipaddress", ".", "append", "(", "self", ".", "_read_ipv4_addr", "(", ")", ")", "time", "=", "self", ".", "_read_unpack", "(", "4", ",", "lilendian", "=", "True", ")", "timestamp", ".", "append", "(", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "time", ")", ")", "data", "[", "'ip'", "]", "=", "ipaddress", "or", "None", "data", "[", "'timestamp'", "]", "=", "timestamp", "or", "None", "else", ":", "data", "[", "'data'", "]", "=", "self", ".", "_read_fileng", "(", "size", "-", "4", ")", "or", "None", "return", "data" ]
Read Time Stamp option. Positional arguments: * size - int, length of option * kind - int, 68 (TS) Returns: * dict -- extracted Time Stamp (TS) option Structure of Timestamp (TS) option [RFC 791]: +--------+--------+--------+--------+ |01000100| length | pointer|oflw|flg| +--------+--------+--------+--------+ | internet address | +--------+--------+--------+--------+ | timestamp | +--------+--------+--------+--------+ | . | . . Octets Bits Name Description 0 0 ip.ts.kind Kind (25) 0 0 ip.ts.type.copy Copied Flag (0) 0 1 ip.ts.type.class Option Class (0) 0 3 ip.ts.type.number Option Number (25) 1 8 ip.ts.length Length (≤40) 2 16 ip.ts.pointer Pointer (≥5) 3 24 ip.ts.overflow Overflow Octets 3 28 ip.ts.flag Flag 4 32 ip.ts.ip Internet Address 8 64 ip.ts.timestamp Timestamp
[ "Read", "Time", "Stamp", "option", "." ]
python
train
38.795181
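The overflow/flag octet packs two 4-bit fields; the snippet's binary-string slicing is equivalent to a shift and a mask:

octet = 0x13  # hypothetical raw byte: overflow nibble 1, flag nibble 3
oflw = octet >> 4
flag = octet & 0x0F
print(oflw, flag)
# 1 3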
deepmind/sonnet
sonnet/python/modules/basic.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/basic.py#L759-L777
def _infer_shape(self, dimensions): """Replaces the -1 wildcard in the output shape vector. This function infers the correct output shape given the input dimensions. Args: dimensions: List of input non-batch dimensions. Returns: Tuple of non-batch output dimensions. """ # Size of input n = np.prod(dimensions) # Size of output where defined m = np.prod(abs(np.array(self._shape))) # Replace wildcard v = np.array(self._shape) v[v == -1] = n // m return tuple(v)
[ "def", "_infer_shape", "(", "self", ",", "dimensions", ")", ":", "# Size of input", "n", "=", "np", ".", "prod", "(", "dimensions", ")", "# Size of output where defined", "m", "=", "np", ".", "prod", "(", "abs", "(", "np", ".", "array", "(", "self", ".", "_shape", ")", ")", ")", "# Replace wildcard", "v", "=", "np", ".", "array", "(", "self", ".", "_shape", ")", "v", "[", "v", "==", "-", "1", "]", "=", "n", "//", "m", "return", "tuple", "(", "v", ")" ]
Replaces the -1 wildcard in the output shape vector. This function infers the correct output shape given the input dimensions. Args: dimensions: List of input non-batch dimensions. Returns: Tuple of non-batch output dimensions.
[ "Replaces", "the", "-", "1", "wildcard", "in", "the", "output", "shape", "vector", "." ]
python
train
26.894737
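The wildcard replacement is easy to check against a concrete reshape target; a standalone copy of the same arithmetic:

import numpy as np

def infer_shape(dimensions, shape):
    n = np.prod(dimensions)            # total input size
    m = np.prod(abs(np.array(shape)))  # product of fixed output dims (the -1 counts as 1)
    v = np.array(shape)
    v[v == -1] = n // m                # fill in the wildcard
    return tuple(v)

print(infer_shape([6, 4], (3, -1)))
# (3, 8)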
joferkington/mpldatacursor
mpldatacursor/datacursor.py
https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/datacursor.py#L256-L275
def _show_annotation_box(self, event): """Update an existing box or create an annotation box for an event.""" ax = event.artist.axes # Get the pre-created annotation box for the axes or create a new one. if self.display != 'multiple': annotation = self.annotations[ax] elif event.mouseevent in self.annotations: # Avoid creating multiple datacursors for the same click event # when several artists are selected. annotation = self.annotations[event.mouseevent] else: annotation = self.annotate(ax, **self._annotation_kwargs) self.annotations[event.mouseevent] = annotation if self.display == 'single': # Hide any other annotation boxes... for ann in self.annotations.values(): ann.set_visible(False) self.update(event, annotation)
[ "def", "_show_annotation_box", "(", "self", ",", "event", ")", ":", "ax", "=", "event", ".", "artist", ".", "axes", "# Get the pre-created annotation box for the axes or create a new one.", "if", "self", ".", "display", "!=", "'multiple'", ":", "annotation", "=", "self", ".", "annotations", "[", "ax", "]", "elif", "event", ".", "mouseevent", "in", "self", ".", "annotations", ":", "# Avoid creating multiple datacursors for the same click event", "# when several artists are selected.", "annotation", "=", "self", ".", "annotations", "[", "event", ".", "mouseevent", "]", "else", ":", "annotation", "=", "self", ".", "annotate", "(", "ax", ",", "*", "*", "self", ".", "_annotation_kwargs", ")", "self", ".", "annotations", "[", "event", ".", "mouseevent", "]", "=", "annotation", "if", "self", ".", "display", "==", "'single'", ":", "# Hide any other annotation boxes...", "for", "ann", "in", "self", ".", "annotations", ".", "values", "(", ")", ":", "ann", ".", "set_visible", "(", "False", ")", "self", ".", "update", "(", "event", ",", "annotation", ")" ]
Update an existing box or create an annotation box for an event.
[ "Update", "an", "existing", "box", "or", "create", "an", "annotation", "box", "for", "an", "event", "." ]
python
train
44.4
jic-dtool/dtoolcore
dtoolcore/storagebroker.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L615-L625
def post_freeze_hook(self): """Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions. This method is called at the end of the :meth:`dtoolcore.ProtoDataSet.freeze` method. In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes the temporary directory for storing item metadata fragment files. """ if os.path.isdir(self._metadata_fragments_abspath): shutil.rmtree(self._metadata_fragments_abspath)
[ "def", "post_freeze_hook", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "self", ".", "_metadata_fragments_abspath", ")", ":", "shutil", ".", "rmtree", "(", "self", ".", "_metadata_fragments_abspath", ")" ]
Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions. This method is called at the end of the :meth:`dtoolcore.ProtoDataSet.freeze` method. In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes the temporary directory for storing item metadata fragment files.
[ "Post", ":", "meth", ":", "dtoolcore", ".", "ProtoDataSet", ".", "freeze", "cleanup", "actions", "." ]
python
train
43.272727
googlefonts/fontbakery
Lib/fontbakery/profiles/universal.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/universal.py#L652-L672
def com_google_fonts_check_whitespace_ink(ttFont): """Whitespace glyphs have ink?""" from fontbakery.utils import get_glyph_name, glyph_has_ink # code-points for all "whitespace" chars: WHITESPACE_CHARACTERS = [ 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000, 0x180E, 0x200B, 0x2060, 0xFEFF ] failed = False for codepoint in WHITESPACE_CHARACTERS: g = get_glyph_name(ttFont, codepoint) if g is not None and glyph_has_ink(ttFont, g): failed = True yield FAIL, ("Glyph \"{}\" has ink." " It needs to be replaced by" " an empty glyph.").format(g) if not failed: yield PASS, "There is no whitespace glyph with ink."
[ "def", "com_google_fonts_check_whitespace_ink", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_glyph_name", ",", "glyph_has_ink", "# code-points for all \"whitespace\" chars:", "WHITESPACE_CHARACTERS", "=", "[", "0x0009", ",", "0x000A", ",", "0x000B", ",", "0x000C", ",", "0x000D", ",", "0x0020", ",", "0x0085", ",", "0x00A0", ",", "0x1680", ",", "0x2000", ",", "0x2001", ",", "0x2002", ",", "0x2003", ",", "0x2004", ",", "0x2005", ",", "0x2006", ",", "0x2007", ",", "0x2008", ",", "0x2009", ",", "0x200A", ",", "0x2028", ",", "0x2029", ",", "0x202F", ",", "0x205F", ",", "0x3000", ",", "0x180E", ",", "0x200B", ",", "0x2060", ",", "0xFEFF", "]", "failed", "=", "False", "for", "codepoint", "in", "WHITESPACE_CHARACTERS", ":", "g", "=", "get_glyph_name", "(", "ttFont", ",", "codepoint", ")", "if", "g", "is", "not", "None", "and", "glyph_has_ink", "(", "ttFont", ",", "g", ")", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Glyph \\\"{}\\\" has ink.\"", "\" It needs to be replaced by\"", "\" an empty glyph.\"", ")", ".", "format", "(", "g", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"There is no whitespace glyph with ink.\"" ]
Whitespace glyphs have ink?
[ "Whitespace", "glyphs", "have", "ink?" ]
python
train
40.285714
jpscaletti/solution
solution/utils.py
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/utils.py#L34-L76
def get_html_attrs(kwargs=None):
    """Generate HTML attributes from the provided keyword arguments.

    The output value is sorted by the passed keys, to provide
    consistent output. Because of the frequent use of the normally
    reserved keyword `class`, `classes` is used instead. Also, all
    underscores are translated to regular dashes.

    Set any property with a `True` value.

    >>> get_html_attrs({'id': 'text1', 'classes': 'myclass', 'data_id': 1, 'checked': True})
    u'class="myclass" data-id="1" id="text1" checked'

    """
    kwargs = kwargs or {}
    attrs = []
    props = []

    classes = kwargs.get('classes', '').strip()
    if classes:
        classes = ' '.join(re.split(r'\s+', classes))
        classes = to_unicode(quoteattr(classes))
        attrs.append('class=%s' % classes)
    try:
        del kwargs['classes']
    except KeyError:
        pass

    for key, value in iteritems(kwargs):
        key = key.replace('_', '-')
        key = to_unicode(key)
        if isinstance(value, bool):
            if value is True:
                props.append(key)
        else:
            value = quoteattr(Markup(value))
            attrs.append(u'%s=%s' % (key, value))

    attrs.sort()
    props.sort()
    attrs.extend(props)
    return u' '.join(attrs)
[ "def", "get_html_attrs", "(", "kwargs", "=", "None", ")", ":", "kwargs", "=", "kwargs", "or", "{", "}", "attrs", "=", "[", "]", "props", "=", "[", "]", "classes", "=", "kwargs", ".", "get", "(", "'classes'", ",", "''", ")", ".", "strip", "(", ")", "if", "classes", ":", "classes", "=", "' '", ".", "join", "(", "re", ".", "split", "(", "r'\\s+'", ",", "classes", ")", ")", "classes", "=", "to_unicode", "(", "quoteattr", "(", "classes", ")", ")", "attrs", ".", "append", "(", "'class=%s'", "%", "classes", ")", "try", ":", "del", "kwargs", "[", "'classes'", "]", "except", "KeyError", ":", "pass", "for", "key", ",", "value", "in", "iteritems", "(", "kwargs", ")", ":", "key", "=", "key", ".", "replace", "(", "'_'", ",", "'-'", ")", "key", "=", "to_unicode", "(", "key", ")", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "if", "value", "is", "True", ":", "props", ".", "append", "(", "key", ")", "else", ":", "value", "=", "quoteattr", "(", "Markup", "(", "value", ")", ")", "attrs", ".", "append", "(", "u'%s=%s'", "%", "(", "key", ",", "value", ")", ")", "attrs", ".", "sort", "(", ")", "props", ".", "sort", "(", ")", "attrs", ".", "extend", "(", "props", ")", "return", "u' '", ".", "join", "(", "attrs", ")" ]
Generate HTML attributes from the provided keyword arguments.

The output value is sorted by the passed keys, to provide consistent output. Because of the frequent use of the normally reserved keyword `class`, `classes` is used instead. Also, all underscores are translated to regular dashes.

Set any property with a `True` value.

>>> get_html_attrs({'id': 'text1', 'classes': 'myclass', 'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked'
[ "Generate", "HTML", "attributes", "from", "the", "provided", "keyword", "arguments", "." ]
python
train
29.302326
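A compact standard-library reimplementation of the same rules (sorted attributes, classes → class, underscores → dashes, bare boolean properties), offered as a sketch rather than the package's exact behavior:

from xml.sax.saxutils import quoteattr

def html_attrs(**kwargs):
    attrs, props = [], []
    classes = kwargs.pop('classes', '').strip()
    if classes:
        attrs.append('class=%s' % quoteattr(' '.join(classes.split())))
    for key, value in kwargs.items():
        key = key.replace('_', '-')
        if isinstance(value, bool):
            if value:
                props.append(key)  # bare property, e.g. `checked`
        else:
            attrs.append('%s=%s' % (key, quoteattr(str(value))))
    return ' '.join(sorted(attrs) + sorted(props))

print(html_attrs(id='text1', classes='myclass', data_id=1, checked=True))
# class="myclass" data-id="1" id="text1" checked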
MartinThoma/hwrt
hwrt/segmentation/segmentation.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/segmentation/segmentation.py#L169-L217
def get_dataset(): """Create a dataset for machine learning of segmentations. Returns ------- tuple : (X, y) where X is a list of tuples. Each tuple is a feature. y is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol') """ seg_data = "segmentation-X.npy" seg_labels = "segmentation-y.npy" # seg_ids = "segmentation-ids.npy" if os.path.isfile(seg_data) and os.path.isfile(seg_labels): X = numpy.load(seg_data) y = numpy.load(seg_labels) with open('datasets.pickle', 'rb') as f: datasets = pickle.load(f) return (X, y, datasets) datasets = get_segmented_raw_data() X, y = [], [] for i, data in enumerate(datasets): if i % 10 == 0: logging.info("[Create Dataset] i=%i/%i", i, len(datasets)) segmentation = json.loads(data['segmentation']) recording = json.loads(data['data']) X_symbol = [get_median_stroke_distance(recording)] if len([p for s in recording for p in s if p['time'] is None]) > 0: continue combis = itertools.combinations(list(range(len(recording))), 2) for strokeid1, strokeid2 in combis: stroke1 = recording[strokeid1] stroke2 = recording[strokeid2] if len(stroke1) == 0 or len(stroke2) == 0: logging.debug("stroke len 0. Skip.") continue X.append(get_stroke_features(recording, strokeid1, strokeid2) + X_symbol) same_symbol = (_get_symbol_index(strokeid1, segmentation) == _get_symbol_index(strokeid2, segmentation)) y.append(int(same_symbol)) X = numpy.array(X, dtype=numpy.float32) y = numpy.array(y, dtype=numpy.int32) numpy.save(seg_data, X) numpy.save(seg_labels, y) datasets = filter_recordings(datasets) with open('datasets.pickle', 'wb') as f: pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL) return (X, y, datasets)
[ "def", "get_dataset", "(", ")", ":", "seg_data", "=", "\"segmentation-X.npy\"", "seg_labels", "=", "\"segmentation-y.npy\"", "# seg_ids = \"segmentation-ids.npy\"", "if", "os", ".", "path", ".", "isfile", "(", "seg_data", ")", "and", "os", ".", "path", ".", "isfile", "(", "seg_labels", ")", ":", "X", "=", "numpy", ".", "load", "(", "seg_data", ")", "y", "=", "numpy", ".", "load", "(", "seg_labels", ")", "with", "open", "(", "'datasets.pickle'", ",", "'rb'", ")", "as", "f", ":", "datasets", "=", "pickle", ".", "load", "(", "f", ")", "return", "(", "X", ",", "y", ",", "datasets", ")", "datasets", "=", "get_segmented_raw_data", "(", ")", "X", ",", "y", "=", "[", "]", ",", "[", "]", "for", "i", ",", "data", "in", "enumerate", "(", "datasets", ")", ":", "if", "i", "%", "10", "==", "0", ":", "logging", ".", "info", "(", "\"[Create Dataset] i=%i/%i\"", ",", "i", ",", "len", "(", "datasets", ")", ")", "segmentation", "=", "json", ".", "loads", "(", "data", "[", "'segmentation'", "]", ")", "recording", "=", "json", ".", "loads", "(", "data", "[", "'data'", "]", ")", "X_symbol", "=", "[", "get_median_stroke_distance", "(", "recording", ")", "]", "if", "len", "(", "[", "p", "for", "s", "in", "recording", "for", "p", "in", "s", "if", "p", "[", "'time'", "]", "is", "None", "]", ")", ">", "0", ":", "continue", "combis", "=", "itertools", ".", "combinations", "(", "list", "(", "range", "(", "len", "(", "recording", ")", ")", ")", ",", "2", ")", "for", "strokeid1", ",", "strokeid2", "in", "combis", ":", "stroke1", "=", "recording", "[", "strokeid1", "]", "stroke2", "=", "recording", "[", "strokeid2", "]", "if", "len", "(", "stroke1", ")", "==", "0", "or", "len", "(", "stroke2", ")", "==", "0", ":", "logging", ".", "debug", "(", "\"stroke len 0. Skip.\"", ")", "continue", "X", ".", "append", "(", "get_stroke_features", "(", "recording", ",", "strokeid1", ",", "strokeid2", ")", "+", "X_symbol", ")", "same_symbol", "=", "(", "_get_symbol_index", "(", "strokeid1", ",", "segmentation", ")", "==", "_get_symbol_index", "(", "strokeid2", ",", "segmentation", ")", ")", "y", ".", "append", "(", "int", "(", "same_symbol", ")", ")", "X", "=", "numpy", ".", "array", "(", "X", ",", "dtype", "=", "numpy", ".", "float32", ")", "y", "=", "numpy", ".", "array", "(", "y", ",", "dtype", "=", "numpy", ".", "int32", ")", "numpy", ".", "save", "(", "seg_data", ",", "X", ")", "numpy", ".", "save", "(", "seg_labels", ",", "y", ")", "datasets", "=", "filter_recordings", "(", "datasets", ")", "with", "open", "(", "'datasets.pickle'", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "datasets", ",", "f", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")", "return", "(", "X", ",", "y", ",", "datasets", ")" ]
Create a dataset for machine learning of segmentations. Returns ------- tuple : (X, y) where X is a list of tuples. Each tuple is a feature. y is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol')
[ "Create", "a", "dataset", "for", "machine", "learning", "of", "segmentations", "." ]
python
train
40.693878
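Stroke pairs come from itertools.combinations over the stroke indices, so every unordered pair is labeled exactly once; for three strokes:

import itertools

print(list(itertools.combinations(range(3), 2)))
# [(0, 1), (0, 2), (1, 2)]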
brocade/pynos
pynos/versions/base/ras.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/ras.py#L35-L72
def enable_support_autoupload(self, **kwargs):
    """Enable or disable support autoupload.

    Args:
        enabled (bool): Is Autoupload enabled? (True, False).
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         enabled = True
        ...         output = dev.ras.enable_support_autoupload(
        ...             enabled=enabled)
        ...         enabled = False
        ...         output = dev.ras.enable_support_autoupload(
        ...             enabled=enabled)
    """
    enabled = kwargs.pop('enabled')
    callback = kwargs.pop('callback', self._callback)

    if not isinstance(enabled, bool):
        raise ValueError('%s must be `True` or `False`.' % repr(enabled))

    state_args = dict()
    autoupload_state = getattr(self._ras, 'support_autoupload_enable')
    config = autoupload_state(**state_args)

    if not enabled:
        shutdown = config.find('.//*enable')
        shutdown.set('operation', 'delete')
    return callback(config)
[ "def", "enable_support_autoupload", "(", "self", ",", "*", "*", "kwargs", ")", ":", "enabled", "=", "kwargs", ".", "pop", "(", "'enabled'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "if", "not", "isinstance", "(", "enabled", ",", "bool", ")", ":", "raise", "ValueError", "(", "'%s must be `True` or `False`.'", "%", "repr", "(", "enabled", ")", ")", "state_args", "=", "dict", "(", ")", "autoupload_state", "=", "getattr", "(", "self", ".", "_ras", ",", "'support_autoupload_enable'", ")", "config", "=", "autoupload_state", "(", "*", "*", "state_args", ")", "if", "not", "enabled", ":", "shutdown", "=", "config", ".", "find", "(", "'.//*enable'", ")", "shutdown", ".", "set", "(", "'operation'", ",", "'delete'", ")", "return", "callback", "(", "config", ")" ]
Enable or disable support autoupload.

Args:
    enabled (bool): Is Autoupload enabled? (True, False).
    callback (function): A function executed upon completion of the
        method. The only parameter passed to `callback` will be the
        ``ElementTree`` `config`.

Returns:
    Return value of `callback`.

Examples:
    >>> import pynos.device
    >>> switches = ['10.24.39.211', '10.24.39.203']
    >>> auth = ('admin', 'password')
    >>> for switch in switches:
    ...     conn = (switch, '22')
    ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
    ...         enabled = True
    ...         output = dev.ras.enable_support_autoupload(
    ...             enabled=enabled)
    ...         enabled = False
    ...         output = dev.ras.enable_support_autoupload(
    ...             enabled=enabled)
[ "Enable", "or", "disable", "support", "autoupload", "." ]
python
train
41.473684
DataBiosphere/toil
src/toil/lib/retry.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/retry.py#L144-L157
def retryable_http_error( e ):
    """
    Determine if an error encountered during an HTTP download is likely to go away if we try again.
    """
    if isinstance( e, urllib.error.HTTPError ) and e.code in (503, 408, 500):
        # The server returned one of:
        # 503 Service Unavailable
        # 408 Request Timeout
        # 500 Internal Server Error
        return True
    if isinstance( e, BadStatusLine ):
        # The server didn't return a valid response at all
        return True
    return False
[ "def", "retryable_http_error", "(", "e", ")", ":", "if", "isinstance", "(", "e", ",", "urllib", ".", "error", ".", "HTTPError", ")", "and", "e", ".", "code", "in", "(", "503", ",", "408", ",", "500", ")", ":", "# The server returned one of:", "# 503 Service Unavailable", "# 408 Request Timeout", "# 500 Internal Server Error", "return", "True", "if", "isinstance", "(", "e", ",", "BadStatusLine", ")", ":", "# The server didn't return a valid response at all", "return", "True", "return", "False" ]
Determine if an error encountered during an HTTP download is likely to go away if we try again.
[ "Determine", "if", "an", "error", "encountered", "during", "an", "HTTP", "download", "is", "likely", "to", "go", "away", "if", "we", "try", "again", "." ]
python
train
36.428571
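With the status codes as integers (HTTPError.code is an int, fixed above), the predicate's first branch can be exercised by constructing an HTTPError directly:

import urllib.error

err = urllib.error.HTTPError(url='http://example.org', code=503,
                             msg='Service Unavailable', hdrs=None, fp=None)
print(err.code in (503, 408, 500))
# True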
armet/python-armet
armet/http/response.py
https://github.com/armet/python-armet/blob/d61eca9082256cb1e7f7f3c7f2fbc4b697157de7/armet/http/response.py#L104-L106
def insert(self, name, index, value): """Insert a value at the passed index in the named header.""" return self._sequence[name].insert(index, value)
[ "def", "insert", "(", "self", ",", "name", ",", "index", ",", "value", ")", ":", "return", "self", ".", "_sequence", "[", "name", "]", ".", "insert", "(", "index", ",", "value", ")" ]
Insert a value at the passed index in the named header.
[ "Insert", "a", "value", "at", "the", "passed", "index", "in", "the", "named", "header", "." ]
python
valid
54
Tanganelli/CoAPthon3
coapthon/client/coap.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/client/coap.py#L69-L79
def close(self): """ Stop the client. """ self.stopped.set() for event in self.to_be_stopped: event.set() if self._receiver_thread is not None: self._receiver_thread.join() self._socket.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "stopped", ".", "set", "(", ")", "for", "event", "in", "self", ".", "to_be_stopped", ":", "event", ".", "set", "(", ")", "if", "self", ".", "_receiver_thread", "is", "not", "None", ":", "self", ".", "_receiver_thread", ".", "join", "(", ")", "self", ".", "_socket", ".", "close", "(", ")" ]
Stop the client.
[ "Stop", "the", "client", "." ]
python
train
24
marrow/util
marrow/util/object.py
https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/object.py#L51-L65
def merge(s, t): """Merge dictionary t into s.""" for k, v in t.items(): if isinstance(v, dict): if k not in s: s[k] = v continue s[k] = merge(s[k], v) continue s[k] = v return s
[ "def", "merge", "(", "s", ",", "t", ")", ":", "for", "k", ",", "v", "in", "t", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "if", "k", "not", "in", "s", ":", "s", "[", "k", "]", "=", "v", "continue", "s", "[", "k", "]", "=", "merge", "(", "s", "[", "k", "]", ",", "v", ")", "continue", "s", "[", "k", "]", "=", "v", "return", "s" ]
Merge dictionary t into s.
[ "Merge", "dictionary", "t", "into", "s", "." ]
python
train
17.6
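A quick check of the recursive merge: nested dicts are merged key by key, scalars are overwritten, and the left dict is mutated in place:

def merge(s, t):  # copy of the function above
    for k, v in t.items():
        if isinstance(v, dict):
            if k not in s:
                s[k] = v
                continue
            s[k] = merge(s[k], v)
            continue
        s[k] = v
    return s

s = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
t = {'db': {'port': 6432}, 'debug': True}
print(merge(s, t))
# {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}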
saltstack/salt
salt/cloud/clouds/scaleway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/scaleway.py#L171-L183
def create_node(args): ''' Create a node. ''' node = query(method='servers', args=args, http_method='POST') action = query( method='servers', server_id=node['server']['id'], command='action', args={'action': 'poweron'}, http_method='POST' ) return node
[ "def", "create_node", "(", "args", ")", ":", "node", "=", "query", "(", "method", "=", "'servers'", ",", "args", "=", "args", ",", "http_method", "=", "'POST'", ")", "action", "=", "query", "(", "method", "=", "'servers'", ",", "server_id", "=", "node", "[", "'server'", "]", "[", "'id'", "]", ",", "command", "=", "'action'", ",", "args", "=", "{", "'action'", ":", "'poweron'", "}", ",", "http_method", "=", "'POST'", ")", "return", "node" ]
Create a node.
[ "Create", "a", "node", "." ]
python
train
23.461538
innolitics/dicom-numpy
dicom_numpy/combine_slices.py
https://github.com/innolitics/dicom-numpy/blob/c870f0302276e7eaa0b66e641bacee19fe090296/dicom_numpy/combine_slices.py#L156-L177
def _validate_image_orientation(image_orientation): ''' Ensure that the image orientation is supported - The direction cosines have magnitudes of 1 (just in case) - The direction cosines are perpendicular ''' row_cosine, column_cosine, slice_cosine = _extract_cosines(image_orientation) if not _almost_zero(np.dot(row_cosine, column_cosine), 1e-4): raise DicomImportException("Non-orthogonal direction cosines: {}, {}".format(row_cosine, column_cosine)) elif not _almost_zero(np.dot(row_cosine, column_cosine), 1e-8): logger.warning("Direction cosines aren't quite orthogonal: {}, {}".format(row_cosine, column_cosine)) if not _almost_one(np.linalg.norm(row_cosine), 1e-4): raise DicomImportException("The row direction cosine's magnitude is not 1: {}".format(row_cosine)) elif not _almost_one(np.linalg.norm(row_cosine), 1e-8): logger.warning("The row direction cosine's magnitude is not quite 1: {}".format(row_cosine)) if not _almost_one(np.linalg.norm(column_cosine), 1e-4): raise DicomImportException("The column direction cosine's magnitude is not 1: {}".format(column_cosine)) elif not _almost_one(np.linalg.norm(column_cosine), 1e-8): logger.warning("The column direction cosine's magnitude is not quite 1: {}".format(column_cosine))
[ "def", "_validate_image_orientation", "(", "image_orientation", ")", ":", "row_cosine", ",", "column_cosine", ",", "slice_cosine", "=", "_extract_cosines", "(", "image_orientation", ")", "if", "not", "_almost_zero", "(", "np", ".", "dot", "(", "row_cosine", ",", "column_cosine", ")", ",", "1e-4", ")", ":", "raise", "DicomImportException", "(", "\"Non-orthogonal direction cosines: {}, {}\"", ".", "format", "(", "row_cosine", ",", "column_cosine", ")", ")", "elif", "not", "_almost_zero", "(", "np", ".", "dot", "(", "row_cosine", ",", "column_cosine", ")", ",", "1e-8", ")", ":", "logger", ".", "warning", "(", "\"Direction cosines aren't quite orthogonal: {}, {}\"", ".", "format", "(", "row_cosine", ",", "column_cosine", ")", ")", "if", "not", "_almost_one", "(", "np", ".", "linalg", ".", "norm", "(", "row_cosine", ")", ",", "1e-4", ")", ":", "raise", "DicomImportException", "(", "\"The row direction cosine's magnitude is not 1: {}\"", ".", "format", "(", "row_cosine", ")", ")", "elif", "not", "_almost_one", "(", "np", ".", "linalg", ".", "norm", "(", "row_cosine", ")", ",", "1e-8", ")", ":", "logger", ".", "warning", "(", "\"The row direction cosine's magnitude is not quite 1: {}\"", ".", "format", "(", "row_cosine", ")", ")", "if", "not", "_almost_one", "(", "np", ".", "linalg", ".", "norm", "(", "column_cosine", ")", ",", "1e-4", ")", ":", "raise", "DicomImportException", "(", "\"The column direction cosine's magnitude is not 1: {}\"", ".", "format", "(", "column_cosine", ")", ")", "elif", "not", "_almost_one", "(", "np", ".", "linalg", ".", "norm", "(", "column_cosine", ")", ",", "1e-8", ")", ":", "logger", ".", "warning", "(", "\"The column direction cosine's magnitude is not quite 1: {}\"", ".", "format", "(", "column_cosine", ")", ")" ]
Ensure that the image orientation is supported - The direction cosines have magnitudes of 1 (just in case) - The direction cosines are perpendicular
[ "Ensure", "that", "the", "image", "orientation", "is", "supported", "-", "The", "direction", "cosines", "have", "magnitudes", "of", "1", "(", "just", "in", "case", ")", "-", "The", "direction", "cosines", "are", "perpendicular" ]
python
train
59.954545
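DICOM's ImageOrientationPatient holds the row and column direction cosines as six floats, so the validation reduces to a dot product and two norms; a self-contained check for an axial orientation (the _almost_* helpers are assumed to be simple tolerance tests like these):

import numpy as np

image_orientation = [1, 0, 0, 0, 1, 0]  # axial: rows along x, columns along y
row_cosine = np.array(image_orientation[:3])
column_cosine = np.array(image_orientation[3:])

print(abs(np.dot(row_cosine, column_cosine)) < 1e-4)  # True: perpendicular
print(abs(np.linalg.norm(row_cosine) - 1) < 1e-4)     # True: unit length
print(abs(np.linalg.norm(column_cosine) - 1) < 1e-4)  # True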
PythonOptimizers/cygenja
cygenja/generator.py
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/generator.py#L178-L194
def register_filter(self, filter_name, filter_ref, force=False):
    """
    Add/register one filter.

    Args:
        filter_name (str): Filter name used inside :program:`Jinja2` tags.
        filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.
        force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.

    Note:
        The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`
    """
    if not force and (filter_name in self.filters_list()):
        self.log_warning("Filter %s already exists, ignore redefinition." % filter_name)
        return
    self.__jinja2_environment.filters[filter_name] = filter_ref
[ "def", "register_filter", "(", "self", ",", "filter_name", ",", "filter_ref", ",", "force", "=", "False", ")", ":", "if", "not", "force", "and", "(", "filter_name", "in", "self", ".", "filters_list", "(", ")", ")", ":", "self", ".", "log_warning", "(", "\"Filter %s already exists, ignore redefinition.\"", "%", "filter_name", ")", "return", "self", ".", "__jinja2_environment", ".", "filters", "[", "filter_name", "]", "=", "filter_ref" ]
Add/register one filter.

Args:
    filter_name (str): Filter name used inside :program:`Jinja2` tags.
    filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.
    force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.

Note:
    The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`
[ "Add", "/", "register", "one", "filter", "." ]
python
train
46.411765
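register_filter ultimately assigns into the wrapped Jinja2 environment's filters mapping, so its effect can be shown directly on a plain Jinja2 environment; the filter name here is hypothetical:

import jinja2

env = jinja2.Environment()
# equivalent of gen.register_filter('shout', ...): a plain dict assignment
env.filters['shout'] = lambda s: s.upper() + '!'

print(env.from_string('{{ name | shout }}').render(name='cygenja'))  # CYGENJA!

With the class above, a second register_filter('shout', ...) call without force=True only logs a warning and keeps the original filter.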
bitlabstudio/django-rapid-prototyping
rapid_prototyping/context/utils.py
https://github.com/bitlabstudio/django-rapid-prototyping/blob/fd14ab5453bd7a0c2d5b973e8d96148963b03ab0/rapid_prototyping/context/utils.py#L84-L109
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
    """
    Adds 15% overhead costs to the list of costs.

    Usage::

        from rapid_prototyping.context.utils import append_overhead_costs
        costs = [
            ....
        ]
        costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])

    :param costs: Your final list of costs.
    :param new_id: The id that this new item should get.

    """
    total_time = 0
    for item in costs:
        total_time += item['time']
    costs.append({
        'id': new_id,
        'task': 'Overhead, Bugfixes & Iterations',
        'time': total_time * overhead_percentage, },
    )
    return costs
[ "def", "append_overhead_costs", "(", "costs", ",", "new_id", ",", "overhead_percentage", "=", "0.15", ")", ":", "total_time", "=", "0", "for", "item", "in", "costs", ":", "total_time", "+=", "item", "[", "'time'", "]", "costs", ".", "append", "(", "{", "'id'", ":", "new_id", ",", "'task'", ":", "'Overhead, Bugfixes & Iterations'", ",", "'time'", ":", "total_time", "*", "overhead_percentage", ",", "}", ",", ")", "return", "costs" ]
Adds 15% overhead costs to the list of costs. Usage:: from rapid_prototyping.context.utils import append_overhead_costs costs = [ .... ] costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0]) :param costs: Your final list of costs. :param new_id: The id that this new item should get.
[ "Adds", "15%", "overhead", "costs", "to", "the", "list", "of", "costs", "." ]
python
train
25.730769
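A worked example of the overhead arithmetic, with made-up tasks and ids:

import math

costs = [
    {'id': 1, 'task': 'Design', 'time': 10.0},
    {'id': 2, 'task': 'Implementation', 'time': 30.0},
]
costs = append_overhead_costs(costs, 3)

# total_time is 10.0 + 30.0 = 40.0, so the appended item carries
# 40.0 * 0.15 = 6.0 time units
assert costs[-1]['id'] == 3
assert math.isclose(costs[-1]['time'], 6.0)

Passing overhead_percentage=0.2 would yield 8.0 for the same inputs.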
uuazed/numerapi
numerapi/numerapi.py
https://github.com/uuazed/numerapi/blob/fc9dcc53b32ede95bfda1ceeb62aec1d67d26697/numerapi/numerapi.py#L1409-L1426
def tournament_name2number(self, name): """Translate tournament name to tournament number. Args: name (str): tournament name to translate Returns: number (int): number of the tournament or `None` if unknown. Examples: >>> NumerAPI().tournament_name2number('delta') 4 >>> NumerAPI().tournament_name2number('foo') None """ tournaments = self.get_tournaments() d = {t['name']: t['tournament'] for t in tournaments} return d.get(name, None)
[ "def", "tournament_name2number", "(", "self", ",", "name", ")", ":", "tournaments", "=", "self", ".", "get_tournaments", "(", ")", "d", "=", "{", "t", "[", "'name'", "]", ":", "t", "[", "'tournament'", "]", "for", "t", "in", "tournaments", "}", "return", "d", ".", "get", "(", "name", ",", "None", ")" ]
Translate tournament name to tournament number. Args: name (str): tournament name to translate Returns: number (int): number of the tournament or `None` if unknown. Examples: >>> NumerAPI().tournament_name2number('delta') 4 >>> NumerAPI().tournament_name2number('foo') None
[ "Translate", "tournament", "name", "to", "tournament", "number", "." ]
python
train
31
sirfoga/pyhal
hal/meta/attributes.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/meta/attributes.py#L34-L49
def _find_package(self, root_package): """Finds package name of file :param root_package: root package :return: package name """ package = self.path.replace(root_package, "") if package.endswith(".py"): package = package[:-3] package = package.replace(os.path.sep, MODULE_SEP) root_package = get_folder_name(root_package) package = root_package + package # add root return package
[ "def", "_find_package", "(", "self", ",", "root_package", ")", ":", "package", "=", "self", ".", "path", ".", "replace", "(", "root_package", ",", "\"\"", ")", "if", "package", ".", "endswith", "(", "\".py\"", ")", ":", "package", "=", "package", "[", ":", "-", "3", "]", "package", "=", "package", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "MODULE_SEP", ")", "root_package", "=", "get_folder_name", "(", "root_package", ")", "package", "=", "root_package", "+", "package", "# add root", "return", "package" ]
Finds package name of file :param root_package: root package :return: package name
[ "Finds", "package", "name", "of", "file" ]
python
train
28.875
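The path-to-package mapping, recomputed standalone; MODULE_SEP and get_folder_name are stubbed here as assumptions ('.' and the basename of the root, respectively), and the paths are hypothetical POSIX-style ones:

import os

MODULE_SEP = '.'  # assumption: the real module-level constant is the dotted separator

def get_folder_name(path):
    # assumption: returns the last path component of the root package directory
    return os.path.basename(os.path.normpath(path))

root = '/home/user/pyhal'                   # hypothetical checkout
path = '/home/user/pyhal/hal/meta/attributes.py'

package = path.replace(root, '')                      # '/hal/meta/attributes.py'
package = package[:-3]                                # '/hal/meta/attributes'
package = package.replace(os.path.sep, MODULE_SEP)    # '.hal.meta.attributes'
print(get_folder_name(root) + package)                # 'pyhal.hal.meta.attributes'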
alan-turing-institute/topic-modelling-tools
topicmodels/preprocess.py
https://github.com/alan-turing-institute/topic-modelling-tools/blob/f0cf90cdd06f1072e824b446f201c7469b9de5df/topicmodels/preprocess.py#L108-L116
def stem(self): """ Stem tokens with Porter Stemmer. """ def s(tokens): return [PorterStemmer().stem(t) for t in tokens] self.stems = list(map(s, self.tokens))
[ "def", "stem", "(", "self", ")", ":", "def", "s", "(", "tokens", ")", ":", "return", "[", "PorterStemmer", "(", ")", ".", "stem", "(", "t", ")", "for", "t", "in", "tokens", "]", "self", ".", "stems", "=", "list", "(", "map", "(", "s", ",", "self", ".", "tokens", ")", ")" ]
Stem tokens with Porter Stemmer.
[ "Stem", "tokens", "with", "Porter", "Stemmer", "." ]
python
train
22.777778
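PorterStemmer here is presumably NLTK's (the usual provider of that name); if so, the per-document mapping behaves like this sketch:

from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()
tokens = [['the', 'flies', 'were', 'running'], ['tokenized', 'documents']]
stems = [[stemmer.stem(t) for t in doc] for doc in tokens]
print(stems)  # [['the', 'fli', 'were', 'run'], ['token', 'document']]

Note the record's code builds a fresh PorterStemmer per token; reusing one instance, as above, gives the same stems.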
lsbardel/python-stdnet
stdnet/odm/struct.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L708-L711
def rank(self, dte): '''The rank of a given *dte* in the timeseries''' timestamp = self.pickler.dumps(dte) return self.backend_structure().rank(timestamp)
[ "def", "rank", "(", "self", ",", "dte", ")", ":", "timestamp", "=", "self", ".", "pickler", ".", "dumps", "(", "dte", ")", "return", "self", ".", "backend_structure", "(", ")", ".", "rank", "(", "timestamp", ")" ]
The rank of a given *dte* in the timeseries
[ "The", "rank", "of", "a", "given", "*", "dte", "*", "in", "the", "timeseries" ]
python
train
44.5
misli/django-cms-articles
cms_articles/signals/title.py
https://github.com/misli/django-cms-articles/blob/d96ac77e049022deb4c70d268e4eab74d175145c/cms_articles/signals/title.py#L1-L12
def pre_save_title(instance, **kwargs): ''' Update article.languages ''' if instance.article.languages: languages = instance.article.languages.split(',') else: languages = [] if instance.language not in languages: languages.append(instance.language) instance.article.languages = ','.join(languages) instance.article._publisher_keep_state = True instance.article.save(no_signals=True)
[ "def", "pre_save_title", "(", "instance", ",", "*", "*", "kwargs", ")", ":", "if", "instance", ".", "article", ".", "languages", ":", "languages", "=", "instance", ".", "article", ".", "languages", ".", "split", "(", "','", ")", "else", ":", "languages", "=", "[", "]", "if", "instance", ".", "language", "not", "in", "languages", ":", "languages", ".", "append", "(", "instance", ".", "language", ")", "instance", ".", "article", ".", "languages", "=", "','", ".", "join", "(", "languages", ")", "instance", ".", "article", ".", "_publisher_keep_state", "=", "True", "instance", ".", "article", ".", "save", "(", "no_signals", "=", "True", ")" ]
Update article.languages
[ "Update", "article", ".", "languages" ]
python
train
36.666667
FlorianRhiem/pyGLFW
glfw/__init__.py
https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L1099-L1118
def set_window_user_pointer(window, pointer): """ Sets the user pointer of the specified window. You may pass a normal python object into this function and it will be wrapped automatically. The object will be kept in existence until the pointer is set to something else or until the window is destroyed. Wrapper for: void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer); """ data = (False, pointer) if not isinstance(pointer, ctypes.c_void_p): data = (True, pointer) # Create a void pointer for the python object pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p) window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value _window_user_data_repository[window_addr] = data _glfw.glfwSetWindowUserPointer(window, pointer)
[ "def", "set_window_user_pointer", "(", "window", ",", "pointer", ")", ":", "data", "=", "(", "False", ",", "pointer", ")", "if", "not", "isinstance", "(", "pointer", ",", "ctypes", ".", "c_void_p", ")", ":", "data", "=", "(", "True", ",", "pointer", ")", "# Create a void pointer for the python object", "pointer", "=", "ctypes", ".", "cast", "(", "ctypes", ".", "pointer", "(", "ctypes", ".", "py_object", "(", "pointer", ")", ")", ",", "ctypes", ".", "c_void_p", ")", "window_addr", "=", "ctypes", ".", "cast", "(", "ctypes", ".", "pointer", "(", "window", ")", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_long", ")", ")", ".", "contents", ".", "value", "_window_user_data_repository", "[", "window_addr", "]", "=", "data", "_glfw", ".", "glfwSetWindowUserPointer", "(", "window", ",", "pointer", ")" ]
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will be wrapped automatically. The object will be kept in existence until the pointer is set to something else or until the window is destroyed. Wrapper for: void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
[ "Sets", "the", "user", "pointer", "of", "the", "specified", "window", ".", "You", "may", "pass", "a", "normal", "python", "object", "into", "this", "function", "and", "it", "will", "be", "wrapped", "automatically", ".", "The", "object", "will", "be", "kept", "in", "existence", "until", "the", "pointer", "is", "set", "to", "something", "else", "or", "until", "the", "window", "is", "destroyed", "." ]
python
train
44.45
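A hedged round-trip sketch for the user pointer: get_window_user_pointer is pyGLFW's natural counterpart (it unwraps the stored object), and the window setup is ordinary boilerplate:

import glfw

if not glfw.init():
    raise RuntimeError('GLFW initialization failed')
window = glfw.create_window(640, 480, 'demo', None, None)
if not window:
    glfw.terminate()
    raise RuntimeError('window creation failed')

# any Python object is accepted; it is wrapped into a void pointer internally
state = {'clicks': 0}
glfw.set_window_user_pointer(window, state)

def on_mouse_button(win, button, action, mods):
    # the wrapped Python object comes back unchanged
    glfw.get_window_user_pointer(win)['clicks'] += 1

glfw.set_mouse_button_callback(window, on_mouse_button)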
pybel/pybel
src/pybel/struct/filters/node_predicates.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/node_predicates.py#L200-L205
def has_causal_out_edges(graph: BELGraph, node: BaseEntity) -> bool: """Return true if the node contains any out_edges that are causal.""" return any( data[RELATION] in CAUSAL_RELATIONS for _, _, data in graph.out_edges(node, data=True) )
[ "def", "has_causal_out_edges", "(", "graph", ":", "BELGraph", ",", "node", ":", "BaseEntity", ")", "->", "bool", ":", "return", "any", "(", "data", "[", "RELATION", "]", "in", "CAUSAL_RELATIONS", "for", "_", ",", "_", ",", "data", "in", "graph", ".", "out_edges", "(", "node", ",", "data", "=", "True", ")", ")" ]
Return true if the node contains any out_edges that are causal.
[ "Return", "true", "if", "the", "node", "contains", "any", "out_edges", "that", "are", "causal", "." ]
python
train
43.5
inbo/pyinaturalist
pyinaturalist/rest_api.py
https://github.com/inbo/pyinaturalist/blob/d380ede84bdf15eca8ccab9efefe08d2505fe6a8/pyinaturalist/rest_api.py#L146-L172
def create_observations(params: Dict[str, Dict[str, Any]], access_token: str) -> List[Dict[str, Any]]:
    """Create a single or several (if passed an array) observations.

    :param params:
    :param access_token: the access token, as returned by :func:`get_access_token()`

    :return: iNaturalist's JSON response, as a Python object
    :raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
        if it rejects the observation data (for example an observation date in the future or a latitude > 90). In
        that case the exception's `response` attribute gives details about the errors.

    allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations

    Example:

        params = {'observation':
            {'species_guess': 'Pieris rapae'},
        }

    TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, renaming
    observation to observations), but as far as I saw they are not created (while a status of 200 is returned)
    """
    response = requests.post(url="{base_url}/observations.json".format(base_url=INAT_BASE_URL),
                             json=params,
                             headers=_build_auth_header(access_token))
    response.raise_for_status()
    return response.json()
[ "def", "create_observations", "(", "params", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Any", "]", "]", ",", "access_token", ":", "str", ")", "->", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "response", "=", "requests", ".", "post", "(", "url", "=", "\"{base_url}/observations.json\"", ".", "format", "(", "base_url", "=", "INAT_BASE_URL", ")", ",", "json", "=", "params", ",", "headers", "=", "_build_auth_header", "(", "access_token", ")", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Create a single or several (if passed an array) observations.

:param params:
:param access_token: the access token, as returned by :func:`get_access_token()`

:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
    if it rejects the observation data (for example an observation date in the future or a latitude > 90). In
    that case the exception's `response` attribute gives details about the errors.

allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations

Example:

    params = {'observation':
        {'species_guess': 'Pieris rapae'},
    }

TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, renaming
observation to observations), but as far as I saw they are not created (while a status of 200 is returned)
[ "Create", "a", "single", "or", "several", "(", "if", "passed", "an", "array", ")", "observations", "." ]
python
train
49.777778
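A usage sketch; the credentials are placeholders, get_access_token's argument names are assumed from that era's pyinaturalist API, and observed_on_string is an iNaturalist parameter assumed from its API docs rather than from this record:

from pyinaturalist.rest_api import get_access_token, create_observations

token = get_access_token(username='me', password='secret',
                         app_id='APP_ID', app_secret='APP_SECRET')  # placeholders

params = {
    'observation': {
        'species_guess': 'Pieris rapae',
        'observed_on_string': '2019-04-01',  # assumed field name
    },
}
response = create_observations(params=params, access_token=token)
print(response[0]['id'])  # assuming the usual list-of-observations payload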
juicer/juicer
juicer/admin/JuicerAdmin.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/admin/JuicerAdmin.py#L47-L115
def create_repo(self, repo_name=None, feed=None, envs=[], checksum_type="sha256", query='/repositories/'): """ `repo_name` - Name of repository to create `feed` - Repo URL to feed from `checksum_type` - Used for generating meta-data Create repository in specified environments, associate the yum_distributor with it and publish the repo """ data = {'display_name': repo_name, 'notes': { '_repo-type': 'rpm-repo', } } juicer.utils.Log.log_debug("Create Repo: %s", repo_name) for env in envs: if juicer.utils.repo_exists_p(repo_name, self.connectors[env], env): juicer.utils.Log.log_info("repo `%s` already exists in %s... skipping!", (repo_name, env)) continue else: data['relative_path'] = '/%s/%s/' % (env, repo_name) data['id'] = '-'.join([repo_name, env]) _r = self.connectors[env].post(query, data) if _r.status_code == Constants.PULP_POST_CREATED: imp_query = '/repositories/%s/importers/' % data['id'] imp_data = { 'importer_id': 'yum_importer', 'importer_type_id': 'yum_importer', 'importer_config': {}, } if feed: imp_data['importer_config']['feed_url'] = feed _r = self.connectors[env].post(imp_query, imp_data) dist_query = '/repositories/%s/distributors/' % data['id'] dist_data = {'distributor_id': 'yum_distributor', 'distributor_type_id': 'yum_distributor', 'distributor_config': { 'relative_url': '/%s/%s/' % (env, repo_name), 'http': True, 'https': True, 'checksum_type': checksum_type }, 'auto_publish': True, 'relative_path': '/%s/%s/' % (env, repo_name) } _r = self.connectors[env].post(dist_query, dist_data) if _r.status_code == Constants.PULP_POST_CREATED: pub_query = '/repositories/%s/actions/publish/' % data['id'] pub_data = {'id': 'yum_distributor'} _r = self.connectors[env].post(pub_query, pub_data) if _r.status_code == Constants.PULP_POST_ACCEPTED: juicer.utils.Log.log_info("created repo `%s` in %s", repo_name, env) else: _r.raise_for_status() else: _r.raise_for_status() return True
[ "def", "create_repo", "(", "self", ",", "repo_name", "=", "None", ",", "feed", "=", "None", ",", "envs", "=", "[", "]", ",", "checksum_type", "=", "\"sha256\"", ",", "query", "=", "'/repositories/'", ")", ":", "data", "=", "{", "'display_name'", ":", "repo_name", ",", "'notes'", ":", "{", "'_repo-type'", ":", "'rpm-repo'", ",", "}", "}", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"Create Repo: %s\"", ",", "repo_name", ")", "for", "env", "in", "envs", ":", "if", "juicer", ".", "utils", ".", "repo_exists_p", "(", "repo_name", ",", "self", ".", "connectors", "[", "env", "]", ",", "env", ")", ":", "juicer", ".", "utils", ".", "Log", ".", "log_info", "(", "\"repo `%s` already exists in %s... skipping!\"", ",", "(", "repo_name", ",", "env", ")", ")", "continue", "else", ":", "data", "[", "'relative_path'", "]", "=", "'/%s/%s/'", "%", "(", "env", ",", "repo_name", ")", "data", "[", "'id'", "]", "=", "'-'", ".", "join", "(", "[", "repo_name", ",", "env", "]", ")", "_r", "=", "self", ".", "connectors", "[", "env", "]", ".", "post", "(", "query", ",", "data", ")", "if", "_r", ".", "status_code", "==", "Constants", ".", "PULP_POST_CREATED", ":", "imp_query", "=", "'/repositories/%s/importers/'", "%", "data", "[", "'id'", "]", "imp_data", "=", "{", "'importer_id'", ":", "'yum_importer'", ",", "'importer_type_id'", ":", "'yum_importer'", ",", "'importer_config'", ":", "{", "}", ",", "}", "if", "feed", ":", "imp_data", "[", "'importer_config'", "]", "[", "'feed_url'", "]", "=", "feed", "_r", "=", "self", ".", "connectors", "[", "env", "]", ".", "post", "(", "imp_query", ",", "imp_data", ")", "dist_query", "=", "'/repositories/%s/distributors/'", "%", "data", "[", "'id'", "]", "dist_data", "=", "{", "'distributor_id'", ":", "'yum_distributor'", ",", "'distributor_type_id'", ":", "'yum_distributor'", ",", "'distributor_config'", ":", "{", "'relative_url'", ":", "'/%s/%s/'", "%", "(", "env", ",", "repo_name", ")", ",", "'http'", ":", "True", ",", "'https'", ":", "True", ",", "'checksum_type'", ":", "checksum_type", "}", ",", "'auto_publish'", ":", "True", ",", "'relative_path'", ":", "'/%s/%s/'", "%", "(", "env", ",", "repo_name", ")", "}", "_r", "=", "self", ".", "connectors", "[", "env", "]", ".", "post", "(", "dist_query", ",", "dist_data", ")", "if", "_r", ".", "status_code", "==", "Constants", ".", "PULP_POST_CREATED", ":", "pub_query", "=", "'/repositories/%s/actions/publish/'", "%", "data", "[", "'id'", "]", "pub_data", "=", "{", "'id'", ":", "'yum_distributor'", "}", "_r", "=", "self", ".", "connectors", "[", "env", "]", ".", "post", "(", "pub_query", ",", "pub_data", ")", "if", "_r", ".", "status_code", "==", "Constants", ".", "PULP_POST_ACCEPTED", ":", "juicer", ".", "utils", ".", "Log", ".", "log_info", "(", "\"created repo `%s` in %s\"", ",", "repo_name", ",", "env", ")", "else", ":", "_r", ".", "raise_for_status", "(", ")", "else", ":", "_r", ".", "raise_for_status", "(", ")", "return", "True" ]
`repo_name` - Name of repository to create `feed` - Repo URL to feed from `checksum_type` - Used for generating meta-data Create repository in specified environments, associate the yum_distributor with it and publish the repo
[ "repo_name", "-", "Name", "of", "repository", "to", "create", "feed", "-", "Repo", "URL", "to", "feed", "from", "checksum_type", "-", "Used", "for", "generating", "meta", "-", "data" ]
python
train
42.927536
merll/docker-fabric
dockerfabric/apiclient.py
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/apiclient.py#L294-L299
def start(self, container, **kwargs): """ Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging. """ self.push_log("Starting container '{0}'.".format(container)) super(DockerFabricClient, self).start(container, **kwargs)
[ "def", "start", "(", "self", ",", "container", ",", "*", "*", "kwargs", ")", ":", "self", ".", "push_log", "(", "\"Starting container '{0}'.\"", ".", "format", "(", "container", ")", ")", "super", "(", "DockerFabricClient", ",", "self", ")", ".", "start", "(", "container", ",", "*", "*", "kwargs", ")" ]
Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging.
[ "Identical", "to", ":", "meth", ":", "docker", ".", "api", ".", "container", ".", "ContainerApiMixin", ".", "start", "with", "additional", "logging", "." ]
python
train
48.5
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L171-L209
def new(): """Create a new community.""" form = CommunityForm(formdata=request.values) ctx = mycommunities_ctx() ctx.update({ 'form': form, 'is_new': True, 'community': None, }) if form.validate_on_submit(): data = copy.deepcopy(form.data) community_id = data.pop('identifier') del data['logo'] community = Community.create( community_id, current_user.get_id(), **data) file = request.files.get('logo', None) if file: if not community.save_logo(file.stream, file.filename): form.logo.errors.append(_( 'Cannot add this file as a logo. Supported formats: ' 'PNG, JPG and SVG. Max file size: 1.5 MB.')) db.session.rollback() community = None if community: db.session.commit() flash("Community was successfully created.", category='success') return redirect(url_for('.edit', community_id=community.id)) return render_template( current_app.config['COMMUNITIES_NEW_TEMPLATE'], community_form=form, **ctx )
[ "def", "new", "(", ")", ":", "form", "=", "CommunityForm", "(", "formdata", "=", "request", ".", "values", ")", "ctx", "=", "mycommunities_ctx", "(", ")", "ctx", ".", "update", "(", "{", "'form'", ":", "form", ",", "'is_new'", ":", "True", ",", "'community'", ":", "None", ",", "}", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "data", "=", "copy", ".", "deepcopy", "(", "form", ".", "data", ")", "community_id", "=", "data", ".", "pop", "(", "'identifier'", ")", "del", "data", "[", "'logo'", "]", "community", "=", "Community", ".", "create", "(", "community_id", ",", "current_user", ".", "get_id", "(", ")", ",", "*", "*", "data", ")", "file", "=", "request", ".", "files", ".", "get", "(", "'logo'", ",", "None", ")", "if", "file", ":", "if", "not", "community", ".", "save_logo", "(", "file", ".", "stream", ",", "file", ".", "filename", ")", ":", "form", ".", "logo", ".", "errors", ".", "append", "(", "_", "(", "'Cannot add this file as a logo. Supported formats: '", "'PNG, JPG and SVG. Max file size: 1.5 MB.'", ")", ")", "db", ".", "session", ".", "rollback", "(", ")", "community", "=", "None", "if", "community", ":", "db", ".", "session", ".", "commit", "(", ")", "flash", "(", "\"Community was successfully created.\"", ",", "category", "=", "'success'", ")", "return", "redirect", "(", "url_for", "(", "'.edit'", ",", "community_id", "=", "community", ".", "id", ")", ")", "return", "render_template", "(", "current_app", ".", "config", "[", "'COMMUNITIES_NEW_TEMPLATE'", "]", ",", "community_form", "=", "form", ",", "*", "*", "ctx", ")" ]
Create a new community.
[ "Create", "a", "new", "community", "." ]
python
train
29.538462
Diaoul/subliminal
subliminal/cli.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/cli.py#L77-L80
def write(self): """Write the configuration to :attr:`path`""" with open(self.path, 'w') as f: self.config.write(f)
[ "def", "write", "(", "self", ")", ":", "with", "open", "(", "self", ".", "path", ",", "'w'", ")", "as", "f", ":", "self", ".", "config", ".", "write", "(", "f", ")" ]
Write the configuration to :attr:`path`
[ "Write", "the", "configuration", "to", ":", "attr", ":", "path" ]
python
train
35
log2timeline/plaso
plaso/multi_processing/engine.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/engine.py#L256-L274
def _RegisterProcess(self, process): """Registers a process with the engine. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is already registered with the engine. ValueError: if the process is missing. """ if process is None: raise ValueError('Missing process.') if process.pid in self._processes_per_pid: raise KeyError( 'Already managing process: {0!s} (PID: {1:d})'.format( process.name, process.pid)) self._processes_per_pid[process.pid] = process
[ "def", "_RegisterProcess", "(", "self", ",", "process", ")", ":", "if", "process", "is", "None", ":", "raise", "ValueError", "(", "'Missing process.'", ")", "if", "process", ".", "pid", "in", "self", ".", "_processes_per_pid", ":", "raise", "KeyError", "(", "'Already managing process: {0!s} (PID: {1:d})'", ".", "format", "(", "process", ".", "name", ",", "process", ".", "pid", ")", ")", "self", ".", "_processes_per_pid", "[", "process", ".", "pid", "]", "=", "process" ]
Registers a process with the engine. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is already registered with the engine. ValueError: if the process is missing.
[ "Registers", "a", "process", "with", "the", "engine", "." ]
python
train
29.210526
knagra/farnsworth
legacy/views.py
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/legacy/views.py#L93-L114
def legacy_events_view(request): """ View to see legacy events. """ events = TeacherEvent.objects.all() event_count = events.count() paginator = Paginator(events, 100) page = request.GET.get('page') try: events = paginator.page(page) except PageNotAnInteger: events = paginator.page(1) except EmptyPage: events = paginator.page(paginator.num_pages) return render_to_response( 'teacher_events.html', {'page_name': "Legacy Events", 'events': events, 'event_count': event_count,}, context_instance=RequestContext(request) )
[ "def", "legacy_events_view", "(", "request", ")", ":", "events", "=", "TeacherEvent", ".", "objects", ".", "all", "(", ")", "event_count", "=", "events", ".", "count", "(", ")", "paginator", "=", "Paginator", "(", "events", ",", "100", ")", "page", "=", "request", ".", "GET", ".", "get", "(", "'page'", ")", "try", ":", "events", "=", "paginator", ".", "page", "(", "page", ")", "except", "PageNotAnInteger", ":", "events", "=", "paginator", ".", "page", "(", "1", ")", "except", "EmptyPage", ":", "events", "=", "paginator", ".", "page", "(", "paginator", ".", "num_pages", ")", "return", "render_to_response", "(", "'teacher_events.html'", ",", "{", "'page_name'", ":", "\"Legacy Events\"", ",", "'events'", ":", "events", ",", "'event_count'", ":", "event_count", ",", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
View to see legacy events.
[ "View", "to", "see", "legacy", "events", "." ]
python
train
27.909091
thusoy/pwm
pwm/core.py
https://github.com/thusoy/pwm/blob/fff7d755c34f3a7235a8bf217ffa2ff5aed4926f/pwm/core.py#L112-L130
def _uses_db(func, self, *args, **kwargs): """ Use as a decorator for operations on the database, to ensure connection setup and teardown. Can only be used on methods on objects with a `self.session` attribute. """ if not self.session: _logger.debug('Creating new db session') self._init_db_session() try: ret = func(self, *args, **kwargs) self.session.commit() except: self.session.rollback() tb = traceback.format_exc() _logger.debug(tb) raise finally: _logger.debug('Closing db session') self.session.close() return ret
[ "def", "_uses_db", "(", "func", ",", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "session", ":", "_logger", ".", "debug", "(", "'Creating new db session'", ")", "self", ".", "_init_db_session", "(", ")", "try", ":", "ret", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "session", ".", "commit", "(", ")", "except", ":", "self", ".", "session", ".", "rollback", "(", ")", "tb", "=", "traceback", ".", "format_exc", "(", ")", "_logger", ".", "debug", "(", "tb", ")", "raise", "finally", ":", "_logger", ".", "debug", "(", "'Closing db session'", ")", "self", ".", "session", ".", "close", "(", ")", "return", "ret" ]
Use as a decorator for operations on the database, to ensure connection setup and teardown. Can only be used on methods on objects with a `self.session` attribute.
[ "Use", "as", "a", "decorator", "for", "operations", "on", "the", "database", "to", "ensure", "connection", "setup", "and", "teardown", ".", "Can", "only", "be", "used", "on", "methods", "on", "objects", "with", "a", "self", ".", "session", "attribute", "." ]
python
test
32.421053
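The (func, self, ...) signature suggests _uses_db is bound to methods through something like the `decorator` package; the functools-based equivalent below is an assumption, and the KeyStore class and get_key method are hypothetical stand-ins:

import functools

def uses_db(func):
    # assumed wiring: turn _uses_db's caller signature into a plain decorator
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        return _uses_db(func, self, *args, **kwargs)
    return wrapper

class KeyStore(object):
    session = None

    def _init_db_session(self):
        # would create self.session (e.g. a SQLAlchemy session) here
        ...

    @uses_db
    def get_key(self, name):
        # this body runs inside the commit/rollback/close handling above
        return self.session.query(name)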
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/causality/graph/CGNN.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/graph/CGNN.py#L315-L344
def orient_undirected_graph(self, data, umg, alg='HC'): """Orient the undirected graph using GNN and apply CGNN to improve the graph. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. umg (nx.Graph): Graph that provides the skeleton, on which the GNN then the CGNN algorithm will be applied. alg (str): Exploration heuristic to use, among ["HC", "HCr", "tabu", "EHC"] Returns: networkx.DiGraph: Solution given by CGNN. .. note:: GNN (``cdt.causality.pairwise.GNN``) is first used to orient the undirected graph and output a DAG before applying CGNN. """ warnings.warn("The pairwise GNN model is computed on each edge of the UMG " "to initialize the model and start CGNN with a DAG") gnn = GNN(nh=self.nh, lr=self.lr) og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs, nb_jobs=self.nb_jobs, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu) # Pairwise method # print(nx.adj_matrix(og).todense().shape) # print(list(og.edges())) dag = dagify_min_edge(og) # print(nx.adj_matrix(dag).todense().shape) return self.orient_directed_graph(data, dag, alg=alg)
[ "def", "orient_undirected_graph", "(", "self", ",", "data", ",", "umg", ",", "alg", "=", "'HC'", ")", ":", "warnings", ".", "warn", "(", "\"The pairwise GNN model is computed on each edge of the UMG \"", "\"to initialize the model and start CGNN with a DAG\"", ")", "gnn", "=", "GNN", "(", "nh", "=", "self", ".", "nh", ",", "lr", "=", "self", ".", "lr", ")", "og", "=", "gnn", ".", "orient_graph", "(", "data", ",", "umg", ",", "nb_runs", "=", "self", ".", "nb_runs", ",", "nb_max_runs", "=", "self", ".", "nb_runs", ",", "nb_jobs", "=", "self", ".", "nb_jobs", ",", "train_epochs", "=", "self", ".", "train_epochs", ",", "test_epochs", "=", "self", ".", "test_epochs", ",", "verbose", "=", "self", ".", "verbose", ",", "gpu", "=", "self", ".", "gpu", ")", "# Pairwise method", "# print(nx.adj_matrix(og).todense().shape)", "# print(list(og.edges()))", "dag", "=", "dagify_min_edge", "(", "og", ")", "# print(nx.adj_matrix(dag).todense().shape)", "return", "self", ".", "orient_directed_graph", "(", "data", ",", "dag", ",", "alg", "=", "alg", ")" ]
Orient the undirected graph using GNN and apply CGNN to improve the graph. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. umg (nx.Graph): Graph that provides the skeleton, on which the GNN then the CGNN algorithm will be applied. alg (str): Exploration heuristic to use, among ["HC", "HCr", "tabu", "EHC"] Returns: networkx.DiGraph: Solution given by CGNN. .. note:: GNN (``cdt.causality.pairwise.GNN``) is first used to orient the undirected graph and output a DAG before applying CGNN.
[ "Orient", "the", "undirected", "graph", "using", "GNN", "and", "apply", "CGNN", "to", "improve", "the", "graph", "." ]
python
valid
48.633333
VIVelev/PyDojoML
dojo/losses.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/losses.py#L75-L81
def gradient(self, P, Q, Y, i): """Computes the gradient of KL divergence with respect to the i'th example of Y""" return 4 * sum([ (P[i, j] - Q[i, j]) * (Y[i] - Y[j]) * (1 + np.linalg.norm(Y[i] - Y[j]) ** 2) ** -1 \ for j in range(Y.shape[0]) ])
[ "def", "gradient", "(", "self", ",", "P", ",", "Q", ",", "Y", ",", "i", ")", ":", "return", "4", "*", "sum", "(", "[", "(", "P", "[", "i", ",", "j", "]", "-", "Q", "[", "i", ",", "j", "]", ")", "*", "(", "Y", "[", "i", "]", "-", "Y", "[", "j", "]", ")", "*", "(", "1", "+", "np", ".", "linalg", ".", "norm", "(", "Y", "[", "i", "]", "-", "Y", "[", "j", "]", ")", "**", "2", ")", "**", "-", "1", "for", "j", "in", "range", "(", "Y", ".", "shape", "[", "0", "]", ")", "]", ")" ]
Computes the gradient of KL divergence with respect to the i'th example of Y
[ "Computes", "the", "gradient", "of", "KL", "divergence", "with", "respect", "to", "the", "i", "th", "example", "of", "Y" ]
python
train
41.285714
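Spelled out, the sum the code evaluates is the familiar t-SNE gradient of the KL divergence, assuming P and Q hold the pairwise similarity matrices; in LaTeX:

\frac{\partial C}{\partial y_i}
    = 4 \sum_{j} (p_{ij} - q_{ij})\,(y_i - y_j)\,\left(1 + \lVert y_i - y_j \rVert^{2}\right)^{-1}

The list comprehension in the record mirrors this formula term for term, with the final factor being the Student-t kernel's unnormalized weight.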
geronimp/graftM
graftm/greengenes_taxonomy.py
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/greengenes_taxonomy.py#L49-L53
def read_file(input_filename): '''Like read() except uses a file rather than an IO stream, for convenience''' with open(input_filename) as f: g = GreenGenesTaxonomy.read(f) return g
[ "def", "read_file", "(", "input_filename", ")", ":", "with", "open", "(", "input_filename", ")", "as", "f", ":", "g", "=", "GreenGenesTaxonomy", ".", "read", "(", "f", ")", "return", "g" ]
Like read() except uses a file rather than an IO stream, for convenience
[ "Like", "read", "()", "except", "uses", "a", "file", "rather", "than", "an", "IO", "stream", "for", "convenience" ]
python
train
42.6
samfoo/vt102
vt102/__init__.py
https://github.com/samfoo/vt102/blob/ff5be883bc9a880a422b09bb87b210d7c408cf2c/vt102/__init__.py#L836-L845
def _color_attr(self, ground, attr): """ Given a color attribute, set the current cursor appropriately. """ attr = colors[ground][attr] attrs = self.cursor_attributes if ground == "foreground": self.cursor_attributes = (attrs[0], attr, attrs[2]) elif ground == "background": self.cursor_attributes = (attrs[0], attrs[1], attr)
[ "def", "_color_attr", "(", "self", ",", "ground", ",", "attr", ")", ":", "attr", "=", "colors", "[", "ground", "]", "[", "attr", "]", "attrs", "=", "self", ".", "cursor_attributes", "if", "ground", "==", "\"foreground\"", ":", "self", ".", "cursor_attributes", "=", "(", "attrs", "[", "0", "]", ",", "attr", ",", "attrs", "[", "2", "]", ")", "elif", "ground", "==", "\"background\"", ":", "self", ".", "cursor_attributes", "=", "(", "attrs", "[", "0", "]", ",", "attrs", "[", "1", "]", ",", "attr", ")" ]
Given a color attribute, set the current cursor appropriately.
[ "Given", "a", "color", "attribute", "set", "the", "current", "cursor", "appropriately", "." ]
python
train
39.8
exxeleron/qPython
qpython/qconnection.py
https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L209-L245
def query(self, msg_type, query, *parameters, **options):
    '''Performs a query against a q service.

    In a typical use case, `query` is the name of the function to call and
    `parameters` are its parameters. When `parameters` list is empty, the
    query can be an arbitrary q expression (e.g. ``0 +/ til 100``).

    Calls an anonymous function with a single parameter:

        >>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)

    Executes a q expression:

        >>> q.query(qconnection.MessageType.SYNC,'til 10')

    :Parameters:
     - `msg_type` (one of the constants defined in :class:`.MessageType`) -
       type of the query to be executed
     - `query` (`string`) - query to be executed
     - `parameters` (`list` or `None`) - parameters for the query
    :Options:
     - `single_char_strings` (`boolean`) - if ``True`` single char Python
       strings are encoded as q strings instead of chars, **Default**: ``False``

    :raises: :class:`.QConnectionException`, :class:`.QWriterException`
    '''
    if not self._connection:
        raise QConnectionException('Connection is not established.')

    if parameters and len(parameters) > 8:
        raise QWriterException('Too many parameters.')

    if not parameters or len(parameters) == 0:
        self._writer.write(query, msg_type, **self._options.union_dict(**options))
    else:
        self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
[ "def", "query", "(", "self", ",", "msg_type", ",", "query", ",", "*", "parameters", ",", "*", "*", "options", ")", ":", "if", "not", "self", ".", "_connection", ":", "raise", "QConnectionException", "(", "'Connection is not established.'", ")", "if", "parameters", "and", "len", "(", "parameters", ")", ">", "8", ":", "raise", "QWriterException", "(", "'Too many parameters.'", ")", "if", "not", "parameters", "or", "len", "(", "parameters", ")", "==", "0", ":", "self", ".", "_writer", ".", "write", "(", "query", ",", "msg_type", ",", "*", "*", "self", ".", "_options", ".", "union_dict", "(", "*", "*", "options", ")", ")", "else", ":", "self", ".", "_writer", ".", "write", "(", "[", "query", "]", "+", "list", "(", "parameters", ")", ",", "msg_type", ",", "*", "*", "self", ".", "_options", ".", "union_dict", "(", "*", "*", "options", ")", ")" ]
Performs a query against a q service.

In a typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).

Calls an anonymous function with a single parameter:

    >>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)

Executes a q expression:

    >>> q.query(qconnection.MessageType.SYNC,'til 10')

:Parameters:
 - `msg_type` (one of the constants defined in :class:`.MessageType`) -
   type of the query to be executed
 - `query` (`string`) - query to be executed
 - `parameters` (`list` or `None`) - parameters for the query
:Options:
 - `single_char_strings` (`boolean`) - if ``True`` single char Python
   strings are encoded as q strings instead of chars, **Default**: ``False``

:raises: :class:`.QConnectionException`, :class:`.QWriterException`
[ "Performs", "a", "query", "against", "a", "q", "service", ".", "In", "a", "typical", "use", "case", "query", "is", "the", "name", "of", "the", "function", "to", "call", "and", "parameters", "are", "its", "parameters", ".", "When", "parameters", "list", "is", "empty", "the", "query", "can", "be", "an", "arbitrary", "q", "expression", "(", "e", ".", "g", ".", "0", "+", "/", "til", "100", ")", ".", "Calls", "an", "anonymous", "function", "with", "a", "single", "parameter", ":", ">>>", "q", ".", "query", "(", "qconnection", ".", "MessageType", ".", "SYNC", "{", "til", "x", "}", "10", ")", "Executes", "a", "q", "expression", ":", ">>>", "q", ".", "query", "(", "qconnection", ".", "MessageType", ".", "SYNC", "til", "10", ")", ":", "Parameters", ":", "-", "msg_type", "(", "one", "of", "the", "constants", "defined", "in", ":", "class", ":", ".", "MessageType", ")", "-", "type", "of", "the", "query", "to", "be", "executed", "-", "query", "(", "string", ")", "-", "query", "to", "be", "executed", "-", "parameters", "(", "list", "or", "None", ")", "-", "parameters", "for", "the", "query", ":", "Options", ":", "-", "single_char_strings", "(", "boolean", ")", "-", "if", "True", "single", "char", "Python", "strings", "are", "encoded", "as", "q", "strings", "instead", "of", "chars", "**", "Default", "**", ":", "False", ":", "raises", ":", ":", "class", ":", ".", "QConnectionException", ":", "class", ":", ".", "QWriterException" ]
python
train
43.540541
python-openxml/python-docx
docx/oxml/xmlchemy.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/xmlchemy.py#L296-L303
def _add_getter(self): """ Add a read-only ``{prop_name}`` property to the element class for this child element. """ property_ = property(self._getter, None, None) # assign unconditionally to overwrite element name definition setattr(self._element_cls, self._prop_name, property_)
[ "def", "_add_getter", "(", "self", ")", ":", "property_", "=", "property", "(", "self", ".", "_getter", ",", "None", ",", "None", ")", "# assign unconditionally to overwrite element name definition", "setattr", "(", "self", ".", "_element_cls", ",", "self", ".", "_prop_name", ",", "property_", ")" ]
Add a read-only ``{prop_name}`` property to the element class for this child element.
[ "Add", "a", "read", "-", "only", "{", "prop_name", "}", "property", "to", "the", "element", "class", "for", "this", "child", "element", "." ]
python
train
41.125
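The property-injection pattern in isolation; the element class, getter body, and property name below are hypothetical stand-ins (the real ones come from python-docx's schema definitions):

class CT_Body(object):
    """Hypothetical stand-in for a generated element class."""

def _getter(self):
    # assumption: the real getter returns the matching child element(s)
    return '<w:p> children of %r' % self

property_ = property(_getter, None, None)   # read-only: no setter, no deleter
# assigned unconditionally, overwriting any same-named element-name definition
setattr(CT_Body, 'p_lst', property_)

print(CT_Body().p_lst)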
WoLpH/python-statsd
statsd/timer.py
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L127-L152
def decorate(self, function_or_name): '''Decorate a function to time the execution The method can be called with or without a name. If no name is given the function defaults to the name of the function. :keyword function_or_name: The name to post to or the function to wrap >>> from statsd import Timer >>> timer = Timer('application_name') >>> >>> @timer.decorate ... def some_function(): ... # resulting timer name: application_name.some_function ... pass >>> >>> @timer.decorate('my_timer') ... def some_other_function(): ... # resulting timer name: application_name.my_timer ... pass ''' if callable(function_or_name): return self._decorate(function_or_name.__name__, function_or_name) else: return partial(self._decorate, function_or_name)
[ "def", "decorate", "(", "self", ",", "function_or_name", ")", ":", "if", "callable", "(", "function_or_name", ")", ":", "return", "self", ".", "_decorate", "(", "function_or_name", ".", "__name__", ",", "function_or_name", ")", "else", ":", "return", "partial", "(", "self", ".", "_decorate", ",", "function_or_name", ")" ]
Decorate a function to time the execution The method can be called with or without a name. If no name is given the function defaults to the name of the function. :keyword function_or_name: The name to post to or the function to wrap >>> from statsd import Timer >>> timer = Timer('application_name') >>> >>> @timer.decorate ... def some_function(): ... # resulting timer name: application_name.some_function ... pass >>> >>> @timer.decorate('my_timer') ... def some_other_function(): ... # resulting timer name: application_name.my_timer ... pass
[ "Decorate", "a", "function", "to", "time", "the", "execution" ]
python
train
35.192308
etingof/pyasn1
pyasn1/type/univ.py
https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/univ.py#L2830-L2871
def setComponentByPosition(self, idx, value=noValue, verifyConstraints=True, matchTags=True, matchConstraints=True): """Assign |ASN.1| type component by position. Equivalent to Python sequence item assignment operation (e.g. `[]`). Parameters ---------- idx: :class:`int` Component index (zero-based). Must either refer to existing component or to N+1 component. In the latter case a new component type gets instantiated (if *componentType* is set, or given ASN.1 object is taken otherwise) and appended to the |ASN.1| sequence. Keyword Args ------------ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. Once a new value is set to *idx* component, previous value is dropped. verifyConstraints : :class:`bool` If `False`, skip constraints validation matchTags: :class:`bool` If `False`, skip component tags matching matchConstraints: :class:`bool` If `False`, skip component constraints matching Returns ------- self """ oldIdx = self._currentIdx Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints) self._currentIdx = idx if oldIdx is not None and oldIdx != idx: self._componentValues[oldIdx] = noValue return self
[ "def", "setComponentByPosition", "(", "self", ",", "idx", ",", "value", "=", "noValue", ",", "verifyConstraints", "=", "True", ",", "matchTags", "=", "True", ",", "matchConstraints", "=", "True", ")", ":", "oldIdx", "=", "self", ".", "_currentIdx", "Set", ".", "setComponentByPosition", "(", "self", ",", "idx", ",", "value", ",", "verifyConstraints", ",", "matchTags", ",", "matchConstraints", ")", "self", ".", "_currentIdx", "=", "idx", "if", "oldIdx", "is", "not", "None", "and", "oldIdx", "!=", "idx", ":", "self", ".", "_componentValues", "[", "oldIdx", "]", "=", "noValue", "return", "self" ]
Assign |ASN.1| type component by position. Equivalent to Python sequence item assignment operation (e.g. `[]`). Parameters ---------- idx: :class:`int` Component index (zero-based). Must either refer to existing component or to N+1 component. In the latter case a new component type gets instantiated (if *componentType* is set, or given ASN.1 object is taken otherwise) and appended to the |ASN.1| sequence. Keyword Args ------------ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative A Python value to initialize |ASN.1| component with (if *componentType* is set) or ASN.1 value object to assign to |ASN.1| component. Once a new value is set to *idx* component, previous value is dropped. verifyConstraints : :class:`bool` If `False`, skip constraints validation matchTags: :class:`bool` If `False`, skip component tags matching matchConstraints: :class:`bool` If `False`, skip component constraints matching Returns ------- self
[ "Assign", "|ASN", ".", "1|", "type", "component", "by", "position", "." ]
python
train
39.714286
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/account_management/account_management.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/account_management/account_management.py#L157-L174
def update_user(self, user_id, **kwargs): """Update user properties of specified user. :param str user_id: The ID of the user to update (Required) :param str username: The unique username of the user :param str email: The unique email of the user :param str full_name: The full name of the user :param str password: The password string of the user. :param str phone_number: Phone number of the user :param bool terms_accepted: Is 'General Terms & Conditions' accepted :param bool marketing_accepted: Is receiving marketing information accepted? :returns: the updated user object :rtype: User """ api = self._get_api(iam.AccountAdminApi) user = User._create_request_map(kwargs) body = iam.UserUpdateReq(**user) return User(api.update_user(user_id, body))
[ "def", "update_user", "(", "self", ",", "user_id", ",", "*", "*", "kwargs", ")", ":", "api", "=", "self", ".", "_get_api", "(", "iam", ".", "AccountAdminApi", ")", "user", "=", "User", ".", "_create_request_map", "(", "kwargs", ")", "body", "=", "iam", ".", "UserUpdateReq", "(", "*", "*", "user", ")", "return", "User", "(", "api", ".", "update_user", "(", "user_id", ",", "body", ")", ")" ]
Update user properties of specified user. :param str user_id: The ID of the user to update (Required) :param str username: The unique username of the user :param str email: The unique email of the user :param str full_name: The full name of the user :param str password: The password string of the user. :param str phone_number: Phone number of the user :param bool terms_accepted: Is 'General Terms & Conditions' accepted :param bool marketing_accepted: Is receiving marketing information accepted? :returns: the updated user object :rtype: User
[ "Update", "user", "properties", "of", "specified", "user", "." ]
python
train
48.055556
sernst/cauldron
cauldron/cli/sync/sync_io.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/sync/sync_io.py#L95-L124
def write_file_chunk( file_path: str, packed_chunk: str, append: bool = True, offset: int = -1 ): """ Write or append the specified chunk data to the given file path, unpacking the chunk before writing. If the file does not yet exist, it will be created. Set the append argument to False if you do not want the chunk to be appended to an existing file. :param file_path: The file where the chunk will be written or appended :param packed_chunk: The packed chunk data to write to the file. It will be unpacked before the file is written. :param append: Whether or not the chunk should be appended to the existing file. If False the chunk data will overwrite the existing file. :param offset: The byte offset in the file where the chunk should be written. If the value is less than zero, the chunk will be written or appended based on the `append` argument. Note that if you indicate an append write mode and an offset, the mode will be forced to write instead of append. """ mode = 'ab' if append else 'wb' contents = unpack_chunk(packed_chunk) writer.write_file(file_path, contents, mode=mode, offset=offset)
[ "def", "write_file_chunk", "(", "file_path", ":", "str", ",", "packed_chunk", ":", "str", ",", "append", ":", "bool", "=", "True", ",", "offset", ":", "int", "=", "-", "1", ")", ":", "mode", "=", "'ab'", "if", "append", "else", "'wb'", "contents", "=", "unpack_chunk", "(", "packed_chunk", ")", "writer", ".", "write_file", "(", "file_path", ",", "contents", ",", "mode", "=", "mode", ",", "offset", "=", "offset", ")" ]
Write or append the specified chunk data to the given file path, unpacking the chunk before writing. If the file does not yet exist, it will be created. Set the append argument to False if you do not want the chunk to be appended to an existing file. :param file_path: The file where the chunk will be written or appended :param packed_chunk: The packed chunk data to write to the file. It will be unpacked before the file is written. :param append: Whether or not the chunk should be appended to the existing file. If False the chunk data will overwrite the existing file. :param offset: The byte offset in the file where the chunk should be written. If the value is less than zero, the chunk will be written or appended based on the `append` argument. Note that if you indicate an append write mode and an offset, the mode will be forced to write instead of append.
[ "Write", "or", "append", "the", "specified", "chunk", "data", "to", "the", "given", "file", "path", "unpacking", "the", "chunk", "before", "writing", ".", "If", "the", "file", "does", "not", "yet", "exist", "it", "will", "be", "created", ".", "Set", "the", "append", "argument", "to", "False", "if", "you", "do", "not", "want", "the", "chunk", "to", "be", "appended", "to", "an", "existing", "file", "." ]
python
train
41.466667
CloudGenix/sdk-python
cloudgenix/__init__.py
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L933-L997
def update_region_to_controller(self, region): """ Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace """ # default region position in a list region_position = 1 # Check for a global "ignore region" flag if self.ignore_region: # bypass api_logger.debug("IGNORE_REGION set, not updating controller region.") return api_logger.debug("Updating Controller Region") api_logger.debug("CONTROLLER = %s", self.controller) api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("CONTROLLER_REGION = %s", self.controller_region) # Check if this is an initial region use or an update region use if self.controller_orig: controller_base = self.controller_orig else: controller_base = self.controller self.controller_orig = self.controller # splice controller string controller_full_part_list = controller_base.split('.') for idx, part in enumerate(controller_full_part_list): # is the region already in the controller string? if region == part: # yes, controller already has apropriate region api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx) # update region if it is not already set. if self.controller_region != region: self.controller_region = region api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return controller_part_count = len(controller_full_part_list) # handle short domain case if controller_part_count > 1: # insert region controller_full_part_list[region_position] = region self.controller = ".".join(controller_full_part_list) else: # short domain, just add region self.controller = ".".join(controller_full_part_list) + '.' + region # update SDK vars with region info self.controller_orig = controller_base self.controller_region = region api_logger.debug("UPDATED_CONTROLLER = %s", self.controller) api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return
[ "def", "update_region_to_controller", "(", "self", ",", "region", ")", ":", "# default region position in a list", "region_position", "=", "1", "# Check for a global \"ignore region\" flag", "if", "self", ".", "ignore_region", ":", "# bypass", "api_logger", ".", "debug", "(", "\"IGNORE_REGION set, not updating controller region.\"", ")", "return", "api_logger", ".", "debug", "(", "\"Updating Controller Region\"", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER = %s\"", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER_ORIG = %s\"", ",", "self", ".", "controller_orig", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "# Check if this is an initial region use or an update region use", "if", "self", ".", "controller_orig", ":", "controller_base", "=", "self", ".", "controller_orig", "else", ":", "controller_base", "=", "self", ".", "controller", "self", ".", "controller_orig", "=", "self", ".", "controller", "# splice controller string", "controller_full_part_list", "=", "controller_base", ".", "split", "(", "'.'", ")", "for", "idx", ",", "part", "in", "enumerate", "(", "controller_full_part_list", ")", ":", "# is the region already in the controller string?", "if", "region", "==", "part", ":", "# yes, controller already has apropriate region", "api_logger", ".", "debug", "(", "\"REGION %s ALREADY IN CONTROLLER AT INDEX = %s\"", ",", "region", ",", "idx", ")", "# update region if it is not already set.", "if", "self", ".", "controller_region", "!=", "region", ":", "self", ".", "controller_region", "=", "region", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "return", "controller_part_count", "=", "len", "(", "controller_full_part_list", ")", "# handle short domain case", "if", "controller_part_count", ">", "1", ":", "# insert region", "controller_full_part_list", "[", "region_position", "]", "=", "region", "self", ".", "controller", "=", "\".\"", ".", "join", "(", "controller_full_part_list", ")", "else", ":", "# short domain, just add region", "self", ".", "controller", "=", "\".\"", ".", "join", "(", "controller_full_part_list", ")", "+", "'.'", "+", "region", "# update SDK vars with region info", "self", ".", "controller_orig", "=", "controller_base", "self", ".", "controller_region", "=", "region", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER = %s\"", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_ORIG = %s\"", ",", "self", ".", "controller_orig", ")", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "return" ]
Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace
[ "Update", "the", "controller", "string", "with", "dynamic", "region", "info", ".", "Controller", "string", "should", "end", "up", "as", "<name", "[", "-", "env", "]", ">", ".", "<region", ">", ".", "cloudgenix", ".", "com" ]
python
train
40.292308
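The label splice in isolation, minus the logging and instance-state updates; the hostnames and region are hypothetical, and the example assumes the base controller already carries a region label in position 1, as the docstring's `<name[-env]>.<region>.cloudgenix.com` shape implies:

def splice_region(controller_base, region, region_position=1):
    # mirrors the list surgery in update_region_to_controller
    parts = controller_base.split('.')
    if region in parts:
        return controller_base                  # region already present
    if len(parts) > 1:
        parts[region_position] = region         # replace the second label
        return '.'.join(parts)
    return controller_base + '.' + region       # short name: just append

print(splice_region('api.us.cloudgenix.com', 'hood'))       # api.hood.cloudgenix.com
print(splice_region('api.hood.cloudgenix.com', 'hood'))     # unchanged
print(splice_region('controller', 'hood'))                  # controller.hood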
ScottDuckworth/python-anyvcs
anyvcs/__init__.py
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/__init__.py#L32-L46
def clone(srcpath, destpath, vcs=None): """Clone an existing repository. :param str srcpath: Path to an existing repository :param str destpath: Desired path of new repository :param str vcs: Either ``git``, ``hg``, or ``svn`` :returns VCSRepo: The newly cloned repository If ``vcs`` is not given, then the repository type is discovered from ``srcpath`` via :func:`probe`. """ vcs = vcs or probe(srcpath) cls = _get_repo_class(vcs) return cls.clone(srcpath, destpath)
[ "def", "clone", "(", "srcpath", ",", "destpath", ",", "vcs", "=", "None", ")", ":", "vcs", "=", "vcs", "or", "probe", "(", "srcpath", ")", "cls", "=", "_get_repo_class", "(", "vcs", ")", "return", "cls", ".", "clone", "(", "srcpath", ",", "destpath", ")" ]
Clone an existing repository. :param str srcpath: Path to an existing repository :param str destpath: Desired path of new repository :param str vcs: Either ``git``, ``hg``, or ``svn`` :returns VCSRepo: The newly cloned repository If ``vcs`` is not given, then the repository type is discovered from ``srcpath`` via :func:`probe`.
[ "Clone", "an", "existing", "repository", "." ]
python
train
33.333333
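A usage sketch for the module-level helper above; the paths are hypothetical, and omitting vcs lets probe() discover the repository type:

import anyvcs

# backend auto-detected from the source repository via probe()
repo = anyvcs.clone('/srv/repos/project', '/tmp/project-copy')

# or pin the backend explicitly
repo = anyvcs.clone('/srv/repos/project', '/tmp/project-copy2', vcs='git')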
pmbarrett314/curses-menu
cursesmenu/curses_menu.py
https://github.com/pmbarrett314/curses-menu/blob/c76fc00ab9d518eab275e55434fc2941f49c6b30/cursesmenu/curses_menu.py#L323-L335
def select(self): """ Select the current item and run it """ self.selected_option = self.current_option self.selected_item.set_up() self.selected_item.action() self.selected_item.clean_up() self.returned_value = self.selected_item.get_return() self.should_exit = self.selected_item.should_exit if not self.should_exit: self.draw()
[ "def", "select", "(", "self", ")", ":", "self", ".", "selected_option", "=", "self", ".", "current_option", "self", ".", "selected_item", ".", "set_up", "(", ")", "self", ".", "selected_item", ".", "action", "(", ")", "self", ".", "selected_item", ".", "clean_up", "(", ")", "self", ".", "returned_value", "=", "self", ".", "selected_item", ".", "get_return", "(", ")", "self", ".", "should_exit", "=", "self", ".", "selected_item", ".", "should_exit", "if", "not", "self", ".", "should_exit", ":", "self", ".", "draw", "(", ")" ]
Select the current item and run it
[ "Select", "the", "current", "item", "and", "run", "it" ]
python
test
31.615385
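select() normally fires from the menu's input loop rather than being called directly. A sketch of how a selection is wired up, based on the cursesmenu package's public API (the item text and callback are assumptions):

from cursesmenu import CursesMenu
from cursesmenu.items import FunctionItem

def greet(name):
    print("hello,", name)

menu = CursesMenu("Demo", "pick an item")
# choosing this entry drives select(): set_up -> action -> clean_up
menu.append_item(FunctionItem("Say hello", greet, args=["world"]))
menu.show()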
saltstack/salt
salt/modules/azurearm_dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_dns.py#L81-L130
def record_set_create_or_update(name, zone_name, resource_group, record_type, **kwargs): ''' .. versionadded:: Fluorine Creates or updates a record set within a DNS zone. :param name: The name of the record set, relative to the name of the zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of DNS record in this record set. Record sets of type SOA can be updated but not created (they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' CLI Example: .. code-block:: bash salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A arecords='[{ipv4_address: 10.0.0.1}]' ttl=300 ''' dnsconn = __utils__['azurearm.get_client']('dns', **kwargs) try: record_set_model = __utils__['azurearm.create_object_model']('dns', 'RecordSet', **kwargs) except TypeError as exc: result = {'error': 'The object model could not be built. ({0})'.format(str(exc))} return result try: record_set = dnsconn.record_sets.create_or_update( relative_record_set_name=name, zone_name=zone_name, resource_group_name=resource_group, record_type=record_type, parameters=record_set_model, if_match=kwargs.get('if_match'), if_none_match=kwargs.get('if_none_match') ) result = record_set.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
[ "def", "record_set_create_or_update", "(", "name", ",", "zone_name", ",", "resource_group", ",", "record_type", ",", "*", "*", "kwargs", ")", ":", "dnsconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'dns'", ",", "*", "*", "kwargs", ")", "try", ":", "record_set_model", "=", "__utils__", "[", "'azurearm.create_object_model'", "]", "(", "'dns'", ",", "'RecordSet'", ",", "*", "*", "kwargs", ")", "except", "TypeError", "as", "exc", ":", "result", "=", "{", "'error'", ":", "'The object model could not be built. ({0})'", ".", "format", "(", "str", "(", "exc", ")", ")", "}", "return", "result", "try", ":", "record_set", "=", "dnsconn", ".", "record_sets", ".", "create_or_update", "(", "relative_record_set_name", "=", "name", ",", "zone_name", "=", "zone_name", ",", "resource_group_name", "=", "resource_group", ",", "record_type", "=", "record_type", ",", "parameters", "=", "record_set_model", ",", "if_match", "=", "kwargs", ".", "get", "(", "'if_match'", ")", ",", "if_none_match", "=", "kwargs", ".", "get", "(", "'if_none_match'", ")", ")", "result", "=", "record_set", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'dns'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "except", "SerializationError", "as", "exc", ":", "result", "=", "{", "'error'", ":", "'The object model could not be parsed. ({0})'", ".", "format", "(", "str", "(", "exc", ")", ")", "}", "return", "result" ]
.. versionadded:: Fluorine Creates or updates a record set within a DNS zone. :param name: The name of the record set, relative to the name of the zone. :param zone_name: The name of the DNS zone (without a terminating dot). :param resource_group: The name of the resource group. :param record_type: The type of DNS record in this record set. Record sets of type SOA can be updated but not created (they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' CLI Example: .. code-block:: bash salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A arecords='[{ipv4_address: 10.0.0.1}]' ttl=300
[ "..", "versionadded", "::", "Fluorine" ]
python
train
36.82
apache/airflow
airflow/_vendor/slugify/slugify.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/slugify/slugify.py#L32-L71
def smart_truncate(string, max_length=0, word_boundary=False, separator=' ', save_order=False): """ Truncate a string. :param string (str): string for modification :param max_length (int): output string length :param word_boundary (bool): :param save_order (bool): if True then word order of output string is like input string :param separator (str): separator between words :return: """ string = string.strip(separator) if not max_length: return string if len(string) < max_length: return string if not word_boundary: return string[:max_length].strip(separator) if separator not in string: return string[:max_length] truncated = '' for word in string.split(separator): if word: next_len = len(truncated) + len(word) if next_len < max_length: truncated += '{0}{1}'.format(word, separator) elif next_len == max_length: truncated += '{0}'.format(word) break else: if save_order: break if not truncated: # pragma: no cover truncated = string[:max_length] return truncated.strip(separator)
[ "def", "smart_truncate", "(", "string", ",", "max_length", "=", "0", ",", "word_boundary", "=", "False", ",", "separator", "=", "' '", ",", "save_order", "=", "False", ")", ":", "string", "=", "string", ".", "strip", "(", "separator", ")", "if", "not", "max_length", ":", "return", "string", "if", "len", "(", "string", ")", "<", "max_length", ":", "return", "string", "if", "not", "word_boundary", ":", "return", "string", "[", ":", "max_length", "]", ".", "strip", "(", "separator", ")", "if", "separator", "not", "in", "string", ":", "return", "string", "[", ":", "max_length", "]", "truncated", "=", "''", "for", "word", "in", "string", ".", "split", "(", "separator", ")", ":", "if", "word", ":", "next_len", "=", "len", "(", "truncated", ")", "+", "len", "(", "word", ")", "if", "next_len", "<", "max_length", ":", "truncated", "+=", "'{0}{1}'", ".", "format", "(", "word", ",", "separator", ")", "elif", "next_len", "==", "max_length", ":", "truncated", "+=", "'{0}'", ".", "format", "(", "word", ")", "break", "else", ":", "if", "save_order", ":", "break", "if", "not", "truncated", ":", "# pragma: no cover", "truncated", "=", "string", "[", ":", "max_length", "]", "return", "truncated", ".", "strip", "(", "separator", ")" ]
Truncate a string. :param string (str): string for modification :param max_length (int): output string length :param word_boundary (bool): :param save_order (bool): if True then word order of output string is like input string :param separator (str): separator between words :return:
[ "Truncate", "a", "string", ".", ":", "param", "string", "(", "str", ")", ":", "string", "for", "modification", ":", "param", "max_length", "(", "int", ")", ":", "output", "string", "length", ":", "param", "word_boundary", "(", "bool", ")", ":", ":", "param", "save_order", "(", "bool", ")", ":", "if", "True", "then", "word", "order", "of", "output", "string", "is", "like", "input", "string", ":", "param", "separator", "(", "str", ")", ":", "separator", "between", "words", ":", "return", ":" ]
python
test
30.1
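A few illustrative calls showing the branches above (outputs traced against the logic, not run against the vendored module itself):

smart_truncate('one two three', max_length=9)                      # 'one two t' (hard cut)
smart_truncate('one two three', max_length=9, word_boundary=True)  # 'one two'   (whole words only)
# save_order stops at the first overflowing word instead of pulling in later short words
smart_truncate('aaa bbbb c', max_length=6, word_boundary=True)                   # 'aaa c'
smart_truncate('aaa bbbb c', max_length=6, word_boundary=True, save_order=True)  # 'aaa'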
saltstack/salt
salt/platform/win.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/platform/win.py#L1001-L1012
def grant_winsta_and_desktop(th): ''' Grant the token's user access to the current process's window station and desktop. ''' current_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0] # Add permissions for the sid to the current windows station and thread id. # This prevents windows error 0xC0000142. winsta = win32process.GetProcessWindowStation() set_user_perm(winsta, WINSTA_ALL, current_sid) desktop = win32service.GetThreadDesktop(win32api.GetCurrentThreadId()) set_user_perm(desktop, DESKTOP_ALL, current_sid)
[ "def", "grant_winsta_and_desktop", "(", "th", ")", ":", "current_sid", "=", "win32security", ".", "GetTokenInformation", "(", "th", ",", "win32security", ".", "TokenUser", ")", "[", "0", "]", "# Add permissions for the sid to the current windows station and thread id.", "# This prevents windows error 0xC0000142.", "winsta", "=", "win32process", ".", "GetProcessWindowStation", "(", ")", "set_user_perm", "(", "winsta", ",", "WINSTA_ALL", ",", "current_sid", ")", "desktop", "=", "win32service", ".", "GetThreadDesktop", "(", "win32api", ".", "GetCurrentThreadId", "(", ")", ")", "set_user_perm", "(", "desktop", ",", "DESKTOP_ALL", ",", "current_sid", ")" ]
Grant the token's user access to the current process's window station and desktop.
[ "Grant", "the", "token", "s", "user", "access", "to", "the", "current", "process", "s", "window", "station", "and", "desktop", "." ]
python
train
47.5
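A hedged sketch of producing a token handle to pass in, using standard pywin32 calls; in salt this would be the freshly created user's logon token instead (Windows-only, and it needs appropriate privileges):

import win32api
import win32con
import win32security

# open the current process token purely for illustration
th = win32security.OpenProcessToken(win32api.GetCurrentProcess(),
                                    win32con.TOKEN_ALL_ACCESS)
grant_winsta_and_desktop(th)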
saltstack/salt
salt/modules/defaults.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/defaults.py#L34-L71
def _load(formula): ''' Generates a list of salt://<formula>/defaults.(json|yaml) files and fetches them from the Salt master. Returns first defaults file as python dict. ''' # Compute possibilities _mk_client() paths = [] for ext in ('yaml', 'json'): source_url = salt.utils.url.create(formula + '/defaults.' + ext) paths.append(source_url) # Fetch files from master defaults_files = __context__['cp.fileclient'].cache_files(paths) for file_ in defaults_files: if not file_: # Skip empty string returned by cp.fileclient.cache_files. continue suffix = file_.rsplit('.', 1)[-1] if suffix == 'yaml': loader = salt.utils.yaml.safe_load elif suffix == 'json': loader = salt.utils.json.load else: log.debug("Failed to determine loader for %r", file_) continue if os.path.exists(file_): log.debug("Reading defaults from %r", file_) with salt.utils.files.fopen(file_) as fhr: defaults = loader(fhr) log.debug("Read defaults %r", defaults) return defaults or {}
[ "def", "_load", "(", "formula", ")", ":", "# Compute possibilities", "_mk_client", "(", ")", "paths", "=", "[", "]", "for", "ext", "in", "(", "'yaml'", ",", "'json'", ")", ":", "source_url", "=", "salt", ".", "utils", ".", "url", ".", "create", "(", "formula", "+", "'/defaults.'", "+", "ext", ")", "paths", ".", "append", "(", "source_url", ")", "# Fetch files from master", "defaults_files", "=", "__context__", "[", "'cp.fileclient'", "]", ".", "cache_files", "(", "paths", ")", "for", "file_", "in", "defaults_files", ":", "if", "not", "file_", ":", "# Skip empty string returned by cp.fileclient.cache_files.", "continue", "suffix", "=", "file_", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", "if", "suffix", "==", "'yaml'", ":", "loader", "=", "salt", ".", "utils", ".", "yaml", ".", "safe_load", "elif", "suffix", "==", "'json'", ":", "loader", "=", "salt", ".", "utils", ".", "json", ".", "load", "else", ":", "log", ".", "debug", "(", "\"Failed to determine loader for %r\"", ",", "file_", ")", "continue", "if", "os", ".", "path", ".", "exists", "(", "file_", ")", ":", "log", ".", "debug", "(", "\"Reading defaults from %r\"", ",", "file_", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "file_", ")", "as", "fhr", ":", "defaults", "=", "loader", "(", "fhr", ")", "log", ".", "debug", "(", "\"Read defaults %r\"", ",", "defaults", ")", "return", "defaults", "or", "{", "}" ]
Generates a list of salt://<formula>/defaults.(json|yaml) files and fetches them from the Salt master. Returns first defaults file as python dict.
[ "Generates", "a", "list", "of", "salt", ":", "//", "<formula", ">", "/", "defaults", ".", "(", "json|yaml", ")", "files", "and", "fetches", "them", "from", "the", "Salt", "master", "." ]
python
train
30.921053
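An illustration of what _load() consumes and returns; the formula name and file contents are hypothetical:

# salt://myformula/defaults.yaml on the master might contain:
#
#   timeout: 30
#   features:
#     - logging
#
# once cp.fileclient has cached it locally, _load parses the first match:
defaults = _load('myformula')
# -> {'timeout': 30, 'features': ['logging']}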
oscarbranson/latools
latools/filtering/filt_obj.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filt_obj.py#L409-L450
def grab_filt(self, filt, analyte=None): """ Flexible access to specific filter using any key format. Parameters ---------- f : str, dict or bool either logical filter expression, dict of expressions, or a boolean analyte : str name of analyte the filter is for. Returns ------- array_like boolean filter """ if isinstance(filt, str): if filt in self.components: if analyte is None: return self.components[filt] else: if self.switches[analyte][filt]: return self.components[filt] else: try: ind = self.make_fromkey(filt) except KeyError: print(("\n\n***Filter key invalid. Please consult " "manual and try again.")) elif isinstance(filt, dict): try: ind = self.make_fromkey(filt[analyte]) except ValueError: print(("\n\n***Filter key invalid. Please consult manual " "and try again.\nOR\nAnalyte missing from filter " "key dict.")) elif filt: ind = self.make(analyte) else: ind = ~np.zeros(self.size, dtype=bool) return ind
[ "def", "grab_filt", "(", "self", ",", "filt", ",", "analyte", "=", "None", ")", ":", "if", "isinstance", "(", "filt", ",", "str", ")", ":", "if", "filt", "in", "self", ".", "components", ":", "if", "analyte", "is", "None", ":", "return", "self", ".", "components", "[", "filt", "]", "else", ":", "if", "self", ".", "switches", "[", "analyte", "]", "[", "filt", "]", ":", "return", "self", ".", "components", "[", "filt", "]", "else", ":", "try", ":", "ind", "=", "self", ".", "make_fromkey", "(", "filt", ")", "except", "KeyError", ":", "print", "(", "(", "\"\\n\\n***Filter key invalid. Please consult \"", "\"manual and try again.\"", ")", ")", "elif", "isinstance", "(", "filt", ",", "dict", ")", ":", "try", ":", "ind", "=", "self", ".", "make_fromkey", "(", "filt", "[", "analyte", "]", ")", "except", "ValueError", ":", "print", "(", "(", "\"\\n\\n***Filter key invalid. Please consult manual \"", "\"and try again.\\nOR\\nAnalyte missing from filter \"", "\"key dict.\"", ")", ")", "elif", "filt", ":", "ind", "=", "self", ".", "make", "(", "analyte", ")", "else", ":", "ind", "=", "~", "np", ".", "zeros", "(", "self", ".", "size", ",", "dtype", "=", "bool", ")", "return", "ind" ]
Flexible access to specific filter using any key format.

Parameters
----------
filt : str, dict or bool
    either a logical filter expression, a dict of expressions, or a boolean
analyte : str
    name of the analyte the filter is for.

Returns
-------
array_like
    boolean filter
[ "Flexible", "access", "to", "specific", "filter", "using", "any", "key", "format", "." ]
python
test
33.261905
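A hedged usage sketch; in latools this object typically lives on a data object as .filt, and the filter and analyte names here are assumptions:

# a named component filter, honouring the analyte's on/off switch
ind = filt.grab_filt('thresh_below', analyte='Mg24')

# True builds the combined filter for the analyte via make();
# False returns an all-True array (nothing filtered out)
combined = filt.grab_filt(True, analyte='Mg24')
everything = filt.grab_filt(False)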
skyfielders/python-skyfield
skyfield/iokit.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L319-L335
def parse_deltat_data(fileobj): """Parse the United States Naval Observatory ``deltat.data`` file. Each line file gives the date and the value of Delta T:: 2016 2 1 68.1577 This function returns a 2xN array of raw Julian dates and matching Delta T values. """ array = np.loadtxt(fileobj) year, month, day = array[-1,:3].astype(int) expiration_date = date(year + 1, month, day) year, month, day, delta_t = array.T data = np.array((julian_date(year, month, day), delta_t)) return expiration_date, data
[ "def", "parse_deltat_data", "(", "fileobj", ")", ":", "array", "=", "np", ".", "loadtxt", "(", "fileobj", ")", "year", ",", "month", ",", "day", "=", "array", "[", "-", "1", ",", ":", "3", "]", ".", "astype", "(", "int", ")", "expiration_date", "=", "date", "(", "year", "+", "1", ",", "month", ",", "day", ")", "year", ",", "month", ",", "day", ",", "delta_t", "=", "array", ".", "T", "data", "=", "np", ".", "array", "(", "(", "julian_date", "(", "year", ",", "month", ",", "day", ")", ",", "delta_t", ")", ")", "return", "expiration_date", ",", "data" ]
Parse the United States Naval Observatory ``deltat.data`` file.

Each line of the file gives a date and the value of Delta T::

2016 2 1 68.1577

This function returns a 2xN array of raw Julian dates and matching
Delta T values.
[ "Parse", "the", "United", "States", "Naval", "Observatory", "deltat", ".", "data", "file", "." ]
python
train
31.647059
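A self-contained check of the parser above, feeding it two sample lines in the deltat.data layout (the values are illustrative):

from io import BytesIO

raw = b"2016  1  1  68.1024\n2016  2  1  68.1577\n"
expiration_date, data = parse_deltat_data(BytesIO(raw))
# expiration_date -> datetime.date(2017, 2, 1), one year past the last entry
# data[0] holds Julian dates, data[1] the matching Delta T values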
bwohlberg/sporco
sporco/linalg.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L866-L901
def lu_solve_ATAI(A, rho, b, lu, piv, check_finite=True):
    r"""
    Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`.

    Parameters
    ----------
    A : array_like
      Matrix :math:`A`
    rho : float
      Scalar :math:`\rho`
    b : array_like
      Vector :math:`\mathbf{b}` or matrix :math:`B`
    lu : array_like
      Matrix containing U in its upper triangle, and L in its lower triangle,
      as returned by :func:`scipy.linalg.lu_factor`
    piv : array_like
      Pivot indices representing the permutation matrix P, as returned by
      :func:`scipy.linalg.lu_factor`
    check_finite : bool, optional (default True)
      Flag indicating whether the input array should be checked for Inf
      and NaN values

    Returns
    -------
    x : ndarray
      Solution to the linear system
    """

    N, M = A.shape
    if N >= M:
        x = linalg.lu_solve((lu, piv), b, check_finite=check_finite)
    else:
        x = (b - A.T.dot(linalg.lu_solve((lu, piv), A.dot(b), 1,
                                         check_finite=check_finite))) / rho
    return x
[ "def", "lu_solve_ATAI", "(", "A", ",", "rho", ",", "b", ",", "lu", ",", "piv", ",", "check_finite", "=", "True", ")", ":", "N", ",", "M", "=", "A", ".", "shape", "if", "N", ">=", "M", ":", "x", "=", "linalg", ".", "lu_solve", "(", "(", "lu", ",", "piv", ")", ",", "b", ",", "check_finite", "=", "check_finite", ")", "else", ":", "x", "=", "(", "b", "-", "A", ".", "T", ".", "dot", "(", "linalg", ".", "lu_solve", "(", "(", "lu", ",", "piv", ")", ",", "A", ".", "dot", "(", "b", ")", ",", "1", ",", "check_finite", "=", "check_finite", ")", ")", ")", "/", "rho", "return", "x" ]
r""" Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}` or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`. Parameters ---------- A : array_like Matrix :math:`A` rho : float Scalar :math:`\rho` b : array_like Vector :math:`\mathbf{b}` or matrix :math:`B` lu : array_like Matrix containing U in its upper triangle, and L in its lower triangle, as returned by :func:`scipy.linalg.lu_factor` piv : array_like Pivot indices representing the permutation matrix P, as returned by :func:`scipy.linalg.lu_factor` check_finite : bool, optional (default False) Flag indicating whether the input array should be checked for Inf and NaN values Returns ------- x : ndarray Solution to the linear system
[ "r", "Solve", "the", "linear", "system", ":", "math", ":", "(", "A^T", "A", "+", "\\", "rho", "I", ")", "\\", "mathbf", "{", "x", "}", "=", "\\", "mathbf", "{", "b", "}", "or", ":", "math", ":", "(", "A^T", "A", "+", "\\", "rho", "I", ")", "X", "=", "B", "using", ":", "func", ":", "scipy", ".", "linalg", ".", "lu_solve", "." ]
python
train
31.805556
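A usage sketch for the tall (N >= M) branch, assuming lu and piv come from scipy.linalg.lu_factor applied to A^T A + rho*I:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((8, 5))           # N >= M
rho = 0.1
b = rng.standard_normal(5)
lu, piv = linalg.lu_factor(A.T.dot(A) + rho * np.identity(5))
x = lu_solve_ATAI(A, rho, b, lu, piv)
# x solves (A^T A + rho*I) x = b
assert np.allclose((A.T.dot(A) + rho * np.identity(5)).dot(x), b)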
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L1472-L1474
def remove(self, item, count=0):
        """ Removes @item from the list for @count number of occurrences """
        self._client.lrem(self.key_prefix, count, self._dumps(item))
[ "def", "remove", "(", "self", ",", "item", ",", "count", "=", "0", ")", ":", "self", ".", "_client", ".", "lrem", "(", "self", ".", "key_prefix", ",", "count", ",", "self", ".", "_dumps", "(", "item", ")", ")" ]
Removes @item from the list for @count number of occurrences
[ "Removes" ]
python
train
58.333333
facelessuser/backrefs
backrefs/bregex.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/bregex.py#L387-L391
def expandf(m, format): # noqa A002 """Expand the string using the format replace pattern or function.""" _assert_expandable(format, True) return _apply_replace_backrefs(m, format, flags=FORMAT)
[ "def", "expandf", "(", "m", ",", "format", ")", ":", "# noqa A002", "_assert_expandable", "(", "format", ",", "True", ")", "return", "_apply_replace_backrefs", "(", "m", ",", "format", ",", "flags", "=", "FORMAT", ")" ]
Expand the string using the format replace pattern or function.
[ "Expand", "the", "string", "using", "the", "format", "replace", "pattern", "or", "function", "." ]
python
train
40.8
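A usage sketch, assuming a match object from the third-party regex module (which backrefs' bregex wraps); FORMAT templates use {group} placeholders rather than backslash references:

import regex
from backrefs import bregex

m = regex.match(r'(\w+) (\w+)', 'hello world')
bregex.expandf(m, '{2} {1}')   # -> 'world hello'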
MacHu-GWU/uszipcode-project
uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L207-L266
def smart_insert(cls, engine_or_session, data, minimal_size=5, op_counter=0):
        """
        An optimized Insert strategy.

        :return: number of insert operations executed. Usually it is
            greatly smaller than ``len(data)``.

        For inserts, if we know in advance that no IntegrityError will occur,
        a bulk insert is far faster than inserting row by row. When that cannot
        be known in advance, the following strategy is used:

        1. Try a bulk insert first; since nothing is committed until the end,
           it is very fast.
        2. If it fails, split the data into chunks of roughly sqrt(n) rows and
           repeat this logic on each chunk.
        3. If a chunk still fails, keep splitting; once a chunk is smaller than
           a threshold, fall back to row-by-row inserts until everything
           succeeds.

        This strategy needs an extra sqrt(nbytes) of memory, which is tiny
        compared to the original data, and is on average time-optimal across
        scenarios.
        """
        ses, auto_close = ensure_session(engine_or_session)

        if isinstance(data, list):
            # first try a bulk insert
            try:
                ses.add_all(data)
                ses.commit()
                op_counter += 1
            # the bulk insert failed
            except (IntegrityError, FlushError):
                ses.rollback()
                # check how many rows we have
                n = len(data)
                # if there are more rows than the threshold
                if n >= minimal_size ** 2:
                    # split into chunks and recurse
                    n_chunk = math.floor(math.sqrt(n))
                    for chunk in grouper_list(data, n_chunk):
                        op_counter = cls.smart_insert(
                            ses, chunk, minimal_size, op_counter)
                # otherwise insert row by row
                else:
                    for obj in data:
                        try:
                            ses.add(obj)
                            ses.commit()
                            op_counter += 1
                        except (IntegrityError, FlushError):
                            ses.rollback()
        else:
            try:
                ses.add(data)
                ses.commit()
            except (IntegrityError, FlushError):
                ses.rollback()

        if auto_close:
            ses.close()

        return op_counter
[ "def", "smart_insert", "(", "cls", ",", "engine_or_session", ",", "data", ",", "minimal_size", "=", "5", ",", "op_counter", "=", "0", ")", ":", "ses", ",", "auto_close", "=", "ensure_session", "(", "engine_or_session", ")", "if", "isinstance", "(", "data", ",", "list", ")", ":", "# 首先进行尝试bulk insert", "try", ":", "ses", ".", "add_all", "(", "data", ")", "ses", ".", "commit", "(", ")", "op_counter", "+=", "1", "# 失败了", "except", "(", "IntegrityError", ",", "FlushError", ")", ":", "ses", ".", "rollback", "(", ")", "# 分析数据量", "n", "=", "len", "(", "data", ")", "# 如果数据条数多于一定数量", "if", "n", ">=", "minimal_size", "**", "2", ":", "# 则进行分包", "n_chunk", "=", "math", ".", "floor", "(", "math", ".", "sqrt", "(", "n", ")", ")", "for", "chunk", "in", "grouper_list", "(", "data", ",", "n_chunk", ")", ":", "op_counter", "=", "cls", ".", "smart_insert", "(", "ses", ",", "chunk", ",", "minimal_size", ",", "op_counter", ")", "# 否则则一条条地逐条插入", "else", ":", "for", "obj", "in", "data", ":", "try", ":", "ses", ".", "add", "(", "obj", ")", "ses", ".", "commit", "(", ")", "op_counter", "+=", "1", "except", "(", "IntegrityError", ",", "FlushError", ")", ":", "ses", ".", "rollback", "(", ")", "else", ":", "try", ":", "ses", ".", "add", "(", "data", ")", "ses", ".", "commit", "(", ")", "except", "(", "IntegrityError", ",", "FlushError", ")", ":", "ses", ".", "rollback", "(", ")", "if", "auto_close", ":", "ses", ".", "close", "(", ")", "return", "op_counter" ]
An optimized Insert strategy.

        :return: number of insert operations executed. Usually it is
            greatly smaller than ``len(data)``.

        For inserts, if we know in advance that no IntegrityError will occur,
        a bulk insert is far faster than inserting row by row. When that cannot
        be known in advance, the following strategy is used:

        1. Try a bulk insert first; since nothing is committed until the end,
           it is very fast.
        2. If it fails, split the data into chunks of roughly sqrt(n) rows and
           repeat this logic on each chunk.
        3. If a chunk still fails, keep splitting; once a chunk is smaller than
           a threshold, fall back to row-by-row inserts until everything
           succeeds.

        This strategy needs an extra sqrt(nbytes) of memory, which is tiny
        compared to the original data, and is on average time-optimal across
        scenarios.
[ "An", "optimized", "Insert", "strategy", "." ]
python
train
31.983333
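A hedged usage sketch; User is a hypothetical declarative model built on the extended base, and engine is an ordinary SQLAlchemy engine:

users = [User(id=i, name='user-%d' % i) for i in range(1000)]

# one bulk commit when nothing collides; recursive sqrt-sized
# chunking, then row-by-row inserts, when it does
n_ops = User.smart_insert(engine, users)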
clusterpoint/python-client-api
pycps/response.py
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L226-L266
def get_documents(self, doc_format='dict'):
        """ Get the documents returned from Storage in this response.

        Keyword args:
            doc_format -- Specifies the doc_format for the returned documents.
                    Can be 'dict', 'etree', 'list-etree', 'list-string' or
                    'string'. Default is 'dict'.

        Returns:
            A dict where keys are document ids and values depend on the
            requested doc_format:
                A dict representation of the document (see etree_to_dict());
                An etree Element representing the document;
                A raw XML document string.

        Raises:
            ParameterError -- The doc_format value is not allowed.
        """
        def get_doc_id(root, rel_path):
            if not rel_path:
                return root.text
            else:
                child = root.find(rel_path[0])
                if child is None:
                    return None
                return get_doc_id(child, rel_path[1:])

        if doc_format == 'dict':
            return dict([(get_doc_id(document, self._id_xpath),
                          etree_to_dict(document)['document'])
                         for document in self._get_doc_list()])
        elif doc_format == 'etree':
            return dict([(get_doc_id(document, self._id_xpath), document)
                         for document in self._get_doc_list()])
        elif doc_format == 'list-etree':
            return self._get_doc_list()
        elif doc_format == 'list-string':
            return list([(ET.tostring(document))
                         for document in self._get_doc_list()])
        elif doc_format in ('', None, 'string'):
            return dict([(get_doc_id(document, self._id_xpath),
                          ET.tostring(document))
                         for document in self._get_doc_list()])
        else:
            raise ParameterError("doc_format=" + doc_format)
[ "def", "get_documents", "(", "self", ",", "doc_format", "=", "'dict'", ")", ":", "def", "get_doc_id", "(", "root", ",", "rel_path", ")", ":", "if", "not", "rel_path", ":", "return", "root", ".", "text", "else", ":", "child", "=", "root", ".", "find", "(", "rel_path", "[", "0", "]", ")", "if", "child", "is", "None", ":", "return", "None", "return", "get_doc_id", "(", "child", ",", "rel_path", "[", "1", ":", "]", ")", "if", "doc_format", "==", "'dict'", ":", "return", "dict", "(", "[", "(", "get_doc_id", "(", "document", ",", "self", ".", "_id_xpath", ")", ",", "etree_to_dict", "(", "document", ")", "[", "'document'", "]", ")", "for", "document", "in", "self", ".", "_get_doc_list", "(", ")", "]", ")", "elif", "doc_format", "==", "'etree'", ":", "return", "dict", "(", "[", "(", "get_doc_id", "(", "document", ",", "self", ".", "_id_xpath", ")", ",", "document", ")", "for", "document", "in", "self", ".", "_get_doc_list", "(", ")", "]", ")", "elif", "doc_format", "==", "'list-etree'", ":", "return", "self", ".", "_get_doc_list", "(", ")", "elif", "doc_format", "==", "'list-string'", ":", "return", "list", "(", "[", "(", "ET", ".", "tostring", "(", "document", ")", ")", "for", "document", "in", "self", ".", "_get_doc_list", "(", ")", "]", ")", "elif", "doc_format", "in", "(", "''", ",", "None", ",", "'string'", ")", ":", "return", "dict", "(", "[", "(", "get_doc_id", "(", "document", ",", "self", ".", "_id_xpath", ")", ",", "ET", ".", "tostring", "(", "document", ")", ")", "for", "document", "in", "self", ".", "_get_doc_list", "(", ")", "]", ")", "else", ":", "raise", "ParameterError", "(", "\"doc_format=\"", "+", "doc_format", ")" ]
Get the documents returned from Storage in this response.

        Keyword args:
            doc_format -- Specifies the doc_format for the returned documents.
                    Can be 'dict', 'etree', 'list-etree', 'list-string' or
                    'string'. Default is 'dict'.

        Returns:
            A dict where keys are document ids and values depend on the
            requested doc_format:
                A dict representation of the document (see etree_to_dict());
                An etree Element representing the document;
                A raw XML document string.

        Raises:
            ParameterError -- The doc_format value is not allowed.
[ "Get", "the", "documents", "returned", "from", "Storege", "in", "this", "response", "." ]
python
train
44.902439
ArduPilot/MAVProxy
MAVProxy/modules/lib/mp_module.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/mp_module.py#L150-L156
def dist_string(self, val_meters): '''return a distance as a string''' if self.settings.dist_unit == 'nm': return "%.1fnm" % (val_meters * 0.000539957) if self.settings.dist_unit == 'miles': return "%.1fmiles" % (val_meters * 0.000621371) return "%um" % val_meters
[ "def", "dist_string", "(", "self", ",", "val_meters", ")", ":", "if", "self", ".", "settings", ".", "dist_unit", "==", "'nm'", ":", "return", "\"%.1fnm\"", "%", "(", "val_meters", "*", "0.000539957", ")", "if", "self", ".", "settings", ".", "dist_unit", "==", "'miles'", ":", "return", "\"%.1fmiles\"", "%", "(", "val_meters", "*", "0.000621371", ")", "return", "\"%um\"", "%", "val_meters" ]
return a distance as a string
[ "return", "a", "distance", "as", "a", "string" ]
python
train
44.857143
novopl/peltak
src/peltak/core/git.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/git.py#L107-L119
def branches(self): # type: () -> List[str] """ List of all branches this commit is a part of. """ if self._branches is None: cmd = 'git branch --contains {}'.format(self.sha1) out = shell.run( cmd, capture=True, never_pretend=True ).stdout.strip() self._branches = [x.strip('* \t\n') for x in out.splitlines()] return self._branches
[ "def", "branches", "(", "self", ")", ":", "# type: () -> List[str]", "if", "self", ".", "_branches", "is", "None", ":", "cmd", "=", "'git branch --contains {}'", ".", "format", "(", "self", ".", "sha1", ")", "out", "=", "shell", ".", "run", "(", "cmd", ",", "capture", "=", "True", ",", "never_pretend", "=", "True", ")", ".", "stdout", ".", "strip", "(", ")", "self", ".", "_branches", "=", "[", "x", ".", "strip", "(", "'* \\t\\n'", ")", "for", "x", "in", "out", ".", "splitlines", "(", ")", "]", "return", "self", ".", "_branches" ]
List of all branches this commit is a part of.
[ "List", "of", "all", "branches", "this", "commit", "is", "a", "part", "of", "." ]
python
train
34.615385
bennyrowland/suspect
suspect/io/philips.py
https://github.com/bennyrowland/suspect/blob/c09ab0a5013c5a199218214cdd791659243d7e41/suspect/io/philips.py#L69-L118
def _vax_to_ieee_single_float(data): """Converts a float in Vax format to IEEE format. data should be a single string of chars that have been read in from a binary file. These will be processed 4 at a time into float values. Thus the total number of byte/chars in the string should be divisible by 4. Based on VAX data organization in a byte file, we need to do a bunch of bitwise operations to separate out the numbers that correspond to the sign, the exponent and the fraction portions of this floating point number role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF bits : 1 2 9 10 32 bytes : byte2 byte1 byte4 byte3 """ f = [] nfloat = int(len(data) / 4) for i in range(nfloat): byte2 = data[0 + i*4] byte1 = data[1 + i*4] byte4 = data[2 + i*4] byte3 = data[3 + i*4] # hex 0x80 = binary mask 10000000 # hex 0x7f = binary mask 01111111 sign = (byte1 & 0x80) >> 7 expon = ((byte1 & 0x7f) << 1) + ((byte2 & 0x80) >> 7) fract = ((byte2 & 0x7f) << 16) + (byte3 << 8) + byte4 if sign == 0: sign_mult = 1.0 else: sign_mult = -1.0 if 0 < expon: # note 16777216.0 == 2^24 val = sign_mult * (0.5 + (fract/16777216.0)) * pow(2.0, expon - 128.0) f.append(val) elif expon == 0 and sign == 0: f.append(0) else: f.append(0) # may want to raise an exception here ... return f
[ "def", "_vax_to_ieee_single_float", "(", "data", ")", ":", "f", "=", "[", "]", "nfloat", "=", "int", "(", "len", "(", "data", ")", "/", "4", ")", "for", "i", "in", "range", "(", "nfloat", ")", ":", "byte2", "=", "data", "[", "0", "+", "i", "*", "4", "]", "byte1", "=", "data", "[", "1", "+", "i", "*", "4", "]", "byte4", "=", "data", "[", "2", "+", "i", "*", "4", "]", "byte3", "=", "data", "[", "3", "+", "i", "*", "4", "]", "# hex 0x80 = binary mask 10000000", "# hex 0x7f = binary mask 01111111", "sign", "=", "(", "byte1", "&", "0x80", ")", ">>", "7", "expon", "=", "(", "(", "byte1", "&", "0x7f", ")", "<<", "1", ")", "+", "(", "(", "byte2", "&", "0x80", ")", ">>", "7", ")", "fract", "=", "(", "(", "byte2", "&", "0x7f", ")", "<<", "16", ")", "+", "(", "byte3", "<<", "8", ")", "+", "byte4", "if", "sign", "==", "0", ":", "sign_mult", "=", "1.0", "else", ":", "sign_mult", "=", "-", "1.0", "if", "0", "<", "expon", ":", "# note 16777216.0 == 2^24", "val", "=", "sign_mult", "*", "(", "0.5", "+", "(", "fract", "/", "16777216.0", ")", ")", "*", "pow", "(", "2.0", ",", "expon", "-", "128.0", ")", "f", ".", "append", "(", "val", ")", "elif", "expon", "==", "0", "and", "sign", "==", "0", ":", "f", ".", "append", "(", "0", ")", "else", ":", "f", ".", "append", "(", "0", ")", "# may want to raise an exception here ...", "return", "f" ]
Converts a float in Vax format to IEEE format. data should be a single string of chars that have been read in from a binary file. These will be processed 4 at a time into float values. Thus the total number of byte/chars in the string should be divisible by 4. Based on VAX data organization in a byte file, we need to do a bunch of bitwise operations to separate out the numbers that correspond to the sign, the exponent and the fraction portions of this floating point number role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF bits : 1 2 9 10 32 bytes : byte2 byte1 byte4 byte3
[ "Converts", "a", "float", "in", "Vax", "format", "to", "IEEE", "format", "." ]
python
train
32.16
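A worked sanity check for the decoder above (Python 3, where indexing a bytes object yields ints). The VAX-F pattern for 1.0 is sign 0, biased exponent 129, zero fraction, stored in (byte2, byte1, byte4, byte3) order:

buf = bytes([0x80, 0x40, 0x00, 0x00])
_vax_to_ieee_single_float(buf)   # -> [1.0]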
madprime/cgivar2gvcf
cgivar2gvcf/__init__.py
https://github.com/madprime/cgivar2gvcf/blob/13b4cd8da08669f7e4b0ceed77a7a17082f91037/cgivar2gvcf/__init__.py#L65-L117
def process_full_position(data, header, var_only=False): """ Return genetic data when all alleles called on same line. Returns an array containing one item, a tuple of five items: (string) chromosome (string) start position (1-based) (array of strings) matching dbSNP entries (string) reference allele sequence (array of strings) the genome's allele sequences """ feature_type = data[header['varType']] # Skip unmatchable, uncovered, or pseudoautosomal-in-X if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')): return None if var_only and feature_type in ['no-call', 'ref']: return None filters = [] if feature_type == 'no-call': filters.append('NOCALL') if 'varQuality' in header: if 'VQLOW' in data[header['varQuality']]: filters.append('VQLOW') else: var_filter = data[header['varFilter']] if var_filter and not var_filter == "PASS": filters = filters + var_filter.split(';') chrom = data[header['chromosome']] start = data[header['begin']] ref_allele = data[header['reference']] alleles = [data[header['alleleSeq']]] dbsnp_data = [] dbsnp_data = data[header['xRef']].split(';') assert data[header['ploidy']] in ['1', '2'] if feature_type == 'ref' or feature_type == 'no-call': return [{'chrom': chrom, 'start': start, 'dbsnp_data': dbsnp_data, 'ref_seq': ref_allele, 'alleles': alleles, 'allele_count': data[header['ploidy']], 'filters': filters, 'end': data[header['end']]}] else: return [{'chrom': chrom, 'start': start, 'dbsnp_data': dbsnp_data, 'ref_seq': ref_allele, 'alleles': alleles, 'allele_count': data[header['ploidy']], 'filters': filters}]
[ "def", "process_full_position", "(", "data", ",", "header", ",", "var_only", "=", "False", ")", ":", "feature_type", "=", "data", "[", "header", "[", "'varType'", "]", "]", "# Skip unmatchable, uncovered, or pseudoautosomal-in-X", "if", "(", "feature_type", "==", "'no-ref'", "or", "feature_type", ".", "startswith", "(", "'PAR-called-in-X'", ")", ")", ":", "return", "None", "if", "var_only", "and", "feature_type", "in", "[", "'no-call'", ",", "'ref'", "]", ":", "return", "None", "filters", "=", "[", "]", "if", "feature_type", "==", "'no-call'", ":", "filters", ".", "append", "(", "'NOCALL'", ")", "if", "'varQuality'", "in", "header", ":", "if", "'VQLOW'", "in", "data", "[", "header", "[", "'varQuality'", "]", "]", ":", "filters", ".", "append", "(", "'VQLOW'", ")", "else", ":", "var_filter", "=", "data", "[", "header", "[", "'varFilter'", "]", "]", "if", "var_filter", "and", "not", "var_filter", "==", "\"PASS\"", ":", "filters", "=", "filters", "+", "var_filter", ".", "split", "(", "';'", ")", "chrom", "=", "data", "[", "header", "[", "'chromosome'", "]", "]", "start", "=", "data", "[", "header", "[", "'begin'", "]", "]", "ref_allele", "=", "data", "[", "header", "[", "'reference'", "]", "]", "alleles", "=", "[", "data", "[", "header", "[", "'alleleSeq'", "]", "]", "]", "dbsnp_data", "=", "[", "]", "dbsnp_data", "=", "data", "[", "header", "[", "'xRef'", "]", "]", ".", "split", "(", "';'", ")", "assert", "data", "[", "header", "[", "'ploidy'", "]", "]", "in", "[", "'1'", ",", "'2'", "]", "if", "feature_type", "==", "'ref'", "or", "feature_type", "==", "'no-call'", ":", "return", "[", "{", "'chrom'", ":", "chrom", ",", "'start'", ":", "start", ",", "'dbsnp_data'", ":", "dbsnp_data", ",", "'ref_seq'", ":", "ref_allele", ",", "'alleles'", ":", "alleles", ",", "'allele_count'", ":", "data", "[", "header", "[", "'ploidy'", "]", "]", ",", "'filters'", ":", "filters", ",", "'end'", ":", "data", "[", "header", "[", "'end'", "]", "]", "}", "]", "else", ":", "return", "[", "{", "'chrom'", ":", "chrom", ",", "'start'", ":", "start", ",", "'dbsnp_data'", ":", "dbsnp_data", ",", "'ref_seq'", ":", "ref_allele", ",", "'alleles'", ":", "alleles", ",", "'allele_count'", ":", "data", "[", "header", "[", "'ploidy'", "]", "]", ",", "'filters'", ":", "filters", "}", "]" ]
Return genetic data when all alleles called on same line. Returns an array containing one item, a tuple of five items: (string) chromosome (string) start position (1-based) (array of strings) matching dbSNP entries (string) reference allele sequence (array of strings) the genome's allele sequences
[ "Return", "genetic", "data", "when", "all", "alleles", "called", "on", "same", "line", "." ]
python
train
37.056604
ejeschke/ginga
ginga/util/wcs.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/wcs.py#L304-L318
def get_rotation_and_scale(header, skew_threshold=0.001): """Calculate rotation and CDELT.""" ((xrot, yrot), (cdelt1, cdelt2)) = get_xy_rotation_and_scale(header) if math.fabs(xrot) - math.fabs(yrot) > skew_threshold: raise ValueError("Skew detected: xrot=%.4f yrot=%.4f" % ( xrot, yrot)) rot = yrot lonpole = float(header.get('LONPOLE', 180.0)) if lonpole != 180.0: rot += 180.0 - lonpole return (rot, cdelt1, cdelt2)
[ "def", "get_rotation_and_scale", "(", "header", ",", "skew_threshold", "=", "0.001", ")", ":", "(", "(", "xrot", ",", "yrot", ")", ",", "(", "cdelt1", ",", "cdelt2", ")", ")", "=", "get_xy_rotation_and_scale", "(", "header", ")", "if", "math", ".", "fabs", "(", "xrot", ")", "-", "math", ".", "fabs", "(", "yrot", ")", ">", "skew_threshold", ":", "raise", "ValueError", "(", "\"Skew detected: xrot=%.4f yrot=%.4f\"", "%", "(", "xrot", ",", "yrot", ")", ")", "rot", "=", "yrot", "lonpole", "=", "float", "(", "header", ".", "get", "(", "'LONPOLE'", ",", "180.0", ")", ")", "if", "lonpole", "!=", "180.0", ":", "rot", "+=", "180.0", "-", "lonpole", "return", "(", "rot", ",", "cdelt1", ",", "cdelt2", ")" ]
Calculate rotation and CDELT.
[ "Calculate", "rotation", "and", "CDELT", "." ]
python
train
31.2
greenelab/PathCORE-T
pathcore/network.py
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L130-L153
def read_network_file(self, path_to_network_file): """ Parameters ----------- path_to_network_file : str Expects a network file with columns "pw0" and "pw1." A "features" column that specifies the features where the (pw0, pw1) edge is present will assign a weight to the edge, though it is not required (edge will have weight 1 if no "features" column exists). "features" format: space-separated feature numbers, e.g. 0.0 1.0 2.0 """ network_df = pd.read_table(path_to_network_file) network_edges = {} for _, row in network_df.iterrows(): vertex0_id = self.add_pathway(row["pw0"]) vertex1_id = self.add_pathway(row["pw1"]) edge_id = self.edge_tuple(vertex0_id, vertex1_id) if "features" in row: network_edges[edge_id] = \ [int(float(f)) for f in row["features"].split(" ")] else: network_edges[edge_id] = [] self._augment_network(network_edges)
[ "def", "read_network_file", "(", "self", ",", "path_to_network_file", ")", ":", "network_df", "=", "pd", ".", "read_table", "(", "path_to_network_file", ")", "network_edges", "=", "{", "}", "for", "_", ",", "row", "in", "network_df", ".", "iterrows", "(", ")", ":", "vertex0_id", "=", "self", ".", "add_pathway", "(", "row", "[", "\"pw0\"", "]", ")", "vertex1_id", "=", "self", ".", "add_pathway", "(", "row", "[", "\"pw1\"", "]", ")", "edge_id", "=", "self", ".", "edge_tuple", "(", "vertex0_id", ",", "vertex1_id", ")", "if", "\"features\"", "in", "row", ":", "network_edges", "[", "edge_id", "]", "=", "[", "int", "(", "float", "(", "f", ")", ")", "for", "f", "in", "row", "[", "\"features\"", "]", ".", "split", "(", "\" \"", ")", "]", "else", ":", "network_edges", "[", "edge_id", "]", "=", "[", "]", "self", ".", "_augment_network", "(", "network_edges", ")" ]
Parameters ----------- path_to_network_file : str Expects a network file with columns "pw0" and "pw1." A "features" column that specifies the features where the (pw0, pw1) edge is present will assign a weight to the edge, though it is not required (edge will have weight 1 if no "features" column exists). "features" format: space-separated feature numbers, e.g. 0.0 1.0 2.0
[ "Parameters", "-----------", "path_to_network_file", ":", "str", "Expects", "a", "network", "file", "with", "columns", "pw0", "and", "pw1", ".", "A", "features", "column", "that", "specifies", "the", "features", "where", "the", "(", "pw0", "pw1", ")", "edge", "is", "present", "will", "assign", "a", "weight", "to", "the", "edge", "though", "it", "is", "not", "required", "(", "edge", "will", "have", "weight", "1", "if", "no", "features", "column", "exists", ")", ".", "features", "format", ":", "space", "-", "separated", "feature", "numbers", "e", ".", "g", ".", "0", ".", "0", "1", ".", "0", "2", ".", "0" ]
python
train
44.625
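An illustration of the file layout the reader accepts; the pathway names are hypothetical and network stands for an instance of the owning class:

# network.tsv (tab-separated):
#
#   pw0         pw1         features
#   Pathway A   Pathway B   0.0 3.0 7.0
#   Pathway A   Pathway C   1.0
#
network.read_network_file('network.tsv')
# -> three pathway vertices; two edges weighted by their feature lists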
Microsoft/nni
src/sdk/pynni/nni/bohb_advisor/config_generator.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/bohb_advisor/config_generator.py#L114-L205
def sample_from_largest_budget(self, info_dict): """We opted for a single multidimensional KDE compared to the hierarchy of one-dimensional KDEs used in TPE. The dimensional is seperated by budget. This function sample a configuration from largest budget. Firstly we sample "num_samples" configurations, then prefer one with the largest l(x)/g(x). Parameters: ----------- info_dict: dict record the information of this configuration Returns ------- dict: new configuration named sample dict: info_dict, record the information of this configuration """ best = np.inf best_vector = None budget = max(self.kde_models.keys()) l = self.kde_models[budget]['good'].pdf g = self.kde_models[budget]['bad'].pdf minimize_me = lambda x: max(1e-32, g(x))/max(l(x), 1e-32) kde_good = self.kde_models[budget]['good'] kde_bad = self.kde_models[budget]['bad'] for i in range(self.num_samples): idx = np.random.randint(0, len(kde_good.data)) datum = kde_good.data[idx] vector = [] for m, bw, t in zip(datum, kde_good.bw, self.vartypes): bw = max(bw, self.min_bandwidth) if t == 0: bw = self.bw_factor*bw vector.append(sps.truncnorm.rvs(-m/bw, (1-m)/bw, loc=m, scale=bw)) else: if np.random.rand() < (1-bw): vector.append(int(m)) else: vector.append(np.random.randint(t)) val = minimize_me(vector) if not np.isfinite(val): logger.warning('sampled vector: %s has EI value %s'%(vector, val)) logger.warning("data in the KDEs:\n%s\n%s"%(kde_good.data, kde_bad.data)) logger.warning("bandwidth of the KDEs:\n%s\n%s"%(kde_good.bw, kde_bad.bw)) logger.warning("l(x) = %s"%(l(vector))) logger.warning("g(x) = %s"%(g(vector))) # right now, this happens because a KDE does not contain all values for a categorical parameter # this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one # if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, # so it shouldn't be terrible. if np.isfinite(l(vector)): best_vector = vector break if val < best: best = val best_vector = vector if best_vector is None: logger.debug("Sampling based optimization with %i samples failed -> using random configuration"%self.num_samples) sample = self.configspace.sample_configuration().get_dictionary() info_dict['model_based_pick'] = False else: logger.debug('best_vector: {}, {}, {}, {}'.format(best_vector, best, l(best_vector), g(best_vector))) for i, hp_value in enumerate(best_vector): if isinstance( self.configspace.get_hyperparameter( self.configspace.get_hyperparameter_by_idx(i) ), ConfigSpace.hyperparameters.CategoricalHyperparameter ): best_vector[i] = int(np.rint(best_vector[i])) sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary() sample = ConfigSpace.util.deactivate_inactive_hyperparameters( configuration_space=self.configspace, configuration=sample) info_dict['model_based_pick'] = True return sample, info_dict
[ "def", "sample_from_largest_budget", "(", "self", ",", "info_dict", ")", ":", "best", "=", "np", ".", "inf", "best_vector", "=", "None", "budget", "=", "max", "(", "self", ".", "kde_models", ".", "keys", "(", ")", ")", "l", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'good'", "]", ".", "pdf", "g", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'bad'", "]", ".", "pdf", "minimize_me", "=", "lambda", "x", ":", "max", "(", "1e-32", ",", "g", "(", "x", ")", ")", "/", "max", "(", "l", "(", "x", ")", ",", "1e-32", ")", "kde_good", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'good'", "]", "kde_bad", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'bad'", "]", "for", "i", "in", "range", "(", "self", ".", "num_samples", ")", ":", "idx", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len", "(", "kde_good", ".", "data", ")", ")", "datum", "=", "kde_good", ".", "data", "[", "idx", "]", "vector", "=", "[", "]", "for", "m", ",", "bw", ",", "t", "in", "zip", "(", "datum", ",", "kde_good", ".", "bw", ",", "self", ".", "vartypes", ")", ":", "bw", "=", "max", "(", "bw", ",", "self", ".", "min_bandwidth", ")", "if", "t", "==", "0", ":", "bw", "=", "self", ".", "bw_factor", "*", "bw", "vector", ".", "append", "(", "sps", ".", "truncnorm", ".", "rvs", "(", "-", "m", "/", "bw", ",", "(", "1", "-", "m", ")", "/", "bw", ",", "loc", "=", "m", ",", "scale", "=", "bw", ")", ")", "else", ":", "if", "np", ".", "random", ".", "rand", "(", ")", "<", "(", "1", "-", "bw", ")", ":", "vector", ".", "append", "(", "int", "(", "m", ")", ")", "else", ":", "vector", ".", "append", "(", "np", ".", "random", ".", "randint", "(", "t", ")", ")", "val", "=", "minimize_me", "(", "vector", ")", "if", "not", "np", ".", "isfinite", "(", "val", ")", ":", "logger", ".", "warning", "(", "'sampled vector: %s has EI value %s'", "%", "(", "vector", ",", "val", ")", ")", "logger", ".", "warning", "(", "\"data in the KDEs:\\n%s\\n%s\"", "%", "(", "kde_good", ".", "data", ",", "kde_bad", ".", "data", ")", ")", "logger", ".", "warning", "(", "\"bandwidth of the KDEs:\\n%s\\n%s\"", "%", "(", "kde_good", ".", "bw", ",", "kde_bad", ".", "bw", ")", ")", "logger", ".", "warning", "(", "\"l(x) = %s\"", "%", "(", "l", "(", "vector", ")", ")", ")", "logger", ".", "warning", "(", "\"g(x) = %s\"", "%", "(", "g", "(", "vector", ")", ")", ")", "# right now, this happens because a KDE does not contain all values for a categorical parameter", "# this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one", "# if the good_kde has a finite value, i.e. 
there is no config with that value in the bad kde,", "# so it shouldn't be terrible.", "if", "np", ".", "isfinite", "(", "l", "(", "vector", ")", ")", ":", "best_vector", "=", "vector", "break", "if", "val", "<", "best", ":", "best", "=", "val", "best_vector", "=", "vector", "if", "best_vector", "is", "None", ":", "logger", ".", "debug", "(", "\"Sampling based optimization with %i samples failed -> using random configuration\"", "%", "self", ".", "num_samples", ")", "sample", "=", "self", ".", "configspace", ".", "sample_configuration", "(", ")", ".", "get_dictionary", "(", ")", "info_dict", "[", "'model_based_pick'", "]", "=", "False", "else", ":", "logger", ".", "debug", "(", "'best_vector: {}, {}, {}, {}'", ".", "format", "(", "best_vector", ",", "best", ",", "l", "(", "best_vector", ")", ",", "g", "(", "best_vector", ")", ")", ")", "for", "i", ",", "hp_value", "in", "enumerate", "(", "best_vector", ")", ":", "if", "isinstance", "(", "self", ".", "configspace", ".", "get_hyperparameter", "(", "self", ".", "configspace", ".", "get_hyperparameter_by_idx", "(", "i", ")", ")", ",", "ConfigSpace", ".", "hyperparameters", ".", "CategoricalHyperparameter", ")", ":", "best_vector", "[", "i", "]", "=", "int", "(", "np", ".", "rint", "(", "best_vector", "[", "i", "]", ")", ")", "sample", "=", "ConfigSpace", ".", "Configuration", "(", "self", ".", "configspace", ",", "vector", "=", "best_vector", ")", ".", "get_dictionary", "(", ")", "sample", "=", "ConfigSpace", ".", "util", ".", "deactivate_inactive_hyperparameters", "(", "configuration_space", "=", "self", ".", "configspace", ",", "configuration", "=", "sample", ")", "info_dict", "[", "'model_based_pick'", "]", "=", "True", "return", "sample", ",", "info_dict" ]
We opted for a single multidimensional KDE compared to the
hierarchy of one-dimensional KDEs used in TPE. The dimensions are
separated by budget. This function samples a configuration from the
largest budget. Firstly we sample "num_samples" configurations,
then prefer one with the largest l(x)/g(x).

Parameters:
-----------
info_dict: dict
    record the information of this configuration

Returns
-------
dict:
    new configuration named sample
dict:
    info_dict, record the information of this configuration
[ "We", "opted", "for", "a", "single", "multidimensional", "KDE", "compared", "to", "the", "hierarchy", "of", "one", "-", "dimensional", "KDEs", "used", "in", "TPE", ".", "The", "dimensional", "is", "seperated", "by", "budget", ".", "This", "function", "sample", "a", "configuration", "from", "largest", "budget", ".", "Firstly", "we", "sample", "num_samples", "configurations", "then", "prefer", "one", "with", "the", "largest", "l", "(", "x", ")", "/", "g", "(", "x", ")", ".", "Parameters", ":", "-----------", "info_dict", ":", "dict", "record", "the", "information", "of", "this", "configuration" ]
python
train
41.5