Dataset columns:

repo              string (lengths 7–55)
path              string (lengths 4–223)
url               string (lengths 87–315)
code              string (lengths 75–104k)
code_tokens       list
docstring         string (lengths 1–46.9k)
docstring_tokens  list
language          string (1 distinct value)
partition         string (3 distinct values)
avg_line_len      float64 (7.91–980)
numenta/htmresearch
htmresearch/frameworks/pytorch/benchmark_utils.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/pytorch/benchmark_utils.py#L68-L94
def register_nonzero_counter(network, stats):
    """
    Register forward hooks to count the number of nonzero floating point
    values from all the tensors used by the given network during inference.

    :param network: The network to attach the counter
    :param stats: Dictionary holding the counter.
    """
    if hasattr(network, "__counter_nonzero__"):
        raise ValueError("nonzero counter was already registered for this network")

    if not isinstance(stats, dict):
        raise ValueError("stats must be a dictionary")

    network.__counter_nonzero__ = stats
    handles = []
    for name, module in network.named_modules():
        handles.append(module.register_forward_hook(_nonzero_counter_hook))
        if network != module:
            if hasattr(module, "__counter_nonzero__"):
                raise ValueError("nonzero counter was already registered for this module")
            child_data = dict()
            network.__counter_nonzero__[name] = child_data
            module.__counter_nonzero__ = child_data

    network.__counter_nonzero_handles__ = handles
[ "def", "register_nonzero_counter", "(", "network", ",", "stats", ")", ":", "if", "hasattr", "(", "network", ",", "\"__counter_nonzero__\"", ")", ":", "raise", "ValueError", "(", "\"nonzero counter was already registered for this network\"", ")", "if", "not", "isinstance", "(", "stats", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"stats must be a dictionary\"", ")", "network", ".", "__counter_nonzero__", "=", "stats", "handles", "=", "[", "]", "for", "name", ",", "module", "in", "network", ".", "named_modules", "(", ")", ":", "handles", ".", "append", "(", "module", ".", "register_forward_hook", "(", "_nonzero_counter_hook", ")", ")", "if", "network", "!=", "module", ":", "if", "hasattr", "(", "module", ",", "\"__counter_nonzero__\"", ")", ":", "raise", "ValueError", "(", "\"nonzero counter was already registered for this module\"", ")", "child_data", "=", "dict", "(", ")", "network", ".", "__counter_nonzero__", "[", "name", "]", "=", "child_data", "module", ".", "__counter_nonzero__", "=", "child_data", "network", ".", "__counter_nonzero_handles__", "=", "handles" ]
Register forward hooks to count the number of nonzero floating point values from all the tensors used by the given network during inference.

:param network: The network to attach the counter
:param stats: Dictionary holding the counter.
[ "Register", "forward", "hooks", "to", "count", "the", "number", "of", "nonzero", "floating", "points", "values", "from", "all", "the", "tensors", "used", "by", "the", "given", "network", "during", "inference", "." ]
python
train
36.888889
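A minimal usage sketch for register_nonzero_counter; the toy network, the dummy input, and the direct cleanup via the stored handles are assumptions, and _nonzero_counter_hook is the module-internal hook the function wires up:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(10, 5), nn.ReLU())   # hypothetical toy network
stats = {}
register_nonzero_counter(net, stats)
with torch.no_grad():
    net(torch.randn(1, 10))       # forward hooks fire here and populate `stats`
print(stats)                      # per-module nonzero counts gathered by the hooks
for handle in net.__counter_nonzero_handles__:
    handle.remove()               # detach the hooks once counting is done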
ARMmbed/autoversion
src/auto_version/auto_version_tool.py
https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L139-L156
def get_final_version_string(release_mode, semver, commit_count=0):
    """Generates update dictionary entries for the version string"""
    version_string = ".".join(semver)
    maybe_dev_version_string = version_string
    updates = {}
    if release_mode:
        # in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
        updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
    else:
        # in dev mode, we have a dev marker e.g. `1.2.3.dev678`
        maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
            version=version_string, count=commit_count
        )
    # make available all components of the semantic version including the full string
    updates[Constants.VERSION_FIELD] = maybe_dev_version_string
    updates[Constants.VERSION_STRICT_FIELD] = version_string
    return updates
[ "def", "get_final_version_string", "(", "release_mode", ",", "semver", ",", "commit_count", "=", "0", ")", ":", "version_string", "=", "\".\"", ".", "join", "(", "semver", ")", "maybe_dev_version_string", "=", "version_string", "updates", "=", "{", "}", "if", "release_mode", ":", "# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True", "updates", "[", "Constants", ".", "RELEASE_FIELD", "]", "=", "config", ".", "RELEASED_VALUE", "else", ":", "# in dev mode, we have a dev marker e.g. `1.2.3.dev678`", "maybe_dev_version_string", "=", "config", ".", "DEVMODE_TEMPLATE", ".", "format", "(", "version", "=", "version_string", ",", "count", "=", "commit_count", ")", "# make available all components of the semantic version including the full string", "updates", "[", "Constants", ".", "VERSION_FIELD", "]", "=", "maybe_dev_version_string", "updates", "[", "Constants", ".", "VERSION_STRICT_FIELD", "]", "=", "version_string", "return", "updates" ]
Generates update dictionary entries for the version string
[ "Generates", "update", "dictionary", "entries", "for", "the", "version", "string" ]
python
train
46.611111
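A hypothetical call showing both branches; the actual field names and dev template live in auto_version's Constants and config objects, so the values in the comments are only indicative:

# release build: flags the release and exposes the plain semver string
updates = get_final_version_string(release_mode=True, semver=("1", "2", "3"))
# dev build: the loose version gains a dev marker derived from the commit count,
# e.g. "1.2.3.dev678" if config.DEVMODE_TEMPLATE were "{version}.dev{count}"
updates = get_final_version_string(release_mode=False, semver=("1", "2", "3"), commit_count=678)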
jdowner/gist
gist/gist.py
https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L405-L418
def clone(self, id, name=None):
    """Clone a gist

    Arguments:
        id: the gist identifier
        name: the name to give the cloned repo
    """
    url = 'git@gist.github.com:/{}'.format(id)
    if name is None:
        os.system('git clone {}'.format(url))
    else:
        os.system('git clone {} {}'.format(url, name))
[ "def", "clone", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "url", "=", "'[email protected]:/{}'", ".", "format", "(", "id", ")", "if", "name", "is", "None", ":", "os", ".", "system", "(", "'git clone {}'", ".", "format", "(", "url", ")", ")", "else", ":", "os", ".", "system", "(", "'git clone {} {}'", ".", "format", "(", "url", ",", "name", ")", ")" ]
Clone a gist

Arguments:
    id: the gist identifier
    name: the name to give the cloned repo
[ "Clone", "a", "gist" ]
python
train
26
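A hypothetical call, assuming g is an instance of the class defining clone() above:

g.clone('a1b2c3d4', name='my-gist')
# runs: git clone git@gist.github.com:/a1b2c3d4 my-gist

Because the command line is assembled with str.format and handed to os.system, untrusted id or name values could inject shell commands; passing an argument list to subprocess.run would avoid that.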
maximkulkin/lollipop
lollipop/utils.py
https://github.com/maximkulkin/lollipop/blob/042e8a24508cc3b28630863253c38ffbfc52c882/lollipop/utils.py#L59-L64
def call_with_context(func, context, *args):
    """
    Check if the given function takes more arguments than those provided.
    If so, call it with the context as the last argument; otherwise call it
    without the context.
    """
    return make_context_aware(func, len(args))(*args + (context,))
[ "def", "call_with_context", "(", "func", ",", "context", ",", "*", "args", ")", ":", "return", "make_context_aware", "(", "func", ",", "len", "(", "args", ")", ")", "(", "*", "args", "+", "(", "context", ",", ")", ")" ]
Check if the given function takes more arguments than those provided. If so, call it with the context as the last argument; otherwise call it without the context.
[ "Check", "if", "given", "function", "has", "more", "arguments", "than", "given", ".", "Call", "it", "with", "context", "as", "last", "argument", "or", "without", "it", "." ]
python
train
39.666667
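make_context_aware is not shown in this snippet, so the stand-ins below only illustrate the documented behavior, namely that the context is appended exactly when the callee can accept it:

def greet(name):                       # does not take a context
    return 'hi ' + name

def greet_ctx(name, context):          # takes the context as its last argument
    return 'hi {} from {}'.format(name, context)

call_with_context(greet, 'web', 'alice')       # -> 'hi alice'
call_with_context(greet_ctx, 'web', 'alice')   # -> 'hi alice from web'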
loanzen/falcon-auth
falcon_auth/backends.py
https://github.com/loanzen/falcon-auth/blob/b9063163fff8044a8579a6047a85f28f3b214fdf/falcon_auth/backends.py#L307-L319
def authenticate(self, req, resp, resource):
    """
    Extract the basic auth token from the request `authorization` header,
    decode the token, verify the username/password and return either a
    ``user`` object if successful, else raise a ``falcon.HTTPUnauthorized``
    exception
    """
    username, password = self._extract_credentials(req)
    user = self.user_loader(username, password)
    if not user:
        raise falcon.HTTPUnauthorized(
            description='Invalid Username/Password')

    return user
[ "def", "authenticate", "(", "self", ",", "req", ",", "resp", ",", "resource", ")", ":", "username", ",", "password", "=", "self", ".", "_extract_credentials", "(", "req", ")", "user", "=", "self", ".", "user_loader", "(", "username", ",", "password", ")", "if", "not", "user", ":", "raise", "falcon", ".", "HTTPUnauthorized", "(", "description", "=", "'Invalid Username/Password'", ")", "return", "user" ]
Extract the basic auth token from the request `authorization` header, decode the token, verify the username/password and return either a ``user`` object if successful, else raise a ``falcon.HTTPUnauthorized`` exception
[ "Extract", "basic", "auth", "token", "from", "request", "authorization", "header", "deocode", "the", "token", "verifies", "the", "username", "/", "password", "and", "return", "either", "a", "user", "object", "if", "successful", "else", "raise", "an", "falcon", ".", "HTTPUnauthoried", "exception" ]
python
train
42
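A wiring sketch: BasicAuthBackend is the backend class this method appears to belong to in falcon-auth, while the user_loader body here is invented for illustration:

from falcon_auth import BasicAuthBackend

def user_loader(username, password):
    # hypothetical credential check; a real loader would consult a user store
    if (username, password) == ('admin', 's3cret'):
        return {'username': username}
    return None

backend = BasicAuthBackend(user_loader)
# per request: backend.authenticate(req, resp, resource) returns the user dict
# or raises falcon.HTTPUnauthorized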
nchopin/particles
particles/smoothing.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L286-L317
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
                        return_ess=False, modif_forward=None, modif_info=None):
    """Two-filter smoothing.

    Parameters
    ----------
    t: time, in range 0 <= t < T-1
    info: SMC object
        the information filter
    phi: function
        test function, a function of (X_t, X_{t+1})
    loggamma: function
        a function of (X_{t+1})
    linear_cost: bool
        if True, use the O(N) variant (basic version is O(N^2))

    Returns
    -------
    Two-filter estimate of the smoothing expectation of phi(X_t, x_{t+1})
    """
    ti = self.T - 2 - t  # t+1 in reverse
    if t < 0 or t >= self.T - 1:
        raise ValueError(
            'two-filter smoothing: t must be in range 0,...,T-2')
    lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
    if linear_cost:
        return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
                                            return_ess, modif_forward,
                                            modif_info)
    else:
        return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo)
[ "def", "twofilter_smoothing", "(", "self", ",", "t", ",", "info", ",", "phi", ",", "loggamma", ",", "linear_cost", "=", "False", ",", "return_ess", "=", "False", ",", "modif_forward", "=", "None", ",", "modif_info", "=", "None", ")", ":", "ti", "=", "self", ".", "T", "-", "2", "-", "t", "# t+1 in reverse", "if", "t", "<", "0", "or", "t", ">=", "self", ".", "T", "-", "1", ":", "raise", "ValueError", "(", "'two-filter smoothing: t must be in range 0,...,T-2'", ")", "lwinfo", "=", "info", ".", "hist", ".", "wgt", "[", "ti", "]", ".", "lw", "-", "loggamma", "(", "info", ".", "hist", ".", "X", "[", "ti", "]", ")", "if", "linear_cost", ":", "return", "self", ".", "_twofilter_smoothing_ON", "(", "t", ",", "ti", ",", "info", ",", "phi", ",", "lwinfo", ",", "return_ess", ",", "modif_forward", ",", "modif_info", ")", "else", ":", "return", "self", ".", "_twofilter_smoothing_ON2", "(", "t", ",", "ti", ",", "info", ",", "phi", ",", "lwinfo", ")" ]
Two-filter smoothing.

Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
    the information filter
phi: function
    test function, a function of (X_t, X_{t+1})
loggamma: function
    a function of (X_{t+1})
linear_cost: bool
    if True, use the O(N) variant (basic version is O(N^2))

Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t, x_{t+1})
[ "Two", "-", "filter", "smoothing", "." ]
python
train
39.0625
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L95-L98
def account_update(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account"
    api_path = "/api/v2/account"
    return self.call(api_path, method="PUT", data=data, **kwargs)
[ "def", "account_update", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/account\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"PUT\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "accounts#update", "-", "account" ]
python
train
57
PredixDev/predixpy
predix/data/blobstore.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/data/blobstore.py#L108-L115
def list_objects(self, bucket_name=None, **kwargs):
    """
    This method is primarily for illustration and just calls the boto3
    client implementation of list_objects, but it is a common task for
    first-time Predix BlobStore users.
    """
    if not bucket_name:
        bucket_name = self.bucket_name

    return self.client.list_objects(Bucket=bucket_name, **kwargs)
[ "def", "list_objects", "(", "self", ",", "bucket_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "bucket_name", ":", "bucket_name", "=", "self", ".", "bucket_name", "return", "self", ".", "client", ".", "list_objects", "(", "Bucket", "=", "bucket_name", ",", "*", "*", "kwargs", ")" ]
This method is primarily for illustration and just calls the boto3 client implementation of list_objects, but it is a common task for first-time Predix BlobStore users.
[ "This", "method", "is", "primarily", "for", "illustration", "and", "just", "calls", "the", "boto3", "client", "implementation", "of", "list_objects", "but", "is", "a", "common", "task", "for", "first", "time", "Predix", "BlobStore", "users", "." ]
python
train
48.375
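A sketch assuming the BlobStore instance can pick up its bound service credentials from the environment, as predixpy services generally do; the printed fields come from the boto3-style response dict:

from predix.data.blobstore import BlobStore

blobstore = BlobStore()                    # assumes credentials are already configured
listing = blobstore.list_objects()         # defaults to the bound bucket
for obj in listing.get('Contents', []):
    print(obj['Key'], obj['Size'])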
saltstack/salt
salt/modules/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutron.py#L1083-L1102
def create_vpnservice(subnet, router, name, admin_state_up=True, profile=None):
    '''
    Creates a new VPN service

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_vpnservice router-name name

    :param subnet: Subnet unique identifier for the VPN service deployment
    :param router: Router unique identifier for the VPN service
    :param name: Set a name for the VPN service
    :param admin_state_up: Set admin state up to true or false,
            default: True (Optional)
    :param profile: Profile to build on (Optional)
    :return: Created VPN service information
    '''
    conn = _auth(profile)
    return conn.create_vpnservice(subnet, router, name, admin_state_up)
[ "def", "create_vpnservice", "(", "subnet", ",", "router", ",", "name", ",", "admin_state_up", "=", "True", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "create_vpnservice", "(", "subnet", ",", "router", ",", "name", ",", "admin_state_up", ")" ]
Creates a new VPN service

CLI Example:

.. code-block:: bash

    salt '*' neutron.create_vpnservice router-name name

:param subnet: Subnet unique identifier for the VPN service deployment
:param router: Router unique identifier for the VPN service
:param name: Set a name for the VPN service
:param admin_state_up: Set admin state up to true or false, default: True (Optional)
:param profile: Profile to build on (Optional)
:return: Created VPN service information
[ "Creates", "a", "new", "VPN", "service" ]
python
train
34.65
boakley/robotframework-lint
rflint/parser/parser.py
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L272-L277
def settings(self):
    '''Generator which returns all of the statements in all of the settings tables'''
    for table in self.tables:
        if isinstance(table, SettingTable):
            for statement in table.statements:
                yield statement
[ "def", "settings", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "if", "isinstance", "(", "table", ",", "SettingTable", ")", ":", "for", "statement", "in", "table", ".", "statements", ":", "yield", "statement" ]
Generator which returns all of the statements in all of the settings tables
[ "Generator", "which", "returns", "all", "of", "the", "statements", "in", "all", "of", "the", "settings", "tables" ]
python
valid
45.5
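A short consumption sketch; RobotFactory is rflint's parser entry point, the file path is hypothetical, and settings is assumed to be exposed as a property in the full source:

from rflint.parser import RobotFactory

suite = RobotFactory('tests/login.robot')   # hypothetical robot file
for statement in suite.settings:            # the generator defined above
    print(statement)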
kevinconway/venvctrl
venvctrl/venv/pip.py
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/pip.py#L27-L50
def install_package(self, name, index=None, force=False, update=False):
    """Install a given package.

    Args:
        name (str): The package name to install. This can be any valid
            pip package specification.
        index (str): The URL for a pypi index to use.
        force (bool): Force the reinstall of packages during updates.
        update (bool): Update the package if it is out of date.
    """
    cmd = 'install'
    if force:
        cmd = '{0} {1}'.format(cmd, '--force-reinstall')

    if update:
        cmd = '{0} {1}'.format(cmd, '--upgrade')

    if index:
        cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))

    self.pip('{0} {1}'.format(cmd, name))
[ "def", "install_package", "(", "self", ",", "name", ",", "index", "=", "None", ",", "force", "=", "False", ",", "update", "=", "False", ")", ":", "cmd", "=", "'install'", "if", "force", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "cmd", ",", "'--force-reinstall'", ")", "if", "update", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "cmd", ",", "'--update'", ")", "if", "index", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "cmd", ",", "'--index-url {0}'", ".", "format", "(", "index", ")", ")", "self", ".", "pip", "(", "'{0} {1}'", ".", "format", "(", "cmd", ",", "name", ")", ")" ]
Install a given package.

Args:
    name (str): The package name to install. This can be any valid pip package specification.
    index (str): The URL for a pypi index to use.
    force (bool): Force the reinstall of packages during updates.
    update (bool): Update the package if it is out of date.
[ "Install", "a", "given", "package", "." ]
python
train
31
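A hypothetical call sequence, assuming venv is a venvctrl virtual-environment object exposing the mixin above; the index URL is a placeholder:

venv.install_package('requests')                   # pip install requests
venv.install_package('requests', update=True)      # adds --upgrade
venv.install_package('requests', index='https://pypi.example.org/simple')   # adds --index-url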
cslarsen/crianza
crianza/instructions.py
https://github.com/cslarsen/crianza/blob/fa044f9d491f37cc06892bad14b2c80b8ac5a7cd/crianza/instructions.py#L100-L107
def rot(vm):
    """Rotate topmost three items once to the left. ( a b c -- b c a )"""
    c = vm.pop()
    b = vm.pop()
    a = vm.pop()
    vm.push(b)
    vm.push(c)
    vm.push(a)
[ "def", "rot", "(", "vm", ")", ":", "c", "=", "vm", ".", "pop", "(", ")", "b", "=", "vm", ".", "pop", "(", ")", "a", "=", "vm", ".", "pop", "(", ")", "vm", ".", "push", "(", "b", ")", "vm", ".", "push", "(", "c", ")", "vm", ".", "push", "(", "a", ")" ]
Rotate topmost three items once to the left. ( a b c -- b c a )
[ "Rotate", "topmost", "three", "items", "once", "to", "the", "left", ".", "(", "a", "b", "c", "--", "b", "c", "a", ")" ]
python
train
21.875
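A self-contained illustration with a minimal stand-in VM; the real crianza machine provides push/pop on its data stack:

class TinyVM:
    """Minimal stand-in exposing the push/pop interface rot() relies on."""
    def __init__(self):
        self.stack = []

    def push(self, x):
        self.stack.append(x)

    def pop(self):
        return self.stack.pop()

vm = TinyVM()
for item in ('a', 'b', 'c'):
    vm.push(item)
rot(vm)
print(vm.stack)   # ['b', 'c', 'a'], matching the stack effect ( a b c -- b c a )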
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L234-L508
def parsecommonarguments(object, doc, annotationtype, required, allowed, **kwargs):
    """Internal function to parse common FoLiA attributes and set up the instance accordingly. Do not invoke directly."""
    object.doc = doc  # The FoLiA root document
    if required is None:
        required = tuple()
    if allowed is None:
        allowed = tuple()
    supported = required + allowed

    if 'generate_id_in' in kwargs:
        try:
            kwargs['id'] = kwargs['generate_id_in'].generate_id(object.__class__)
        except GenerateIDException:
            pass  # ID could not be generated, just skip
        del kwargs['generate_id_in']

    if 'id' in kwargs:
        if Attrib.ID not in supported:
            raise ValueError("ID is not supported on " + object.__class__.__name__)
        isncname(kwargs['id'])
        object.id = kwargs['id']
        del kwargs['id']
    elif Attrib.ID in required:
        raise ValueError("ID is required for " + object.__class__.__name__)
    else:
        object.id = None

    if 'set' in kwargs:
        if Attrib.CLASS not in supported and not object.SETONLY:
            raise ValueError("Set is not supported on " + object.__class__.__name__)
        if not kwargs['set']:
            object.set = "undefined"
        else:
            object.set = kwargs['set']
        del kwargs['set']

        if object.set:
            if doc and (not (annotationtype in doc.annotationdefaults) or not (object.set in doc.annotationdefaults[annotationtype])):
                if object.set in doc.alias_set:
                    object.set = doc.alias_set[object.set]
                elif doc.autodeclare:
                    doc.annotations.append((annotationtype, object.set))
                    doc.annotationdefaults[annotationtype] = {object.set: {}}
                else:
                    raise ValueError("Set '" + object.set + "' is used for " + object.__class__.__name__ + ", but has no declaration!")
    elif annotationtype in doc.annotationdefaults and len(doc.annotationdefaults[annotationtype]) == 1:
        object.set = list(doc.annotationdefaults[annotationtype].keys())[0]
    elif object.ANNOTATIONTYPE == AnnotationType.TEXT:
        object.set = "undefined"  # text content needs never be declared (for backward compatibility) and is in set 'undefined'
    elif Attrib.CLASS in required:  # or (hasattr(object,'SETONLY') and object.SETONLY):
        raise ValueError("Set is required for " + object.__class__.__name__)

    if 'class' in kwargs:
        if not Attrib.CLASS in supported:
            raise ValueError("Class is not supported for " + object.__class__.__name__)
        object.cls = kwargs['class']
        del kwargs['class']
    elif 'cls' in kwargs:
        if not Attrib.CLASS in supported:
            raise ValueError("Class is not supported on " + object.__class__.__name__)
        object.cls = kwargs['cls']
        del kwargs['cls']
    elif Attrib.CLASS in required:
        raise ValueError("Class is required for " + object.__class__.__name__)

    if object.cls and not object.set:
        if doc and doc.autodeclare:
            if not (annotationtype, 'undefined') in doc.annotations:
                doc.annotations.append((annotationtype, 'undefined'))
                doc.annotationdefaults[annotationtype] = {'undefined': {}}
            object.set = 'undefined'
        else:
            raise ValueError("Set is required for " + object.__class__.__name__ + ". Class '" + object.cls + "' assigned without set.")

    if 'annotator' in kwargs:
        if not Attrib.ANNOTATOR in supported:
            raise ValueError("Annotator is not supported for " + object.__class__.__name__)
        object.annotator = kwargs['annotator']
        del kwargs['annotator']
    elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotator' in doc.annotationdefaults[annotationtype][object.set]:
        object.annotator = doc.annotationdefaults[annotationtype][object.set]['annotator']
    elif Attrib.ANNOTATOR in required:
        raise ValueError("Annotator is required for " + object.__class__.__name__)

    if 'annotatortype' in kwargs:
        if not Attrib.ANNOTATOR in supported:
            raise ValueError("Annotatortype is not supported for " + object.__class__.__name__)
        if kwargs['annotatortype'] == 'auto' or kwargs['annotatortype'] == AnnotatorType.AUTO:
            object.annotatortype = AnnotatorType.AUTO
        elif kwargs['annotatortype'] == 'manual' or kwargs['annotatortype'] == AnnotatorType.MANUAL:
            object.annotatortype = AnnotatorType.MANUAL
        else:
            raise ValueError("annotatortype must be 'auto' or 'manual', got " + repr(kwargs['annotatortype']))
        del kwargs['annotatortype']
    elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotatortype' in doc.annotationdefaults[annotationtype][object.set]:
        object.annotatortype = doc.annotationdefaults[annotationtype][object.set]['annotatortype']
    elif Attrib.ANNOTATOR in required:
        raise ValueError("Annotatortype is required for " + object.__class__.__name__)

    if 'confidence' in kwargs:
        if not Attrib.CONFIDENCE in supported:
            raise ValueError("Confidence is not supported")
        if kwargs['confidence'] is not None:
            try:
                object.confidence = float(kwargs['confidence'])
                assert object.confidence >= 0.0 and object.confidence <= 1.0
            except:
                raise ValueError("Confidence must be a floating point number between 0 and 1, got " + repr(kwargs['confidence']))
        del kwargs['confidence']
    elif Attrib.CONFIDENCE in required:
        raise ValueError("Confidence is required for " + object.__class__.__name__)

    if 'n' in kwargs:
        if not Attrib.N in supported:
            raise ValueError("N is not supported for " + object.__class__.__name__)
        object.n = kwargs['n']
        del kwargs['n']
    elif Attrib.N in required:
        raise ValueError("N is required for " + object.__class__.__name__)

    if 'datetime' in kwargs:
        if not Attrib.DATETIME in supported:
            raise ValueError("Datetime is not supported")
        if isinstance(kwargs['datetime'], datetime):
            object.datetime = kwargs['datetime']
        else:
            #try:
            object.datetime = parse_datetime(kwargs['datetime'])
            #except:
            #    raise ValueError("Unable to parse datetime: " + str(repr(kwargs['datetime'])))
        del kwargs['datetime']
    elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'datetime' in doc.annotationdefaults[annotationtype][object.set]:
        object.datetime = doc.annotationdefaults[annotationtype][object.set]['datetime']
    elif Attrib.DATETIME in required:
        raise ValueError("Datetime is required for " + object.__class__.__name__)

    if 'src' in kwargs:
        if not Attrib.SRC in supported:
            raise ValueError("Source is not supported for " + object.__class__.__name__)
        object.src = kwargs['src']
        del kwargs['src']
    elif Attrib.SRC in required:
        raise ValueError("Source is required for " + object.__class__.__name__)

    if 'begintime' in kwargs:
        if not Attrib.BEGINTIME in supported:
            raise ValueError("Begintime is not supported for " + object.__class__.__name__)
        object.begintime = parsetime(kwargs['begintime'])
        del kwargs['begintime']
    elif Attrib.BEGINTIME in required:
        raise ValueError("Begintime is required for " + object.__class__.__name__)

    if 'endtime' in kwargs:
        if not Attrib.ENDTIME in supported:
            raise ValueError("Endtime is not supported for " + object.__class__.__name__)
        object.endtime = parsetime(kwargs['endtime'])
        del kwargs['endtime']
    elif Attrib.ENDTIME in required:
        raise ValueError("Endtime is required for " + object.__class__.__name__)

    if 'speaker' in kwargs:
        if not Attrib.SPEAKER in supported:
            raise ValueError("Speaker is not supported for " + object.__class__.__name__)
        object.speaker = kwargs['speaker']
        del kwargs['speaker']
    elif Attrib.SPEAKER in required:
        raise ValueError("Speaker is required for " + object.__class__.__name__)

    if 'auth' in kwargs:
        if kwargs['auth'] in ('no', 'false'):
            object.auth = False
        else:
            object.auth = bool(kwargs['auth'])
        del kwargs['auth']
    else:
        object.auth = object.__class__.AUTH

    if 'text' in kwargs:
        if kwargs['text']:
            object.settext(kwargs['text'])
        del kwargs['text']

    if 'phon' in kwargs:
        if kwargs['phon']:
            object.setphon(kwargs['phon'])
        del kwargs['phon']

    if 'textclass' in kwargs:
        if not Attrib.TEXTCLASS in supported:
            raise ValueError("Textclass is not supported for " + object.__class__.__name__)
        object.textclass = kwargs['textclass']
        del kwargs['textclass']
    else:
        if Attrib.TEXTCLASS in supported:
            object.textclass = "current"

    if 'metadata' in kwargs:
        if not Attrib.METADATA in supported:
            raise ValueError("Metadata is not supported for " + object.__class__.__name__)
        object.metadata = kwargs['metadata']
        if doc:
            try:
                doc.submetadata[kwargs['metadata']]
            except KeyError:
                raise KeyError("No such metadata defined: " + kwargs['metadata'])
        del kwargs['metadata']

    if object.XLINK:
        if 'href' in kwargs:
            object.href = kwargs['href']
            del kwargs['href']
        if 'xlinktype' in kwargs:
            object.xlinktype = kwargs['xlinktype']
            del kwargs['xlinktype']
        if 'xlinkrole' in kwargs:
            object.xlinkrole = kwargs['xlinkrole']
            del kwargs['xlinkrole']
        if 'xlinklabel' in kwargs:
            object.xlinklabel = kwargs['xlinklabel']
            del kwargs['xlinklabel']
        if 'xlinkshow' in kwargs:
            object.xlinkshow = kwargs['xlinkshow']
            del kwargs['xlinkshow']
        if 'xlinktitle' in kwargs:
            object.xlinktitle = kwargs['xlinktitle']
            del kwargs['xlinktitle']

    if doc and doc.debug >= 2:
        print("   @id           = ", repr(object.id), file=stderr)
        print("   @set          = ", repr(object.set), file=stderr)
        print("   @class        = ", repr(object.cls), file=stderr)
        print("   @annotator    = ", repr(object.annotator), file=stderr)
        print("   @annotatortype= ", repr(object.annotatortype), file=stderr)
        print("   @confidence   = ", repr(object.confidence), file=stderr)
        print("   @n            = ", repr(object.n), file=stderr)
        print("   @datetime     = ", repr(object.datetime), file=stderr)

    # set index
    if object.id and doc:
        if object.id in doc.index:
            if doc.debug >= 1:
                print("[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:" + object.id, file=stderr)
            raise DuplicateIDError("Duplicate ID not permitted: " + object.id)
        else:
            if doc.debug >= 1:
                print("[PyNLPl FoLiA DEBUG] Adding to index: " + object.id, file=stderr)
            doc.index[object.id] = object

    # Parse feature attributes (shortcut for feature specification for some elements)
    for c in object.ACCEPTED_DATA:
        if issubclass(c, Feature):
            if c.SUBSET in kwargs:
                if kwargs[c.SUBSET]:
                    object.append(c, cls=kwargs[c.SUBSET])
                del kwargs[c.SUBSET]

    return kwargs
[ "def", "parsecommonarguments", "(", "object", ",", "doc", ",", "annotationtype", ",", "required", ",", "allowed", ",", "*", "*", "kwargs", ")", ":", "object", ".", "doc", "=", "doc", "#The FoLiA root document", "if", "required", "is", "None", ":", "required", "=", "tuple", "(", ")", "if", "allowed", "is", "None", ":", "allowed", "=", "tuple", "(", ")", "supported", "=", "required", "+", "allowed", "if", "'generate_id_in'", "in", "kwargs", ":", "try", ":", "kwargs", "[", "'id'", "]", "=", "kwargs", "[", "'generate_id_in'", "]", ".", "generate_id", "(", "object", ".", "__class__", ")", "except", "GenerateIDException", ":", "pass", "#ID could not be generated, just skip", "del", "kwargs", "[", "'generate_id_in'", "]", "if", "'id'", "in", "kwargs", ":", "if", "Attrib", ".", "ID", "not", "in", "supported", ":", "raise", "ValueError", "(", "\"ID is not supported on \"", "+", "object", ".", "__class__", ".", "__name__", ")", "isncname", "(", "kwargs", "[", "'id'", "]", ")", "object", ".", "id", "=", "kwargs", "[", "'id'", "]", "del", "kwargs", "[", "'id'", "]", "elif", "Attrib", ".", "ID", "in", "required", ":", "raise", "ValueError", "(", "\"ID is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "else", ":", "object", ".", "id", "=", "None", "if", "'set'", "in", "kwargs", ":", "if", "Attrib", ".", "CLASS", "not", "in", "supported", "and", "not", "object", ".", "SETONLY", ":", "raise", "ValueError", "(", "\"Set is not supported on \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "not", "kwargs", "[", "'set'", "]", ":", "object", ".", "set", "=", "\"undefined\"", "else", ":", "object", ".", "set", "=", "kwargs", "[", "'set'", "]", "del", "kwargs", "[", "'set'", "]", "if", "object", ".", "set", ":", "if", "doc", "and", "(", "not", "(", "annotationtype", "in", "doc", ".", "annotationdefaults", ")", "or", "not", "(", "object", ".", "set", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", ")", ")", ":", "if", "object", ".", "set", "in", "doc", ".", "alias_set", ":", "object", ".", "set", "=", "doc", ".", "alias_set", "[", "object", ".", "set", "]", "elif", "doc", ".", "autodeclare", ":", "doc", ".", "annotations", ".", "append", "(", "(", "annotationtype", ",", "object", ".", "set", ")", ")", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "=", "{", "object", ".", "set", ":", "{", "}", "}", "else", ":", "raise", "ValueError", "(", "\"Set '\"", "+", "object", ".", "set", "+", "\"' is used for \"", "+", "object", ".", "__class__", ".", "__name__", "+", "\", but has no declaration!\"", ")", "elif", "annotationtype", "in", "doc", ".", "annotationdefaults", "and", "len", "(", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", ")", "==", "1", ":", "object", ".", "set", "=", "list", "(", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "elif", "object", ".", "ANNOTATIONTYPE", "==", "AnnotationType", ".", "TEXT", ":", "object", ".", "set", "=", "\"undefined\"", "#text content needs never be declared (for backward compatibility) and is in set 'undefined'", "elif", "Attrib", ".", "CLASS", "in", "required", ":", "#or (hasattr(object,'SETONLY') and object.SETONLY):", "raise", "ValueError", "(", "\"Set is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'class'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "CLASS", "in", "supported", ":", "raise", "ValueError", "(", "\"Class is not supported for \"", "+", "object", ".", "__class__", ".", 
"__name__", ")", "object", ".", "cls", "=", "kwargs", "[", "'class'", "]", "del", "kwargs", "[", "'class'", "]", "elif", "'cls'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "CLASS", "in", "supported", ":", "raise", "ValueError", "(", "\"Class is not supported on \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "cls", "=", "kwargs", "[", "'cls'", "]", "del", "kwargs", "[", "'cls'", "]", "elif", "Attrib", ".", "CLASS", "in", "required", ":", "raise", "ValueError", "(", "\"Class is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "object", ".", "cls", "and", "not", "object", ".", "set", ":", "if", "doc", "and", "doc", ".", "autodeclare", ":", "if", "not", "(", "annotationtype", ",", "'undefined'", ")", "in", "doc", ".", "annotations", ":", "doc", ".", "annotations", ".", "append", "(", "(", "annotationtype", ",", "'undefined'", ")", ")", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "=", "{", "'undefined'", ":", "{", "}", "}", "object", ".", "set", "=", "'undefined'", "else", ":", "raise", "ValueError", "(", "\"Set is required for \"", "+", "object", ".", "__class__", ".", "__name__", "+", "\". Class '\"", "+", "object", ".", "cls", "+", "\"' assigned without set.\"", ")", "if", "'annotator'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "ANNOTATOR", "in", "supported", ":", "raise", "ValueError", "(", "\"Annotator is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "annotator", "=", "kwargs", "[", "'annotator'", "]", "del", "kwargs", "[", "'annotator'", "]", "elif", "doc", "and", "annotationtype", "in", "doc", ".", "annotationdefaults", "and", "object", ".", "set", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "and", "'annotator'", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "[", "object", ".", "set", "]", ":", "object", ".", "annotator", "=", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "[", "object", ".", "set", "]", "[", "'annotator'", "]", "elif", "Attrib", ".", "ANNOTATOR", "in", "required", ":", "raise", "ValueError", "(", "\"Annotator is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'annotatortype'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "ANNOTATOR", "in", "supported", ":", "raise", "ValueError", "(", "\"Annotatortype is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "kwargs", "[", "'annotatortype'", "]", "==", "'auto'", "or", "kwargs", "[", "'annotatortype'", "]", "==", "AnnotatorType", ".", "AUTO", ":", "object", ".", "annotatortype", "=", "AnnotatorType", ".", "AUTO", "elif", "kwargs", "[", "'annotatortype'", "]", "==", "'manual'", "or", "kwargs", "[", "'annotatortype'", "]", "==", "AnnotatorType", ".", "MANUAL", ":", "object", ".", "annotatortype", "=", "AnnotatorType", ".", "MANUAL", "else", ":", "raise", "ValueError", "(", "\"annotatortype must be 'auto' or 'manual', got \"", "+", "repr", "(", "kwargs", "[", "'annotatortype'", "]", ")", ")", "del", "kwargs", "[", "'annotatortype'", "]", "elif", "doc", "and", "annotationtype", "in", "doc", ".", "annotationdefaults", "and", "object", ".", "set", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "and", "'annotatortype'", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "[", "object", ".", "set", "]", ":", "object", ".", "annotatortype", "=", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "[", "object", ".", "set", 
"]", "[", "'annotatortype'", "]", "elif", "Attrib", ".", "ANNOTATOR", "in", "required", ":", "raise", "ValueError", "(", "\"Annotatortype is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'confidence'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "CONFIDENCE", "in", "supported", ":", "raise", "ValueError", "(", "\"Confidence is not supported\"", ")", "if", "kwargs", "[", "'confidence'", "]", "is", "not", "None", ":", "try", ":", "object", ".", "confidence", "=", "float", "(", "kwargs", "[", "'confidence'", "]", ")", "assert", "object", ".", "confidence", ">=", "0.0", "and", "object", ".", "confidence", "<=", "1.0", "except", ":", "raise", "ValueError", "(", "\"Confidence must be a floating point number between 0 and 1, got \"", "+", "repr", "(", "kwargs", "[", "'confidence'", "]", ")", ")", "del", "kwargs", "[", "'confidence'", "]", "elif", "Attrib", ".", "CONFIDENCE", "in", "required", ":", "raise", "ValueError", "(", "\"Confidence is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'n'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "N", "in", "supported", ":", "raise", "ValueError", "(", "\"N is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "n", "=", "kwargs", "[", "'n'", "]", "del", "kwargs", "[", "'n'", "]", "elif", "Attrib", ".", "N", "in", "required", ":", "raise", "ValueError", "(", "\"N is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'datetime'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "DATETIME", "in", "supported", ":", "raise", "ValueError", "(", "\"Datetime is not supported\"", ")", "if", "isinstance", "(", "kwargs", "[", "'datetime'", "]", ",", "datetime", ")", ":", "object", ".", "datetime", "=", "kwargs", "[", "'datetime'", "]", "else", ":", "#try:", "object", ".", "datetime", "=", "parse_datetime", "(", "kwargs", "[", "'datetime'", "]", ")", "#except:", "# raise ValueError(\"Unable to parse datetime: \" + str(repr(kwargs['datetime'])))", "del", "kwargs", "[", "'datetime'", "]", "elif", "doc", "and", "annotationtype", "in", "doc", ".", "annotationdefaults", "and", "object", ".", "set", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "and", "'datetime'", "in", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "[", "object", ".", "set", "]", ":", "object", ".", "datetime", "=", "doc", ".", "annotationdefaults", "[", "annotationtype", "]", "[", "object", ".", "set", "]", "[", "'datetime'", "]", "elif", "Attrib", ".", "DATETIME", "in", "required", ":", "raise", "ValueError", "(", "\"Datetime is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'src'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "SRC", "in", "supported", ":", "raise", "ValueError", "(", "\"Source is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "src", "=", "kwargs", "[", "'src'", "]", "del", "kwargs", "[", "'src'", "]", "elif", "Attrib", ".", "SRC", "in", "required", ":", "raise", "ValueError", "(", "\"Source is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'begintime'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "BEGINTIME", "in", "supported", ":", "raise", "ValueError", "(", "\"Begintime is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "begintime", "=", "parsetime", "(", "kwargs", "[", "'begintime'", "]", ")", "del", "kwargs", "[", "'begintime'", "]", 
"elif", "Attrib", ".", "BEGINTIME", "in", "required", ":", "raise", "ValueError", "(", "\"Begintime is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'endtime'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "ENDTIME", "in", "supported", ":", "raise", "ValueError", "(", "\"Endtime is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "endtime", "=", "parsetime", "(", "kwargs", "[", "'endtime'", "]", ")", "del", "kwargs", "[", "'endtime'", "]", "elif", "Attrib", ".", "ENDTIME", "in", "required", ":", "raise", "ValueError", "(", "\"Endtime is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'speaker'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "SPEAKER", "in", "supported", ":", "raise", "ValueError", "(", "\"Speaker is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "speaker", "=", "kwargs", "[", "'speaker'", "]", "del", "kwargs", "[", "'speaker'", "]", "elif", "Attrib", ".", "SPEAKER", "in", "required", ":", "raise", "ValueError", "(", "\"Speaker is required for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "if", "'auth'", "in", "kwargs", ":", "if", "kwargs", "[", "'auth'", "]", "in", "(", "'no'", ",", "'false'", ")", ":", "object", ".", "auth", "=", "False", "else", ":", "object", ".", "auth", "=", "bool", "(", "kwargs", "[", "'auth'", "]", ")", "del", "kwargs", "[", "'auth'", "]", "else", ":", "object", ".", "auth", "=", "object", ".", "__class__", ".", "AUTH", "if", "'text'", "in", "kwargs", ":", "if", "kwargs", "[", "'text'", "]", ":", "object", ".", "settext", "(", "kwargs", "[", "'text'", "]", ")", "del", "kwargs", "[", "'text'", "]", "if", "'phon'", "in", "kwargs", ":", "if", "kwargs", "[", "'phon'", "]", ":", "object", ".", "setphon", "(", "kwargs", "[", "'phon'", "]", ")", "del", "kwargs", "[", "'phon'", "]", "if", "'textclass'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "TEXTCLASS", "in", "supported", ":", "raise", "ValueError", "(", "\"Textclass is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "textclass", "=", "kwargs", "[", "'textclass'", "]", "del", "kwargs", "[", "'textclass'", "]", "else", ":", "if", "Attrib", ".", "TEXTCLASS", "in", "supported", ":", "object", ".", "textclass", "=", "\"current\"", "if", "'metadata'", "in", "kwargs", ":", "if", "not", "Attrib", ".", "METADATA", "in", "supported", ":", "raise", "ValueError", "(", "\"Metadata is not supported for \"", "+", "object", ".", "__class__", ".", "__name__", ")", "object", ".", "metadata", "=", "kwargs", "[", "'metadata'", "]", "if", "doc", ":", "try", ":", "doc", ".", "submetadata", "[", "kwargs", "[", "'metadata'", "]", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"No such metadata defined: \"", "+", "kwargs", "[", "'metadata'", "]", ")", "del", "kwargs", "[", "'metadata'", "]", "if", "object", ".", "XLINK", ":", "if", "'href'", "in", "kwargs", ":", "object", ".", "href", "=", "kwargs", "[", "'href'", "]", "del", "kwargs", "[", "'href'", "]", "if", "'xlinktype'", "in", "kwargs", ":", "object", ".", "xlinktype", "=", "kwargs", "[", "'xlinktype'", "]", "del", "kwargs", "[", "'xlinktype'", "]", "if", "'xlinkrole'", "in", "kwargs", ":", "object", ".", "xlinkrole", "=", "kwargs", "[", "'xlinkrole'", "]", "del", "kwargs", "[", "'xlinkrole'", "]", "if", "'xlinklabel'", "in", "kwargs", ":", "object", ".", "xlinklabel", "=", "kwargs", "[", "'xlinklabel'", "]", "del", "kwargs", "[", 
"'xlinklabel'", "]", "if", "'xlinkshow'", "in", "kwargs", ":", "object", ".", "xlinkshow", "=", "kwargs", "[", "'xlinkshow'", "]", "del", "kwargs", "[", "'xlinklabel'", "]", "if", "'xlinktitle'", "in", "kwargs", ":", "object", ".", "xlinktitle", "=", "kwargs", "[", "'xlinktitle'", "]", "del", "kwargs", "[", "'xlinktitle'", "]", "if", "doc", "and", "doc", ".", "debug", ">=", "2", ":", "print", "(", "\" @id = \"", ",", "repr", "(", "object", ".", "id", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @set = \"", ",", "repr", "(", "object", ".", "set", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @class = \"", ",", "repr", "(", "object", ".", "cls", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @annotator = \"", ",", "repr", "(", "object", ".", "annotator", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @annotatortype= \"", ",", "repr", "(", "object", ".", "annotatortype", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @confidence = \"", ",", "repr", "(", "object", ".", "confidence", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @n = \"", ",", "repr", "(", "object", ".", "n", ")", ",", "file", "=", "stderr", ")", "print", "(", "\" @datetime = \"", ",", "repr", "(", "object", ".", "datetime", ")", ",", "file", "=", "stderr", ")", "#set index", "if", "object", ".", "id", "and", "doc", ":", "if", "object", ".", "id", "in", "doc", ".", "index", ":", "if", "doc", ".", "debug", ">=", "1", ":", "print", "(", "\"[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:\"", "+", "object", ".", "id", ",", "file", "=", "stderr", ")", "raise", "DuplicateIDError", "(", "\"Duplicate ID not permitted: \"", "+", "object", ".", "id", ")", "else", ":", "if", "doc", ".", "debug", ">=", "1", ":", "print", "(", "\"[PyNLPl FoLiA DEBUG] Adding to index: \"", "+", "object", ".", "id", ",", "file", "=", "stderr", ")", "doc", ".", "index", "[", "object", ".", "id", "]", "=", "object", "#Parse feature attributes (shortcut for feature specification for some elements)", "for", "c", "in", "object", ".", "ACCEPTED_DATA", ":", "if", "issubclass", "(", "c", ",", "Feature", ")", ":", "if", "c", ".", "SUBSET", "in", "kwargs", ":", "if", "kwargs", "[", "c", ".", "SUBSET", "]", ":", "object", ".", "append", "(", "c", ",", "cls", "=", "kwargs", "[", "c", ".", "SUBSET", "]", ")", "del", "kwargs", "[", "c", ".", "SUBSET", "]", "return", "kwargs" ]
Internal function to parse common FoLiA attributes and set up the instance accordingly. Do not invoke directly.
[ "Internal", "function", "to", "parse", "common", "FoLiA", "attributes", "and", "sets", "up", "the", "instance", "accordingly", ".", "Do", "not", "invoke", "directly", "." ]
python
train
42.189091
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/genesis/genesis.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/genesis/genesis.py#L129-L142
def setup_signals(self):
    """Connect the signals with the slots to make the ui functional

    :returns: None
    :rtype: None
    :raises: None
    """
    self.browser.shot_taskfile_sel_changed.connect(self.shot_taskfile_sel_changed)
    self.browser.asset_taskfile_sel_changed.connect(self.asset_taskfile_sel_changed)
    self.shot_open_pb.clicked.connect(self.shot_open_callback)
    self.asset_open_pb.clicked.connect(self.asset_open_callback)
    self.shot_save_pb.clicked.connect(self.shot_save_callback)
    self.asset_save_pb.clicked.connect(self.asset_save_callback)
[ "def", "setup_signals", "(", "self", ",", ")", ":", "self", ".", "browser", ".", "shot_taskfile_sel_changed", ".", "connect", "(", "self", ".", "shot_taskfile_sel_changed", ")", "self", ".", "browser", ".", "asset_taskfile_sel_changed", ".", "connect", "(", "self", ".", "asset_taskfile_sel_changed", ")", "self", ".", "shot_open_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_open_callback", ")", "self", ".", "asset_open_pb", ".", "clicked", ".", "connect", "(", "self", ".", "asset_open_callback", ")", "self", ".", "shot_save_pb", ".", "clicked", ".", "connect", "(", "self", ".", "shot_save_callback", ")", "self", ".", "asset_save_pb", ".", "clicked", ".", "connect", "(", "self", ".", "asset_save_callback", ")" ]
Connect the signals with the slots to make the ui functional

:returns: None
:rtype: None
:raises: None
[ "Connect", "the", "signals", "with", "the", "slots", "to", "make", "the", "ui", "functional" ]
python
train
43.785714
pandas-dev/pandas
pandas/core/ops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1672-L1770
def _arith_method_SERIES(cls, op, special):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    str_rep = _get_opstr(op, cls)
    op_name = _get_op_name(op, special)
    eval_kwargs = _gen_eval_kwargs(op_name)
    fill_zeros = _gen_fill_zeros(op_name)
    construct_result = (_construct_divmod_result
                        if op in [divmod, rdivmod] else _construct_result)

    def na_op(x, y):
        import pandas.core.computation.expressions as expressions
        try:
            result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
        except TypeError:
            result = masked_arith_op(x, y, op)

        result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
        return result

    def safe_na_op(lvalues, rvalues):
        """
        return the result of evaluating na_op on the passed in values

        try coercion to object type if the native types are not compatible

        Parameters
        ----------
        lvalues : array-like
        rvalues : array-like

        Raises
        ------
        TypeError: invalid operation
        """
        try:
            with np.errstate(all='ignore'):
                return na_op(lvalues, rvalues)
        except Exception:
            if is_object_dtype(lvalues):
                return libalgos.arrmap_object(lvalues,
                                              lambda x: op(x, rvalues))
            raise

    def wrapper(left, right):
        if isinstance(right, ABCDataFrame):
            return NotImplemented

        left, right = _align_method_SERIES(left, right)
        res_name = get_op_result_name(left, right)
        right = maybe_upcast_for_op(right)

        if is_categorical_dtype(left):
            raise TypeError("{typ} cannot perform the operation "
                            "{op}".format(typ=type(left).__name__, op=str_rep))

        elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
            # Give dispatch_to_index_op a chance for tests like
            # test_dt64_series_add_intlike, which the index dispatching handles
            # specifically.
            result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
            return construct_result(left, result,
                                    index=left.index, name=res_name,
                                    dtype=result.dtype)

        elif (is_extension_array_dtype(left) or
                (is_extension_array_dtype(right) and not is_scalar(right))):
            # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
            return dispatch_to_extension_op(op, left, right)

        elif is_timedelta64_dtype(left):
            result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
            return construct_result(left, result,
                                    index=left.index, name=res_name)

        elif is_timedelta64_dtype(right):
            # We should only get here with non-scalar or timedelta64('NaT')
            # values for right
            # Note: we cannot use dispatch_to_index_op because
            # that may incorrectly raise TypeError when we
            # should get NullFrequencyError
            result = op(pd.Index(left), right)
            return construct_result(left, result,
                                    index=left.index, name=res_name,
                                    dtype=result.dtype)

        lvalues = left.values
        rvalues = right
        if isinstance(rvalues, ABCSeries):
            rvalues = rvalues.values

        result = safe_na_op(lvalues, rvalues)
        return construct_result(left, result,
                                index=left.index, name=res_name, dtype=None)

    wrapper.__name__ = op_name
    return wrapper
[ "def", "_arith_method_SERIES", "(", "cls", ",", "op", ",", "special", ")", ":", "str_rep", "=", "_get_opstr", "(", "op", ",", "cls", ")", "op_name", "=", "_get_op_name", "(", "op", ",", "special", ")", "eval_kwargs", "=", "_gen_eval_kwargs", "(", "op_name", ")", "fill_zeros", "=", "_gen_fill_zeros", "(", "op_name", ")", "construct_result", "=", "(", "_construct_divmod_result", "if", "op", "in", "[", "divmod", ",", "rdivmod", "]", "else", "_construct_result", ")", "def", "na_op", "(", "x", ",", "y", ")", ":", "import", "pandas", ".", "core", ".", "computation", ".", "expressions", "as", "expressions", "try", ":", "result", "=", "expressions", ".", "evaluate", "(", "op", ",", "str_rep", ",", "x", ",", "y", ",", "*", "*", "eval_kwargs", ")", "except", "TypeError", ":", "result", "=", "masked_arith_op", "(", "x", ",", "y", ",", "op", ")", "result", "=", "missing", ".", "fill_zeros", "(", "result", ",", "x", ",", "y", ",", "op_name", ",", "fill_zeros", ")", "return", "result", "def", "safe_na_op", "(", "lvalues", ",", "rvalues", ")", ":", "\"\"\"\n return the result of evaluating na_op on the passed in values\n\n try coercion to object type if the native types are not compatible\n\n Parameters\n ----------\n lvalues : array-like\n rvalues : array-like\n\n Raises\n ------\n TypeError: invalid operation\n \"\"\"", "try", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "return", "na_op", "(", "lvalues", ",", "rvalues", ")", "except", "Exception", ":", "if", "is_object_dtype", "(", "lvalues", ")", ":", "return", "libalgos", ".", "arrmap_object", "(", "lvalues", ",", "lambda", "x", ":", "op", "(", "x", ",", "rvalues", ")", ")", "raise", "def", "wrapper", "(", "left", ",", "right", ")", ":", "if", "isinstance", "(", "right", ",", "ABCDataFrame", ")", ":", "return", "NotImplemented", "left", ",", "right", "=", "_align_method_SERIES", "(", "left", ",", "right", ")", "res_name", "=", "get_op_result_name", "(", "left", ",", "right", ")", "right", "=", "maybe_upcast_for_op", "(", "right", ")", "if", "is_categorical_dtype", "(", "left", ")", ":", "raise", "TypeError", "(", "\"{typ} cannot perform the operation \"", "\"{op}\"", ".", "format", "(", "typ", "=", "type", "(", "left", ")", ".", "__name__", ",", "op", "=", "str_rep", ")", ")", "elif", "is_datetime64_dtype", "(", "left", ")", "or", "is_datetime64tz_dtype", "(", "left", ")", ":", "# Give dispatch_to_index_op a chance for tests like", "# test_dt64_series_add_intlike, which the index dispatching handles", "# specifically.", "result", "=", "dispatch_to_index_op", "(", "op", ",", "left", ",", "right", ",", "pd", ".", "DatetimeIndex", ")", "return", "construct_result", "(", "left", ",", "result", ",", "index", "=", "left", ".", "index", ",", "name", "=", "res_name", ",", "dtype", "=", "result", ".", "dtype", ")", "elif", "(", "is_extension_array_dtype", "(", "left", ")", "or", "(", "is_extension_array_dtype", "(", "right", ")", "and", "not", "is_scalar", "(", "right", ")", ")", ")", ":", "# GH#22378 disallow scalar to exclude e.g. 
\"category\", \"Int64\"", "return", "dispatch_to_extension_op", "(", "op", ",", "left", ",", "right", ")", "elif", "is_timedelta64_dtype", "(", "left", ")", ":", "result", "=", "dispatch_to_index_op", "(", "op", ",", "left", ",", "right", ",", "pd", ".", "TimedeltaIndex", ")", "return", "construct_result", "(", "left", ",", "result", ",", "index", "=", "left", ".", "index", ",", "name", "=", "res_name", ")", "elif", "is_timedelta64_dtype", "(", "right", ")", ":", "# We should only get here with non-scalar or timedelta64('NaT')", "# values for right", "# Note: we cannot use dispatch_to_index_op because", "# that may incorrectly raise TypeError when we", "# should get NullFrequencyError", "result", "=", "op", "(", "pd", ".", "Index", "(", "left", ")", ",", "right", ")", "return", "construct_result", "(", "left", ",", "result", ",", "index", "=", "left", ".", "index", ",", "name", "=", "res_name", ",", "dtype", "=", "result", ".", "dtype", ")", "lvalues", "=", "left", ".", "values", "rvalues", "=", "right", "if", "isinstance", "(", "rvalues", ",", "ABCSeries", ")", ":", "rvalues", "=", "rvalues", ".", "values", "result", "=", "safe_na_op", "(", "lvalues", ",", "rvalues", ")", "return", "construct_result", "(", "left", ",", "result", ",", "index", "=", "left", ".", "index", ",", "name", "=", "res_name", ",", "dtype", "=", "None", ")", "wrapper", ".", "__name__", "=", "op_name", "return", "wrapper" ]
Wrapper function for Series arithmetic operations, to avoid code duplication.
[ "Wrapper", "function", "for", "Series", "arithmetic", "operations", "to", "avoid", "code", "duplication", "." ]
python
train
37.424242
scanny/python-pptx
pptx/oxml/shapes/picture.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/shapes/picture.py#L125-L145
def _fill_cropping(self, image_size, view_size):
    """
    Return a (left, top, right, bottom) 4-tuple containing the cropping
    values required to display an image of *image_size* in *view_size*
    when stretched proportionately. Each value is a percentage expressed
    as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and
    *view_size* are each (width, height) pairs.
    """
    def aspect_ratio(width, height):
        return width / height

    ar_view = aspect_ratio(*view_size)
    ar_image = aspect_ratio(*image_size)

    if ar_view < ar_image:  # image too wide
        crop = (1.0 - (ar_view / ar_image)) / 2.0
        return (crop, 0.0, crop, 0.0)
    if ar_view > ar_image:  # image too tall
        crop = (1.0 - (ar_image / ar_view)) / 2.0
        return (0.0, crop, 0.0, crop)
    return (0.0, 0.0, 0.0, 0.0)
[ "def", "_fill_cropping", "(", "self", ",", "image_size", ",", "view_size", ")", ":", "def", "aspect_ratio", "(", "width", ",", "height", ")", ":", "return", "width", "/", "height", "ar_view", "=", "aspect_ratio", "(", "*", "view_size", ")", "ar_image", "=", "aspect_ratio", "(", "*", "image_size", ")", "if", "ar_view", "<", "ar_image", ":", "# image too wide", "crop", "=", "(", "1.0", "-", "(", "ar_view", "/", "ar_image", ")", ")", "/", "2.0", "return", "(", "crop", ",", "0.0", ",", "crop", ",", "0.0", ")", "if", "ar_view", ">", "ar_image", ":", "# image too tall", "crop", "=", "(", "1.0", "-", "(", "ar_image", "/", "ar_view", ")", ")", "/", "2.0", "return", "(", "0.0", ",", "crop", ",", "0.0", ",", "crop", ")", "return", "(", "0.0", ",", "0.0", ",", "0.0", ",", "0.0", ")" ]
Return a (left, top, right, bottom) 4-tuple containing the cropping values required to display an image of *image_size* in *view_size* when stretched proportionately. Each value is a percentage expressed as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and *view_size* are each (width, height) pairs.
[ "Return", "a", "(", "left", "top", "right", "bottom", ")", "4", "-", "tuple", "containing", "the", "cropping", "values", "required", "to", "display", "an", "image", "of", "*", "image_size", "*", "in", "*", "view_size", "*", "when", "stretched", "proportionately", ".", "Each", "value", "is", "a", "percentage", "expressed", "as", "a", "fraction", "of", "1", ".", "0", "e", ".", "g", ".", "0", ".", "425", "represents", "42", ".", "5%", ".", "*", "image_size", "*", "and", "*", "view_size", "*", "are", "each", "(", "width", "height", ")", "pairs", "." ]
python
train
42.619048
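A worked example with hypothetical sizes, following the first branch's arithmetic: a 400x300 image (aspect ratio 4/3) shown in a square 200x200 view is too wide, so equal strips are cropped from the left and right edges:

crop = (1.0 - ((200 / 200) / (400 / 300))) / 2.0
print(crop)   # 0.125, i.e. 12.5% trimmed from each horizontal side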
sibirrer/lenstronomy
lenstronomy/Cosmo/nfw_param.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Cosmo/nfw_param.py#L59-L70
def c_rho0(self, rho0): """ computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c) :param rho0: density normalization in h^2/Mpc^3 (comoving) :return: concentration parameter c """ if not hasattr(self, '_c_rho0_interp'): c_array = np.linspace(0.1, 10, 100) rho0_array = self.rho0_c(c_array) from scipy import interpolate self._c_rho0_interp = interpolate.InterpolatedUnivariateSpline(rho0_array, c_array, w=None, bbox=[None, None], k=3) return self._c_rho0_interp(rho0)
[ "def", "c_rho0", "(", "self", ",", "rho0", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_c_rho0_interp'", ")", ":", "c_array", "=", "np", ".", "linspace", "(", "0.1", ",", "10", ",", "100", ")", "rho0_array", "=", "self", ".", "rho0_c", "(", "c_array", ")", "from", "scipy", "import", "interpolate", "self", ".", "_c_rho0_interp", "=", "interpolate", ".", "InterpolatedUnivariateSpline", "(", "rho0_array", ",", "c_array", ",", "w", "=", "None", ",", "bbox", "=", "[", "None", ",", "None", "]", ",", "k", "=", "3", ")", "return", "self", ".", "_c_rho0_interp", "(", "rho0", ")" ]
computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c) :param rho0: density normalization in h^2/Mpc^3 (comoving) :return: concentration parameter c
[ "computes", "the", "concentration", "given", "a", "comoving", "overdensity", "rho0", "(", "inverse", "of", "function", "rho0_c", ")", ":", "param", "rho0", ":", "density", "normalization", "in", "h^2", "/", "Mpc^3", "(", "comoving", ")", ":", "return", ":", "concentration", "parameter", "c" ]
python
train
49.75
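The inversion trick in c_rho0, tabulating the forward map once and spline-fitting the swapped columns, works for any monotonic function; a minimal sketch with a placeholder forward map standing in for rho0_c:

import numpy as np
from scipy import interpolate

c_array = np.linspace(0.1, 10, 100)
f_array = c_array**3 / np.log(1 + c_array)  # stand-in monotonic map, not the NFW formula
# Swap the columns: fit c as a function of f to obtain the inverse.
inverse = interpolate.InterpolatedUnivariateSpline(f_array, c_array, k=3)
print(float(inverse(f_array[50])), c_array[50])  # the two values agree closely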
sods/paramz
paramz/core/nameable.py
https://github.com/sods/paramz/blob/ae6fc6274b70fb723d91e48fc5026a9bc5a06508/paramz/core/nameable.py#L76-L88
def hierarchy_name(self, adjust_for_printing=True): """ return the name for this object with the parents names attached by dots. :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()` on the names, recursively """ if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x) else: adjust = lambda x: x if self.has_parent(): return self._parent_.hierarchy_name() + "." + adjust(self.name) return adjust(self.name)
[ "def", "hierarchy_name", "(", "self", ",", "adjust_for_printing", "=", "True", ")", ":", "if", "adjust_for_printing", ":", "adjust", "=", "lambda", "x", ":", "adjust_name_for_printing", "(", "x", ")", "else", ":", "adjust", "=", "lambda", "x", ":", "x", "if", "self", ".", "has_parent", "(", ")", ":", "return", "self", ".", "_parent_", ".", "hierarchy_name", "(", ")", "+", "\".\"", "+", "adjust", "(", "self", ".", "name", ")", "return", "adjust", "(", "self", ".", "name", ")" ]
return the name for this object with the parents names attached by dots. :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()` on the names, recursively
[ "return", "the", "name", "for", "this", "object", "with", "the", "parents", "names", "attached", "by", "dots", "." ]
python
train
45.769231
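A toy version of the dotted-name walk, with hypothetical node names rather than real paramz parameters:

class Node:
    # Minimal stand-in for the parent chain hierarchy_name() climbs.
    def __init__(self, name, parent=None):
        self.name, self._parent_ = name, parent
    def has_parent(self):
        return self._parent_ is not None
    def hierarchy_name(self):
        if self.has_parent():
            return self._parent_.hierarchy_name() + "." + self.name
        return self.name

leaf = Node("lengthscale", Node("rbf", Node("kern")))
print(leaf.hierarchy_name())  # kern.rbf.lengthscale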
tradenity/python-sdk
tradenity/resources/free_shipping_coupon.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/free_shipping_coupon.py#L505-L525
def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs): """Delete FreeShippingCoupon Delete an instance of FreeShippingCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) else: (data) = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) return data
[ "def", "delete_free_shipping_coupon_by_id", "(", "cls", ",", "free_shipping_coupon_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_free_shipping_coupon_by_id_with_http_info", "(", "free_shipping_coupon_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_delete_free_shipping_coupon_by_id_with_http_info", "(", "free_shipping_coupon_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete FreeShippingCoupon Delete an instance of FreeShippingCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Delete", "FreeShippingCoupon" ]
python
train
48.095238
brmscheiner/ideogram
ideogram/writer.py
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/writer.py#L13-L22
def jsName(path,name): '''Returns a name string without \, -, and . so that the string will play nicely with javascript.''' shortPath=path.replace( "C:\\Users\\scheinerbock\\Desktop\\"+ "ideogram\\scrapeSource\\test\\","") noDash = shortPath.replace("-","_dash_") jsPath=noDash.replace("\\","_slash_").replace(".","_dot_") jsName=jsPath+'_slash_'+name return jsName
[ "def", "jsName", "(", "path", ",", "name", ")", ":", "shortPath", "=", "path", ".", "replace", "(", "\"C:\\\\Users\\\\scheinerbock\\\\Desktop\\\\\"", "+", "\"ideogram\\\\scrapeSource\\\\test\\\\\"", ",", "\"\"", ")", "noDash", "=", "shortPath", ".", "replace", "(", "\"-\"", ",", "\"_dash_\"", ")", "jsPath", "=", "noDash", ".", "replace", "(", "\"\\\\\"", ",", "\"_slash_\"", ")", ".", "replace", "(", "\".\"", ",", "\"_dot_\"", ")", "jsName", "=", "jsPath", "+", "'_slash_'", "+", "name", "return", "jsName" ]
Returns a name string without \, -, and . so that the string will play nicely with javascript.
[ "Returns", "a", "name", "string", "without", "\\", "-", "and", ".", "so", "that", "the", "string", "will", "play", "nicely", "with", "javascript", "." ]
python
train
40.9
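Tracing the substitutions by hand on a made-up input (pretending the hardcoded Desktop prefix has already been stripped) shows the chain of replacements:

path, name = "foo-bar\\baz.py", "main"
no_dash = path.replace("-", "_dash_")                             # foo_dash_bar\baz.py
js_path = no_dash.replace("\\", "_slash_").replace(".", "_dot_")  # foo_dash_bar_slash_baz_dot_py
print(js_path + "_slash_" + name)  # foo_dash_bar_slash_baz_dot_py_slash_main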
mardix/Juice
juice/core.py
https://github.com/mardix/Juice/blob/7afa8d4238868235dfcdae82272bd77958dd416a/juice/core.py#L628-L635
def get_config(cls, key, default=None): """ Shortcut to access the application's config in your class :param key: The key to access :param default: The default value when None :returns mixed: """ return cls._app.config.get(key, default)
[ "def", "get_config", "(", "cls", ",", "key", ",", "default", "=", "None", ")", ":", "return", "cls", ".", "_app", ".", "config", ".", "get", "(", "key", ",", "default", ")" ]
Shortcut to access the application's config in your class :param key: The key to access :param default: The default value when None :returns mixed:
[ "Shortcut", "to", "access", "the", "application", "s", "config", "in", "your", "class", ":", "param", "key", ":", "The", "key", "to", "access", ":", "param", "default", ":", "The", "default", "value", "when", "None", ":", "returns", "mixed", ":" ]
python
train
35.625
gabstopper/smc-python
smc/administration/tasks.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/administration/tasks.py#L245-L257
def add_done_callback(self, callback): """ Add a callback to run after the task completes. The callable must take 1 argument which will be the completed Task. :param callback: a callable that takes a single argument which will be the completed Task. """ if self._done is None or self._done.is_set(): raise ValueError('Task has already finished') if callable(callback): self.callbacks.append(callback)
[ "def", "add_done_callback", "(", "self", ",", "callback", ")", ":", "if", "self", ".", "_done", "is", "None", "or", "self", ".", "_done", ".", "is_set", "(", ")", ":", "raise", "ValueError", "(", "'Task has already finished'", ")", "if", "callable", "(", "callback", ")", ":", "self", ".", "callbacks", ".", "append", "(", "callback", ")" ]
Add a callback to run after the task completes. The callable must take 1 argument which will be the completed Task. :param callback: a callable that takes a single argument which will be the completed Task.
[ "Add", "a", "callback", "to", "run", "after", "the", "task", "completes", ".", "The", "callable", "must", "take", "1", "argument", "which", "will", "be", "the", "completed", "Task", "." ]
python
train
37.615385
mental32/spotify.py
spotify/models/base.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/base.py#L16-L36
async def from_href(self): """Get the full object from spotify with a `href` attribute.""" if not hasattr(self, 'href'): raise TypeError('Spotify object has no `href` attribute, therefore cannot be retrieved') elif hasattr(self, 'http'): return await self.http.request(('GET', self.href)) else: cls = type(self) try: client = getattr(self, '_{0}__client'.format(cls.__name__)) except AttributeError: raise TypeError('Spotify object has no way to access a HTTPClient.') else: http = client.http data = await http.request(('GET', self.href)) return cls(client, data)
[ "async", "def", "from_href", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'href'", ")", ":", "raise", "TypeError", "(", "'Spotify object has no `href` attribute, therefore cannot be retrieved'", ")", "elif", "hasattr", "(", "self", ",", "'http'", ")", ":", "return", "await", "self", ".", "http", ".", "request", "(", "(", "'GET'", ",", "self", ".", "href", ")", ")", "else", ":", "cls", "=", "type", "(", "self", ")", "try", ":", "client", "=", "getattr", "(", "self", ",", "'_{0}__client'", ".", "format", "(", "cls", ".", "__name__", ")", ")", "except", "AttributeError", ":", "raise", "TypeError", "(", "'Spotify object has no way to access a HTTPClient.'", ")", "else", ":", "http", "=", "client", ".", "http", "data", "=", "await", "http", ".", "request", "(", "(", "'GET'", ",", "self", ".", "href", ")", ")", "return", "cls", "(", "client", ",", "data", ")" ]
Get the full object from spotify with a `href` attribute.
[ "Get", "the", "full", "object", "from", "spotify", "with", "a", "href", "attribute", "." ]
python
test
32.952381
inveniosoftware/invenio-records-files
invenio_records_files/api.py
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L32-L40
def get_version(self, version_id=None): """Return specific version ``ObjectVersion`` instance or HEAD. :param version_id: Version ID of the object. :returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or HEAD of the stored object. """ return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key, version_id=version_id)
[ "def", "get_version", "(", "self", ",", "version_id", "=", "None", ")", ":", "return", "ObjectVersion", ".", "get", "(", "bucket", "=", "self", ".", "obj", ".", "bucket", ",", "key", "=", "self", ".", "obj", ".", "key", ",", "version_id", "=", "version_id", ")" ]
Return specific version ``ObjectVersion`` instance or HEAD. :param version_id: Version ID of the object. :returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or HEAD of the stored object.
[ "Return", "specific", "version", "ObjectVersion", "instance", "or", "HEAD", "." ]
python
train
46.444444
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L3033-L3038
def alter_targets(self): """Return any corresponding targets in a variant directory. """ if self.is_derived(): return [], None return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
[ "def", "alter_targets", "(", "self", ")", ":", "if", "self", ".", "is_derived", "(", ")", ":", "return", "[", "]", ",", "None", "return", "self", ".", "fs", ".", "variant_dir_target_climb", "(", "self", ",", "self", ".", "dir", ",", "[", "self", ".", "name", "]", ")" ]
Return any corresponding targets in a variant directory.
[ "Return", "any", "corresponding", "targets", "in", "a", "variant", "directory", "." ]
python
train
39
bitesofcode/projexui
projexui/dialogs/xwizardbrowserdialog/xwizardbrowserdialog.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xwizardbrowserdialog/xwizardbrowserdialog.py#L208-L216
def showDescription( self ): """ Shows the description for the current plugin in the interface. """ plugin = self.currentPlugin() if ( not plugin ): self.uiDescriptionTXT.setText('') else: self.uiDescriptionTXT.setText(plugin.description())
[ "def", "showDescription", "(", "self", ")", ":", "plugin", "=", "self", ".", "currentPlugin", "(", ")", "if", "(", "not", "plugin", ")", ":", "self", ".", "uiDescriptionTXT", ".", "setText", "(", "''", ")", "else", ":", "self", ".", "uiDescriptionTXT", ".", "setText", "(", "plugin", ".", "description", "(", ")", ")" ]
Shows the description for the current plugin in the interface.
[ "Shows", "the", "description", "for", "the", "current", "plugin", "in", "the", "interface", "." ]
python
train
34.666667
PMEAL/OpenPNM
openpnm/core/Base.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L1430-L1463
def show_hist(self, props=[], bins=20, **kwargs): r""" Show a quick plot of key property distributions. Parameters ---------- props : string or list of strings The pore and/or throat properties to be plotted as histograms bins : int or array_like The number of bins to use when generating the histogram. If an array is given they are used as the bin spacing instead. Notes ----- Other keyword arguments are passed to the ``matplotlib.pyplot.hist`` function. """ if type(props) is str: props = [props] N = len(props) if N == 1: r = 1 c = 1 elif N < 4: r = 1 c = N else: r = int(sp.ceil(N**0.5)) c = int(sp.floor(N**0.5)) for i in range(len(props)): plt.subplot(r, c, i+1) plt.hist(self[props[i]], bins=bins, **kwargs)
[ "def", "show_hist", "(", "self", ",", "props", "=", "[", "]", ",", "bins", "=", "20", ",", "*", "*", "kwargs", ")", ":", "if", "type", "(", "props", ")", "is", "str", ":", "props", "=", "[", "props", "]", "N", "=", "len", "(", "props", ")", "if", "N", "==", "1", ":", "r", "=", "1", "c", "=", "1", "elif", "N", "<", "4", ":", "r", "=", "1", "c", "=", "N", "else", ":", "r", "=", "int", "(", "sp", ".", "ceil", "(", "N", "**", "0.5", ")", ")", "c", "=", "int", "(", "sp", ".", "floor", "(", "N", "**", "0.5", ")", ")", "for", "i", "in", "range", "(", "len", "(", "props", ")", ")", ":", "plt", ".", "subplot", "(", "r", ",", "c", ",", "i", "+", "1", ")", "plt", ".", "hist", "(", "self", "[", "props", "[", "i", "]", "]", ",", "bins", "=", "bins", ",", "*", "*", "kwargs", ")" ]
r""" Show a quick plot of key property distributions. Parameters ---------- props : string or list of strings The pore and/or throat properties to be plotted as histograms bins : int or array_like The number of bins to use when generating the histogram. If an array is given they are used as the bin spacing instead. Notes ----- Other keyword arguments are passed to the ``matplotlib.pyplot.hist`` function.
[ "r", "Show", "a", "quick", "plot", "of", "key", "property", "distributions", "." ]
python
train
28.470588
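A plotting-free check of the grid sizing above (math.ceil/math.floor in place of the scipy aliases) shows the ceil-by-floor split can leave fewer slots than plots for some N, e.g. six slots for N = 7:

import math

for n in range(1, 10):
    if n == 1:
        r = c = 1
    elif n < 4:
        r, c = 1, n
    else:
        r = math.ceil(n**0.5)
        c = math.floor(n**0.5)
    print(n, (r, c), "slots:", r * c)  # n = 7 or 8 yields only 6 slots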
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L665-L680
def fetch_more(self, rows=False, columns=False): """Get more columns or rows (based on axis).""" if self.axis == 1 and self.total_rows > self.rows_loaded: reminder = self.total_rows - self.rows_loaded items_to_fetch = min(reminder, ROWS_TO_LOAD) self.beginInsertRows(QModelIndex(), self.rows_loaded, self.rows_loaded + items_to_fetch - 1) self.rows_loaded += items_to_fetch self.endInsertRows() if self.axis == 0 and self.total_cols > self.cols_loaded: reminder = self.total_cols - self.cols_loaded items_to_fetch = min(reminder, COLS_TO_LOAD) self.beginInsertColumns(QModelIndex(), self.cols_loaded, self.cols_loaded + items_to_fetch - 1) self.cols_loaded += items_to_fetch self.endInsertColumns()
[ "def", "fetch_more", "(", "self", ",", "rows", "=", "False", ",", "columns", "=", "False", ")", ":", "if", "self", ".", "axis", "==", "1", "and", "self", ".", "total_rows", ">", "self", ".", "rows_loaded", ":", "reminder", "=", "self", ".", "total_rows", "-", "self", ".", "rows_loaded", "items_to_fetch", "=", "min", "(", "reminder", ",", "ROWS_TO_LOAD", ")", "self", ".", "beginInsertRows", "(", "QModelIndex", "(", ")", ",", "self", ".", "rows_loaded", ",", "self", ".", "rows_loaded", "+", "items_to_fetch", "-", "1", ")", "self", ".", "rows_loaded", "+=", "items_to_fetch", "self", ".", "endInsertRows", "(", ")", "if", "self", ".", "axis", "==", "0", "and", "self", ".", "total_cols", ">", "self", ".", "cols_loaded", ":", "reminder", "=", "self", ".", "total_cols", "-", "self", ".", "cols_loaded", "items_to_fetch", "=", "min", "(", "reminder", ",", "COLS_TO_LOAD", ")", "self", ".", "beginInsertColumns", "(", "QModelIndex", "(", ")", ",", "self", ".", "cols_loaded", ",", "self", ".", "cols_loaded", "+", "items_to_fetch", "-", "1", ")", "self", ".", "cols_loaded", "+=", "items_to_fetch", "self", ".", "endInsertColumns", "(", ")" ]
Get more columns or rows (based on axis).
[ "Get", "more", "columns", "or", "rows", "(", "based", "on", "axis", ")", "." ]
python
train
57
globality-corp/microcosm
microcosm/config/types.py
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/config/types.py#L8-L21
def boolean(value): """ Configuration-friendly boolean type converter. Supports both boolean-valued and string-valued inputs (e.g. from env vars). """ if isinstance(value, bool): return value if value == "": return False return strtobool(value)
[ "def", "boolean", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "value", "if", "value", "==", "\"\"", ":", "return", "False", "return", "strtobool", "(", "value", ")" ]
Configuration-friendly boolean type converter. Supports both boolean-valued and string-valued inputs (e.g. from env vars).
[ "Configuration", "-", "friendly", "boolean", "type", "converter", "." ]
python
train
19.928571
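Assuming strtobool here is distutils.util.strtobool (deprecated and removed in Python 3.12), string inputs come back as the ints 0/1 rather than bools, which the standalone sketch below makes visible:

from distutils.util import strtobool

def boolean(value):
    # Same three-way dispatch as the converter above.
    if isinstance(value, bool):
        return value
    if value == "":
        return False
    return strtobool(value)

print(boolean(True), boolean(""), boolean("yes"), boolean("0"))  # True False 1 0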
Esri/ArcREST
src/arcrest/webmap/symbols.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/webmap/symbols.py#L448-L454
def base64ToImage(imgData, out_path, out_file): """ converts a base64 string to a file """ fh = open(os.path.join(out_path, out_file), "wb") fh.write(imgData.decode('base64')) fh.close() del fh return os.path.join(out_path, out_file)
[ "def", "base64ToImage", "(", "imgData", ",", "out_path", ",", "out_file", ")", ":", "fh", "=", "open", "(", "os", ".", "path", ".", "join", "(", "out_path", ",", "out_file", ")", ",", "\"wb\"", ")", "fh", ".", "write", "(", "imgData", ".", "decode", "(", "'base64'", ")", ")", "fh", ".", "close", "(", ")", "del", "fh", "return", "os", ".", "path", ".", "join", "(", "out_path", ",", "out_file", ")" ]
converts a base64 string to a file
[ "converts", "a", "base64", "string", "to", "a", "file" ]
python
train
39.285714
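The bytes-to-file step relies on str.decode('base64'), which only exists on Python 2; a rough Python 3 equivalent (assuming imgData is an ASCII base64 string) goes through the base64 module:

import base64
import os

def base64_to_image(img_data, out_path, out_file):
    # Python 3 replacement for imgData.decode('base64') above.
    target = os.path.join(out_path, out_file)
    with open(target, "wb") as fh:
        fh.write(base64.b64decode(img_data))
    return target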
spacetelescope/stsci.tools
lib/stsci/tools/fileutil.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fileutil.py#L171-L175
def convertDate(date): """Convert DATE string into a decimal year.""" d, t = date.split('T') return decimal_date(d, timeobs=t)
[ "def", "convertDate", "(", "date", ")", ":", "d", ",", "t", "=", "date", ".", "split", "(", "'T'", ")", "return", "decimal_date", "(", "d", ",", "timeobs", "=", "t", ")" ]
Convert DATE string into a decimal year.
[ "Convert", "DATE", "string", "into", "a", "decimal", "year", "." ]
python
train
27
xflows/rdm
rdm/wrappers/rsd/rsd.py
https://github.com/xflows/rdm/blob/d984e2a0297e5fa8d799953bbd0dba79b05d403d/rdm/wrappers/rsd/rsd.py#L78-L89
def settingsAsFacts(self, settings): """ Parses a string of settings. :param settings: String of settings in the form: ``set(name1, val1), set(name2, val2)...`` """ pattern = re.compile('set\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\)') pairs = pattern.findall(settings) for name, val in pairs: self.set(name, val)
[ "def", "settingsAsFacts", "(", "self", ",", "settings", ")", ":", "pattern", "=", "re", ".", "compile", "(", "'set\\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\\)'", ")", "pairs", "=", "pattern", ".", "findall", "(", "settings", ")", "for", "name", ",", "val", "in", "pairs", ":", "self", ".", "set", "(", "name", ",", "val", ")" ]
Parses a string of settings. :param settings: String of settings in the form: ``set(name1, val1), set(name2, val2)...``
[ "Parses", "a", "string", "of", "settings", "." ]
python
train
31.666667
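A quick standalone check of the pattern against the documented input form:

import re

pattern = re.compile(r'set\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\)')
print(pattern.findall("set(verbosity,3),set(clauselength,4)"))
# [('verbosity', '3'), ('clauselength', '4')]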
tensorflow/tensor2tensor
tensor2tensor/trax/jaxboard.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L111-L125
def scalar(self, tag, value, step=None): """Saves scalar value. Args: tag: str: label for this data value: int/float: number to log step: int: training step """ value = float(onp.array(value)) if step is None: step = self._step else: self._step = step summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)]) self.add_summary(summary, step)
[ "def", "scalar", "(", "self", ",", "tag", ",", "value", ",", "step", "=", "None", ")", ":", "value", "=", "float", "(", "onp", ".", "array", "(", "value", ")", ")", "if", "step", "is", "None", ":", "step", "=", "self", ".", "_step", "else", ":", "self", ".", "_step", "=", "step", "summary", "=", "Summary", "(", "value", "=", "[", "Summary", ".", "Value", "(", "tag", "=", "tag", ",", "simple_value", "=", "value", ")", "]", ")", "self", ".", "add_summary", "(", "summary", ",", "step", ")" ]
Saves scalar value. Args: tag: str: label for this data value: int/float: number to log step: int: training step
[ "Saves", "scalar", "value", "." ]
python
train
26.8
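The only subtle part is the step bookkeeping: passing step records it for later calls, while omitting it reuses the last value. A mock writer (not the jaxboard class) makes that visible:

class MockWriter:
    # Reproduces just the step handling of scalar() above.
    def __init__(self):
        self._step = 0
        self.log = []
    def scalar(self, tag, value, step=None):
        if step is None:
            step = self._step
        else:
            self._step = step
        self.log.append((tag, float(value), step))

w = MockWriter()
w.scalar("loss", 0.9, step=10)
w.scalar("loss", 0.5)  # no step given, so step 10 is reused
print(w.log)           # [('loss', 0.9, 10), ('loss', 0.5, 10)]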
google/grr
grr/server/grr_response_server/flows/general/transfer.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/transfer.py#L432-L442
def StoreStat(self, responses): """Stores stat entry in the flow's state.""" index = responses.request_data["index"] if not responses.success: self.Log("Failed to stat file: %s", responses.status) # Report failure. self._FileFetchFailed(index, responses.request_data["request_name"]) return tracker = self.state.pending_hashes[index] tracker["stat_entry"] = responses.First()
[ "def", "StoreStat", "(", "self", ",", "responses", ")", ":", "index", "=", "responses", ".", "request_data", "[", "\"index\"", "]", "if", "not", "responses", ".", "success", ":", "self", ".", "Log", "(", "\"Failed to stat file: %s\"", ",", "responses", ".", "status", ")", "# Report failure.", "self", ".", "_FileFetchFailed", "(", "index", ",", "responses", ".", "request_data", "[", "\"request_name\"", "]", ")", "return", "tracker", "=", "self", ".", "state", ".", "pending_hashes", "[", "index", "]", "tracker", "[", "\"stat_entry\"", "]", "=", "responses", ".", "First", "(", ")" ]
Stores stat entry in the flow's state.
[ "Stores", "stat", "entry", "in", "the", "flow", "s", "state", "." ]
python
train
37.272727
what-studio/profiling
profiling/__main__.py
https://github.com/what-studio/profiling/blob/49666ba3ea295eb73782ae6c18a4ec7929d7d8b7/profiling/__main__.py#L126-L144
def config_default(option, default=None, type=None, section=cli.name): """Guesses a default value of a CLI option from the configuration. :: @click.option('--locale', default=config_default('locale')) """ def f(option=option, default=default, type=type, section=section): config = read_config() if type is None and default is not None: # detect type from default. type = builtins.type(default) get_option = option_getter(type) try: return get_option(config, section, option) except (NoOptionError, NoSectionError): return default return f
[ "def", "config_default", "(", "option", ",", "default", "=", "None", ",", "type", "=", "None", ",", "section", "=", "cli", ".", "name", ")", ":", "def", "f", "(", "option", "=", "option", ",", "default", "=", "default", ",", "type", "=", "type", ",", "section", "=", "section", ")", ":", "config", "=", "read_config", "(", ")", "if", "type", "is", "None", "and", "default", "is", "not", "None", ":", "# detect type from default.", "type", "=", "builtins", ".", "type", "(", "default", ")", "get_option", "=", "option_getter", "(", "type", ")", "try", ":", "return", "get_option", "(", "config", ",", "section", ",", "option", ")", "except", "(", "NoOptionError", ",", "NoSectionError", ")", ":", "return", "default", "return", "f" ]
Guesses a default value of a CLI option from the configuration. :: @click.option('--locale', default=config_default('locale'))
[ "Guesses", "a", "default", "value", "of", "a", "CLI", "option", "from", "the", "configuration", "." ]
python
train
33.578947
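Because the helper returns a zero-argument callable, the configuration is read when the option default is actually needed, not at import time. A minimal analogue with a fake in-memory config in place of the real reader:

def config_default(option, default=None, config=None):
    # Lazily-evaluated default: the lookup runs when f() is invoked.
    def f():
        try:
            return (config or {})[option]
        except KeyError:
            return default
    return f

get_locale = config_default("locale", default="en", config={"locale": "ko"})
print(get_locale())  # 'ko', looked up lazily at call time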
allelos/vectors
vectors/vectors.py
https://github.com/allelos/vectors/blob/55db2a7e489ae5f4380e70b3c5b7a6ce39de5cee/vectors/vectors.py#L235-L241
def spherical(cls, mag, theta, phi=0): '''Returns a Vector instance from spherical coordinates''' return cls( mag * math.sin(phi) * math.cos(theta), # X mag * math.sin(phi) * math.sin(theta), # Y mag * math.cos(phi) # Z )
[ "def", "spherical", "(", "cls", ",", "mag", ",", "theta", ",", "phi", "=", "0", ")", ":", "return", "cls", "(", "mag", "*", "math", ".", "sin", "(", "phi", ")", "*", "math", ".", "cos", "(", "theta", ")", ",", "# X", "mag", "*", "math", ".", "sin", "(", "phi", ")", "*", "math", ".", "sin", "(", "theta", ")", ",", "# Y", "mag", "*", "math", ".", "cos", "(", "phi", ")", "# Z", ")" ]
Returns a Vector instance from spherical coordinates
[ "Returns", "a", "Vector", "instance", "from", "spherical", "coordinates" ]
python
train
39.714286
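With phi measured from the +z axis, as the code implies, this is the standard spherical-to-Cartesian map; for instance mag=1, theta=0, phi=pi/2 lands on the +x axis (up to floating-point noise in z):

import math

def spherical(mag, theta, phi=0.0):
    # Same conversion as Vector.spherical above, returning a plain tuple.
    return (mag * math.sin(phi) * math.cos(theta),  # x
            mag * math.sin(phi) * math.sin(theta),  # y
            mag * math.cos(phi))                    # z

print(spherical(1.0, 0.0, math.pi / 2))  # approximately (1.0, 0.0, 0.0)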
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L321-L337
def GetAttributes(self, urns, age=NEWEST_TIME): """Retrieves all the attributes for all the urns.""" urns = set([utils.SmartUnicode(u) for u in urns]) to_read = {urn: self._MakeCacheInvariant(urn, age) for urn in urns} # Urns not present in the cache we need to get from the database. if to_read: for subject, values in data_store.DB.MultiResolvePrefix( to_read, AFF4_PREFIXES, timestamp=self.ParseAgeSpecification(age), limit=None): # Ensure the values are sorted. values.sort(key=lambda x: x[-1], reverse=True) yield utils.SmartUnicode(subject), values
[ "def", "GetAttributes", "(", "self", ",", "urns", ",", "age", "=", "NEWEST_TIME", ")", ":", "urns", "=", "set", "(", "[", "utils", ".", "SmartUnicode", "(", "u", ")", "for", "u", "in", "urns", "]", ")", "to_read", "=", "{", "urn", ":", "self", ".", "_MakeCacheInvariant", "(", "urn", ",", "age", ")", "for", "urn", "in", "urns", "}", "# Urns not present in the cache we need to get from the database.", "if", "to_read", ":", "for", "subject", ",", "values", "in", "data_store", ".", "DB", ".", "MultiResolvePrefix", "(", "to_read", ",", "AFF4_PREFIXES", ",", "timestamp", "=", "self", ".", "ParseAgeSpecification", "(", "age", ")", ",", "limit", "=", "None", ")", ":", "# Ensure the values are sorted.", "values", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "-", "1", "]", ",", "reverse", "=", "True", ")", "yield", "utils", ".", "SmartUnicode", "(", "subject", ")", ",", "values" ]
Retrieves all the attributes for all the urns.
[ "Retrieves", "all", "the", "attributes", "for", "all", "the", "urns", "." ]
python
train
37.117647
pyviz/holoviews
holoviews/core/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L2007-L2019
def mimebundle_to_html(bundle): """ Converts a MIME bundle into HTML. """ if isinstance(bundle, tuple): data, metadata = bundle else: data = bundle html = data.get('text/html', '') if 'application/javascript' in data: js = data['application/javascript'] html += '\n<script type="application/javascript">{js}</script>'.format(js=js) return html
[ "def", "mimebundle_to_html", "(", "bundle", ")", ":", "if", "isinstance", "(", "bundle", ",", "tuple", ")", ":", "data", ",", "metadata", "=", "bundle", "else", ":", "data", "=", "bundle", "html", "=", "data", ".", "get", "(", "'text/html'", ",", "''", ")", "if", "'application/javascript'", "in", "data", ":", "js", "=", "data", "[", "'application/javascript'", "]", "html", "+=", "'\\n<script type=\"application/javascript\">{js}</script>'", ".", "format", "(", "js", "=", "js", ")", "return", "html" ]
Converts a MIME bundle into HTML.
[ "Converts", "a", "MIME", "bundle", "into", "HTML", "." ]
python
train
30.384615
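A small round trip with a hand-made bundle, using a standalone copy of the same merge rule:

def mimebundle_to_html(bundle):
    # HTML payload first, then any JavaScript appended in a <script> tag.
    data = bundle[0] if isinstance(bundle, tuple) else bundle
    html = data.get('text/html', '')
    if 'application/javascript' in data:
        html += '\n<script type="application/javascript">{js}</script>'.format(
            js=data['application/javascript'])
    return html

print(mimebundle_to_html({'text/html': '<div id="p"></div>',
                          'application/javascript': 'render("p");'}))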
bram85/topydo
topydo/lib/ChangeSet.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/ChangeSet.py#L96-L119
def save(self, p_todolist): """ Saves a tuple with archive, todolist and command with its arguments into the backup file with unix timestamp as the key. Tuple is then indexed in backup file with combination of hash calculated from p_todolist and unix timestamp. Backup file is closed afterwards. """ self._trim() current_hash = hash_todolist(p_todolist) list_todo = (self.todolist.print_todos()+'\n').splitlines(True) try: list_archive = (self.archive.print_todos()+'\n').splitlines(True) except AttributeError: list_archive = [] self.backup_dict[self.timestamp] = (list_todo, list_archive, self.label) index = self._get_index() index.insert(0, (self.timestamp, current_hash)) self._save_index(index) self._write() self.close()
[ "def", "save", "(", "self", ",", "p_todolist", ")", ":", "self", ".", "_trim", "(", ")", "current_hash", "=", "hash_todolist", "(", "p_todolist", ")", "list_todo", "=", "(", "self", ".", "todolist", ".", "print_todos", "(", ")", "+", "'\\n'", ")", ".", "splitlines", "(", "True", ")", "try", ":", "list_archive", "=", "(", "self", ".", "archive", ".", "print_todos", "(", ")", "+", "'\\n'", ")", ".", "splitlines", "(", "True", ")", "except", "AttributeError", ":", "list_archive", "=", "[", "]", "self", ".", "backup_dict", "[", "self", ".", "timestamp", "]", "=", "(", "list_todo", ",", "list_archive", ",", "self", ".", "label", ")", "index", "=", "self", ".", "_get_index", "(", ")", "index", ".", "insert", "(", "0", ",", "(", "self", ".", "timestamp", ",", "current_hash", ")", ")", "self", ".", "_save_index", "(", "index", ")", "self", ".", "_write", "(", ")", "self", ".", "close", "(", ")" ]
Saves a tuple with archive, todolist and command with its arguments into the backup file with unix timestamp as the key. Tuple is then indexed in backup file with combination of hash calculated from p_todolist and unix timestamp. Backup file is closed afterwards.
[ "Saves", "a", "tuple", "with", "archive", "todolist", "and", "command", "with", "its", "arguments", "into", "the", "backup", "file", "with", "unix", "timestamp", "as", "the", "key", ".", "Tuple", "is", "then", "indexed", "in", "backup", "file", "with", "combination", "of", "hash", "calculated", "from", "p_todolist", "and", "unix", "timestamp", ".", "Backup", "file", "is", "closed", "afterwards", "." ]
python
train
36.208333
scanny/python-pptx
pptx/oxml/shapes/autoshape.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/shapes/autoshape.py#L115-L119
def add_path(self, w, h): """Return a newly created `a:path` child element.""" path = self._add_path() path.w, path.h = w, h return path
[ "def", "add_path", "(", "self", ",", "w", ",", "h", ")", ":", "path", "=", "self", ".", "_add_path", "(", ")", "path", ".", "w", ",", "path", ".", "h", "=", "w", ",", "h", "return", "path" ]
Return a newly created `a:path` child element.
[ "Return", "a", "newly", "created", "a", ":", "path", "child", "element", "." ]
python
train
32.8
6809/MC6809
MC6809/components/mc6809_stack.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_stack.py#L149-L186
def instruction_PUL(self, opcode, m, register): """ All, some, or none of the processor registers are pulled from stack (with the exception of stack pointer itself). A single register may be pulled from the stack with condition codes set by doing an autoincrement load from the stack (example: LDX ,S++). source code forms: b7 b6 b5 b4 b3 b2 b1 b0 PC U Y X DP B A CC = pull order CC bits "HNZVC": ccccc """ assert register in (self.system_stack_pointer, self.user_stack_pointer) def pull(register_str, stack_pointer): reg_obj = self.register_str2object[register_str] reg_width = reg_obj.WIDTH # 8 / 16 if reg_width == 8: data = self.pull_byte(stack_pointer) else: assert reg_width == 16 data = self.pull_word(stack_pointer) reg_obj.set(data) # log.debug("$%x PUL%s:", self.program_counter, register.name) # m = postbyte if m & 0x01: pull(REG_CC, register) # 8 bit condition code register if m & 0x02: pull(REG_A, register) # 8 bit accumulator if m & 0x04: pull(REG_B, register) # 8 bit accumulator if m & 0x08: pull(REG_DP, register) # 8 bit direct page register if m & 0x10: pull(REG_X, register) # 16 bit index register if m & 0x20: pull(REG_Y, register) # 16 bit index register if m & 0x40: pull(REG_U, register) # 16 bit user-stack pointer if m & 0x80: pull(REG_PC, register)
[ "def", "instruction_PUL", "(", "self", ",", "opcode", ",", "m", ",", "register", ")", ":", "assert", "register", "in", "(", "self", ".", "system_stack_pointer", ",", "self", ".", "user_stack_pointer", ")", "def", "pull", "(", "register_str", ",", "stack_pointer", ")", ":", "reg_obj", "=", "self", ".", "register_str2object", "[", "register_str", "]", "reg_width", "=", "reg_obj", ".", "WIDTH", "# 8 / 16", "if", "reg_width", "==", "8", ":", "data", "=", "self", ".", "pull_byte", "(", "stack_pointer", ")", "else", ":", "assert", "reg_width", "==", "16", "data", "=", "self", ".", "pull_word", "(", "stack_pointer", ")", "reg_obj", ".", "set", "(", "data", ")", "# log.debug(\"$%x PUL%s:\", self.program_counter, register.name)", "# m = postbyte", "if", "m", "&", "0x01", ":", "pull", "(", "REG_CC", ",", "register", ")", "# 8 bit condition code register", "if", "m", "&", "0x02", ":", "pull", "(", "REG_A", ",", "register", ")", "# 8 bit accumulator", "if", "m", "&", "0x04", ":", "pull", "(", "REG_B", ",", "register", ")", "# 8 bit accumulator", "if", "m", "&", "0x08", ":", "pull", "(", "REG_DP", ",", "register", ")", "# 8 bit direct page register", "if", "m", "&", "0x10", ":", "pull", "(", "REG_X", ",", "register", ")", "# 16 bit index register", "if", "m", "&", "0x20", ":", "pull", "(", "REG_Y", ",", "register", ")", "# 16 bit index register", "if", "m", "&", "0x40", ":", "pull", "(", "REG_U", ",", "register", ")", "# 16 bit user-stack pointer", "if", "m", "&", "0x80", ":", "pull", "(", "REG_PC", ",", "register", ")" ]
All, some, or none of the processor registers are pulled from stack (with the exception of stack pointer itself). A single register may be pulled from the stack with condition codes set by doing an autoincrement load from the stack (example: LDX ,S++). source code forms: b7 b6 b5 b4 b3 b2 b1 b0 PC U Y X DP B A CC = pull order CC bits "HNZVC": ccccc
[ "All", "some", "or", "none", "of", "the", "processor", "registers", "are", "pulled", "from", "stack", "(", "with", "the", "exception", "of", "stack", "pointer", "itself", ")", "." ]
python
train
40.131579
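Decoding a postbyte by hand makes the bit order concrete: m = 0x86 sets bits 1, 2 and 7, so A, B and PC are pulled, in that order:

ORDER = ["CC", "A", "B", "DP", "X", "Y", "U", "PC"]  # bit 0 .. bit 7, as above
m = 0x86
pulled = [name for bit, name in enumerate(ORDER) if m & (1 << bit)]
print(pulled)  # ['A', 'B', 'PC']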
secdev/scapy
scapy/contrib/http2.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/http2.py#L368-L397
def addfield(self, pkt, s, val): # type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501 """ An AbstractUVarIntField prefix always consumes the remaining bits of a BitField;if no current BitField is in use (no tuple in entry) then the prefix length is 8 bits and the whole byte is to be consumed @param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501 @param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501 generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501 number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501 previous bitfield-compatible fields. @param int val: the positive or null value to be added. @return str: s concatenated with the machine representation of this field. # noqa: E501 @raise AssertionError """ assert(val >= 0) if isinstance(s, bytes): assert self.size == 8, 'EINVAL: s: tuple expected when prefix_len is not a full byte' # noqa: E501 return s + self.i2m(pkt, val) # s is a tuple # assert(s[1] >= 0) # assert(s[2] >= 0) # assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501 if val >= self._max_value: return s[0] + chb((s[2] << self.size) + self._max_value) + self.i2m(pkt, val)[1:] # noqa: E501 # This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501 # and appending the resulting byte to the string return s[0] + chb((s[2] << self.size) + orb(self.i2m(pkt, val)))
[ "def", "addfield", "(", "self", ",", "pkt", ",", "s", ",", "val", ")", ":", "# type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501", "assert", "(", "val", ">=", "0", ")", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "assert", "self", ".", "size", "==", "8", ",", "'EINVAL: s: tuple expected when prefix_len is not a full byte'", "# noqa: E501", "return", "s", "+", "self", ".", "i2m", "(", "pkt", ",", "val", ")", "# s is a tuple", "# assert(s[1] >= 0)", "# assert(s[2] >= 0)", "# assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501", "if", "val", ">=", "self", ".", "_max_value", ":", "return", "s", "[", "0", "]", "+", "chb", "(", "(", "s", "[", "2", "]", "<<", "self", ".", "size", ")", "+", "self", ".", "_max_value", ")", "+", "self", ".", "i2m", "(", "pkt", ",", "val", ")", "[", "1", ":", "]", "# noqa: E501", "# This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501", "# and appending the resulting byte to the string", "return", "s", "[", "0", "]", "+", "chb", "(", "(", "s", "[", "2", "]", "<<", "self", ".", "size", ")", "+", "orb", "(", "self", ".", "i2m", "(", "pkt", ",", "val", ")", ")", ")" ]
An AbstractUVarIntField prefix always consumes the remaining bits of a BitField;if no current BitField is in use (no tuple in entry) then the prefix length is 8 bits and the whole byte is to be consumed @param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501 @param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501 generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501 number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501 previous bitfield-compatible fields. @param int val: the positive or null value to be added. @return str: s concatenated with the machine representation of this field. # noqa: E501 @raise AssertionError
[ "An", "AbstractUVarIntField", "prefix", "always", "consumes", "the", "remaining", "bits", "of", "a", "BitField", ";", "if", "no", "current", "BitField", "is", "in", "use", "(", "no", "tuple", "in", "entry", ")", "then", "the", "prefix", "length", "is", "8", "bits", "and", "the", "whole", "byte", "is", "to", "be", "consumed" ]
python
train
63.233333
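The scheme implemented around scapy's bitfield bookkeeping is the HPACK prefixed-integer encoding (RFC 7541, section 5.1): values below 2^n - 1 fit in the n-bit prefix, larger values emit the all-ones prefix followed by 7-bit continuation bytes. A standalone encoder for the integer part only, leaving out the field plumbing:

def encode_prefixed_int(value, prefix_bits):
    max_prefix = (1 << prefix_bits) - 1
    if value < max_prefix:
        return bytes([value])
    out = [max_prefix]      # all-ones prefix signals continuation
    value -= max_prefix
    while value >= 128:
        out.append((value % 128) + 128)  # high bit marks "more bytes follow"
        value //= 128
    out.append(value)
    return bytes(out)

print(encode_prefixed_int(10, 5).hex())    # '0a', fits the 5-bit prefix
print(encode_prefixed_int(1337, 5).hex())  # '1f9a0a', matching RFC 7541 C.1.2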
saltstack/salt
salt/modules/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nova.py#L250-L268
def volume_list(search_opts=None, profile=None, **kwargs): ''' List storage volumes search_opts Dictionary of search options profile Profile to use CLI Example: .. code-block:: bash salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack ''' conn = _auth(profile, **kwargs) return conn.volume_list(search_opts=search_opts)
[ "def", "volume_list", "(", "search_opts", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "_auth", "(", "profile", ",", "*", "*", "kwargs", ")", "return", "conn", ".", "volume_list", "(", "search_opts", "=", "search_opts", ")" ]
List storage volumes search_opts Dictionary of search options profile Profile to use CLI Example: .. code-block:: bash salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack
[ "List", "storage", "volumes" ]
python
train
21.052632
potash/drain
drain/data.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L203-L213
def expand_dates(df, columns=[]): """ generate year, month, day features from specified date features """ columns = df.columns.intersection(columns) df2 = df.reindex(columns=set(df.columns).difference(columns)) for column in columns: df2[column + '_year'] = df[column].apply(lambda x: x.year) df2[column + '_month'] = df[column].apply(lambda x: x.month) df2[column + '_day'] = df[column].apply(lambda x: x.day) return df2
[ "def", "expand_dates", "(", "df", ",", "columns", "=", "[", "]", ")", ":", "columns", "=", "df", ".", "columns", ".", "intersection", "(", "columns", ")", "df2", "=", "df", ".", "reindex", "(", "columns", "=", "set", "(", "df", ".", "columns", ")", ".", "difference", "(", "columns", ")", ")", "for", "column", "in", "columns", ":", "df2", "[", "column", "+", "'_year'", "]", "=", "df", "[", "column", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "year", ")", "df2", "[", "column", "+", "'_month'", "]", "=", "df", "[", "column", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "month", ")", "df2", "[", "column", "+", "'_day'", "]", "=", "df", "[", "column", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "day", ")", "return", "df2" ]
generate year, month, day features from specified date features
[ "generate", "year", "month", "day", "features", "from", "specified", "date", "features" ]
python
train
42.090909
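Behaviour on a tiny frame, assuming pandas is installed and using the function defined above; note the set() reindex means the order of the non-date columns is not guaranteed:

import pandas as pd

df = pd.DataFrame({"opened": pd.to_datetime(["2016-03-01", "2017-07-15"]),
                   "value": [1, 2]})
out = expand_dates(df, columns=["opened"])
print(sorted(out.columns))  # ['opened_day', 'opened_month', 'opened_year', 'value']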
DarkEnergySurvey/ugali
ugali/utils/healpix.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L173-L194
def healpixMap(nside, lon, lat, fill_value=0., nest=False): """ Input (lon, lat) in degrees instead of (theta, phi) in radians. Returns HEALPix map at the desired resolution """ lon_median, lat_median = np.median(lon), np.median(lat) max_angsep = np.max(ugali.utils.projector.angsep(lon, lat, lon_median, lat_median)) pix = angToPix(nside, lon, lat, nest=nest) if max_angsep < 10: # More efficient histograming for small regions of sky m = np.tile(fill_value, healpy.nside2npix(nside)) pix_subset = ugali.utils.healpix.angToDisc(nside, lon_median, lat_median, max_angsep, nest=nest) bins = np.arange(np.min(pix_subset), np.max(pix_subset) + 1) m_subset = np.histogram(pix, bins=bins - 0.5)[0].astype(float) m[bins[0:-1]] = m_subset else: m = np.histogram(pix, np.arange(hp.nside2npix(nside) + 1))[0].astype(float) if fill_value != 0.: m[m == 0.] = fill_value return m
[ "def", "healpixMap", "(", "nside", ",", "lon", ",", "lat", ",", "fill_value", "=", "0.", ",", "nest", "=", "False", ")", ":", "lon_median", ",", "lat_median", "=", "np", ".", "median", "(", "lon", ")", ",", "np", ".", "median", "(", "lat", ")", "max_angsep", "=", "np", ".", "max", "(", "ugali", ".", "utils", ".", "projector", ".", "angsep", "(", "lon", ",", "lat", ",", "lon_median", ",", "lat_median", ")", ")", "pix", "=", "angToPix", "(", "nside", ",", "lon", ",", "lat", ",", "nest", "=", "nest", ")", "if", "max_angsep", "<", "10", ":", "# More efficient histograming for small regions of sky", "m", "=", "np", ".", "tile", "(", "fill_value", ",", "healpy", ".", "nside2npix", "(", "nside", ")", ")", "pix_subset", "=", "ugali", ".", "utils", ".", "healpix", ".", "angToDisc", "(", "nside", ",", "lon_median", ",", "lat_median", ",", "max_angsep", ",", "nest", "=", "nest", ")", "bins", "=", "np", ".", "arange", "(", "np", ".", "min", "(", "pix_subset", ")", ",", "np", ".", "max", "(", "pix_subset", ")", "+", "1", ")", "m_subset", "=", "np", ".", "histogram", "(", "pix", ",", "bins", "=", "bins", "-", "0.5", ")", "[", "0", "]", ".", "astype", "(", "float", ")", "m", "[", "bins", "[", "0", ":", "-", "1", "]", "]", "=", "m_subset", "else", ":", "m", "=", "np", ".", "histogram", "(", "pix", ",", "np", ".", "arange", "(", "hp", ".", "nside2npix", "(", "nside", ")", "+", "1", ")", ")", "[", "0", "]", ".", "astype", "(", "float", ")", "if", "fill_value", "!=", "0.", ":", "m", "[", "m", "==", "0.", "]", "=", "fill_value", "return", "m" ]
Input (lon, lat) in degrees instead of (theta, phi) in radians. Returns HEALPix map at the desired resolution
[ "Input", "(", "lon", "lat", ")", "in", "degrees", "instead", "of", "(", "theta", "phi", ")", "in", "radians", ".", "Returns", "HEALPix", "map", "at", "the", "desired", "resolution" ]
python
train
43.681818
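Hypothetical usage, assuming ugali and healpy are importable so the function above runs; with the default fill_value the result is simply counts per pixel:

import numpy as np

lon = np.random.uniform(0., 360., 1000)                        # degrees
lat = np.degrees(np.arcsin(np.random.uniform(-1., 1., 1000)))  # uniform on the sphere
m = healpixMap(16, lon, lat)
print(m.sum())  # 1000.0, one count per input coordinate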
numenta/nupic
src/nupic/algorithms/spatial_pooler.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L821-L830
def getPermanence(self, columnIndex, permanence): """ Returns the permanence values for a given column. ``permanence`` size must match the number of inputs. :param columnIndex: (int) column index to get permanence for. :param permanence: (list) will be overwritten with permanences. """ assert(columnIndex < self._numColumns) permanence[:] = self._permanences[columnIndex]
[ "def", "getPermanence", "(", "self", ",", "columnIndex", ",", "permanence", ")", ":", "assert", "(", "columnIndex", "<", "self", ".", "_numColumns", ")", "permanence", "[", ":", "]", "=", "self", ".", "_permanences", "[", "columnIndex", "]" ]
Returns the permanence values for a given column. ``permanence`` size must match the number of inputs. :param columnIndex: (int) column index to get permanence for. :param permanence: (list) will be overwritten with permanences.
[ "Returns", "the", "permanence", "values", "for", "a", "given", "column", ".", "permanence", "size", "must", "match", "the", "number", "of", "inputs", ".", ":", "param", "columnIndex", ":", "(", "int", ")", "column", "index", "to", "get", "permanence", "for", ".", ":", "param", "permanence", ":", "(", "list", ")", "will", "be", "overwritten", "with", "permanences", "." ]
python
valid
40.1
buildbot/buildbot
master/buildbot/buildbot_net_usage_data.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/buildbot_net_usage_data.py#L75-L93
def getName(obj): """This method finds the first parent class which is within the buildbot namespace it prepends the name with as many ">" as the class is subclassed """ # elastic search does not like '.' in dict keys, so we replace by / def sanitize(name): return name.replace(".", "/") if isinstance(obj, _BuildStepFactory): klass = obj.factory else: klass = type(obj) name = "" klasses = (klass, ) + inspect.getmro(klass) for klass in klasses: if hasattr(klass, "__module__") and klass.__module__.startswith("buildbot."): return sanitize(name + klass.__module__ + "." + klass.__name__) else: name += ">" return sanitize(type(obj).__name__)
[ "def", "getName", "(", "obj", ")", ":", "# elastic search does not like '.' in dict keys, so we replace by /", "def", "sanitize", "(", "name", ")", ":", "return", "name", ".", "replace", "(", "\".\"", ",", "\"/\"", ")", "if", "isinstance", "(", "obj", ",", "_BuildStepFactory", ")", ":", "klass", "=", "obj", ".", "factory", "else", ":", "klass", "=", "type", "(", "obj", ")", "name", "=", "\"\"", "klasses", "=", "(", "klass", ",", ")", "+", "inspect", ".", "getmro", "(", "klass", ")", "for", "klass", "in", "klasses", ":", "if", "hasattr", "(", "klass", ",", "\"__module__\"", ")", "and", "klass", ".", "__module__", ".", "startswith", "(", "\"buildbot.\"", ")", ":", "return", "sanitize", "(", "name", "+", "klass", ".", "__module__", "+", "\".\"", "+", "klass", ".", "__name__", ")", "else", ":", "name", "+=", "\">\"", "return", "sanitize", "(", "type", "(", "obj", ")", ".", "__name__", ")" ]
This method finds the first parent class which is within the buildbot namespace it prepends the name with as many ">" as the class is subclassed
[ "This", "method", "finds", "the", "first", "parent", "class", "which", "is", "within", "the", "buildbot", "namespace", "it", "prepends", "the", "name", "with", "as", "many", ">", "as", "the", "class", "is", "subclassed" ]
python
train
38.526316
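Tracing the walk with toy classes (nothing here is a real buildbot class) also shows that the starting class is visited twice, since it is prepended to its own MRO:

import inspect

def get_name(obj, namespace):
    # Same climb as getName() above, parameterised on the namespace prefix.
    name = ""
    klass = type(obj)
    for k in (klass,) + inspect.getmro(klass):
        if getattr(k, "__module__", "").startswith(namespace):
            return (name + k.__module__ + "." + k.__name__).replace(".", "/")
        name += ">"
    return type(obj).__name__.replace(".", "/")

class Base:
    pass
Base.__module__ = "buildbot.steps"  # pretend Base lives in the tracked namespace

class Custom(Base):
    pass

print(get_name(Custom(), "buildbot."))  # '>>buildbot/steps/Base'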
jhermann/rituals
src/rituals/util/scm/base.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/util/scm/base.py#L38-L41
def run(self, cmd, *args, **kwargs): """Run a command.""" runner = self.ctx.run if self.ctx else None return run(cmd, runner=runner, *args, **kwargs)
[ "def", "run", "(", "self", ",", "cmd", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "runner", "=", "self", ".", "ctx", ".", "run", "if", "self", ".", "ctx", "else", "None", "return", "run", "(", "cmd", ",", "runner", "=", "runner", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run a command.
[ "Run", "a", "command", "." ]
python
valid
42.5
adrn/gala
gala/dynamics/orbit.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L614-L637
def eccentricity(self, **kw): r""" Returns the eccentricity computed from the mean apocenter and mean pericenter. .. math:: e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}} Parameters ---------- **kw Any keyword arguments passed to ``apocenter()`` and ``pericenter()``. For example, ``approximate=True``. Returns ------- ecc : float The orbital eccentricity. """ ra = self.apocenter(**kw) rp = self.pericenter(**kw) return (ra - rp) / (ra + rp)
[ "def", "eccentricity", "(", "self", ",", "*", "*", "kw", ")", ":", "ra", "=", "self", ".", "apocenter", "(", "*", "*", "kw", ")", "rp", "=", "self", ".", "pericenter", "(", "*", "*", "kw", ")", "return", "(", "ra", "-", "rp", ")", "/", "(", "ra", "+", "rp", ")" ]
r""" Returns the eccentricity computed from the mean apocenter and mean pericenter. .. math:: e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}} Parameters ---------- **kw Any keyword arguments passed to ``apocenter()`` and ``pericenter()``. For example, ``approximate=True``. Returns ------- ecc : float The orbital eccentricity.
[ "r", "Returns", "the", "eccentricity", "computed", "from", "the", "mean", "apocenter", "and", "mean", "pericenter", "." ]
python
train
25.041667
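Plugging in round numbers: r_apo = 10 and r_per = 5 give e = 5/15, about 0.33, and the value tends to 0 as the two radii coincide (a circular orbit):

r_apo, r_per = 10.0, 5.0
print((r_apo - r_per) / (r_apo + r_per))  # 0.3333...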
KelSolaar/Umbra
umbra/components/factory/script_editor/editor.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/editor.py#L425-L446
def set_title(self, title=None): """ Sets the editor title. :param title: Editor title. :type title: unicode :return: Method success. :rtype: bool """ if not title: # TODO: https://bugreports.qt-project.org/browse/QTBUG-27084 # titleTemplate = self.is_modified() and "{0} *" or "{0}" # title = titleTemplate.format(self.get_file_short_name()) title = self.get_file_short_name() LOGGER.debug("> Setting editor title to '{0}'.".format(title)) self.__title = title self.setWindowTitle(title) self.title_changed.emit() return True
[ "def", "set_title", "(", "self", ",", "title", "=", "None", ")", ":", "if", "not", "title", ":", "# TODO: https://bugreports.qt-project.org/browse/QTBUG-27084", "# titleTemplate = self.is_modified() and \"{0} *\" or \"{0}\"", "# title = titleTemplate.format(self.get_file_short_name())", "title", "=", "self", ".", "get_file_short_name", "(", ")", "LOGGER", ".", "debug", "(", "\"> Setting editor title to '{0}'.\"", ".", "format", "(", "title", ")", ")", "self", ".", "__title", "=", "title", "self", ".", "setWindowTitle", "(", "title", ")", "self", ".", "title_changed", ".", "emit", "(", ")", "return", "True" ]
Sets the editor title. :param title: Editor title. :type title: unicode :return: Method success. :rtype: bool
[ "Sets", "the", "editor", "title", "." ]
python
train
30.045455
streamlink/streamlink
src/streamlink/plugin/api/validate.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/validate.py#L219-L241
def map(func): """Apply function to each value inside the sequence or dict. Supports both dicts and sequences, key/value pairs are expanded when applied to a dict. """ # text is an alias for basestring on Python 2, which cannot be # instantiated and therefore can't be used to transform the value, # so we force to unicode instead. if is_py2 and text == func: func = unicode def expand_kv(kv): return func(*kv) def map_values(value): cls = type(value) if isinstance(value, dict): return cls(_map(expand_kv, value.items())) else: return cls(_map(func, value)) return transform(map_values)
[ "def", "map", "(", "func", ")", ":", "# text is an alias for basestring on Python 2, which cannot be", "# instantiated and therefore can't be used to transform the value,", "# so we force to unicode instead.", "if", "is_py2", "and", "text", "==", "func", ":", "func", "=", "unicode", "def", "expand_kv", "(", "kv", ")", ":", "return", "func", "(", "*", "kv", ")", "def", "map_values", "(", "value", ")", ":", "cls", "=", "type", "(", "value", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "cls", "(", "_map", "(", "expand_kv", ",", "value", ".", "items", "(", ")", ")", ")", "else", ":", "return", "cls", "(", "_map", "(", "func", ",", "value", ")", ")", "return", "transform", "(", "map_values", ")" ]
Apply function to each value inside the sequence or dict. Supports both dicts and sequences, key/value pairs are expanded when applied to a dict.
[ "Apply", "function", "to", "each", "value", "inside", "the", "sequence", "or", "dict", "." ]
python
test
29.434783
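The key/value expansion means the callable receives two positional arguments for dicts; a compact Python 3 re-implementation, dropping the py2 text shim and the transform() wrapper:

def vmap(func):
    def map_values(value):
        cls = type(value)
        if isinstance(value, dict):
            return cls(func(k, v) for k, v in value.items())  # func(key, value) pairs
        return cls(map(func, value))                          # element-wise otherwise
    return map_values

print(vmap(lambda k, v: (k.upper(), int(v)))({"bitrate": "720"}))  # {'BITRATE': 720}
print(vmap(int)(["1", "2"]))                                       # [1, 2]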
minhhoit/yacms
yacms/generic/forms.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/forms.py#L197-L209
def clean(self): """ Check unauthenticated user's cookie as a light check to prevent duplicate votes. """ bits = (self.data["content_type"], self.data["object_pk"]) request = self.request self.current = "%s.%s" % bits self.previous = request.COOKIES.get("yacms-rating", "").split(",") already_rated = self.current in self.previous if already_rated and not self.request.user.is_authenticated(): raise forms.ValidationError(ugettext("Already rated.")) return self.cleaned_data
[ "def", "clean", "(", "self", ")", ":", "bits", "=", "(", "self", ".", "data", "[", "\"content_type\"", "]", ",", "self", ".", "data", "[", "\"object_pk\"", "]", ")", "request", "=", "self", ".", "request", "self", ".", "current", "=", "\"%s.%s\"", "%", "bits", "self", ".", "previous", "=", "request", ".", "COOKIES", ".", "get", "(", "\"yacms-rating\"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "already_rated", "=", "self", ".", "current", "in", "self", ".", "previous", "if", "already_rated", "and", "not", "self", ".", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "raise", "forms", ".", "ValidationError", "(", "ugettext", "(", "\"Already rated.\"", ")", ")", "return", "self", ".", "cleaned_data" ]
Check unauthenticated user's cookie as a light check to prevent duplicate votes.
[ "Check", "unauthenticated", "user", "s", "cookie", "as", "a", "light", "check", "to", "prevent", "duplicate", "votes", "." ]
python
train
43.230769
edx/edx-enterprise
enterprise/admin/views.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/views.py#L218-L238
def _build_context(self, request, customer_uuid): """ Build common context parts used by different handlers in this view. """ # TODO: pylint acts stupid - find a way around it without suppressing enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid) # pylint: disable=no-member search_keyword = self.get_search_keyword(request) linked_learners = self.get_enterprise_customer_user_queryset(request, search_keyword, customer_uuid) pending_linked_learners = self.get_pending_users_queryset(search_keyword, customer_uuid) context = { self.ContextParameters.ENTERPRISE_CUSTOMER: enterprise_customer, self.ContextParameters.PENDING_LEARNERS: pending_linked_learners, self.ContextParameters.LEARNERS: linked_learners, self.ContextParameters.SEARCH_KEYWORD: search_keyword or '', self.ContextParameters.ENROLLMENT_URL: settings.LMS_ENROLLMENT_API_PATH, } context.update(admin.site.each_context(request)) context.update(self._build_admin_context(request, enterprise_customer)) return context
[ "def", "_build_context", "(", "self", ",", "request", ",", "customer_uuid", ")", ":", "# TODO: pylint acts stupid - find a way around it without suppressing", "enterprise_customer", "=", "EnterpriseCustomer", ".", "objects", ".", "get", "(", "uuid", "=", "customer_uuid", ")", "# pylint: disable=no-member", "search_keyword", "=", "self", ".", "get_search_keyword", "(", "request", ")", "linked_learners", "=", "self", ".", "get_enterprise_customer_user_queryset", "(", "request", ",", "search_keyword", ",", "customer_uuid", ")", "pending_linked_learners", "=", "self", ".", "get_pending_users_queryset", "(", "search_keyword", ",", "customer_uuid", ")", "context", "=", "{", "self", ".", "ContextParameters", ".", "ENTERPRISE_CUSTOMER", ":", "enterprise_customer", ",", "self", ".", "ContextParameters", ".", "PENDING_LEARNERS", ":", "pending_linked_learners", ",", "self", ".", "ContextParameters", ".", "LEARNERS", ":", "linked_learners", ",", "self", ".", "ContextParameters", ".", "SEARCH_KEYWORD", ":", "search_keyword", "or", "''", ",", "self", ".", "ContextParameters", ".", "ENROLLMENT_URL", ":", "settings", ".", "LMS_ENROLLMENT_API_PATH", ",", "}", "context", ".", "update", "(", "admin", ".", "site", ".", "each_context", "(", "request", ")", ")", "context", ".", "update", "(", "self", ".", "_build_admin_context", "(", "request", ",", "enterprise_customer", ")", ")", "return", "context" ]
Build common context parts used by different handlers in this view.
[ "Build", "common", "context", "parts", "used", "by", "different", "handlers", "in", "this", "view", "." ]
python
valid
54.666667
econ-ark/HARK
HARK/ConsumptionSaving/ConsMedModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsMedModel.py#L1019-L1101
def makevAndvPfuncs(self,policyFunc): ''' Constructs the marginal value function for this period. Parameters ---------- policyFunc : function Consumption and medical care function for this period, defined over market resources, permanent income level, and the medical need shock. Returns ------- vFunc : function Value function for this period, defined over market resources and permanent income. vPfunc : function Marginal value (of market resources) function for this period, defined over market resources and permanent income. ''' # Get state dimension sizes mCount = self.aXtraGrid.size pCount = self.pLvlGrid.size MedCount = self.MedShkVals.size # Make temporary grids to evaluate the consumption function temp_grid = np.tile(np.reshape(self.aXtraGrid,(mCount,1,1)),(1,pCount,MedCount)) aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount,1)), (mCount,1,MedCount)) pGrid = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(mCount,1,MedCount)) mGrid = temp_grid*pGrid + aMinGrid if self.pLvlGrid[0] == 0: mGrid[:,0,:] = np.tile(np.reshape(self.aXtraGrid,(mCount,1)),(1,MedCount)) MedShkGrid = np.tile(np.reshape(self.MedShkVals,(1,1,MedCount)),(mCount,pCount,1)) probsGrid = np.tile(np.reshape(self.MedShkPrbs,(1,1,MedCount)),(mCount,pCount,1)) # Get optimal consumption (and medical care) for each state cGrid,MedGrid = policyFunc(mGrid,pGrid,MedShkGrid) # Calculate expected value by "integrating" across medical shocks if self.vFuncBool: MedGrid = np.maximum(MedGrid,1e-100) # interpolation error sometimes makes Med < 0 (barely) aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid) # interpolation error sometimes makes tiny violations vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid,pGrid) vNow = np.sum(vGrid*probsGrid,axis=2) # Calculate expected marginal value by "integrating" across medical shocks vPgrid = self.uP(cGrid) vPnow = np.sum(vPgrid*probsGrid,axis=2) # Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0) mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount)),mGrid[:,:,0])) vPnvrsNow = np.concatenate((np.zeros((1,pCount)),self.uPinv(vPnow))) if self.vFuncBool: vNvrsNow = np.concatenate((np.zeros((1,pCount)),self.uinv(vNow)),axis=0) vNvrsPnow = vPnow*self.uinvP(vNow) vNvrsPnow = np.concatenate((np.zeros((1,pCount)),vNvrsPnow),axis=0) # Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl vPnvrsFunc_by_pLvl = [] vNvrsFunc_by_pLvl = [] for j in range(pCount): # Make a pseudo inverse marginal value function for each pLvl pLvl = self.pLvlGrid[j] m_temp = mGrid_small[:,j] - self.mLvlMinNow(pLvl) vPnvrs_temp = vPnvrsNow[:,j] vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp,vPnvrs_temp)) if self.vFuncBool: vNvrs_temp = vNvrsNow[:,j] vNvrsP_temp = vNvrsPnow[:,j] vNvrsFunc_by_pLvl.append(CubicInterp(m_temp,vNvrs_temp,vNvrsP_temp)) vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl,self.pLvlGrid) vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl if self.vFuncBool: vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl,self.pLvlGrid) vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl # "Re-curve" the (marginal) value function vPfunc = MargValueFunc2D(vPnvrsFunc,self.CRRA) if self.vFuncBool: vFunc = ValueFunc2D(vNvrsFunc,self.CRRA) else: vFunc = NullFunc() return vFunc, vPfunc
[ "def", "makevAndvPfuncs", "(", "self", ",", "policyFunc", ")", ":", "# Get state dimension sizes", "mCount", "=", "self", ".", "aXtraGrid", ".", "size", "pCount", "=", "self", ".", "pLvlGrid", ".", "size", "MedCount", "=", "self", ".", "MedShkVals", ".", "size", "# Make temporary grids to evaluate the consumption function", "temp_grid", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "aXtraGrid", ",", "(", "mCount", ",", "1", ",", "1", ")", ")", ",", "(", "1", ",", "pCount", ",", "MedCount", ")", ")", "aMinGrid", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "mLvlMinNow", "(", "self", ".", "pLvlGrid", ")", ",", "(", "1", ",", "pCount", ",", "1", ")", ")", ",", "(", "mCount", ",", "1", ",", "MedCount", ")", ")", "pGrid", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "pLvlGrid", ",", "(", "1", ",", "pCount", ",", "1", ")", ")", ",", "(", "mCount", ",", "1", ",", "MedCount", ")", ")", "mGrid", "=", "temp_grid", "*", "pGrid", "+", "aMinGrid", "if", "self", ".", "pLvlGrid", "[", "0", "]", "==", "0", ":", "mGrid", "[", ":", ",", "0", ",", ":", "]", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "aXtraGrid", ",", "(", "mCount", ",", "1", ")", ")", ",", "(", "1", ",", "MedCount", ")", ")", "MedShkGrid", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "MedShkVals", ",", "(", "1", ",", "1", ",", "MedCount", ")", ")", ",", "(", "mCount", ",", "pCount", ",", "1", ")", ")", "probsGrid", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "self", ".", "MedShkPrbs", ",", "(", "1", ",", "1", ",", "MedCount", ")", ")", ",", "(", "mCount", ",", "pCount", ",", "1", ")", ")", "# Get optimal consumption (and medical care) for each state", "cGrid", ",", "MedGrid", "=", "policyFunc", "(", "mGrid", ",", "pGrid", ",", "MedShkGrid", ")", "# Calculate expected value by \"integrating\" across medical shocks", "if", "self", ".", "vFuncBool", ":", "MedGrid", "=", "np", ".", "maximum", "(", "MedGrid", ",", "1e-100", ")", "# interpolation error sometimes makes Med < 0 (barely)", "aGrid", "=", "np", ".", "maximum", "(", "mGrid", "-", "cGrid", "-", "self", ".", "MedPrice", "*", "MedGrid", ",", "aMinGrid", ")", "# interpolation error sometimes makes tiny violations", "vGrid", "=", "self", ".", "u", "(", "cGrid", ")", "+", "MedShkGrid", "*", "self", ".", "uMed", "(", "MedGrid", ")", "+", "self", ".", "EndOfPrdvFunc", "(", "aGrid", ",", "pGrid", ")", "vNow", "=", "np", ".", "sum", "(", "vGrid", "*", "probsGrid", ",", "axis", "=", "2", ")", "# Calculate expected marginal value by \"integrating\" across medical shocks", "vPgrid", "=", "self", ".", "uP", "(", "cGrid", ")", "vPnow", "=", "np", ".", "sum", "(", "vPgrid", "*", "probsGrid", ",", "axis", "=", "2", ")", "# Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0)", "mGrid_small", "=", "np", ".", "concatenate", "(", "(", "np", ".", "reshape", "(", "self", ".", "mLvlMinNow", "(", "self", ".", "pLvlGrid", ")", ",", "(", "1", ",", "pCount", ")", ")", ",", "mGrid", "[", ":", ",", ":", ",", "0", "]", ")", ")", "vPnvrsNow", "=", "np", ".", "concatenate", "(", "(", "np", ".", "zeros", "(", "(", "1", ",", "pCount", ")", ")", ",", "self", ".", "uPinv", "(", "vPnow", ")", ")", ")", "if", "self", ".", "vFuncBool", ":", "vNvrsNow", "=", "np", ".", "concatenate", "(", "(", "np", ".", "zeros", "(", "(", "1", ",", "pCount", ")", ")", ",", "self", ".", "uinv", "(", "vNow", ")", ")", ",", "axis", "=", "0", ")", "vNvrsPnow", "=", "vPnow", "*", "self", ".", 
"uinvP", "(", "vNow", ")", "vNvrsPnow", "=", "np", ".", "concatenate", "(", "(", "np", ".", "zeros", "(", "(", "1", ",", "pCount", ")", ")", ",", "vNvrsPnow", ")", ",", "axis", "=", "0", ")", "# Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl", "vPnvrsFunc_by_pLvl", "=", "[", "]", "vNvrsFunc_by_pLvl", "=", "[", "]", "for", "j", "in", "range", "(", "pCount", ")", ":", "# Make a pseudo inverse marginal value function for each pLvl", "pLvl", "=", "self", ".", "pLvlGrid", "[", "j", "]", "m_temp", "=", "mGrid_small", "[", ":", ",", "j", "]", "-", "self", ".", "mLvlMinNow", "(", "pLvl", ")", "vPnvrs_temp", "=", "vPnvrsNow", "[", ":", ",", "j", "]", "vPnvrsFunc_by_pLvl", ".", "append", "(", "LinearInterp", "(", "m_temp", ",", "vPnvrs_temp", ")", ")", "if", "self", ".", "vFuncBool", ":", "vNvrs_temp", "=", "vNvrsNow", "[", ":", ",", "j", "]", "vNvrsP_temp", "=", "vNvrsPnow", "[", ":", ",", "j", "]", "vNvrsFunc_by_pLvl", ".", "append", "(", "CubicInterp", "(", "m_temp", ",", "vNvrs_temp", ",", "vNvrsP_temp", ")", ")", "vPnvrsFuncBase", "=", "LinearInterpOnInterp1D", "(", "vPnvrsFunc_by_pLvl", ",", "self", ".", "pLvlGrid", ")", "vPnvrsFunc", "=", "VariableLowerBoundFunc2D", "(", "vPnvrsFuncBase", ",", "self", ".", "mLvlMinNow", ")", "# adjust for the lower bound of mLvl", "if", "self", ".", "vFuncBool", ":", "vNvrsFuncBase", "=", "LinearInterpOnInterp1D", "(", "vNvrsFunc_by_pLvl", ",", "self", ".", "pLvlGrid", ")", "vNvrsFunc", "=", "VariableLowerBoundFunc2D", "(", "vNvrsFuncBase", ",", "self", ".", "mLvlMinNow", ")", "# adjust for the lower bound of mLvl", "# \"Re-curve\" the (marginal) value function", "vPfunc", "=", "MargValueFunc2D", "(", "vPnvrsFunc", ",", "self", ".", "CRRA", ")", "if", "self", ".", "vFuncBool", ":", "vFunc", "=", "ValueFunc2D", "(", "vNvrsFunc", ",", "self", ".", "CRRA", ")", "else", ":", "vFunc", "=", "NullFunc", "(", ")", "return", "vFunc", ",", "vPfunc" ]
Constructs the marginal value function for this period. Parameters ---------- policyFunc : function Consumption and medical care function for this period, defined over market resources, permanent income level, and the medical need shock. Returns ------- vFunc : function Value function for this period, defined over market resources and permanent income. vPfunc : function Marginal value (of market resources) function for this period, defined over market resources and permanent income.
[ "Constructs", "the", "marginal", "value", "function", "for", "this", "period", "." ]
python
train
49.819277
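The core numerical step in makevAndvPfuncs is the probability-weighted sum across the medical-shock axis; a small numpy sketch with assumed grid shapes:

import numpy as np

mCount, pCount, MedCount = 4, 3, 5
vPgrid = np.random.rand(mCount, pCount, MedCount)  # uP(c) on the full grid
probs = np.full((1, 1, MedCount), 1.0 / MedCount)  # shock probabilities
vPnow = np.sum(vPgrid * probs, axis=2)             # expectation over shocks
assert vPnow.shape == (mCount, pCount)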
horazont/aioxmpp
aioxmpp/pubsub/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pubsub/service.py#L1023-L1044
def purge(self, jid, node): """ Delete all items from a node. :param jid: JID of the PubSub service :param node: Name of the PubSub node :type node: :class:`str` Requires :attr:`.xso.Feature.PURGE`. """ iq = aioxmpp.stanza.IQ( type_=aioxmpp.structs.IQType.SET, to=jid, payload=pubsub_xso.OwnerRequest( pubsub_xso.OwnerPurge( node ) ) ) yield from self.client.send(iq)
[ "def", "purge", "(", "self", ",", "jid", ",", "node", ")", ":", "iq", "=", "aioxmpp", ".", "stanza", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "structs", ".", "IQType", ".", "SET", ",", "to", "=", "jid", ",", "payload", "=", "pubsub_xso", ".", "OwnerRequest", "(", "pubsub_xso", ".", "OwnerPurge", "(", "node", ")", ")", ")", "yield", "from", "self", ".", "client", ".", "send", "(", "iq", ")" ]
Delete all items from a node. :param jid: JID of the PubSub service :param node: Name of the PubSub node :type node: :class:`str` Requires :attr:`.xso.Feature.PURGE`.
[ "Delete", "all", "items", "from", "a", "node", "." ]
python
train
24.090909
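A hypothetical call sketch; it assumes a connected aioxmpp client, the top-level PubSubClient export (the exact class name may differ across versions), and made-up JID and node names.

import aioxmpp

async def purge_demo_node(client):
    # summon() returns the PubSub service instance for this client
    pubsub = client.summon(aioxmpp.PubSubClient)
    await pubsub.purge(
        aioxmpp.JID.fromstr("pubsub.example.com"),
        "example-node",
    )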
wummel/linkchecker
linkcheck/fileutil.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/fileutil.py#L200-L202
def is_readable(filename): """Check if file is a regular file and is readable.""" return os.path.isfile(filename) and os.access(filename, os.R_OK)
[ "def", "is_readable", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "filename", ")", "and", "os", ".", "access", "(", "filename", ",", "os", ".", "R_OK", ")" ]
Check if file is a regular file and is readable.
[ "Check", "if", "file", "is", "a", "regular", "file", "and", "is", "readable", "." ]
python
train
50.666667
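A quick behavior check, assuming is_readable is imported from linkcheck.fileutil:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
assert is_readable(path)     # a regular, readable file
assert not is_readable(".")  # a directory is not a regular file
os.remove(path)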
PatrikValkovic/grammpy
grammpy/representation/support/_RulesSet.py
https://github.com/PatrikValkovic/grammpy/blob/879ce0ef794ac2823acc19314fcd7a8aba53e50f/grammpy/representation/support/_RulesSet.py#L82-L102
def _add(self, *rules):
        # type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
        """
        Add rules into the set. Each rule is validated and split if needed.
        The method adds the rules into a dictionary, so a rule can be deleted by its terminals or nonterminals.
        :param rules: Rules to insert.
        :return: Inserted rules.
        :raise NotRuleException: If the parameter doesn't inherit from Rule.
        :raise RuleException: If the syntax of the rule is invalid.
        """
        for rule in rules:
            if rule in self:
                continue
            self._validate_rule(rule)
        for rule in rules:
            for r in self._split_rules(rule):
                for side in r.rule:
                    for s in side:
                        self._assign_map[s].add(r)
                super().add(r)
                yield r
[ "def", "_add", "(", "self", ",", "*", "rules", ")", ":", "# type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]", "for", "rule", "in", "rules", ":", "if", "rule", "in", "self", ":", "continue", "self", ".", "_validate_rule", "(", "rule", ")", "for", "rule", "in", "rules", ":", "for", "r", "in", "self", ".", "_split_rules", "(", "rule", ")", ":", "for", "side", "in", "r", ".", "rule", ":", "for", "s", "in", "side", ":", "self", ".", "_assign_map", "[", "s", "]", ".", "add", "(", "r", ")", "super", "(", ")", ".", "add", "(", "r", ")", "yield", "r" ]
Add rules into the set. Each rule is validated and split if needed. The method adds the rules into a dictionary, so a rule can be deleted by its terminals or nonterminals. :param rules: Rules to insert. :return: Inserted rules. :raise NotRuleException: If the parameter doesn't inherit from Rule. :raise RuleException: If the syntax of the rule is invalid.
[ "Add", "rules", "into", "the", "set", ".", "Each", "rule", "is", "validated", "and", "split", "if", "needed", ".", "The", "method", "add", "the", "rules", "into", "dictionary", "so", "the", "rule", "can", "be", "deleted", "with", "terminals", "or", "nonterminals", ".", ":", "param", "rules", ":", "Rules", "to", "insert", ".", ":", "return", ":", "Inserted", "rules", ".", ":", "raise", "NotRuleException", ":", "If", "the", "parameter", "doesn", "t", "inherit", "from", "Rule", ".", ":", "raise", "RuleException", ":", "If", "the", "syntax", "of", "the", "rule", "is", "invalid", "." ]
python
train
41.047619
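The bookkeeping pattern behind _assign_map, shown in isolation: an index from each symbol to the rules mentioning it, so rules can later be removed by symbol. A tuple of tuples stands in for the Rule class here.

from collections import defaultdict

assign_map = defaultdict(set)
rule = (("S",), ("a", "S", "b"))  # stand-in for a Rule: left side, right side
for side in rule:
    for symbol in side:
        assign_map[symbol].add(rule)

assert rule in assign_map["a"] and rule in assign_map["S"]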
dependencies-io/cli
dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py
https://github.com/dependencies-io/cli/blob/d8ae97343c48a61d6614d3e8af6a981b4cfb1bcb/dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py#L7-L12
def write_json_to_temp_file(data): """Writes JSON data to a temporary file and returns the path to it""" fp = tempfile.NamedTemporaryFile(delete=False) fp.write(json.dumps(data).encode('utf-8')) fp.close() return fp.name
[ "def", "write_json_to_temp_file", "(", "data", ")", ":", "fp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "data", ")", ".", "encode", "(", "'utf-8'", ")", ")", "fp", ".", "close", "(", ")", "return", "fp", ".", "name" ]
Writes JSON data to a temporary file and returns the path to it
[ "Writes", "JSON", "data", "to", "a", "temporary", "file", "and", "returns", "the", "path", "to", "it" ]
python
train
39.166667
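A round-trip check for the helper above, assuming it is in scope:

import json

path = write_json_to_temp_file({"name": "demo", "deps": []})
with open(path) as fp:
    assert json.load(fp) == {"name": "demo", "deps": []}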
ethereum/py-trie
trie/binary.py
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L305-L320
def delete_subtrie(self, key): """ Given a key prefix, delete the whole subtrie that starts with the key prefix. Key will be encoded into binary array format first. It will call `_set` with `if_delete_subtrie` set to True. """ validate_is_bytes(key) self.root_hash = self._set( self.root_hash, encode_to_bin(key), value=b'', if_delete_subtrie=True, )
[ "def", "delete_subtrie", "(", "self", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "self", ".", "root_hash", "=", "self", ".", "_set", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ",", "value", "=", "b''", ",", "if_delete_subtrie", "=", "True", ",", ")" ]
Given a key prefix, delete the whole subtrie that starts with the key prefix. Key will be encoded into binary array format first. It will call `_set` with `if_delete_subtrie` set to True.
[ "Given", "a", "key", "prefix", "delete", "the", "whole", "subtrie", "that", "starts", "with", "the", "key", "prefix", "." ]
python
train
28.125
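A hypothetical usage sketch; it assumes py-trie's BinaryTrie accepts a plain dict as its backing store (constructor details may vary by version).

from trie import BinaryTrie

trie = BinaryTrie(db={})
trie.set(b"ns:key1", b"v1")
trie.set(b"ns:key2", b"v2")
trie.delete_subtrie(b"ns:")  # drops every key under the prefix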
google/grr
grr/server/grr_response_server/threadpool.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/threadpool.py#L363-L453
def AddTask(self, target, args=(), name="Unnamed task", blocking=True, inline=True): """Adds a task to be processed later. Args: target: A callable which should be processed by one of the workers. args: A tuple of arguments to target. name: The name of this task. Used to identify tasks in the log. blocking: If True we block until the task is finished, otherwise we raise queue.Full inline: If set, process the task inline when the queue is full. This implies no blocking. Specifying inline helps if the worker tasks are blocked because it still ensures some progress is made. However, this can generally block the calling thread even after the threadpool is available again and therefore decrease efficiency. Raises: ThreadPoolNotStartedError: if the pool was not started yet. queue.Full: if the pool is full and can not accept new jobs. """ if not self.started: raise ThreadPoolNotStartedError(self.name) # This pool should have no worker threads - just run the task inline. if self.max_threads == 0: target(*args) return if inline: blocking = False with self.lock: while True: # This check makes sure that the threadpool will add new workers # even if the queue is not full. This is needed for a scenario when # a fresh threadpool is created (say, with min_threads=1 and # max_threads=10) and 2 long-running tasks are added. The code below # will spawn a new worker for a second long-running task. if len(self) < self.max_threads: try: self._AddWorker() except (RuntimeError, threading.ThreadError) as e: logging.error( "Threadpool exception: " "Could not spawn worker threads: %s", e) try: # Push the task on the queue but raise if unsuccessful. self._queue.put((target, args, name, time.time()), block=False) return except queue.Full: # We increase the number of active threads if we do not exceed the # maximum _and_ our process CPU utilization is not too high. This # ensures that if the workers are waiting on IO we add more workers, # but we do not waste workers when tasks are CPU bound. if len(self) < self.max_threads: try: self._AddWorker() continue # If we fail to add a worker we should keep going anyway. except (RuntimeError, threading.ThreadError) as e: logging.error( "Threadpool exception: " "Could not spawn worker threads: %s", e) # If we need to process the task inline just break out of the loop, # therefore releasing the lock and run the task inline. if inline: break # We should block and try again soon. elif blocking: try: self._queue.put((target, args, name, time.time()), block=True, timeout=1) return except queue.Full: continue else: raise Full() # We don't want to hold the lock while running the task inline if inline: target(*args)
[ "def", "AddTask", "(", "self", ",", "target", ",", "args", "=", "(", ")", ",", "name", "=", "\"Unnamed task\"", ",", "blocking", "=", "True", ",", "inline", "=", "True", ")", ":", "if", "not", "self", ".", "started", ":", "raise", "ThreadPoolNotStartedError", "(", "self", ".", "name", ")", "# This pool should have no worker threads - just run the task inline.", "if", "self", ".", "max_threads", "==", "0", ":", "target", "(", "*", "args", ")", "return", "if", "inline", ":", "blocking", "=", "False", "with", "self", ".", "lock", ":", "while", "True", ":", "# This check makes sure that the threadpool will add new workers", "# even if the queue is not full. This is needed for a scenario when", "# a fresh threadpool is created (say, with min_threads=1 and", "# max_threads=10) and 2 long-running tasks are added. The code below", "# will spawn a new worker for a second long-running task.", "if", "len", "(", "self", ")", "<", "self", ".", "max_threads", ":", "try", ":", "self", ".", "_AddWorker", "(", ")", "except", "(", "RuntimeError", ",", "threading", ".", "ThreadError", ")", "as", "e", ":", "logging", ".", "error", "(", "\"Threadpool exception: \"", "\"Could not spawn worker threads: %s\"", ",", "e", ")", "try", ":", "# Push the task on the queue but raise if unsuccessful.", "self", ".", "_queue", ".", "put", "(", "(", "target", ",", "args", ",", "name", ",", "time", ".", "time", "(", ")", ")", ",", "block", "=", "False", ")", "return", "except", "queue", ".", "Full", ":", "# We increase the number of active threads if we do not exceed the", "# maximum _and_ our process CPU utilization is not too high. This", "# ensures that if the workers are waiting on IO we add more workers,", "# but we do not waste workers when tasks are CPU bound.", "if", "len", "(", "self", ")", "<", "self", ".", "max_threads", ":", "try", ":", "self", ".", "_AddWorker", "(", ")", "continue", "# If we fail to add a worker we should keep going anyway.", "except", "(", "RuntimeError", ",", "threading", ".", "ThreadError", ")", "as", "e", ":", "logging", ".", "error", "(", "\"Threadpool exception: \"", "\"Could not spawn worker threads: %s\"", ",", "e", ")", "# If we need to process the task inline just break out of the loop,", "# therefore releasing the lock and run the task inline.", "if", "inline", ":", "break", "# We should block and try again soon.", "elif", "blocking", ":", "try", ":", "self", ".", "_queue", ".", "put", "(", "(", "target", ",", "args", ",", "name", ",", "time", ".", "time", "(", ")", ")", ",", "block", "=", "True", ",", "timeout", "=", "1", ")", "return", "except", "queue", ".", "Full", ":", "continue", "else", ":", "raise", "Full", "(", ")", "# We don't want to hold the lock while running the task inline", "if", "inline", ":", "target", "(", "*", "args", ")" ]
Adds a task to be processed later. Args: target: A callable which should be processed by one of the workers. args: A tuple of arguments to target. name: The name of this task. Used to identify tasks in the log. blocking: If True we block until the task can be queued, otherwise we raise queue.Full. inline: If set, process the task inline when the queue is full. This implies no blocking. Specifying inline helps if the worker tasks are blocked because it still ensures some progress is made. However, this can generally block the calling thread even after the threadpool is available again and therefore decrease efficiency. Raises: ThreadPoolNotStartedError: if the pool was not started yet. queue.Full: if the pool is full and cannot accept new jobs.
[ "Adds", "a", "task", "to", "be", "processed", "later", "." ]
python
train
36.978022
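Illustrative only; the Factory/Start/Stop names are assumed from typical GRR thread-pool usage (with the threadpool module imported) and may not match every version exactly.

pool = threadpool.ThreadPool.Factory("demo_pool", min_threads=2, max_threads=10)
pool.Start()
try:
    pool.AddTask(target=print, args=("hello from a worker",), name="DemoTask")
finally:
    pool.Stop()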
OpenVolunteeringPlatform/django-ovp-projects
ovp_projects/views/project.py
https://github.com/OpenVolunteeringPlatform/django-ovp-projects/blob/239e27027ca99c7b44ee4f30bf55d06439d49251/ovp_projects/views/project.py#L46-L57
def partial_update(self, request, *args, **kwargs): """ We do not include the mixin as we want only PATCH and no PUT """ instance = self.get_object() serializer = self.get_serializer(instance, data=request.data, partial=True, context=self.get_serializer_context()) serializer.is_valid(raise_exception=True) serializer.save() if getattr(instance, '_prefetched_objects_cache', None): #pragma: no cover instance = self.get_object() serializer = self.get_serializer(instance) return response.Response(serializer.data)
[ "def", "partial_update", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "self", ".", "get_object", "(", ")", "serializer", "=", "self", ".", "get_serializer", "(", "instance", ",", "data", "=", "request", ".", "data", ",", "partial", "=", "True", ",", "context", "=", "self", ".", "get_serializer_context", "(", ")", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "serializer", ".", "save", "(", ")", "if", "getattr", "(", "instance", ",", "'_prefetched_objects_cache'", ",", "None", ")", ":", "#pragma: no cover", "instance", "=", "self", ".", "get_object", "(", ")", "serializer", "=", "self", ".", "get_serializer", "(", "instance", ")", "return", "response", ".", "Response", "(", "serializer", ".", "data", ")" ]
We do not include the mixin as we want only PATCH and no PUT
[ "We", "do", "not", "include", "the", "mixin", "as", "we", "want", "only", "PATCH", "and", "no", "PUT" ]
python
train
45.333333
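From the client side this endpoint is driven with PATCH rather than PUT; a hypothetical request (URL and token are made up):

import requests

resp = requests.patch(
    "https://example.org/api/projects/42/",
    json={"name": "New project name"},
    headers={"Authorization": "Token <token>"},
)
resp.raise_for_status()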
fastai/fastai
old/fastai/dataset.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/dataset.py#L255-L293
def open_image(fn): """ Opens an image using OpenCV given the file path. Arguments: fn: the file path of the image Returns: The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0 """ flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR if not os.path.exists(fn) and not str(fn).startswith("http"): raise OSError('No such file or directory: {}'.format(fn)) elif os.path.isdir(fn) and not str(fn).startswith("http"): raise OSError('Is a directory: {}'.format(fn)) elif isdicom(fn): slice = pydicom.read_file(fn) if slice.PhotometricInterpretation.startswith('MONOCHROME'): # Make a fake RGB image im = np.stack([slice.pixel_array]*3,-1) return im / ((1 << slice.BitsStored)-1) else: # No support for RGB yet, as it involves various color spaces. # It shouldn't be too difficult to add though, if needed. raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation)) else: #res = np.array(Image.open(fn), dtype=np.float32)/255 #if len(res.shape)==2: res = np.repeat(res[...,None],3,2) #return res try: if str(fn).startswith("http"): req = urllib.urlopen(str(fn)) image = np.asarray(bytearray(req.read()), dtype="uint8") im = cv2.imdecode(image, flags).astype(np.float32)/255 else: im = cv2.imread(str(fn), flags).astype(np.float32)/255 if im is None: raise OSError(f'File not recognized by opencv: {fn}') return cv2.cvtColor(im, cv2.COLOR_BGR2RGB) except Exception as e: raise OSError('Error handling image at: {}'.format(fn)) from e
[ "def", "open_image", "(", "fn", ")", ":", "flags", "=", "cv2", ".", "IMREAD_UNCHANGED", "+", "cv2", ".", "IMREAD_ANYDEPTH", "+", "cv2", ".", "IMREAD_ANYCOLOR", "if", "not", "os", ".", "path", ".", "exists", "(", "fn", ")", "and", "not", "str", "(", "fn", ")", ".", "startswith", "(", "\"http\"", ")", ":", "raise", "OSError", "(", "'No such file or directory: {}'", ".", "format", "(", "fn", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "fn", ")", "and", "not", "str", "(", "fn", ")", ".", "startswith", "(", "\"http\"", ")", ":", "raise", "OSError", "(", "'Is a directory: {}'", ".", "format", "(", "fn", ")", ")", "elif", "isdicom", "(", "fn", ")", ":", "slice", "=", "pydicom", ".", "read_file", "(", "fn", ")", "if", "slice", ".", "PhotometricInterpretation", ".", "startswith", "(", "'MONOCHROME'", ")", ":", "# Make a fake RGB image", "im", "=", "np", ".", "stack", "(", "[", "slice", ".", "pixel_array", "]", "*", "3", ",", "-", "1", ")", "return", "im", "/", "(", "(", "1", "<<", "slice", ".", "BitsStored", ")", "-", "1", ")", "else", ":", "# No support for RGB yet, as it involves various color spaces.", "# It shouldn't be too difficult to add though, if needed.", "raise", "OSError", "(", "'Unsupported DICOM image with PhotometricInterpretation=={}'", ".", "format", "(", "slice", ".", "PhotometricInterpretation", ")", ")", "else", ":", "#res = np.array(Image.open(fn), dtype=np.float32)/255", "#if len(res.shape)==2: res = np.repeat(res[...,None],3,2)", "#return res", "try", ":", "if", "str", "(", "fn", ")", ".", "startswith", "(", "\"http\"", ")", ":", "req", "=", "urllib", ".", "urlopen", "(", "str", "(", "fn", ")", ")", "image", "=", "np", ".", "asarray", "(", "bytearray", "(", "req", ".", "read", "(", ")", ")", ",", "dtype", "=", "\"uint8\"", ")", "im", "=", "cv2", ".", "imdecode", "(", "image", ",", "flags", ")", ".", "astype", "(", "np", ".", "float32", ")", "/", "255", "else", ":", "im", "=", "cv2", ".", "imread", "(", "str", "(", "fn", ")", ",", "flags", ")", ".", "astype", "(", "np", ".", "float32", ")", "/", "255", "if", "im", "is", "None", ":", "raise", "OSError", "(", "f'File not recognized by opencv: {fn}'", ")", "return", "cv2", ".", "cvtColor", "(", "im", ",", "cv2", ".", "COLOR_BGR2RGB", ")", "except", "Exception", "as", "e", ":", "raise", "OSError", "(", "'Error handling image at: {}'", ".", "format", "(", "fn", ")", ")", "from", "e" ]
Opens an image using OpenCV given the file path. Arguments: fn: the file path of the image Returns: The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
[ "Opens", "an", "image", "using", "OpenCV", "given", "the", "file", "path", "." ]
python
train
46.769231
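A minimal check of the documented contract for a typical JPEG; the path is illustrative and open_image is assumed imported from fastai.dataset.

im = open_image("data/train/cat.1.jpg")
assert im.dtype == "float32" and im.ndim == 3  # H x W x 3, RGB order
assert 0.0 <= im.min() and im.max() <= 1.0     # values normalized to [0, 1]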
opennode/waldur-core
waldur_core/cost_tracking/views.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/cost_tracking/views.py#L103-L109
def update(self, request, *args, **kwargs): """ Run **PATCH** request against */api/price-list-items/<uuid>/* to update price list item. Only item_type, key value and units can be updated. Only customer owner and staff can update price items. """ return super(PriceListItemViewSet, self).update(request, *args, **kwargs)
[ "def", "update", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "PriceListItemViewSet", ",", "self", ")", ".", "update", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run **PATCH** request against */api/price-list-items/<uuid>/* to update price list item. Only item_type, key value and units can be updated. Only customer owner and staff can update price items.
[ "Run", "**", "PATCH", "**", "request", "against", "*", "/", "api", "/", "price", "-", "list", "-", "items", "/", "<uuid", ">", "/", "*", "to", "update", "price", "list", "item", ".", "Only", "item_type", "key", "value", "and", "units", "can", "be", "updated", ".", "Only", "customer", "owner", "and", "staff", "can", "update", "price", "items", "." ]
python
train
51.714286
gunthercox/ChatterBot
chatterbot/parsing.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L540-L554
def date_from_quarter(base_date, ordinal, year): """ Extract date from quarter of a year """ interval = 3 month_start = interval * (ordinal - 1) if month_start < 0: month_start = 9 month_end = month_start + interval if month_start == 0: month_start = 1 return [ datetime(year, month_start, 1), datetime(year, month_end, calendar.monthrange(year, month_end)[1]) ]
[ "def", "date_from_quarter", "(", "base_date", ",", "ordinal", ",", "year", ")", ":", "interval", "=", "3", "month_start", "=", "interval", "*", "(", "ordinal", "-", "1", ")", "if", "month_start", "<", "0", ":", "month_start", "=", "9", "month_end", "=", "month_start", "+", "interval", "if", "month_start", "==", "0", ":", "month_start", "=", "1", "return", "[", "datetime", "(", "year", ",", "month_start", ",", "1", ")", ",", "datetime", "(", "year", ",", "month_end", ",", "calendar", ".", "monthrange", "(", "year", ",", "month_end", ")", "[", "1", "]", ")", "]" ]
Extract date from quarter of a year
[ "Extract", "date", "from", "quarter", "of", "a", "year" ]
python
train
28
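A behavior check; note that for ordinal 2 the arithmetic above produces a start month of 3, so the returned window runs March 1 through June 30.

from datetime import datetime

start, end = date_from_quarter(None, 2, 2015)
assert start == datetime(2015, 3, 1)
assert end == datetime(2015, 6, 30)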
jwodder/doapi
doapi/doapi.py
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/doapi.py#L370-L379
def fetch_all_droplet_neighbors(self): r""" Returns a generator of all sets of multiple droplets that are running on the same physical hardware :rtype: generator of lists of `Droplet`\ s :raises DOAPIError: if the API endpoint replies with an error """ for hood in self.paginate('/v2/reports/droplet_neighbors', 'neighbors'): yield list(map(self._droplet, hood))
[ "def", "fetch_all_droplet_neighbors", "(", "self", ")", ":", "for", "hood", "in", "self", ".", "paginate", "(", "'/v2/reports/droplet_neighbors'", ",", "'neighbors'", ")", ":", "yield", "list", "(", "map", "(", "self", ".", "_droplet", ",", "hood", ")", ")" ]
r""" Returns a generator of all sets of multiple droplets that are running on the same physical hardware :rtype: generator of lists of `Droplet`\ s :raises DOAPIError: if the API endpoint replies with an error
[ "r", "Returns", "a", "generator", "of", "all", "sets", "of", "multiple", "droplets", "that", "are", "running", "on", "the", "same", "physical", "hardware" ]
python
train
42.2
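A hypothetical session; it needs a real DigitalOcean API token and assumes the client class is importable as shown.

from doapi import doapi

client = doapi("YOUR_API_TOKEN")
for neighbors in client.fetch_all_droplet_neighbors():
    print([droplet.name for droplet in neighbors])  # droplets sharing hardware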
sailthru/relay
relay/runner.py
https://github.com/sailthru/relay/blob/995209346c6663675d96d0cbff3bb67b9758c8e2/relay/runner.py#L69-L101
def create_ramp_plan(err, ramp): """ Formulate and execute on a plan to slowly add heat or cooling to the system `err` initial error (PV - SP) `ramp` the size of the ramp A ramp plan might yield MVs in this order at every timestep: [5, 0, 4, 0, 3, 0, 2, 0, 1] where err == 5 + 4 + 3 + 2 + 1 """ if ramp == 1: # basecase yield int(err) while True: yield 0 # np.arange(n).sum() == err # --> solve for n # err = (n - 1) * (n // 2) == .5 * n**2 - .5 * n # 0 = n**2 - n --> solve for n n = np.abs(np.roots([.5, -.5, 0]).max()) niter = int(ramp // (2 * n)) # 2 means add all MV in first half of ramp MV = n log.info('Initializing a ramp plan', extra=dict( ramp_size=ramp, err=err, niter=niter)) for x in range(int(n)): budget = MV for x in range(niter): budget -= MV // niter yield int(np.sign(err) * (MV // niter)) yield int(budget * np.sign(err)) MV -= 1 while True: yield 0
[ "def", "create_ramp_plan", "(", "err", ",", "ramp", ")", ":", "if", "ramp", "==", "1", ":", "# basecase", "yield", "int", "(", "err", ")", "while", "True", ":", "yield", "0", "# np.arange(n).sum() == err", "# --> solve for n", "# err = (n - 1) * (n // 2) == .5 * n**2 - .5 * n", "# 0 = n**2 - n --> solve for n", "n", "=", "np", ".", "abs", "(", "np", ".", "roots", "(", "[", ".5", ",", "-", ".5", ",", "0", "]", ")", ".", "max", "(", ")", ")", "niter", "=", "int", "(", "ramp", "//", "(", "2", "*", "n", ")", ")", "# 2 means add all MV in first half of ramp", "MV", "=", "n", "log", ".", "info", "(", "'Initializing a ramp plan'", ",", "extra", "=", "dict", "(", "ramp_size", "=", "ramp", ",", "err", "=", "err", ",", "niter", "=", "niter", ")", ")", "for", "x", "in", "range", "(", "int", "(", "n", ")", ")", ":", "budget", "=", "MV", "for", "x", "in", "range", "(", "niter", ")", ":", "budget", "-=", "MV", "//", "niter", "yield", "int", "(", "np", ".", "sign", "(", "err", ")", "*", "(", "MV", "//", "niter", ")", ")", "yield", "int", "(", "budget", "*", "np", ".", "sign", "(", "err", ")", ")", "MV", "-=", "1", "while", "True", ":", "yield", "0" ]
Formulate and execute on a plan to slowly add heat or cooling to the system `err` initial error (PV - SP) `ramp` the size of the ramp A ramp plan might yield MVs in this order at every timestep: [5, 0, 4, 0, 3, 0, 2, 0, 1] where err == 5 + 4 + 3 + 2 + 1
[ "Formulate", "and", "execute", "on", "a", "plan", "to", "slowly", "add", "heat", "or", "cooling", "to", "the", "system" ]
python
train
31.121212
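Inspecting the plan's first moves: with ramp=1 the generator takes the base case, emitting the whole error at once and then idling at zero.

from itertools import islice

plan = create_ramp_plan(err=5, ramp=1)
assert list(islice(plan, 4)) == [5, 0, 0, 0]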
cytoscape/py2cytoscape
py2cytoscape/cyrest/edge.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/edge.py#L13-L35
def create_attribute(self,column=None,listType=None,namespace=None, network=None, atype=None, verbose=False): """ Creates a new edge column. :param column (string, optional): Unique name of column :param listType (string, optional): Can be one of integer, long, double, or string. :param namespace (string, optional): Node, Edge, and Network objects support the default, local, and hidden namespaces. Root networks also support the shared namespace. Custom namespaces may be specified by Apps. :param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network. :param atype (string, optional): Can be one of integer, long, double, string, or list. :param verbose: print more """ network=check_network(self,network,verbose=verbose) PARAMS=set_param(["column","listType","namespace","network","type"],[column,listType,namespace,network,atype]) response=api(url=self.__url+"/create attribute", PARAMS=PARAMS, method="POST", verbose=verbose) return response
[ "def", "create_attribute", "(", "self", ",", "column", "=", "None", ",", "listType", "=", "None", ",", "namespace", "=", "None", ",", "network", "=", "None", ",", "atype", "=", "None", ",", "verbose", "=", "False", ")", ":", "network", "=", "check_network", "(", "self", ",", "network", ",", "verbose", "=", "verbose", ")", "PARAMS", "=", "set_param", "(", "[", "\"column\"", ",", "\"listType\"", ",", "\"namespace\"", ",", "\"network\"", ",", "\"type\"", "]", ",", "[", "column", ",", "listType", ",", "namespace", ",", "network", ",", "atype", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/create attribute\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Creates a new edge column. :param column (string, optional): Unique name of column :param listType (string, optional): Can be one of integer, long, double, or string. :param namespace (string, optional): Node, Edge, and Network objects support the default, local, and hidden namespaces. Root networks also support the shared namespace. Custom namespaces may be specified by Apps. :param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value can also be used to specify the current network. :param atype (string, optional): Can be one of integer, long, double, string, or list. :param verbose: print more
[ "Creates", "a", "new", "edge", "column", "." ]
python
train
54.130435
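A hypothetical call; it assumes a running Cytoscape instance with cyREST on the default port, and the column name is made up.

from py2cytoscape import cyrest

cy = cyrest.cyclient()
cy.edge.create_attribute(column="weight", atype="double", network="CURRENT")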
acorg/dark-matter
dark/blast/score.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/blast/score.py#L45-L63
def eValueToBitScore(eValue, dbSize, dbSequenceCount, queryLength, lengthAdjustment): """ Convert an e-value to a bit score. @param eValue: The C{float} e-value to convert. @param dbSize: The C{int} total size of the database (i.e., the sum of the lengths of all sequences in the BLAST database). @param dbSequenceCount: The C{int} number of sequences in the database. @param queryLength: The C{int} length of the query. @param lengthAdjustment: The C{int} length adjustment (BLAST XML output calls this the Statistics_hsp-len). @return: A C{float} bit score. """ effectiveDbSize = ( (dbSize - dbSequenceCount * lengthAdjustment) * (queryLength - lengthAdjustment) ) return -1.0 * (log(eValue / effectiveDbSize) / _LOG2)
[ "def", "eValueToBitScore", "(", "eValue", ",", "dbSize", ",", "dbSequenceCount", ",", "queryLength", ",", "lengthAdjustment", ")", ":", "effectiveDbSize", "=", "(", "(", "dbSize", "-", "dbSequenceCount", "*", "lengthAdjustment", ")", "*", "(", "queryLength", "-", "lengthAdjustment", ")", ")", "return", "-", "1.0", "*", "(", "log", "(", "eValue", "/", "effectiveDbSize", ")", "/", "_LOG2", ")" ]
Convert an e-value to a bit score. @param eValue: The C{float} e-value to convert. @param dbSize: The C{int} total size of the database (i.e., the sum of the lengths of all sequences in the BLAST database). @param dbSequenceCount: The C{int} number of sequences in the database. @param queryLength: The C{int} length of the query. @param lengthAdjustment: The C{int} length adjustment (BLAST XML output calls this the Statistics_hsp-len). @return: A C{float} bit score.
[ "Convert", "an", "e", "-", "value", "to", "a", "bit", "score", "." ]
python
train
42.315789
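A sanity sketch with made-up database statistics, assuming the function is imported from dark.blast.score; smaller e-values yield higher bit scores.

bits = eValueToBitScore(eValue=1e-30, dbSize=10000000,
                        dbSequenceCount=5000, queryLength=300,
                        lengthAdjustment=40)
print(round(bits, 2))  # roughly 131 bits for these inputs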
michael-lazar/rtv
rtv/page.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/page.py#L773-L846
def _draw_content(self): """ Loop through submissions and fill up the content page. """ n_rows, n_cols = self.term.stdscr.getmaxyx() window = self.term.stdscr.derwin(n_rows - self._row - 1, n_cols, self._row, 0) window.erase() win_n_rows, win_n_cols = window.getmaxyx() self._subwindows = [] page_index, cursor_index, inverted = self.nav.position step = self.nav.step # If not inverted, align the first submission with the top and draw # downwards. If inverted, align the first submission with the bottom # and draw upwards. cancel_inverted = True current_row = (win_n_rows - 1) if inverted else 0 available_rows = win_n_rows top_item_height = None if inverted else self.nav.top_item_height for data in self.content.iterate(page_index, step, win_n_cols - 2): subwin_n_rows = min(available_rows, data['n_rows']) subwin_inverted = inverted if top_item_height is not None: # Special case: draw the page as non-inverted, except for the # top element. This element will be drawn as inverted with a # restricted height subwin_n_rows = min(subwin_n_rows, top_item_height) subwin_inverted = True top_item_height = None subwin_n_cols = win_n_cols - data['h_offset'] start = current_row - subwin_n_rows + 1 if inverted else current_row subwindow = window.derwin(subwin_n_rows, subwin_n_cols, start, data['h_offset']) self._subwindows.append((subwindow, data, subwin_inverted)) available_rows -= (subwin_n_rows + 1) # Add one for the blank line current_row += step * (subwin_n_rows + 1) if available_rows <= 0: # Indicate the page is full and we can keep the inverted screen. cancel_inverted = False break if len(self._subwindows) == 1: # Never draw inverted if only one subwindow. The top of the # subwindow should always be aligned with the top of the screen. cancel_inverted = True if cancel_inverted and self.nav.inverted: # In some cases we need to make sure that the screen is NOT # inverted. Unfortunately, this currently means drawing the whole # page over again. Could not think of a better way to pre-determine # if the content will fill up the page, given that it is dependent # on the size of the terminal. self.nav.flip((len(self._subwindows) - 1)) self._draw_content() return if self.nav.cursor_index >= len(self._subwindows): # Don't allow the cursor to go over the number of subwindows # This could happen if the window is resized and the cursor index is # pushed out of bounds self.nav.cursor_index = len(self._subwindows) - 1 # Now that the windows are setup, we can take a second pass through # to draw the text onto each subwindow for index, (win, data, inverted) in enumerate(self._subwindows): if self.nav.absolute_index >= 0 and index == self.nav.cursor_index: win.bkgd(str(' '), self.term.attr('Selected')) with self.term.theme.turn_on_selected(): self._draw_item(win, data, inverted) else: win.bkgd(str(' '), self.term.attr('Normal')) self._draw_item(win, data, inverted) self._row += win_n_rows
[ "def", "_draw_content", "(", "self", ")", ":", "n_rows", ",", "n_cols", "=", "self", ".", "term", ".", "stdscr", ".", "getmaxyx", "(", ")", "window", "=", "self", ".", "term", ".", "stdscr", ".", "derwin", "(", "n_rows", "-", "self", ".", "_row", "-", "1", ",", "n_cols", ",", "self", ".", "_row", ",", "0", ")", "window", ".", "erase", "(", ")", "win_n_rows", ",", "win_n_cols", "=", "window", ".", "getmaxyx", "(", ")", "self", ".", "_subwindows", "=", "[", "]", "page_index", ",", "cursor_index", ",", "inverted", "=", "self", ".", "nav", ".", "position", "step", "=", "self", ".", "nav", ".", "step", "# If not inverted, align the first submission with the top and draw", "# downwards. If inverted, align the first submission with the bottom", "# and draw upwards.", "cancel_inverted", "=", "True", "current_row", "=", "(", "win_n_rows", "-", "1", ")", "if", "inverted", "else", "0", "available_rows", "=", "win_n_rows", "top_item_height", "=", "None", "if", "inverted", "else", "self", ".", "nav", ".", "top_item_height", "for", "data", "in", "self", ".", "content", ".", "iterate", "(", "page_index", ",", "step", ",", "win_n_cols", "-", "2", ")", ":", "subwin_n_rows", "=", "min", "(", "available_rows", ",", "data", "[", "'n_rows'", "]", ")", "subwin_inverted", "=", "inverted", "if", "top_item_height", "is", "not", "None", ":", "# Special case: draw the page as non-inverted, except for the", "# top element. This element will be drawn as inverted with a", "# restricted height", "subwin_n_rows", "=", "min", "(", "subwin_n_rows", ",", "top_item_height", ")", "subwin_inverted", "=", "True", "top_item_height", "=", "None", "subwin_n_cols", "=", "win_n_cols", "-", "data", "[", "'h_offset'", "]", "start", "=", "current_row", "-", "subwin_n_rows", "+", "1", "if", "inverted", "else", "current_row", "subwindow", "=", "window", ".", "derwin", "(", "subwin_n_rows", ",", "subwin_n_cols", ",", "start", ",", "data", "[", "'h_offset'", "]", ")", "self", ".", "_subwindows", ".", "append", "(", "(", "subwindow", ",", "data", ",", "subwin_inverted", ")", ")", "available_rows", "-=", "(", "subwin_n_rows", "+", "1", ")", "# Add one for the blank line", "current_row", "+=", "step", "*", "(", "subwin_n_rows", "+", "1", ")", "if", "available_rows", "<=", "0", ":", "# Indicate the page is full and we can keep the inverted screen.", "cancel_inverted", "=", "False", "break", "if", "len", "(", "self", ".", "_subwindows", ")", "==", "1", ":", "# Never draw inverted if only one subwindow. The top of the", "# subwindow should always be aligned with the top of the screen.", "cancel_inverted", "=", "True", "if", "cancel_inverted", "and", "self", ".", "nav", ".", "inverted", ":", "# In some cases we need to make sure that the screen is NOT", "# inverted. Unfortunately, this currently means drawing the whole", "# page over again. 
Could not think of a better way to pre-determine", "# if the content will fill up the page, given that it is dependent", "# on the size of the terminal.", "self", ".", "nav", ".", "flip", "(", "(", "len", "(", "self", ".", "_subwindows", ")", "-", "1", ")", ")", "self", ".", "_draw_content", "(", ")", "return", "if", "self", ".", "nav", ".", "cursor_index", ">=", "len", "(", "self", ".", "_subwindows", ")", ":", "# Don't allow the cursor to go over the number of subwindows", "# This could happen if the window is resized and the cursor index is", "# pushed out of bounds", "self", ".", "nav", ".", "cursor_index", "=", "len", "(", "self", ".", "_subwindows", ")", "-", "1", "# Now that the windows are setup, we can take a second pass through", "# to draw the text onto each subwindow", "for", "index", ",", "(", "win", ",", "data", ",", "inverted", ")", "in", "enumerate", "(", "self", ".", "_subwindows", ")", ":", "if", "self", ".", "nav", ".", "absolute_index", ">=", "0", "and", "index", "==", "self", ".", "nav", ".", "cursor_index", ":", "win", ".", "bkgd", "(", "str", "(", "' '", ")", ",", "self", ".", "term", ".", "attr", "(", "'Selected'", ")", ")", "with", "self", ".", "term", ".", "theme", ".", "turn_on_selected", "(", ")", ":", "self", ".", "_draw_item", "(", "win", ",", "data", ",", "inverted", ")", "else", ":", "win", ".", "bkgd", "(", "str", "(", "' '", ")", ",", "self", ".", "term", ".", "attr", "(", "'Normal'", ")", ")", "self", ".", "_draw_item", "(", "win", ",", "data", ",", "inverted", ")", "self", ".", "_row", "+=", "win_n_rows" ]
Loop through submissions and fill up the content page.
[ "Loop", "through", "submissions", "and", "fill", "up", "the", "content", "page", "." ]
python
train
48.594595
clalancette/pycdlib
pycdlib/dr.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/dr.py#L1044-L1059
def is_associated_file(self): # type: () -> bool ''' A method to determine whether this file is 'associated' with another file on the ISO. Parameters: None. Returns: True if this file is associated with another file on the ISO, False otherwise. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized') return self.file_flags & (1 << self.FILE_FLAG_ASSOCIATED_FILE_BIT)
[ "def", "is_associated_file", "(", "self", ")", ":", "# type: () -> bool", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Directory Record not yet initialized'", ")", "return", "self", ".", "file_flags", "&", "(", "1", "<<", "self", ".", "FILE_FLAG_ASSOCIATED_FILE_BIT", ")" ]
A method to determine whether this file is 'associated' with another file on the ISO. Parameters: None. Returns: True if this file is associated with another file on the ISO, False otherwise.
[ "A", "method", "to", "determine", "whether", "this", "file", "is", "associated", "with", "another", "file", "on", "the", "ISO", "." ]
python
train
32.6875
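The bit test above, shown outside the class: in ISO 9660 file flags, bit 2 marks an associated file.

FILE_FLAG_ASSOCIATED_FILE_BIT = 2
file_flags = 0b00000100  # only the "associated file" bit set
assert file_flags & (1 << FILE_FLAG_ASSOCIATED_FILE_BIT)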
vertexproject/synapse
synapse/lib/link.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/link.py#L18-L24
async def connect(host, port, ssl=None): ''' Async connect and return a Link(). ''' info = {'host': host, 'port': port, 'ssl': ssl} reader, writer = await asyncio.open_connection(host, port, ssl=ssl) return await Link.anit(reader, writer, info=info)
[ "async", "def", "connect", "(", "host", ",", "port", ",", "ssl", "=", "None", ")", ":", "info", "=", "{", "'host'", ":", "host", ",", "'port'", ":", "port", ",", "'ssl'", ":", "ssl", "}", "reader", ",", "writer", "=", "await", "asyncio", ".", "open_connection", "(", "host", ",", "port", ",", "ssl", "=", "ssl", ")", "return", "await", "Link", ".", "anit", "(", "reader", ",", "writer", ",", "info", "=", "info", ")" ]
Async connect and return a Link().
[ "Async", "connect", "and", "return", "a", "Link", "()", "." ]
python
train
38.142857
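A hypothetical usage sketch; it assumes connect is imported from synapse.lib.link, a Synapse daemon is listening on the given host and port, and that the link is torn down with fini() as Synapse base objects normally are.

import asyncio

async def main():
    link = await connect("127.0.0.1", 27492)
    try:
        pass  # exchange messages over the link here
    finally:
        await link.fini()

asyncio.run(main())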
mikedh/trimesh
trimesh/creation.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/creation.py#L883-L972
def axis(origin_size=0.04, transform=None, origin_color=None, axis_radius=None, axis_length=None): """ Return an XYZ axis marker as a Trimesh, which represents position and orientation. If you set the origin size the other parameters will be set relative to it. Parameters ---------- transform : (4, 4) float Transformation matrix origin_size : float Radius of sphere that represents the origin origin_color : (3,) float or int, uint8 or float Color of the origin axis_radius : float Radius of cylinder that represents x, y, z axis axis_length: float Length of cylinder that represents x, y, z axis Returns ------- marker : trimesh.Trimesh Mesh geometry of axis indicators """ # the size of the ball representing the origin origin_size = float(origin_size) # set the transform and use origin-relative # sized for other parameters if not specified if transform is None: transform = np.eye(4) if origin_color is None: origin_color = [255, 255, 255, 255] if axis_radius is None: axis_radius = origin_size / 5.0 if axis_length is None: axis_length = origin_size * 10.0 # generate a ball for the origin axis_origin = uv_sphere(radius=origin_size, count=[10, 10]) axis_origin.apply_transform(transform) # apply color to the origin ball axis_origin.visual.face_colors = origin_color # create the cylinder for the z-axis translation = transformations.translation_matrix( [0, 0, axis_length / 2]) z_axis = cylinder( radius=axis_radius, height=axis_length, transform=transform.dot(translation)) # XYZ->RGB, Z is blue z_axis.visual.face_colors = [0, 0, 255] # create the cylinder for the y-axis translation = transformations.translation_matrix( [0, 0, axis_length / 2]) rotation = transformations.rotation_matrix(np.radians(-90), [1, 0, 0]) y_axis = cylinder( radius=axis_radius, height=axis_length, transform=transform.dot(rotation).dot(translation)) # XYZ->RGB, Y is green y_axis.visual.face_colors = [0, 255, 0] # create the cylinder for the x-axis translation = transformations.translation_matrix( [0, 0, axis_length / 2]) rotation = transformations.rotation_matrix(np.radians(90), [0, 1, 0]) x_axis = cylinder( radius=axis_radius, height=axis_length, transform=transform.dot(rotation).dot(translation)) # XYZ->RGB, X is red x_axis.visual.face_colors = [255, 0, 0] # append the sphere and three cylinders marker = util.concatenate([axis_origin, x_axis, y_axis, z_axis]) return marker
[ "def", "axis", "(", "origin_size", "=", "0.04", ",", "transform", "=", "None", ",", "origin_color", "=", "None", ",", "axis_radius", "=", "None", ",", "axis_length", "=", "None", ")", ":", "# the size of the ball representing the origin", "origin_size", "=", "float", "(", "origin_size", ")", "# set the transform and use origin-relative", "# sized for other parameters if not specified", "if", "transform", "is", "None", ":", "transform", "=", "np", ".", "eye", "(", "4", ")", "if", "origin_color", "is", "None", ":", "origin_color", "=", "[", "255", ",", "255", ",", "255", ",", "255", "]", "if", "axis_radius", "is", "None", ":", "axis_radius", "=", "origin_size", "/", "5.0", "if", "axis_length", "is", "None", ":", "axis_length", "=", "origin_size", "*", "10.0", "# generate a ball for the origin", "axis_origin", "=", "uv_sphere", "(", "radius", "=", "origin_size", ",", "count", "=", "[", "10", ",", "10", "]", ")", "axis_origin", ".", "apply_transform", "(", "transform", ")", "# apply color to the origin ball", "axis_origin", ".", "visual", ".", "face_colors", "=", "origin_color", "# create the cylinder for the z-axis", "translation", "=", "transformations", ".", "translation_matrix", "(", "[", "0", ",", "0", ",", "axis_length", "/", "2", "]", ")", "z_axis", "=", "cylinder", "(", "radius", "=", "axis_radius", ",", "height", "=", "axis_length", ",", "transform", "=", "transform", ".", "dot", "(", "translation", ")", ")", "# XYZ->RGB, Z is blue", "z_axis", ".", "visual", ".", "face_colors", "=", "[", "0", ",", "0", ",", "255", "]", "# create the cylinder for the y-axis", "translation", "=", "transformations", ".", "translation_matrix", "(", "[", "0", ",", "0", ",", "axis_length", "/", "2", "]", ")", "rotation", "=", "transformations", ".", "rotation_matrix", "(", "np", ".", "radians", "(", "-", "90", ")", ",", "[", "1", ",", "0", ",", "0", "]", ")", "y_axis", "=", "cylinder", "(", "radius", "=", "axis_radius", ",", "height", "=", "axis_length", ",", "transform", "=", "transform", ".", "dot", "(", "rotation", ")", ".", "dot", "(", "translation", ")", ")", "# XYZ->RGB, Y is green", "y_axis", ".", "visual", ".", "face_colors", "=", "[", "0", ",", "255", ",", "0", "]", "# create the cylinder for the x-axis", "translation", "=", "transformations", ".", "translation_matrix", "(", "[", "0", ",", "0", ",", "axis_length", "/", "2", "]", ")", "rotation", "=", "transformations", ".", "rotation_matrix", "(", "np", ".", "radians", "(", "90", ")", ",", "[", "0", ",", "1", ",", "0", "]", ")", "x_axis", "=", "cylinder", "(", "radius", "=", "axis_radius", ",", "height", "=", "axis_length", ",", "transform", "=", "transform", ".", "dot", "(", "rotation", ")", ".", "dot", "(", "translation", ")", ")", "# XYZ->RGB, X is red", "x_axis", ".", "visual", ".", "face_colors", "=", "[", "255", ",", "0", ",", "0", "]", "# append the sphere and three cylinders", "marker", "=", "util", ".", "concatenate", "(", "[", "axis_origin", ",", "x_axis", ",", "y_axis", ",", "z_axis", "]", ")", "return", "marker" ]
Return an XYZ axis marker as a Trimesh, which represents position and orientation. If you set the origin size the other parameters will be set relative to it. Parameters ---------- transform : (4, 4) float Transformation matrix origin_size : float Radius of sphere that represents the origin origin_color : (3,) float or int, uint8 or float Color of the origin axis_radius : float Radius of cylinder that represents x, y, z axis axis_length: float Length of cylinder that represents x, y, z axis Returns ------- marker : trimesh.Trimesh Mesh geometry of axis indicators
[ "Return", "an", "XYZ", "axis", "marker", "as", "a", "Trimesh", "which", "represents", "position", "and", "orientation", ".", "If", "you", "set", "the", "origin", "size", "the", "other", "parameters", "will", "be", "set", "relative", "to", "it", "." ]
python
train
32.266667
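A hedged usage sketch for the `axis` marker above (not part of the dataset record). It assumes the function ships as `trimesh.creation.axis`; the offset transform is illustrative.

import trimesh

# place the marker one unit along +x, with a larger origin ball
transform = trimesh.transformations.translation_matrix([1.0, 0.0, 0.0])
marker = trimesh.creation.axis(origin_size=0.08, transform=transform)
# marker is a single Trimesh: white origin sphere plus red/green/blue cylinders
marker.show()  # needs a viewer backend such as pyglet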
rameshg87/pyremotevbox
pyremotevbox/ZSI/digest_auth.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/digest_auth.py#L103-L110
def build_authorization_arg(authdict): """ Create an "Authorization" header value from an authdict (created by generate_response()). """ vallist = [] for k in authdict.keys(): vallist += ['%s=%s' % (k,authdict[k])] return 'Digest '+', '.join(vallist)
[ "def", "build_authorization_arg", "(", "authdict", ")", ":", "vallist", "=", "[", "]", "for", "k", "in", "authdict", ".", "keys", "(", ")", ":", "vallist", "+=", "[", "'%s=%s'", "%", "(", "k", ",", "authdict", "[", "k", "]", ")", "]", "return", "'Digest '", "+", "', '", ".", "join", "(", "vallist", ")" ]
Create an "Authorization" header value from an authdict (created by generate_response()).
[ "Create", "an", "Authorization", "header", "value", "from", "an", "authdict", "(", "created", "by", "generate_response", "()", ")", "." ]
python
train
32.375
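A minimal sketch of calling `build_authorization_arg` with a hand-built authdict; the field values are hypothetical, and note the function joins values verbatim (it adds no quoting of its own).

authdict = {'username': '"alice"', 'realm': '"example.com"', 'nc': '00000001'}
build_authorization_arg(authdict)
# -> 'Digest username="alice", realm="example.com", nc=00000001'
# (pair order follows the dict's key iteration order)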
marshallward/f90nml
f90nml/parser.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/parser.py#L221-L228
def strict_logical(self, value): """Validate and set the strict logical flag.""" if value is not None: if not isinstance(value, bool): raise TypeError( 'f90nml: error: strict_logical must be a logical value.') else: self._strict_logical = value
[ "def", "strict_logical", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "if", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "TypeError", "(", "'f90nml: error: strict_logical must be a logical value.'", ")", "else", ":", "self", ".", "_strict_logical", "=", "value" ]
Validate and set the strict logical flag.
[ "Validate", "and", "set", "the", "strict", "logical", "flag", "." ]
python
train
41.125
tensorflow/datasets
tensorflow_datasets/image/flowers.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/flowers.py#L71-L93
def _generate_examples(self, images_dir_path): """Generate flower images and labels given the image directory path. Args: images_dir_path: path to the directory where the images are stored. Yields: The image path and its corresponding label. """ parent_dir = tf.io.gfile.listdir(images_dir_path)[0] walk_dir = os.path.join(images_dir_path, parent_dir) dirs = tf.io.gfile.listdir(walk_dir) for d in dirs: if tf.io.gfile.isdir(os.path.join(walk_dir, d)): for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)): for image_file in fname: if image_file.endswith(".jpg"): image_path = os.path.join(full_path, image_file) yield { "image": image_path, "label": d.lower(), }
[ "def", "_generate_examples", "(", "self", ",", "images_dir_path", ")", ":", "parent_dir", "=", "tf", ".", "io", ".", "gfile", ".", "listdir", "(", "images_dir_path", ")", "[", "0", "]", "walk_dir", "=", "os", ".", "path", ".", "join", "(", "images_dir_path", ",", "parent_dir", ")", "dirs", "=", "tf", ".", "io", ".", "gfile", ".", "listdir", "(", "walk_dir", ")", "for", "d", "in", "dirs", ":", "if", "tf", ".", "io", ".", "gfile", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "walk_dir", ",", "d", ")", ")", ":", "for", "full_path", ",", "_", ",", "fname", "in", "tf", ".", "io", ".", "gfile", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "walk_dir", ",", "d", ")", ")", ":", "for", "image_file", "in", "fname", ":", "if", "image_file", ".", "endswith", "(", "\".jpg\"", ")", ":", "image_path", "=", "os", ".", "path", ".", "join", "(", "full_path", ",", "image_file", ")", "yield", "{", "\"image\"", ":", "image_path", ",", "\"label\"", ":", "d", ".", "lower", "(", ")", ",", "}" ]
Generate flower images and labels given the image directory path. Args: images_dir_path: path to the directory where the images are stored. Yields: The image path and its corresponding label.
[ "Generate", "flower", "images", "and", "labels", "given", "the", "image", "directory", "path", "." ]
python
train
35.652174
edx/completion
completion/services.py
https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L114-L120
def blocks_to_mark_complete_on_view(self, blocks): """ Returns a set of blocks which should be marked complete on view and haven't been yet. """ blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)} completions = self.get_completions({block.location for block in blocks}) return {block for block in blocks if completions.get(block.location, 0) < 1.0}
[ "def", "blocks_to_mark_complete_on_view", "(", "self", ",", "blocks", ")", ":", "blocks", "=", "{", "block", "for", "block", "in", "blocks", "if", "self", ".", "can_mark_block_complete_on_view", "(", "block", ")", "}", "completions", "=", "self", ".", "get_completions", "(", "{", "block", ".", "location", "for", "block", "in", "blocks", "}", ")", "return", "{", "block", "for", "block", "in", "blocks", "if", "completions", ".", "get", "(", "block", ".", "location", ",", "0", ")", "<", "1.0", "}" ]
Returns a set of blocks which should be marked complete on view and haven't been yet.
[ "Returns", "a", "set", "of", "blocks", "which", "should", "be", "marked", "complete", "on", "view", "and", "haven", "t", "been", "yet", "." ]
python
train
60.285714
jantman/awslimitchecker
awslimitchecker/services/rds.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/rds.py#L113-L131
def _find_usage_security_groups(self): """find usage for security groups""" vpc_count = 0 paginator = self.conn.get_paginator('describe_db_security_groups') for page in paginator.paginate(): for group in page['DBSecurityGroups']: if 'VpcId' in group and group['VpcId'] is not None: vpc_count += 1 self.limits['Max auths per security group']._add_current_usage( len(group["EC2SecurityGroups"]) + len(group["IPRanges"]), aws_type='AWS::RDS::DBSecurityGroup', resource_id=group['DBSecurityGroupName'] ) self.limits['VPC Security Groups']._add_current_usage( vpc_count, aws_type='AWS::RDS::DBSecurityGroup', )
[ "def", "_find_usage_security_groups", "(", "self", ")", ":", "vpc_count", "=", "0", "paginator", "=", "self", ".", "conn", ".", "get_paginator", "(", "'describe_db_security_groups'", ")", "for", "page", "in", "paginator", ".", "paginate", "(", ")", ":", "for", "group", "in", "page", "[", "'DBSecurityGroups'", "]", ":", "if", "'VpcId'", "in", "group", "and", "group", "[", "'VpcId'", "]", "is", "not", "None", ":", "vpc_count", "+=", "1", "self", ".", "limits", "[", "'Max auths per security group'", "]", ".", "_add_current_usage", "(", "len", "(", "group", "[", "\"EC2SecurityGroups\"", "]", ")", "+", "len", "(", "group", "[", "\"IPRanges\"", "]", ")", ",", "aws_type", "=", "'AWS::RDS::DBSecurityGroup'", ",", "resource_id", "=", "group", "[", "'DBSecurityGroupName'", "]", ")", "self", ".", "limits", "[", "'VPC Security Groups'", "]", ".", "_add_current_usage", "(", "vpc_count", ",", "aws_type", "=", "'AWS::RDS::DBSecurityGroup'", ",", ")" ]
find usage for security groups
[ "find", "usage", "for", "security", "groups" ]
python
train
42.157895
vbwagner/ctypescrypto
ctypescrypto/cms.py
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/cms.py#L107-L114
def pem(self): """ Serialize in PEM format """ bio = Membio() if not libcrypto.PEM_write_bio_CMS(bio.bio, self.ptr): raise CMSError("writing CMS to PEM") return str(bio)
[ "def", "pem", "(", "self", ")", ":", "bio", "=", "Membio", "(", ")", "if", "not", "libcrypto", ".", "PEM_write_bio_CMS", "(", "bio", ".", "bio", ",", "self", ".", "ptr", ")", ":", "raise", "CMSError", "(", "\"writing CMS to PEM\"", ")", "return", "str", "(", "bio", ")" ]
Serialize in PEM format
[ "Serialize", "in", "PEM", "format" ]
python
train
27.75
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/yaml_data_visibility_config_reader.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/yaml_data_visibility_config_reader.py#L94-L120
def Read(f): """Reads and returns Config data from a yaml file. Args: f: Yaml file to parse. Returns: Config object as defined in this file. Raises: Error (some subclass): If there is a problem loading or parsing the file. """ try: yaml_data = yaml.load(f) except yaml.YAMLError as e: raise ParseError('%s' % e) except IOError as e: raise YAMLLoadError('%s' % e) _CheckData(yaml_data) try: return Config( yaml_data.get('blacklist', ()), yaml_data.get('whitelist', ('*'))) except UnicodeDecodeError as e: raise YAMLLoadError('%s' % e)
[ "def", "Read", "(", "f", ")", ":", "try", ":", "yaml_data", "=", "yaml", ".", "load", "(", "f", ")", "except", "yaml", ".", "YAMLError", "as", "e", ":", "raise", "ParseError", "(", "'%s'", "%", "e", ")", "except", "IOError", "as", "e", ":", "raise", "YAMLLoadError", "(", "'%s'", "%", "e", ")", "_CheckData", "(", "yaml_data", ")", "try", ":", "return", "Config", "(", "yaml_data", ".", "get", "(", "'blacklist'", ",", "(", ")", ")", ",", "yaml_data", ".", "get", "(", "'whitelist'", ",", "(", "'*'", ")", ")", ")", "except", "UnicodeDecodeError", "as", "e", ":", "raise", "YAMLLoadError", "(", "'%s'", "%", "e", ")" ]
Reads and returns Config data from a yaml file. Args: f: Yaml file to parse. Returns: Config object as defined in this file. Raises: Error (some subclass): If there is a problem loading or parsing the file.
[ "Reads", "and", "returns", "Config", "data", "from", "a", "yaml", "file", "." ]
python
train
21.555556
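A sketch of driving `Read` with an in-memory YAML file; it assumes `Read` and its `Config` result are importable from the same module, and the patterns are made up.

import io

yaml_text = '''
blacklist:
  - secret_module.*
whitelist:
  - '*'
'''
config = Read(io.StringIO(yaml_text))
# config now carries the blacklist and whitelist sequences parsed above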
hobson/pug-invest
pug/invest/util.py
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/util.py#L421-L447
def clipped_area(ts, thresh=0, integrator=integrate.trapz): """Total value * time above the starting value within a TimeSeries Arguments: ts (pandas.Series): Time series to be integrated. thresh (float): Value to clip the tops off at (crossings will be interpolated) References: http://nbviewer.ipython.org/gist/kermit666/5720498 >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', ... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >>> import pandas as pd >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) >>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS 8598.52941... >>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 562.5 >>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)), ... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 562.5 """ integrator = get_integrator(integrator or 0) ts = insert_crossings(ts, thresh) - thresh ts = ts[ts >= 0] # timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units) return integrator(ts, ts.index.astype(np.int64)) / 1.0e9
[ "def", "clipped_area", "(", "ts", ",", "thresh", "=", "0", ",", "integrator", "=", "integrate", ".", "trapz", ")", ":", "integrator", "=", "get_integrator", "(", "integrator", "or", "0", ")", "ts", "=", "insert_crossings", "(", "ts", ",", "thresh", ")", "-", "thresh", "ts", "=", "ts", "[", "ts", ">=", "0", "]", "# timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units)", "return", "integrator", "(", "ts", ",", "ts", ".", "index", ".", "astype", "(", "np", ".", "int64", ")", ")", "/", "1.0e9" ]
Total value * time above the starting value within a TimeSeries Arguments: ts (pandas.Series): Time series to be integrated. thresh (float): Value to clip the tops off at (crossings will be interpolated) References: http://nbviewer.ipython.org/gist/kermit666/5720498 >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', ... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >>> import pandas as pd >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) >>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS 8598.52941... >>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 562.5 >>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)), ... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 562.5
[ "Total", "value", "*", "time", "above", "the", "starting", "value", "within", "a", "TimeSeries" ]
python
train
46.777778
ladybug-tools/ladybug
ladybug/_datacollectionbase.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L552-L560
def _filter_by_statement(self, statement): """Filter the data collection based on a conditional statement.""" self.__class__._check_conditional_statement(statement, 1) _filt_values, _filt_datetimes = [], [] for i, a in enumerate(self._values): if eval(statement, {'a': a}): _filt_values.append(a) _filt_datetimes.append(self.datetimes[i]) return _filt_values, _filt_datetimes
[ "def", "_filter_by_statement", "(", "self", ",", "statement", ")", ":", "self", ".", "__class__", ".", "_check_conditional_statement", "(", "statement", ",", "1", ")", "_filt_values", ",", "_filt_datetimes", "=", "[", "]", ",", "[", "]", "for", "i", ",", "a", "in", "enumerate", "(", "self", ".", "_values", ")", ":", "if", "eval", "(", "statement", ",", "{", "'a'", ":", "a", "}", ")", ":", "_filt_values", ".", "append", "(", "a", ")", "_filt_datetimes", ".", "append", "(", "self", ".", "datetimes", "[", "i", "]", ")", "return", "_filt_values", ",", "_filt_datetimes" ]
Filter the data collection based on a conditional statement.
[ "Filter", "the", "data", "collection", "based", "on", "a", "conditional", "statement", "." ]
python
train
50.111111
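The heart of `_filter_by_statement` is an `eval` of the statement string with each value bound to the name `a`; the same pattern stands alone:

values = [10, -3, 7, 0]
datetimes = ['t0', 't1', 't2', 't3']  # stand-ins for real datetime objects
statement = 'a > 0'
kept = [(v, t) for v, t in zip(values, datetimes) if eval(statement, {'a': v})]
# kept == [(10, 't0'), (7, 't2')]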
fuzeman/trakt.py
trakt/interfaces/search.py
https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/search.py#L105-L187
def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs): """Search by titles, descriptions, translated titles, aliases, and people. **Note:** Results are ordered by the most relevant score. :param query: Search title or description :type query: :class:`~python:str` :param media: Desired media type (or :code:`None` to return all matching items) **Possible values:** - :code:`movie` - :code:`show` - :code:`episode` - :code:`person` - :code:`list` :type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str` :param year: Desired media year (or :code:`None` to return all matching items) :type year: :class:`~python:str` or :class:`~python:int` :param fields: Fields to search for :code:`query` (or :code:`None` to search all fields) :type fields: :class:`~python:str` or :class:`~python:list` :param extended: Level of information to include in response **Possible values:** - :code:`None`: Minimal (e.g. title, year, ids) **(default)** - :code:`full`: Complete :type extended: :class:`~python:str` :param kwargs: Extra request options :type kwargs: :class:`~python:dict` :return: Results :rtype: :class:`~python:list` of :class:`trakt.objects.media.Media` """ # Validate parameters if not media: warnings.warn( "\"media\" parameter is now required on the Trakt['search'].query() method", DeprecationWarning, stacklevel=2 ) if fields and not media: raise ValueError('"fields" can only be used when the "media" parameter is defined') # Build query query = { 'query': query } if year: query['year'] = year if fields: query['fields'] = fields if extended: query['extended'] = extended # Serialize media items if isinstance(media, list): media = ','.join(media) # Send request response = self.http.get( params=[media], query=query ) # Parse response items = self.get_data(response, **kwargs) if isinstance(items, requests.Response): return items if items is not None: return SearchMapper.process_many(self.client, items) return None
[ "def", "query", "(", "self", ",", "query", ",", "media", "=", "None", ",", "year", "=", "None", ",", "fields", "=", "None", ",", "extended", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Validate parameters", "if", "not", "media", ":", "warnings", ".", "warn", "(", "\"\\\"media\\\" parameter is now required on the Trakt['search'].query() method\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "if", "fields", "and", "not", "media", ":", "raise", "ValueError", "(", "'\"fields\" can only be used when the \"media\" parameter is defined'", ")", "# Build query", "query", "=", "{", "'query'", ":", "query", "}", "if", "year", ":", "query", "[", "'year'", "]", "=", "year", "if", "fields", ":", "query", "[", "'fields'", "]", "=", "fields", "if", "extended", ":", "query", "[", "'extended'", "]", "=", "extended", "# Serialize media items", "if", "isinstance", "(", "media", ",", "list", ")", ":", "media", "=", "','", ".", "join", "(", "media", ")", "# Send request", "response", "=", "self", ".", "http", ".", "get", "(", "params", "=", "[", "media", "]", ",", "query", "=", "query", ")", "# Parse response", "items", "=", "self", ".", "get_data", "(", "response", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "items", ",", "requests", ".", "Response", ")", ":", "return", "items", "if", "items", "is", "not", "None", ":", "return", "SearchMapper", ".", "process_many", "(", "self", ".", "client", ",", "items", ")", "return", "None" ]
Search by titles, descriptions, translated titles, aliases, and people. **Note:** Results are ordered by the most relevant score. :param query: Search title or description :type query: :class:`~python:str` :param media: Desired media type (or :code:`None` to return all matching items) **Possible values:** - :code:`movie` - :code:`show` - :code:`episode` - :code:`person` - :code:`list` :type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str` :param year: Desired media year (or :code:`None` to return all matching items) :type year: :class:`~python:str` or :class:`~python:int` :param fields: Fields to search for :code:`query` (or :code:`None` to search all fields) :type fields: :class:`~python:str` or :class:`~python:list` :param extended: Level of information to include in response **Possible values:** - :code:`None`: Minimal (e.g. title, year, ids) **(default)** - :code:`full`: Complete :type extended: :class:`~python:str` :param kwargs: Extra request options :type kwargs: :class:`~python:dict` :return: Results :rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
[ "Search", "by", "titles", "descriptions", "translated", "titles", "aliases", "and", "people", "." ]
python
train
30.108434
getpelican/pelican-plugins
thumbnailer/thumbnailer.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/thumbnailer/thumbnailer.py#L99-L120
def resize_file_to(self, in_path, out_path, keep_filename=False): """ Given a filename, resize and save the image per the specification into out_path :param in_path: path to image file to save. Must be supported by PIL :param out_path: path to the directory root for the outputted thumbnails to be stored :return: None """ if keep_filename: filename = path.join(out_path, path.basename(in_path)) else: filename = path.join(out_path, self.get_thumbnail_name(in_path)) out_path = path.dirname(filename) if not path.exists(out_path): os.makedirs(out_path) if not path.exists(filename): try: image = Image.open(in_path) thumbnail = self.resize(image) thumbnail.save(filename) logger.info("Generated Thumbnail {0}".format(path.basename(filename))) except IOError: logger.info("Generating Thumbnail for {0} skipped".format(path.basename(filename)))
[ "def", "resize_file_to", "(", "self", ",", "in_path", ",", "out_path", ",", "keep_filename", "=", "False", ")", ":", "if", "keep_filename", ":", "filename", "=", "path", ".", "join", "(", "out_path", ",", "path", ".", "basename", "(", "in_path", ")", ")", "else", ":", "filename", "=", "path", ".", "join", "(", "out_path", ",", "self", ".", "get_thumbnail_name", "(", "in_path", ")", ")", "out_path", "=", "path", ".", "dirname", "(", "filename", ")", "if", "not", "path", ".", "exists", "(", "out_path", ")", ":", "os", ".", "makedirs", "(", "out_path", ")", "if", "not", "path", ".", "exists", "(", "filename", ")", ":", "try", ":", "image", "=", "Image", ".", "open", "(", "in_path", ")", "thumbnail", "=", "self", ".", "resize", "(", "image", ")", "thumbnail", ".", "save", "(", "filename", ")", "logger", ".", "info", "(", "\"Generated Thumbnail {0}\"", ".", "format", "(", "path", ".", "basename", "(", "filename", ")", ")", ")", "except", "IOError", ":", "logger", ".", "info", "(", "\"Generating Thumbnail for {0} skipped\"", ".", "format", "(", "path", ".", "basename", "(", "filename", ")", ")", ")" ]
Given a filename, resize and save the image per the specification into out_path :param in_path: path to the image file to resize. Must be supported by PIL :param out_path: root directory where the generated thumbnails are stored :param keep_filename: if True, keep the original file name instead of the generated thumbnail name :return: None
[ "Given", "a", "filename", "resize", "and", "save", "the", "image", "per", "the", "specification", "into", "out_path" ]
python
train
47.409091
federico123579/Trading212-API
tradingAPI/utils.py
https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/utils.py#L48-L56
def get_number_unit(number): """get the unit of number""" n = str(float(number)) mult, submult = n.split('.') if float(submult) != 0: unit = '0.' + (len(submult)-1)*'0' + '1' return float(unit) else: return float(1)
[ "def", "get_number_unit", "(", "number", ")", ":", "n", "=", "str", "(", "float", "(", "number", ")", ")", "mult", ",", "submult", "=", "n", ".", "split", "(", "'.'", ")", "if", "float", "(", "submult", ")", "!=", "0", ":", "unit", "=", "'0.'", "+", "(", "len", "(", "submult", ")", "-", "1", ")", "*", "'0'", "+", "'1'", "return", "float", "(", "unit", ")", "else", ":", "return", "float", "(", "1", ")" ]
get the smallest decimal unit of number, e.g. 0.01 for 1.25
[ "get", "the", "smallest", "decimal", "unit", "of", "number" ]
python
train
27.888889
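Worked examples for `get_number_unit`, traced through the string-splitting logic above:

get_number_unit(1.25)  # '1.25' -> submult '25' -> 0.01
get_number_unit(0.5)   # '0.5'  -> submult '5'  -> 0.1
get_number_unit(3)     # '3.0'  -> submult '0'  -> 1.0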
inasafe/inasafe
safe/utilities/rounding.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/rounding.py#L259-L288
def html_scientific_notation_rate(rate): """Helper for convert decimal rate using scientific notation. For example we want to show the very detail value of fatality rate because it might be a very small number. :param rate: Rate value :type rate: float :return: Rate value with html tag to show the exponent :rtype: str """ precision = '%.3f' if rate * 100 > 0: decimal_rate = Decimal(precision % (rate * 100)) if decimal_rate == Decimal((precision % 0)): decimal_rate = Decimal(str(rate * 100)) else: decimal_rate = Decimal(str(rate * 100)) if decimal_rate.as_tuple().exponent >= -3: rate_percentage = str(decimal_rate) else: rate = '%.2E' % decimal_rate html_rate = rate.split('E') # we use html tag to show exponent html_rate[1] = '10<sup>{exponent}</sup>'.format( exponent=html_rate[1]) html_rate.insert(1, 'x') rate_percentage = ''.join(html_rate) return rate_percentage
[ "def", "html_scientific_notation_rate", "(", "rate", ")", ":", "precision", "=", "'%.3f'", "if", "rate", "*", "100", ">", "0", ":", "decimal_rate", "=", "Decimal", "(", "precision", "%", "(", "rate", "*", "100", ")", ")", "if", "decimal_rate", "==", "Decimal", "(", "(", "precision", "%", "0", ")", ")", ":", "decimal_rate", "=", "Decimal", "(", "str", "(", "rate", "*", "100", ")", ")", "else", ":", "decimal_rate", "=", "Decimal", "(", "str", "(", "rate", "*", "100", ")", ")", "if", "decimal_rate", ".", "as_tuple", "(", ")", ".", "exponent", ">=", "-", "3", ":", "rate_percentage", "=", "str", "(", "decimal_rate", ")", "else", ":", "rate", "=", "'%.2E'", "%", "decimal_rate", "html_rate", "=", "rate", ".", "split", "(", "'E'", ")", "# we use html tag to show exponent", "html_rate", "[", "1", "]", "=", "'10<sup>{exponent}</sup>'", ".", "format", "(", "exponent", "=", "html_rate", "[", "1", "]", ")", "html_rate", ".", "insert", "(", "1", ",", "'x'", ")", "rate_percentage", "=", "''", ".", "join", "(", "html_rate", ")", "return", "rate_percentage" ]
Helper to convert a decimal rate using scientific notation. For example we want to show the very detailed value of the fatality rate because it might be a very small number. :param rate: Rate value :type rate: float :return: Rate value with html tag to show the exponent :rtype: str
[ "Helper", "to", "convert", "a", "decimal", "rate", "using", "scientific", "notation", "." ]
python
train
33.633333
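Tracing `html_scientific_notation_rate` on two inputs; the outputs below follow the code above, so treat them as a best-effort check rather than documented behaviour:

html_scientific_notation_rate(0.0123)  # 1.23% -> '1.230' (exponent >= -3)
html_scientific_notation_rate(1e-7)    # 1e-05% -> '1.00x10<sup>-05</sup>'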
RedHatInsights/insights-core
insights/specs/default.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/specs/default.py#L916-L923
def docker_installed_rpms(broker): """ Command: /usr/bin/rpm -qa --root `%s` --qf `%s`""" ctx = broker[DockerImageContext] root = ctx.root fmt = DefaultSpecs.rpm_format cmd = "/usr/bin/rpm -qa --root %s --qf '%s'" % (root, fmt) result = ctx.shell_out(cmd) return CommandOutputProvider(cmd, ctx, content=result)
[ "def", "docker_installed_rpms", "(", "broker", ")", ":", "ctx", "=", "broker", "[", "DockerImageContext", "]", "root", "=", "ctx", ".", "root", "fmt", "=", "DefaultSpecs", ".", "rpm_format", "cmd", "=", "\"/usr/bin/rpm -qa --root %s --qf '%s'\"", "%", "(", "root", ",", "fmt", ")", "result", "=", "ctx", ".", "shell_out", "(", "cmd", ")", "return", "CommandOutputProvider", "(", "cmd", ",", "ctx", ",", "content", "=", "result", ")" ]
Command: /usr/bin/rpm -qa --root `%s` --qf `%s`
[ "Command", ":", "/", "usr", "/", "bin", "/", "rpm", "-", "qa", "--", "root", "%s", "--", "qf", "%s" ]
python
train
44.875
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/control_client/models.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/models.py#L281-L303
def as_batch_body(self): ''' return the current message as expected by batch body format''' if sys.version_info >= (3,) and isinstance(self.body, bytes): # It HAS to be string to be serialized in JSON body = self.body.decode('utf-8') else: # Python 2.7 people handle this themself body = self.body result = {'Body': body} # Adds custom properties if self.custom_properties: result['UserProperties'] = {name: self._serialize_basic_properties_value(value) for name, value in self.custom_properties.items()} # Adds BrokerProperties if self.broker_properties: result['BrokerProperties'] = {name: self._serialize_basic_properties_value(value) for name, value in self.broker_properties.items()} return result
[ "def", "as_batch_body", "(", "self", ")", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", "and", "isinstance", "(", "self", ".", "body", ",", "bytes", ")", ":", "# It HAS to be string to be serialized in JSON", "body", "=", "self", ".", "body", ".", "decode", "(", "'utf-8'", ")", "else", ":", "# Python 2.7 people handle this themself", "body", "=", "self", ".", "body", "result", "=", "{", "'Body'", ":", "body", "}", "# Adds custom properties", "if", "self", ".", "custom_properties", ":", "result", "[", "'UserProperties'", "]", "=", "{", "name", ":", "self", ".", "_serialize_basic_properties_value", "(", "value", ")", "for", "name", ",", "value", "in", "self", ".", "custom_properties", ".", "items", "(", ")", "}", "# Adds BrokerProperties", "if", "self", ".", "broker_properties", ":", "result", "[", "'BrokerProperties'", "]", "=", "{", "name", ":", "self", ".", "_serialize_basic_properties_value", "(", "value", ")", "for", "name", ",", "value", "in", "self", ".", "broker_properties", ".", "items", "(", ")", "}", "return", "result" ]
return the current message in the format expected by a batch body
[ "return", "the", "current", "message", "in", "the", "format", "expected", "by", "a", "batch", "body" ]
python
test
43.086957
bw2/ConfigArgParse
configargparse.py
https://github.com/bw2/ConfigArgParse/blob/8bbc7de67f884184068d62af7f78e723d01c0081/configargparse.py#L573-L584
def get_command_line_key_for_unknown_config_file_setting(self, key): """Compute a commandline arg key to be used for a config file setting that doesn't correspond to any defined configargparse arg (and so doesn't have a user-specified commandline arg key). Args: key: The config file key that was being set. """ key_without_prefix_chars = key.strip(self.prefix_chars) command_line_key = self.prefix_chars[0]*2 + key_without_prefix_chars return command_line_key
[ "def", "get_command_line_key_for_unknown_config_file_setting", "(", "self", ",", "key", ")", ":", "key_without_prefix_chars", "=", "key", ".", "strip", "(", "self", ".", "prefix_chars", ")", "command_line_key", "=", "self", ".", "prefix_chars", "[", "0", "]", "*", "2", "+", "key_without_prefix_chars", "return", "command_line_key" ]
Compute a commandline arg key to be used for a config file setting that doesn't correspond to any defined configargparse arg (and so doesn't have a user-specified commandline arg key). Args: key: The config file key that was being set.
[ "Compute", "a", "commandline", "arg", "key", "to", "be", "used", "for", "a", "config", "file", "setting", "that", "doesn", "t", "correspond", "to", "any", "defined", "configargparse", "arg", "(", "and", "so", "doesn", "t", "have", "a", "user", "-", "specified", "commandline", "arg", "key", ")", "." ]
python
train
43.916667
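With the default argparse `prefix_chars` of '-', the transformation above maps any config-file key onto a double-dash flag; a standalone sketch:

prefix_chars = '-'
for key in ('debug', '--debug', '-v'):
    print(prefix_chars[0] * 2 + key.strip(prefix_chars))
# prints: --debug, --debug, --v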
cloud9ers/gurumate
environment/lib/python2.7/site-packages/gurumate-2.8.6-py2.7.egg/gurumate/web.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/gurumate-2.8.6-py2.7.egg/gurumate/web.py#L57-L68
def compare_content_type(url, content_type): ''' Compare the content type header of url param with content_type param and returns boolean @param url -> string e.g. http://127.0.0.1/index @param content_type -> string e.g. text/html ''' try: response = urllib2.urlopen(url) except: return False return response.headers.type == content_type
[ "def", "compare_content_type", "(", "url", ",", "content_type", ")", ":", "try", ":", "response", "=", "urllib2", ".", "urlopen", "(", "url", ")", "except", ":", "return", "False", "return", "response", ".", "headers", ".", "type", "==", "content_type" ]
Compare the content type header of the url param with the content_type param and return a boolean @param url -> string e.g. http://127.0.0.1/index @param content_type -> string e.g. text/html
[ "Compare", "the", "content", "type", "header", "of", "the", "url", "param", "with", "the", "content_type", "param", "and", "return", "a", "boolean" ]
python
test
31.75
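A usage sketch; the URL is hypothetical and the call needs a reachable server. This is Python 2 code (`urllib2`), and the bare `except` means any fetch failure quietly returns False.

compare_content_type('http://127.0.0.1/index', 'text/html')
# True only if the response carries a text/html Content-Type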
20c/grainy
grainy/core.py
https://github.com/20c/grainy/blob/cd956fd4144044993abc967974a127aab07a8ef6/grainy/core.py#L81-L113
def match(self, keys, partial=True): """ Check if the value of this namespace is matched by keys '*' is treated as wildcard Arguments: keys -- list of keys Examples: ns = Namespace("a.b.c") ns.match(["a"]) #True ns.match(["a","b"]) #True ns.match(["a","b","c"]) #True ns.match(["a","*","c"]) #True ns.match(["b","b","c"]) #False """ if not partial and len(keys) != self.length: return False c = 0 for k in keys: if c >= self.length: return False a = self.keys[c] if a != "*" and k != "*" and k != a: return False c += 1 return True
[ "def", "match", "(", "self", ",", "keys", ",", "partial", "=", "True", ")", ":", "if", "not", "partial", "and", "len", "(", "keys", ")", "!=", "self", ".", "length", ":", "return", "False", "c", "=", "0", "for", "k", "in", "keys", ":", "if", "c", ">=", "self", ".", "length", ":", "return", "False", "a", "=", "self", ".", "keys", "[", "c", "]", "if", "a", "!=", "\"*\"", "and", "k", "!=", "\"*\"", "and", "k", "!=", "a", ":", "return", "False", "c", "+=", "1", "return", "True" ]
Check if the value of this namespace is matched by keys '*' is treated as wildcard Arguments: keys -- list of keys Examples: ns = Namespace("a.b.c") ns.match(["a"]) #True ns.match(["a","b"]) #True ns.match(["a","b","c"]) #True ns.match(["a","*","c"]) #True ns.match(["b","b","c"]) #False
[ "Check", "if", "the", "value", "of", "this", "namespace", "is", "matched", "by", "keys" ]
python
train
23.212121
fr33jc/bang
bang/providers/hpcloud/load_balancer.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/load_balancer.py#L185-L195
def remove_lb_nodes(self, lb_id, node_ids): """ Remove one or more nodes :param string lb_id: Balancer id :param list node_ids: List of node ids """ log.info("Removing load balancer nodes %s" % node_ids) for node_id in node_ids: self._request('delete', '/loadbalancers/%s/nodes/%s' % (lb_id, node_id))
[ "def", "remove_lb_nodes", "(", "self", ",", "lb_id", ",", "node_ids", ")", ":", "log", ".", "info", "(", "\"Removing load balancer nodes %s\"", "%", "node_ids", ")", "for", "node_id", "in", "node_ids", ":", "self", ".", "_request", "(", "'delete'", ",", "'/loadbalancers/%s/nodes/%s'", "%", "(", "lb_id", ",", "node_id", ")", ")" ]
Remove one or more nodes :param string lb_id: Balancer id :param list node_ids: List of node ids
[ "Remove", "one", "or", "more", "nodes" ]
python
train
33
kmarekspartz/props
props/__init__.py
https://github.com/kmarekspartz/props/blob/a766cdbe6f7be846f602553280770546f124a91d/props/__init__.py#L221-L249
def tuple_of(*generators): """ Generates a tuple by generating values for each of the specified generators. This is a class factory, it makes a class which is a closure around the specified generators. """ class TupleOfGenerators(ArbitraryInterface): """ A closure class around the generators specified above, which generates a tuple of the generators. """ @classmethod def arbitrary(cls): """ Generate a tuple of the enclosed generators. """ return tuple([ arbitrary(generator) for generator in generators if generator is not tuple ]) TupleOfGenerators.__name__ = ''.join([ 'tuple_of(', ', '.join(generator.__name__ for generator in generators), ')' ]) return TupleOfGenerators
[ "def", "tuple_of", "(", "*", "generators", ")", ":", "class", "TupleOfGenerators", "(", "ArbitraryInterface", ")", ":", "\"\"\"\n A closure class around the generators specified above, which\n generates a tuple of the generators.\n \"\"\"", "@", "classmethod", "def", "arbitrary", "(", "cls", ")", ":", "\"\"\"\n Generate a tuple of the enclosed generators.\n \"\"\"", "return", "tuple", "(", "[", "arbitrary", "(", "generator", ")", "for", "generator", "in", "generators", "if", "generator", "is", "not", "tuple", "]", ")", "TupleOfGenerators", ".", "__name__", "=", "''", ".", "join", "(", "[", "'tuple_of('", ",", "', '", ".", "join", "(", "generator", ".", "__name__", "for", "generator", "in", "generators", ")", ",", "')'", "]", ")", "return", "TupleOfGenerators" ]
Generates a tuple by generating values for each of the specified generators. This is a class factory; it makes a class which is a closure around the specified generators.
[ "Generates", "a", "tuple", "by", "generating", "values", "for", "each", "of", "the", "specified", "generators", ".", "This", "is", "a", "class", "factory", "it", "makes", "a", "class", "which", "is", "a", "closure", "around", "the", "specified", "generators", "." ]
python
train
29.275862
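A standalone sketch of the class-factory pattern above, with two hypothetical generator classes. Whether props' top-level `arbitrary()` dispatcher accepts plain classes like these is an assumption here.

class Int(object):
    @classmethod
    def arbitrary(cls):
        return 42  # a real generator would randomize

class Char(object):
    @classmethod
    def arbitrary(cls):
        return 'x'

Pair = tuple_of(Int, Char)
Pair.__name__     # 'tuple_of(Int, Char)'
Pair.arbitrary()  # (42, 'x'), assuming arbitrary(Int) falls through to Int.arbitrary()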
apache/airflow
airflow/utils/compression.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/compression.py#L26-L43
def uncompress_file(input_file_name, file_extension, dest_dir): """ Uncompress gz and bz2 files """ if file_extension.lower() not in ('.gz', '.bz2'): raise NotImplementedError("Received {} format. Only gz and bz2 " "files can currently be uncompressed." .format(file_extension)) if file_extension.lower() == '.gz': fmodule = gzip.GzipFile elif file_extension.lower() == '.bz2': fmodule = bz2.BZ2File with fmodule(input_file_name, mode='rb') as f_compressed,\ NamedTemporaryFile(dir=dest_dir, mode='wb', delete=False) as f_uncompressed: shutil.copyfileobj(f_compressed, f_uncompressed) return f_uncompressed.name
[ "def", "uncompress_file", "(", "input_file_name", ",", "file_extension", ",", "dest_dir", ")", ":", "if", "file_extension", ".", "lower", "(", ")", "not", "in", "(", "'.gz'", ",", "'.bz2'", ")", ":", "raise", "NotImplementedError", "(", "\"Received {} format. Only gz and bz2 \"", "\"files can currently be uncompressed.\"", ".", "format", "(", "file_extension", ")", ")", "if", "file_extension", ".", "lower", "(", ")", "==", "'.gz'", ":", "fmodule", "=", "gzip", ".", "GzipFile", "elif", "file_extension", ".", "lower", "(", ")", "==", "'.bz2'", ":", "fmodule", "=", "bz2", ".", "BZ2File", "with", "fmodule", "(", "input_file_name", ",", "mode", "=", "'rb'", ")", "as", "f_compressed", ",", "NamedTemporaryFile", "(", "dir", "=", "dest_dir", ",", "mode", "=", "'wb'", ",", "delete", "=", "False", ")", "as", "f_uncompressed", ":", "shutil", ".", "copyfileobj", "(", "f_compressed", ",", "f_uncompressed", ")", "return", "f_uncompressed", ".", "name" ]
Uncompress gz and bz2 files
[ "Uncompress", "gz", "and", "bz2", "files" ]
python
test
43.777778
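A round-trip sketch for `uncompress_file`: write a small gzip file into a temp directory, then uncompress it with the helper above.

import gzip
import os
import tempfile

workdir = tempfile.mkdtemp()
src = os.path.join(workdir, 'data.txt.gz')
with gzip.open(src, 'wb') as f:
    f.write(b'hello')

out_path = uncompress_file(src, '.gz', workdir)
with open(out_path, 'rb') as f:
    assert f.read() == b'hello'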
iskandr/fancyimpute
fancyimpute/dictionary_helpers.py
https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/dictionary_helpers.py#L310-L330
def reverse_lookup_from_nested_dict(values_dict): """ Create reverse-lookup dictionary mapping each row key to a list of triplets: [(column key, value), ...] Parameters ---------- nested_values_dict : dict column_key -> row_key -> value weights_dict : dict column_key -> row_key -> sample weight Returns dictionary mapping row_key -> [(column key, value)] """ reverse_lookup = defaultdict(list) for column_key, column_dict in values_dict.items(): for row_key, value in column_dict.items(): entry = (column_key, value) reverse_lookup[row_key].append(entry) return reverse_lookup
[ "def", "reverse_lookup_from_nested_dict", "(", "values_dict", ")", ":", "reverse_lookup", "=", "defaultdict", "(", "list", ")", "for", "column_key", ",", "column_dict", "in", "values_dict", ".", "items", "(", ")", ":", "for", "row_key", ",", "value", "in", "column_dict", ".", "items", "(", ")", ":", "entry", "=", "(", "column_key", ",", "value", ")", "reverse_lookup", "[", "row_key", "]", ".", "append", "(", "entry", ")", "return", "reverse_lookup" ]
Create reverse-lookup dictionary mapping each row key to a list of pairs: [(column key, value), ...] Parameters ---------- values_dict : dict column_key -> row_key -> value Returns dictionary mapping row_key -> [(column key, value)]
[ "Create", "reverse", "-", "lookup", "dictionary", "mapping", "each", "row", "key", "to", "a", "list", "of", "pairs", ":", "[", "(", "column", "key", "value", ")", "...", "]" ]
python
train
31.238095
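A tiny concrete example of the inversion performed above:

values = {
    'height': {'alice': 1.7, 'bob': 1.8},
    'weight': {'alice': 60.0},
}
reverse_lookup_from_nested_dict(values)
# -> {'alice': [('height', 1.7), ('weight', 60.0)], 'bob': [('height', 1.8)]}
# (returned as a defaultdict(list))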
spyder-ide/spyder
spyder/widgets/comboboxes.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/comboboxes.py#L270-L274
def selected(self): """Action to be executed when a valid item has been selected""" self.selected_text = self.currentText() self.valid.emit(True, True) self.open_dir.emit(self.selected_text)
[ "def", "selected", "(", "self", ")", ":", "self", ".", "selected_text", "=", "self", ".", "currentText", "(", ")", "self", ".", "valid", ".", "emit", "(", "True", ",", "True", ")", "self", ".", "open_dir", ".", "emit", "(", "self", ".", "selected_text", ")" ]
Action to be executed when a valid item has been selected
[ "Action", "to", "be", "executed", "when", "a", "valid", "item", "has", "been", "selected" ]
python
train
44.4
OnroerendErfgoed/pyramid_urireferencer
pyramid_urireferencer/models.py
https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/models.py#L26-L39
def load_from_json(data): """ Load a :class:`RegistryReponse` from a dictionary or a string (that will be parsed as json). """ if isinstance(data, str): data = json.loads(data) applications = [ ApplicationResponse.load_from_json(a) for a in data['applications'] ] if data['applications'] is not None else [] return RegistryResponse( data['query_uri'], data['success'], data['has_references'], data['count'], applications )
[ "def", "load_from_json", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "json", ".", "loads", "(", "data", ")", "applications", "=", "[", "ApplicationResponse", ".", "load_from_json", "(", "a", ")", "for", "a", "in", "data", "[", "'applications'", "]", "]", "if", "data", "[", "'applications'", "]", "is", "not", "None", "else", "[", "]", "return", "RegistryResponse", "(", "data", "[", "'query_uri'", "]", ",", "data", "[", "'success'", "]", ",", "data", "[", "'has_references'", "]", ",", "data", "[", "'count'", "]", ",", "applications", ")" ]
Load a :class:`RegistryResponse` from a dictionary or a string (that will be parsed as json).
[ "Load", "a", ":", "class", ":", "RegistryResponse", "from", "a", "dictionary", "or", "a", "string", "(", "that", "will", "be", "parsed", "as", "json", ")", "." ]
python
train
37.785714
sveetch/py-css-styleguide
py_css_styleguide/parser.py
https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/parser.py#L54-L87
def digest_content(self, rule): """ Walk on rule content tokens to return a dict of properties. This is pretty naive and will choke/fail on everything that is more evolved than simple ``ident(string):value(string)`` Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by tinycss2. Returns: dict: Dictionnary of retrieved variables and properties. """ data = OrderedDict() current_key = None for token in rule.content: # Assume first identity token is the property name if token.type == 'ident': # Ignore starting '-' from css variables name = token.value if name.startswith('-'): name = name[1:] current_key = name data[current_key] = None # Assume first following string token is the property value. if token.type == 'string': data[current_key] = token.value return data
[ "def", "digest_content", "(", "self", ",", "rule", ")", ":", "data", "=", "OrderedDict", "(", ")", "current_key", "=", "None", "for", "token", "in", "rule", ".", "content", ":", "# Assume first identity token is the property name", "if", "token", ".", "type", "==", "'ident'", ":", "# Ignore starting '-' from css variables", "name", "=", "token", ".", "value", "if", "name", ".", "startswith", "(", "'-'", ")", ":", "name", "=", "name", "[", "1", ":", "]", "current_key", "=", "name", "data", "[", "current_key", "]", "=", "None", "# Assume first following string token is the property value.", "if", "token", ".", "type", "==", "'string'", ":", "data", "[", "current_key", "]", "=", "token", ".", "value", "return", "data" ]
Walk on rule content tokens to return a dict of properties. This is pretty naive and will choke/fail on everything that is more complex than simple ``ident(string):value(string)`` Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by tinycss2. Returns: dict: Dictionary of retrieved variables and properties.
[ "Walk", "on", "rule", "content", "tokens", "to", "return", "a", "dict", "of", "properties", "." ]
python
train
31.382353
pdkit/pdkit
pdkit/gait_processor.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/gait_processor.py#L268-L311
def walk_direction_preheel(self, data_frame): """ Estimate local walk (not cardinal) direction with pre-heel strike phase. Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors" :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :return: Unit vector of local walk (not cardinal) direction. :rtype: numpy.ndarray """ # Sum of absolute values across accelerometer axes: data = data_frame.x.abs() + data_frame.y.abs() + data_frame.z.abs() # Find maximum peaks of smoothed data: dummy, ipeaks_smooth = self.heel_strikes(data) data = data.values # Compute number of samples between peaks using the real part of the FFT: interpeak = compute_interpeak(data, self.sampling_frequency) decel = np.int(np.round(self.stride_fraction * interpeak)) # Find maximum peaks close to maximum peaks of smoothed data: ipeaks = [] for ipeak_smooth in ipeaks_smooth: ipeak = np.argmax(data[ipeak_smooth - decel:ipeak_smooth + decel]) ipeak += ipeak_smooth - decel ipeaks.append(ipeak) # Compute the average vector for each deceleration phase: vectors = [] for ipeak in ipeaks: decel_vectors = np.asarray([[data_frame.x[i], data_frame.y[i], data_frame.z[i]] for i in range(ipeak - decel, ipeak)]) vectors.append(np.mean(decel_vectors, axis=0)) # Compute the average deceleration vector and take the opposite direction: direction = -1 * np.mean(vectors, axis=0) # Return the unit vector in this direction: direction /= np.sqrt(direction.dot(direction)) return direction
[ "def", "walk_direction_preheel", "(", "self", ",", "data_frame", ")", ":", "# Sum of absolute values across accelerometer axes:", "data", "=", "data_frame", ".", "x", ".", "abs", "(", ")", "+", "data_frame", ".", "y", ".", "abs", "(", ")", "+", "data_frame", ".", "z", ".", "abs", "(", ")", "# Find maximum peaks of smoothed data:", "dummy", ",", "ipeaks_smooth", "=", "self", ".", "heel_strikes", "(", "data", ")", "data", "=", "data", ".", "values", "# Compute number of samples between peaks using the real part of the FFT:", "interpeak", "=", "compute_interpeak", "(", "data", ",", "self", ".", "sampling_frequency", ")", "decel", "=", "np", ".", "int", "(", "np", ".", "round", "(", "self", ".", "stride_fraction", "*", "interpeak", ")", ")", "# Find maximum peaks close to maximum peaks of smoothed data:", "ipeaks", "=", "[", "]", "for", "ipeak_smooth", "in", "ipeaks_smooth", ":", "ipeak", "=", "np", ".", "argmax", "(", "data", "[", "ipeak_smooth", "-", "decel", ":", "ipeak_smooth", "+", "decel", "]", ")", "ipeak", "+=", "ipeak_smooth", "-", "decel", "ipeaks", ".", "append", "(", "ipeak", ")", "# Compute the average vector for each deceleration phase:", "vectors", "=", "[", "]", "for", "ipeak", "in", "ipeaks", ":", "decel_vectors", "=", "np", ".", "asarray", "(", "[", "[", "data_frame", ".", "x", "[", "i", "]", ",", "data_frame", ".", "y", "[", "i", "]", ",", "data_frame", ".", "z", "[", "i", "]", "]", "for", "i", "in", "range", "(", "ipeak", "-", "decel", ",", "ipeak", ")", "]", ")", "vectors", ".", "append", "(", "np", ".", "mean", "(", "decel_vectors", ",", "axis", "=", "0", ")", ")", "# Compute the average deceleration vector and take the opposite direction:", "direction", "=", "-", "1", "*", "np", ".", "mean", "(", "vectors", ",", "axis", "=", "0", ")", "# Return the unit vector in this direction:", "direction", "/=", "np", ".", "sqrt", "(", "direction", ".", "dot", "(", "direction", ")", ")", "return", "direction" ]
Estimate local walk (not cardinal) direction with pre-heel strike phase. Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors" :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :return: Unit vector of local walk (not cardinal) direction. :rtype: numpy.ndarray
[ "Estimate", "local", "walk", "(", "not", "cardinal", ")", "direction", "with", "pre", "-", "heel", "strike", "phase", "." ]
python
train
42.568182
mdeous/fatbotslim
fatbotslim/irc/bot.py
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/irc/bot.py#L346-L361
def ctcp_reply(self, command, dst, message=None): """ Sends a reply to a CTCP request. :param command: CTCP command to use. :type command: str :param dst: sender of the initial request. :type dst: str :param message: data to attach to the reply. :type message: str """ if message is None: raw_cmd = u'\x01{0}\x01'.format(command) else: raw_cmd = u'\x01{0} {1}\x01'.format(command, message) self.notice(dst, raw_cmd)
[ "def", "ctcp_reply", "(", "self", ",", "command", ",", "dst", ",", "message", "=", "None", ")", ":", "if", "message", "is", "None", ":", "raw_cmd", "=", "u'\\x01{0}\\x01'", ".", "format", "(", "command", ")", "else", ":", "raw_cmd", "=", "u'\\x01{0} {1}\\x01'", ".", "format", "(", "command", ",", "message", ")", "self", ".", "notice", "(", "dst", ",", "raw_cmd", ")" ]
Sends a reply to a CTCP request. :param command: CTCP command to use. :type command: str :param dst: sender of the initial request. :type dst: str :param message: data to attach to the reply. :type message: str
[ "Sends", "a", "reply", "to", "a", "CTCP", "request", "." ]
python
train
32.5625
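The framing is the interesting part: CTCP wraps the payload in \x01 bytes and the reply goes out as a NOTICE. Reproducing the string construction standalone:

command, message = 'PING', '12345'
raw_cmd = u'\x01{0} {1}\x01'.format(command, message)
# raw_cmd == '\x01PING 12345\x01'; the bot then calls self.notice(dst, raw_cmd)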
KelSolaar/Umbra
umbra/ui/widgets/notification_QLabel.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/notification_QLabel.py#L775-L783
def __set_style_sheet(self): """ Sets the Widget stylesheet. """ colors = map( lambda x: "rgb({0}, {1}, {2}, {3})".format(x.red(), x.green(), x.blue(), int(self.__opacity * 255)), (self.__color, self.__background_color, self.__border_color)) self.setStyleSheet(self.__style.format(*colors))
[ "def", "__set_style_sheet", "(", "self", ")", ":", "colors", "=", "map", "(", "lambda", "x", ":", "\"rgb({0}, {1}, {2}, {3})\"", ".", "format", "(", "x", ".", "red", "(", ")", ",", "x", ".", "green", "(", ")", ",", "x", ".", "blue", "(", ")", ",", "int", "(", "self", ".", "__opacity", "*", "255", ")", ")", ",", "(", "self", ".", "__color", ",", "self", ".", "__background_color", ",", "self", ".", "__border_color", ")", ")", "self", ".", "setStyleSheet", "(", "self", ".", "__style", ".", "format", "(", "*", "colors", ")", ")" ]
Sets the Widget stylesheet.
[ "Sets", "the", "Widget", "stylesheet", "." ]
python
train
38.555556