Dataset columns (name, type, observed range):

repo              string, 7 to 54 chars
path              string, 4 to 192 chars
url               string, 87 to 284 chars
code              string, 78 to 104k chars
code_tokens       sequence
docstring         string, 1 to 46.9k chars
docstring_tokens  sequence
language          string, 1 distinct value
partition         string, 3 distinct values
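The records that follow repeat these fields in order. As a minimal sketch of how such a corpus might be loaded and inspected (assuming a Hugging Face `datasets`-style source; the dataset id below is a placeholder, since the dump does not name it):

    from datasets import load_dataset

    # "org/code-docstring-corpus" is hypothetical -- substitute the real dataset id.
    ds = load_dataset("org/code-docstring-corpus", split="train")

    row = ds[0]
    print(row["repo"], row["path"])           # e.g. spencerahill/aospy aospy/automate.py
    print(row["docstring"].splitlines()[0])   # first line of the docstring
    print(len(row["code_tokens"]), "code tokens")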
spencerahill/aospy
aospy/automate.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/automate.py#L101-L115
def _get_all_objs_of_type(type_, parent):
    """Get all attributes of the given type from the given object.

    Parameters
    ----------
    type_ : The desired type
    parent : The object from which to get the attributes with type
        matching 'type_'

    Returns
    -------
    A list (possibly empty) of attributes from 'parent'
    """
    return set([obj for obj in parent.__dict__.values()
                if isinstance(obj, type_)])
[ "def", "_get_all_objs_of_type", "(", "type_", ",", "parent", ")", ":", "return", "set", "(", "[", "obj", "for", "obj", "in", "parent", ".", "__dict__", ".", "values", "(", ")", "if", "isinstance", "(", "obj", ",", "type_", ")", "]", ")" ]
Get all attributes of the given type from the given object. Parameters ---------- type_ : The desired type parent : The object from which to get the attributes with type matching 'type_' Returns ------- A list (possibly empty) of attributes from 'parent'
[ "Get", "all", "attributes", "of", "the", "given", "type", "from", "the", "given", "object", "." ]
python
train
theislab/scanpy
scanpy/plotting/_utils.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_utils.py#L212-L236
def savefig(writekey, dpi=None, ext=None):
    """Save current figure to file.

    The `filename` is generated as follows:

        filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs
    """
    if dpi is None:
        # we need this as in notebooks, the internal figures are also influenced by 'savefig.dpi' this...
        if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:
            if settings._low_resolution_warning:
                logg.warn(
                    'You are using a low resolution (dpi<150) for saving figures.\n'
                    'Consider running `set_figure_params(dpi_save=...)`, which will '
                    'adjust `matplotlib.rcParams[\'savefig.dpi\']`')
                settings._low_resolution_warning = False
        else:
            dpi = rcParams['savefig.dpi']
    if not os.path.exists(settings.figdir): os.makedirs(settings.figdir)
    if settings.figdir[-1] != '/': settings.figdir += '/'
    if ext is None: ext = settings.file_format_figs
    filename = settings.figdir + writekey + settings.plot_suffix + '.' + ext
    # output the following msg at warning level; it's really important for the user
    logg.msg('saving figure to file', filename, v=1)
    pl.savefig(filename, dpi=dpi, bbox_inches='tight')
[ "def", "savefig", "(", "writekey", ",", "dpi", "=", "None", ",", "ext", "=", "None", ")", ":", "if", "dpi", "is", "None", ":", "# we need this as in notebooks, the internal figures are also influenced by 'savefig.dpi' this...", "if", "not", "isinstance", "(", "rcParams", "[", "'savefig.dpi'", "]", ",", "str", ")", "and", "rcParams", "[", "'savefig.dpi'", "]", "<", "150", ":", "if", "settings", ".", "_low_resolution_warning", ":", "logg", ".", "warn", "(", "'You are using a low resolution (dpi<150) for saving figures.\\n'", "'Consider running `set_figure_params(dpi_save=...)`, which will '", "'adjust `matplotlib.rcParams[\\'savefig.dpi\\']`'", ")", "settings", ".", "_low_resolution_warning", "=", "False", "else", ":", "dpi", "=", "rcParams", "[", "'savefig.dpi'", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "settings", ".", "figdir", ")", ":", "os", ".", "makedirs", "(", "settings", ".", "figdir", ")", "if", "settings", ".", "figdir", "[", "-", "1", "]", "!=", "'/'", ":", "settings", ".", "figdir", "+=", "'/'", "if", "ext", "is", "None", ":", "ext", "=", "settings", ".", "file_format_figs", "filename", "=", "settings", ".", "figdir", "+", "writekey", "+", "settings", ".", "plot_suffix", "+", "'.'", "+", "ext", "# output the following msg at warning level; it's really important for the user", "logg", ".", "msg", "(", "'saving figure to file'", ",", "filename", ",", "v", "=", "1", ")", "pl", ".", "savefig", "(", "filename", ",", "dpi", "=", "dpi", ",", "bbox_inches", "=", "'tight'", ")" ]
Save current figure to file. The `filename` is generated as follows: filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs
[ "Save", "current", "figure", "to", "file", "." ]
python
train
brettcannon/gidgethub
gidgethub/abc.py
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L34-L89
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],
                        data: Any, accept: str,
                        jwt: Opt[str] = None,
                        oauth_token: Opt[str] = None,
                        ) -> Tuple[bytes, Opt[str]]:
    """Construct and make an HTTP request."""
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    filled_url = sansio.format_url(url, url_vars)
    if jwt is not None:
        request_headers = sansio.create_headers(
            self.requester, accept=accept, jwt=jwt)
    elif oauth_token is not None:
        request_headers = sansio.create_headers(
            self.requester, accept=accept, oauth_token=oauth_token)
    else:
        # fallback to using oauth_token
        request_headers = sansio.create_headers(
            self.requester, accept=accept, oauth_token=self.oauth_token)
    cached = cacheable = False
    # Can't use None as a "no body" sentinel as it's a legitimate JSON type.
    if data == b"":
        body = b""
        request_headers["content-length"] = "0"
        if method == "GET" and self._cache is not None:
            cacheable = True
            try:
                etag, last_modified, data, more = self._cache[filled_url]
                cached = True
            except KeyError:
                pass
            else:
                if etag is not None:
                    request_headers["if-none-match"] = etag
                if last_modified is not None:
                    request_headers["if-modified-since"] = last_modified
    else:
        charset = "utf-8"
        body = json.dumps(data).encode(charset)
        request_headers['content-type'] = f"application/json; charset={charset}"
        request_headers['content-length'] = str(len(body))
    if self.rate_limit is not None:
        self.rate_limit.remaining -= 1
    response = await self._request(method, filled_url, request_headers, body)
    if not (response[0] == 304 and cached):
        data, self.rate_limit, more = sansio.decipher_response(*response)
        has_cache_details = ("etag" in response[1]
                             or "last-modified" in response[1])
        if self._cache is not None and cacheable and has_cache_details:
            etag = response[1].get("etag")
            last_modified = response[1].get("last-modified")
            self._cache[filled_url] = etag, last_modified, data, more
    return data, more
[ "async", "def", "_make_request", "(", "self", ",", "method", ":", "str", ",", "url", ":", "str", ",", "url_vars", ":", "Dict", "[", "str", ",", "str", "]", ",", "data", ":", "Any", ",", "accept", ":", "str", ",", "jwt", ":", "Opt", "[", "str", "]", "=", "None", ",", "oauth_token", ":", "Opt", "[", "str", "]", "=", "None", ",", ")", "->", "Tuple", "[", "bytes", ",", "Opt", "[", "str", "]", "]", ":", "if", "oauth_token", "is", "not", "None", "and", "jwt", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass both oauth_token and jwt.\"", ")", "filled_url", "=", "sansio", ".", "format_url", "(", "url", ",", "url_vars", ")", "if", "jwt", "is", "not", "None", ":", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "jwt", "=", "jwt", ")", "elif", "oauth_token", "is", "not", "None", ":", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "oauth_token", "=", "oauth_token", ")", "else", ":", "# fallback to using oauth_token", "request_headers", "=", "sansio", ".", "create_headers", "(", "self", ".", "requester", ",", "accept", "=", "accept", ",", "oauth_token", "=", "self", ".", "oauth_token", ")", "cached", "=", "cacheable", "=", "False", "# Can't use None as a \"no body\" sentinel as it's a legitimate JSON type.", "if", "data", "==", "b\"\"", ":", "body", "=", "b\"\"", "request_headers", "[", "\"content-length\"", "]", "=", "\"0\"", "if", "method", "==", "\"GET\"", "and", "self", ".", "_cache", "is", "not", "None", ":", "cacheable", "=", "True", "try", ":", "etag", ",", "last_modified", ",", "data", ",", "more", "=", "self", ".", "_cache", "[", "filled_url", "]", "cached", "=", "True", "except", "KeyError", ":", "pass", "else", ":", "if", "etag", "is", "not", "None", ":", "request_headers", "[", "\"if-none-match\"", "]", "=", "etag", "if", "last_modified", "is", "not", "None", ":", "request_headers", "[", "\"if-modified-since\"", "]", "=", "last_modified", "else", ":", "charset", "=", "\"utf-8\"", "body", "=", "json", ".", "dumps", "(", "data", ")", ".", "encode", "(", "charset", ")", "request_headers", "[", "'content-type'", "]", "=", "f\"application/json; charset={charset}\"", "request_headers", "[", "'content-length'", "]", "=", "str", "(", "len", "(", "body", ")", ")", "if", "self", ".", "rate_limit", "is", "not", "None", ":", "self", ".", "rate_limit", ".", "remaining", "-=", "1", "response", "=", "await", "self", ".", "_request", "(", "method", ",", "filled_url", ",", "request_headers", ",", "body", ")", "if", "not", "(", "response", "[", "0", "]", "==", "304", "and", "cached", ")", ":", "data", ",", "self", ".", "rate_limit", ",", "more", "=", "sansio", ".", "decipher_response", "(", "*", "response", ")", "has_cache_details", "=", "(", "\"etag\"", "in", "response", "[", "1", "]", "or", "\"last-modified\"", "in", "response", "[", "1", "]", ")", "if", "self", ".", "_cache", "is", "not", "None", "and", "cacheable", "and", "has_cache_details", ":", "etag", "=", "response", "[", "1", "]", ".", "get", "(", "\"etag\"", ")", "last_modified", "=", "response", "[", "1", "]", ".", "get", "(", "\"last-modified\"", ")", "self", ".", "_cache", "[", "filled_url", "]", "=", "etag", ",", "last_modified", ",", "data", ",", "more", "return", "data", ",", "more" ]
Construct and make an HTTP request.
[ "Construct", "and", "make", "an", "HTTP", "request", "." ]
python
train
brainiak/brainiak
brainiak/utils/utils.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L634-L675
def center_mass_exp(interval, scale=1.0):
    """ Calculate the center of mass of negative exponential distribution
        p(x) = exp(-x / scale) / scale
        in the interval of (interval_left, interval_right).
        scale is the same scale parameter as scipy.stats.expon.pdf

    Parameters
    ----------
    interval: size 2 tuple, float
        interval must be in the form of (interval_left, interval_right),
        where interval_left/interval_right is the starting/end point of the
        interval in which the center of mass is calculated for exponential
        distribution.
        Note that interval_left must be non-negative, since exponential is
        not supported in the negative domain, and interval_right must be
        bigger than interval_left (thus positive) to form a well-defined
        interval.
    scale: float, positive
        The scale parameter of the exponential distribution. See above.

    Returns
    -------
    m: float
        The center of mass in the interval of (interval_left,
        interval_right) for exponential distribution.
    """
    assert isinstance(interval, tuple), 'interval must be a tuple'
    assert len(interval) == 2, 'interval must be length two'
    (interval_left, interval_right) = interval
    assert interval_left >= 0, 'interval_left must be non-negative'
    assert interval_right > interval_left, \
        'interval_right must be bigger than interval_left'
    assert scale > 0, 'scale must be positive'
    if interval_right < np.inf:
        return ((interval_left + scale) * np.exp(-interval_left / scale) - (
            scale + interval_right) * np.exp(-interval_right / scale)) / (
            np.exp(-interval_left / scale) - np.exp(-interval_right / scale))
    else:
        return interval_left + scale
[ "def", "center_mass_exp", "(", "interval", ",", "scale", "=", "1.0", ")", ":", "assert", "isinstance", "(", "interval", ",", "tuple", ")", ",", "'interval must be a tuple'", "assert", "len", "(", "interval", ")", "==", "2", ",", "'interval must be length two'", "(", "interval_left", ",", "interval_right", ")", "=", "interval", "assert", "interval_left", ">=", "0", ",", "'interval_left must be non-negative'", "assert", "interval_right", ">", "interval_left", ",", "'interval_right must be bigger than interval_left'", "assert", "scale", ">", "0", ",", "'scale must be positive'", "if", "interval_right", "<", "np", ".", "inf", ":", "return", "(", "(", "interval_left", "+", "scale", ")", "*", "np", ".", "exp", "(", "-", "interval_left", "/", "scale", ")", "-", "(", "scale", "+", "interval_right", ")", "*", "np", ".", "exp", "(", "-", "interval_right", "/", "scale", ")", ")", "/", "(", "np", ".", "exp", "(", "-", "interval_left", "/", "scale", ")", "-", "np", ".", "exp", "(", "-", "interval_right", "/", "scale", ")", ")", "else", ":", "return", "interval_left", "+", "scale" ]
Calculate the center of mass of negative exponential distribution p(x) = exp(-x / scale) / scale in the interval of (interval_left, interval_right). scale is the same scale parameter as scipy.stats.expon.pdf Parameters ---------- interval: size 2 tuple, float interval must be in the form of (interval_left, interval_right), where interval_left/interval_right is the starting/end point of the interval in which the center of mass is calculated for exponential distribution. Note that interval_left must be non-negative, since exponential is not supported in the negative domain, and interval_right must be bigger than interval_left (thus positive) to form a well-defined interval. scale: float, positive The scale parameter of the exponential distribution. See above. Returns ------- m: float The center of mass in the interval of (interval_left, interval_right) for exponential distribution.
[ "Calculate", "the", "center", "of", "mass", "of", "negative", "exponential", "distribution", "p", "(", "x", ")", "=", "exp", "(", "-", "x", "/", "scale", ")", "/", "scale", "in", "the", "interval", "of", "(", "interval_left", "interval_right", ")", ".", "scale", "is", "the", "same", "scale", "parameter", "as", "scipy", ".", "stats", ".", "expon", ".", "pdf" ]
python
train
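A quick check of the closed form used in the record above (a sketch, not part of the dataset): for the density p(x) = e^{-x/s}/s restricted to [a, b], integration by parts gives

    m = \frac{\int_a^b x\, e^{-x/s}\, dx}{\int_a^b e^{-x/s}\, dx}
      = \frac{(a + s)\, e^{-a/s} - (b + s)\, e^{-b/s}}{e^{-a/s} - e^{-b/s}},

which is exactly the expression returned for finite interval_right, and letting b go to infinity collapses it to a + s, matching the else branch.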
urinieto/msaf
msaf/algorithms/sf/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/sf/segmenter.py#L63-L80
def pick_peaks(nc, L=16, offset_denom=0.1):
    """Obtain peaks from a novelty curve using an adaptive threshold."""
    offset = nc.mean() * float(offset_denom)
    th = filters.median_filter(nc, size=L) + offset
    #th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
    #import pylab as plt
    #plt.plot(nc)
    #plt.plot(th)
    #plt.show()
    # th = np.ones(nc.shape[0]) * nc.mean() - 0.08
    peaks = []
    for i in range(1, nc.shape[0] - 1):
        # is it a peak?
        if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
            # is it above the threshold?
            if nc[i] > th[i]:
                peaks.append(i)
    return peaks
[ "def", "pick_peaks", "(", "nc", ",", "L", "=", "16", ",", "offset_denom", "=", "0.1", ")", ":", "offset", "=", "nc", ".", "mean", "(", ")", "*", "float", "(", "offset_denom", ")", "th", "=", "filters", ".", "median_filter", "(", "nc", ",", "size", "=", "L", ")", "+", "offset", "#th = filters.gaussian_filter(nc, sigma=L/2., mode=\"nearest\") + offset", "#import pylab as plt", "#plt.plot(nc)", "#plt.plot(th)", "#plt.show()", "# th = np.ones(nc.shape[0]) * nc.mean() - 0.08", "peaks", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "nc", ".", "shape", "[", "0", "]", "-", "1", ")", ":", "# is it a peak?", "if", "nc", "[", "i", "-", "1", "]", "<", "nc", "[", "i", "]", "and", "nc", "[", "i", "]", ">", "nc", "[", "i", "+", "1", "]", ":", "# is it above the threshold?", "if", "nc", "[", "i", "]", ">", "th", "[", "i", "]", ":", "peaks", ".", "append", "(", "i", ")", "return", "peaks" ]
Obtain peaks from a novelty curve using an adaptive threshold.
[ "Obtain", "peaks", "from", "a", "novelty", "curve", "using", "an", "adaptive", "threshold", "." ]
python
test
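A minimal usage sketch for the peak picker above (assumes numpy/scipy are available and that `filters` refers to scipy.ndimage, which supplies the median_filter used above; the toy signal and bump positions are illustrative only):

    import numpy as np
    from scipy import ndimage as filters  # provides filters.median_filter as used above

    x = np.linspace(0, 1, 200)
    nc = 0.05 * np.random.rand(200)        # noisy baseline novelty curve
    for c in (0.2, 0.5, 0.8):              # three synthetic boundaries
        nc += np.exp(-((x - c) ** 2) / 1e-4)

    print(pick_peaks(nc, L=16))            # indices roughly at 40, 100, 160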
rraadd88/rohan
rohan/dandage/db/uniprot.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/db/uniprot.py#L46-L67
def map_ids(queries, frm='ACC', to='ENSEMBL_PRO_ID',
            organism_taxid=9606, test=False):
    """
    https://www.uniprot.org/help/api_idmapping
    """
    url = 'https://www.uniprot.org/uploadlists/'
    params = {
        'from': frm,
        'to': to,
        'format': 'tab',
        'organism': organism_taxid,
        'query': ' '.join(queries),
    }
    response = requests.get(url, params=params)
    if test:
        print(response.url)
    if response.ok:
        df = pd.read_table(response.url)
        df.columns = [frm, to]
        return df
    else:
        print('Something went wrong ', response.status_code)
[ "def", "map_ids", "(", "queries", ",", "frm", "=", "'ACC'", ",", "to", "=", "'ENSEMBL_PRO_ID'", ",", "organism_taxid", "=", "9606", ",", "test", "=", "False", ")", ":", "url", "=", "'https://www.uniprot.org/uploadlists/'", "params", "=", "{", "'from'", ":", "frm", ",", "'to'", ":", "to", ",", "'format'", ":", "'tab'", ",", "'organism'", ":", "organism_taxid", ",", "'query'", ":", "' '", ".", "join", "(", "queries", ")", ",", "}", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "test", ":", "print", "(", "response", ".", "url", ")", "if", "response", ".", "ok", ":", "df", "=", "pd", ".", "read_table", "(", "response", ".", "url", ")", "df", ".", "columns", "=", "[", "frm", ",", "to", "]", "return", "df", "else", ":", "print", "(", "'Something went wrong '", ",", "response", ".", "status_code", ")" ]
https://www.uniprot.org/help/api_idmapping
[ "https", ":", "//", "www", ".", "uniprot", ".", "org", "/", "help", "/", "api_idmapping" ]
python
train
inveniosoftware/invenio-webhooks
invenio_webhooks/models.py
https://github.com/inveniosoftware/invenio-webhooks/blob/f407cb2245464543ee474a81189fb9d3978bdde5/invenio_webhooks/models.py#L87-L112
def get_hook_url(self, access_token):
    """Get URL for webhook.

    In debug and testing mode the hook URL can be overwritten using
    ``WEBHOOKS_DEBUG_RECEIVER_URLS`` configuration variable to allow
    testing webhooks via services such as e.g. Ultrahook.

    .. code-block:: python

        WEBHOOKS_DEBUG_RECEIVER_URLS = dict(
            github='http://github.userid.ultrahook.com',
        )
    """
    # Allow overwriting hook URL in debug mode.
    if (current_app.debug or current_app.testing) and \
            current_app.config.get('WEBHOOKS_DEBUG_RECEIVER_URLS', None):
        url_pattern = current_app.config[
            'WEBHOOKS_DEBUG_RECEIVER_URLS'].get(self.receiver_id, None)
        if url_pattern:
            return url_pattern % dict(token=access_token)
    return url_for(
        'invenio_webhooks.event_list',
        receiver_id=self.receiver_id,
        access_token=access_token,
        _external=True
    )
[ "def", "get_hook_url", "(", "self", ",", "access_token", ")", ":", "# Allow overwriting hook URL in debug mode.", "if", "(", "current_app", ".", "debug", "or", "current_app", ".", "testing", ")", "and", "current_app", ".", "config", ".", "get", "(", "'WEBHOOKS_DEBUG_RECEIVER_URLS'", ",", "None", ")", ":", "url_pattern", "=", "current_app", ".", "config", "[", "'WEBHOOKS_DEBUG_RECEIVER_URLS'", "]", ".", "get", "(", "self", ".", "receiver_id", ",", "None", ")", "if", "url_pattern", ":", "return", "url_pattern", "%", "dict", "(", "token", "=", "access_token", ")", "return", "url_for", "(", "'invenio_webhooks.event_list'", ",", "receiver_id", "=", "self", ".", "receiver_id", ",", "access_token", "=", "access_token", ",", "_external", "=", "True", ")" ]
Get URL for webhook. In debug and testing mode the hook URL can be overwritten using ``WEBHOOKS_DEBUG_RECEIVER_URLS`` configuration variable to allow testing webhooks via services such as e.g. Ultrahook. .. code-block:: python WEBHOOKS_DEBUG_RECEIVER_URLS = dict( github='http://github.userid.ultrahook.com', )
[ "Get", "URL", "for", "webhook", "." ]
python
train
python-diamond/Diamond
src/diamond/handler/sentry.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/sentry.py#L349-L357
def process(self, metric):
    """
    process a single metric

    @type metric: diamond.metric.Metric
    @param metric: metric to process

    @rtype None
    """
    for rule in self.rules:
        rule.process(metric, self)
[ "def", "process", "(", "self", ",", "metric", ")", ":", "for", "rule", "in", "self", ".", "rules", ":", "rule", ".", "process", "(", "metric", ",", "self", ")" ]
process a single metric @type metric: diamond.metric.Metric @param metric: metric to process @rtype None
[ "process", "a", "single", "metric" ]
python
train
edx/edx-organizations
organizations/serializers.py
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/serializers.py#L18-L28
def serialize_organization(organization):
    """ Organization object-to-dict serialization """
    return {
        'id': organization.id,
        'name': organization.name,
        'short_name': organization.short_name,
        'description': organization.description,
        'logo': organization.logo
    }
[ "def", "serialize_organization", "(", "organization", ")", ":", "return", "{", "'id'", ":", "organization", ".", "id", ",", "'name'", ":", "organization", ".", "name", ",", "'short_name'", ":", "organization", ".", "short_name", ",", "'description'", ":", "organization", ".", "description", ",", "'logo'", ":", "organization", ".", "logo", "}" ]
Organization object-to-dict serialization
[ "Organization", "object", "-", "to", "-", "dict", "serialization" ]
python
valid
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttwidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidgetitem.py#L303-L329
def property(self, key, default=None):
    """
    Returns the custom data that is stored on this object.

    :param      key     | <str>
                default | <variant>

    :return     <variant>
    """
    if key == 'Name':
        return self.name()
    elif key == 'Start':
        return self.dateStart()
    elif key == 'End':
        return self.dateEnd()
    elif key == 'Calendar Days':
        return self.duration()
    elif key == 'Work Days':
        return self.weekdays()
    elif key == 'Time Start':
        return self.timeStart()
    elif key == 'Time End':
        return self.timeEnd()
    elif key == 'All Day':
        return self.isAllDay()
    else:
        return self._properties.get(nativestring(key), default)
[ "def", "property", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", "==", "'Name'", ":", "return", "self", ".", "name", "(", ")", "elif", "key", "==", "'Start'", ":", "return", "self", ".", "dateStart", "(", ")", "elif", "key", "==", "'End'", ":", "return", "self", ".", "dateEnd", "(", ")", "elif", "key", "==", "'Calendar Days'", ":", "return", "self", ".", "duration", "(", ")", "elif", "key", "==", "'Work Days'", ":", "return", "self", ".", "weekdays", "(", ")", "elif", "key", "==", "'Time Start'", ":", "return", "self", ".", "timeStart", "(", ")", "elif", "key", "==", "'Time End'", ":", "return", "self", ".", "timeEnd", "(", ")", "elif", "key", "==", "'All Day'", ":", "return", "self", ".", "isAllDay", "(", ")", "else", ":", "return", "self", ".", "_properties", ".", "get", "(", "nativestring", "(", "key", ")", ",", "default", ")" ]
Returns the custom data that is stored on this object. :param key | <str> default | <variant> :return <variant>
[ "Returns", "the", "custom", "data", "that", "is", "stored", "on", "this", "object", ".", ":", "param", "key", "|", "<str", ">", "default", "|", "<variant", ">", ":", "return", "<variant", ">" ]
python
train
peri-source/peri
peri/states.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L447-L454
def crb(self, params=None, *args, **kwargs):
    """
    Calculate the diagonal elements of the minimum covariance of the model
    with respect to parameters params. ``*args`` and ``**kwargs`` go to
    ``fisherinformation``.
    """
    fish = self.fisherinformation(params=params, *args, **kwargs)
    return np.sqrt(np.diag(np.linalg.inv(fish))) * self.sigma
[ "def", "crb", "(", "self", ",", "params", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "fish", "=", "self", ".", "fisherinformation", "(", "params", "=", "params", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "np", ".", "linalg", ".", "inv", "(", "fish", ")", ")", ")", "*", "self", ".", "sigma" ]
Calculate the diagonal elements of the minimum covariance of the model with respect to parameters params. ``*args`` and ``**kwargs`` go to ``fisherinformation``.
[ "Calculate", "the", "diagonal", "elements", "of", "the", "minimum", "covariance", "of", "the", "model", "with", "respect", "to", "parameters", "params", ".", "*", "args", "and", "**", "kwargs", "go", "to", "fisherinformation", "." ]
python
valid
IdentityPython/SATOSA
src/satosa/micro_services/consent.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/micro_services/consent.py#L206-L219
def _end_consent(self, context, internal_response):
    """
    Clear the state for consent and end the consent step

    :type context: satosa.context.Context
    :type internal_response: satosa.internal.InternalData
    :rtype: satosa.response.Response

    :param context: response context
    :param internal_response: the response
    :return: response
    """
    del context.state[STATE_KEY]
    return super().process(context, internal_response)
[ "def", "_end_consent", "(", "self", ",", "context", ",", "internal_response", ")", ":", "del", "context", ".", "state", "[", "STATE_KEY", "]", "return", "super", "(", ")", ".", "process", "(", "context", ",", "internal_response", ")" ]
Clear the state for consent and end the consent step :type context: satosa.context.Context :type internal_response: satosa.internal.InternalData :rtype: satosa.response.Response :param context: response context :param internal_response: the response :return: response
[ "Clear", "the", "state", "for", "consent", "and", "end", "the", "consent", "step" ]
python
train
ternaris/marv
marv_node/io.py
https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/marv_node/io.py#L34-L40
def create_stream(name, **header):
    """Create a stream for publishing messages.

    All keyword arguments will be used to form the header.
    """
    assert isinstance(name, basestring), name
    return CreateStream(parent=None, name=name, group=False, header=header)
[ "def", "create_stream", "(", "name", ",", "*", "*", "header", ")", ":", "assert", "isinstance", "(", "name", ",", "basestring", ")", ",", "name", "return", "CreateStream", "(", "parent", "=", "None", ",", "name", "=", "name", ",", "group", "=", "False", ",", "header", "=", "header", ")" ]
Create a stream for publishing messages. All keyword arguments will be used to form the header.
[ "Create", "a", "stream", "for", "publishing", "messages", "." ]
python
train
ethereum/py-evm
eth/vm/computation.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L369-L390
def prepare_child_message(self,
                          gas: int,
                          to: Address,
                          value: int,
                          data: BytesOrView,
                          code: bytes,
                          **kwargs: Any) -> Message:
    """
    Helper method for creating a child computation.
    """
    kwargs.setdefault('sender', self.msg.storage_address)

    child_message = Message(
        gas=gas,
        to=to,
        value=value,
        data=data,
        code=code,
        depth=self.msg.depth + 1,
        **kwargs
    )
    return child_message
[ "def", "prepare_child_message", "(", "self", ",", "gas", ":", "int", ",", "to", ":", "Address", ",", "value", ":", "int", ",", "data", ":", "BytesOrView", ",", "code", ":", "bytes", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "Message", ":", "kwargs", ".", "setdefault", "(", "'sender'", ",", "self", ".", "msg", ".", "storage_address", ")", "child_message", "=", "Message", "(", "gas", "=", "gas", ",", "to", "=", "to", ",", "value", "=", "value", ",", "data", "=", "data", ",", "code", "=", "code", ",", "depth", "=", "self", ".", "msg", ".", "depth", "+", "1", ",", "*", "*", "kwargs", ")", "return", "child_message" ]
Helper method for creating a child computation.
[ "Helper", "method", "for", "creating", "a", "child", "computation", "." ]
python
train
CivicSpleen/ambry
ambry/orm/database.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/database.py#L1013-L1030
def _get_all_migrations():
    """ Returns sorted list of all migrations.

    Returns:
        list of (int, str) tuples: first elem of the tuple is migration number,
            second if module name.
    """
    from . import migrations
    package = migrations
    prefix = package.__name__ + '.'

    all_migrations = []
    for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
        version = int(modname.split('.')[-1].split('_')[0])
        all_migrations.append((version, modname))

    all_migrations = sorted(all_migrations, key=lambda x: x[0])
    return all_migrations
[ "def", "_get_all_migrations", "(", ")", ":", "from", ".", "import", "migrations", "package", "=", "migrations", "prefix", "=", "package", ".", "__name__", "+", "'.'", "all_migrations", "=", "[", "]", "for", "importer", ",", "modname", ",", "ispkg", "in", "pkgutil", ".", "iter_modules", "(", "package", ".", "__path__", ",", "prefix", ")", ":", "version", "=", "int", "(", "modname", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "all_migrations", ".", "append", "(", "(", "version", ",", "modname", ")", ")", "all_migrations", "=", "sorted", "(", "all_migrations", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "return", "all_migrations" ]
Returns sorted list of all migrations. Returns: list of (int, str) tuples: first elem of the tuple is migration number, second if module name.
[ "Returns", "sorted", "list", "of", "all", "migrations", "." ]
python
train
rckclmbr/pyportify
pyportify/pkcs1/primitives.py
https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primitives.py#L67-L79
def i2osp(x, x_len):
    '''Converts the integer x to its big-endian representation of length
       x_len.
    '''
    if x > 256**x_len:
        raise exceptions.IntegerTooLarge
    h = hex(x)[2:]
    if h[-1] == 'L':
        h = h[:-1]
    if len(h) & 1 == 1:
        h = '0%s' % h
    x = binascii.unhexlify(h)
    return b'\x00' * int(x_len-len(x)) + x
[ "def", "i2osp", "(", "x", ",", "x_len", ")", ":", "if", "x", ">", "256", "**", "x_len", ":", "raise", "exceptions", ".", "IntegerTooLarge", "h", "=", "hex", "(", "x", ")", "[", "2", ":", "]", "if", "h", "[", "-", "1", "]", "==", "'L'", ":", "h", "=", "h", "[", ":", "-", "1", "]", "if", "len", "(", "h", ")", "&", "1", "==", "1", ":", "h", "=", "'0%s'", "%", "h", "x", "=", "binascii", ".", "unhexlify", "(", "h", ")", "return", "b'\\x00'", "*", "int", "(", "x_len", "-", "len", "(", "x", ")", ")", "+", "x" ]
Converts the integer x to its big-endian representation of length x_len.
[ "Converts", "the", "integer", "x", "to", "its", "big", "-", "endian", "representation", "of", "length", "x_len", "." ]
python
train
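As a cross-check on the record above (a side note, not taken from the dataset): on Python 3 the same big-endian conversion is provided by int.to_bytes, so for inputs that fit the requested length the two should agree:

    x = 65537
    assert x.to_bytes(4, 'big') == b'\x00\x01\x00\x01'
    # i2osp(65537, 4) above builds the same value via hex conversion plus zero-padding.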
spyder-ide/spyder
spyder/plugins/findinfiles/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/findinfiles/widgets.py#L1000-L1027
def find(self):
    """Call the find function"""
    options = self.find_options.get_options()
    if options is None:
        return
    self.stop_and_reset_thread(ignore_results=True)
    self.search_thread = SearchThread(self)
    self.search_thread.sig_finished.connect(self.search_complete)
    self.search_thread.sig_current_file.connect(
        lambda x: self.status_bar.set_label_path(x, folder=False)
    )
    self.search_thread.sig_current_folder.connect(
        lambda x: self.status_bar.set_label_path(x, folder=True)
    )
    self.search_thread.sig_file_match.connect(
        self.result_browser.append_result
    )
    self.search_thread.sig_out_print.connect(
        lambda x: sys.stdout.write(str(x) + "\n")
    )
    self.status_bar.reset()
    self.result_browser.clear_title(
        self.find_options.search_text.currentText())
    self.search_thread.initialize(*options)
    self.search_thread.start()
    self.find_options.ok_button.setEnabled(False)
    self.find_options.stop_button.setEnabled(True)
    self.status_bar.show()
[ "def", "find", "(", "self", ")", ":", "options", "=", "self", ".", "find_options", ".", "get_options", "(", ")", "if", "options", "is", "None", ":", "return", "self", ".", "stop_and_reset_thread", "(", "ignore_results", "=", "True", ")", "self", ".", "search_thread", "=", "SearchThread", "(", "self", ")", "self", ".", "search_thread", ".", "sig_finished", ".", "connect", "(", "self", ".", "search_complete", ")", "self", ".", "search_thread", ".", "sig_current_file", ".", "connect", "(", "lambda", "x", ":", "self", ".", "status_bar", ".", "set_label_path", "(", "x", ",", "folder", "=", "False", ")", ")", "self", ".", "search_thread", ".", "sig_current_folder", ".", "connect", "(", "lambda", "x", ":", "self", ".", "status_bar", ".", "set_label_path", "(", "x", ",", "folder", "=", "True", ")", ")", "self", ".", "search_thread", ".", "sig_file_match", ".", "connect", "(", "self", ".", "result_browser", ".", "append_result", ")", "self", ".", "search_thread", ".", "sig_out_print", ".", "connect", "(", "lambda", "x", ":", "sys", ".", "stdout", ".", "write", "(", "str", "(", "x", ")", "+", "\"\\n\"", ")", ")", "self", ".", "status_bar", ".", "reset", "(", ")", "self", ".", "result_browser", ".", "clear_title", "(", "self", ".", "find_options", ".", "search_text", ".", "currentText", "(", ")", ")", "self", ".", "search_thread", ".", "initialize", "(", "*", "options", ")", "self", ".", "search_thread", ".", "start", "(", ")", "self", ".", "find_options", ".", "ok_button", ".", "setEnabled", "(", "False", ")", "self", ".", "find_options", ".", "stop_button", ".", "setEnabled", "(", "True", ")", "self", ".", "status_bar", ".", "show", "(", ")" ]
Call the find function
[ "Call", "the", "find", "function" ]
python
train
django-danceschool/django-danceschool
danceschool/financial/views.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/financial/views.py#L428-L473
def get(self,request,*args,**kwargs):
    '''
    Pass any permissable GET data.  URL parameters override GET parameters
    '''
    try:
        year = int(self.kwargs.get('year'))
    except (ValueError, TypeError):
        year = getIntFromGet(request,'year')

    if self.kwargs.get('month'):
        try:
            month = int(self.kwargs.get('month'))
        except (ValueError, TypeError):
            try:
                month = list(month_name).index(self.kwargs.get('month').title())
            except (ValueError, TypeError):
                month = None
    else:
        month = getIntFromGet(request, 'month')

    try:
        event_id = int(self.kwargs.get('event'))
    except (ValueError, TypeError):
        event_id = getIntFromGet(request, 'event')

    event = None
    if event_id:
        try:
            event = Event.objects.get(id=event_id)
        except ObjectDoesNotExist:
            pass

    kwargs.update({
        'year': year,
        'month': month,
        'startDate': getDateTimeFromGet(request,'startDate'),
        'endDate': getDateTimeFromGet(request,'endDate'),
        'basis': request.GET.get('basis'),
        'event': event,
    })

    if kwargs.get('basis') not in EXPENSE_BASES.keys():
        kwargs['basis'] = 'accrualDate'

    context = self.get_context_data(**kwargs)
    return self.render_to_response(context)
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "year", "=", "int", "(", "self", ".", "kwargs", ".", "get", "(", "'year'", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "year", "=", "getIntFromGet", "(", "request", ",", "'year'", ")", "if", "self", ".", "kwargs", ".", "get", "(", "'month'", ")", ":", "try", ":", "month", "=", "int", "(", "self", ".", "kwargs", ".", "get", "(", "'month'", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "try", ":", "month", "=", "list", "(", "month_name", ")", ".", "index", "(", "self", ".", "kwargs", ".", "get", "(", "'month'", ")", ".", "title", "(", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "month", "=", "None", "else", ":", "month", "=", "getIntFromGet", "(", "request", ",", "'month'", ")", "try", ":", "event_id", "=", "int", "(", "self", ".", "kwargs", ".", "get", "(", "'event'", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "event_id", "=", "getIntFromGet", "(", "request", ",", "'event'", ")", "event", "=", "None", "if", "event_id", ":", "try", ":", "event", "=", "Event", ".", "objects", ".", "get", "(", "id", "=", "event_id", ")", "except", "ObjectDoesNotExist", ":", "pass", "kwargs", ".", "update", "(", "{", "'year'", ":", "year", ",", "'month'", ":", "month", ",", "'startDate'", ":", "getDateTimeFromGet", "(", "request", ",", "'startDate'", ")", ",", "'endDate'", ":", "getDateTimeFromGet", "(", "request", ",", "'endDate'", ")", ",", "'basis'", ":", "request", ".", "GET", ".", "get", "(", "'basis'", ")", ",", "'event'", ":", "event", ",", "}", ")", "if", "kwargs", ".", "get", "(", "'basis'", ")", "not", "in", "EXPENSE_BASES", ".", "keys", "(", ")", ":", "kwargs", "[", "'basis'", "]", "=", "'accrualDate'", "context", "=", "self", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "return", "self", ".", "render_to_response", "(", "context", ")" ]
Pass any permissable GET data. URL parameters override GET parameters
[ "Pass", "any", "permissable", "GET", "data", ".", "URL", "parameters", "override", "GET", "parameters" ]
python
train
jamieleshaw/lurklib
lurklib/core.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/core.py#L187-L198
def stepback(self, append=False):
    """
    Stepbacks/reverses the buffer.
    Optional arguments:
    * append=False - If True, appends the data onto the buffer;
        else, it just steps the index back.
    """
    if append:
        data = self._buffer[self._index - 1]
        self._buffer.append(data)
    else:
        self._index -= 1
[ "def", "stepback", "(", "self", ",", "append", "=", "False", ")", ":", "if", "append", ":", "data", "=", "self", ".", "_buffer", "[", "self", ".", "_index", "-", "1", "]", "self", ".", "_buffer", ".", "append", "(", "data", ")", "else", ":", "self", ".", "_index", "-=", "1" ]
Stepbacks/reverses the buffer. Optional arguments: * append=False - If True, appends the data onto the buffer; else, it just steps the index back.
[ "Stepbacks", "/", "reverses", "the", "buffer", ".", "Optional", "arguments", ":", "*", "append", "=", "False", "-", "If", "True", "appends", "the", "data", "onto", "the", "buffer", ";", "else", "it", "just", "steps", "the", "index", "back", "." ]
python
train
GNS3/gns3-server
gns3server/compute/dynamips/hypervisor.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/hypervisor.py#L174-L187
def read_stdout(self):
    """
    Reads the standard output of the Dynamips process.
    Only use when the process has been stopped or has crashed.
    """
    output = ""
    if self._stdout_file and os.access(self._stdout_file, os.R_OK):
        try:
            with open(self._stdout_file, "rb") as file:
                output = file.read().decode("utf-8", errors="replace")
        except OSError as e:
            log.warn("could not read {}: {}".format(self._stdout_file, e))
    return output
[ "def", "read_stdout", "(", "self", ")", ":", "output", "=", "\"\"", "if", "self", ".", "_stdout_file", "and", "os", ".", "access", "(", "self", ".", "_stdout_file", ",", "os", ".", "R_OK", ")", ":", "try", ":", "with", "open", "(", "self", ".", "_stdout_file", ",", "\"rb\"", ")", "as", "file", ":", "output", "=", "file", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ",", "errors", "=", "\"replace\"", ")", "except", "OSError", "as", "e", ":", "log", ".", "warn", "(", "\"could not read {}: {}\"", ".", "format", "(", "self", ".", "_stdout_file", ",", "e", ")", ")", "return", "output" ]
Reads the standard output of the Dynamips process. Only use when the process has been stopped or has crashed.
[ "Reads", "the", "standard", "output", "of", "the", "Dynamips", "process", ".", "Only", "use", "when", "the", "process", "has", "been", "stopped", "or", "has", "crashed", "." ]
python
train
fvdsn/py-xml-escpos
xmlescpos/escpos.py
https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L467-L475
def qr(self,text):
    """ Print QR Code for the provided string """
    qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
    qr_code.add_data(text)
    qr_code.make(fit=True)
    qr_img = qr_code.make_image()
    im = qr_img._img.convert("RGB")
    # Convert the RGB image in printable image
    self._convert_image(im)
[ "def", "qr", "(", "self", ",", "text", ")", ":", "qr_code", "=", "qrcode", ".", "QRCode", "(", "version", "=", "4", ",", "box_size", "=", "4", ",", "border", "=", "1", ")", "qr_code", ".", "add_data", "(", "text", ")", "qr_code", ".", "make", "(", "fit", "=", "True", ")", "qr_img", "=", "qr_code", ".", "make_image", "(", ")", "im", "=", "qr_img", ".", "_img", ".", "convert", "(", "\"RGB\"", ")", "# Convert the RGB image in printable image", "self", ".", "_convert_image", "(", "im", ")" ]
Print QR Code for the provided string
[ "Print", "QR", "Code", "for", "the", "provided", "string" ]
python
train
hanguokai/youku
youku/youku_upload.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_upload.py#L291-L321
def upload(self, params={}):
    """start uploading the file until upload is complete or error.
       This is the main method to used, If you do not care about
       state of process.

       Args:
            params: a dict object describe video info, eg title,
            tags, description, category.
            all video params see the doc of prepare_video_params.

       Returns:
            return video_id if upload successfully
    """
    if self.upload_token is not None:
        # resume upload
        status = self.check()
        if status['status'] != 4:
            return self.commit()
        else:
            self.new_slice()
            while self.slice_task_id != 0:
                self.upload_slice()
            return self.commit()
    else:
        # new upload
        self.create(self.prepare_video_params(**params))
        self.create_file()
        self.new_slice()
        while self.slice_task_id != 0:
            self.upload_slice()
        return self.commit()
[ "def", "upload", "(", "self", ",", "params", "=", "{", "}", ")", ":", "if", "self", ".", "upload_token", "is", "not", "None", ":", "# resume upload", "status", "=", "self", ".", "check", "(", ")", "if", "status", "[", "'status'", "]", "!=", "4", ":", "return", "self", ".", "commit", "(", ")", "else", ":", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")", "else", ":", "# new upload", "self", ".", "create", "(", "self", ".", "prepare_video_params", "(", "*", "*", "params", ")", ")", "self", ".", "create_file", "(", ")", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")" ]
start uploading the file until upload is complete or error. This is the main method to used, If you do not care about state of process. Args: params: a dict object describe video info, eg title, tags, description, category. all video params see the doc of prepare_video_params. Returns: return video_id if upload successfully
[ "start", "uploading", "the", "file", "until", "upload", "is", "complete", "or", "error", ".", "This", "is", "the", "main", "method", "to", "used", "If", "you", "do", "not", "care", "about", "state", "of", "process", "." ]
python
train
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L680-L687
def bbox(self):
    """returns the bounding box for the segment in the form
    (xmin, xmax, ymin, ymax)."""
    xmin = min(self.start.real, self.end.real)
    xmax = max(self.start.real, self.end.real)
    ymin = min(self.start.imag, self.end.imag)
    ymax = max(self.start.imag, self.end.imag)
    return xmin, xmax, ymin, ymax
[ "def", "bbox", "(", "self", ")", ":", "xmin", "=", "min", "(", "self", ".", "start", ".", "real", ",", "self", ".", "end", ".", "real", ")", "xmax", "=", "max", "(", "self", ".", "start", ".", "real", ",", "self", ".", "end", ".", "real", ")", "ymin", "=", "min", "(", "self", ".", "start", ".", "imag", ",", "self", ".", "end", ".", "imag", ")", "ymax", "=", "max", "(", "self", ".", "start", ".", "imag", ",", "self", ".", "end", ".", "imag", ")", "return", "xmin", ",", "xmax", ",", "ymin", ",", "ymax" ]
returns the bounding box for the segment in the form (xmin, xmax, ymin, ymax).
[ "returns", "the", "bounding", "box", "for", "the", "segment", "in", "the", "form", "(", "xmin", "xmax", "ymin", "ymax", ")", "." ]
python
train
googleapis/google-cloud-python
datastore/google/cloud/datastore/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/client.py#L54-L77
def _determine_default_project(project=None):
    """Determine default project explicitly or implicitly as fall-back.

    In implicit case, supports four environments. In order of precedence,
    the implicit environments are:

    * DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing)
    * GOOGLE_CLOUD_PROJECT environment variable
    * Google App Engine application ID
    * Google Compute Engine project ID (from metadata server)

    :type project: str
    :param project: Optional. The project to use as default.

    :rtype: str or ``NoneType``
    :returns: Default project if it can be determined.
    """
    if project is None:
        project = _get_gcd_project()

    if project is None:
        project = _base_default_project(project=project)

    return project
[ "def", "_determine_default_project", "(", "project", "=", "None", ")", ":", "if", "project", "is", "None", ":", "project", "=", "_get_gcd_project", "(", ")", "if", "project", "is", "None", ":", "project", "=", "_base_default_project", "(", "project", "=", "project", ")", "return", "project" ]
Determine default project explicitly or implicitly as fall-back. In implicit case, supports four environments. In order of precedence, the implicit environments are: * DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing) * GOOGLE_CLOUD_PROJECT environment variable * Google App Engine application ID * Google Compute Engine project ID (from metadata server) :type project: str :param project: Optional. The project to use as default. :rtype: str or ``NoneType`` :returns: Default project if it can be determined.
[ "Determine", "default", "project", "explicitly", "or", "implicitly", "as", "fall", "-", "back", "." ]
python
train
MisterY/gnucash-portfolio
gnucash_portfolio/lib/settings.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/settings.py#L89-L93
def file_path(self) -> str:
    """ Settings file absolute path"""
    user_dir = self.__get_user_path()
    file_path = path.abspath(path.join(user_dir, self.FILENAME))
    return file_path
[ "def", "file_path", "(", "self", ")", "->", "str", ":", "user_dir", "=", "self", ".", "__get_user_path", "(", ")", "file_path", "=", "path", ".", "abspath", "(", "path", ".", "join", "(", "user_dir", ",", "self", ".", "FILENAME", ")", ")", "return", "file_path" ]
Settings file absolute path
[ "Settings", "file", "absolute", "path" ]
python
train
sdispater/orator
orator/orm/relations/morph_pivot.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/morph_pivot.py#L25-L35
def delete(self):
    """
    Delete the pivot model record from the database.

    :rtype: int
    """
    query = self._get_delete_query()

    query.where(self._morph_type, self._morph_name)

    return query.delete()
[ "def", "delete", "(", "self", ")", ":", "query", "=", "self", ".", "_get_delete_query", "(", ")", "query", ".", "where", "(", "self", ".", "_morph_type", ",", "self", ".", "_morph_name", ")", "return", "query", ".", "delete", "(", ")" ]
Delete the pivot model record from the database. :rtype: int
[ "Delete", "the", "pivot", "model", "record", "from", "the", "database", "." ]
python
train
ph4r05/monero-serialize
monero_serialize/xmrboost.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrboost.py#L232-L244
async def version(self, tp, params, version=None, elem=None):
    """
    Symmetric version management

    :param tp:
    :param params:
    :param version:
    :return:
    """
    if self.writing:
        return await self.set_version(tp, params, version, elem)
    else:
        return await self.get_version(tp, params)
[ "async", "def", "version", "(", "self", ",", "tp", ",", "params", ",", "version", "=", "None", ",", "elem", "=", "None", ")", ":", "if", "self", ".", "writing", ":", "return", "await", "self", ".", "set_version", "(", "tp", ",", "params", ",", "version", ",", "elem", ")", "else", ":", "return", "await", "self", ".", "get_version", "(", "tp", ",", "params", ")" ]
Symmetric version management :param tp: :param params: :param version: :return:
[ "Symmetric", "version", "management" ]
python
train
cloudmesh/cloudmesh-common
cloudmesh/common/FlatDict.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L41-L67
def flatten(d, parent_key='', sep='__'):
    """
    flattens the dict into a one dimensional dictionary

    :param d: multidimensional dict
    :param parent_key: replaces from the parent key
    :param sep: the separation character used when fattening. the default is __
    :return: the flattened dict
    """
    # http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
    if type(d) == list:
        flat = []
        for entry in d:
            flat.append(flatten(entry, parent_key=parent_key, sep=sep))
        return flat
    else:
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collectionsAbc.MutableMapping):
                items.extend(flatten(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)
[ "def", "flatten", "(", "d", ",", "parent_key", "=", "''", ",", "sep", "=", "'__'", ")", ":", "# http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys", "if", "type", "(", "d", ")", "==", "list", ":", "flat", "=", "[", "]", "for", "entry", "in", "d", ":", "flat", ".", "append", "(", "flatten", "(", "entry", ",", "parent_key", "=", "parent_key", ",", "sep", "=", "sep", ")", ")", "return", "flat", "else", ":", "items", "=", "[", "]", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "new_key", "=", "parent_key", "+", "sep", "+", "k", "if", "parent_key", "else", "k", "if", "isinstance", "(", "v", ",", "collectionsAbc", ".", "MutableMapping", ")", ":", "items", ".", "extend", "(", "flatten", "(", "v", ",", "new_key", ",", "sep", "=", "sep", ")", ".", "items", "(", ")", ")", "else", ":", "items", ".", "append", "(", "(", "new_key", ",", "v", ")", ")", "return", "dict", "(", "items", ")" ]
flattens the dict into a one dimensional dictionary :param d: multidimensional dict :param parent_key: replaces from the parent key :param sep: the separation character used when fattening. the default is __ :return: the flattened dict
[ "flattens", "the", "dict", "into", "a", "one", "dimensional", "dictionary" ]
python
train
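A short usage sketch for the flatten helper in the record above (assumes the function is in scope and that `collectionsAbc` in its module aliases collections.abc, which is not stated in the dump):

    from collections import abc as collectionsAbc  # stand-in for the alias the module presumably defines

    config = {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
    print(flatten(config))
    # {'db__host': 'localhost', 'db__port': 5432, 'debug': True}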
odlgroup/odl
odl/solvers/functional/functional.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/functional.py#L628-L657
def gradient(self):
    """Gradient of the compositon according to the chain rule."""
    func = self.left
    op = self.right

    class FunctionalCompositionGradient(Operator):

        """Gradient of the compositon according to the chain rule."""

        def __init__(self):
            """Initialize a new instance."""
            super(FunctionalCompositionGradient, self).__init__(
                op.domain, op.domain, linear=False)

        def _call(self, x):
            """Apply the gradient operator to the given point."""
            return op.derivative(x).adjoint(func.gradient(op(x)))

        def derivative(self, x):
            """The derivative in point ``x``.

            This is only defined
            """
            if not op.is_linear:
                raise NotImplementedError('derivative only implemented '
                                          'for linear opertors.')
            else:
                return (op.adjoint * func.gradient * op).derivative(x)

    return FunctionalCompositionGradient()
[ "def", "gradient", "(", "self", ")", ":", "func", "=", "self", ".", "left", "op", "=", "self", ".", "right", "class", "FunctionalCompositionGradient", "(", "Operator", ")", ":", "\"\"\"Gradient of the compositon according to the chain rule.\"\"\"", "def", "__init__", "(", "self", ")", ":", "\"\"\"Initialize a new instance.\"\"\"", "super", "(", "FunctionalCompositionGradient", ",", "self", ")", ".", "__init__", "(", "op", ".", "domain", ",", "op", ".", "domain", ",", "linear", "=", "False", ")", "def", "_call", "(", "self", ",", "x", ")", ":", "\"\"\"Apply the gradient operator to the given point.\"\"\"", "return", "op", ".", "derivative", "(", "x", ")", ".", "adjoint", "(", "func", ".", "gradient", "(", "op", "(", "x", ")", ")", ")", "def", "derivative", "(", "self", ",", "x", ")", ":", "\"\"\"The derivative in point ``x``.\n\n This is only defined\n \"\"\"", "if", "not", "op", ".", "is_linear", ":", "raise", "NotImplementedError", "(", "'derivative only implemented '", "'for linear opertors.'", ")", "else", ":", "return", "(", "op", ".", "adjoint", "*", "func", ".", "gradient", "*", "op", ")", ".", "derivative", "(", "x", ")", "return", "FunctionalCompositionGradient", "(", ")" ]
Gradient of the compositon according to the chain rule.
[ "Gradient", "of", "the", "compositon", "according", "to", "the", "chain", "rule", "." ]
python
train
maweigert/gputools
gputools/fft/oclfft_convolve.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L15-L45
def fft_convolve(data, h, res_g = None,
                 plan = None, inplace = False,
                 kernel_is_fft = False,
                 kernel_is_fftshifted = False):
    """ convolves data with kernel h via FFTs

    data should be either a numpy array or a OCLArray (see doc for fft)
    both data and h should be same shape

    if data/h are OCLArrays, then:
        - type should be complex64
        - shape should be equal and power of two
        - h is assumed to be already fftshifted
          (otherwise set kernel_is_fftshifted to true)
    """

    if isinstance(data,np.ndarray):
        return _fft_convolve_numpy(data, h, plan = plan,
                                   kernel_is_fft = kernel_is_fft,
                                   kernel_is_fftshifted = kernel_is_fftshifted)
    elif isinstance(data,OCLArray):
        return _fft_convolve_gpu(data,h, res_g = res_g,
                                 plan = plan,
                                 inplace = inplace,
                                 kernel_is_fft = kernel_is_fft)
    else:
        raise TypeError("array argument (1) has bad type: %s"%type(data))
[ "def", "fft_convolve", "(", "data", ",", "h", ",", "res_g", "=", "None", ",", "plan", "=", "None", ",", "inplace", "=", "False", ",", "kernel_is_fft", "=", "False", ",", "kernel_is_fftshifted", "=", "False", ")", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "return", "_fft_convolve_numpy", "(", "data", ",", "h", ",", "plan", "=", "plan", ",", "kernel_is_fft", "=", "kernel_is_fft", ",", "kernel_is_fftshifted", "=", "kernel_is_fftshifted", ")", "elif", "isinstance", "(", "data", ",", "OCLArray", ")", ":", "return", "_fft_convolve_gpu", "(", "data", ",", "h", ",", "res_g", "=", "res_g", ",", "plan", "=", "plan", ",", "inplace", "=", "inplace", ",", "kernel_is_fft", "=", "kernel_is_fft", ")", "else", ":", "raise", "TypeError", "(", "\"array argument (1) has bad type: %s\"", "%", "type", "(", "data", ")", ")" ]
convolves data with kernel h via FFTs data should be either a numpy array or a OCLArray (see doc for fft) both data and h should be same shape if data/h are OCLArrays, then: - type should be complex64 - shape should be equal and power of two - h is assumed to be already fftshifted (otherwise set kernel_is_fftshifted to true)
[ "convolves", "data", "with", "kernel", "h", "via", "FFTs" ]
python
train
duniter/duniter-python-api
examples/save_revoke_document.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/examples/save_revoke_document.py#L77-L91
def get_signed_raw_revocation_document(identity: Identity, salt: str, password: str) -> str:
    """
    Generate account revocation document for given identity

    :param identity: Self Certification of the identity
    :param salt: Salt
    :param password: Password

    :rtype: str
    """
    revocation = Revocation(PROTOCOL_VERSION, identity.currency, identity, "")

    key = SigningKey.from_credentials(salt, password)
    revocation.sign([key])

    return revocation.signed_raw()
[ "def", "get_signed_raw_revocation_document", "(", "identity", ":", "Identity", ",", "salt", ":", "str", ",", "password", ":", "str", ")", "->", "str", ":", "revocation", "=", "Revocation", "(", "PROTOCOL_VERSION", ",", "identity", ".", "currency", ",", "identity", ",", "\"\"", ")", "key", "=", "SigningKey", ".", "from_credentials", "(", "salt", ",", "password", ")", "revocation", ".", "sign", "(", "[", "key", "]", ")", "return", "revocation", ".", "signed_raw", "(", ")" ]
Generate account revocation document for given identity :param identity: Self Certification of the identity :param salt: Salt :param password: Password :rtype: str
[ "Generate", "account", "revocation", "document", "for", "given", "identity" ]
python
train
scanny/python-pptx
pptx/chart/xmlwriter.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/xmlwriter.py#L123-L141
def numRef_xml(self, wksht_ref, number_format, values):
    """
    Return the ``<c:numRef>`` element specified by the parameters as
    unicode text.
    """
    pt_xml = self.pt_xml(values)
    return (
        ' <c:numRef>\n'
        ' <c:f>{wksht_ref}</c:f>\n'
        ' <c:numCache>\n'
        ' <c:formatCode>{number_format}</c:formatCode>\n'
        '{pt_xml}'
        ' </c:numCache>\n'
        ' </c:numRef>\n'
    ).format(**{
        'wksht_ref': wksht_ref,
        'number_format': number_format,
        'pt_xml': pt_xml,
    })
[ "def", "numRef_xml", "(", "self", ",", "wksht_ref", ",", "number_format", ",", "values", ")", ":", "pt_xml", "=", "self", ".", "pt_xml", "(", "values", ")", "return", "(", "' <c:numRef>\\n'", "' <c:f>{wksht_ref}</c:f>\\n'", "' <c:numCache>\\n'", "' <c:formatCode>{number_format}</c:formatCode>\\n'", "'{pt_xml}'", "' </c:numCache>\\n'", "' </c:numRef>\\n'", ")", ".", "format", "(", "*", "*", "{", "'wksht_ref'", ":", "wksht_ref", ",", "'number_format'", ":", "number_format", ",", "'pt_xml'", ":", "pt_xml", ",", "}", ")" ]
Return the ``<c:numRef>`` element specified by the parameters as unicode text.
[ "Return", "the", "<c", ":", "numRef", ">", "element", "specified", "by", "the", "parameters", "as", "unicode", "text", "." ]
python
train
the01/python-floscraper
floscraper/webscraper.py
https://github.com/the01/python-floscraper/blob/d578cd3d6381070d9a07dade1e10387ae33e9a65/floscraper/webscraper.py#L642-L655
def shrink(self, shrink):
    """
    Remove unnecessary parts

    :param shrink: Object to shrink
    :type shrink: dict | list
    :return: Shrunk object
    :rtype: dict | list
    """
    if isinstance(shrink, list):
        return self._shrink_list(shrink)
    if isinstance(shrink, dict):
        return self._shrink_dict(shrink)
    return shrink
[ "def", "shrink", "(", "self", ",", "shrink", ")", ":", "if", "isinstance", "(", "shrink", ",", "list", ")", ":", "return", "self", ".", "_shrink_list", "(", "shrink", ")", "if", "isinstance", "(", "shrink", ",", "dict", ")", ":", "return", "self", ".", "_shrink_dict", "(", "shrink", ")", "return", "shrink" ]
Remove unnecessary parts

:param shrink: Object to shrink
:type shrink: dict | list
:return: Shrunk object
:rtype: dict | list
[ "Remove", "unnecessary", "parts" ]
python
train
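The shrink entry above is a thin type dispatch; a standalone analogue below shows the same routing for lists and dicts while other values pass through unchanged. The filtering rule (dropping empty values) is an assumption for illustration and is not floscraper's actual _shrink_list/_shrink_dict logic.

def shrink_demo(value):
    # dispatch on container type; everything else passes through untouched
    if isinstance(value, list):
        return [shrink_demo(item) for item in value if item not in (None, "", [], {})]
    if isinstance(value, dict):
        return {k: shrink_demo(v) for k, v in value.items() if v not in (None, "", [], {})}
    return value

print(shrink_demo({"a": "", "b": [None, "x"], "c": 1}))   # {'b': ['x'], 'c': 1}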
nvbn/thefuck
thefuck/output_readers/read_log.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/output_readers/read_log.py#L70-L105
def get_output(script): """Reads script output from log. :type script: str :rtype: str | None """ if six.PY2: logs.warn('Experimental instant mode is Python 3+ only') return None if 'THEFUCK_OUTPUT_LOG' not in os.environ: logs.warn("Output log isn't specified") return None if const.USER_COMMAND_MARK not in os.environ.get('PS1', ''): logs.warn( "PS1 doesn't contain user command mark, please ensure " "that PS1 is not changed after The Fuck alias initialization") return None try: with logs.debug_time(u'Read output from log'): fd = os.open(os.environ['THEFUCK_OUTPUT_LOG'], os.O_RDONLY) buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_READ) _skip_old_lines(buffer) lines = _get_output_lines(script, buffer) output = '\n'.join(lines).strip() logs.debug(u'Received output: {}'.format(output)) return output except OSError: logs.warn("Can't read output log") return None except ScriptNotInLog: logs.warn("Script not found in output log") return None
[ "def", "get_output", "(", "script", ")", ":", "if", "six", ".", "PY2", ":", "logs", ".", "warn", "(", "'Experimental instant mode is Python 3+ only'", ")", "return", "None", "if", "'THEFUCK_OUTPUT_LOG'", "not", "in", "os", ".", "environ", ":", "logs", ".", "warn", "(", "\"Output log isn't specified\"", ")", "return", "None", "if", "const", ".", "USER_COMMAND_MARK", "not", "in", "os", ".", "environ", ".", "get", "(", "'PS1'", ",", "''", ")", ":", "logs", ".", "warn", "(", "\"PS1 doesn't contain user command mark, please ensure \"", "\"that PS1 is not changed after The Fuck alias initialization\"", ")", "return", "None", "try", ":", "with", "logs", ".", "debug_time", "(", "u'Read output from log'", ")", ":", "fd", "=", "os", ".", "open", "(", "os", ".", "environ", "[", "'THEFUCK_OUTPUT_LOG'", "]", ",", "os", ".", "O_RDONLY", ")", "buffer", "=", "mmap", ".", "mmap", "(", "fd", ",", "const", ".", "LOG_SIZE_IN_BYTES", ",", "mmap", ".", "MAP_SHARED", ",", "mmap", ".", "PROT_READ", ")", "_skip_old_lines", "(", "buffer", ")", "lines", "=", "_get_output_lines", "(", "script", ",", "buffer", ")", "output", "=", "'\\n'", ".", "join", "(", "lines", ")", ".", "strip", "(", ")", "logs", ".", "debug", "(", "u'Received output: {}'", ".", "format", "(", "output", ")", ")", "return", "output", "except", "OSError", ":", "logs", ".", "warn", "(", "\"Can't read output log\"", ")", "return", "None", "except", "ScriptNotInLog", ":", "logs", ".", "warn", "(", "\"Script not found in output log\"", ")", "return", "None" ]
Reads script output from log. :type script: str :rtype: str | None
[ "Reads", "script", "output", "from", "log", "." ]
python
train
twisted/twistedchecker
check_pyflakes.py
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/check_pyflakes.py#L117-L132
def checkRecursive(paths, reporter): """ Recursively check all source files in C{paths}. @param paths: A list of paths to Python source files and directories containing Python source files. @param reporter: A L{Reporter} where all of the warnings and errors will be reported to. @return: The number of warnings found. """ warnings = 0 for sourcePath in iterSourceCode(paths): if re.search(RE_EXCLUDE, sourcePath): continue warnings += checkPath(sourcePath, reporter) return warnings
[ "def", "checkRecursive", "(", "paths", ",", "reporter", ")", ":", "warnings", "=", "0", "for", "sourcePath", "in", "iterSourceCode", "(", "paths", ")", ":", "if", "re", ".", "search", "(", "RE_EXCLUDE", ",", "sourcePath", ")", ":", "continue", "warnings", "+=", "checkPath", "(", "sourcePath", ",", "reporter", ")", "return", "warnings" ]
Recursively check all source files in C{paths}. @param paths: A list of paths to Python source files and directories containing Python source files. @param reporter: A L{Reporter} where all of the warnings and errors will be reported to. @return: The number of warnings found.
[ "Recursively", "check", "all", "source", "files", "in", "C", "{", "paths", "}", "." ]
python
train
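A usage sketch for the checkRecursive entry above, assuming it is imported from twistedchecker's check_pyflakes module; the Reporter class comes from pyflakes and the path list is a placeholder.

import sys
from pyflakes.reporter import Reporter

# count pyflakes warnings across a tree, with RE_EXCLUDE-matched paths skipped
reporter = Reporter(sys.stdout, sys.stderr)
warning_count = checkRecursive(["twistedchecker/"], reporter)
print("pyflakes warnings:", warning_count)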
SmartTeleMax/iktomi
iktomi/unstable/utils/image_resizers.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/unstable/utils/image_resizers.py#L34-L55
def transform(self, img, transformation, params): ''' Apply transformations to the image. New transformations can be defined as methods:: def do__transformationname(self, img, transformation, params): 'returns new image with transformation applied' ... def new_size__transformationname(self, size, target_size, params): 'dry run, returns a size of image if transformation is applied' ... ''' # Transformations MUST be idempotent. # The limitation is caused by implementation of # image upload in iktomi.cms. # The transformation can be applied twice: # on image upload after crop (when TransientFile is created) # and on object save (when PersistentFile is created). method = getattr(self, 'do__' + transformation) return method(img, transformation, params)
[ "def", "transform", "(", "self", ",", "img", ",", "transformation", ",", "params", ")", ":", "# Transformations MUST be idempotent.", "# The limitation is caused by implementation of", "# image upload in iktomi.cms.", "# The transformation can be applied twice:", "# on image upload after crop (when TransientFile is created)", "# and on object save (when PersistentFile is created).", "method", "=", "getattr", "(", "self", ",", "'do__'", "+", "transformation", ")", "return", "method", "(", "img", ",", "transformation", ",", "params", ")" ]
Apply transformations to the image. New transformations can be defined as methods:: def do__transformationname(self, img, transformation, params): 'returns new image with transformation applied' ... def new_size__transformationname(self, size, target_size, params): 'dry run, returns a size of image if transformation is applied' ...
[ "Apply", "transformations", "to", "the", "image", "." ]
python
train
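Following the do__/new_size__ convention described in the transform docstring above, a sketch of how a new idempotent transformation might be added on a subclass. The base class name Resizer and the 'greyscale' transformation are assumptions for illustration.

class GreyscaleResizer(Resizer):
    def do__greyscale(self, img, transformation, params):
        # returns a new PIL image with the transformation applied;
        # converting twice yields the same result, so it stays idempotent
        return img.convert('L')

    def new_size__greyscale(self, size, target_size, params):
        # dry run: greyscale conversion does not change the image size
        return size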
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L571-L581
def get_broadcast_transaction_hashes(coin_symbol='btc', api_key=None, limit=10): ''' Warning, slow! ''' transactions = get_broadcast_transactions( coin_symbol=coin_symbol, api_key=api_key, limit=limit, ) return [tx['hash'] for tx in transactions]
[ "def", "get_broadcast_transaction_hashes", "(", "coin_symbol", "=", "'btc'", ",", "api_key", "=", "None", ",", "limit", "=", "10", ")", ":", "transactions", "=", "get_broadcast_transactions", "(", "coin_symbol", "=", "coin_symbol", ",", "api_key", "=", "api_key", ",", "limit", "=", "limit", ",", ")", "return", "[", "tx", "[", "'hash'", "]", "for", "tx", "in", "transactions", "]" ]
Warning, slow!
[ "Warning", "slow!" ]
python
train
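A usage sketch for get_broadcast_transaction_hashes, assuming the helper is re-exported at the blockcypher package level like the other api.py functions; as the docstring warns, the call can be slow.

from blockcypher import get_broadcast_transaction_hashes

# latest unconfirmed transaction hashes seen by BlockCypher for Bitcoin
for tx_hash in get_broadcast_transaction_hashes(coin_symbol='btc', limit=5):
    print(tx_hash)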
Fantomas42/django-blog-zinnia
zinnia/models_bases/entry.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/models_bases/entry.py#L383-L391
def save(self, *args, **kwargs): """ Overrides the save method to create an excerpt from the content field if void. """ if not self.excerpt and self.status == PUBLISHED: self.excerpt = Truncator(strip_tags( getattr(self, 'content', ''))).words(50) super(ExcerptEntry, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "excerpt", "and", "self", ".", "status", "==", "PUBLISHED", ":", "self", ".", "excerpt", "=", "Truncator", "(", "strip_tags", "(", "getattr", "(", "self", ",", "'content'", ",", "''", ")", ")", ")", ".", "words", "(", "50", ")", "super", "(", "ExcerptEntry", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Overrides the save method to create an excerpt from the content field if void.
[ "Overrides", "the", "save", "method", "to", "create", "an", "excerpt", "from", "the", "content", "field", "if", "void", "." ]
python
train
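The save override above leans on two stock Django helpers; a standalone sketch of just the excerpt step (run inside a configured Django project) uses the same 50-word truncation.

from django.utils.html import strip_tags
from django.utils.text import Truncator

def make_excerpt(content, word_count=50):
    # same recipe as the save() override: drop markup, keep the first 50 words
    return Truncator(strip_tags(content)).words(word_count)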
SheffieldML/GPy
GPy/plotting/gpy_plot/latent_plots.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/plotting/gpy_plot/latent_plots.py#L238-L280
def plot_latent(self, labels=None, which_indices=None, resolution=60, legend=True, plot_limits=None, updates=False, kern=None, marker='<>^vsd', num_samples=1000, projection='2d', scatter_kwargs=None, **imshow_kwargs): """ Plot the latent space of the GP on the inputs. This is the density of the GP posterior as a grey scale and the scatter plot of the input dimemsions selected by which_indices. :param array-like labels: a label for each data point (row) of the inputs :param (int, int) which_indices: which input dimensions to plot against each other :param int resolution: the resolution at which we predict the magnification factor :param bool legend: whether to plot the legend on the figure :param plot_limits: the plot limits for the plot :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax)) :param bool updates: if possible, make interactive updates using the specific library you are using :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction :param str marker: markers to use - cycle if more labels then markers are given :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples. :param imshow_kwargs: the kwargs for the imshow (magnification factor) :param scatter_kwargs: the kwargs for the scatter plots """ if projection != '2d': raise ValueError('Cannot plot latent in other then 2 dimensions, consider plot_scatter') input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2] X = get_x_y_var(self)[0] _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution) canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]), xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs) if legend: if (labels is not None): legend = find_best_layout_for_subplots(len(np.unique(labels)))[1] else: labels = np.ones(self.num_data) legend = False scatters = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {}) view = _plot_latent(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, kern, **imshow_kwargs) retval = pl().add_to_canvas(canvas, dict(scatter=scatters, imshow=view), legend=legend) _wait_for_updates(view, updates) return retval
[ "def", "plot_latent", "(", "self", ",", "labels", "=", "None", ",", "which_indices", "=", "None", ",", "resolution", "=", "60", ",", "legend", "=", "True", ",", "plot_limits", "=", "None", ",", "updates", "=", "False", ",", "kern", "=", "None", ",", "marker", "=", "'<>^vsd'", ",", "num_samples", "=", "1000", ",", "projection", "=", "'2d'", ",", "scatter_kwargs", "=", "None", ",", "*", "*", "imshow_kwargs", ")", ":", "if", "projection", "!=", "'2d'", ":", "raise", "ValueError", "(", "'Cannot plot latent in other then 2 dimensions, consider plot_scatter'", ")", "input_1", ",", "input_2", "=", "which_indices", "=", "self", ".", "get_most_significant_input_dimensions", "(", "which_indices", ")", "[", ":", "2", "]", "X", "=", "get_x_y_var", "(", "self", ")", "[", "0", "]", "_", ",", "_", ",", "Xgrid", ",", "_", ",", "_", ",", "xmin", ",", "xmax", ",", "resolution", "=", "helper_for_plot_data", "(", "self", ",", "X", ",", "plot_limits", ",", "which_indices", ",", "None", ",", "resolution", ")", "canvas", ",", "imshow_kwargs", "=", "pl", "(", ")", ".", "new_canvas", "(", "xlim", "=", "(", "xmin", "[", "0", "]", ",", "xmax", "[", "0", "]", ")", ",", "ylim", "=", "(", "xmin", "[", "1", "]", ",", "xmax", "[", "1", "]", ")", ",", "xlabel", "=", "'latent dimension %i'", "%", "input_1", ",", "ylabel", "=", "'latent dimension %i'", "%", "input_2", ",", "*", "*", "imshow_kwargs", ")", "if", "legend", ":", "if", "(", "labels", "is", "not", "None", ")", ":", "legend", "=", "find_best_layout_for_subplots", "(", "len", "(", "np", ".", "unique", "(", "labels", ")", ")", ")", "[", "1", "]", "else", ":", "labels", "=", "np", ".", "ones", "(", "self", ".", "num_data", ")", "legend", "=", "False", "scatters", "=", "_plot_latent_scatter", "(", "canvas", ",", "X", ",", "which_indices", ",", "labels", ",", "marker", ",", "num_samples", ",", "projection", "=", "'2d'", ",", "*", "*", "scatter_kwargs", "or", "{", "}", ")", "view", "=", "_plot_latent", "(", "self", ",", "canvas", ",", "which_indices", ",", "Xgrid", ",", "xmin", ",", "xmax", ",", "resolution", ",", "updates", ",", "kern", ",", "*", "*", "imshow_kwargs", ")", "retval", "=", "pl", "(", ")", ".", "add_to_canvas", "(", "canvas", ",", "dict", "(", "scatter", "=", "scatters", ",", "imshow", "=", "view", ")", ",", "legend", "=", "legend", ")", "_wait_for_updates", "(", "view", ",", "updates", ")", "return", "retval" ]
Plot the latent space of the GP on the inputs. This is the density of the
GP posterior as a grey scale and the scatter plot of the input dimensions
selected by which_indices.

:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param int resolution: the resolution at which we predict the magnification factor
:param bool legend: whether to plot the legend on the figure
:param plot_limits: the plot limits for the plot
:type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))
:param bool updates: if possible, make interactive updates using the specific library you are using
:param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction
:param str marker: markers to use - cycle if more labels than markers are given
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher than num_samples.
:param imshow_kwargs: the kwargs for the imshow (magnification factor)
:param scatter_kwargs: the kwargs for the scatter plots
[ "Plot", "the", "latent", "space", "of", "the", "GP", "on", "the", "inputs", ".", "This", "is", "the", "density", "of", "the", "GP", "posterior", "as", "a", "grey", "scale", "and", "the", "scatter", "plot", "of", "the", "input", "dimemsions", "selected", "by", "which_indices", "." ]
python
train
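A hedged usage sketch for plot_latent on a small GPLVM; the data, labels and hyperparameters are synthetic placeholders, and a plotting backend (matplotlib) is assumed to be available.

import numpy as np
import GPy

Y = np.random.randn(60, 5)            # 60 observations, 5 output dimensions
labels = np.repeat([0, 1, 2], 20)     # three synthetic classes
model = GPy.models.GPLVM(Y, input_dim=2)
model.optimize(messages=False, max_iters=200)
model.plot_latent(labels=labels, which_indices=(0, 1), resolution=40)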
saltstack/salt
salt/modules/win_dacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dacl.py#L653-L674
def enable_inheritance(path, objectType, clear=False): ''' enable/disable inheritance on an object Args: path: The path to the object objectType: The type of object (FILE, DIRECTORY, REGISTRY) clear: True will remove non-Inherited ACEs from the ACL Returns (dict): A dictionary containing the results CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.enable_inheritance c:\temp directory ''' dc = daclConstants() objectType = dc.getObjectTypeBit(objectType) path = dc.processPath(path, objectType) return _set_dacl_inheritance(path, objectType, True, None, clear)
[ "def", "enable_inheritance", "(", "path", ",", "objectType", ",", "clear", "=", "False", ")", ":", "dc", "=", "daclConstants", "(", ")", "objectType", "=", "dc", ".", "getObjectTypeBit", "(", "objectType", ")", "path", "=", "dc", ".", "processPath", "(", "path", ",", "objectType", ")", "return", "_set_dacl_inheritance", "(", "path", ",", "objectType", ",", "True", ",", "None", ",", "clear", ")" ]
enable/disable inheritance on an object Args: path: The path to the object objectType: The type of object (FILE, DIRECTORY, REGISTRY) clear: True will remove non-Inherited ACEs from the ACL Returns (dict): A dictionary containing the results CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.enable_inheritance c:\temp directory
[ "enable", "/", "disable", "inheritance", "on", "an", "object" ]
python
train
angr/angr
angr/analyses/forward_analysis.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/forward_analysis.py#L129-L149
def all_successors(self, node, skip_reached_fixedpoint=False): """ Returns all successors to the specific node. :param node: A node in the graph. :return: A set of nodes that are all successors to the given node. :rtype: set """ successors = set() stack = [ node ] while stack: n = stack.pop() successors.add(n) stack.extend(succ for succ in self.successors(n) if succ not in successors and (not skip_reached_fixedpoint or succ not in self._reached_fixedpoint) ) return successors
[ "def", "all_successors", "(", "self", ",", "node", ",", "skip_reached_fixedpoint", "=", "False", ")", ":", "successors", "=", "set", "(", ")", "stack", "=", "[", "node", "]", "while", "stack", ":", "n", "=", "stack", ".", "pop", "(", ")", "successors", ".", "add", "(", "n", ")", "stack", ".", "extend", "(", "succ", "for", "succ", "in", "self", ".", "successors", "(", "n", ")", "if", "succ", "not", "in", "successors", "and", "(", "not", "skip_reached_fixedpoint", "or", "succ", "not", "in", "self", ".", "_reached_fixedpoint", ")", ")", "return", "successors" ]
Returns all successors to the specific node. :param node: A node in the graph. :return: A set of nodes that are all successors to the given node. :rtype: set
[ "Returns", "all", "successors", "to", "the", "specific", "node", "." ]
python
train
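The all_successors entry above is a plain reachability walk; an equivalent standalone sketch over a dict-of-lists adjacency map (dropping the fixed-point bookkeeping) makes the traversal explicit.

def reachable_from(graph, node):
    # graph: dict mapping node -> list of successor nodes
    seen = set()
    stack = [node]
    while stack:
        n = stack.pop()
        seen.add(n)
        stack.extend(succ for succ in graph.get(n, []) if succ not in seen)
    return seen

graph = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
print(reachable_from(graph, "a"))   # {'a', 'b', 'c', 'd'}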
sibson/vncdotool
vncdotool/client.py
https://github.com/sibson/vncdotool/blob/e133a8916efaa0f5ed421e0aa737196624635b0c/vncdotool/client.py#L318-L342
def mouseDrag(self, x, y, step=1): """ Move the mouse point to position (x, y) in increments of step """ log.debug('mouseDrag %d,%d', x, y) if x < self.x: xsteps = [self.x - i for i in range(step, self.x - x + 1, step)] else: xsteps = range(self.x, x, step) if y < self.y: ysteps = [self.y - i for i in range(step, self.y - y + 1, step)] else: ysteps = range(self.y, y, step) for ypos in ysteps: time.sleep(.2) self.mouseMove(self.x, ypos) for xpos in xsteps: time.sleep(.2) self.mouseMove(xpos, self.y) self.mouseMove(x, y) return self
[ "def", "mouseDrag", "(", "self", ",", "x", ",", "y", ",", "step", "=", "1", ")", ":", "log", ".", "debug", "(", "'mouseDrag %d,%d'", ",", "x", ",", "y", ")", "if", "x", "<", "self", ".", "x", ":", "xsteps", "=", "[", "self", ".", "x", "-", "i", "for", "i", "in", "range", "(", "step", ",", "self", ".", "x", "-", "x", "+", "1", ",", "step", ")", "]", "else", ":", "xsteps", "=", "range", "(", "self", ".", "x", ",", "x", ",", "step", ")", "if", "y", "<", "self", ".", "y", ":", "ysteps", "=", "[", "self", ".", "y", "-", "i", "for", "i", "in", "range", "(", "step", ",", "self", ".", "y", "-", "y", "+", "1", ",", "step", ")", "]", "else", ":", "ysteps", "=", "range", "(", "self", ".", "y", ",", "y", ",", "step", ")", "for", "ypos", "in", "ysteps", ":", "time", ".", "sleep", "(", ".2", ")", "self", ".", "mouseMove", "(", "self", ".", "x", ",", "ypos", ")", "for", "xpos", "in", "xsteps", ":", "time", ".", "sleep", "(", ".2", ")", "self", ".", "mouseMove", "(", "xpos", ",", "self", ".", "y", ")", "self", ".", "mouseMove", "(", "x", ",", "y", ")", "return", "self" ]
Move the mouse point to position (x, y) in increments of step
[ "Move", "the", "mouse", "point", "to", "position", "(", "x", "y", ")", "in", "increments", "of", "step" ]
python
train
mar10/pyftpsync
ftpsync/synchronizers.py
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L936-L990
def on_conflict(self, pair): """Return False to prevent visiting of children.""" # self._log_action("skip", "conflict", "!", pair.local, min_level=2) # print("on_conflict", pair) any_entry = pair.any_entry if not self._test_match_or_print(any_entry): return resolve = self._interactive_resolve(pair) if resolve == "skip": self._log_action("skip", "conflict", "*?*", any_entry) self._inc_stat("conflict_files_skipped") return if pair.local and pair.remote: assert pair.local.is_file() is_newer = pair.local > pair.remote if ( resolve == "local" or (is_newer and resolve == "new") or (not is_newer and resolve == "old") ): self._log_action("copy", "conflict", "*>*", pair.local) self._copy_file(self.local, self.remote, pair.local) elif ( resolve == "remote" or (is_newer and resolve == "old") or (not is_newer and resolve == "new") ): self._log_action("copy", "conflict", "*<*", pair.local) self._copy_file(self.remote, self.local, pair.remote) else: raise NotImplementedError elif pair.local: assert pair.local.is_file() if resolve == "local": self._log_action("restore", "conflict", "*>x", pair.local) self._copy_file(self.local, self.remote, pair.local) elif resolve == "remote": self._log_action("delete", "conflict", "*<x", pair.local) self._remove_file(pair.local) else: raise NotImplementedError else: assert pair.remote.is_file() if resolve == "local": self._log_action("delete", "conflict", "x>*", pair.remote) self._remove_file(pair.remote) elif resolve == "remote": self._log_action("restore", "conflict", "x<*", pair.remote) self._copy_file(self.remote, self.local, pair.remote) else: raise NotImplementedError return
[ "def", "on_conflict", "(", "self", ",", "pair", ")", ":", "# self._log_action(\"skip\", \"conflict\", \"!\", pair.local, min_level=2)", "# print(\"on_conflict\", pair)", "any_entry", "=", "pair", ".", "any_entry", "if", "not", "self", ".", "_test_match_or_print", "(", "any_entry", ")", ":", "return", "resolve", "=", "self", ".", "_interactive_resolve", "(", "pair", ")", "if", "resolve", "==", "\"skip\"", ":", "self", ".", "_log_action", "(", "\"skip\"", ",", "\"conflict\"", ",", "\"*?*\"", ",", "any_entry", ")", "self", ".", "_inc_stat", "(", "\"conflict_files_skipped\"", ")", "return", "if", "pair", ".", "local", "and", "pair", ".", "remote", ":", "assert", "pair", ".", "local", ".", "is_file", "(", ")", "is_newer", "=", "pair", ".", "local", ">", "pair", ".", "remote", "if", "(", "resolve", "==", "\"local\"", "or", "(", "is_newer", "and", "resolve", "==", "\"new\"", ")", "or", "(", "not", "is_newer", "and", "resolve", "==", "\"old\"", ")", ")", ":", "self", ".", "_log_action", "(", "\"copy\"", ",", "\"conflict\"", ",", "\"*>*\"", ",", "pair", ".", "local", ")", "self", ".", "_copy_file", "(", "self", ".", "local", ",", "self", ".", "remote", ",", "pair", ".", "local", ")", "elif", "(", "resolve", "==", "\"remote\"", "or", "(", "is_newer", "and", "resolve", "==", "\"old\"", ")", "or", "(", "not", "is_newer", "and", "resolve", "==", "\"new\"", ")", ")", ":", "self", ".", "_log_action", "(", "\"copy\"", ",", "\"conflict\"", ",", "\"*<*\"", ",", "pair", ".", "local", ")", "self", ".", "_copy_file", "(", "self", ".", "remote", ",", "self", ".", "local", ",", "pair", ".", "remote", ")", "else", ":", "raise", "NotImplementedError", "elif", "pair", ".", "local", ":", "assert", "pair", ".", "local", ".", "is_file", "(", ")", "if", "resolve", "==", "\"local\"", ":", "self", ".", "_log_action", "(", "\"restore\"", ",", "\"conflict\"", ",", "\"*>x\"", ",", "pair", ".", "local", ")", "self", ".", "_copy_file", "(", "self", ".", "local", ",", "self", ".", "remote", ",", "pair", ".", "local", ")", "elif", "resolve", "==", "\"remote\"", ":", "self", ".", "_log_action", "(", "\"delete\"", ",", "\"conflict\"", ",", "\"*<x\"", ",", "pair", ".", "local", ")", "self", ".", "_remove_file", "(", "pair", ".", "local", ")", "else", ":", "raise", "NotImplementedError", "else", ":", "assert", "pair", ".", "remote", ".", "is_file", "(", ")", "if", "resolve", "==", "\"local\"", ":", "self", ".", "_log_action", "(", "\"delete\"", ",", "\"conflict\"", ",", "\"x>*\"", ",", "pair", ".", "remote", ")", "self", ".", "_remove_file", "(", "pair", ".", "remote", ")", "elif", "resolve", "==", "\"remote\"", ":", "self", ".", "_log_action", "(", "\"restore\"", ",", "\"conflict\"", ",", "\"x<*\"", ",", "pair", ".", "remote", ")", "self", ".", "_copy_file", "(", "self", ".", "remote", ",", "self", ".", "local", ",", "pair", ".", "remote", ")", "else", ":", "raise", "NotImplementedError", "return" ]
Return False to prevent visiting of children.
[ "Return", "False", "to", "prevent", "visiting", "of", "children", "." ]
python
train
stevearc/dql
dql/cli.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/cli.py#L504-L518
def do_use(self, region): """ Switch the AWS region > use us-west-1 > use us-east-1 """ if self._local_endpoint is not None: host, port = self._local_endpoint # pylint: disable=W0633 self.engine.connect( region, session=self.session, host=host, port=port, is_secure=False ) else: self.engine.connect(region, session=self.session)
[ "def", "do_use", "(", "self", ",", "region", ")", ":", "if", "self", ".", "_local_endpoint", "is", "not", "None", ":", "host", ",", "port", "=", "self", ".", "_local_endpoint", "# pylint: disable=W0633", "self", ".", "engine", ".", "connect", "(", "region", ",", "session", "=", "self", ".", "session", ",", "host", "=", "host", ",", "port", "=", "port", ",", "is_secure", "=", "False", ")", "else", ":", "self", ".", "engine", ".", "connect", "(", "region", ",", "session", "=", "self", ".", "session", ")" ]
Switch the AWS region > use us-west-1 > use us-east-1
[ "Switch", "the", "AWS", "region" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/models/instance.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/models/instance.py#L632-L651
def get_worker_log(self, log_id, log_type, size=0): """ Get logs from worker. :param log_id: id of log, can be retrieved from details. :param log_type: type of logs. Possible log types contains {log_types} :param size: length of the log to retrieve :return: log content """ params = OrderedDict([('log', ''), ('id', log_id)]) if log_type is not None: log_type = log_type.lower() if log_type not in LOG_TYPES_MAPPING: raise ValueError('log_type should choose a value in ' + ' '.join(six.iterkeys(LOG_TYPES_MAPPING))) params['logtype'] = LOG_TYPES_MAPPING[log_type] if size > 0: params['size'] = str(size) resp = self._client.get(self.resource(), params=params) return resp.text
[ "def", "get_worker_log", "(", "self", ",", "log_id", ",", "log_type", ",", "size", "=", "0", ")", ":", "params", "=", "OrderedDict", "(", "[", "(", "'log'", ",", "''", ")", ",", "(", "'id'", ",", "log_id", ")", "]", ")", "if", "log_type", "is", "not", "None", ":", "log_type", "=", "log_type", ".", "lower", "(", ")", "if", "log_type", "not", "in", "LOG_TYPES_MAPPING", ":", "raise", "ValueError", "(", "'log_type should choose a value in '", "+", "' '", ".", "join", "(", "six", ".", "iterkeys", "(", "LOG_TYPES_MAPPING", ")", ")", ")", "params", "[", "'logtype'", "]", "=", "LOG_TYPES_MAPPING", "[", "log_type", "]", "if", "size", ">", "0", ":", "params", "[", "'size'", "]", "=", "str", "(", "size", ")", "resp", "=", "self", ".", "_client", ".", "get", "(", "self", ".", "resource", "(", ")", ",", "params", "=", "params", ")", "return", "resp", ".", "text" ]
Get logs from worker.

:param log_id: id of log, can be retrieved from details.
:param log_type: type of logs. Possible log types contain {log_types}
:param size: length of the log to retrieve
:return: log content
[ "Get", "logs", "from", "worker", "." ]
python
train
pmelchior/proxmin
examples/parabola.py
https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/examples/parabola.py#L53-L60
def prox_yline(y, step): """Projection onto line in y""" if not np.isscalar(y): y= y[0] if y > -0.75: return np.array([-0.75]) else: return np.array([y])
[ "def", "prox_yline", "(", "y", ",", "step", ")", ":", "if", "not", "np", ".", "isscalar", "(", "y", ")", ":", "y", "=", "y", "[", "0", "]", "if", "y", ">", "-", "0.75", ":", "return", "np", ".", "array", "(", "[", "-", "0.75", "]", ")", "else", ":", "return", "np", ".", "array", "(", "[", "y", "]", ")" ]
Projection onto line in y
[ "Projection", "onto", "line", "in", "y" ]
python
train
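prox_yline above projects the y coordinate onto the half-line y <= -0.75; a quick check of both branches, assuming the function from the entry above is in scope (the step argument is accepted but unused, mirroring the original signature).

import numpy as np

print(prox_yline(np.array([0.3]), step=0.1))    # [-0.75]  projected down onto the boundary
print(prox_yline(np.array([-1.2]), step=0.1))   # [-1.2]   already feasible, returned unchanged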
pyca/pynacl
src/nacl/bindings/crypto_box.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_box.py#L234-L269
def crypto_box_seal(message, pk): """ Encrypts and returns a message ``message`` using an ephemeral secret key and the public key ``pk``. The ephemeral public key, which is embedded in the sealed box, is also used, in combination with ``pk``, to derive the nonce needed for the underlying box construct. :param message: bytes :param pk: bytes :rtype: bytes .. versionadded:: 1.2 """ ensure(isinstance(message, bytes), "input message must be bytes", raising=TypeError) ensure(isinstance(pk, bytes), "public key must be bytes", raising=TypeError) if len(pk) != crypto_box_PUBLICKEYBYTES: raise exc.ValueError("Invalid public key") _mlen = len(message) _clen = crypto_box_SEALBYTES + _mlen ciphertext = ffi.new("unsigned char[]", _clen) rc = lib.crypto_box_seal(ciphertext, message, _mlen, pk) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ffi.buffer(ciphertext, _clen)[:]
[ "def", "crypto_box_seal", "(", "message", ",", "pk", ")", ":", "ensure", "(", "isinstance", "(", "message", ",", "bytes", ")", ",", "\"input message must be bytes\"", ",", "raising", "=", "TypeError", ")", "ensure", "(", "isinstance", "(", "pk", ",", "bytes", ")", ",", "\"public key must be bytes\"", ",", "raising", "=", "TypeError", ")", "if", "len", "(", "pk", ")", "!=", "crypto_box_PUBLICKEYBYTES", ":", "raise", "exc", ".", "ValueError", "(", "\"Invalid public key\"", ")", "_mlen", "=", "len", "(", "message", ")", "_clen", "=", "crypto_box_SEALBYTES", "+", "_mlen", "ciphertext", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "_clen", ")", "rc", "=", "lib", ".", "crypto_box_seal", "(", "ciphertext", ",", "message", ",", "_mlen", ",", "pk", ")", "ensure", "(", "rc", "==", "0", ",", "'Unexpected library error'", ",", "raising", "=", "exc", ".", "RuntimeError", ")", "return", "ffi", ".", "buffer", "(", "ciphertext", ",", "_clen", ")", "[", ":", "]" ]
Encrypts and returns a message ``message`` using an ephemeral secret key and the public key ``pk``. The ephemeral public key, which is embedded in the sealed box, is also used, in combination with ``pk``, to derive the nonce needed for the underlying box construct. :param message: bytes :param pk: bytes :rtype: bytes .. versionadded:: 1.2
[ "Encrypts", "and", "returns", "a", "message", "message", "using", "an", "ephemeral", "secret", "key", "and", "the", "public", "key", "pk", ".", "The", "ephemeral", "public", "key", "which", "is", "embedded", "in", "the", "sealed", "box", "is", "also", "used", "in", "combination", "with", "pk", "to", "derive", "the", "nonce", "needed", "for", "the", "underlying", "box", "construct", "." ]
python
train
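A usage sketch for the crypto_box_seal binding above, paired with crypto_box_seal_open from the same nacl.bindings module; the keypair is generated on the spot for the example.

from nacl.bindings import crypto_box_keypair, crypto_box_seal, crypto_box_seal_open

pk, sk = crypto_box_keypair()                      # recipient's long-term keypair
sealed = crypto_box_seal(b"attack at dawn", pk)    # sender stays anonymous; ephemeral key travels inside
plain = crypto_box_seal_open(sealed, pk, sk)       # recipient needs both pk and sk to open
assert plain == b"attack at dawn"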
saltstack/salt
salt/thorium/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L70-L102
def gather_cache(self): ''' Gather the specified data from the minion data cache ''' cache = {'grains': {}, 'pillar': {}} if self.grains or self.pillar: if self.opts.get('minion_data_cache'): minions = self.cache.list('minions') if not minions: return cache for minion in minions: total = self.cache.fetch('minions/{0}'.format(minion), 'data') if 'pillar' in total: if self.pillar_keys: for key in self.pillar_keys: if key in total['pillar']: cache['pillar'][minion][key] = total['pillar'][key] else: cache['pillar'][minion] = total['pillar'] else: cache['pillar'][minion] = {} if 'grains' in total: if self.grain_keys: for key in self.grain_keys: if key in total['grains']: cache['grains'][minion][key] = total['grains'][key] else: cache['grains'][minion] = total['grains'] else: cache['grains'][minion] = {} return cache
[ "def", "gather_cache", "(", "self", ")", ":", "cache", "=", "{", "'grains'", ":", "{", "}", ",", "'pillar'", ":", "{", "}", "}", "if", "self", ".", "grains", "or", "self", ".", "pillar", ":", "if", "self", ".", "opts", ".", "get", "(", "'minion_data_cache'", ")", ":", "minions", "=", "self", ".", "cache", ".", "list", "(", "'minions'", ")", "if", "not", "minions", ":", "return", "cache", "for", "minion", "in", "minions", ":", "total", "=", "self", ".", "cache", ".", "fetch", "(", "'minions/{0}'", ".", "format", "(", "minion", ")", ",", "'data'", ")", "if", "'pillar'", "in", "total", ":", "if", "self", ".", "pillar_keys", ":", "for", "key", "in", "self", ".", "pillar_keys", ":", "if", "key", "in", "total", "[", "'pillar'", "]", ":", "cache", "[", "'pillar'", "]", "[", "minion", "]", "[", "key", "]", "=", "total", "[", "'pillar'", "]", "[", "key", "]", "else", ":", "cache", "[", "'pillar'", "]", "[", "minion", "]", "=", "total", "[", "'pillar'", "]", "else", ":", "cache", "[", "'pillar'", "]", "[", "minion", "]", "=", "{", "}", "if", "'grains'", "in", "total", ":", "if", "self", ".", "grain_keys", ":", "for", "key", "in", "self", ".", "grain_keys", ":", "if", "key", "in", "total", "[", "'grains'", "]", ":", "cache", "[", "'grains'", "]", "[", "minion", "]", "[", "key", "]", "=", "total", "[", "'grains'", "]", "[", "key", "]", "else", ":", "cache", "[", "'grains'", "]", "[", "minion", "]", "=", "total", "[", "'grains'", "]", "else", ":", "cache", "[", "'grains'", "]", "[", "minion", "]", "=", "{", "}", "return", "cache" ]
Gather the specified data from the minion data cache
[ "Gather", "the", "specified", "data", "from", "the", "minion", "data", "cache" ]
python
train
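In the gather_cache entry above, the pillar_keys/grain_keys branches assign into cache['pillar'][minion][key] (and the grains equivalent) before the per-minion dict exists, which looks like it would raise KeyError. A hedged sketch of that filtering step with a setdefault guard; the helper name is illustrative, not part of Salt.

def _filter_minion_data(cache, section, minion, data, wanted_keys):
    # copy only the wanted keys, creating the per-minion dict first
    target = cache[section].setdefault(minion, {})
    if wanted_keys:
        for key in wanted_keys:
            if key in data:
                target[key] = data[key]
    else:
        cache[section][minion] = data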
bitesofcode/projexui
projexui/widgets/xcalendarwidget/xcalendaritem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendaritem.py#L240-L259
def rebuild( self ): """ Rebuilds the current item in the scene. """ self.markForRebuild(False) self._textData = [] if ( self.rebuildBlocked() ): return scene = self.scene() if ( not scene ): return # rebuild a month look if ( scene.currentMode() == scene.Mode.Month ): self.rebuildMonth() elif ( scene.currentMode() in (scene.Mode.Day, scene.Mode.Week) ): self.rebuildDay()
[ "def", "rebuild", "(", "self", ")", ":", "self", ".", "markForRebuild", "(", "False", ")", "self", ".", "_textData", "=", "[", "]", "if", "(", "self", ".", "rebuildBlocked", "(", ")", ")", ":", "return", "scene", "=", "self", ".", "scene", "(", ")", "if", "(", "not", "scene", ")", ":", "return", "# rebuild a month look\r", "if", "(", "scene", ".", "currentMode", "(", ")", "==", "scene", ".", "Mode", ".", "Month", ")", ":", "self", ".", "rebuildMonth", "(", ")", "elif", "(", "scene", ".", "currentMode", "(", ")", "in", "(", "scene", ".", "Mode", ".", "Day", ",", "scene", ".", "Mode", ".", "Week", ")", ")", ":", "self", ".", "rebuildDay", "(", ")" ]
Rebuilds the current item in the scene.
[ "Rebuilds", "the", "current", "item", "in", "the", "scene", "." ]
python
train
alvations/lazyme
lazyme/fileio.py
https://github.com/alvations/lazyme/blob/961a8282198588ff72e15643f725ce895e51d06d/lazyme/fileio.py#L9-L20
def find_files(dir_path, extension="*"): """ From https://stackoverflow.com/a/2186565/610569 """ if sys.version_info.major == 3 and sys.version_info.minor >= 5: pattern = '/'.join([dir_path, '**', extension]) for filename in glob.iglob(pattern, recursive=True): yield filename else: for root, dirnames, filenames in os.walk(dir_path): for filename in fnmatch.filter(filenames, extension): yield os.path.join(root, filename)
[ "def", "find_files", "(", "dir_path", ",", "extension", "=", "\"*\"", ")", ":", "if", "sys", ".", "version_info", ".", "major", "==", "3", "and", "sys", ".", "version_info", ".", "minor", ">=", "5", ":", "pattern", "=", "'/'", ".", "join", "(", "[", "dir_path", ",", "'**'", ",", "extension", "]", ")", "for", "filename", "in", "glob", ".", "iglob", "(", "pattern", ",", "recursive", "=", "True", ")", ":", "yield", "filename", "else", ":", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dir_path", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "extension", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")" ]
From https://stackoverflow.com/a/2186565/610569
[ "From", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "2186565", "/", "610569" ]
python
train
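A usage sketch for the find_files generator above, assuming it is in scope; the directory and pattern are placeholders.

# lazily walk a tree and pick up Python sources, whichever branch
# (glob with recursive=True on Python 3.5+, os.walk + fnmatch otherwise) is taken
for path in find_files("src/", extension="*.py"):
    print(path)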
saltstack/salt
salt/auth/rest.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/auth/rest.py#L50-L70
def auth(username, password): ''' REST authentication ''' url = rest_auth_setup() data = {'username': username, 'password': password} # Post to the API endpoint. If 200 is returned then the result will be the ACLs # for this user result = salt.utils.http.query(url, method='POST', data=data, status=True, decode=True) if result['status'] == 200: log.debug('eauth REST call returned 200: %s', result) if result['dict'] is not None: return result['dict'] return True else: log.debug('eauth REST call failed: %s', result) return False
[ "def", "auth", "(", "username", ",", "password", ")", ":", "url", "=", "rest_auth_setup", "(", ")", "data", "=", "{", "'username'", ":", "username", ",", "'password'", ":", "password", "}", "# Post to the API endpoint. If 200 is returned then the result will be the ACLs", "# for this user", "result", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "url", ",", "method", "=", "'POST'", ",", "data", "=", "data", ",", "status", "=", "True", ",", "decode", "=", "True", ")", "if", "result", "[", "'status'", "]", "==", "200", ":", "log", ".", "debug", "(", "'eauth REST call returned 200: %s'", ",", "result", ")", "if", "result", "[", "'dict'", "]", "is", "not", "None", ":", "return", "result", "[", "'dict'", "]", "return", "True", "else", ":", "log", ".", "debug", "(", "'eauth REST call failed: %s'", ",", "result", ")", "return", "False" ]
REST authentication
[ "REST", "authentication" ]
python
train
sleepyfran/itunespy
itunespy/music_artist.py
https://github.com/sleepyfran/itunespy/blob/0e7e931b135b5e0daae49ba68e9167ff4ac73eb5/itunespy/music_artist.py#L29-L34
def get_albums(self): """ Retrieves all the albums by the artist :return: List. Albums published by the artist """ return itunespy.lookup(id=self.artist_id, entity=itunespy.entities['album'])[1:]
[ "def", "get_albums", "(", "self", ")", ":", "return", "itunespy", ".", "lookup", "(", "id", "=", "self", ".", "artist_id", ",", "entity", "=", "itunespy", ".", "entities", "[", "'album'", "]", ")", "[", "1", ":", "]" ]
Retrieves all the albums by the artist :return: List. Albums published by the artist
[ "Retrieves", "all", "the", "albums", "by", "the", "artist", ":", "return", ":", "List", ".", "Albums", "published", "by", "the", "artist" ]
python
train
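A hedged usage sketch for get_albums, assuming itunespy's search_artist helper returns artist objects like the one above; the album attribute name mirrors the iTunes 'collectionName' field and is an assumption.

import itunespy

artist = itunespy.search_artist("Daft Punk")[0]   # first matching artist
for album in artist.get_albums():
    print(album.collection_name)                  # attribute name assumed from the iTunes field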
saltstack/salt
salt/config/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/config/__init__.py#L1971-L1980
def _validate_pillar_roots(pillar_roots): ''' If the pillar_roots option has a key that is None then we will error out, just replace it with an empty list ''' if not isinstance(pillar_roots, dict): log.warning('The pillar_roots parameter is not properly formatted,' ' using defaults') return {'base': _expand_glob_path([salt.syspaths.BASE_PILLAR_ROOTS_DIR])} return _normalize_roots(pillar_roots)
[ "def", "_validate_pillar_roots", "(", "pillar_roots", ")", ":", "if", "not", "isinstance", "(", "pillar_roots", ",", "dict", ")", ":", "log", ".", "warning", "(", "'The pillar_roots parameter is not properly formatted,'", "' using defaults'", ")", "return", "{", "'base'", ":", "_expand_glob_path", "(", "[", "salt", ".", "syspaths", ".", "BASE_PILLAR_ROOTS_DIR", "]", ")", "}", "return", "_normalize_roots", "(", "pillar_roots", ")" ]
If the pillar_roots option has a key that is None then we will error out, just replace it with an empty list
[ "If", "the", "pillar_roots", "option", "has", "a", "key", "that", "is", "None", "then", "we", "will", "error", "out", "just", "replace", "it", "with", "an", "empty", "list" ]
python
train
tomer8007/kik-bot-api-unofficial
kik_unofficial/client.py
https://github.com/tomer8007/kik-bot-api-unofficial/blob/2ae5216bc05e7099a41895382fc8e428a7a5c3ac/kik_unofficial/client.py#L152-L167
def send_chat_message(self, peer_jid: str, message: str, bot_mention_jid=None): """ Sends a text chat message to another person or a group with the given JID/username. :param peer_jid: The Jabber ID for which to send the message (looks like [email protected]) If you don't know the JID of someone, you can also specify a kik username here. :param message: The actual message body :param bot_mention_jid: If an official bot is referenced, their jid must be embedded as mention for them to respond. """ if self.is_group_jid(peer_jid): log.info("[+] Sending chat message '{}' to group '{}'...".format(message, peer_jid)) return self._send_xmpp_element(chatting.OutgoingGroupChatMessage(peer_jid, message, bot_mention_jid)) else: log.info("[+] Sending chat message '{}' to user '{}'...".format(message, peer_jid)) return self._send_xmpp_element(chatting.OutgoingChatMessage(peer_jid, message, False, bot_mention_jid))
[ "def", "send_chat_message", "(", "self", ",", "peer_jid", ":", "str", ",", "message", ":", "str", ",", "bot_mention_jid", "=", "None", ")", ":", "if", "self", ".", "is_group_jid", "(", "peer_jid", ")", ":", "log", ".", "info", "(", "\"[+] Sending chat message '{}' to group '{}'...\"", ".", "format", "(", "message", ",", "peer_jid", ")", ")", "return", "self", ".", "_send_xmpp_element", "(", "chatting", ".", "OutgoingGroupChatMessage", "(", "peer_jid", ",", "message", ",", "bot_mention_jid", ")", ")", "else", ":", "log", ".", "info", "(", "\"[+] Sending chat message '{}' to user '{}'...\"", ".", "format", "(", "message", ",", "peer_jid", ")", ")", "return", "self", ".", "_send_xmpp_element", "(", "chatting", ".", "OutgoingChatMessage", "(", "peer_jid", ",", "message", ",", "False", ",", "bot_mention_jid", ")", ")" ]
Sends a text chat message to another person or a group with the given JID/username. :param peer_jid: The Jabber ID for which to send the message (looks like [email protected]) If you don't know the JID of someone, you can also specify a kik username here. :param message: The actual message body :param bot_mention_jid: If an official bot is referenced, their jid must be embedded as mention for them to respond.
[ "Sends", "a", "text", "chat", "message", "to", "another", "person", "or", "a", "group", "with", "the", "given", "JID", "/", "username", "." ]
python
train
gem/oq-engine
openquake/commonlib/shapefileparser.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/shapefileparser.py#L994-L1030
def read(self, input_shapefile, validate=False, simple_fault_spacing=1.0, complex_mesh_spacing=5.0, mfd_spacing=0.1): """ Build the source model from nrml format """ reader = shapefile.Reader(input_shapefile) fields = [field[0] for field in reader.fields[1:]] shapes = reader.shapes() records = reader.records() sources = [] if validate: converter = SourceConverter(1.0, simple_fault_spacing, complex_mesh_spacing, mfd_spacing, 10.0) for iloc in range(0, reader.numRecords): # Build record dictionary record = record_to_dict(records[iloc], fields) shape = shapes[iloc] if "pointSource" in record["sourcetype"]: src = build_point_source_from_shp(shape, record) elif "areaSource" in record["sourcetype"]: src = build_area_source_from_shp(shape, record) elif "simpleFaultSource" in record["sourcetype"]: src = build_simple_fault_source_from_shp(shape, record) elif "complexFaultSource" in record["sourcetype"]: src = build_complex_fault_source_from_shp(shape, record) elif "characteristicFaultSource" in record["sourcetype"]: print("Characteristic Fault Source Not Yet Supported - Sorry!") src = None if src and validate: print("Validating Source %s" % src.attrib["id"]) converter.convert_node(src) if src: sources.append(src) return SourceModel(sources)
[ "def", "read", "(", "self", ",", "input_shapefile", ",", "validate", "=", "False", ",", "simple_fault_spacing", "=", "1.0", ",", "complex_mesh_spacing", "=", "5.0", ",", "mfd_spacing", "=", "0.1", ")", ":", "reader", "=", "shapefile", ".", "Reader", "(", "input_shapefile", ")", "fields", "=", "[", "field", "[", "0", "]", "for", "field", "in", "reader", ".", "fields", "[", "1", ":", "]", "]", "shapes", "=", "reader", ".", "shapes", "(", ")", "records", "=", "reader", ".", "records", "(", ")", "sources", "=", "[", "]", "if", "validate", ":", "converter", "=", "SourceConverter", "(", "1.0", ",", "simple_fault_spacing", ",", "complex_mesh_spacing", ",", "mfd_spacing", ",", "10.0", ")", "for", "iloc", "in", "range", "(", "0", ",", "reader", ".", "numRecords", ")", ":", "# Build record dictionary", "record", "=", "record_to_dict", "(", "records", "[", "iloc", "]", ",", "fields", ")", "shape", "=", "shapes", "[", "iloc", "]", "if", "\"pointSource\"", "in", "record", "[", "\"sourcetype\"", "]", ":", "src", "=", "build_point_source_from_shp", "(", "shape", ",", "record", ")", "elif", "\"areaSource\"", "in", "record", "[", "\"sourcetype\"", "]", ":", "src", "=", "build_area_source_from_shp", "(", "shape", ",", "record", ")", "elif", "\"simpleFaultSource\"", "in", "record", "[", "\"sourcetype\"", "]", ":", "src", "=", "build_simple_fault_source_from_shp", "(", "shape", ",", "record", ")", "elif", "\"complexFaultSource\"", "in", "record", "[", "\"sourcetype\"", "]", ":", "src", "=", "build_complex_fault_source_from_shp", "(", "shape", ",", "record", ")", "elif", "\"characteristicFaultSource\"", "in", "record", "[", "\"sourcetype\"", "]", ":", "print", "(", "\"Characteristic Fault Source Not Yet Supported - Sorry!\"", ")", "src", "=", "None", "if", "src", "and", "validate", ":", "print", "(", "\"Validating Source %s\"", "%", "src", ".", "attrib", "[", "\"id\"", "]", ")", "converter", ".", "convert_node", "(", "src", ")", "if", "src", ":", "sources", ".", "append", "(", "src", ")", "return", "SourceModel", "(", "sources", ")" ]
Build the source model from nrml format
[ "Build", "the", "source", "model", "from", "nrml", "format" ]
python
train
rohankapoorcom/zm-py
zoneminder/zm.py
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L117-L130
def get_run_states(self) -> List[RunState]: """Get a list of RunStates from the ZoneMinder API.""" raw_states = self.get_state('api/states.json') if not raw_states: _LOGGER.warning("Could not fetch runstates from ZoneMinder") return [] run_states = [] for i in raw_states['states']: raw_state = i['State'] _LOGGER.info("Initializing runstate %s", raw_state['Id']) run_states.append(RunState(self, raw_state)) return run_states
[ "def", "get_run_states", "(", "self", ")", "->", "List", "[", "RunState", "]", ":", "raw_states", "=", "self", ".", "get_state", "(", "'api/states.json'", ")", "if", "not", "raw_states", ":", "_LOGGER", ".", "warning", "(", "\"Could not fetch runstates from ZoneMinder\"", ")", "return", "[", "]", "run_states", "=", "[", "]", "for", "i", "in", "raw_states", "[", "'states'", "]", ":", "raw_state", "=", "i", "[", "'State'", "]", "_LOGGER", ".", "info", "(", "\"Initializing runstate %s\"", ",", "raw_state", "[", "'Id'", "]", ")", "run_states", ".", "append", "(", "RunState", "(", "self", ",", "raw_state", ")", ")", "return", "run_states" ]
Get a list of RunStates from the ZoneMinder API.
[ "Get", "a", "list", "of", "RunStates", "from", "the", "ZoneMinder", "API", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4158-L4169
def users_autocomplete(self, name=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/users#autocomplete-users" api_path = "/api/v2/users/autocomplete.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if name: api_query.update({ "name": name, }) return self.call(api_path, query=api_query, **kwargs)
[ "def", "users_autocomplete", "(", "self", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/autocomplete.json\"", "api_query", "=", "{", "}", "if", "\"query\"", "in", "kwargs", ".", "keys", "(", ")", ":", "api_query", ".", "update", "(", "kwargs", "[", "\"query\"", "]", ")", "del", "kwargs", "[", "\"query\"", "]", "if", "name", ":", "api_query", ".", "update", "(", "{", "\"name\"", ":", "name", ",", "}", ")", "return", "self", ".", "call", "(", "api_path", ",", "query", "=", "api_query", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/users#autocomplete-users
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "users#autocomplete", "-", "users" ]
python
train
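A usage sketch for the users_autocomplete wrapper above; the Zendesk URL and credentials are placeholders, and the call boils down to GET /api/v2/users/autocomplete.json?name=...

from zdesk import Zendesk

zendesk = Zendesk('https://example.zendesk.com', '[email protected]', 'api-token', True)
result = zendesk.users_autocomplete(name='rob')
for user in result.get('users', []):
    print(user['id'], user['name'])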
raphaelvallat/pingouin
pingouin/external/qsturng.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/external/qsturng.py#L666-L725
def _qsturng(p, r, v): """scalar version of qsturng""" ## print 'q',p # r is interpolated through the q to y here we only need to # account for when p and/or v are not found in the table. global A, p_keys, v_keys if p < .1 or p > .999: raise ValueError('p must be between .1 and .999') if p < .9: if v < 2: raise ValueError('v must be > 2 when p < .9') else: if v < 1: raise ValueError('v must be > 1 when p >= .9') # The easy case. A tabled value is requested. #numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' : p = float(p) if isinstance(v, np.ndarray): v = v.item() if (p,v) in A: y = _func(A[(p,v)], p, r, v) + 1. elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]: # find the 3 closest v values v0, v1, v2 = _select_vs(v, p) # find the 3 closest p values p0, p1, p2 = _select_ps(p) # calculate r0, r1, and r2 r0_sq = _interpolate_p(p, r, v0)**2 r1_sq = _interpolate_p(p, r, v1)**2 r2_sq = _interpolate_p(p, r, v2)**2 # transform v v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2 # calculate derivatives for quadratic interpolation d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \ (r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_) if (v2_ + v0_) >= (v1_ + v1_): d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_) else: d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_) d0 = r1_sq # calculate y y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0) elif v not in v_keys+([],[1])[p>=.90]: y = _interpolate_v(p, r, v) elif p not in p_keys: y = _interpolate_p(p, r, v) return math.sqrt(2) * -y * \ scipy.stats.t.isf((1. + p) / 2., max(v, 1e38))
[ "def", "_qsturng", "(", "p", ",", "r", ",", "v", ")", ":", "## print 'q',p", "# r is interpolated through the q to y here we only need to", "# account for when p and/or v are not found in the table.", "global", "A", ",", "p_keys", ",", "v_keys", "if", "p", "<", ".1", "or", "p", ">", ".999", ":", "raise", "ValueError", "(", "'p must be between .1 and .999'", ")", "if", "p", "<", ".9", ":", "if", "v", "<", "2", ":", "raise", "ValueError", "(", "'v must be > 2 when p < .9'", ")", "else", ":", "if", "v", "<", "1", ":", "raise", "ValueError", "(", "'v must be > 1 when p >= .9'", ")", "# The easy case. A tabled value is requested.", "#numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' :", "p", "=", "float", "(", "p", ")", "if", "isinstance", "(", "v", ",", "np", ".", "ndarray", ")", ":", "v", "=", "v", ".", "item", "(", ")", "if", "(", "p", ",", "v", ")", "in", "A", ":", "y", "=", "_func", "(", "A", "[", "(", "p", ",", "v", ")", "]", ",", "p", ",", "r", ",", "v", ")", "+", "1.", "elif", "p", "not", "in", "p_keys", "and", "v", "not", "in", "v_keys", "+", "(", "[", "]", ",", "[", "1", "]", ")", "[", "p", ">=", ".90", "]", ":", "# find the 3 closest v values", "v0", ",", "v1", ",", "v2", "=", "_select_vs", "(", "v", ",", "p", ")", "# find the 3 closest p values", "p0", ",", "p1", ",", "p2", "=", "_select_ps", "(", "p", ")", "# calculate r0, r1, and r2", "r0_sq", "=", "_interpolate_p", "(", "p", ",", "r", ",", "v0", ")", "**", "2", "r1_sq", "=", "_interpolate_p", "(", "p", ",", "r", ",", "v1", ")", "**", "2", "r2_sq", "=", "_interpolate_p", "(", "p", ",", "r", ",", "v2", ")", "**", "2", "# transform v", "v_", ",", "v0_", ",", "v1_", ",", "v2_", "=", "1.", "/", "v", ",", "1.", "/", "v0", ",", "1.", "/", "v1", ",", "1.", "/", "v2", "# calculate derivatives for quadratic interpolation", "d2", "=", "2.", "*", "(", "(", "r2_sq", "-", "r1_sq", ")", "/", "(", "v2_", "-", "v1_", ")", "-", "(", "r0_sq", "-", "r1_sq", ")", "/", "(", "v0_", "-", "v1_", ")", ")", "/", "(", "v2_", "-", "v0_", ")", "if", "(", "v2_", "+", "v0_", ")", ">=", "(", "v1_", "+", "v1_", ")", ":", "d1", "=", "(", "r2_sq", "-", "r1_sq", ")", "/", "(", "v2_", "-", "v1_", ")", "-", "0.5", "*", "d2", "*", "(", "v2_", "-", "v1_", ")", "else", ":", "d1", "=", "(", "r1_sq", "-", "r0_sq", ")", "/", "(", "v1_", "-", "v0_", ")", "+", "0.5", "*", "d2", "*", "(", "v1_", "-", "v0_", ")", "d0", "=", "r1_sq", "# calculate y", "y", "=", "math", ".", "sqrt", "(", "(", "d2", "/", "2.", ")", "*", "(", "v_", "-", "v1_", ")", "**", "2.", "+", "d1", "*", "(", "v_", "-", "v1_", ")", "+", "d0", ")", "elif", "v", "not", "in", "v_keys", "+", "(", "[", "]", ",", "[", "1", "]", ")", "[", "p", ">=", ".90", "]", ":", "y", "=", "_interpolate_v", "(", "p", ",", "r", ",", "v", ")", "elif", "p", "not", "in", "p_keys", ":", "y", "=", "_interpolate_p", "(", "p", ",", "r", ",", "v", ")", "return", "math", ".", "sqrt", "(", "2", ")", "*", "-", "y", "*", "scipy", ".", "stats", ".", "t", ".", "isf", "(", "(", "1.", "+", "p", ")", "/", "2.", ",", "max", "(", "v", ",", "1e38", ")", ")" ]
scalar version of qsturng
[ "scalar", "version", "of", "qsturng" ]
python
train
samuelcolvin/arq
docs/examples/job_results.py
https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/docs/examples/job_results.py#L12-L50
async def main(): redis = await create_pool(RedisSettings()) job = await redis.enqueue_job('the_task') # get the job's id print(job.job_id) """ > 68362958a244465b9be909db4b7b5ab4 (or whatever) """ # get information about the job, will include results if the job has finished, but # doesn't await the job's result debug(await job.info()) """ > docs/examples/job_results.py:23 main JobDef( function='the_task', args=(), kwargs={}, job_try=None, enqueue_time=datetime.datetime(2019, 4, 23, 13, 58, 56, 781000), score=1556027936781 ) (JobDef) """ # get the Job's status print(await job.status()) """ > JobStatus.queued """ # poll redis for the job result, if the job raised an exception, # it will be raised here # (You'll need the worker running at the same time to get a result here) print(await job.result(timeout=5)) """ > 42 """
[ "async", "def", "main", "(", ")", ":", "redis", "=", "await", "create_pool", "(", "RedisSettings", "(", ")", ")", "job", "=", "await", "redis", ".", "enqueue_job", "(", "'the_task'", ")", "# get the job's id", "print", "(", "job", ".", "job_id", ")", "# get information about the job, will include results if the job has finished, but", "# doesn't await the job's result", "debug", "(", "await", "job", ".", "info", "(", ")", ")", "\"\"\"\n > docs/examples/job_results.py:23 main\n JobDef(\n function='the_task',\n args=(),\n kwargs={},\n job_try=None,\n enqueue_time=datetime.datetime(2019, 4, 23, 13, 58, 56, 781000),\n score=1556027936781\n ) (JobDef)\n \"\"\"", "# get the Job's status", "print", "(", "await", "job", ".", "status", "(", ")", ")", "\"\"\"\n > JobStatus.queued\n \"\"\"", "# poll redis for the job result, if the job raised an exception,", "# it will be raised here", "# (You'll need the worker running at the same time to get a result here)", "print", "(", "await", "job", ".", "result", "(", "timeout", "=", "5", ")", ")", "\"\"\"\n > 42\n \"\"\"" ]
> 68362958a244465b9be909db4b7b5ab4 (or whatever)
[ ">", "68362958a244465b9be909db4b7b5ab4", "(", "or", "whatever", ")" ]
python
train
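The example above only shows the enqueueing side; a matching worker could look roughly like this. This is a sketch following arq's documented WorkerSettings convention, and the task body (returning the 42 seen in the result above) is made up.

from arq.connections import RedisSettings

async def the_task(ctx):
    # ctx carries the redis connection, job id, etc.; the return value becomes job.result()
    return 42

class WorkerSettings:
    functions = [the_task]
    redis_settings = RedisSettings()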
locationlabs/mockredis
mockredis/client.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L1069-L1080
def srem(self, key, *values): """Emulate srem.""" redis_set = self._get_set(key, 'SREM') if not redis_set: return 0 before_count = len(redis_set) for value in values: redis_set.discard(self._encode(value)) after_count = len(redis_set) if before_count > 0 and len(redis_set) == 0: self.delete(key) return before_count - after_count
[ "def", "srem", "(", "self", ",", "key", ",", "*", "values", ")", ":", "redis_set", "=", "self", ".", "_get_set", "(", "key", ",", "'SREM'", ")", "if", "not", "redis_set", ":", "return", "0", "before_count", "=", "len", "(", "redis_set", ")", "for", "value", "in", "values", ":", "redis_set", ".", "discard", "(", "self", ".", "_encode", "(", "value", ")", ")", "after_count", "=", "len", "(", "redis_set", ")", "if", "before_count", ">", "0", "and", "len", "(", "redis_set", ")", "==", "0", ":", "self", ".", "delete", "(", "key", ")", "return", "before_count", "-", "after_count" ]
Emulate srem.
[ "Emulate", "srem", "." ]
python
train
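A quick, illustrative exercise of the srem emulation above, assuming mockredis's MockRedis client; key and member names are arbitrary.

from mockredis import MockRedis

r = MockRedis()
r.sadd('colors', 'red', 'green', 'blue')
removed = r.srem('colors', 'red', 'purple')  # 'purple' was never a member
assert removed == 1
r.srem('colors', 'green', 'blue')            # emptying the set deletes the key
assert not r.exists('colors')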
quantopian/alphalens
alphalens/utils.py
https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/utils.py#L420-L629
def get_clean_factor(factor, forward_returns, groupby=None, binning_by_group=False, quantiles=5, bins=None, groupby_labels=None, max_loss=0.35, zero_aware=False): """ Formats the factor data, forward return data, and group mappings into a DataFrame that contains aligned MultiIndex indices of timestamp and asset. The returned data will be formatted to be suitable for Alphalens functions. It is safe to skip a call to this function and still make use of Alphalens functionalities as long as the factor data conforms to the format returned from get_clean_factor_and_forward_returns and documented here Parameters ---------- factor : pd.Series - MultiIndex A MultiIndex Series indexed by timestamp (level 0) and asset (level 1), containing the values for a single alpha factor. :: ----------------------------------- date | asset | ----------------------------------- | AAPL | 0.5 ----------------------- | BA | -1.1 ----------------------- 2014-01-01 | CMG | 1.7 ----------------------- | DAL | -0.1 ----------------------- | LULU | 2.7 ----------------------- forward_returns : pd.DataFrame - MultiIndex A MultiIndex DataFrame indexed by timestamp (level 0) and asset (level 1), containing the forward returns for assets. Forward returns column names must follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc). 'date' index freq property must be set to a trading calendar (pandas DateOffset), see infer_trading_calendar for more details. This information is currently used only in cumulative returns computation :: --------------------------------------- | | 1D | 5D | 10D --------------------------------------- date | asset | | | --------------------------------------- | AAPL | 0.09|-0.01|-0.079 ---------------------------- | BA | 0.02| 0.06| 0.020 ---------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036 ---------------------------- | DAL |-0.02|-0.06|-0.029 ---------------------------- | LULU |-0.03| 0.05|-0.009 ---------------------------- groupby : pd.Series - MultiIndex or dict Either A MultiIndex Series indexed by date and asset, containing the period wise group codes for each asset, or a dict of asset to group mappings. If a dict is passed, it is assumed that group mappings are unchanged for the entire time period of the passed factor data. binning_by_group : bool If True, compute quantile buckets separately for each group. This is useful when the factor values range vary considerably across gorups so that it is wise to make the binning group relative. You should probably enable this if the factor is intended to be analyzed for a group neutral portfolio quantiles : int or sequence[float] Number of equal-sized quantile buckets to use in factor bucketing. Alternately sequence of quantiles, allowing non-equal-sized buckets e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95] Only one of 'quantiles' or 'bins' can be not-None bins : int or sequence[float] Number of equal-width (valuewise) bins to use in factor bucketing. Alternately sequence of bin edges allowing for non-uniform bin width e.g. [-4, -2, -0.5, 0, 10] Chooses the buckets to be evenly spaced according to the values themselves. Useful when the factor contains discrete values. Only one of 'quantiles' or 'bins' can be not-None groupby_labels : dict A dictionary keyed by group code with values corresponding to the display name for each group. 
max_loss : float, optional Maximum percentage (0.00 to 1.00) of factor data dropping allowed, computed comparing the number of items in the input factor index and the number of items in the output DataFrame index. Factor data can be partially dropped due to being flawed itself (e.g. NaNs), not having provided enough price data to compute forward returns for all factor values, or because it is not possible to perform binning. Set max_loss=0 to avoid Exceptions suppression. zero_aware : bool, optional If True, compute quantile buckets separately for positive and negative signal values. This is useful if your signal is centered and zero is the separation between long and short signals, respectively. 'quantiles' is None. Returns ------- merged_data : pd.DataFrame - MultiIndex A MultiIndex Series indexed by date (level 0) and asset (level 1), containing the values for a single alpha factor, forward returns for each period, the factor quantile/bin that factor value belongs to, and (optionally) the group the asset belongs to. - forward returns column names follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc) - 'date' index freq property (merged_data.index.levels[0].freq) is the same as that of the input forward returns data. This is currently used only in cumulative returns computation :: ------------------------------------------------------------------- | | 1D | 5D | 10D |factor|group|factor_quantile ------------------------------------------------------------------- date | asset | | | | | | ------------------------------------------------------------------- | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3 -------------------------------------------------------- | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5 -------------------------------------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1 -------------------------------------------------------- | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5 -------------------------------------------------------- | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2 -------------------------------------------------------- """ initial_amount = float(len(factor.index)) factor_copy = factor.copy() factor_copy.index = factor_copy.index.rename(['date', 'asset']) merged_data = forward_returns.copy() merged_data['factor'] = factor_copy if groupby is not None: if isinstance(groupby, dict): diff = set(factor_copy.index.get_level_values( 'asset')) - set(groupby.keys()) if len(diff) > 0: raise KeyError( "Assets {} not in group mapping".format( list(diff))) ss = pd.Series(groupby) groupby = pd.Series(index=factor_copy.index, data=ss[factor_copy.index.get_level_values( 'asset')].values) if groupby_labels is not None: diff = set(groupby.values) - set(groupby_labels.keys()) if len(diff) > 0: raise KeyError( "groups {} not in passed group names".format( list(diff))) sn = pd.Series(groupby_labels) groupby = pd.Series(index=groupby.index, data=sn[groupby.values].values) merged_data['group'] = groupby.astype('category') merged_data = merged_data.dropna() fwdret_amount = float(len(merged_data.index)) no_raise = False if max_loss == 0 else True quantile_data = quantize_factor( merged_data, quantiles, bins, binning_by_group, no_raise, zero_aware ) merged_data['factor_quantile'] = quantile_data merged_data = merged_data.dropna() binning_amount = float(len(merged_data.index)) tot_loss = (initial_amount - binning_amount) / initial_amount fwdret_loss = (initial_amount - fwdret_amount) / initial_amount bin_loss = tot_loss - fwdret_loss print("Dropped %.1f%% entries 
from factor data: %.1f%% in forward " "returns computation and %.1f%% in binning phase " "(set max_loss=0 to see potentially suppressed Exceptions)." % (tot_loss * 100, fwdret_loss * 100, bin_loss * 100)) if tot_loss > max_loss: message = ("max_loss (%.1f%%) exceeded %.1f%%, consider increasing it." % (max_loss * 100, tot_loss * 100)) raise MaxLossExceededError(message) else: print("max_loss is %.1f%%, not exceeded: OK!" % (max_loss * 100)) return merged_data
[ "def", "get_clean_factor", "(", "factor", ",", "forward_returns", ",", "groupby", "=", "None", ",", "binning_by_group", "=", "False", ",", "quantiles", "=", "5", ",", "bins", "=", "None", ",", "groupby_labels", "=", "None", ",", "max_loss", "=", "0.35", ",", "zero_aware", "=", "False", ")", ":", "initial_amount", "=", "float", "(", "len", "(", "factor", ".", "index", ")", ")", "factor_copy", "=", "factor", ".", "copy", "(", ")", "factor_copy", ".", "index", "=", "factor_copy", ".", "index", ".", "rename", "(", "[", "'date'", ",", "'asset'", "]", ")", "merged_data", "=", "forward_returns", ".", "copy", "(", ")", "merged_data", "[", "'factor'", "]", "=", "factor_copy", "if", "groupby", "is", "not", "None", ":", "if", "isinstance", "(", "groupby", ",", "dict", ")", ":", "diff", "=", "set", "(", "factor_copy", ".", "index", ".", "get_level_values", "(", "'asset'", ")", ")", "-", "set", "(", "groupby", ".", "keys", "(", ")", ")", "if", "len", "(", "diff", ")", ">", "0", ":", "raise", "KeyError", "(", "\"Assets {} not in group mapping\"", ".", "format", "(", "list", "(", "diff", ")", ")", ")", "ss", "=", "pd", ".", "Series", "(", "groupby", ")", "groupby", "=", "pd", ".", "Series", "(", "index", "=", "factor_copy", ".", "index", ",", "data", "=", "ss", "[", "factor_copy", ".", "index", ".", "get_level_values", "(", "'asset'", ")", "]", ".", "values", ")", "if", "groupby_labels", "is", "not", "None", ":", "diff", "=", "set", "(", "groupby", ".", "values", ")", "-", "set", "(", "groupby_labels", ".", "keys", "(", ")", ")", "if", "len", "(", "diff", ")", ">", "0", ":", "raise", "KeyError", "(", "\"groups {} not in passed group names\"", ".", "format", "(", "list", "(", "diff", ")", ")", ")", "sn", "=", "pd", ".", "Series", "(", "groupby_labels", ")", "groupby", "=", "pd", ".", "Series", "(", "index", "=", "groupby", ".", "index", ",", "data", "=", "sn", "[", "groupby", ".", "values", "]", ".", "values", ")", "merged_data", "[", "'group'", "]", "=", "groupby", ".", "astype", "(", "'category'", ")", "merged_data", "=", "merged_data", ".", "dropna", "(", ")", "fwdret_amount", "=", "float", "(", "len", "(", "merged_data", ".", "index", ")", ")", "no_raise", "=", "False", "if", "max_loss", "==", "0", "else", "True", "quantile_data", "=", "quantize_factor", "(", "merged_data", ",", "quantiles", ",", "bins", ",", "binning_by_group", ",", "no_raise", ",", "zero_aware", ")", "merged_data", "[", "'factor_quantile'", "]", "=", "quantile_data", "merged_data", "=", "merged_data", ".", "dropna", "(", ")", "binning_amount", "=", "float", "(", "len", "(", "merged_data", ".", "index", ")", ")", "tot_loss", "=", "(", "initial_amount", "-", "binning_amount", ")", "/", "initial_amount", "fwdret_loss", "=", "(", "initial_amount", "-", "fwdret_amount", ")", "/", "initial_amount", "bin_loss", "=", "tot_loss", "-", "fwdret_loss", "print", "(", "\"Dropped %.1f%% entries from factor data: %.1f%% in forward \"", "\"returns computation and %.1f%% in binning phase \"", "\"(set max_loss=0 to see potentially suppressed Exceptions).\"", "%", "(", "tot_loss", "*", "100", ",", "fwdret_loss", "*", "100", ",", "bin_loss", "*", "100", ")", ")", "if", "tot_loss", ">", "max_loss", ":", "message", "=", "(", "\"max_loss (%.1f%%) exceeded %.1f%%, consider increasing it.\"", "%", "(", "max_loss", "*", "100", ",", "tot_loss", "*", "100", ")", ")", "raise", "MaxLossExceededError", "(", "message", ")", "else", ":", "print", "(", "\"max_loss is %.1f%%, not exceeded: OK!\"", "%", "(", "max_loss", "*", "100", ")", ")", "return", "merged_data" ]
Formats the factor data, forward return data, and group mappings into a DataFrame that contains aligned MultiIndex indices of timestamp and asset. The returned data will be formatted to be suitable for Alphalens functions. It is safe to skip a call to this function and still make use of Alphalens functionalities as long as the factor data conforms to the format returned from get_clean_factor_and_forward_returns and documented here Parameters ---------- factor : pd.Series - MultiIndex A MultiIndex Series indexed by timestamp (level 0) and asset (level 1), containing the values for a single alpha factor. :: ----------------------------------- date | asset | ----------------------------------- | AAPL | 0.5 ----------------------- | BA | -1.1 ----------------------- 2014-01-01 | CMG | 1.7 ----------------------- | DAL | -0.1 ----------------------- | LULU | 2.7 ----------------------- forward_returns : pd.DataFrame - MultiIndex A MultiIndex DataFrame indexed by timestamp (level 0) and asset (level 1), containing the forward returns for assets. Forward returns column names must follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc). 'date' index freq property must be set to a trading calendar (pandas DateOffset), see infer_trading_calendar for more details. This information is currently used only in cumulative returns computation :: --------------------------------------- | | 1D | 5D | 10D --------------------------------------- date | asset | | | --------------------------------------- | AAPL | 0.09|-0.01|-0.079 ---------------------------- | BA | 0.02| 0.06| 0.020 ---------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036 ---------------------------- | DAL |-0.02|-0.06|-0.029 ---------------------------- | LULU |-0.03| 0.05|-0.009 ---------------------------- groupby : pd.Series - MultiIndex or dict Either A MultiIndex Series indexed by date and asset, containing the period wise group codes for each asset, or a dict of asset to group mappings. If a dict is passed, it is assumed that group mappings are unchanged for the entire time period of the passed factor data. binning_by_group : bool If True, compute quantile buckets separately for each group. This is useful when the factor values range vary considerably across gorups so that it is wise to make the binning group relative. You should probably enable this if the factor is intended to be analyzed for a group neutral portfolio quantiles : int or sequence[float] Number of equal-sized quantile buckets to use in factor bucketing. Alternately sequence of quantiles, allowing non-equal-sized buckets e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95] Only one of 'quantiles' or 'bins' can be not-None bins : int or sequence[float] Number of equal-width (valuewise) bins to use in factor bucketing. Alternately sequence of bin edges allowing for non-uniform bin width e.g. [-4, -2, -0.5, 0, 10] Chooses the buckets to be evenly spaced according to the values themselves. Useful when the factor contains discrete values. Only one of 'quantiles' or 'bins' can be not-None groupby_labels : dict A dictionary keyed by group code with values corresponding to the display name for each group. max_loss : float, optional Maximum percentage (0.00 to 1.00) of factor data dropping allowed, computed comparing the number of items in the input factor index and the number of items in the output DataFrame index. Factor data can be partially dropped due to being flawed itself (e.g. 
NaNs), not having provided enough price data to compute forward returns for all factor values, or because it is not possible to perform binning. Set max_loss=0 to avoid Exceptions suppression. zero_aware : bool, optional If True, compute quantile buckets separately for positive and negative signal values. This is useful if your signal is centered and zero is the separation between long and short signals, respectively. 'quantiles' is None. Returns ------- merged_data : pd.DataFrame - MultiIndex A MultiIndex Series indexed by date (level 0) and asset (level 1), containing the values for a single alpha factor, forward returns for each period, the factor quantile/bin that factor value belongs to, and (optionally) the group the asset belongs to. - forward returns column names follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc) - 'date' index freq property (merged_data.index.levels[0].freq) is the same as that of the input forward returns data. This is currently used only in cumulative returns computation :: ------------------------------------------------------------------- | | 1D | 5D | 10D |factor|group|factor_quantile ------------------------------------------------------------------- date | asset | | | | | | ------------------------------------------------------------------- | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3 -------------------------------------------------------- | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5 -------------------------------------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1 -------------------------------------------------------- | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5 -------------------------------------------------------- | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2 --------------------------------------------------------
[ "Formats", "the", "factor", "data", "forward", "return", "data", "and", "group", "mappings", "into", "a", "DataFrame", "that", "contains", "aligned", "MultiIndex", "indices", "of", "timestamp", "and", "asset", ".", "The", "returned", "data", "will", "be", "formatted", "to", "be", "suitable", "for", "Alphalens", "functions", "." ]
python
train
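A minimal sketch of calling get_clean_factor with toy data shaped like the docstring's example; the values are synthetic and the import path follows the record's module path.

import pandas as pd
from alphalens.utils import get_clean_factor

dates = pd.date_range('2014-01-01', periods=5)
assets = ['AAPL', 'BA', 'CMG', 'DAL', 'LULU']
idx = pd.MultiIndex.from_product([dates, assets], names=['date', 'asset'])

factor = pd.Series(range(len(idx)), index=idx, dtype=float)
forward_returns = pd.DataFrame({'1D': 0.01, '5D': 0.05}, index=idx)

clean = get_clean_factor(factor, forward_returns, quantiles=5, max_loss=0.35)
print(clean[['1D', '5D', 'factor', 'factor_quantile']].head())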
RedHatInsights/insights-core
insights/client/collection_rules.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L149-L158
def get_collection_rules_gpg(self, collection_rules): """ Download the collection rules gpg signature """ sig_text = self.fetch_gpg() sig_response = NamedTemporaryFile(suffix=".asc") sig_response.write(sig_text.encode('utf-8')) sig_response.file.flush() self.validate_gpg_sig(collection_rules.name, sig_response.name) self.write_collection_data(self.collection_rules_file + ".asc", sig_text)
[ "def", "get_collection_rules_gpg", "(", "self", ",", "collection_rules", ")", ":", "sig_text", "=", "self", ".", "fetch_gpg", "(", ")", "sig_response", "=", "NamedTemporaryFile", "(", "suffix", "=", "\".asc\"", ")", "sig_response", ".", "write", "(", "sig_text", ".", "encode", "(", "'utf-8'", ")", ")", "sig_response", ".", "file", ".", "flush", "(", ")", "self", ".", "validate_gpg_sig", "(", "collection_rules", ".", "name", ",", "sig_response", ".", "name", ")", "self", ".", "write_collection_data", "(", "self", ".", "collection_rules_file", "+", "\".asc\"", ",", "sig_text", ")" ]
Download the collection rules gpg signature
[ "Download", "the", "collection", "rules", "gpg", "signature" ]
python
train
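The temp-file pattern used above, shown in isolation: the detached signature has to land on disk so a GPG verifier can be handed a real path. The signature text here is a placeholder.

from tempfile import NamedTemporaryFile

sig_text = u"-----BEGIN PGP SIGNATURE-----\n...\n-----END PGP SIGNATURE-----\n"
with NamedTemporaryFile(suffix=".asc") as sig_response:
    sig_response.write(sig_text.encode('utf-8'))
    sig_response.flush()
    # sig_response.name is the path a check like validate_gpg_sig would receive
    print(sig_response.name)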
micha030201/aionationstates
aionationstates/wa_.py
https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/wa_.py#L68-L83
def repeal_target(self): """The resolution this resolution has repealed, or is attempting to repeal. Returns ------- :class:`ApiQuery` of :class:`Resolution` Raises ------ TypeError: If the resolution doesn't repeal anything. """ if not self.category == 'Repeal': raise TypeError("This resolution doesn't repeal anything") return wa.resolution(int(self.option) + 1)
[ "def", "repeal_target", "(", "self", ")", ":", "if", "not", "self", ".", "category", "==", "'Repeal'", ":", "raise", "TypeError", "(", "\"This resolution doesn't repeal anything\"", ")", "return", "wa", ".", "resolution", "(", "int", "(", "self", ".", "option", ")", "+", "1", ")" ]
The resolution this resolution has repealed, or is attempting to repeal. Returns ------- :class:`ApiQuery` of :class:`Resolution` Raises ------ TypeError: If the resolution doesn't repeal anything.
[ "The", "resolution", "this", "resolution", "has", "repealed", "or", "is", "attempting", "to", "repeal", "." ]
python
train
tensorflow/tensorboard
tensorboard/lazy.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/lazy.py#L27-L76
def lazy_load(name): """Decorator to define a function that lazily loads the module 'name'. This can be used to defer importing troublesome dependencies - e.g. ones that are large and infrequently used, or that cause a dependency cycle - until they are actually used. Args: name: the fully-qualified name of the module; typically the last segment of 'name' matches the name of the decorated function Returns: Decorator function that produces a lazy-loading module 'name' backed by the underlying decorated function. """ def wrapper(load_fn): # Wrap load_fn to call it exactly once and update __dict__ afterwards to # make future lookups efficient (only failed lookups call __getattr__). @_memoize def load_once(self): if load_once.loading: raise ImportError("Circular import when resolving LazyModule %r" % name) load_once.loading = True try: module = load_fn() finally: load_once.loading = False self.__dict__.update(module.__dict__) load_once.loaded = True return module load_once.loading = False load_once.loaded = False # Define a module that proxies getattr() and dir() to the result of calling # load_once() the first time it's needed. The class is nested so we can close # over load_once() and avoid polluting the module's attrs with our own state. class LazyModule(types.ModuleType): def __getattr__(self, attr_name): return getattr(load_once(self), attr_name) def __dir__(self): return dir(load_once(self)) def __repr__(self): if load_once.loaded: return '<%r via LazyModule (loaded)>' % load_once(self) return '<module %r via LazyModule (not yet loaded)>' % self.__name__ return LazyModule(name) return wrapper
[ "def", "lazy_load", "(", "name", ")", ":", "def", "wrapper", "(", "load_fn", ")", ":", "# Wrap load_fn to call it exactly once and update __dict__ afterwards to", "# make future lookups efficient (only failed lookups call __getattr__).", "@", "_memoize", "def", "load_once", "(", "self", ")", ":", "if", "load_once", ".", "loading", ":", "raise", "ImportError", "(", "\"Circular import when resolving LazyModule %r\"", "%", "name", ")", "load_once", ".", "loading", "=", "True", "try", ":", "module", "=", "load_fn", "(", ")", "finally", ":", "load_once", ".", "loading", "=", "False", "self", ".", "__dict__", ".", "update", "(", "module", ".", "__dict__", ")", "load_once", ".", "loaded", "=", "True", "return", "module", "load_once", ".", "loading", "=", "False", "load_once", ".", "loaded", "=", "False", "# Define a module that proxies getattr() and dir() to the result of calling", "# load_once() the first time it's needed. The class is nested so we can close", "# over load_once() and avoid polluting the module's attrs with our own state.", "class", "LazyModule", "(", "types", ".", "ModuleType", ")", ":", "def", "__getattr__", "(", "self", ",", "attr_name", ")", ":", "return", "getattr", "(", "load_once", "(", "self", ")", ",", "attr_name", ")", "def", "__dir__", "(", "self", ")", ":", "return", "dir", "(", "load_once", "(", "self", ")", ")", "def", "__repr__", "(", "self", ")", ":", "if", "load_once", ".", "loaded", ":", "return", "'<%r via LazyModule (loaded)>'", "%", "load_once", "(", "self", ")", "return", "'<module %r via LazyModule (not yet loaded)>'", "%", "self", ".", "__name__", "return", "LazyModule", "(", "name", ")", "return", "wrapper" ]
Decorator to define a function that lazily loads the module 'name'. This can be used to defer importing troublesome dependencies - e.g. ones that are large and infrequently used, or that cause a dependency cycle - until they are actually used. Args: name: the fully-qualified name of the module; typically the last segment of 'name' matches the name of the decorated function Returns: Decorator function that produces a lazy-loading module 'name' backed by the underlying decorated function.
[ "Decorator", "to", "define", "a", "function", "that", "lazily", "loads", "the", "module", "name", "." ]
python
train
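An illustrative use of the lazy_load decorator defined above; the wrapped module name is just an example, and nothing is imported until the first attribute lookup.

@lazy_load("json")
def json():
    import json
    return json

print(json)            # <module 'json' via LazyModule (not yet loaded)>
print(json.dumps({}))  # first attribute access performs the real import
print(json)            # now reports the loaded module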
saltstack/salt
salt/utils/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L576-L592
def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask
[ "def", "natural_ipv4_netmask", "(", "ip", ",", "fmt", "=", "'prefixlen'", ")", ":", "bits", "=", "_ipv4_to_bits", "(", "ip", ")", "if", "bits", ".", "startswith", "(", "'11'", ")", ":", "mask", "=", "'24'", "elif", "bits", ".", "startswith", "(", "'1'", ")", ":", "mask", "=", "'16'", "else", ":", "mask", "=", "'8'", "if", "fmt", "==", "'netmask'", ":", "return", "cidr_to_ipv4_netmask", "(", "mask", ")", "else", ":", "return", "'/'", "+", "mask" ]
Returns the "natural" mask of an IPv4 address
[ "Returns", "the", "natural", "mask", "of", "an", "IPv4", "address" ]
python
train
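Expected behaviour of natural_ipv4_netmask above for the three classful cases; the addresses are arbitrary examples.

assert natural_ipv4_netmask('10.0.0.0') == '/8'       # leading bit 0   -> class A
assert natural_ipv4_netmask('172.16.0.0') == '/16'    # leading bits 10 -> class B
assert natural_ipv4_netmask('192.168.1.0') == '/24'   # leading bits 11 -> class C
# fmt='netmask' goes through cidr_to_ipv4_netmask instead of a prefix string
print(natural_ipv4_netmask('192.168.1.0', fmt='netmask'))  # expected: 255.255.255.0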
kislyuk/ensure
ensure/main.py
https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L243-L249
def contains_one_of(self, elements): """ Ensures :attr:`subject` contains exactly one of *elements*, which must be an iterable. """ if sum(e in self._subject for e in elements) != 1: raise self._error_factory(_format("Expected {} to have exactly one of {}", self._subject, elements)) return ChainInspector(self._subject)
[ "def", "contains_one_of", "(", "self", ",", "elements", ")", ":", "if", "sum", "(", "e", "in", "self", ".", "_subject", "for", "e", "in", "elements", ")", "!=", "1", ":", "raise", "self", ".", "_error_factory", "(", "_format", "(", "\"Expected {} to have exactly one of {}\"", ",", "self", ".", "_subject", ",", "elements", ")", ")", "return", "ChainInspector", "(", "self", ".", "_subject", ")" ]
Ensures :attr:`subject` contains exactly one of *elements*, which must be an iterable.
[ "Ensures", ":", "attr", ":", "subject", "contains", "exactly", "one", "of", "*", "elements", "*", "which", "must", "be", "an", "iterable", "." ]
python
train
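Illustrative calls of contains_one_of through ensure's fluent entry point, assuming the usual `from ensure import ensure` import; the values are arbitrary.

from ensure import ensure

ensure([1, 2, 3]).contains_one_of([3, 4, 5])      # exactly one match: passes
try:
    ensure([1, 2, 3]).contains_one_of([2, 3])     # two matches: should raise
except Exception as exc:
    print(exc)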
awslabs/aws-sam-cli
samcli/local/layers/layer_downloader.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/layers/layer_downloader.py#L54-L74
def download_all(self, layers, force=False): """ Download a list of layers to the cache Parameters ---------- layers list(samcli.commands.local.lib.provider.Layer) List of Layers representing the layer to be downloaded force bool True to download the layer even if it exists already on the system Returns ------- List(Path) List of Paths to where the layer was cached """ layer_dirs = [] for layer in layers: layer_dirs.append(self.download(layer, force)) return layer_dirs
[ "def", "download_all", "(", "self", ",", "layers", ",", "force", "=", "False", ")", ":", "layer_dirs", "=", "[", "]", "for", "layer", "in", "layers", ":", "layer_dirs", ".", "append", "(", "self", ".", "download", "(", "layer", ",", "force", ")", ")", "return", "layer_dirs" ]
Download a list of layers to the cache Parameters ---------- layers list(samcli.commands.local.lib.provider.Layer) List of Layers representing the layer to be downloaded force bool True to download the layer even if it exists already on the system Returns ------- List(Path) List of Paths to where the layer was cached
[ "Download", "a", "list", "of", "layers", "to", "the", "cache" ]
python
train
sebp/scikit-survival
sksurv/datasets/base.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L46-L88
def get_x_y(data_frame, attr_labels, pos_label=None, survival=True): """Split data frame into features and labels. Parameters ---------- data_frame : pandas.DataFrame, shape = (n_samples, n_columns) A data frame. attr_labels : sequence of str or None A list of one or more columns that are considered the label. If `survival` is `True`, then attr_labels has two elements: 1) the name of the column denoting the event indicator, and 2) the name of the column denoting the survival time. If the sequence contains `None`, then labels are not retrieved and only a data frame with features is returned. pos_label : any, optional Which value of the event indicator column denotes that a patient experienced an event. This value is ignored if `survival` is `False`. survival : bool, optional, default: True Whether to return `y` that can be used for survival analysis. Returns ------- X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels)) Data frame containing features. y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels)) Data frame containing columns with supervised information. If `survival` was `True`, then the column denoting the event indicator will be boolean and survival times will be float. If `attr_labels` contains `None`, y is set to `None`. """ if survival: if len(attr_labels) != 2: raise ValueError("expected sequence of length two for attr_labels, but got %d" % len(attr_labels)) if pos_label is None: raise ValueError("pos_label needs to be specified if survival=True") return _get_x_y_survival(data_frame, attr_labels[0], attr_labels[1], pos_label) return _get_x_y_other(data_frame, attr_labels)
[ "def", "get_x_y", "(", "data_frame", ",", "attr_labels", ",", "pos_label", "=", "None", ",", "survival", "=", "True", ")", ":", "if", "survival", ":", "if", "len", "(", "attr_labels", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"expected sequence of length two for attr_labels, but got %d\"", "%", "len", "(", "attr_labels", ")", ")", "if", "pos_label", "is", "None", ":", "raise", "ValueError", "(", "\"pos_label needs to be specified if survival=True\"", ")", "return", "_get_x_y_survival", "(", "data_frame", ",", "attr_labels", "[", "0", "]", ",", "attr_labels", "[", "1", "]", ",", "pos_label", ")", "return", "_get_x_y_other", "(", "data_frame", ",", "attr_labels", ")" ]
Split data frame into features and labels. Parameters ---------- data_frame : pandas.DataFrame, shape = (n_samples, n_columns) A data frame. attr_labels : sequence of str or None A list of one or more columns that are considered the label. If `survival` is `True`, then attr_labels has two elements: 1) the name of the column denoting the event indicator, and 2) the name of the column denoting the survival time. If the sequence contains `None`, then labels are not retrieved and only a data frame with features is returned. pos_label : any, optional Which value of the event indicator column denotes that a patient experienced an event. This value is ignored if `survival` is `False`. survival : bool, optional, default: True Whether to return `y` that can be used for survival analysis. Returns ------- X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels)) Data frame containing features. y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels)) Data frame containing columns with supervised information. If `survival` was `True`, then the column denoting the event indicator will be boolean and survival times will be float. If `attr_labels` contains `None`, y is set to `None`.
[ "Split", "data", "frame", "into", "features", "and", "labels", "." ]
python
train
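A toy call of get_x_y mirroring the docstring above; column names and values are made up, and the public import path is assumed.

import pandas as pd
from sksurv.datasets import get_x_y

df = pd.DataFrame({
    'age': [60, 52, 71],
    'karnofsky': [80, 90, 60],
    'event': [True, False, True],
    'time': [12.0, 30.5, 7.2],
})
X, y = get_x_y(df, attr_labels=['event', 'time'], pos_label=True)
# X keeps the remaining feature columns; y holds the event indicator and survival time.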
ecell/ecell4
ecell4/extra/ensemble.py
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/extra/ensemble.py#L503-L523
def run_azure(target, jobs, n=1, nproc=None, path='.', delete=True, config=None, **kwargs): """ Evaluate the given function with each set of arguments, and return a list of results. This function does in parallel with Microsoft Azure Batch. This function is the work in progress. The argument `nproc` doesn't work yet. See `ecell4.extra.azure_batch.run_azure` for details. See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure ecell4.extra.azure_batch.run_azure """ import ecell4.extra.azure_batch as azure_batch return azure_batch.run_azure(target, jobs, n, path, delete, config)
[ "def", "run_azure", "(", "target", ",", "jobs", ",", "n", "=", "1", ",", "nproc", "=", "None", ",", "path", "=", "'.'", ",", "delete", "=", "True", ",", "config", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "ecell4", ".", "extra", ".", "azure_batch", "as", "azure_batch", "return", "azure_batch", ".", "run_azure", "(", "target", ",", "jobs", ",", "n", ",", "path", ",", "delete", ",", "config", ")" ]
Evaluate the given function with each set of arguments, and return a list of results. This function runs in parallel with Microsoft Azure Batch. This function is a work in progress. The argument `nproc` doesn't work yet. See `ecell4.extra.azure_batch.run_azure` for details. See Also -------- ecell4.extra.ensemble.run_serial ecell4.extra.ensemble.run_sge ecell4.extra.ensemble.run_slurm ecell4.extra.ensemble.run_multiprocessing ecell4.extra.ensemble.run_azure ecell4.extra.azure_batch.run_azure
[ "Evaluate", "the", "given", "function", "with", "each", "set", "of", "arguments", "and", "return", "a", "list", "of", "results", ".", "This", "function", "does", "in", "parallel", "with", "Microsoft", "Azure", "Batch", "." ]
python
train
saltstack/salt
salt/states/boto_dynamodb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_dynamodb.py#L617-L668
def _alarms_present(name, alarms, alarms_from_pillar, write_capacity_units, read_capacity_units, region, key, keyid, profile): '''helper method for present. ensure that cloudwatch_alarms are set''' # load data from alarms_from_pillar tmp = copy.deepcopy( __salt__['config.option'](alarms_from_pillar, {}) ) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add dynamodb table to name and description info["name"] = name + " " + info["name"] info["attributes"]["description"] = name + " " + info["attributes"]["description"] # add dimension attribute info["attributes"]["dimensions"] = {"TableName": [name]} if info["attributes"]["metric"] == "ConsumedWriteCapacityUnits" \ and "threshold" not in info["attributes"]: info["attributes"]["threshold"] = math.ceil(write_capacity_units * info["attributes"]["threshold_percent"]) del info["attributes"]["threshold_percent"] # the write_capacity_units is given in unit / second. So we need # to multiply by the period to get the proper threshold. # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html info["attributes"]["threshold"] *= info["attributes"]["period"] if info["attributes"]["metric"] == "ConsumedReadCapacityUnits" \ and "threshold" not in info["attributes"]: info["attributes"]["threshold"] = math.ceil(read_capacity_units * info["attributes"]["threshold_percent"]) del info["attributes"]["threshold_percent"] # the read_capacity_units is given in unit / second. So we need # to multiply by the period to get the proper threshold. # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html info["attributes"]["threshold"] *= info["attributes"]["period"] # set alarm kwargs = { "name": info["name"], "attributes": info["attributes"], "region": region, "key": key, "keyid": keyid, "profile": profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results["result"]: merged_return_value["result"] = results["result"] if results.get("changes", {}) != {}: merged_return_value["changes"][info["name"]] = results["changes"] if "comment" in results: merged_return_value["comment"] += results["comment"] return merged_return_value
[ "def", "_alarms_present", "(", "name", ",", "alarms", ",", "alarms_from_pillar", ",", "write_capacity_units", ",", "read_capacity_units", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", ":", "# load data from alarms_from_pillar", "tmp", "=", "copy", ".", "deepcopy", "(", "__salt__", "[", "'config.option'", "]", "(", "alarms_from_pillar", ",", "{", "}", ")", ")", "# merge with data from alarms", "if", "alarms", ":", "tmp", "=", "dictupdate", ".", "update", "(", "tmp", ",", "alarms", ")", "# set alarms, using boto_cloudwatch_alarm.present", "merged_return_value", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "for", "_", ",", "info", "in", "six", ".", "iteritems", "(", "tmp", ")", ":", "# add dynamodb table to name and description", "info", "[", "\"name\"", "]", "=", "name", "+", "\" \"", "+", "info", "[", "\"name\"", "]", "info", "[", "\"attributes\"", "]", "[", "\"description\"", "]", "=", "name", "+", "\" \"", "+", "info", "[", "\"attributes\"", "]", "[", "\"description\"", "]", "# add dimension attribute", "info", "[", "\"attributes\"", "]", "[", "\"dimensions\"", "]", "=", "{", "\"TableName\"", ":", "[", "name", "]", "}", "if", "info", "[", "\"attributes\"", "]", "[", "\"metric\"", "]", "==", "\"ConsumedWriteCapacityUnits\"", "and", "\"threshold\"", "not", "in", "info", "[", "\"attributes\"", "]", ":", "info", "[", "\"attributes\"", "]", "[", "\"threshold\"", "]", "=", "math", ".", "ceil", "(", "write_capacity_units", "*", "info", "[", "\"attributes\"", "]", "[", "\"threshold_percent\"", "]", ")", "del", "info", "[", "\"attributes\"", "]", "[", "\"threshold_percent\"", "]", "# the write_capacity_units is given in unit / second. So we need", "# to multiply by the period to get the proper threshold.", "# http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html", "info", "[", "\"attributes\"", "]", "[", "\"threshold\"", "]", "*=", "info", "[", "\"attributes\"", "]", "[", "\"period\"", "]", "if", "info", "[", "\"attributes\"", "]", "[", "\"metric\"", "]", "==", "\"ConsumedReadCapacityUnits\"", "and", "\"threshold\"", "not", "in", "info", "[", "\"attributes\"", "]", ":", "info", "[", "\"attributes\"", "]", "[", "\"threshold\"", "]", "=", "math", ".", "ceil", "(", "read_capacity_units", "*", "info", "[", "\"attributes\"", "]", "[", "\"threshold_percent\"", "]", ")", "del", "info", "[", "\"attributes\"", "]", "[", "\"threshold_percent\"", "]", "# the read_capacity_units is given in unit / second. 
So we need", "# to multiply by the period to get the proper threshold.", "# http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html", "info", "[", "\"attributes\"", "]", "[", "\"threshold\"", "]", "*=", "info", "[", "\"attributes\"", "]", "[", "\"period\"", "]", "# set alarm", "kwargs", "=", "{", "\"name\"", ":", "info", "[", "\"name\"", "]", ",", "\"attributes\"", ":", "info", "[", "\"attributes\"", "]", ",", "\"region\"", ":", "region", ",", "\"key\"", ":", "key", ",", "\"keyid\"", ":", "keyid", ",", "\"profile\"", ":", "profile", ",", "}", "results", "=", "__states__", "[", "'boto_cloudwatch_alarm.present'", "]", "(", "*", "*", "kwargs", ")", "if", "not", "results", "[", "\"result\"", "]", ":", "merged_return_value", "[", "\"result\"", "]", "=", "results", "[", "\"result\"", "]", "if", "results", ".", "get", "(", "\"changes\"", ",", "{", "}", ")", "!=", "{", "}", ":", "merged_return_value", "[", "\"changes\"", "]", "[", "info", "[", "\"name\"", "]", "]", "=", "results", "[", "\"changes\"", "]", "if", "\"comment\"", "in", "results", ":", "merged_return_value", "[", "\"comment\"", "]", "+=", "results", "[", "\"comment\"", "]", "return", "merged_return_value" ]
helper method for present. ensure that cloudwatch_alarms are set
[ "helper", "method", "for", "present", ".", "ensure", "that", "cloudwatch_alarms", "are", "set" ]
python
train
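The threshold arithmetic applied above, shown on its own: provisioned capacity is per second, while the CloudWatch alarm evaluates a sum over `period` seconds. The numbers are illustrative.

import math

write_capacity_units = 25      # provisioned units per second
threshold_percent = 0.9
period = 300                   # alarm evaluation period in seconds

threshold = math.ceil(write_capacity_units * threshold_percent)
threshold *= period
print(threshold)               # 23 * 300 = 6900 consumed units per period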
KrzyHonk/bpmn-python
bpmn_python/bpmn_process_csv_export.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_process_csv_export.py#L44-L75
def export_process_to_csv(bpmn_diagram, directory, filename): """ Root method of CSV export functionality. :param bpmn_diagram: an instance of BpmnDiagramGraph class, :param directory: a string object, which is a path of output directory, :param filename: a string object, which is a name of output file. """ nodes = copy.deepcopy(bpmn_diagram.get_nodes()) start_nodes = [] export_elements = [] for node in nodes: incoming_list = node[1].get(consts.Consts.incoming_flow) if len(incoming_list) == 0: start_nodes.append(node) if len(start_nodes) != 1: raise bpmn_exception.BpmnPythonError("Exporting to CSV format accepts only one start event") nodes_classification = utils.BpmnImportUtils.generate_nodes_clasification(bpmn_diagram) start_node = start_nodes.pop() BpmnDiagramGraphCsvExport.export_node(bpmn_diagram, export_elements, start_node, nodes_classification) try: os.makedirs(directory) except OSError as exception: if exception.errno != errno.EEXIST: raise file_object = open(directory + filename, "w") file_object.write("Order,Activity,Condition,Who,Subprocess,Terminated\n") BpmnDiagramGraphCsvExport.write_export_node_to_file(file_object, export_elements) file_object.close()
[ "def", "export_process_to_csv", "(", "bpmn_diagram", ",", "directory", ",", "filename", ")", ":", "nodes", "=", "copy", ".", "deepcopy", "(", "bpmn_diagram", ".", "get_nodes", "(", ")", ")", "start_nodes", "=", "[", "]", "export_elements", "=", "[", "]", "for", "node", "in", "nodes", ":", "incoming_list", "=", "node", "[", "1", "]", ".", "get", "(", "consts", ".", "Consts", ".", "incoming_flow", ")", "if", "len", "(", "incoming_list", ")", "==", "0", ":", "start_nodes", ".", "append", "(", "node", ")", "if", "len", "(", "start_nodes", ")", "!=", "1", ":", "raise", "bpmn_exception", ".", "BpmnPythonError", "(", "\"Exporting to CSV format accepts only one start event\"", ")", "nodes_classification", "=", "utils", ".", "BpmnImportUtils", ".", "generate_nodes_clasification", "(", "bpmn_diagram", ")", "start_node", "=", "start_nodes", ".", "pop", "(", ")", "BpmnDiagramGraphCsvExport", ".", "export_node", "(", "bpmn_diagram", ",", "export_elements", ",", "start_node", ",", "nodes_classification", ")", "try", ":", "os", ".", "makedirs", "(", "directory", ")", "except", "OSError", "as", "exception", ":", "if", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "file_object", "=", "open", "(", "directory", "+", "filename", ",", "\"w\"", ")", "file_object", ".", "write", "(", "\"Order,Activity,Condition,Who,Subprocess,Terminated\\n\"", ")", "BpmnDiagramGraphCsvExport", ".", "write_export_node_to_file", "(", "file_object", ",", "export_elements", ")", "file_object", ".", "close", "(", ")" ]
Root method of CSV export functionality. :param bpmn_diagram: an instance of BpmnDiagramGraph class, :param directory: a string object, which is a path of output directory, :param filename: a string object, which is a name of output file.
[ "Root", "method", "of", "CSV", "export", "functionality", "." ]
python
train
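A hedged end-to-end sketch of the CSV export; the diagram-loading call assumes bpmn_python's BpmnDiagramGraph API and the paths are placeholders. Note that `directory` and `filename` are concatenated directly above, so the directory should end with a separator.

from bpmn_python.bpmn_diagram_rep import BpmnDiagramGraph
from bpmn_python.bpmn_process_csv_export import BpmnDiagramGraphCsvExport

diagram = BpmnDiagramGraph()
diagram.load_diagram_from_xml_file("input/order-process.bpmn")
BpmnDiagramGraphCsvExport.export_process_to_csv(diagram, "output/", "order-process.csv")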
luckydonald/pytgbot
code_generation/output/teleflask_messages.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/teleflask_messages.py#L1936-L1991
def to_array(self): """ Serializes this VoiceMessage to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(VoiceMessage, self).to_array() if isinstance(self.voice, InputFile): array['voice'] = self.voice.to_array() # type InputFile elif isinstance(self.voice, str): array['voice'] = u(self.voice) # py2: type unicode, py3: type str else: raise TypeError('Unknown type, must be one of InputFile, str.') # end if if self.receiver is not None: if isinstance(self.receiver, None): array['chat_id'] = None(self.receiver) # type Noneelif isinstance(self.receiver, str): array['chat_id'] = u(self.receiver) # py2: type unicode, py3: type str elif isinstance(self.receiver, int): array['chat_id'] = int(self.receiver) # type intelse: raise TypeError('Unknown type, must be one of None, str, int.') # end if if self.reply_id is not None: if isinstance(self.reply_id, DEFAULT_MESSAGE_ID): array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id) # type DEFAULT_MESSAGE_IDelif isinstance(self.reply_id, int): array['reply_to_message_id'] = int(self.reply_id) # type intelse: raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.') # end if if self.caption is not None: array['caption'] = u(self.caption) # py2: type unicode, py3: type str if self.parse_mode is not None: array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str if self.duration is not None: array['duration'] = int(self.duration) # type int if self.disable_notification is not None: array['disable_notification'] = bool(self.disable_notification) # type bool if self.reply_markup is not None: if isinstance(self.reply_markup, InlineKeyboardMarkup): array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup elif isinstance(self.reply_markup, ReplyKeyboardMarkup): array['reply_markup'] = self.reply_markup.to_array() # type ReplyKeyboardMarkup elif isinstance(self.reply_markup, ReplyKeyboardRemove): array['reply_markup'] = self.reply_markup.to_array() # type ReplyKeyboardRemove elif isinstance(self.reply_markup, ForceReply): array['reply_markup'] = self.reply_markup.to_array() # type ForceReply else: raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.') # end if return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "VoiceMessage", ",", "self", ")", ".", "to_array", "(", ")", "if", "isinstance", "(", "self", ".", "voice", ",", "InputFile", ")", ":", "array", "[", "'voice'", "]", "=", "self", ".", "voice", ".", "to_array", "(", ")", "# type InputFile", "elif", "isinstance", "(", "self", ".", "voice", ",", "str", ")", ":", "array", "[", "'voice'", "]", "=", "u", "(", "self", ".", "voice", ")", "# py2: type unicode, py3: type str", "else", ":", "raise", "TypeError", "(", "'Unknown type, must be one of InputFile, str.'", ")", "# end if", "if", "self", ".", "receiver", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "receiver", ",", "None", ")", ":", "array", "[", "'chat_id'", "]", "=", "None", "(", "self", ".", "receiver", ")", "# type Noneelif isinstance(self.receiver, str):", "array", "[", "'chat_id'", "]", "=", "u", "(", "self", ".", "receiver", ")", "# py2: type unicode, py3: type str", "elif", "isinstance", "(", "self", ".", "receiver", ",", "int", ")", ":", "array", "[", "'chat_id'", "]", "=", "int", "(", "self", ".", "receiver", ")", "# type intelse:", "raise", "TypeError", "(", "'Unknown type, must be one of None, str, int.'", ")", "# end if", "if", "self", ".", "reply_id", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "reply_id", ",", "DEFAULT_MESSAGE_ID", ")", ":", "array", "[", "'reply_to_message_id'", "]", "=", "DEFAULT_MESSAGE_ID", "(", "self", ".", "reply_id", ")", "# type DEFAULT_MESSAGE_IDelif isinstance(self.reply_id, int):", "array", "[", "'reply_to_message_id'", "]", "=", "int", "(", "self", ".", "reply_id", ")", "# type intelse:", "raise", "TypeError", "(", "'Unknown type, must be one of DEFAULT_MESSAGE_ID, int.'", ")", "# end if", "if", "self", ".", "caption", "is", "not", "None", ":", "array", "[", "'caption'", "]", "=", "u", "(", "self", ".", "caption", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "parse_mode", "is", "not", "None", ":", "array", "[", "'parse_mode'", "]", "=", "u", "(", "self", ".", "parse_mode", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "duration", "is", "not", "None", ":", "array", "[", "'duration'", "]", "=", "int", "(", "self", ".", "duration", ")", "# type int", "if", "self", ".", "disable_notification", "is", "not", "None", ":", "array", "[", "'disable_notification'", "]", "=", "bool", "(", "self", ".", "disable_notification", ")", "# type bool", "if", "self", ".", "reply_markup", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "reply_markup", ",", "InlineKeyboardMarkup", ")", ":", "array", "[", "'reply_markup'", "]", "=", "self", ".", "reply_markup", ".", "to_array", "(", ")", "# type InlineKeyboardMarkup", "elif", "isinstance", "(", "self", ".", "reply_markup", ",", "ReplyKeyboardMarkup", ")", ":", "array", "[", "'reply_markup'", "]", "=", "self", ".", "reply_markup", ".", "to_array", "(", ")", "# type ReplyKeyboardMarkup", "elif", "isinstance", "(", "self", ".", "reply_markup", ",", "ReplyKeyboardRemove", ")", ":", "array", "[", "'reply_markup'", "]", "=", "self", ".", "reply_markup", ".", "to_array", "(", ")", "# type ReplyKeyboardRemove", "elif", "isinstance", "(", "self", ".", "reply_markup", ",", "ForceReply", ")", ":", "array", "[", "'reply_markup'", "]", "=", "self", ".", "reply_markup", ".", "to_array", "(", ")", "# type ForceReply", "else", ":", "raise", "TypeError", "(", "'Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.'", ")", "# end 
if", "return", "array" ]
Serializes this VoiceMessage to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "VoiceMessage", "to", "a", "dictionary", "." ]
python
train
Note: the generated to_array above contains fused lines (comments such as "# type Noneelif isinstance(...)" swallow the following elif/else branches), so the receiver and reply_id blocks are not valid Python as emitted by the code generator.
pyusb/pyusb
usb/legacy.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/legacy.py#L131-L141
def bulkWrite(self, endpoint, buffer, timeout = 100): r"""Perform a bulk write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100) Returns the number of bytes written. """ return self.dev.write(endpoint, buffer, timeout)
[ "def", "bulkWrite", "(", "self", ",", "endpoint", ",", "buffer", ",", "timeout", "=", "100", ")", ":", "return", "self", ".", "dev", ".", "write", "(", "endpoint", ",", "buffer", ",", "timeout", ")" ]
r"""Perform a bulk write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100) Returns the number of bytes written.
[ "r", "Perform", "a", "bulk", "write", "request", "to", "the", "endpoint", "specified", "." ]
python
train
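A rough usage sketch through pyusb's legacy 0.x-style API; the vendor/product IDs and the endpoint number are placeholders for a real device.

import usb.legacy

for bus in usb.legacy.busses():
    for dev in bus.devices:
        if dev.idVendor == 0x1234 and dev.idProduct == 0x5678:  # placeholder IDs
            handle = dev.open()
            written = handle.bulkWrite(0x01, b"hello", timeout=1000)
            print("wrote %d bytes" % written)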
IdentityPython/pysaml2
src/saml2/validate.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/validate.py#L255-L265
def valid_qname(val): """ A qname is either NCName or NCName ':' NCName """ try: (prefix, localpart) = val.split(":") return valid_ncname(prefix) and valid_ncname(localpart) except ValueError: return valid_ncname(val)
[ "def", "valid_qname", "(", "val", ")", ":", "try", ":", "(", "prefix", ",", "localpart", ")", "=", "val", ".", "split", "(", "\":\"", ")", "return", "valid_ncname", "(", "prefix", ")", "and", "valid_ncname", "(", "localpart", ")", "except", "ValueError", ":", "return", "valid_ncname", "(", "val", ")" ]
A qname is either NCName or NCName ':' NCName
[ "A", "qname", "is", "either", "NCName", "or", "NCName", ":", "NCName" ]
python
train
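Expected behaviour of valid_qname above on a prefixed and an unprefixed name; the values are arbitrary.

valid_qname('saml:Assertion')   # prefixed: both halves must be valid NCNames
valid_qname('Assertion')        # no colon: falls back to the plain NCName check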
modin-project/modin
modin/pandas/base.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L457-L506
def apply( self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, convert_dtype=True, args=(), **kwds ): """Apply a function along input axis of DataFrame. Args: func: The function to apply axis: The axis over which to apply the func. broadcast: Whether or not to broadcast. raw: Whether or not to convert to a Series. reduce: Whether or not to try to apply reduction procedures. Returns: Series or DataFrame, depending on func. """ axis = self._get_axis_number(axis) ErrorMessage.non_verified_udf() if isinstance(func, string_types): if axis == 1: kwds["axis"] = axis result = self._string_function(func, *args, **kwds) # Sometimes we can return a scalar here if isinstance(result, BasePandasDataset): return result._query_compiler return result elif isinstance(func, dict): if axis == 1: raise TypeError( "(\"'dict' object is not callable\", " "'occurred at index {0}'".format(self.index[0]) ) if len(self.columns) != len(set(self.columns)): warnings.warn( "duplicate column names not supported with apply().", FutureWarning, stacklevel=2, ) elif not callable(func) and not is_list_like(func): raise TypeError("{} object is not callable".format(type(func))) query_compiler = self._query_compiler.apply(func, axis, *args, **kwds) return query_compiler
[ "def", "apply", "(", "self", ",", "func", ",", "axis", "=", "0", ",", "broadcast", "=", "None", ",", "raw", "=", "False", ",", "reduce", "=", "None", ",", "result_type", "=", "None", ",", "convert_dtype", "=", "True", ",", "args", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "ErrorMessage", ".", "non_verified_udf", "(", ")", "if", "isinstance", "(", "func", ",", "string_types", ")", ":", "if", "axis", "==", "1", ":", "kwds", "[", "\"axis\"", "]", "=", "axis", "result", "=", "self", ".", "_string_function", "(", "func", ",", "*", "args", ",", "*", "*", "kwds", ")", "# Sometimes we can return a scalar here\r", "if", "isinstance", "(", "result", ",", "BasePandasDataset", ")", ":", "return", "result", ".", "_query_compiler", "return", "result", "elif", "isinstance", "(", "func", ",", "dict", ")", ":", "if", "axis", "==", "1", ":", "raise", "TypeError", "(", "\"(\\\"'dict' object is not callable\\\", \"", "\"'occurred at index {0}'\"", ".", "format", "(", "self", ".", "index", "[", "0", "]", ")", ")", "if", "len", "(", "self", ".", "columns", ")", "!=", "len", "(", "set", "(", "self", ".", "columns", ")", ")", ":", "warnings", ".", "warn", "(", "\"duplicate column names not supported with apply().\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ",", ")", "elif", "not", "callable", "(", "func", ")", "and", "not", "is_list_like", "(", "func", ")", ":", "raise", "TypeError", "(", "\"{} object is not callable\"", ".", "format", "(", "type", "(", "func", ")", ")", ")", "query_compiler", "=", "self", ".", "_query_compiler", ".", "apply", "(", "func", ",", "axis", ",", "*", "args", ",", "*", "*", "kwds", ")", "return", "query_compiler" ]
Apply a function along input axis of DataFrame. Args: func: The function to apply axis: The axis over which to apply the func. broadcast: Whether or not to broadcast. raw: Whether or not to convert to a Series. reduce: Whether or not to try to apply reduction procedures. Returns: Series or DataFrame, depending on func.
[ "Apply", "a", "function", "along", "input", "axis", "of", "DataFrame", ".", "Args", ":", "func", ":", "The", "function", "to", "apply", "axis", ":", "The", "axis", "over", "which", "to", "apply", "the", "func", ".", "broadcast", ":", "Whether", "or", "not", "to", "broadcast", ".", "raw", ":", "Whether", "or", "not", "to", "convert", "to", "a", "Series", ".", "reduce", ":", "Whether", "or", "not", "to", "try", "to", "apply", "reduction", "procedures", ".", "Returns", ":", "Series", "or", "DataFrame", "depending", "on", "func", "." ]
python
train
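Illustrative apply calls against the pandas-style API described above, assuming modin.pandas is importable; the frame contents are arbitrary.

import modin.pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
col_sums = df.apply('sum', axis=0)                          # string function name
row_ranges = df.apply(lambda r: r.max() - r.min(), axis=1)  # callable, row-wise
print(col_sums)
print(row_ranges)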
proteanhq/protean
src/protean/core/usecase/generic.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/usecase/generic.py#L157-L168
def process_request(self, request_object): """Process the Delete Resource Request""" # Delete the object by its identifier entity = request_object.entity_cls.get(request_object.identifier) entity.delete() # FIXME Check for return value of `delete()` # We have successfully deleted the object. # Sending a 204 Response code. return ResponseSuccessWithNoContent()
[ "def", "process_request", "(", "self", ",", "request_object", ")", ":", "# Delete the object by its identifier", "entity", "=", "request_object", ".", "entity_cls", ".", "get", "(", "request_object", ".", "identifier", ")", "entity", ".", "delete", "(", ")", "# FIXME Check for return value of `delete()`", "# We have successfully deleted the object.", "# Sending a 204 Response code.", "return", "ResponseSuccessWithNoContent", "(", ")" ]
Process the Delete Resource Request
[ "Process", "the", "Delete", "Resource", "Request" ]
python
train
openknowledge-archive/datapackage-registry-py
datapackage_registry/registry.py
https://github.com/openknowledge-archive/datapackage-registry-py/blob/02ba4d1ae6a75d8960abef0ffec3e9ec49ed26f9/datapackage_registry/registry.py#L75-L87
def _get_profile(self, profile_id): '''Return the profile with the received ID as a dict''' profile_metadata = self._registry.get(profile_id) if not profile_metadata: return path = self._get_absolute_path(profile_metadata.get('schema_path')) if path and os.path.isfile(path): return self._load_json_file_or_url(path) url = profile_metadata.get('schema') if url: return self._load_json_file_or_url(url)
[ "def", "_get_profile", "(", "self", ",", "profile_id", ")", ":", "profile_metadata", "=", "self", ".", "_registry", ".", "get", "(", "profile_id", ")", "if", "not", "profile_metadata", ":", "return", "path", "=", "self", ".", "_get_absolute_path", "(", "profile_metadata", ".", "get", "(", "'schema_path'", ")", ")", "if", "path", "and", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "self", ".", "_load_json_file_or_url", "(", "path", ")", "url", "=", "profile_metadata", ".", "get", "(", "'schema'", ")", "if", "url", ":", "return", "self", ".", "_load_json_file_or_url", "(", "url", ")" ]
Return the profile with the received ID as a dict
[ "Return", "the", "profile", "with", "the", "received", "ID", "as", "a", "dict" ]
python
train
balloob/pychromecast
pychromecast/discovery.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/discovery.py#L147-L159
def get_info_from_service(service, zconf): """ Resolve service_info from service. """ service_info = None try: service_info = zconf.get_service_info('_googlecast._tcp.local.', service) if service_info: _LOGGER.debug( "get_info_from_service resolved service %s to service_info %s", service, service_info) except IOError: pass return service_info
[ "def", "get_info_from_service", "(", "service", ",", "zconf", ")", ":", "service_info", "=", "None", "try", ":", "service_info", "=", "zconf", ".", "get_service_info", "(", "'_googlecast._tcp.local.'", ",", "service", ")", "if", "service_info", ":", "_LOGGER", ".", "debug", "(", "\"get_info_from_service resolved service %s to service_info %s\"", ",", "service", ",", "service_info", ")", "except", "IOError", ":", "pass", "return", "service_info" ]
Resolve service_info from service.
[ "Resolve", "service_info", "from", "service", "." ]
python
train
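A minimal sketch of calling get_info_from_service() from the record above; it assumes the zeroconf package supplies the Zeroconf instance whose get_service_info() the helper wraps, and the service name below is a hypothetical mDNS entry, not one taken from this document.

from zeroconf import Zeroconf
from pychromecast.discovery import get_info_from_service

zconf = Zeroconf()
service = "Living-Room-TV._googlecast._tcp.local."  # hypothetical service name
info = get_info_from_service(service, zconf)         # returns None on IOError or no answer
if info is not None:
    print(info.server, info.port)
zconf.close()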
jstitch/MambuPy
MambuPy/rest/mambuclient.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuclient.py#L136-L162
def setGroups(self, *args, **kwargs): """Adds the groups to which this client belongs. The 'groupKeys' field of the client holds a list of the encodedKeys of the groups to which this client belongs. Returns the number of requests done to Mambu. """ requests = 0 groups = [] try: for gk in self['groupKeys']: try: g = self.mambugroupclass(entid=gk, *args, **kwargs) except AttributeError as ae: from .mambugroup import MambuGroup self.mambugroupclass = MambuGroup g = self.mambugroupclass(entid=gk, *args, **kwargs) requests += 1 groups.append(g) except KeyError: pass self['groups'] = groups return requests
[ "def", "setGroups", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requests", "=", "0", "groups", "=", "[", "]", "try", ":", "for", "gk", "in", "self", "[", "'groupKeys'", "]", ":", "try", ":", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", "as", "ae", ":", "from", ".", "mambugroup", "import", "MambuGroup", "self", ".", "mambugroupclass", "=", "MambuGroup", "g", "=", "self", ".", "mambugroupclass", "(", "entid", "=", "gk", ",", "*", "args", ",", "*", "*", "kwargs", ")", "requests", "+=", "1", "groups", ".", "append", "(", "g", ")", "except", "KeyError", ":", "pass", "self", "[", "'groups'", "]", "=", "groups", "return", "requests" ]
Adds the groups to which this client belongs. The 'groupKeys' field of the client holds a list of the encodedKeys of the groups to which this client belongs. Returns the number of requests done to Mambu.
[ "Adds", "the", "groups", "to", "which", "this", "client", "belongs", "." ]
python
train
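A hedged usage sketch for setGroups() from the record above; the entity id is hypothetical and a configured Mambu tenant with valid API credentials is assumed (instantiating MambuClient normally triggers the REST call).

from MambuPy.rest.mambuclient import MambuClient

client = MambuClient(entid="ABC123")      # hypothetical client id
extra_requests = client.setGroups()       # fetches one MambuGroup per groupKey
print(extra_requests, "extra request(s);", len(client["groups"]), "group(s) attached")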
apple/turicreate
src/unity/python/turicreate/toolkits/object_detector/_detection.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/_detection.py#L17-L24
def bbox_to_ybox(bbox): """Convert from corner bounding box to center/shape""" return [ (bbox[1] + bbox[3]) / 2, (bbox[0] + bbox[2]) / 2, (bbox[3] - bbox[1]), (bbox[2] - bbox[0]), ]
[ "def", "bbox_to_ybox", "(", "bbox", ")", ":", "return", "[", "(", "bbox", "[", "1", "]", "+", "bbox", "[", "3", "]", ")", "/", "2", ",", "(", "bbox", "[", "0", "]", "+", "bbox", "[", "2", "]", ")", "/", "2", ",", "(", "bbox", "[", "3", "]", "-", "bbox", "[", "1", "]", ")", ",", "(", "bbox", "[", "2", "]", "-", "bbox", "[", "0", "]", ")", ",", "]" ]
Convert from corner bounding box to center/shape
[ "Convert", "from", "corner", "bounding", "box", "to", "center", "/", "shape" ]
python
train
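Since the record above lives in a private turicreate module, here is a standalone restatement of the same arithmetic plus a worked example; it assumes the corner box is ordered [x_min, y_min, x_max, y_max], so the result reads [center_y, center_x, height, width].

def bbox_to_ybox(bbox):
    # corner [x_min, y_min, x_max, y_max] -> center/shape [cy, cx, h, w]
    return [
        (bbox[1] + bbox[3]) / 2,  # center y
        (bbox[0] + bbox[2]) / 2,  # center x
        (bbox[3] - bbox[1]),      # height
        (bbox[2] - bbox[0]),      # width
    ]

print(bbox_to_ybox([10, 20, 50, 100]))  # -> [60.0, 30.0, 80, 40]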
johntruckenbrodt/spatialist
spatialist/raster.py
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/raster.py#L619-L625
def load(self): """ load all raster data to internal memory arrays. This shortens the read time of other methods like :meth:`matrix`. """ for i in range(1, self.bands + 1): self.__data[i - 1] = self.matrix(i)
[ "def", "load", "(", "self", ")", ":", "for", "i", "in", "range", "(", "1", ",", "self", ".", "bands", "+", "1", ")", ":", "self", ".", "__data", "[", "i", "-", "1", "]", "=", "self", ".", "matrix", "(", "i", ")" ]
load all raster data to internal memory arrays. This shortens the read time of other methods like :meth:`matrix`.
[ "load", "all", "raster", "data", "to", "internal", "memory", "arrays", ".", "This", "shortens", "the", "read", "time", "of", "other", "methods", "like", ":", "meth", ":", "matrix", "." ]
python
train
owncloud/pyocclient
owncloud/owncloud.py
https://github.com/owncloud/pyocclient/blob/b9e1f04cdbde74588e86f1bebb6144571d82966c/owncloud/owncloud.py#L678-L695
def accept_remote_share(self, share_id): """Accepts a remote share :param share_id: Share ID (int) :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned """ if not isinstance(share_id, int): return False res = self._make_ocs_request( 'POST', self.OCS_SERVICE_SHARE, 'remote_shares/pending/' + str(share_id) ) if res.status_code == 200: return res raise HTTPResponseError(res)
[ "def", "accept_remote_share", "(", "self", ",", "share_id", ")", ":", "if", "not", "isinstance", "(", "share_id", ",", "int", ")", ":", "return", "False", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'remote_shares/pending/'", "+", "str", "(", "share_id", ")", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "res", "raise", "HTTPResponseError", "(", "res", ")" ]
Accepts a remote share :param share_id: Share ID (int) :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
[ "Accepts", "a", "remote", "share" ]
python
train
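A hedged usage sketch for accept_remote_share() from the record above; the server URL, credentials, and share id are placeholders, and a reachable ownCloud instance with a pending remote share is assumed.

import owncloud

oc = owncloud.Client("https://cloud.example.com")  # placeholder URL
oc.login("demo_user", "demo_password")             # placeholder credentials
res = oc.accept_remote_share(42)                   # pending remote share id (int)
if res:
    print("accepted, HTTP", res.status_code)
oc.logout()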
yahoo/TensorFlowOnSpark
examples/imagenet/inception/data/build_image_data.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/data/build_image_data.py#L287-L328
def _process_image_files(name, filenames, texts, labels, num_shards): """Process and save list of images as TFRecord of Example protos. Args: name: string, unique identifier specifying the data set filenames: list of strings; each string is a path to an image file texts: list of strings; each string is human readable, e.g. 'dog' labels: list of integer; each integer identifies the ground truth num_shards: integer number of shards for this data set. """ assert len(filenames) == len(texts) assert len(filenames) == len(labels) # Break all images into batches with a [ranges[i][0], ranges[i][1]]. spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) ranges = [] for i in range(len(spacing) - 1): ranges.append([spacing[i], spacing[i+1]]) # Launch a thread for each batch. print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) sys.stdout.flush() # Create a mechanism for monitoring when all threads are finished. coord = tf.train.Coordinator() # Create a generic TensorFlow-based utility for converting all image codings. coder = ImageCoder() threads = [] for thread_index in range(len(ranges)): args = (coder, thread_index, ranges, name, filenames, texts, labels, num_shards) t = threading.Thread(target=_process_image_files_batch, args=args) t.start() threads.append(t) # Wait for all the threads to terminate. coord.join(threads) print('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames))) sys.stdout.flush()
[ "def", "_process_image_files", "(", "name", ",", "filenames", ",", "texts", ",", "labels", ",", "num_shards", ")", ":", "assert", "len", "(", "filenames", ")", "==", "len", "(", "texts", ")", "assert", "len", "(", "filenames", ")", "==", "len", "(", "labels", ")", "# Break all images into batches with a [ranges[i][0], ranges[i][1]].", "spacing", "=", "np", ".", "linspace", "(", "0", ",", "len", "(", "filenames", ")", ",", "FLAGS", ".", "num_threads", "+", "1", ")", ".", "astype", "(", "np", ".", "int", ")", "ranges", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "spacing", ")", "-", "1", ")", ":", "ranges", ".", "append", "(", "[", "spacing", "[", "i", "]", ",", "spacing", "[", "i", "+", "1", "]", "]", ")", "# Launch a thread for each batch.", "print", "(", "'Launching %d threads for spacings: %s'", "%", "(", "FLAGS", ".", "num_threads", ",", "ranges", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Create a mechanism for monitoring when all threads are finished.", "coord", "=", "tf", ".", "train", ".", "Coordinator", "(", ")", "# Create a generic TensorFlow-based utility for converting all image codings.", "coder", "=", "ImageCoder", "(", ")", "threads", "=", "[", "]", "for", "thread_index", "in", "range", "(", "len", "(", "ranges", ")", ")", ":", "args", "=", "(", "coder", ",", "thread_index", ",", "ranges", ",", "name", ",", "filenames", ",", "texts", ",", "labels", ",", "num_shards", ")", "t", "=", "threading", ".", "Thread", "(", "target", "=", "_process_image_files_batch", ",", "args", "=", "args", ")", "t", ".", "start", "(", ")", "threads", ".", "append", "(", "t", ")", "# Wait for all the threads to terminate.", "coord", ".", "join", "(", "threads", ")", "print", "(", "'%s: Finished writing all %d images in data set.'", "%", "(", "datetime", ".", "now", "(", ")", ",", "len", "(", "filenames", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
Process and save list of images as TFRecord of Example protos. Args: name: string, unique identifier specifying the data set filenames: list of strings; each string is a path to an image file texts: list of strings; each string is human readable, e.g. 'dog' labels: list of integer; each integer identifies the ground truth num_shards: integer number of shards for this data set.
[ "Process", "and", "save", "list", "of", "images", "as", "TFRecord", "of", "Example", "protos", "." ]
python
train
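The heart of _process_image_files() above is splitting the file list into one contiguous range per thread with np.linspace; a small standalone illustration of that batching step follows (the counts are made up).

import numpy as np

num_files, num_threads = 10, 4
spacing = np.linspace(0, num_files, num_threads + 1).astype(np.int64)
ranges = [[int(spacing[i]), int(spacing[i + 1])] for i in range(len(spacing) - 1)]
print(ranges)  # -> [[0, 2], [2, 5], [5, 7], [7, 10]]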
glormph/msstitch
src/app/lookups/sqlite/searchspace.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/searchspace.py#L39-L61
def check_seq_exists(self, seq, amount_ntermwildcards): """Look up sequence in sqlite DB. Returns True or False if it exists (or not). When looking up a reversed DB with ntermwildcards: we reverse the sequence of the pep and add a LIKE and %-suffix to the query. """ cursor = self.get_cursor() if amount_ntermwildcards > 0: seq = seq[::-1] sqlseq = '{}{}'.format(seq, '%') # FIXME non-parametrized string binding because if ? binding is # used the INDEX is not used when looking up, apparently because # the query cant be optimized when using LIKE and binding. sql = ('select seqs from known_searchspace where seqs LIKE ' '"{}"'.format(sqlseq)) for match in cursor.execute(sql): if match[0][:-amount_ntermwildcards] in seq: return True return False else: sql = ('select exists(select seqs from known_searchspace ' 'where seqs=? limit 1)') return cursor.execute(sql, (seq, )).fetchone()[0] == 1
[ "def", "check_seq_exists", "(", "self", ",", "seq", ",", "amount_ntermwildcards", ")", ":", "cursor", "=", "self", ".", "get_cursor", "(", ")", "if", "amount_ntermwildcards", ">", "0", ":", "seq", "=", "seq", "[", ":", ":", "-", "1", "]", "sqlseq", "=", "'{}{}'", ".", "format", "(", "seq", ",", "'%'", ")", "# FIXME non-parametrized string binding because if ? binding is", "# used the INDEX is not used when looking up, apparently because", "# the query cant be optimized when using LIKE and binding.", "sql", "=", "(", "'select seqs from known_searchspace where seqs LIKE '", "'\"{}\"'", ".", "format", "(", "sqlseq", ")", ")", "for", "match", "in", "cursor", ".", "execute", "(", "sql", ")", ":", "if", "match", "[", "0", "]", "[", ":", "-", "amount_ntermwildcards", "]", "in", "seq", ":", "return", "True", "return", "False", "else", ":", "sql", "=", "(", "'select exists(select seqs from known_searchspace '", "'where seqs=? limit 1)'", ")", "return", "cursor", ".", "execute", "(", "sql", ",", "(", "seq", ",", ")", ")", ".", "fetchone", "(", ")", "[", "0", "]", "==", "1" ]
Look up sequence in sqlite DB. Returns True or False if it exists (or not). When looking up a reversed DB with ntermwildcards: we reverse the sequence of the pep and add a LIKE and %-suffix to the query.
[ "Look", "up", "sequence", "in", "sqlite", "DB", ".", "Returns", "True", "or", "False", "if", "it", "exists", "(", "or", "not", ")", ".", "When", "looking", "up", "a", "reversed", "DB", "with", "ntermwildcards", ":", "we", "reverse", "the", "sequence", "of", "the", "pep", "and", "add", "a", "LIKE", "and", "%", "-", "suffix", "to", "the", "query", "." ]
python
train
Spinmob/spinmob
egg/_temporary_fixes.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_temporary_fixes.py#L196-L202
def setMaximum(self, m, update=True): """Set the maximum allowed value (or None for no limit)""" if m is not None: m = D(asUnicode(m)) self.opts['bounds'][1] = m if update: self.setValue()
[ "def", "setMaximum", "(", "self", ",", "m", ",", "update", "=", "True", ")", ":", "if", "m", "is", "not", "None", ":", "m", "=", "D", "(", "asUnicode", "(", "m", ")", ")", "self", ".", "opts", "[", "'bounds'", "]", "[", "1", "]", "=", "m", "if", "update", ":", "self", ".", "setValue", "(", ")" ]
Set the maximum allowed value (or None for no limit)
[ "Set", "the", "maximum", "allowed", "value", "(", "or", "None", "for", "no", "limit", ")" ]
python
train
jhermann/rituals
src/rituals/util/notify.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/util/notify.py#L43-L47
def info(msg): """Emit a normal message.""" _flush() sys.stdout.write(msg + '\n') sys.stdout.flush()
[ "def", "info", "(", "msg", ")", ":", "_flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "msg", "+", "'\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
Emit a normal message.
[ "Emit", "a", "normal", "message", "." ]
python
valid
mohamedattahri/PyXMLi
pyxmli/__init__.py
https://github.com/mohamedattahri/PyXMLi/blob/a81a245be822d62f1a20c734ca14b42c786ae81e/pyxmli/__init__.py#L1252-L1263
def __set_unit_price(self, value): ''' Sets the unit price @param value:str ''' try: if value < 0: raise ValueError() self.__unit_price = Decimal(str(value)) except ValueError: raise ValueError("Unit Price must be a positive number")
[ "def", "__set_unit_price", "(", "self", ",", "value", ")", ":", "try", ":", "if", "value", "<", "0", ":", "raise", "ValueError", "(", ")", "self", ".", "__unit_price", "=", "Decimal", "(", "str", "(", "value", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unit Price must be a positive number\"", ")" ]
Sets the unit price @param value:str
[ "Sets", "the", "unit", "price" ]
python
train
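A standalone sketch of the validation idea in the record above; note it catches decimal.InvalidOperation and checks the sign after conversion, which is a slight variation for illustration, not pyxmli's exact behaviour.

from decimal import Decimal, InvalidOperation

def parse_unit_price(value):
    try:
        price = Decimal(str(value))
    except InvalidOperation:
        raise ValueError("Unit Price must be a positive number")
    if price < 0:
        raise ValueError("Unit Price must be a positive number")
    return price

print(parse_unit_price("19.99"))  # prints 19.99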
ev3dev/ev3dev-lang-python
ev3dev2/sensor/lego.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sensor/lego.py#L944-L950
def sound_pressure_low(self): """ A measurement of the measured sound pressure level, as a percent. Uses A-weighting, which focuses on levels up to 55 dB. """ self._ensure_mode(self.MODE_DBA) return self.value(0) * self._scale('DBA')
[ "def", "sound_pressure_low", "(", "self", ")", ":", "self", ".", "_ensure_mode", "(", "self", ".", "MODE_DBA", ")", "return", "self", ".", "value", "(", "0", ")", "*", "self", ".", "_scale", "(", "'DBA'", ")" ]
A measurement of the measured sound pressure level, as a percent. Uses A-weighting, which focuses on levels up to 55 dB.
[ "A", "measurement", "of", "the", "measured", "sound", "pressure", "level", "as", "a", "percent", ".", "Uses", "A", "-", "weighting", "which", "focuses", "on", "levels", "up", "to", "55", "dB", "." ]
python
train
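A hedged usage sketch for the property above; it assumes an EV3 brick with the LEGO/NXT sound sensor attached and that this module exposes it as SoundSensor (class name recalled, not taken from this record).

from ev3dev2.sensor.lego import SoundSensor

sensor = SoundSensor()            # auto-detects the sensor port
print(sensor.sound_pressure_low)  # dBA-weighted sound level, as a percent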
acutesoftware/AIKIF
aikif/project.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/project.py#L35-L40
def get_by_name(self, name): """ returns an object Project which matches name """ for p in self.project_list: if p.nme == name: return p return None
[ "def", "get_by_name", "(", "self", ",", "name", ")", ":", "for", "p", "in", "self", ".", "project_list", ":", "if", "p", ".", "nme", "==", "name", ":", "return", "p", "return", "None" ]
returns an object Project which matches name
[ "returns", "an", "object", "Project", "which", "matches", "name" ]
python
train
delph-in/pydelphin
delphin/repp.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/repp.py#L410-L424
def apply(self, s, active=None): """ Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps """ if active is None: active = self.active return self.group.apply(s, active=active)
[ "def", "apply", "(", "self", ",", "s", ",", "active", "=", "None", ")", ":", "if", "active", "is", "None", ":", "active", "=", "self", ".", "active", "return", "self", ".", "group", ".", "apply", "(", "s", ",", "active", "=", "active", ")" ]
Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps
[ "Apply", "the", "REPP", "s", "rewrite", "rules", "to", "the", "input", "string", "*", "s", "*", "." ]
python
train
quantopian/alphalens
alphalens/utils.py
https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/utils.py#L386-L417
def print_table(table, name=None, fmt=None): """ Pretty print a pandas DataFrame. Uses HTML output if running inside Jupyter Notebook, otherwise formatted text output. Parameters ---------- table : pd.Series or pd.DataFrame Table to pretty-print. name : str, optional Table name to display in upper left corner. fmt : str, optional Formatter to use for displaying table elements. E.g. '{0:.2f}%' for displaying 100 as '100.00%'. Restores original setting after displaying. """ if isinstance(table, pd.Series): table = pd.DataFrame(table) if isinstance(table, pd.DataFrame): table.columns.name = name prev_option = pd.get_option('display.float_format') if fmt is not None: pd.set_option('display.float_format', lambda x: fmt.format(x)) display(table) if fmt is not None: pd.set_option('display.float_format', prev_option)
[ "def", "print_table", "(", "table", ",", "name", "=", "None", ",", "fmt", "=", "None", ")", ":", "if", "isinstance", "(", "table", ",", "pd", ".", "Series", ")", ":", "table", "=", "pd", ".", "DataFrame", "(", "table", ")", "if", "isinstance", "(", "table", ",", "pd", ".", "DataFrame", ")", ":", "table", ".", "columns", ".", "name", "=", "name", "prev_option", "=", "pd", ".", "get_option", "(", "'display.float_format'", ")", "if", "fmt", "is", "not", "None", ":", "pd", ".", "set_option", "(", "'display.float_format'", ",", "lambda", "x", ":", "fmt", ".", "format", "(", "x", ")", ")", "display", "(", "table", ")", "if", "fmt", "is", "not", "None", ":", "pd", ".", "set_option", "(", "'display.float_format'", ",", "prev_option", ")" ]
Pretty print a pandas DataFrame. Uses HTML output if running inside Jupyter Notebook, otherwise formatted text output. Parameters ---------- table : pd.Series or pd.DataFrame Table to pretty-print. name : str, optional Table name to display in upper left corner. fmt : str, optional Formatter to use for displaying table elements. E.g. '{0:.2f}%' for displaying 100 as '100.00%'. Restores original setting after displaying.
[ "Pretty", "print", "a", "pandas", "DataFrame", "." ]
python
train
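A hedged usage sketch for print_table() above; the import path follows this record's module path (alphalens/utils.py), the data are made up, and HTML output only appears when run inside a Jupyter notebook.

import pandas as pd
from alphalens.utils import print_table

returns = pd.DataFrame(
    {"1D": [0.1234, 0.5678], "5D": [1.0, 2.0]},
    index=["factor_low", "factor_high"],
)
print_table(returns, name="Mean Returns", fmt="{0:.2f}%")  # restores float_format afterwards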
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/transforms/base_transform.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/transforms/base_transform.py#L99-L104
def inverse(self): """ The inverse of this transform. """ if self._inverse is None: self._inverse = InverseTransform(self) return self._inverse
[ "def", "inverse", "(", "self", ")", ":", "if", "self", ".", "_inverse", "is", "None", ":", "self", ".", "_inverse", "=", "InverseTransform", "(", "self", ")", "return", "self", ".", "_inverse" ]
The inverse of this transform.
[ "The", "inverse", "of", "this", "transform", "." ]
python
train
SmileyChris/easy-thumbnails
easy_thumbnails/namers.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/namers.py#L34-L44
def hashed(source_filename, prepared_options, thumbnail_extension, **kwargs): """ Generate a short hashed thumbnail filename. Creates a 12 character url-safe base64 sha1 filename (plus the extension), for example: ``6qW1buHgLaZ9.jpg``. """ parts = ':'.join([source_filename] + prepared_options) short_sha = hashlib.sha1(parts.encode('utf-8')).digest() short_hash = base64.urlsafe_b64encode(short_sha[:9]).decode('utf-8') return '.'.join([short_hash, thumbnail_extension])
[ "def", "hashed", "(", "source_filename", ",", "prepared_options", ",", "thumbnail_extension", ",", "*", "*", "kwargs", ")", ":", "parts", "=", "':'", ".", "join", "(", "[", "source_filename", "]", "+", "prepared_options", ")", "short_sha", "=", "hashlib", ".", "sha1", "(", "parts", ".", "encode", "(", "'utf-8'", ")", ")", ".", "digest", "(", ")", "short_hash", "=", "base64", ".", "urlsafe_b64encode", "(", "short_sha", "[", ":", "9", "]", ")", ".", "decode", "(", "'utf-8'", ")", "return", "'.'", ".", "join", "(", "[", "short_hash", ",", "thumbnail_extension", "]", ")" ]
Generate a short hashed thumbnail filename. Creates a 12 character url-safe base64 sha1 filename (plus the extension), for example: ``6qW1buHgLaZ9.jpg``.
[ "Generate", "a", "short", "hashed", "thumbnail", "filename", "." ]
python
train
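A standalone restatement of the hashing scheme above, runnable without Django: sha1 over "source:option:option", first 9 digest bytes, url-safe base64, giving a 12-character stem plus the extension. In a project this namer would normally be selected through easy-thumbnails' settings rather than called directly.

import base64
import hashlib

def hashed_name(source_filename, prepared_options, thumbnail_extension):
    parts = ':'.join([source_filename] + prepared_options)
    short_sha = hashlib.sha1(parts.encode('utf-8')).digest()
    short_hash = base64.urlsafe_b64encode(short_sha[:9]).decode('utf-8')
    return '.'.join([short_hash, thumbnail_extension])

print(hashed_name('photos/cat.jpg', ['100x100', 'crop'], 'jpg'))
# deterministic 12-character stem + ".jpg", in the "6qW1buHgLaZ9.jpg" style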
mitsei/dlkit
dlkit/json_/assessment/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L822-L836
def get_item_query_session(self): """Gets the ``OsidSession`` associated with the item query service. return: (osid.assessment.ItemQuerySession) - an ``ItemQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_item_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_item_query()`` is ``true``.* """ if not self.supports_item_query(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ItemQuerySession(runtime=self._runtime)
[ "def", "get_item_query_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_item_query", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ItemQuerySession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the ``OsidSession`` associated with the item query service. return: (osid.assessment.ItemQuerySession) - an ``ItemQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_item_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_item_query()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "item", "query", "service", "." ]
python
train
wakatime/wakatime
wakatime/packages/pygments/scanner.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/scanner.py#L55-L65
def check(self, pattern): """ Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) return self._re_cache[pattern].match(self.data, self.pos)
[ "def", "check", "(", "self", ",", "pattern", ")", ":", "if", "self", ".", "eos", ":", "raise", "EndOfText", "(", ")", "if", "pattern", "not", "in", "self", ".", "_re_cache", ":", "self", ".", "_re_cache", "[", "pattern", "]", "=", "re", ".", "compile", "(", "pattern", ",", "self", ".", "flags", ")", "return", "self", ".", "_re_cache", "[", "pattern", "]", ".", "match", "(", "self", ".", "data", ",", "self", ".", "pos", ")" ]
Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead.
[ "Apply", "pattern", "on", "the", "current", "position", "and", "return", "the", "match", "object", ".", "(", "Doesn", "t", "touch", "pos", ")", ".", "Use", "this", "for", "lookahead", "." ]
python
train
twisted/mantissa
xmantissa/people.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L1422-L1443
def contactInfo(self, request, tag): """ Render the result of calling L{IContactType.getReadOnlyView} on the corresponding L{IContactType} for each piece of contact info associated with L{person}. Arrange the result by group, using C{tag}'s I{contact-group} pattern. Groupless contact items will have their views yielded directly. The I{contact-group} pattern appears once for each distinct L{ContactGroup}, with the following slots filled: I{name} - The group's C{groupName}. I{views} - A sequence of read-only views belonging to the group. """ groupPattern = inevow.IQ(tag).patternGenerator('contact-group') groupedViews = self.organizer.groupReadOnlyViews(self.person) for (groupName, views) in sorted(groupedViews.items()): if groupName is None: yield views else: yield groupPattern().fillSlots( 'name', groupName).fillSlots( 'views', views)
[ "def", "contactInfo", "(", "self", ",", "request", ",", "tag", ")", ":", "groupPattern", "=", "inevow", ".", "IQ", "(", "tag", ")", ".", "patternGenerator", "(", "'contact-group'", ")", "groupedViews", "=", "self", ".", "organizer", ".", "groupReadOnlyViews", "(", "self", ".", "person", ")", "for", "(", "groupName", ",", "views", ")", "in", "sorted", "(", "groupedViews", ".", "items", "(", ")", ")", ":", "if", "groupName", "is", "None", ":", "yield", "views", "else", ":", "yield", "groupPattern", "(", ")", ".", "fillSlots", "(", "'name'", ",", "groupName", ")", ".", "fillSlots", "(", "'views'", ",", "views", ")" ]
Render the result of calling L{IContactType.getReadOnlyView} on the corresponding L{IContactType} for each piece of contact info associated with L{person}. Arrange the result by group, using C{tag}'s I{contact-group} pattern. Groupless contact items will have their views yielded directly. The I{contact-group} pattern appears once for each distinct L{ContactGroup}, with the following slots filled: I{name} - The group's C{groupName}. I{views} - A sequence of read-only views belonging to the group.
[ "Render", "the", "result", "of", "calling", "L", "{", "IContactType", ".", "getReadOnlyView", "}", "on", "the", "corresponding", "L", "{", "IContactType", "}", "for", "each", "piece", "of", "contact", "info", "associated", "with", "L", "{", "person", "}", ".", "Arrange", "the", "result", "by", "group", "using", "C", "{", "tag", "}", "s", "I", "{", "contact", "-", "group", "}", "pattern", ".", "Groupless", "contact", "items", "will", "have", "their", "views", "yielded", "directly", "." ]
python
train
theislab/scvelo
scvelo/preprocessing/utils.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L99-L185
def filter_genes(data, min_counts=None, min_cells=None, max_counts=None, max_cells=None, min_counts_u=None, min_cells_u=None, max_counts_u=None, max_cells_u=None, min_shared_counts=None, min_shared_cells=None, copy=False): """Filter genes based on number of cells or counts. Keep genes that have at least `min_counts` counts or are expressed in at least `min_cells` cells or have at most `max_counts` counts or are expressed in at most `max_cells` cells. Only provide one of the optional parameters `min_counts`, `min_cells`, `max_counts`, `max_cells` per call. Parameters ---------- data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.spmatrix` The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond to cells and columns to genes. min_counts : `int`, optional (default: `None`) Minimum number of counts required for a gene to pass filtering. min_cells : `int`, optional (default: `None`) Minimum number of cells expressed required for a gene to pass filtering. max_counts : `int`, optional (default: `None`) Maximum number of counts required for a gene to pass filtering. max_cells : `int`, optional (default: `None`) Maximum number of cells expressed required for a gene to pass filtering. min_counts_u : `int`, optional (default: `None`) Minimum number of unspliced counts required for a gene to pass filtering. min_cells_u : `int`, optional (default: `None`) Minimum number of unspliced cells expressed required for a gene to pass filtering. max_counts_u : `int`, optional (default: `None`) Maximum number of unspliced counts required for a gene to pass filtering. max_cells_u : `int`, optional (default: `None`) Maximum number of unspliced cells expressed required for a gene to pass filtering. min_shared_counts: `int`, optional (default: `None`) Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene. min_shared_cells: `int`, optional (default: `None`) Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced. copy : `bool`, optional (default: `False`) Determines whether a copy is returned. Returns ------- Filters the object and adds `n_counts` to `adata.var`. 
""" adata = data.copy() if copy else data # set initial cell sizes before filtering set_initial_size(adata) layers = [layer for layer in ['spliced', 'unspliced'] if layer in adata.layers.keys()] if min_shared_counts is not None or min_shared_cells is not None: layers.extend(['shared']) for layer in layers: if layer is 'spliced': _min_counts, _min_cells, _max_counts, _max_cells = min_counts, min_cells, max_counts, max_cells elif layer is 'unspliced': _min_counts, _min_cells, _max_counts, _max_cells = min_counts_u, min_cells_u, max_counts_u, max_cells_u else: # shared counts/cells _min_counts, _min_cells, _max_counts, _max_cells = min_shared_counts, min_shared_cells, None, None if layer in adata.layers.keys(): X = adata.layers[layer] else: # shared counts/cells Xs, Xu = adata.layers['spliced'], adata.layers['unspliced'] nonzeros = (Xs > 0).multiply(Xu > 0) if issparse(Xs) else (Xs > 0) * (Xu > 0) X = nonzeros.multiply(Xs) + nonzeros.multiply(Xu) if issparse(nonzeros) else nonzeros * (Xs + Xu) gene_subset = np.ones(adata.n_vars, dtype=bool) if _min_counts is not None or _max_counts is not None: gene_subset, _ = filter(X, min_counts=_min_counts, max_counts=_max_counts) adata._inplace_subset_var(gene_subset) if _min_cells is not None or _max_cells is not None: gene_subset, _ = filter(X, min_cells=_min_cells, max_cells=_max_cells) adata._inplace_subset_var(gene_subset) s = np.sum(~gene_subset) if s > 0: logg.info('Filtered out {} genes that are detected'.format(s), end=' ') if _min_cells is not None or _min_counts is not None: logg.info('in less than', str(_min_cells) + ' cells (' + str(layer) + ').' if _min_counts is None else str(_min_counts) + ' counts (' + str(layer) + ').', no_indent=True) if max_cells is not None or max_counts is not None: logg.info('in more than ', str(_max_cells) + ' cells(' + str(layer) + ').' if _max_counts is None else str(_max_counts) + ' counts (' + str(layer) + ').', no_indent=True) return adata if copy else None
[ "def", "filter_genes", "(", "data", ",", "min_counts", "=", "None", ",", "min_cells", "=", "None", ",", "max_counts", "=", "None", ",", "max_cells", "=", "None", ",", "min_counts_u", "=", "None", ",", "min_cells_u", "=", "None", ",", "max_counts_u", "=", "None", ",", "max_cells_u", "=", "None", ",", "min_shared_counts", "=", "None", ",", "min_shared_cells", "=", "None", ",", "copy", "=", "False", ")", ":", "adata", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "# set initial cell sizes before filtering", "set_initial_size", "(", "adata", ")", "layers", "=", "[", "layer", "for", "layer", "in", "[", "'spliced'", ",", "'unspliced'", "]", "if", "layer", "in", "adata", ".", "layers", ".", "keys", "(", ")", "]", "if", "min_shared_counts", "is", "not", "None", "or", "min_shared_cells", "is", "not", "None", ":", "layers", ".", "extend", "(", "[", "'shared'", "]", ")", "for", "layer", "in", "layers", ":", "if", "layer", "is", "'spliced'", ":", "_min_counts", ",", "_min_cells", ",", "_max_counts", ",", "_max_cells", "=", "min_counts", ",", "min_cells", ",", "max_counts", ",", "max_cells", "elif", "layer", "is", "'unspliced'", ":", "_min_counts", ",", "_min_cells", ",", "_max_counts", ",", "_max_cells", "=", "min_counts_u", ",", "min_cells_u", ",", "max_counts_u", ",", "max_cells_u", "else", ":", "# shared counts/cells", "_min_counts", ",", "_min_cells", ",", "_max_counts", ",", "_max_cells", "=", "min_shared_counts", ",", "min_shared_cells", ",", "None", ",", "None", "if", "layer", "in", "adata", ".", "layers", ".", "keys", "(", ")", ":", "X", "=", "adata", ".", "layers", "[", "layer", "]", "else", ":", "# shared counts/cells", "Xs", ",", "Xu", "=", "adata", ".", "layers", "[", "'spliced'", "]", ",", "adata", ".", "layers", "[", "'unspliced'", "]", "nonzeros", "=", "(", "Xs", ">", "0", ")", ".", "multiply", "(", "Xu", ">", "0", ")", "if", "issparse", "(", "Xs", ")", "else", "(", "Xs", ">", "0", ")", "*", "(", "Xu", ">", "0", ")", "X", "=", "nonzeros", ".", "multiply", "(", "Xs", ")", "+", "nonzeros", ".", "multiply", "(", "Xu", ")", "if", "issparse", "(", "nonzeros", ")", "else", "nonzeros", "*", "(", "Xs", "+", "Xu", ")", "gene_subset", "=", "np", ".", "ones", "(", "adata", ".", "n_vars", ",", "dtype", "=", "bool", ")", "if", "_min_counts", "is", "not", "None", "or", "_max_counts", "is", "not", "None", ":", "gene_subset", ",", "_", "=", "filter", "(", "X", ",", "min_counts", "=", "_min_counts", ",", "max_counts", "=", "_max_counts", ")", "adata", ".", "_inplace_subset_var", "(", "gene_subset", ")", "if", "_min_cells", "is", "not", "None", "or", "_max_cells", "is", "not", "None", ":", "gene_subset", ",", "_", "=", "filter", "(", "X", ",", "min_cells", "=", "_min_cells", ",", "max_cells", "=", "_max_cells", ")", "adata", ".", "_inplace_subset_var", "(", "gene_subset", ")", "s", "=", "np", ".", "sum", "(", "~", "gene_subset", ")", "if", "s", ">", "0", ":", "logg", ".", "info", "(", "'Filtered out {} genes that are detected'", ".", "format", "(", "s", ")", ",", "end", "=", "' '", ")", "if", "_min_cells", "is", "not", "None", "or", "_min_counts", "is", "not", "None", ":", "logg", ".", "info", "(", "'in less than'", ",", "str", "(", "_min_cells", ")", "+", "' cells ('", "+", "str", "(", "layer", ")", "+", "').'", "if", "_min_counts", "is", "None", "else", "str", "(", "_min_counts", ")", "+", "' counts ('", "+", "str", "(", "layer", ")", "+", "').'", ",", "no_indent", "=", "True", ")", "if", "max_cells", "is", "not", "None", "or", "max_counts", "is", "not", "None", ":", "logg", ".", 
"info", "(", "'in more than '", ",", "str", "(", "_max_cells", ")", "+", "' cells('", "+", "str", "(", "layer", ")", "+", "').'", "if", "_max_counts", "is", "None", "else", "str", "(", "_max_counts", ")", "+", "' counts ('", "+", "str", "(", "layer", ")", "+", "').'", ",", "no_indent", "=", "True", ")", "return", "adata", "if", "copy", "else", "None" ]
Filter genes based on number of cells or counts. Keep genes that have at least `min_counts` counts or are expressed in at least `min_cells` cells or have at most `max_counts` counts or are expressed in at most `max_cells` cells. Only provide one of the optional parameters `min_counts`, `min_cells`, `max_counts`, `max_cells` per call. Parameters ---------- data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.spmatrix` The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond to cells and columns to genes. min_counts : `int`, optional (default: `None`) Minimum number of counts required for a gene to pass filtering. min_cells : `int`, optional (default: `None`) Minimum number of cells expressed required for a gene to pass filtering. max_counts : `int`, optional (default: `None`) Maximum number of counts required for a gene to pass filtering. max_cells : `int`, optional (default: `None`) Maximum number of cells expressed required for a gene to pass filtering. min_counts_u : `int`, optional (default: `None`) Minimum number of unspliced counts required for a gene to pass filtering. min_cells_u : `int`, optional (default: `None`) Minimum number of unspliced cells expressed required for a gene to pass filtering. max_counts_u : `int`, optional (default: `None`) Maximum number of unspliced counts required for a gene to pass filtering. max_cells_u : `int`, optional (default: `None`) Maximum number of unspliced cells expressed required for a gene to pass filtering. min_shared_counts: `int`, optional (default: `None`) Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene. min_shared_cells: `int`, optional (default: `None`) Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced. copy : `bool`, optional (default: `False`) Determines whether a copy is returned. Returns ------- Filters the object and adds `n_counts` to `adata.var`.
[ "Filter", "genes", "based", "on", "number", "of", "cells", "or", "counts", ".", "Keep", "genes", "that", "have", "at", "least", "min_counts", "counts", "or", "are", "expressed", "in", "at", "least", "min_cells", "cells", "or", "have", "at", "most", "max_counts", "counts", "or", "are", "expressed", "in", "at", "most", "max_cells", "cells", ".", "Only", "provide", "one", "of", "the", "optional", "parameters", "min_counts", "min_cells", "max_counts", "max_cells", "per", "call", "." ]
python
train
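A hedged usage sketch for filter_genes() above; it assumes scvelo exposes the function as scv.pp.filter_genes and ships the pancreas example dataset, and that the AnnData object carries the 'spliced'/'unspliced' layers the min_shared_counts filter needs.

import scvelo as scv

adata = scv.datasets.pancreas()                   # bundled example dataset (downloads on first use)
scv.pp.filter_genes(adata, min_shared_counts=20)  # in-place; pass copy=True to keep the original
print(adata.n_vars, "genes retained")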
dslackw/slpkg
slpkg/slack/patches.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/slack/patches.py#L159-L185
def dialog_checklist(self): """Create checklist to choose packages for upgrade """ data = [] for upg in self.upgrade_all: data.append(upg[:-4]) text = "Press 'spacebar' to unchoose packages from upgrade" title = " Upgrade " backtitle = "{0} {1}".format(self.meta.__all__, self.meta.__version__) status = True pkgs = DialogUtil(data, text, title, backtitle, status).checklist() index = 0 for pkg, comp, uncomp in zip(self.upgrade_all, self.comp_sum, self.uncomp_sum): if pkg[:-4] not in pkgs: self.dwn_links.pop(index) self.upgrade_all.pop(index) self.comp_sum.pop(index) self.uncomp_sum.pop(index) self.count_upg -= 1 del comp, uncomp index -= 1 index += 1 if not self.upgrade_all: raise SystemExit()
[ "def", "dialog_checklist", "(", "self", ")", ":", "data", "=", "[", "]", "for", "upg", "in", "self", ".", "upgrade_all", ":", "data", ".", "append", "(", "upg", "[", ":", "-", "4", "]", ")", "text", "=", "\"Press 'spacebar' to unchoose packages from upgrade\"", "title", "=", "\" Upgrade \"", "backtitle", "=", "\"{0} {1}\"", ".", "format", "(", "self", ".", "meta", ".", "__all__", ",", "self", ".", "meta", ".", "__version__", ")", "status", "=", "True", "pkgs", "=", "DialogUtil", "(", "data", ",", "text", ",", "title", ",", "backtitle", ",", "status", ")", ".", "checklist", "(", ")", "index", "=", "0", "for", "pkg", ",", "comp", ",", "uncomp", "in", "zip", "(", "self", ".", "upgrade_all", ",", "self", ".", "comp_sum", ",", "self", ".", "uncomp_sum", ")", ":", "if", "pkg", "[", ":", "-", "4", "]", "not", "in", "pkgs", ":", "self", ".", "dwn_links", ".", "pop", "(", "index", ")", "self", ".", "upgrade_all", ".", "pop", "(", "index", ")", "self", ".", "comp_sum", ".", "pop", "(", "index", ")", "self", ".", "uncomp_sum", ".", "pop", "(", "index", ")", "self", ".", "count_upg", "-=", "1", "del", "comp", ",", "uncomp", "index", "-=", "1", "index", "+=", "1", "if", "not", "self", ".", "upgrade_all", ":", "raise", "SystemExit", "(", ")" ]
Create checklist to choose packages for upgrade
[ "Create", "checklist", "to", "choose", "packages", "for", "upgrade" ]
python
train
alex-kostirin/pyatomac
atomac/ldtpd/value.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/value.py#L146-L162
def scrollleft(self, window_name, object_name): """ Scroll left @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer """ if not self.verifyscrollbarhorizontal(window_name, object_name): raise LdtpServerException('Object not horizontal scrollbar') return self.setmin(window_name, object_name)
[ "def", "scrollleft", "(", "self", ",", "window_name", ",", "object_name", ")", ":", "if", "not", "self", ".", "verifyscrollbarhorizontal", "(", "window_name", ",", "object_name", ")", ":", "raise", "LdtpServerException", "(", "'Object not horizontal scrollbar'", ")", "return", "self", ".", "setmin", "(", "window_name", ",", "object_name", ")" ]
Scroll left @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
[ "Scroll", "left" ]
python
valid
google/openhtf
openhtf/plugs/usb/adb_protocol.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L277-L285
def _set_or_check_remote_id(self, remote_id): """Set or check the remote id.""" if not self.remote_id: assert self.closed_state == self.ClosedState.PENDING, 'Bad ClosedState!' self.remote_id = remote_id self.closed_state = self.ClosedState.OPEN elif self.remote_id != remote_id: raise usb_exceptions.AdbProtocolError( '%s remote-id change to %s', self, remote_id)
[ "def", "_set_or_check_remote_id", "(", "self", ",", "remote_id", ")", ":", "if", "not", "self", ".", "remote_id", ":", "assert", "self", ".", "closed_state", "==", "self", ".", "ClosedState", ".", "PENDING", ",", "'Bad ClosedState!'", "self", ".", "remote_id", "=", "remote_id", "self", ".", "closed_state", "=", "self", ".", "ClosedState", ".", "OPEN", "elif", "self", ".", "remote_id", "!=", "remote_id", ":", "raise", "usb_exceptions", ".", "AdbProtocolError", "(", "'%s remote-id change to %s'", ",", "self", ",", "remote_id", ")" ]
Set or check the remote id.
[ "Set", "or", "check", "the", "remote", "id", "." ]
python
train