repo              stringlengths  7-54
path              stringlengths  4-192
url               stringlengths  87-284
code              stringlengths  78-104k
code_tokens       sequence
docstring         stringlengths  1-46.9k
docstring_tokens  sequence
language          stringclasses  1 value
partition         stringclasses  3 values
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L718-L727
def p_examples_add(self, p):
    'examples : examples example'
    p[0] = p[1]
    if p[2].label in p[0]:
        existing_ex = p[0][p[2].label]
        self.errors.append(
            ("Example with label '%s' already defined on line %d." %
             (existing_ex.label, existing_ex.lineno),
             p[2].lineno, p[2].path))
    p[0][p[2].label] = p[2]
[ "def", "p_examples_add", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "if", "p", "[", "2", "]", ".", "label", "in", "p", "[", "0", "]", ":", "existing_ex", "=", "p", "[", "0", "]", "[", "p", "[", "2", "]", ".", "label", "]", "self", ".", "errors", ".", "append", "(", "(", "\"Example with label '%s' already defined on line %d.\"", "%", "(", "existing_ex", ".", "label", ",", "existing_ex", ".", "lineno", ")", ",", "p", "[", "2", "]", ".", "lineno", ",", "p", "[", "2", "]", ".", "path", ")", ")", "p", "[", "0", "]", "[", "p", "[", "2", "]", ".", "label", "]", "=", "p", "[", "2", "]" ]
examples : examples example
[ "examples", ":", "examples", "example" ]
python
train
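The grammar action above folds each parsed example into a dict keyed by label and records an error when a label repeats. A minimal standalone sketch of that merge-and-report logic, outside PLY (the Example namedtuple, the add_example helper, and the file name are hypothetical stand-ins, not part of Stone):

```python
from collections import namedtuple

# Hypothetical stand-in for the parser's example AST nodes.
Example = namedtuple('Example', ['label', 'lineno', 'path'])

def add_example(examples, errors, ex):
    # Mirror p_examples_add: report a duplicate label, then let the
    # later definition overwrite the earlier one.
    if ex.label in examples:
        existing = examples[ex.label]
        errors.append(("Example with label '%s' already defined on line %d."
                       % (existing.label, existing.lineno), ex.lineno, ex.path))
    examples[ex.label] = ex

examples, errors = {}, []
add_example(examples, errors, Example('default', 3, 'api.stone'))
add_example(examples, errors, Example('default', 9, 'api.stone'))
print(errors)  # one duplicate-label error pointing back to line 3
```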
chuck1/codemach
codemach/machine.py
https://github.com/chuck1/codemach/blob/b0e02f363da7aa58de7d6ad6499784282958adeb/codemach/machine.py#L374-L380
def pop(self, n):
    """
    Pop the **n** topmost items from the stack and return them as a ``list``.
    """
    poped = self.__stack[len(self.__stack) - n:]
    del self.__stack[len(self.__stack) - n:]
    return poped
[ "def", "pop", "(", "self", ",", "n", ")", ":", "poped", "=", "self", ".", "__stack", "[", "len", "(", "self", ".", "__stack", ")", "-", "n", ":", "]", "del", "self", ".", "__stack", "[", "len", "(", "self", ".", "__stack", ")", "-", "n", ":", "]", "return", "poped" ]
Pop the **n** topmost items from the stack and return them as a ``list``.
[ "Pop", "the", "**", "n", "**", "topmost", "items", "from", "the", "stack", "and", "return", "them", "as", "a", "list", "." ]
python
test
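For illustration, the same slice-and-delete logic on a plain list standing in for the machine's private stack (Machine itself is not constructed here; pop_n is an illustrative name):

```python
stack = [1, 2, 3, 4, 5]

def pop_n(stack, n):
    # Same idea as Machine.pop: slice off the n topmost items, then delete them.
    popped = stack[len(stack) - n:]
    del stack[len(stack) - n:]
    return popped

print(pop_n(stack, 2))  # [4, 5]
print(stack)            # [1, 2, 3]
```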
iamteem/redisco
redisco/containers.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/containers.py#L145-L147
def update(self, *others):
    """Update the set, adding elements from all others."""
    self.db.sunionstore(self.key, [self.key] + [o.key for o in others])
[ "def", "update", "(", "self", ",", "*", "others", ")", ":", "self", ".", "db", ".", "sunionstore", "(", "self", ".", "key", ",", "[", "self", ".", "key", "]", "+", "[", "o", ".", "key", "for", "o", "in", "others", "]", ")" ]
Update the set, adding elements from all others.
[ "Update", "the", "set", "adding", "elements", "from", "all", "others", "." ]
python
train
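The union itself happens server-side. A rough equivalent of what update issues, written against redis-py directly (key names are illustrative and a local Redis server is assumed):

```python
import redis

r = redis.Redis()  # assumes a Redis server on localhost:6379
r.sadd('tags:a', 'python', 'redis')
r.sadd('tags:b', 'redis', 'storage')

# Equivalent of Set.update: store the union of both keys back into 'tags:a'.
r.sunionstore('tags:a', ['tags:a', 'tags:b'])
print(r.smembers('tags:a'))  # {b'python', b'redis', b'storage'}
```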
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/tools.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/tools.py#L750-L777
def on_button_press(self, event):
    """Handle button press events.

    If the (mouse) button is pressed on top of a Handle (item.Handle), that
    handle is grabbed and can be dragged around.
    """
    if not event.get_button()[1] == 1:  # left mouse button
        return False
    view = self.view
    item, handle = HandleFinder(view.hovered_item, view).get_handle_at_point((event.x, event.y))

    # Handle must be the end handle of a connection
    if not handle or not isinstance(item, ConnectionView) or handle not in item.end_handles():
        return False

    if handle is item.from_handle():
        self._start_port_v = item.from_port
    else:
        self._start_port_v = item.to_port

    self._parent_state_v = item.parent
    self._end_handle = handle
    if isinstance(item, TransitionView):
        self._is_transition = True
    self._connection_v = item
    return True
[ "def", "on_button_press", "(", "self", ",", "event", ")", ":", "if", "not", "event", ".", "get_button", "(", ")", "[", "1", "]", "==", "1", ":", "# left mouse button", "return", "False", "view", "=", "self", ".", "view", "item", ",", "handle", "=", "HandleFinder", "(", "view", ".", "hovered_item", ",", "view", ")", ".", "get_handle_at_point", "(", "(", "event", ".", "x", ",", "event", ".", "y", ")", ")", "# Handle must be the end handle of a connection", "if", "not", "handle", "or", "not", "isinstance", "(", "item", ",", "ConnectionView", ")", "or", "handle", "not", "in", "item", ".", "end_handles", "(", ")", ":", "return", "False", "if", "handle", "is", "item", ".", "from_handle", "(", ")", ":", "self", ".", "_start_port_v", "=", "item", ".", "from_port", "else", ":", "self", ".", "_start_port_v", "=", "item", ".", "to_port", "self", ".", "_parent_state_v", "=", "item", ".", "parent", "self", ".", "_end_handle", "=", "handle", "if", "isinstance", "(", "item", ",", "TransitionView", ")", ":", "self", ".", "_is_transition", "=", "True", "self", ".", "_connection_v", "=", "item", "return", "True" ]
Handle button press events. If the (mouse) button is pressed on top of a Handle (item.Handle), that handle is grabbed and can be dragged around.
[ "Handle", "button", "press", "events", "." ]
python
train
rikrd/inspire
inspirespeech/htk_model.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk_model.py#L85-L103
def load_model(*args):
    """Load an HTK model from one or more files.

    :param args: Filenames of the model (e.g. macros hmmdefs)
    :return: The model as an OrderedDict()
    """
    text = ''
    for fnm in args:
        text += open(fnm).read()
        text += '\n'

    parser = htk_model_parser.htk_modelParser()
    model = HtkModelSemantics()
    return parser.parse(text,
                        rule_name='model',
                        ignorecase=True,
                        semantics=model,
                        comments_re="\(\*.*?\*\)",
                        trace=False)
[ "def", "load_model", "(", "*", "args", ")", ":", "text", "=", "''", "for", "fnm", "in", "args", ":", "text", "+=", "open", "(", "fnm", ")", ".", "read", "(", ")", "text", "+=", "'\\n'", "parser", "=", "htk_model_parser", ".", "htk_modelParser", "(", ")", "model", "=", "HtkModelSemantics", "(", ")", "return", "parser", ".", "parse", "(", "text", ",", "rule_name", "=", "'model'", ",", "ignorecase", "=", "True", ",", "semantics", "=", "model", ",", "comments_re", "=", "\"\\(\\*.*?\\*\\)\"", ",", "trace", "=", "False", ")" ]
Load an HTK model from one or more files. :param args: Filenames of the model (e.g. macros hmmdefs) :return: The model as an OrderedDict()
[ "Load", "an", "HTK", "model", "from", "one", "ore", "more", "files", "." ]
python
train
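Before parsing, the function simply concatenates the given files with newline separators. A small sketch of that step alone (read_model_text and the file names are illustrative; the generated htk_model_parser machinery is not reproduced here):

```python
def read_model_text(*filenames):
    # Join the contents of one or more HTK model files (e.g. macros, hmmdefs),
    # newline-separated, as load_model does before handing the text to the parser.
    text = ''
    for fnm in filenames:
        with open(fnm) as fh:
            text += fh.read()
        text += '\n'
    return text

# text = read_model_text('macros', 'hmmdefs')
```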
Alignak-monitoring/alignak
alignak/macroresolver.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/macroresolver.py#L450-L496
def _get_type_of_macro(macros, objs):
    r"""Set macros types

    Example::

        ARG\d -> ARGN,
        HOSTBLABLA -> class one and set Host in class)
        _HOSTTOTO -> HOST CUSTOM MACRO TOTO
        SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1

    :param macros: macros list in a dictionary
    :type macros: dict
    :param objs: objects list, used to tag object macros
    :type objs: list
    :return: None
    """
    for macro in macros:
        # ARGN Macros
        if re.match(r'ARG\d', macro):
            macros[macro]['type'] = 'ARGN'
            continue
        # USERN macros
        # are managed in the Config class, so no
        # need to look that here
        elif re.match(r'_HOST\w', macro):
            macros[macro]['type'] = 'CUSTOM'
            macros[macro]['class'] = 'HOST'
            continue
        elif re.match(r'_SERVICE\w', macro):
            macros[macro]['type'] = 'CUSTOM'
            macros[macro]['class'] = 'SERVICE'
            # value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]
            continue
        elif re.match(r'_CONTACT\w', macro):
            macros[macro]['type'] = 'CUSTOM'
            macros[macro]['class'] = 'CONTACT'
            continue
        # On demand macro
        elif len(macro.split(':')) > 1:
            macros[macro]['type'] = 'ONDEMAND'
            continue
        # OK, classical macro...
        for obj in objs:
            if macro in obj.macros:
                macros[macro]['type'] = 'object'
                macros[macro]['object'] = obj
                continue
[ "def", "_get_type_of_macro", "(", "macros", ",", "objs", ")", ":", "for", "macro", "in", "macros", ":", "# ARGN Macros", "if", "re", ".", "match", "(", "r'ARG\\d'", ",", "macro", ")", ":", "macros", "[", "macro", "]", "[", "'type'", "]", "=", "'ARGN'", "continue", "# USERN macros", "# are managed in the Config class, so no", "# need to look that here", "elif", "re", ".", "match", "(", "r'_HOST\\w'", ",", "macro", ")", ":", "macros", "[", "macro", "]", "[", "'type'", "]", "=", "'CUSTOM'", "macros", "[", "macro", "]", "[", "'class'", "]", "=", "'HOST'", "continue", "elif", "re", ".", "match", "(", "r'_SERVICE\\w'", ",", "macro", ")", ":", "macros", "[", "macro", "]", "[", "'type'", "]", "=", "'CUSTOM'", "macros", "[", "macro", "]", "[", "'class'", "]", "=", "'SERVICE'", "# value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]", "continue", "elif", "re", ".", "match", "(", "r'_CONTACT\\w'", ",", "macro", ")", ":", "macros", "[", "macro", "]", "[", "'type'", "]", "=", "'CUSTOM'", "macros", "[", "macro", "]", "[", "'class'", "]", "=", "'CONTACT'", "continue", "# On demand macro", "elif", "len", "(", "macro", ".", "split", "(", "':'", ")", ")", ">", "1", ":", "macros", "[", "macro", "]", "[", "'type'", "]", "=", "'ONDEMAND'", "continue", "# OK, classical macro...", "for", "obj", "in", "objs", ":", "if", "macro", "in", "obj", ".", "macros", ":", "macros", "[", "macro", "]", "[", "'type'", "]", "=", "'object'", "macros", "[", "macro", "]", "[", "'object'", "]", "=", "obj", "continue" ]
r"""Set macros types Example:: ARG\d -> ARGN, HOSTBLABLA -> class one and set Host in class) _HOSTTOTO -> HOST CUSTOM MACRO TOTO SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1 :param macros: macros list in a dictionary :type macros: dict :param objs: objects list, used to tag object macros :type objs: list :return: None
[ "r", "Set", "macros", "types" ]
python
train
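The classification depends only on the macro's name. A standalone sketch of the same regex rules that returns a (type, class) pair instead of mutating the macros dict (classify_macro is an illustrative helper, not Alignak API):

```python
import re

def classify_macro(name):
    # Mirror the naming rules applied by _get_type_of_macro.
    if re.match(r'ARG\d', name):
        return ('ARGN', None)
    if re.match(r'_HOST\w', name):
        return ('CUSTOM', 'HOST')
    if re.match(r'_SERVICE\w', name):
        return ('CUSTOM', 'SERVICE')
    if re.match(r'_CONTACT\w', name):
        return ('CUSTOM', 'CONTACT')
    if len(name.split(':')) > 1:
        return ('ONDEMAND', None)
    return ('object', None)  # would fall through to the object-macro lookup

print(classify_macro('ARG1'))                       # ('ARGN', None)
print(classify_macro('_HOSTMAC_ADDRESS'))           # ('CUSTOM', 'HOST')
print(classify_macro('SERVICESTATEID:srv-1:Load'))  # ('ONDEMAND', None)
```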
AltSchool/dynamic-rest
dynamic_rest/routers.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/routers.py#L227-L246
def get_canonical_path(resource_key, pk=None):
    """
    Return canonical resource path.

    Arguments:
        resource_key - Canonical resource key
                       i.e. Serializer.get_resource_key().
        pk - (Optional) Object's primary key for a single-resource URL.

    Returns: Absolute URL as string.
    """
    if resource_key not in resource_map:
        # Note: Maybe raise?
        return None

    base_path = get_script_prefix() + resource_map[resource_key]['path']
    if pk:
        return '%s/%s/' % (base_path, pk)
    else:
        return base_path
[ "def", "get_canonical_path", "(", "resource_key", ",", "pk", "=", "None", ")", ":", "if", "resource_key", "not", "in", "resource_map", ":", "# Note: Maybe raise?", "return", "None", "base_path", "=", "get_script_prefix", "(", ")", "+", "resource_map", "[", "resource_key", "]", "[", "'path'", "]", "if", "pk", ":", "return", "'%s/%s/'", "%", "(", "base_path", ",", "pk", ")", "else", ":", "return", "base_path" ]
Return canonical resource path. Arguments: resource_key - Canonical resource key i.e. Serializer.get_resource_key(). pk - (Optional) Object's primary key for a single-resource URL. Returns: Absolute URL as string.
[ "Return", "canonical", "resource", "path", "." ]
python
train
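Outside Django, the path construction can be sketched with a plain dict standing in for resource_map and a fixed script prefix (both stand-ins and the canonical_path name are hypothetical, not dynamic-rest internals):

```python
resource_map = {'users': {'path': 'users'}}  # hypothetical registry entry
script_prefix = '/'                          # stands in for get_script_prefix()

def canonical_path(resource_key, pk=None):
    if resource_key not in resource_map:
        return None
    base_path = script_prefix + resource_map[resource_key]['path']
    return '%s/%s/' % (base_path, pk) if pk else base_path

print(canonical_path('users'))      # /users
print(canonical_path('users', 42))  # /users/42/
```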
google/apitools
apitools/gen/service_registry.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/service_registry.py#L371-L421
def __ComputeMethodInfo(self, method_description, request, response, request_field):
    """Compute the base_api.ApiMethodInfo for this method."""
    relative_path = self.__names.NormalizeRelativePath(
        ''.join((self.__client_info.base_path, method_description['path'])))
    method_id = method_description['id']

    ordered_params = []
    for param_name in method_description.get('parameterOrder', []):
        param_info = method_description['parameters'][param_name]
        if param_info.get('required', False):
            ordered_params.append(param_name)

    method_info = base_api.ApiMethodInfo(
        relative_path=relative_path,
        method_id=method_id,
        http_method=method_description['httpMethod'],
        description=util.CleanDescription(
            method_description.get('description', '')),
        query_params=[],
        path_params=[],
        ordered_params=ordered_params,
        request_type_name=self.__names.ClassName(request),
        response_type_name=self.__names.ClassName(response),
        request_field=request_field,
    )
    flat_path = method_description.get('flatPath', None)
    if flat_path is not None:
        flat_path = self.__names.NormalizeRelativePath(
            self.__client_info.base_path + flat_path)
        if flat_path != relative_path:
            method_info.flat_path = flat_path
    if method_description.get('supportsMediaUpload', False):
        method_info.upload_config = self.__ComputeUploadConfig(
            method_description.get('mediaUpload'), method_id)
    method_info.supports_download = method_description.get(
        'supportsMediaDownload', False)
    self.__all_scopes.update(method_description.get('scopes', ()))
    for param, desc in method_description.get('parameters', {}).items():
        param = self.__names.CleanName(param)
        location = desc['location']
        if location == 'query':
            method_info.query_params.append(param)
        elif location == 'path':
            method_info.path_params.append(param)
        else:
            raise ValueError(
                'Unknown parameter location %s for parameter %s' % (
                    location, param))
    method_info.path_params.sort()
    method_info.query_params.sort()
    return method_info
[ "def", "__ComputeMethodInfo", "(", "self", ",", "method_description", ",", "request", ",", "response", ",", "request_field", ")", ":", "relative_path", "=", "self", ".", "__names", ".", "NormalizeRelativePath", "(", "''", ".", "join", "(", "(", "self", ".", "__client_info", ".", "base_path", ",", "method_description", "[", "'path'", "]", ")", ")", ")", "method_id", "=", "method_description", "[", "'id'", "]", "ordered_params", "=", "[", "]", "for", "param_name", "in", "method_description", ".", "get", "(", "'parameterOrder'", ",", "[", "]", ")", ":", "param_info", "=", "method_description", "[", "'parameters'", "]", "[", "param_name", "]", "if", "param_info", ".", "get", "(", "'required'", ",", "False", ")", ":", "ordered_params", ".", "append", "(", "param_name", ")", "method_info", "=", "base_api", ".", "ApiMethodInfo", "(", "relative_path", "=", "relative_path", ",", "method_id", "=", "method_id", ",", "http_method", "=", "method_description", "[", "'httpMethod'", "]", ",", "description", "=", "util", ".", "CleanDescription", "(", "method_description", ".", "get", "(", "'description'", ",", "''", ")", ")", ",", "query_params", "=", "[", "]", ",", "path_params", "=", "[", "]", ",", "ordered_params", "=", "ordered_params", ",", "request_type_name", "=", "self", ".", "__names", ".", "ClassName", "(", "request", ")", ",", "response_type_name", "=", "self", ".", "__names", ".", "ClassName", "(", "response", ")", ",", "request_field", "=", "request_field", ",", ")", "flat_path", "=", "method_description", ".", "get", "(", "'flatPath'", ",", "None", ")", "if", "flat_path", "is", "not", "None", ":", "flat_path", "=", "self", ".", "__names", ".", "NormalizeRelativePath", "(", "self", ".", "__client_info", ".", "base_path", "+", "flat_path", ")", "if", "flat_path", "!=", "relative_path", ":", "method_info", ".", "flat_path", "=", "flat_path", "if", "method_description", ".", "get", "(", "'supportsMediaUpload'", ",", "False", ")", ":", "method_info", ".", "upload_config", "=", "self", ".", "__ComputeUploadConfig", "(", "method_description", ".", "get", "(", "'mediaUpload'", ")", ",", "method_id", ")", "method_info", ".", "supports_download", "=", "method_description", ".", "get", "(", "'supportsMediaDownload'", ",", "False", ")", "self", ".", "__all_scopes", ".", "update", "(", "method_description", ".", "get", "(", "'scopes'", ",", "(", ")", ")", ")", "for", "param", ",", "desc", "in", "method_description", ".", "get", "(", "'parameters'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "param", "=", "self", ".", "__names", ".", "CleanName", "(", "param", ")", "location", "=", "desc", "[", "'location'", "]", "if", "location", "==", "'query'", ":", "method_info", ".", "query_params", ".", "append", "(", "param", ")", "elif", "location", "==", "'path'", ":", "method_info", ".", "path_params", ".", "append", "(", "param", ")", "else", ":", "raise", "ValueError", "(", "'Unknown parameter location %s for parameter %s'", "%", "(", "location", ",", "param", ")", ")", "method_info", ".", "path_params", ".", "sort", "(", ")", "method_info", ".", "query_params", ".", "sort", "(", ")", "return", "method_info" ]
Compute the base_api.ApiMethodInfo for this method.
[ "Compute", "the", "base_api", ".", "ApiMethodInfo", "for", "this", "method", "." ]
python
train
dswah/pyGAM
pygam/pygam.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1075-L1115
def _estimate_r2(self, X=None, y=None, mu=None, weights=None):
    """
    estimate some pseudo R^2 values

    currently only computes explained deviance.
    results are stored

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        output data vector
    mu : array-like of shape (n_samples,)
        expected value of the targets given the model and inputs
    weights : array-like shape (n_samples,) or None, optional
        containing sample weights
        if None, defaults to array of ones

    Returns
    -------
    None
    """
    if mu is None:
        mu = self.predict_mu(X=X)

    if weights is None:
        weights = np.ones_like(y).astype('float64')

    null_mu = y.mean() * np.ones_like(y).astype('float64')

    null_d = self.distribution.deviance(y=y, mu=null_mu, weights=weights)
    full_d = self.distribution.deviance(y=y, mu=mu, weights=weights)

    null_ll = self._loglikelihood(y=y, mu=null_mu, weights=weights)
    full_ll = self._loglikelihood(y=y, mu=mu, weights=weights)

    r2 = OrderedDict()
    r2['explained_deviance'] = 1. - full_d.sum()/null_d.sum()
    r2['McFadden'] = full_ll/null_ll
    r2['McFadden_adj'] = 1. - (full_ll - self.statistics_['edof'])/null_ll

    return r2
[ "def", "_estimate_r2", "(", "self", ",", "X", "=", "None", ",", "y", "=", "None", ",", "mu", "=", "None", ",", "weights", "=", "None", ")", ":", "if", "mu", "is", "None", ":", "mu", "=", "self", ".", "predict_mu", "(", "X", "=", "X", ")", "if", "weights", "is", "None", ":", "weights", "=", "np", ".", "ones_like", "(", "y", ")", ".", "astype", "(", "'float64'", ")", "null_mu", "=", "y", ".", "mean", "(", ")", "*", "np", ".", "ones_like", "(", "y", ")", ".", "astype", "(", "'float64'", ")", "null_d", "=", "self", ".", "distribution", ".", "deviance", "(", "y", "=", "y", ",", "mu", "=", "null_mu", ",", "weights", "=", "weights", ")", "full_d", "=", "self", ".", "distribution", ".", "deviance", "(", "y", "=", "y", ",", "mu", "=", "mu", ",", "weights", "=", "weights", ")", "null_ll", "=", "self", ".", "_loglikelihood", "(", "y", "=", "y", ",", "mu", "=", "null_mu", ",", "weights", "=", "weights", ")", "full_ll", "=", "self", ".", "_loglikelihood", "(", "y", "=", "y", ",", "mu", "=", "mu", ",", "weights", "=", "weights", ")", "r2", "=", "OrderedDict", "(", ")", "r2", "[", "'explained_deviance'", "]", "=", "1.", "-", "full_d", ".", "sum", "(", ")", "/", "null_d", ".", "sum", "(", ")", "r2", "[", "'McFadden'", "]", "=", "full_ll", "/", "null_ll", "r2", "[", "'McFadden_adj'", "]", "=", "1.", "-", "(", "full_ll", "-", "self", ".", "statistics_", "[", "'edof'", "]", ")", "/", "null_ll", "return", "r2" ]
estimate some pseudo R^2 values currently only computes explained deviance. results are stored Parameters ---------- y : array-like of shape (n_samples,) output data vector mu : array-like of shape (n_samples,) expected value of the targets given the model and inputs weights : array-like shape (n_samples,) or None, optional containing sample weights if None, defaults to array of ones Returns ------- None
[ "estimate", "some", "pseudo", "R^2", "values" ]
python
train
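The explained-deviance statistic is one minus the ratio of model deviance to null deviance. A rough numpy illustration for the Gaussian case, where deviance reduces to weighted squared residuals (the arrays are made-up data; this is not the pyGAM implementation):

```python
import numpy as np

y  = np.array([1.0, 2.0, 3.0, 4.0])
mu = np.array([1.2, 1.8, 3.1, 3.9])  # fitted means from some model
w  = np.ones_like(y)

def gaussian_deviance(y, mu, w):
    return w * (y - mu) ** 2

null_mu = np.full_like(y, y.mean())
null_d = gaussian_deviance(y, null_mu, w)
full_d = gaussian_deviance(y, mu, w)

explained_deviance = 1.0 - full_d.sum() / null_d.sum()
print(explained_deviance)  # close to 1 when the fit captures most of the variation
```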
radjkarl/imgProcessor
imgProcessor/interpolate/interpolate2dStructuredCrossAvg.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/interpolate2dStructuredCrossAvg.py#L7-L19
def interpolate2dStructuredCrossAvg(grid, mask, kernel=15, power=2):
    '''
    ####### useful if large empty areas need to be filled
    '''
    vals = np.empty(shape=4, dtype=grid.dtype)
    dist = np.empty(shape=4, dtype=np.uint16)
    weights = np.empty(shape=4, dtype=np.float32)
    valid = np.empty(shape=4, dtype=bool)

    return _calc(grid, mask, power, kernel,
                 vals, dist, weights, valid)
[ "def", "interpolate2dStructuredCrossAvg", "(", "grid", ",", "mask", ",", "kernel", "=", "15", ",", "power", "=", "2", ")", ":", "vals", "=", "np", ".", "empty", "(", "shape", "=", "4", ",", "dtype", "=", "grid", ".", "dtype", ")", "dist", "=", "np", ".", "empty", "(", "shape", "=", "4", ",", "dtype", "=", "np", ".", "uint16", ")", "weights", "=", "np", ".", "empty", "(", "shape", "=", "4", ",", "dtype", "=", "np", ".", "float32", ")", "valid", "=", "np", ".", "empty", "(", "shape", "=", "4", ",", "dtype", "=", "bool", ")", "return", "_calc", "(", "grid", ",", "mask", ",", "power", ",", "kernel", ",", "vals", ",", "dist", ",", "weights", ",", "valid", ")" ]
####### useful if large empty areas need to be filled
[ "#######", "usefull", "if", "large", "empty", "areas", "need", "to", "be", "filled" ]
python
train
NYUCCL/psiTurk
psiturk/psiturk_shell.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L1029-L1051
def do_worker(self, arg):
    """
    Usage:
      worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
      worker reject (--hit <hit_id> | <assignment_id> ...)
      worker unreject (--hit <hit_id> | <assignment_id> ...)
      worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...)
      worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies]
      worker help
    """
    if arg['approve']:
        self.worker_approve(arg['--all'], arg['<hit_id>'], arg['<assignment_id>'],
                            arg['--all-studies'], arg['--force'])
    elif arg['reject']:
        self.amt_services_wrapper.worker_reject(arg['<hit_id>'], arg['<assignment_id>'])
    elif arg['unreject']:
        self.amt_services_wrapper.worker_unreject(arg['<hit_id>'], arg['<assignment_id>'])
    elif arg['list']:
        self.worker_list(arg['--submitted'], arg['--approved'], arg['--rejected'],
                         arg['<hit_id>'], arg['--all-studies'])
    elif arg['bonus']:
        self.amt_services_wrapper.worker_bonus(arg['<hit_id>'], arg['--auto'],
                                               arg['<amount>'], '', arg['<assignment_id>'])
    else:
        self.help_worker()
[ "def", "do_worker", "(", "self", ",", "arg", ")", ":", "if", "arg", "[", "'approve'", "]", ":", "self", ".", "worker_approve", "(", "arg", "[", "'--all'", "]", ",", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'<assignment_id>'", "]", ",", "arg", "[", "'--all-studies'", "]", ",", "arg", "[", "'--force'", "]", ")", "elif", "arg", "[", "'reject'", "]", ":", "self", ".", "amt_services_wrapper", ".", "worker_reject", "(", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'<assignment_id>'", "]", ")", "elif", "arg", "[", "'unreject'", "]", ":", "self", ".", "amt_services_wrapper", ".", "worker_unreject", "(", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'<assignment_id>'", "]", ")", "elif", "arg", "[", "'list'", "]", ":", "self", ".", "worker_list", "(", "arg", "[", "'--submitted'", "]", ",", "arg", "[", "'--approved'", "]", ",", "arg", "[", "'--rejected'", "]", ",", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'--all-studies'", "]", ")", "elif", "arg", "[", "'bonus'", "]", ":", "self", ".", "amt_services_wrapper", ".", "worker_bonus", "(", "arg", "[", "'<hit_id>'", "]", ",", "arg", "[", "'--auto'", "]", ",", "arg", "[", "'<amount>'", "]", ",", "''", ",", "arg", "[", "'<assignment_id>'", "]", ")", "else", ":", "self", ".", "help_worker", "(", ")" ]
Usage: worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force] worker reject (--hit <hit_id> | <assignment_id> ...) worker unreject (--hit <hit_id> | <assignment_id> ...) worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...) worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies] worker help
[ "Usage", ":", "worker", "approve", "(", "--", "all", "|", "--", "hit", "<hit_id", ">", "...", "|", "<assignment_id", ">", "...", ")", "[", "--", "all", "-", "studies", "]", "[", "--", "force", "]", "worker", "reject", "(", "--", "hit", "<hit_id", ">", "|", "<assignment_id", ">", "...", ")", "worker", "unreject", "(", "--", "hit", "<hit_id", ">", "|", "<assignment_id", ">", "...", ")", "worker", "bonus", "(", "--", "amount", "<amount", ">", "|", "--", "auto", ")", "(", "--", "hit", "<hit_id", ">", "|", "<assignment_id", ">", "...", ")", "worker", "list", "[", "--", "submitted", "|", "--", "approved", "|", "--", "rejected", "]", "[", "(", "--", "hit", "<hit_id", ">", ")", "]", "[", "--", "all", "-", "studies", "]", "worker", "help" ]
python
train
shoebot/shoebot
lib/graph/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/__init__.py#L725-L797
def create(iterations=1000, distance=1.0, layout=LAYOUT_SPRING, depth=True): """ Returns a new graph with predefined styling. """ #global _ctx _ctx.colormode(_ctx.RGB) g = graph(iterations, distance, layout) # Styles for different types of nodes. s = style.style g.styles.append(s(style.LIGHT , _ctx, fill = _ctx.color(0.0, 0.0, 0.0, 0.20))) g.styles.append(s(style.DARK , _ctx, fill = _ctx.color(0.3, 0.5, 0.7, 0.75))) g.styles.append(s(style.BACK , _ctx, fill = _ctx.color(0.5, 0.8, 0.0, 0.50))) g.styles.append(s(style.IMPORTANT, _ctx, fill = _ctx.color(0.3, 0.6, 0.8, 0.75))) g.styles.append(s(style.HIGHLIGHT, _ctx, stroke = _ctx.color(1.0, 0.0, 0.5), strokewidth=1.5)) g.styles.append(s(style.MARKED , _ctx)) g.styles.append(s(style.ROOT , _ctx, text = _ctx.color(1.0, 0.0, 0.4, 1.00), stroke = _ctx.color(0.8, 0.8, 0.8, 0.60), strokewidth = 1.5, fontsize = 16, textwidth = 150)) # Important nodes get a double stroke. def important_node(s, node, alpha=1.0): style.style(None, _ctx).node(s, node, alpha) r = node.r * 1.4 _ctx.nofill() _ctx.oval(node.x-r, node.y-r, r*2, r*2) # Marked nodes have an inner dot. def marked_node(s, node, alpha=1.0): style.style(None, _ctx).node(s, node, alpha) r = node.r * 0.3 _ctx.fill(s.stroke) _ctx.oval(node.x-r, node.y-r, r*2, r*2) g.styles.important.node = important_node g.styles.marked.node = marked_node g.styles.depth = depth # Styling guidelines. All nodes have the default style, except: # 1) a node directly connected to the root gets the LIGHT style. # 2) a node with more than 4 edges gets the DARK style. # 3) a node with a weight of 0.75-1.0 gets the IMPORTANT style. # 4) the graph.root node gets the ROOT style. # 5) the node last clicked gets the BACK style. g.styles.guide.append(style.LIGHT , lambda graph, node: graph.root in node.links) g.styles.guide.append(style.DARK , lambda graph, node: len(node.links) > 4) g.styles.guide.append(style.IMPORTANT , lambda graph, node: node.weight > 0.75) g.styles.guide.append(style.ROOT , lambda graph, node: node == graph.root) g.styles.guide.append(style.BACK , lambda graph, node: node == graph.events.clicked) # An additional rule applies every node's weight to its radius. def balance(graph, node): node.r = node.r*0.75 + node.r*node.weight*0.75 g.styles.guide.append("balance", balance) # An additional rule that keeps leaf nodes closely clustered. def cluster(graph, node): if len(node.links) == 1: node.links.edge(node.links[0]).length *= 0.5 g.styles.guide.append("cluster", cluster) g.styles.guide.order = [ style.LIGHT, style.DARK, style.IMPORTANT, style.ROOT, style.BACK, "balance", "nurse" ] return g
[ "def", "create", "(", "iterations", "=", "1000", ",", "distance", "=", "1.0", ",", "layout", "=", "LAYOUT_SPRING", ",", "depth", "=", "True", ")", ":", "#global _ctx", "_ctx", ".", "colormode", "(", "_ctx", ".", "RGB", ")", "g", "=", "graph", "(", "iterations", ",", "distance", ",", "layout", ")", "# Styles for different types of nodes.", "s", "=", "style", ".", "style", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "LIGHT", ",", "_ctx", ",", "fill", "=", "_ctx", ".", "color", "(", "0.0", ",", "0.0", ",", "0.0", ",", "0.20", ")", ")", ")", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "DARK", ",", "_ctx", ",", "fill", "=", "_ctx", ".", "color", "(", "0.3", ",", "0.5", ",", "0.7", ",", "0.75", ")", ")", ")", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "BACK", ",", "_ctx", ",", "fill", "=", "_ctx", ".", "color", "(", "0.5", ",", "0.8", ",", "0.0", ",", "0.50", ")", ")", ")", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "IMPORTANT", ",", "_ctx", ",", "fill", "=", "_ctx", ".", "color", "(", "0.3", ",", "0.6", ",", "0.8", ",", "0.75", ")", ")", ")", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "HIGHLIGHT", ",", "_ctx", ",", "stroke", "=", "_ctx", ".", "color", "(", "1.0", ",", "0.0", ",", "0.5", ")", ",", "strokewidth", "=", "1.5", ")", ")", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "MARKED", ",", "_ctx", ")", ")", "g", ".", "styles", ".", "append", "(", "s", "(", "style", ".", "ROOT", ",", "_ctx", ",", "text", "=", "_ctx", ".", "color", "(", "1.0", ",", "0.0", ",", "0.4", ",", "1.00", ")", ",", "stroke", "=", "_ctx", ".", "color", "(", "0.8", ",", "0.8", ",", "0.8", ",", "0.60", ")", ",", "strokewidth", "=", "1.5", ",", "fontsize", "=", "16", ",", "textwidth", "=", "150", ")", ")", "# Important nodes get a double stroke.", "def", "important_node", "(", "s", ",", "node", ",", "alpha", "=", "1.0", ")", ":", "style", ".", "style", "(", "None", ",", "_ctx", ")", ".", "node", "(", "s", ",", "node", ",", "alpha", ")", "r", "=", "node", ".", "r", "*", "1.4", "_ctx", ".", "nofill", "(", ")", "_ctx", ".", "oval", "(", "node", ".", "x", "-", "r", ",", "node", ".", "y", "-", "r", ",", "r", "*", "2", ",", "r", "*", "2", ")", "# Marked nodes have an inner dot.", "def", "marked_node", "(", "s", ",", "node", ",", "alpha", "=", "1.0", ")", ":", "style", ".", "style", "(", "None", ",", "_ctx", ")", ".", "node", "(", "s", ",", "node", ",", "alpha", ")", "r", "=", "node", ".", "r", "*", "0.3", "_ctx", ".", "fill", "(", "s", ".", "stroke", ")", "_ctx", ".", "oval", "(", "node", ".", "x", "-", "r", ",", "node", ".", "y", "-", "r", ",", "r", "*", "2", ",", "r", "*", "2", ")", "g", ".", "styles", ".", "important", ".", "node", "=", "important_node", "g", ".", "styles", ".", "marked", ".", "node", "=", "marked_node", "g", ".", "styles", ".", "depth", "=", "depth", "# Styling guidelines. All nodes have the default style, except:", "# 1) a node directly connected to the root gets the LIGHT style.", "# 2) a node with more than 4 edges gets the DARK style.", "# 3) a node with a weight of 0.75-1.0 gets the IMPORTANT style.", "# 4) the graph.root node gets the ROOT style.", "# 5) the node last clicked gets the BACK style. 
", "g", ".", "styles", ".", "guide", ".", "append", "(", "style", ".", "LIGHT", ",", "lambda", "graph", ",", "node", ":", "graph", ".", "root", "in", "node", ".", "links", ")", "g", ".", "styles", ".", "guide", ".", "append", "(", "style", ".", "DARK", ",", "lambda", "graph", ",", "node", ":", "len", "(", "node", ".", "links", ")", ">", "4", ")", "g", ".", "styles", ".", "guide", ".", "append", "(", "style", ".", "IMPORTANT", ",", "lambda", "graph", ",", "node", ":", "node", ".", "weight", ">", "0.75", ")", "g", ".", "styles", ".", "guide", ".", "append", "(", "style", ".", "ROOT", ",", "lambda", "graph", ",", "node", ":", "node", "==", "graph", ".", "root", ")", "g", ".", "styles", ".", "guide", ".", "append", "(", "style", ".", "BACK", ",", "lambda", "graph", ",", "node", ":", "node", "==", "graph", ".", "events", ".", "clicked", ")", "# An additional rule applies every node's weight to its radius.", "def", "balance", "(", "graph", ",", "node", ")", ":", "node", ".", "r", "=", "node", ".", "r", "*", "0.75", "+", "node", ".", "r", "*", "node", ".", "weight", "*", "0.75", "g", ".", "styles", ".", "guide", ".", "append", "(", "\"balance\"", ",", "balance", ")", "# An additional rule that keeps leaf nodes closely clustered.", "def", "cluster", "(", "graph", ",", "node", ")", ":", "if", "len", "(", "node", ".", "links", ")", "==", "1", ":", "node", ".", "links", ".", "edge", "(", "node", ".", "links", "[", "0", "]", ")", ".", "length", "*=", "0.5", "g", ".", "styles", ".", "guide", ".", "append", "(", "\"cluster\"", ",", "cluster", ")", "g", ".", "styles", ".", "guide", ".", "order", "=", "[", "style", ".", "LIGHT", ",", "style", ".", "DARK", ",", "style", ".", "IMPORTANT", ",", "style", ".", "ROOT", ",", "style", ".", "BACK", ",", "\"balance\"", ",", "\"nurse\"", "]", "return", "g" ]
Returns a new graph with predefined styling.
[ "Returns", "a", "new", "graph", "with", "predefined", "styling", "." ]
python
valid
log2timeline/dfvfs
dfvfs/resolver/resolver.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver/resolver.py#L42-L66
def OpenFileEntry(cls, path_spec_object, resolver_context=None):
    """Opens a file entry object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileEntry: file entry or None if the path specification could not
          be resolved.
    """
    file_system = cls.OpenFileSystem(
        path_spec_object, resolver_context=resolver_context)

    if resolver_context is None:
        resolver_context = cls._resolver_context

    file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)

    # Release the file system so it will be removed from the cache
    # when the file entry is destroyed.
    resolver_context.ReleaseFileSystem(file_system)

    return file_entry
[ "def", "OpenFileEntry", "(", "cls", ",", "path_spec_object", ",", "resolver_context", "=", "None", ")", ":", "file_system", "=", "cls", ".", "OpenFileSystem", "(", "path_spec_object", ",", "resolver_context", "=", "resolver_context", ")", "if", "resolver_context", "is", "None", ":", "resolver_context", "=", "cls", ".", "_resolver_context", "file_entry", "=", "file_system", ".", "GetFileEntryByPathSpec", "(", "path_spec_object", ")", "# Release the file system so it will be removed from the cache", "# when the file entry is destroyed.", "resolver_context", ".", "ReleaseFileSystem", "(", "file_system", ")", "return", "file_entry" ]
Opens a file entry object defined by path specification. Args: path_spec_object (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built in context which is not multi process safe. Returns: FileEntry: file entry or None if the path specification could not be resolved.
[ "Opens", "a", "file", "entry", "object", "defined", "by", "path", "specification", "." ]
python
train
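A typical caller builds a path specification and hands it to the resolver; a hedged sketch following the usual dfvfs helper-script pattern (the location is illustrative, and exact module paths may vary between dfvfs versions):

```python
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver

# OS-level path specification for a local file (illustrative path).
os_path_spec = path_spec_factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp/image.raw')

file_entry = resolver.Resolver.OpenFileEntry(os_path_spec)
if file_entry is not None:
    print(file_entry.name)
```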
log2timeline/plaso
plaso/analysis/browser_search.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/browser_search.py#L117-L135
def _ExtractGMailSearchQuery(self, url):
    """Extracts a search query from a GMail search URL.

    GMail: https://mail.google.com/mail/u/0/#search/query[/?]

    Args:
      url (str): URL.

    Returns:
      str: search query or None if no query was found.
    """
    if 'search/' not in url:
        return None

    _, _, line = url.partition('search/')
    line, _, _ = line.partition('/')
    line, _, _ = line.partition('?')

    return line.replace('+', ' ')
[ "def", "_ExtractGMailSearchQuery", "(", "self", ",", "url", ")", ":", "if", "'search/'", "not", "in", "url", ":", "return", "None", "_", ",", "_", ",", "line", "=", "url", ".", "partition", "(", "'search/'", ")", "line", ",", "_", ",", "_", "=", "line", ".", "partition", "(", "'/'", ")", "line", ",", "_", ",", "_", "=", "line", ".", "partition", "(", "'?'", ")", "return", "line", ".", "replace", "(", "'+'", ",", "' '", ")" ]
Extracts a search query from a GMail search URL. GMail: https://mail.google.com/mail/u/0/#search/query[/?] Args: url (str): URL. Returns: str: search query or None if no query was found.
[ "Extracts", "a", "search", "query", "from", "a", "GMail", "search", "URL", "." ]
python
train
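A quick check of the partition-based extraction on a sample URL (the URL and query are illustrative):

```python
url = 'https://mail.google.com/mail/u/0/#search/plaso+timeline/p2'

if 'search/' in url:
    _, _, line = url.partition('search/')
    line, _, _ = line.partition('/')
    line, _, _ = line.partition('?')
    print(line.replace('+', ' '))  # plaso timeline
```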
lowandrew/OLCTools
coreGenome/core.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/core.py#L169-L193
def annotatedcore(self):
    """
    Calculates the core genome of organisms using custom databases
    """
    logging.info('Calculating annotated core')
    # Determine the total number of core genes
    self.total_core()
    # Iterate through all the samples, and process all Escherichia
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            # Create a set to store the names of all the core genes in this strain
            sample[self.analysistype].coreset = set()
            if sample.general.referencegenus == 'Escherichia':
                # Add the Escherichia sample to the runmetadata
                self.runmetadata.samples.append(sample)
                # Parse the BLAST report
                try:
                    report = sample[self.analysistype].report
                    self.blastparser(report=report,
                                     sample=sample,
                                     fieldnames=self.fieldnames)
                except KeyError:
                    sample[self.analysistype].coreset = list()
    # Create the report
    self.reporter()
[ "def", "annotatedcore", "(", "self", ")", ":", "logging", ".", "info", "(", "'Calculating annotated core'", ")", "# Determine the total number of core genes", "self", ".", "total_core", "(", ")", "# Iterate through all the samples, and process all Escherichia", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "# Create a set to store the names of all the core genes in this strain", "sample", "[", "self", ".", "analysistype", "]", ".", "coreset", "=", "set", "(", ")", "if", "sample", ".", "general", ".", "referencegenus", "==", "'Escherichia'", ":", "# Add the Escherichia sample to the runmetadata", "self", ".", "runmetadata", ".", "samples", ".", "append", "(", "sample", ")", "# Parse the BLAST report", "try", ":", "report", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "report", "self", ".", "blastparser", "(", "report", "=", "report", ",", "sample", "=", "sample", ",", "fieldnames", "=", "self", ".", "fieldnames", ")", "except", "KeyError", ":", "sample", "[", "self", ".", "analysistype", "]", ".", "coreset", "=", "list", "(", ")", "# Create the report", "self", ".", "reporter", "(", ")" ]
Calculates the core genome of organisms using custom databases
[ "Calculates", "the", "core", "genome", "of", "organisms", "using", "custom", "databases" ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L2827-L3374
def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs): """ calculate the paleointensity magic parameters make some definitions """ if 'version' in list(kwargs.keys()) and kwargs['version'] == 3: meth_key = 'method_codes' beta_key = 'int_b_beta' temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max' dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi' # convert dataframe to list of dictionaries datablock = datablock.to_dict('records') z_key = 'int_z' drats_key = 'int_drats' drat_key = 'int_drat' md_key = 'int_md' dec_key = 'dir_dec' inc_key = 'dir_inc' mad_key = 'int_mad_free' dang_key = 'int_dang' ptrm_key = 'int_n_ptrm' theta_key = 'int_theta' gamma_key = 'int_gamma' delta_key = 'int_delta' frac_key = 'int_frac' gmax_key = 'int_gmax' scat_key = 'int_scat' else: beta_key = 'specimen_b_beta' meth_key = 'magic_method_codes' temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max' z_key = 'specimen_z' drats_key = 'specimen_drats' drat_key = 'specimen_drat' md_key = 'specimen_md' dec_key = 'specimen_dec' inc_key = 'specimen_inc' mad_key = 'specimen_int_mad' dang_key = 'specimen_dang' ptrm_key = 'specimen_int_ptrm_n' theta_key = 'specimen_theta' gamma_key = 'specimen_gamma' delta_key = 'specimen_delta' frac_key = 'specimen_frac' gmax_key = 'specimen_gmax' scat_key = 'specimen_scat' first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], [] methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", "" zptrm_check = [] first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[ 0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5] if len(araiblock) > 6: # used only for perpendicular method of paleointensity ThetaChecks = araiblock[6] # used only for perpendicular method of paleointensity DeltaChecks = araiblock[7] xi, yi, diffcum = [], [], 0 xiz, xzi, yiz, yzi = [], [], [], [] Nptrm, dmax = 0, -1e-22 # check if even zero and infield steps if len(first_Z) > len(first_I): maxe = len(first_I) - 1 else: maxe = len(first_Z) - 1 if end == 0 or end > maxe: end = maxe # get the MAD, DANG, etc. for directional data bstep = araiblock[0][start][0] estep = araiblock[0][end][0] zstart, zend = 0, len(zijdblock) for k in range(len(zijdblock)): zrec = zijdblock[k] if zrec[0] == bstep: zstart = k if zrec[0] == estep: zend = k PCA = domean(zijdblock, zstart, zend, 'DE-BFL') D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz for rec in zijdblock: D.append((rec[1], rec[2], rec[3])) Du.append((rec[1], rec[2])) if rec[4] == 1: Dzi.append((rec[1], rec[2])) # if this is ZI step else: Diz.append((rec[1], rec[2])) # if this is IZ step # calculate the vector difference sum vds = dovds(D) b_zi, b_iz = [], [] # collect data included in ZigZag calculation if end + 1 >= len(first_Z): stop = end - 1 else: stop = end for k in range(start, end + 1): for l in range(len(first_I)): irec = first_I[l] if irec[0] == first_Z[k][0]: xi.append(irec[3]) yi.append(first_Z[k][3]) pars, errcode = int_pars(xi, yi, vds) if errcode == 1: return pars, errcode # for k in range(start,end+1): for k in range(len(first_Z) - 1): for l in range(k): # only go down to 10% of NRM..... 
if old_div(first_Z[k][3], vds) > 0.1: irec = first_I[l] if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step xzi = irec[3] yzi = first_Z[k][3] xiz = first_I[l + 1][3] yiz = first_Z[k + 1][3] slope = np.arctan2((yzi - yiz), (xiz - xzi)) r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2) if r > .1 * vds: b_zi.append(slope) # suppress noise elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step xiz = irec[3] yiz = first_Z[k][3] xzi = first_I[l + 1][3] yzi = first_Z[k + 1][3] slope = np.arctan2((yiz - yzi), (xzi - xiz)) r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2) if r > .1 * vds: b_iz.append(slope) # suppress noise # ZigZag, Frat, Trat = -1, 0, 0 if len(Diz) > 2 and len(Dzi) > 2: ZigZag = 0 dizp = fisher_mean(Diz) # get Fisher stats on IZ steps dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps dup = fisher_mean(Du) # get Fisher stats on all steps # # if directions are TOO well grouped, can get false positive for ftest, so # angles must be > 3 degrees apart. # if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.: F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \ (dup['n'] - dzip['r'] - dizp['r'] ) # Watson test for common mean nf = 2. * (dup['n'] - 2.) # number of degees of freedom ftest = fcalc(2, nf) Frat = old_div(F, ftest) if Frat > 1.: ZigZag = Frat # fails zigzag on directions methcode = "SM-FTEST" # now do slopes if len(b_zi) > 2 and len(b_iz) > 2: bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev biz_m, biz_sig = gausspars(b_iz) n_zi = float(len(b_zi)) n_iz = float(len(b_iz)) b_diff = abs(bzi_m - biz_m) # difference in means # # avoid false positives - set 3 degree slope difference here too if b_diff > 3 * np.pi / 180.: nf = n_zi + n_iz - 2. # degrees of freedom svar = old_div(((n_zi - 1.) * bzi_sig**2 + (n_iz - 1.) * biz_sig**2), nf) T = old_div((b_diff), np.sqrt( svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t ttest = tcalc(nf, .05) # t-test at 95% conf. 
Trat = old_div(T, ttest) if Trat > 1 and Trat > Frat: ZigZag = Trat # fails zigzag on directions methcode = "SM-TTEST" pars[z_key] = ZigZag pars[meth_key] = methcode # do drats if len(ptrm_check) != 0: diffcum, drat_max = 0, 0 for prec in ptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 2 # don't count alteration that happens after this step if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] if abs(prec[3] - irec[3]) > drat_max: drat_max = abs(prec[3] - irec[3]) pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3]) elif len(zptrm_check) != 0: diffcum = 0 for prec in zptrm_check: step = prec[0] endbak = end zend = end while zend > len(zijdblock) - 1: zend = zend - 1 if step < zijdblock[zend][0]: Nptrm += 1 for irec in first_I: if irec[0] == step: break diffcum += prec[3] - irec[3] pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3]) else: pars[drats_key] = -1 pars[drat_key] = -1 # and the pTRM tails if len(ptrm_tail) != 0: for trec in ptrm_tail: step = trec[0] for irec in first_I: if irec[0] == step: break if abs(trec[3]) > dmax: dmax = abs(trec[3]) pars[md_key] = (100 * dmax / vds) else: pars[md_key] = -1 pars[min_key] = bstep pars[max_key] = estep pars[dec_key] = PCA["specimen_dec"] pars[inc_key] = PCA["specimen_inc"] pars[mad_key] = PCA["specimen_mad"] pars[dang_key] = PCA["specimen_dang"] pars[ptrm_key] = Nptrm # and the ThetaChecks if ThetaChecks != "": t = 0 for theta in ThetaChecks: if theta[0] >= bstep and theta[0] <= estep and theta[1] > t: t = theta[1] pars[theta_key] = t else: pars[theta_key] = -1 # and the DeltaChecks if DeltaChecks != "": d = 0 for delta in DeltaChecks: if delta[0] >= bstep and delta[0] <= estep and delta[1] > d: d = delta[1] pars[delta_key] else: pars[delta_key] = -1 pars[gamma_key] = -1 if GammaChecks != "": for gamma in GammaChecks: if gamma[0] <= estep: pars['specimen_gamma'] = gamma[1] # -------------------------------------------------------------- # From here added By Ron Shaar 11-Dec 2012 # New parameters defined in Shaar and Tauxe (2012): # FRAC (specimen_frac) - ranges from 0. to 1. # SCAT (specimen_scat) - takes 1/0 # gap_max (specimen_gmax) - ranges from 0. to 1. # -------------------------------------------------------------- # -------------------------------------------------------------- # FRAC is similar to Fvds, but the numerator is the vds fraction: # FRAC= [ vds (start,end)] / total vds ] # gap_max= max [ (vector difference) / vds (start,end)] # -------------------------------------------------------------- # collect all zijderveld data to arrays and calculate VDS z_temperatures = [row[0] for row in zijdblock] zdata = [] # array of zero-fields measurements in Cartezian coordinates # array of vector differences (for vds calculation) vector_diffs = [] NRM = zijdblock[0][3] # NRM for k in range(len(zijdblock)): DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)] cart = dir2cart(DIR) zdata.append(np.array([cart[0], cart[1], cart[2]])) if k > 0: vector_diffs.append( np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2))) # last vector difference: from the last point to the origin. 
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2))) vds = sum(vector_diffs) # vds calculation zdata = np.array(zdata) vector_diffs = np.array(vector_diffs) # calculate the vds within the chosen segment vector_diffs_segment = vector_diffs[zstart:zend] # FRAC calculation FRAC = old_div(sum(vector_diffs_segment), vds) pars[frac_key] = FRAC # gap_max calculation max_FRAC_gap = max( old_div(vector_diffs_segment, sum(vector_diffs_segment))) pars[gmax_key] = max_FRAC_gap # --------------------------------------------------------------------- # Calculate the "scat box" # all data-points, pTRM checks, and tail-checks, should be inside a "scat box" # --------------------------------------------------------------------- # intialization # fail scat due to arai plot data points pars["fail_arai_beta_box_scatter"] = False pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks pars[scat_key] = "t" # Pass by default # -------------------------------------------------------------- # collect all Arai plot data points in arrays x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], [] NRMs = araiblock[0] PTRMs = araiblock[1] ptrm_checks = araiblock[2] ptrm_tail = araiblock[3] PTRMs_temperatures = [row[0] for row in PTRMs] NRMs_temperatures = [row[0] for row in NRMs] NRM = NRMs[0][3] for k in range(len(NRMs)): index_pTRMs = PTRMs_temperatures.index(NRMs[k][0]) x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM)) y_Arai.append(old_div(NRMs[k][3], NRM)) t_Arai.append(NRMs[k][0]) if NRMs[k][4] == 1: steps_Arai.append('ZI') else: steps_Arai.append('IZ') x_Arai = np.array(x_Arai) y_Arai = np.array(y_Arai) # -------------------------------------------------------------- # collect all pTRM check to arrays x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], [] x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_checks)): if ptrm_checks[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_ptrm_check_starting_point.append(x_Arai[index]) y_ptrm_check_starting_point.append(y_Arai[index]) ptrm_checks_starting_temperatures.append( starting_temperature) index_zerofield = zerofield_temperatures.index( ptrm_checks[k][0]) x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM)) y_ptrm_check.append( old_div(zerofields[index_zerofield][3], NRM)) ptrm_checks_temperatures.append(ptrm_checks[k][0]) break except: pass x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point) y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point) ptrm_checks_starting_temperatures = np.array( ptrm_checks_starting_temperatures) x_ptrm_check = np.array(x_ptrm_check) y_ptrm_check = np.array(y_ptrm_check) ptrm_checks_temperatures = np.array(ptrm_checks_temperatures) # -------------------------------------------------------------- # collect tail checks to arrays x_tail_check, y_tail_check, tail_check_temperatures = [], [], [] x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], [] for k in range(len(ptrm_tail)): if ptrm_tail[k][0] in NRMs_temperatures: # find the starting point of the pTRM check: for i in range(len(datablock)): rec = datablock[i] if 
"LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]: starting_temperature = (float(datablock[i - 1][temp_key])) try: index = t_Arai.index(starting_temperature) x_tail_check_starting_point.append(x_Arai[index]) y_tail_check_starting_point.append(y_Arai[index]) tail_checks_starting_temperatures.append( starting_temperature) index_infield = infield_temperatures.index( ptrm_tail[k][0]) x_tail_check.append( old_div(infields[index_infield][3], NRM)) y_tail_check.append( old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM)) tail_check_temperatures.append(ptrm_tail[k][0]) break except: pass x_tail_check = np.array(x_tail_check) y_tail_check = np.array(y_tail_check) tail_check_temperatures = np.array(tail_check_temperatures) x_tail_check_starting_point = np.array(x_tail_check_starting_point) y_tail_check_starting_point = np.array(y_tail_check_starting_point) tail_checks_starting_temperatures = np.array( tail_checks_starting_temperatures) # -------------------------------------------------------------- # collect the chosen segment in the Arai plot to arrays x_Arai_segment = x_Arai[start:end + 1] # chosen segent in the Arai plot y_Arai_segment = y_Arai[start:end + 1] # chosen segent in the Arai plot # -------------------------------------------------------------- # collect pTRM checks in segment to arrays # notice, this is different than the conventional DRATS. # for scat calculation we take only the pTRM checks which were carried out # before reaching the highest temperature in the chosen segment x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], [] for k in range(len(ptrm_checks_temperatures)): if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures <= pars[max_key]: x_ptrm_check_for_SCAT.append(x_ptrm_check[k]) y_ptrm_check_for_SCAT.append(y_ptrm_check[k]) x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT) y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT) # -------------------------------------------------------------- # collect Tail checks in segment to arrays # for scat calculation we take only the tail checks which were carried out # before reaching the highest temperature in the chosen segment x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], [] for k in range(len(tail_check_temperatures)): if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]: x_tail_check_for_SCAT.append(x_tail_check[k]) y_tail_check_for_SCAT.append(y_tail_check[k]) x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT) y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT) # -------------------------------------------------------------- # calculate the lines that define the scat box: # if threshold value for beta is not defined, then scat cannot be calculated (pass) # in this case, scat pass if beta_key in list(accept.keys()) and accept[beta_key] != "": b_beta_threshold = float(accept[beta_key]) b = pars[b_key] # best fit line cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass a = cm_y - b * cm_x # lines with slope = slope +/- 2*(specimen_b_beta) two_sigma_beta_threshold = 2 * b_beta_threshold two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b) # a line with a shallower slope (b + 2*beta*b) passing through the center of mass # y=a1+b1x b1 = b + two_sigma_slope_threshold a1 = cm_y - b1 * cm_x # bounding line with steeper slope (b - 2*beta*b) passing through the center of mass # y=a2+b2x b2 = b - 
two_sigma_slope_threshold a2 = cm_y - b2 * cm_x # lower bounding line of the 'beta box' # y=intercept1+slop1x slop1 = old_div(a1, ((old_div(a2, b2)))) intercept1 = a1 # higher bounding line of the 'beta box' # y=intercept2+slop2x slop2 = old_div(a2, ((old_div(a1, b1)))) intercept2 = a2 pars['specimen_scat_bounding_line_high'] = [intercept2, slop2] pars['specimen_scat_bounding_line_low'] = [intercept1, slop1] # -------------------------------------------------------------- # check if the Arai data points are in the 'box' # the two bounding lines ymin = intercept1 + x_Arai_segment * slop1 ymax = intercept2 + x_Arai_segment * slop2 # arrays of "True" or "False" check_1 = y_Arai_segment > ymax check_2 = y_Arai_segment < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_arai_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the pTRM checks data points are in the 'box' if len(x_ptrm_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_ptrm_check_for_SCAT * slop1 ymax = intercept2 + x_ptrm_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_ptrm_check_for_SCAT > ymax check_2 = y_ptrm_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_ptrm_beta_box_scatter"] = True # -------------------------------------------------------------- # check if the tail checks data points are in the 'box' if len(x_tail_check_for_SCAT) > 0: # the two bounding lines ymin = intercept1 + x_tail_check_for_SCAT * slop1 ymax = intercept2 + x_tail_check_for_SCAT * slop2 # arrays of "True" or "False" check_1 = y_tail_check_for_SCAT > ymax check_2 = y_tail_check_for_SCAT < ymin # check if at least one "True" if (sum(check_1) + sum(check_2)) > 0: pars["fail_tail_beta_box_scatter"] = True # -------------------------------------------------------------- # check if specimen_scat is PASS or FAIL: if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]: pars[scat_key] = 'f' else: pars[scat_key] = 't' return pars, 0
[ "def", "PintPars", "(", "datablock", ",", "araiblock", ",", "zijdblock", ",", "start", ",", "end", ",", "accept", ",", "*", "*", "kwargs", ")", ":", "if", "'version'", "in", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "and", "kwargs", "[", "'version'", "]", "==", "3", ":", "meth_key", "=", "'method_codes'", "beta_key", "=", "'int_b_beta'", "temp_key", ",", "min_key", ",", "max_key", "=", "'treat_temp'", ",", "'meas_step_min'", ",", "'meas_step_max'", "dc_theta_key", ",", "dc_phi_key", "=", "'treat_dc_field_theta'", ",", "'treat_dc_field_phi'", "# convert dataframe to list of dictionaries", "datablock", "=", "datablock", ".", "to_dict", "(", "'records'", ")", "z_key", "=", "'int_z'", "drats_key", "=", "'int_drats'", "drat_key", "=", "'int_drat'", "md_key", "=", "'int_md'", "dec_key", "=", "'dir_dec'", "inc_key", "=", "'dir_inc'", "mad_key", "=", "'int_mad_free'", "dang_key", "=", "'int_dang'", "ptrm_key", "=", "'int_n_ptrm'", "theta_key", "=", "'int_theta'", "gamma_key", "=", "'int_gamma'", "delta_key", "=", "'int_delta'", "frac_key", "=", "'int_frac'", "gmax_key", "=", "'int_gmax'", "scat_key", "=", "'int_scat'", "else", ":", "beta_key", "=", "'specimen_b_beta'", "meth_key", "=", "'magic_method_codes'", "temp_key", ",", "min_key", ",", "max_key", "=", "'treatment_temp'", ",", "'measurement_step_min'", ",", "'measurement_step_max'", "z_key", "=", "'specimen_z'", "drats_key", "=", "'specimen_drats'", "drat_key", "=", "'specimen_drat'", "md_key", "=", "'specimen_md'", "dec_key", "=", "'specimen_dec'", "inc_key", "=", "'specimen_inc'", "mad_key", "=", "'specimen_int_mad'", "dang_key", "=", "'specimen_dang'", "ptrm_key", "=", "'specimen_int_ptrm_n'", "theta_key", "=", "'specimen_theta'", "gamma_key", "=", "'specimen_gamma'", "delta_key", "=", "'specimen_delta'", "frac_key", "=", "'specimen_frac'", "gmax_key", "=", "'specimen_gmax'", "scat_key", "=", "'specimen_scat'", "first_Z", ",", "first_I", ",", "zptrm_check", ",", "ptrm_check", ",", "ptrm_tail", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "methcode", ",", "ThetaChecks", ",", "DeltaChecks", ",", "GammaChecks", "=", "\"\"", ",", "\"\"", ",", "\"\"", ",", "\"\"", "zptrm_check", "=", "[", "]", "first_Z", ",", "first_I", ",", "ptrm_check", ",", "ptrm_tail", ",", "zptrm_check", ",", "GammaChecks", "=", "araiblock", "[", "0", "]", ",", "araiblock", "[", "1", "]", ",", "araiblock", "[", "2", "]", ",", "araiblock", "[", "3", "]", ",", "araiblock", "[", "4", "]", ",", "araiblock", "[", "5", "]", "if", "len", "(", "araiblock", ")", ">", "6", ":", "# used only for perpendicular method of paleointensity", "ThetaChecks", "=", "araiblock", "[", "6", "]", "# used only for perpendicular method of paleointensity", "DeltaChecks", "=", "araiblock", "[", "7", "]", "xi", ",", "yi", ",", "diffcum", "=", "[", "]", ",", "[", "]", ",", "0", "xiz", ",", "xzi", ",", "yiz", ",", "yzi", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "Nptrm", ",", "dmax", "=", "0", ",", "-", "1e-22", "# check if even zero and infield steps", "if", "len", "(", "first_Z", ")", ">", "len", "(", "first_I", ")", ":", "maxe", "=", "len", "(", "first_I", ")", "-", "1", "else", ":", "maxe", "=", "len", "(", "first_Z", ")", "-", "1", "if", "end", "==", "0", "or", "end", ">", "maxe", ":", "end", "=", "maxe", "# get the MAD, DANG, etc. 
for directional data", "bstep", "=", "araiblock", "[", "0", "]", "[", "start", "]", "[", "0", "]", "estep", "=", "araiblock", "[", "0", "]", "[", "end", "]", "[", "0", "]", "zstart", ",", "zend", "=", "0", ",", "len", "(", "zijdblock", ")", "for", "k", "in", "range", "(", "len", "(", "zijdblock", ")", ")", ":", "zrec", "=", "zijdblock", "[", "k", "]", "if", "zrec", "[", "0", "]", "==", "bstep", ":", "zstart", "=", "k", "if", "zrec", "[", "0", "]", "==", "estep", ":", "zend", "=", "k", "PCA", "=", "domean", "(", "zijdblock", ",", "zstart", ",", "zend", ",", "'DE-BFL'", ")", "D", ",", "Diz", ",", "Dzi", ",", "Du", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "# list of NRM vectors, and separated by zi and iz", "for", "rec", "in", "zijdblock", ":", "D", ".", "append", "(", "(", "rec", "[", "1", "]", ",", "rec", "[", "2", "]", ",", "rec", "[", "3", "]", ")", ")", "Du", ".", "append", "(", "(", "rec", "[", "1", "]", ",", "rec", "[", "2", "]", ")", ")", "if", "rec", "[", "4", "]", "==", "1", ":", "Dzi", ".", "append", "(", "(", "rec", "[", "1", "]", ",", "rec", "[", "2", "]", ")", ")", "# if this is ZI step", "else", ":", "Diz", ".", "append", "(", "(", "rec", "[", "1", "]", ",", "rec", "[", "2", "]", ")", ")", "# if this is IZ step", "# calculate the vector difference sum", "vds", "=", "dovds", "(", "D", ")", "b_zi", ",", "b_iz", "=", "[", "]", ",", "[", "]", "# collect data included in ZigZag calculation", "if", "end", "+", "1", ">=", "len", "(", "first_Z", ")", ":", "stop", "=", "end", "-", "1", "else", ":", "stop", "=", "end", "for", "k", "in", "range", "(", "start", ",", "end", "+", "1", ")", ":", "for", "l", "in", "range", "(", "len", "(", "first_I", ")", ")", ":", "irec", "=", "first_I", "[", "l", "]", "if", "irec", "[", "0", "]", "==", "first_Z", "[", "k", "]", "[", "0", "]", ":", "xi", ".", "append", "(", "irec", "[", "3", "]", ")", "yi", ".", "append", "(", "first_Z", "[", "k", "]", "[", "3", "]", ")", "pars", ",", "errcode", "=", "int_pars", "(", "xi", ",", "yi", ",", "vds", ")", "if", "errcode", "==", "1", ":", "return", "pars", ",", "errcode", "# for k in range(start,end+1):", "for", "k", "in", "range", "(", "len", "(", "first_Z", ")", "-", "1", ")", ":", "for", "l", "in", "range", "(", "k", ")", ":", "# only go down to 10% of NRM.....", "if", "old_div", "(", "first_Z", "[", "k", "]", "[", "3", "]", ",", "vds", ")", ">", "0.1", ":", "irec", "=", "first_I", "[", "l", "]", "if", "irec", "[", "4", "]", "==", "1", "and", "first_I", "[", "l", "+", "1", "]", "[", "4", "]", "==", "0", ":", "# a ZI step", "xzi", "=", "irec", "[", "3", "]", "yzi", "=", "first_Z", "[", "k", "]", "[", "3", "]", "xiz", "=", "first_I", "[", "l", "+", "1", "]", "[", "3", "]", "yiz", "=", "first_Z", "[", "k", "+", "1", "]", "[", "3", "]", "slope", "=", "np", ".", "arctan2", "(", "(", "yzi", "-", "yiz", ")", ",", "(", "xiz", "-", "xzi", ")", ")", "r", "=", "np", ".", "sqrt", "(", "(", "yzi", "-", "yiz", ")", "**", "2", "+", "(", "xiz", "-", "xzi", ")", "**", "2", ")", "if", "r", ">", ".1", "*", "vds", ":", "b_zi", ".", "append", "(", "slope", ")", "# suppress noise", "elif", "irec", "[", "4", "]", "==", "0", "and", "first_I", "[", "l", "+", "1", "]", "[", "4", "]", "==", "1", ":", "# an IZ step", "xiz", "=", "irec", "[", "3", "]", "yiz", "=", "first_Z", "[", "k", "]", "[", "3", "]", "xzi", "=", "first_I", "[", "l", "+", "1", "]", "[", "3", "]", "yzi", "=", "first_Z", "[", "k", "+", "1", "]", "[", "3", "]", "slope", "=", "np", ".", "arctan2", "(", "(", "yiz", "-", "yzi", ")", ",", "(", "xzi", "-", 
"xiz", ")", ")", "r", "=", "np", ".", "sqrt", "(", "(", "yiz", "-", "yzi", ")", "**", "2", "+", "(", "xzi", "-", "xiz", ")", "**", "2", ")", "if", "r", ">", ".1", "*", "vds", ":", "b_iz", ".", "append", "(", "slope", ")", "# suppress noise", "#", "ZigZag", ",", "Frat", ",", "Trat", "=", "-", "1", ",", "0", ",", "0", "if", "len", "(", "Diz", ")", ">", "2", "and", "len", "(", "Dzi", ")", ">", "2", ":", "ZigZag", "=", "0", "dizp", "=", "fisher_mean", "(", "Diz", ")", "# get Fisher stats on IZ steps", "dzip", "=", "fisher_mean", "(", "Dzi", ")", "# get Fisher stats on ZI steps", "dup", "=", "fisher_mean", "(", "Du", ")", "# get Fisher stats on all steps", "#", "# if directions are TOO well grouped, can get false positive for ftest, so", "# angles must be > 3 degrees apart.", "#", "if", "angle", "(", "[", "dizp", "[", "'dec'", "]", ",", "dizp", "[", "'inc'", "]", "]", ",", "[", "dzip", "[", "'dec'", "]", ",", "dzip", "[", "'inc'", "]", "]", ")", ">", "3.", ":", "F", "=", "(", "dup", "[", "'n'", "]", "-", "2.", ")", "*", "(", "dzip", "[", "'r'", "]", "+", "dizp", "[", "'r'", "]", "-", "dup", "[", "'r'", "]", ")", "/", "(", "dup", "[", "'n'", "]", "-", "dzip", "[", "'r'", "]", "-", "dizp", "[", "'r'", "]", ")", "# Watson test for common mean", "nf", "=", "2.", "*", "(", "dup", "[", "'n'", "]", "-", "2.", ")", "# number of degees of freedom", "ftest", "=", "fcalc", "(", "2", ",", "nf", ")", "Frat", "=", "old_div", "(", "F", ",", "ftest", ")", "if", "Frat", ">", "1.", ":", "ZigZag", "=", "Frat", "# fails zigzag on directions", "methcode", "=", "\"SM-FTEST\"", "# now do slopes", "if", "len", "(", "b_zi", ")", ">", "2", "and", "len", "(", "b_iz", ")", ">", "2", ":", "bzi_m", ",", "bzi_sig", "=", "gausspars", "(", "b_zi", ")", "# mean, std dev", "biz_m", ",", "biz_sig", "=", "gausspars", "(", "b_iz", ")", "n_zi", "=", "float", "(", "len", "(", "b_zi", ")", ")", "n_iz", "=", "float", "(", "len", "(", "b_iz", ")", ")", "b_diff", "=", "abs", "(", "bzi_m", "-", "biz_m", ")", "# difference in means", "#", "# avoid false positives - set 3 degree slope difference here too", "if", "b_diff", ">", "3", "*", "np", ".", "pi", "/", "180.", ":", "nf", "=", "n_zi", "+", "n_iz", "-", "2.", "# degrees of freedom", "svar", "=", "old_div", "(", "(", "(", "n_zi", "-", "1.", ")", "*", "bzi_sig", "**", "2", "+", "(", "n_iz", "-", "1.", ")", "*", "biz_sig", "**", "2", ")", ",", "nf", ")", "T", "=", "old_div", "(", "(", "b_diff", ")", ",", "np", ".", "sqrt", "(", "svar", "*", "(", "old_div", "(", "1.0", ",", "n_zi", ")", "+", "old_div", "(", "1.0", ",", "n_iz", ")", ")", ")", ")", "# student's t", "ttest", "=", "tcalc", "(", "nf", ",", ".05", ")", "# t-test at 95% conf.", "Trat", "=", "old_div", "(", "T", ",", "ttest", ")", "if", "Trat", ">", "1", "and", "Trat", ">", "Frat", ":", "ZigZag", "=", "Trat", "# fails zigzag on directions", "methcode", "=", "\"SM-TTEST\"", "pars", "[", "z_key", "]", "=", "ZigZag", "pars", "[", "meth_key", "]", "=", "methcode", "# do drats", "if", "len", "(", "ptrm_check", ")", "!=", "0", ":", "diffcum", ",", "drat_max", "=", "0", ",", "0", "for", "prec", "in", "ptrm_check", ":", "step", "=", "prec", "[", "0", "]", "endbak", "=", "end", "zend", "=", "end", "while", "zend", ">", "len", "(", "zijdblock", ")", "-", "1", ":", "zend", "=", "zend", "-", "2", "# don't count alteration that happens after this step", "if", "step", "<", "zijdblock", "[", "zend", "]", "[", "0", "]", ":", "Nptrm", "+=", "1", "for", "irec", "in", "first_I", ":", "if", "irec", "[", "0", "]", "==", "step", ":", "break", "diffcum", 
"+=", "prec", "[", "3", "]", "-", "irec", "[", "3", "]", "if", "abs", "(", "prec", "[", "3", "]", "-", "irec", "[", "3", "]", ")", ">", "drat_max", ":", "drat_max", "=", "abs", "(", "prec", "[", "3", "]", "-", "irec", "[", "3", "]", ")", "pars", "[", "drats_key", "]", "=", "(", "100", "*", "abs", "(", "diffcum", ")", "/", "first_I", "[", "zend", "]", "[", "3", "]", ")", "pars", "[", "drat_key", "]", "=", "(", "100", "*", "abs", "(", "drat_max", ")", "/", "first_I", "[", "zend", "]", "[", "3", "]", ")", "elif", "len", "(", "zptrm_check", ")", "!=", "0", ":", "diffcum", "=", "0", "for", "prec", "in", "zptrm_check", ":", "step", "=", "prec", "[", "0", "]", "endbak", "=", "end", "zend", "=", "end", "while", "zend", ">", "len", "(", "zijdblock", ")", "-", "1", ":", "zend", "=", "zend", "-", "1", "if", "step", "<", "zijdblock", "[", "zend", "]", "[", "0", "]", ":", "Nptrm", "+=", "1", "for", "irec", "in", "first_I", ":", "if", "irec", "[", "0", "]", "==", "step", ":", "break", "diffcum", "+=", "prec", "[", "3", "]", "-", "irec", "[", "3", "]", "pars", "[", "drats_key", "]", "=", "(", "100", "*", "abs", "(", "diffcum", ")", "/", "first_I", "[", "zend", "]", "[", "3", "]", ")", "else", ":", "pars", "[", "drats_key", "]", "=", "-", "1", "pars", "[", "drat_key", "]", "=", "-", "1", "# and the pTRM tails", "if", "len", "(", "ptrm_tail", ")", "!=", "0", ":", "for", "trec", "in", "ptrm_tail", ":", "step", "=", "trec", "[", "0", "]", "for", "irec", "in", "first_I", ":", "if", "irec", "[", "0", "]", "==", "step", ":", "break", "if", "abs", "(", "trec", "[", "3", "]", ")", ">", "dmax", ":", "dmax", "=", "abs", "(", "trec", "[", "3", "]", ")", "pars", "[", "md_key", "]", "=", "(", "100", "*", "dmax", "/", "vds", ")", "else", ":", "pars", "[", "md_key", "]", "=", "-", "1", "pars", "[", "min_key", "]", "=", "bstep", "pars", "[", "max_key", "]", "=", "estep", "pars", "[", "dec_key", "]", "=", "PCA", "[", "\"specimen_dec\"", "]", "pars", "[", "inc_key", "]", "=", "PCA", "[", "\"specimen_inc\"", "]", "pars", "[", "mad_key", "]", "=", "PCA", "[", "\"specimen_mad\"", "]", "pars", "[", "dang_key", "]", "=", "PCA", "[", "\"specimen_dang\"", "]", "pars", "[", "ptrm_key", "]", "=", "Nptrm", "# and the ThetaChecks", "if", "ThetaChecks", "!=", "\"\"", ":", "t", "=", "0", "for", "theta", "in", "ThetaChecks", ":", "if", "theta", "[", "0", "]", ">=", "bstep", "and", "theta", "[", "0", "]", "<=", "estep", "and", "theta", "[", "1", "]", ">", "t", ":", "t", "=", "theta", "[", "1", "]", "pars", "[", "theta_key", "]", "=", "t", "else", ":", "pars", "[", "theta_key", "]", "=", "-", "1", "# and the DeltaChecks", "if", "DeltaChecks", "!=", "\"\"", ":", "d", "=", "0", "for", "delta", "in", "DeltaChecks", ":", "if", "delta", "[", "0", "]", ">=", "bstep", "and", "delta", "[", "0", "]", "<=", "estep", "and", "delta", "[", "1", "]", ">", "d", ":", "d", "=", "delta", "[", "1", "]", "pars", "[", "delta_key", "]", "else", ":", "pars", "[", "delta_key", "]", "=", "-", "1", "pars", "[", "gamma_key", "]", "=", "-", "1", "if", "GammaChecks", "!=", "\"\"", ":", "for", "gamma", "in", "GammaChecks", ":", "if", "gamma", "[", "0", "]", "<=", "estep", ":", "pars", "[", "'specimen_gamma'", "]", "=", "gamma", "[", "1", "]", "# --------------------------------------------------------------", "# From here added By Ron Shaar 11-Dec 2012", "# New parameters defined in Shaar and Tauxe (2012):", "# FRAC (specimen_frac) - ranges from 0. to 1.", "# SCAT (specimen_scat) - takes 1/0", "# gap_max (specimen_gmax) - ranges from 0. 
to 1.", "# --------------------------------------------------------------", "# --------------------------------------------------------------", "# FRAC is similar to Fvds, but the numerator is the vds fraction:", "# FRAC= [ vds (start,end)] / total vds ]", "# gap_max= max [ (vector difference) / vds (start,end)]", "# --------------------------------------------------------------", "# collect all zijderveld data to arrays and calculate VDS", "z_temperatures", "=", "[", "row", "[", "0", "]", "for", "row", "in", "zijdblock", "]", "zdata", "=", "[", "]", "# array of zero-fields measurements in Cartezian coordinates", "# array of vector differences (for vds calculation)", "vector_diffs", "=", "[", "]", "NRM", "=", "zijdblock", "[", "0", "]", "[", "3", "]", "# NRM", "for", "k", "in", "range", "(", "len", "(", "zijdblock", ")", ")", ":", "DIR", "=", "[", "zijdblock", "[", "k", "]", "[", "1", "]", ",", "zijdblock", "[", "k", "]", "[", "2", "]", ",", "old_div", "(", "zijdblock", "[", "k", "]", "[", "3", "]", ",", "NRM", ")", "]", "cart", "=", "dir2cart", "(", "DIR", ")", "zdata", ".", "append", "(", "np", ".", "array", "(", "[", "cart", "[", "0", "]", ",", "cart", "[", "1", "]", ",", "cart", "[", "2", "]", "]", ")", ")", "if", "k", ">", "0", ":", "vector_diffs", ".", "append", "(", "np", ".", "sqrt", "(", "sum", "(", "(", "np", ".", "array", "(", "zdata", "[", "-", "2", "]", ")", "-", "np", ".", "array", "(", "zdata", "[", "-", "1", "]", ")", ")", "**", "2", ")", ")", ")", "# last vector difference: from the last point to the origin.", "vector_diffs", ".", "append", "(", "np", ".", "sqrt", "(", "sum", "(", "np", ".", "array", "(", "zdata", "[", "-", "1", "]", ")", "**", "2", ")", ")", ")", "vds", "=", "sum", "(", "vector_diffs", ")", "# vds calculation", "zdata", "=", "np", ".", "array", "(", "zdata", ")", "vector_diffs", "=", "np", ".", "array", "(", "vector_diffs", ")", "# calculate the vds within the chosen segment", "vector_diffs_segment", "=", "vector_diffs", "[", "zstart", ":", "zend", "]", "# FRAC calculation", "FRAC", "=", "old_div", "(", "sum", "(", "vector_diffs_segment", ")", ",", "vds", ")", "pars", "[", "frac_key", "]", "=", "FRAC", "# gap_max calculation", "max_FRAC_gap", "=", "max", "(", "old_div", "(", "vector_diffs_segment", ",", "sum", "(", "vector_diffs_segment", ")", ")", ")", "pars", "[", "gmax_key", "]", "=", "max_FRAC_gap", "# ---------------------------------------------------------------------", "# Calculate the \"scat box\"", "# all data-points, pTRM checks, and tail-checks, should be inside a \"scat box\"", "# ---------------------------------------------------------------------", "# intialization", "# fail scat due to arai plot data points", "pars", "[", "\"fail_arai_beta_box_scatter\"", "]", "=", "False", "pars", "[", "\"fail_ptrm_beta_box_scatter\"", "]", "=", "False", "# fail scat due to pTRM checks", "pars", "[", "\"fail_tail_beta_box_scatter\"", "]", "=", "False", "# fail scat due to tail checks", "pars", "[", "scat_key", "]", "=", "\"t\"", "# Pass by default", "# --------------------------------------------------------------", "# collect all Arai plot data points in arrays", "x_Arai", ",", "y_Arai", ",", "t_Arai", ",", "steps_Arai", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "NRMs", "=", "araiblock", "[", "0", "]", "PTRMs", "=", "araiblock", "[", "1", "]", "ptrm_checks", "=", "araiblock", "[", "2", "]", "ptrm_tail", "=", "araiblock", "[", "3", "]", "PTRMs_temperatures", "=", "[", "row", "[", "0", "]", "for", "row", "in", "PTRMs", "]", 
"NRMs_temperatures", "=", "[", "row", "[", "0", "]", "for", "row", "in", "NRMs", "]", "NRM", "=", "NRMs", "[", "0", "]", "[", "3", "]", "for", "k", "in", "range", "(", "len", "(", "NRMs", ")", ")", ":", "index_pTRMs", "=", "PTRMs_temperatures", ".", "index", "(", "NRMs", "[", "k", "]", "[", "0", "]", ")", "x_Arai", ".", "append", "(", "old_div", "(", "PTRMs", "[", "index_pTRMs", "]", "[", "3", "]", ",", "NRM", ")", ")", "y_Arai", ".", "append", "(", "old_div", "(", "NRMs", "[", "k", "]", "[", "3", "]", ",", "NRM", ")", ")", "t_Arai", ".", "append", "(", "NRMs", "[", "k", "]", "[", "0", "]", ")", "if", "NRMs", "[", "k", "]", "[", "4", "]", "==", "1", ":", "steps_Arai", ".", "append", "(", "'ZI'", ")", "else", ":", "steps_Arai", ".", "append", "(", "'IZ'", ")", "x_Arai", "=", "np", ".", "array", "(", "x_Arai", ")", "y_Arai", "=", "np", ".", "array", "(", "y_Arai", ")", "# --------------------------------------------------------------", "# collect all pTRM check to arrays", "x_ptrm_check", ",", "y_ptrm_check", ",", "ptrm_checks_temperatures", ",", "=", "[", "]", ",", "[", "]", ",", "[", "]", "x_ptrm_check_starting_point", ",", "y_ptrm_check_starting_point", ",", "ptrm_checks_starting_temperatures", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "k", "in", "range", "(", "len", "(", "ptrm_checks", ")", ")", ":", "if", "ptrm_checks", "[", "k", "]", "[", "0", "]", "in", "NRMs_temperatures", ":", "# find the starting point of the pTRM check:", "for", "i", "in", "range", "(", "len", "(", "datablock", ")", ")", ":", "rec", "=", "datablock", "[", "i", "]", "if", "\"LT-PTRM-I\"", "in", "rec", "[", "meth_key", "]", "and", "float", "(", "rec", "[", "temp_key", "]", ")", "==", "ptrm_checks", "[", "k", "]", "[", "0", "]", ":", "starting_temperature", "=", "(", "float", "(", "datablock", "[", "i", "-", "1", "]", "[", "temp_key", "]", ")", ")", "try", ":", "index", "=", "t_Arai", ".", "index", "(", "starting_temperature", ")", "x_ptrm_check_starting_point", ".", "append", "(", "x_Arai", "[", "index", "]", ")", "y_ptrm_check_starting_point", ".", "append", "(", "y_Arai", "[", "index", "]", ")", "ptrm_checks_starting_temperatures", ".", "append", "(", "starting_temperature", ")", "index_zerofield", "=", "zerofield_temperatures", ".", "index", "(", "ptrm_checks", "[", "k", "]", "[", "0", "]", ")", "x_ptrm_check", ".", "append", "(", "old_div", "(", "ptrm_checks", "[", "k", "]", "[", "3", "]", ",", "NRM", ")", ")", "y_ptrm_check", ".", "append", "(", "old_div", "(", "zerofields", "[", "index_zerofield", "]", "[", "3", "]", ",", "NRM", ")", ")", "ptrm_checks_temperatures", ".", "append", "(", "ptrm_checks", "[", "k", "]", "[", "0", "]", ")", "break", "except", ":", "pass", "x_ptrm_check_starting_point", "=", "np", ".", "array", "(", "x_ptrm_check_starting_point", ")", "y_ptrm_check_starting_point", "=", "np", ".", "array", "(", "y_ptrm_check_starting_point", ")", "ptrm_checks_starting_temperatures", "=", "np", ".", "array", "(", "ptrm_checks_starting_temperatures", ")", "x_ptrm_check", "=", "np", ".", "array", "(", "x_ptrm_check", ")", "y_ptrm_check", "=", "np", ".", "array", "(", "y_ptrm_check", ")", "ptrm_checks_temperatures", "=", "np", ".", "array", "(", "ptrm_checks_temperatures", ")", "# --------------------------------------------------------------", "# collect tail checks to arrays", "x_tail_check", ",", "y_tail_check", ",", "tail_check_temperatures", "=", "[", "]", ",", "[", "]", ",", "[", "]", "x_tail_check_starting_point", ",", "y_tail_check_starting_point", ",", 
"tail_checks_starting_temperatures", "=", "[", "]", ",", "[", "]", ",", "[", "]", "for", "k", "in", "range", "(", "len", "(", "ptrm_tail", ")", ")", ":", "if", "ptrm_tail", "[", "k", "]", "[", "0", "]", "in", "NRMs_temperatures", ":", "# find the starting point of the pTRM check:", "for", "i", "in", "range", "(", "len", "(", "datablock", ")", ")", ":", "rec", "=", "datablock", "[", "i", "]", "if", "\"LT-PTRM-MD\"", "in", "rec", "[", "meth_key", "]", "and", "float", "(", "rec", "[", "temp_key", "]", ")", "==", "ptrm_tail", "[", "k", "]", "[", "0", "]", ":", "starting_temperature", "=", "(", "float", "(", "datablock", "[", "i", "-", "1", "]", "[", "temp_key", "]", ")", ")", "try", ":", "index", "=", "t_Arai", ".", "index", "(", "starting_temperature", ")", "x_tail_check_starting_point", ".", "append", "(", "x_Arai", "[", "index", "]", ")", "y_tail_check_starting_point", ".", "append", "(", "y_Arai", "[", "index", "]", ")", "tail_checks_starting_temperatures", ".", "append", "(", "starting_temperature", ")", "index_infield", "=", "infield_temperatures", ".", "index", "(", "ptrm_tail", "[", "k", "]", "[", "0", "]", ")", "x_tail_check", ".", "append", "(", "old_div", "(", "infields", "[", "index_infield", "]", "[", "3", "]", ",", "NRM", ")", ")", "y_tail_check", ".", "append", "(", "old_div", "(", "ptrm_tail", "[", "k", "]", "[", "3", "]", ",", "NRM", ")", "+", "old_div", "(", "zerofields", "[", "index_infield", "]", "[", "3", "]", ",", "NRM", ")", ")", "tail_check_temperatures", ".", "append", "(", "ptrm_tail", "[", "k", "]", "[", "0", "]", ")", "break", "except", ":", "pass", "x_tail_check", "=", "np", ".", "array", "(", "x_tail_check", ")", "y_tail_check", "=", "np", ".", "array", "(", "y_tail_check", ")", "tail_check_temperatures", "=", "np", ".", "array", "(", "tail_check_temperatures", ")", "x_tail_check_starting_point", "=", "np", ".", "array", "(", "x_tail_check_starting_point", ")", "y_tail_check_starting_point", "=", "np", ".", "array", "(", "y_tail_check_starting_point", ")", "tail_checks_starting_temperatures", "=", "np", ".", "array", "(", "tail_checks_starting_temperatures", ")", "# --------------------------------------------------------------", "# collect the chosen segment in the Arai plot to arrays", "x_Arai_segment", "=", "x_Arai", "[", "start", ":", "end", "+", "1", "]", "# chosen segent in the Arai plot", "y_Arai_segment", "=", "y_Arai", "[", "start", ":", "end", "+", "1", "]", "# chosen segent in the Arai plot", "# --------------------------------------------------------------", "# collect pTRM checks in segment to arrays", "# notice, this is different than the conventional DRATS.", "# for scat calculation we take only the pTRM checks which were carried out", "# before reaching the highest temperature in the chosen segment", "x_ptrm_check_for_SCAT", ",", "y_ptrm_check_for_SCAT", "=", "[", "]", ",", "[", "]", "for", "k", "in", "range", "(", "len", "(", "ptrm_checks_temperatures", ")", ")", ":", "if", "ptrm_checks_temperatures", "[", "k", "]", ">=", "pars", "[", "min_key", "]", "and", "ptrm_checks_starting_temperatures", "<=", "pars", "[", "max_key", "]", ":", "x_ptrm_check_for_SCAT", ".", "append", "(", "x_ptrm_check", "[", "k", "]", ")", "y_ptrm_check_for_SCAT", ".", "append", "(", "y_ptrm_check", "[", "k", "]", ")", "x_ptrm_check_for_SCAT", "=", "np", ".", "array", "(", "x_ptrm_check_for_SCAT", ")", "y_ptrm_check_for_SCAT", "=", "np", ".", "array", "(", "y_ptrm_check_for_SCAT", ")", "# --------------------------------------------------------------", "# collect Tail checks 
in segment to arrays", "# for scat calculation we take only the tail checks which were carried out", "# before reaching the highest temperature in the chosen segment", "x_tail_check_for_SCAT", ",", "y_tail_check_for_SCAT", "=", "[", "]", ",", "[", "]", "for", "k", "in", "range", "(", "len", "(", "tail_check_temperatures", ")", ")", ":", "if", "tail_check_temperatures", "[", "k", "]", ">=", "pars", "[", "min_key", "]", "and", "tail_checks_starting_temperatures", "[", "k", "]", "<=", "pars", "[", "max_key", "]", ":", "x_tail_check_for_SCAT", ".", "append", "(", "x_tail_check", "[", "k", "]", ")", "y_tail_check_for_SCAT", ".", "append", "(", "y_tail_check", "[", "k", "]", ")", "x_tail_check_for_SCAT", "=", "np", ".", "array", "(", "x_tail_check_for_SCAT", ")", "y_tail_check_for_SCAT", "=", "np", ".", "array", "(", "y_tail_check_for_SCAT", ")", "# --------------------------------------------------------------", "# calculate the lines that define the scat box:", "# if threshold value for beta is not defined, then scat cannot be calculated (pass)", "# in this case, scat pass", "if", "beta_key", "in", "list", "(", "accept", ".", "keys", "(", ")", ")", "and", "accept", "[", "beta_key", "]", "!=", "\"\"", ":", "b_beta_threshold", "=", "float", "(", "accept", "[", "beta_key", "]", ")", "b", "=", "pars", "[", "b_key", "]", "# best fit line", "cm_x", "=", "np", ".", "mean", "(", "np", ".", "array", "(", "x_Arai_segment", ")", ")", "# x center of mass", "cm_y", "=", "np", ".", "mean", "(", "np", ".", "array", "(", "y_Arai_segment", ")", ")", "# y center of mass", "a", "=", "cm_y", "-", "b", "*", "cm_x", "# lines with slope = slope +/- 2*(specimen_b_beta)", "two_sigma_beta_threshold", "=", "2", "*", "b_beta_threshold", "two_sigma_slope_threshold", "=", "abs", "(", "two_sigma_beta_threshold", "*", "b", ")", "# a line with a shallower slope (b + 2*beta*b) passing through the center of mass", "# y=a1+b1x", "b1", "=", "b", "+", "two_sigma_slope_threshold", "a1", "=", "cm_y", "-", "b1", "*", "cm_x", "# bounding line with steeper slope (b - 2*beta*b) passing through the center of mass", "# y=a2+b2x", "b2", "=", "b", "-", "two_sigma_slope_threshold", "a2", "=", "cm_y", "-", "b2", "*", "cm_x", "# lower bounding line of the 'beta box'", "# y=intercept1+slop1x", "slop1", "=", "old_div", "(", "a1", ",", "(", "(", "old_div", "(", "a2", ",", "b2", ")", ")", ")", ")", "intercept1", "=", "a1", "# higher bounding line of the 'beta box'", "# y=intercept2+slop2x", "slop2", "=", "old_div", "(", "a2", ",", "(", "(", "old_div", "(", "a1", ",", "b1", ")", ")", ")", ")", "intercept2", "=", "a2", "pars", "[", "'specimen_scat_bounding_line_high'", "]", "=", "[", "intercept2", ",", "slop2", "]", "pars", "[", "'specimen_scat_bounding_line_low'", "]", "=", "[", "intercept1", ",", "slop1", "]", "# --------------------------------------------------------------", "# check if the Arai data points are in the 'box'", "# the two bounding lines", "ymin", "=", "intercept1", "+", "x_Arai_segment", "*", "slop1", "ymax", "=", "intercept2", "+", "x_Arai_segment", "*", "slop2", "# arrays of \"True\" or \"False\"", "check_1", "=", "y_Arai_segment", ">", "ymax", "check_2", "=", "y_Arai_segment", "<", "ymin", "# check if at least one \"True\"", "if", "(", "sum", "(", "check_1", ")", "+", "sum", "(", "check_2", ")", ")", ">", "0", ":", "pars", "[", "\"fail_arai_beta_box_scatter\"", "]", "=", "True", "# --------------------------------------------------------------", "# check if the pTRM checks data points are in the 'box'", "if", "len", "(", 
"x_ptrm_check_for_SCAT", ")", ">", "0", ":", "# the two bounding lines", "ymin", "=", "intercept1", "+", "x_ptrm_check_for_SCAT", "*", "slop1", "ymax", "=", "intercept2", "+", "x_ptrm_check_for_SCAT", "*", "slop2", "# arrays of \"True\" or \"False\"", "check_1", "=", "y_ptrm_check_for_SCAT", ">", "ymax", "check_2", "=", "y_ptrm_check_for_SCAT", "<", "ymin", "# check if at least one \"True\"", "if", "(", "sum", "(", "check_1", ")", "+", "sum", "(", "check_2", ")", ")", ">", "0", ":", "pars", "[", "\"fail_ptrm_beta_box_scatter\"", "]", "=", "True", "# --------------------------------------------------------------", "# check if the tail checks data points are in the 'box'", "if", "len", "(", "x_tail_check_for_SCAT", ")", ">", "0", ":", "# the two bounding lines", "ymin", "=", "intercept1", "+", "x_tail_check_for_SCAT", "*", "slop1", "ymax", "=", "intercept2", "+", "x_tail_check_for_SCAT", "*", "slop2", "# arrays of \"True\" or \"False\"", "check_1", "=", "y_tail_check_for_SCAT", ">", "ymax", "check_2", "=", "y_tail_check_for_SCAT", "<", "ymin", "# check if at least one \"True\"", "if", "(", "sum", "(", "check_1", ")", "+", "sum", "(", "check_2", ")", ")", ">", "0", ":", "pars", "[", "\"fail_tail_beta_box_scatter\"", "]", "=", "True", "# --------------------------------------------------------------", "# check if specimen_scat is PASS or FAIL:", "if", "pars", "[", "\"fail_tail_beta_box_scatter\"", "]", "or", "pars", "[", "\"fail_ptrm_beta_box_scatter\"", "]", "or", "pars", "[", "\"fail_arai_beta_box_scatter\"", "]", ":", "pars", "[", "scat_key", "]", "=", "'f'", "else", ":", "pars", "[", "scat_key", "]", "=", "'t'", "return", "pars", ",", "0" ]
calculate the paleointensity magic parameters; make some definitions
[ "calculate", "the", "paleointensity", "magic", "parameters", "make", "some", "definitions" ]
python
train
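The SCAT "beta box" construction above can be exercised on its own: the two bounding lines pass through the center of mass of the chosen Arai segment, with slopes offset by 2*beta from the best-fit slope. A minimal sketch, independent of the pmagpy data structures; the function and variable names here are illustrative, not part of the library.

import numpy as np

def scat_bounds(x_seg, y_seg, b, beta_threshold):
    # center of mass of the chosen Arai segment
    cm_x, cm_y = np.mean(x_seg), np.mean(y_seg)
    two_sigma = abs(2 * beta_threshold * b)
    b1, b2 = b + two_sigma, b - two_sigma            # shallower / steeper slopes
    a1, a2 = cm_y - b1 * cm_x, cm_y - b2 * cm_x      # their intercepts
    low = (a1, a1 / (a2 / b2))                       # mirrors the original intercept1/slop1
    high = (a2, a2 / (a1 / b1))                      # mirrors the original intercept2/slop2
    return low, high

def points_in_box(x, y, low, high):
    x, y = np.asarray(x), np.asarray(y)
    ymin = low[0] + x * low[1]
    ymax = high[0] + x * high[1]
    return not np.any((y > ymax) | (y < ymin))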
gem/oq-engine
openquake/hazardlib/gsim/atkinson_boore_1995.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/atkinson_boore_1995.py#L75-L105
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ C = self.COEFFS[imt] # clip rhypo at 10 (this is the minimum distance used in # deriving the equation), see page 22, this avoids singularity # in mean value equation rhypo = dists.rhypo.copy() rhypo[rhypo < 10] = 10 # convert magnitude from Mblg to Mw mag = rup.mag * 0.98 - 0.39 if rup.mag <= 5.5 else \ 2.715 - 0.277 * rup.mag + 0.127 * rup.mag * rup.mag # functional form as explained in 'Youngs_fit_to_AB95lookup.doc' f1 = np.minimum(np.log(rhypo), np.log(70.)) f2 = np.maximum(np.log(rhypo / 130.), 0) mean = ( C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2 + (C['c4'] + C['c5'] * mag) * f1 + (C['c6'] + C['c7'] * mag) * f2 + C['c8'] * rhypo ) stddevs = self._get_stddevs(stddev_types, dists.rhypo.shape[0]) return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "# clip rhypo at 10 (this is the minimum distance used in", "# deriving the equation), see page 22, this avoids singularity", "# in mean value equation", "rhypo", "=", "dists", ".", "rhypo", ".", "copy", "(", ")", "rhypo", "[", "rhypo", "<", "10", "]", "=", "10", "# convert magnitude from Mblg to Mw", "mag", "=", "rup", ".", "mag", "*", "0.98", "-", "0.39", "if", "rup", ".", "mag", "<=", "5.5", "else", "2.715", "-", "0.277", "*", "rup", ".", "mag", "+", "0.127", "*", "rup", ".", "mag", "*", "rup", ".", "mag", "# functional form as explained in 'Youngs_fit_to_AB95lookup.doc'", "f1", "=", "np", ".", "minimum", "(", "np", ".", "log", "(", "rhypo", ")", ",", "np", ".", "log", "(", "70.", ")", ")", "f2", "=", "np", ".", "maximum", "(", "np", ".", "log", "(", "rhypo", "/", "130.", ")", ",", "0", ")", "mean", "=", "(", "C", "[", "'c1'", "]", "+", "C", "[", "'c2'", "]", "*", "mag", "+", "C", "[", "'c3'", "]", "*", "mag", "**", "2", "+", "(", "C", "[", "'c4'", "]", "+", "C", "[", "'c5'", "]", "*", "mag", ")", "*", "f1", "+", "(", "C", "[", "'c6'", "]", "+", "C", "[", "'c7'", "]", "*", "mag", ")", "*", "f2", "+", "C", "[", "'c8'", "]", "*", "rhypo", ")", "stddevs", "=", "self", ".", "_get_stddevs", "(", "stddev_types", ",", "dists", ".", "rhypo", ".", "shape", "[", "0", "]", ")", "return", "mean", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
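The magnitude conversion and the clipped-distance functional form in get_mean_and_stddevs can be tried outside the GSIM framework. A sketch under stated assumptions: the coefficients below are placeholders, not the values from the COEFFS table.

import numpy as np

def ab95_mean(rhypo, mblg, C):
    rhypo = np.maximum(np.asarray(rhypo, dtype=float), 10.0)   # clip at 10 km, as above
    # Mblg -> Mw conversion used by the GSIM
    mag = mblg * 0.98 - 0.39 if mblg <= 5.5 else 2.715 - 0.277 * mblg + 0.127 * mblg ** 2
    f1 = np.minimum(np.log(rhypo), np.log(70.0))
    f2 = np.maximum(np.log(rhypo / 130.0), 0.0)
    return (C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2
            + (C['c4'] + C['c5'] * mag) * f1
            + (C['c6'] + C['c7'] * mag) * f2
            + C['c8'] * rhypo)

C = dict(zip(['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8'],
             [1.0, 0.5, -0.01, -1.0, 0.1, -0.5, 0.05, -0.001]))   # illustrative only
print(ab95_mean([15.0, 80.0], 5.0, C))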
globality-corp/microcosm-flask
microcosm_flask/formatting/base.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/formatting/base.py#L50-L74
def build_etag(self, response, include_etag=True, **kwargs): """ Add an etag to the response body. Uses spooky where possible because it is empirically fast and well-regarded. See: http://blog.reverberate.org/2012/01/state-of-hash-functions-2012.html """ if not include_etag: return if not spooky: # use built-in md5 response.add_etag() return # use spooky response.headers["ETag"] = quote_etag( hexlify( spooky.hash128( response.get_data(), ).to_bytes(16, "little"), ).decode("utf-8"), )
[ "def", "build_etag", "(", "self", ",", "response", ",", "include_etag", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "not", "include_etag", ":", "return", "if", "not", "spooky", ":", "# use built-in md5", "response", ".", "add_etag", "(", ")", "return", "# use spooky", "response", ".", "headers", "[", "\"ETag\"", "]", "=", "quote_etag", "(", "hexlify", "(", "spooky", ".", "hash128", "(", "response", ".", "get_data", "(", ")", ",", ")", ".", "to_bytes", "(", "16", ",", "\"little\"", ")", ",", ")", ".", "decode", "(", "\"utf-8\"", ")", ",", ")" ]
Add an etag to the response body. Uses spooky where possible because it is empirically fast and well-regarded. See: http://blog.reverberate.org/2012/01/state-of-hash-functions-2012.html
[ "Add", "an", "etag", "to", "the", "response", "body", "." ]
python
train
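The spooky-with-md5-fallback pattern in build_etag can be shown without Flask. The spooky.hash128 call is taken from the snippet above and treated as an assumption; if the package is absent, this sketch falls back to md5, which is also what Werkzeug's add_etag() uses.

from binascii import hexlify
import hashlib

try:
    import spooky            # optional dependency, per the snippet above
except ImportError:
    spooky = None

def compute_etag(body: bytes) -> str:
    if spooky is not None:
        digest = hexlify(spooky.hash128(body).to_bytes(16, "little")).decode("utf-8")
    else:
        digest = hashlib.md5(body).hexdigest()
    return '"%s"' % digest

print(compute_etag(b'{"hello": "world"}'))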
fananimi/pyzk
zk/base.py
https://github.com/fananimi/pyzk/blob/1a765d616526efdcb4c9adfcc9b1d10f6ed8b938/zk/base.py#L1349-L1395
def __recieve_tcp_data(self, data_recv, size): """ data_recv, raw tcp packet must analyze tcp_length must return data, broken """ data = [] tcp_length = self.__test_tcp_top(data_recv) if self.verbose: print ("tcp_length {}, size {}".format(tcp_length, size)) if tcp_length <= 0: if self.verbose: print ("Incorrect tcp packet") return None, b"" if (tcp_length - 8) < size: if self.verbose: print ("tcp length too small... retrying") resp, bh = self.__recieve_tcp_data(data_recv, tcp_length - 8) data.append(resp) size -= len(resp) if self.verbose: print ("new tcp DATA packet to fill misssing {}".format(size)) data_recv = bh + self.__sock.recv(size + 16 ) if self.verbose: print ("new tcp DATA starting with {} bytes".format(len(data_recv))) resp, bh = self.__recieve_tcp_data(data_recv, size) data.append(resp) if self.verbose: print ("for misssing {} recieved {} with extra {}".format(size, len(resp), len(bh))) return b''.join(data), bh recieved = len(data_recv) if self.verbose: print ("recieved {}, size {}".format(recieved, size)) response = unpack('HHHH', data_recv[8:16])[0] if recieved >= (size + 32): if response == const.CMD_DATA: resp = data_recv[16 : size + 16] if self.verbose: print ("resp complete len {}".format(len(resp))) return resp, data_recv[size + 16:] else: if self.verbose: print("incorrect response!!! {}".format(response)) return None, b"" else: if self.verbose: print ("try DATA incomplete (actual valid {})".format(recieved-16)) data.append(data_recv[16 : size + 16 ]) size -= recieved - 16 broken_header = b"" if size < 0: broken_header = data_recv[size:] if self.verbose: print ("broken", (broken_header).encode('hex')) if size > 0: data_recv = self.__recieve_raw_data(size) data.append(data_recv) return b''.join(data), broken_header
[ "def", "__recieve_tcp_data", "(", "self", ",", "data_recv", ",", "size", ")", ":", "data", "=", "[", "]", "tcp_length", "=", "self", ".", "__test_tcp_top", "(", "data_recv", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"tcp_length {}, size {}\"", ".", "format", "(", "tcp_length", ",", "size", ")", ")", "if", "tcp_length", "<=", "0", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"Incorrect tcp packet\"", ")", "return", "None", ",", "b\"\"", "if", "(", "tcp_length", "-", "8", ")", "<", "size", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"tcp length too small... retrying\"", ")", "resp", ",", "bh", "=", "self", ".", "__recieve_tcp_data", "(", "data_recv", ",", "tcp_length", "-", "8", ")", "data", ".", "append", "(", "resp", ")", "size", "-=", "len", "(", "resp", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"new tcp DATA packet to fill misssing {}\"", ".", "format", "(", "size", ")", ")", "data_recv", "=", "bh", "+", "self", ".", "__sock", ".", "recv", "(", "size", "+", "16", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"new tcp DATA starting with {} bytes\"", ".", "format", "(", "len", "(", "data_recv", ")", ")", ")", "resp", ",", "bh", "=", "self", ".", "__recieve_tcp_data", "(", "data_recv", ",", "size", ")", "data", ".", "append", "(", "resp", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"for misssing {} recieved {} with extra {}\"", ".", "format", "(", "size", ",", "len", "(", "resp", ")", ",", "len", "(", "bh", ")", ")", ")", "return", "b''", ".", "join", "(", "data", ")", ",", "bh", "recieved", "=", "len", "(", "data_recv", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"recieved {}, size {}\"", ".", "format", "(", "recieved", ",", "size", ")", ")", "response", "=", "unpack", "(", "'HHHH'", ",", "data_recv", "[", "8", ":", "16", "]", ")", "[", "0", "]", "if", "recieved", ">=", "(", "size", "+", "32", ")", ":", "if", "response", "==", "const", ".", "CMD_DATA", ":", "resp", "=", "data_recv", "[", "16", ":", "size", "+", "16", "]", "if", "self", ".", "verbose", ":", "print", "(", "\"resp complete len {}\"", ".", "format", "(", "len", "(", "resp", ")", ")", ")", "return", "resp", ",", "data_recv", "[", "size", "+", "16", ":", "]", "else", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"incorrect response!!! {}\"", ".", "format", "(", "response", ")", ")", "return", "None", ",", "b\"\"", "else", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"try DATA incomplete (actual valid {})\"", ".", "format", "(", "recieved", "-", "16", ")", ")", "data", ".", "append", "(", "data_recv", "[", "16", ":", "size", "+", "16", "]", ")", "size", "-=", "recieved", "-", "16", "broken_header", "=", "b\"\"", "if", "size", "<", "0", ":", "broken_header", "=", "data_recv", "[", "size", ":", "]", "if", "self", ".", "verbose", ":", "print", "(", "\"broken\"", ",", "(", "broken_header", ")", ".", "encode", "(", "'hex'", ")", ")", "if", "size", ">", "0", ":", "data_recv", "=", "self", ".", "__recieve_raw_data", "(", "size", ")", "data", ".", "append", "(", "data_recv", ")", "return", "b''", ".", "join", "(", "data", ")", ",", "broken_header" ]
data_recv: raw tcp packet; must analyze tcp_length; must return (data, broken)
[ "data_recv", "raw", "tcp", "packet", "must", "analyze", "tcp_length" ]
python
train
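Underneath the verbose logging, __recieve_tcp_data implements length-prefixed framing: read a fixed-size header, pull the payload length out of it, then keep reading until that many bytes have arrived. A simplified, generic sketch of that pattern, not the ZK protocol itself; the '<HHI' header layout is an assumption based on the snippet.

from struct import unpack
import socket

def recv_exact(sock: socket.socket, size: int) -> bytes:
    # Block until exactly `size` bytes have been read (or the peer closes).
    chunks = []
    while size > 0:
        chunk = sock.recv(size)
        if not chunk:
            raise ConnectionError("peer closed before the frame was complete")
        chunks.append(chunk)
        size -= len(chunk)
    return b"".join(chunks)

def recv_frame(sock: socket.socket) -> bytes:
    header = recv_exact(sock, 8)              # fixed-size TCP header
    payload_len = unpack('<HHI', header)[2]   # advertised payload length
    return recv_exact(sock, payload_len)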
tanwanirahul/django-batch-requests
batch_requests/settings.py
https://github.com/tanwanirahul/django-batch-requests/blob/9c5afc42f7542f466247f4ffed9c44e1c49fa20d/batch_requests/settings.py#L27-L34
def import_class(class_path): ''' Imports the class for the given class name. ''' module_name, class_name = class_path.rsplit(".", 1) module = import_module(module_name) claz = getattr(module, class_name) return claz
[ "def", "import_class", "(", "class_path", ")", ":", "module_name", ",", "class_name", "=", "class_path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "module", "=", "import_module", "(", "module_name", ")", "claz", "=", "getattr", "(", "module", ",", "class_name", ")", "return", "claz" ]
Imports the class for the given class name.
[ "Imports", "the", "class", "for", "the", "given", "class", "name", "." ]
python
train
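Usage is a single dotted path, assuming import_class from the snippet above is importable:

OrderedDict = import_class("collections.OrderedDict")
d = OrderedDict(a=1, b=2)
print(type(d))     # <class 'collections.OrderedDict'>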
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L205-L213
def jsonarrinsert(self, name, path, index, *args): """ Inserts the objects ``args`` to the array at index ``index`` under the ``path` in key ``name`` """ pieces = [name, str_path(path), index] for o in args: pieces.append(self._encode(o)) return self.execute_command('JSON.ARRINSERT', *pieces)
[ "def", "jsonarrinsert", "(", "self", ",", "name", ",", "path", ",", "index", ",", "*", "args", ")", ":", "pieces", "=", "[", "name", ",", "str_path", "(", "path", ")", ",", "index", "]", "for", "o", "in", "args", ":", "pieces", ".", "append", "(", "self", ".", "_encode", "(", "o", ")", ")", "return", "self", ".", "execute_command", "(", "'JSON.ARRINSERT'", ",", "*", "pieces", ")" ]
Inserts the objects ``args`` to the array at index ``index`` under the ``path` in key ``name``
[ "Inserts", "the", "objects", "args", "to", "the", "array", "at", "index", "index", "under", "the", "path", "in", "key", "name" ]
python
train
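A typical call, following rejson-py's documented Client/Path interface; it assumes a local Redis server with the ReJSON module loaded.

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
rj.jsonset('obj', Path.rootPath(), {'items': ['a', 'd']})
rj.jsonarrinsert('obj', Path('.items'), 1, 'b', 'c')     # insert at index 1
print(rj.jsonget('obj', Path('.items')))                 # ['a', 'b', 'c', 'd']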
vecnet/vecnet.openmalaria
vecnet/openmalaria/scenario/interventions.py
https://github.com/vecnet/vecnet.openmalaria/blob/795bc9d1b81a6c664f14879edda7a7c41188e95a/vecnet/openmalaria/scenario/interventions.py#L516-L523
def anophelesParams(self): """ :rtype: AnophelesParams """ list_of_anopheles = [] for anophelesParams in self.gvi.findall("anophelesParams"): list_of_anopheles.append(AnophelesParams(anophelesParams)) return list_of_anopheles
[ "def", "anophelesParams", "(", "self", ")", ":", "list_of_anopheles", "=", "[", "]", "for", "anophelesParams", "in", "self", ".", "gvi", ".", "findall", "(", "\"anophelesParams\"", ")", ":", "list_of_anopheles", ".", "append", "(", "AnophelesParams", "(", "anophelesParams", ")", ")", "return", "list_of_anopheles" ]
:rtype: AnophelesParams
[ ":", "rtype", ":", "AnophelesParams" ]
python
train
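The property is a thin wrapper around ElementTree's findall on the gvi element. The same pattern in isolation, with plain ElementTree and an illustrative attribute instead of the AnophelesParams wrapper class:

import xml.etree.ElementTree as ET

xml = """<GVI>
  <anophelesParams mosquito="gambiae_ss"/>
  <anophelesParams mosquito="funestus"/>
</GVI>"""

gvi = ET.fromstring(xml)
print([e.attrib["mosquito"] for e in gvi.findall("anophelesParams")])
# ['gambiae_ss', 'funestus']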
juicer/juicer
juicer/utils/__init__.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/__init__.py#L837-L844
def repo_def_matches_reality(juicer_def, pulp_def): """Compare a juicer repo def with a given pulp definition. Compute and return the update necessary to make `pulp_def` match `juicer_def`. `juicer_def` - A JuicerRepo() object representing a juicer repository `pulp_def` - A PulpRepo() object representing a pulp repository """ return juicer.common.Repo.RepoDiff(juicer_repo=juicer_def, pulp_repo=pulp_def)
[ "def", "repo_def_matches_reality", "(", "juicer_def", ",", "pulp_def", ")", ":", "return", "juicer", ".", "common", ".", "Repo", ".", "RepoDiff", "(", "juicer_repo", "=", "juicer_def", ",", "pulp_repo", "=", "pulp_def", ")" ]
Compare a juicer repo def with a given pulp definition. Compute and return the update necessary to make `pulp_def` match `juicer_def`. `juicer_def` - A JuicerRepo() object representing a juicer repository `pulp_def` - A PulpRepo() object representing a pulp repository
[ "Compare", "a", "juicer", "repo", "def", "with", "a", "given", "pulp", "definition", ".", "Compute", "and", "return", "the", "update", "necessary", "to", "make", "pulp_def", "match", "juicer_def", "." ]
python
train
sebp/scikit-survival
sksurv/kernels/clinical.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L35-L50
def _get_continuous_and_ordinal_array(x): """Convert array from continuous and ordered categorical columns""" nominal_columns = x.select_dtypes(include=['object', 'category']).columns ordinal_columns = pandas.Index([v for v in nominal_columns if x[v].cat.ordered]) continuous_columns = x.select_dtypes(include=[numpy.number]).columns x_num = x.loc[:, continuous_columns].astype(numpy.float64).values if len(ordinal_columns) > 0: x = _ordinal_as_numeric(x, ordinal_columns) nominal_columns = nominal_columns.difference(ordinal_columns) x_out = numpy.column_stack((x_num, x)) else: x_out = x_num return x_out, nominal_columns
[ "def", "_get_continuous_and_ordinal_array", "(", "x", ")", ":", "nominal_columns", "=", "x", ".", "select_dtypes", "(", "include", "=", "[", "'object'", ",", "'category'", "]", ")", ".", "columns", "ordinal_columns", "=", "pandas", ".", "Index", "(", "[", "v", "for", "v", "in", "nominal_columns", "if", "x", "[", "v", "]", ".", "cat", ".", "ordered", "]", ")", "continuous_columns", "=", "x", ".", "select_dtypes", "(", "include", "=", "[", "numpy", ".", "number", "]", ")", ".", "columns", "x_num", "=", "x", ".", "loc", "[", ":", ",", "continuous_columns", "]", ".", "astype", "(", "numpy", ".", "float64", ")", ".", "values", "if", "len", "(", "ordinal_columns", ")", ">", "0", ":", "x", "=", "_ordinal_as_numeric", "(", "x", ",", "ordinal_columns", ")", "nominal_columns", "=", "nominal_columns", ".", "difference", "(", "ordinal_columns", ")", "x_out", "=", "numpy", ".", "column_stack", "(", "(", "x_num", ",", "x", ")", ")", "else", ":", "x_out", "=", "x_num", "return", "x_out", ",", "nominal_columns" ]
Convert array from continuous and ordered categorical columns
[ "Convert", "array", "from", "continuous", "and", "ordered", "categorical", "columns" ]
python
train
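The select_dtypes split is easy to see on a small DataFrame: ordered categoricals are pulled out as ordinal, the rest of the object/category columns stay nominal, and numeric columns form the continuous block.

import numpy as np
import pandas as pd

x = pd.DataFrame({
    "age": [30.0, 41.0, 56.0],
    "grade": pd.Categorical(["low", "high", "mid"],
                            categories=["low", "mid", "high"], ordered=True),
    "center": pd.Categorical(["a", "b", "a"]),          # unordered -> stays nominal
})

nominal = x.select_dtypes(include=["object", "category"]).columns
ordinal = pd.Index([c for c in nominal if x[c].cat.ordered])
continuous = x.select_dtypes(include=[np.number]).columns
print(list(continuous), list(ordinal), list(nominal.difference(ordinal)))
# ['age'] ['grade'] ['center']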
django-fluent/django-fluent-blogs
fluent_blogs/admin/abstractbase.py
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/admin/abstractbase.py#L72-L85
def formfield_for_dbfield(self, db_field, **kwargs): """ Allow formfield_overrides to contain field names too. """ overrides = self.formfield_overrides.get(db_field.name) if overrides: kwargs.update(overrides) field = super(AbstractEntryBaseAdmin, self).formfield_for_dbfield(db_field, **kwargs) # Pass user to the form. if db_field.name == 'author': field.user = kwargs['request'].user return field
[ "def", "formfield_for_dbfield", "(", "self", ",", "db_field", ",", "*", "*", "kwargs", ")", ":", "overrides", "=", "self", ".", "formfield_overrides", ".", "get", "(", "db_field", ".", "name", ")", "if", "overrides", ":", "kwargs", ".", "update", "(", "overrides", ")", "field", "=", "super", "(", "AbstractEntryBaseAdmin", ",", "self", ")", ".", "formfield_for_dbfield", "(", "db_field", ",", "*", "*", "kwargs", ")", "# Pass user to the form.", "if", "db_field", ".", "name", "==", "'author'", ":", "field", ".", "user", "=", "kwargs", "[", "'request'", "]", ".", "user", "return", "field" ]
Allow formfield_overrides to contain field names too.
[ "Allow", "formfield_overrides", "to", "contain", "field", "names", "too", "." ]
python
train
Qiskit/qiskit-terra
qiskit/validation/base.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/validation/base.py#L201-L224
def _create_validation_schema(schema_cls): """Create a patched Schema for validating models. Model validation is not part of Marshmallow. Schemas have a ``validate`` method but this delegates execution on ``load`` and discards the result. Similarly, ``load`` will call ``_deserialize`` on every field in the schema. This function patches the ``_deserialize`` instance method of each field to make it call a custom defined method ``check_type`` provided by Qiskit in the different fields at ``qiskit.validation.fields``. Returns: BaseSchema: a copy of the original Schema, overriding the ``_deserialize()`` call of its fields. """ validation_schema = schema_cls() for _, field in validation_schema.fields.items(): if isinstance(field, ModelTypeValidator): validate_function = field.__class__.check_type field._deserialize = MethodType(validate_function, field) return validation_schema
[ "def", "_create_validation_schema", "(", "schema_cls", ")", ":", "validation_schema", "=", "schema_cls", "(", ")", "for", "_", ",", "field", "in", "validation_schema", ".", "fields", ".", "items", "(", ")", ":", "if", "isinstance", "(", "field", ",", "ModelTypeValidator", ")", ":", "validate_function", "=", "field", ".", "__class__", ".", "check_type", "field", ".", "_deserialize", "=", "MethodType", "(", "validate_function", ",", "field", ")", "return", "validation_schema" ]
Create a patched Schema for validating models. Model validation is not part of Marshmallow. Schemas have a ``validate`` method but this delegates execution on ``load`` and discards the result. Similarly, ``load`` will call ``_deserialize`` on every field in the schema. This function patches the ``_deserialize`` instance method of each field to make it call a custom defined method ``check_type`` provided by Qiskit in the different fields at ``qiskit.validation.fields``. Returns: BaseSchema: a copy of the original Schema, overriding the ``_deserialize()`` call of its fields.
[ "Create", "a", "patched", "Schema", "for", "validating", "models", "." ]
python
test
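The core trick in _create_validation_schema is rebinding a plain function as an instance method with types.MethodType, so only that one field instance is patched. A generic sketch, independent of Marshmallow:

from types import MethodType

class Field:
    def _deserialize(self, value):
        return value

def check_type(self, value):
    # stand-in for the validator that replaces normal deserialization
    if not isinstance(value, int):
        raise TypeError("expected int, got %r" % (value,))
    return value

field = Field()
field._deserialize = MethodType(check_type, field)   # patches this instance only
print(field._deserialize(5))                         # 5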
robinandeer/puzzle
puzzle/models/sql/models.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/sql/models.py#L55-L59
def case_comments(self): """Return only comments made on the case.""" comments = (comment for comment in self.comments if comment.variant_id is None) return comments
[ "def", "case_comments", "(", "self", ")", ":", "comments", "=", "(", "comment", "for", "comment", "in", "self", ".", "comments", "if", "comment", ".", "variant_id", "is", "None", ")", "return", "comments" ]
Return only comments made on the case.
[ "Return", "only", "comments", "made", "on", "the", "case", "." ]
python
train
inasafe/inasafe
safe/utilities/gis.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/gis.py#L356-L363
def qgis_version_detailed(): """Get the detailed version of QGIS. :returns: List containing major, minor and patch. :rtype: list """ version = str(Qgis.QGIS_VERSION_INT) return [int(version[0]), int(version[1:3]), int(version[3:])]
[ "def", "qgis_version_detailed", "(", ")", ":", "version", "=", "str", "(", "Qgis", ".", "QGIS_VERSION_INT", ")", "return", "[", "int", "(", "version", "[", "0", "]", ")", ",", "int", "(", "version", "[", "1", ":", "3", "]", ")", ",", "int", "(", "version", "[", "3", ":", "]", ")", "]" ]
Get the detailed version of QGIS. :returns: List containing major, minor and patch. :rtype: list
[ "Get", "the", "detailed", "version", "of", "QGIS", "." ]
python
train
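QGIS_VERSION_INT packs major/minor/patch into a single integer, so 32216 splits into [3, 22, 16]. The slicing can be checked without QGIS installed (like the original, it assumes a single-digit major version):

def split_version_int(version_int):
    version = str(version_int)
    return [int(version[0]), int(version[1:3]), int(version[3:])]

print(split_version_int(32216))   # [3, 22, 16]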
webrecorder/pywb
pywb/apps/frontendapp.py
https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/apps/frontendapp.py#L105-L130
def _init_routes(self): """Initialize the routes and based on the configuration file makes available specific routes (proxy mode, record)""" self.url_map = Map() self.url_map.add(Rule('/static/_/<coll>/<path:filepath>', endpoint=self.serve_static)) self.url_map.add(Rule('/static/<path:filepath>', endpoint=self.serve_static)) self.url_map.add(Rule('/collinfo.json', endpoint=self.serve_listing)) if self.is_valid_coll('$root'): coll_prefix = '' else: coll_prefix = '/<coll>' self.url_map.add(Rule('/', endpoint=self.serve_home)) self.url_map.add(Rule(coll_prefix + self.cdx_api_endpoint, endpoint=self.serve_cdx)) self.url_map.add(Rule(coll_prefix + '/', endpoint=self.serve_coll_page)) self.url_map.add(Rule(coll_prefix + '/timemap/<timemap_output>/<path:url>', endpoint=self.serve_content)) if self.recorder_path: self.url_map.add(Rule(coll_prefix + self.RECORD_ROUTE + '/<path:url>', endpoint=self.serve_record)) if self.proxy_prefix is not None: # Add the proxy-fetch endpoint to enable PreservationWorker to make CORS fetches worry free in proxy mode self.url_map.add(Rule('/proxy-fetch/<path:url>', endpoint=self.proxy_fetch, methods=['GET', 'HEAD', 'OPTIONS'])) self.url_map.add(Rule(coll_prefix + '/<path:url>', endpoint=self.serve_content))
[ "def", "_init_routes", "(", "self", ")", ":", "self", ".", "url_map", "=", "Map", "(", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "'/static/_/<coll>/<path:filepath>'", ",", "endpoint", "=", "self", ".", "serve_static", ")", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "'/static/<path:filepath>'", ",", "endpoint", "=", "self", ".", "serve_static", ")", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "'/collinfo.json'", ",", "endpoint", "=", "self", ".", "serve_listing", ")", ")", "if", "self", ".", "is_valid_coll", "(", "'$root'", ")", ":", "coll_prefix", "=", "''", "else", ":", "coll_prefix", "=", "'/<coll>'", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "'/'", ",", "endpoint", "=", "self", ".", "serve_home", ")", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "coll_prefix", "+", "self", ".", "cdx_api_endpoint", ",", "endpoint", "=", "self", ".", "serve_cdx", ")", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "coll_prefix", "+", "'/'", ",", "endpoint", "=", "self", ".", "serve_coll_page", ")", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "coll_prefix", "+", "'/timemap/<timemap_output>/<path:url>'", ",", "endpoint", "=", "self", ".", "serve_content", ")", ")", "if", "self", ".", "recorder_path", ":", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "coll_prefix", "+", "self", ".", "RECORD_ROUTE", "+", "'/<path:url>'", ",", "endpoint", "=", "self", ".", "serve_record", ")", ")", "if", "self", ".", "proxy_prefix", "is", "not", "None", ":", "# Add the proxy-fetch endpoint to enable PreservationWorker to make CORS fetches worry free in proxy mode", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "'/proxy-fetch/<path:url>'", ",", "endpoint", "=", "self", ".", "proxy_fetch", ",", "methods", "=", "[", "'GET'", ",", "'HEAD'", ",", "'OPTIONS'", "]", ")", ")", "self", ".", "url_map", ".", "add", "(", "Rule", "(", "coll_prefix", "+", "'/<path:url>'", ",", "endpoint", "=", "self", ".", "serve_content", ")", ")" ]
Initialize the routes and based on the configuration file makes available specific routes (proxy mode, record)
[ "Initialize", "the", "routes", "and", "based", "on", "the", "configuration", "file", "makes", "available", "specific", "routes", "(", "proxy", "mode", "record", ")" ]
python
train
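The url_map assembled in _init_routes is standard Werkzeug routing; a minimal sketch of how such a map dispatches a request path (endpoints here are plain strings rather than bound methods):

from werkzeug.routing import Map, Rule

url_map = Map([
    Rule('/static/<path:filepath>', endpoint='serve_static'),
    Rule('/<coll>/', endpoint='serve_coll_page'),
    Rule('/<coll>/<path:url>', endpoint='serve_content'),
])

urls = url_map.bind('example.org')
print(urls.match('/my-coll/2020/index.html'))
# ('serve_content', {'coll': 'my-coll', 'url': '2020/index.html'})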
pysal/giddy
giddy/markov.py
https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L811-L830
def summary(self, file_name=None): """ A summary method to call the Markov homogeneity test to test for temporally lagged spatial dependence. To learn more about the properties of the tests, refer to :cite:`Rey2016a` and :cite:`Kang2018`. """ class_names = ["C%d" % i for i in range(self.k)] regime_names = ["LAG%d" % i for i in range(self.k)] ht = homogeneity(self.T, class_names=class_names, regime_names=regime_names) title = "Spatial Markov Test" if self.variable_name: title = title + ": " + self.variable_name if file_name: ht.summary(file_name=file_name, title=title) else: ht.summary(title=title)
[ "def", "summary", "(", "self", ",", "file_name", "=", "None", ")", ":", "class_names", "=", "[", "\"C%d\"", "%", "i", "for", "i", "in", "range", "(", "self", ".", "k", ")", "]", "regime_names", "=", "[", "\"LAG%d\"", "%", "i", "for", "i", "in", "range", "(", "self", ".", "k", ")", "]", "ht", "=", "homogeneity", "(", "self", ".", "T", ",", "class_names", "=", "class_names", ",", "regime_names", "=", "regime_names", ")", "title", "=", "\"Spatial Markov Test\"", "if", "self", ".", "variable_name", ":", "title", "=", "title", "+", "\": \"", "+", "self", ".", "variable_name", "if", "file_name", ":", "ht", ".", "summary", "(", "file_name", "=", "file_name", ",", "title", "=", "title", ")", "else", ":", "ht", ".", "summary", "(", "title", "=", "title", ")" ]
A summary method to call the Markov homogeneity test to test for temporally lagged spatial dependence. To learn more about the properties of the tests, refer to :cite:`Rey2016a` and :cite:`Kang2018`.
[ "A", "summary", "method", "to", "call", "the", "Markov", "homogeneity", "test", "to", "test", "for", "temporally", "lagged", "spatial", "dependence", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/item.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/item.py#L587-L611
def fill_data_brok_from(self, data, brok_type): """ Add properties to 'data' parameter with properties of this object when 'brok_type' parameter is defined in fill_brok of these properties :param data: object to fill :type data: object :param brok_type: name of brok_type :type brok_type: var :return: None """ cls = self.__class__ # Configuration properties for prop, entry in list(cls.properties.items()): # Is this property intended for broking? if brok_type in entry.fill_brok: data[prop] = self.get_property_value_for_brok(prop, cls.properties) # And the running properties if hasattr(cls, 'running_properties'): # We've got prop in running_properties too for prop, entry in list(cls.running_properties.items()): # if 'fill_brok' in cls.running_properties[prop]: if brok_type in entry.fill_brok: data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
[ "def", "fill_data_brok_from", "(", "self", ",", "data", ",", "brok_type", ")", ":", "cls", "=", "self", ".", "__class__", "# Configuration properties", "for", "prop", ",", "entry", "in", "list", "(", "cls", ".", "properties", ".", "items", "(", ")", ")", ":", "# Is this property intended for broking?", "if", "brok_type", "in", "entry", ".", "fill_brok", ":", "data", "[", "prop", "]", "=", "self", ".", "get_property_value_for_brok", "(", "prop", ",", "cls", ".", "properties", ")", "# And the running properties", "if", "hasattr", "(", "cls", ",", "'running_properties'", ")", ":", "# We've got prop in running_properties too", "for", "prop", ",", "entry", "in", "list", "(", "cls", ".", "running_properties", ".", "items", "(", ")", ")", ":", "# if 'fill_brok' in cls.running_properties[prop]:", "if", "brok_type", "in", "entry", ".", "fill_brok", ":", "data", "[", "prop", "]", "=", "self", ".", "get_property_value_for_brok", "(", "prop", ",", "cls", ".", "running_properties", ")" ]
Add properties to 'data' parameter with properties of this object when 'brok_type' parameter is defined in fill_brok of these properties :param data: object to fill :type data: object :param brok_type: name of brok_type :type brok_type: var :return: None
[ "Add", "properties", "to", "data", "parameter", "with", "properties", "of", "this", "object", "when", "brok_type", "parameter", "is", "defined", "in", "fill_brok", "of", "these", "properties" ]
python
train
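The filtering logic is: walk the class-level properties dict and copy a value only when the requested brok type appears in that property's fill_brok list. A stripped-down sketch with made-up property entries (Alignak's real Entry objects carry more metadata):

class Entry:
    def __init__(self, fill_brok):
        self.fill_brok = fill_brok

class Host:
    properties = {
        'host_name': Entry(['full_status']),
        'address':   Entry(['full_status', 'check_result']),
        'notes':     Entry([]),
    }

    def fill_data_brok_from(self, data, brok_type):
        for prop, entry in self.properties.items():
            if brok_type in entry.fill_brok:
                data[prop] = getattr(self, prop, None)

h = Host()
h.host_name, h.address, h.notes = 'srv1', '10.0.0.1', 'rack 4'
data = {}
h.fill_data_brok_from(data, 'check_result')
print(data)   # {'address': '10.0.0.1'}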
miguelmoreto/pycomtrade
src/pyComtrade.py
https://github.com/miguelmoreto/pycomtrade/blob/1785ebbc96c01a60e58fb11f0aa4848be855aa0d/src/pyComtrade.py#L300-L306
def getAnalogID(self,num): """ Returns the COMTRADE ID of a given channel number. The number to be given is the same of the COMTRADE header. """ listidx = self.An.index(num) # Get the position of the channel number. return self.Ach_id[listidx]
[ "def", "getAnalogID", "(", "self", ",", "num", ")", ":", "listidx", "=", "self", ".", "An", ".", "index", "(", "num", ")", "# Get the position of the channel number.", "return", "self", ".", "Ach_id", "[", "listidx", "]" ]
Returns the COMTRADE ID of a given channel number. The number to be given is the same of the COMTRADE header.
[ "Returns", "the", "COMTRADE", "ID", "of", "a", "given", "channel", "number", ".", "The", "number", "to", "be", "given", "is", "the", "same", "of", "the", "COMTRADE", "header", "." ]
python
train
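Channel lookup is positional: the channel-number list and the ID list run in parallel, so index() on one gives the offset into the other. The same pattern with plain lists:

An = [1, 2, 5]                    # channel numbers as listed in the COMTRADE header
Ach_id = ["VA", "VB", "IA"]       # matching channel IDs

def analog_id(num):
    return Ach_id[An.index(num)]  # raises ValueError if the channel is absent

print(analog_id(5))               # IA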
marteinn/genres
genres/db.py
https://github.com/marteinn/genres/blob/4bbc90f7c2c527631380c08b4d99a4e40abed955/genres/db.py#L89-L102
def _parse_entry(entry, limit=10): """ Finds both label and if provided, the points for ranking. """ entry = entry.split(",") label = entry[0] points = limit if len(entry) > 1: proc = float(entry[1].strip()) points = limit * proc return label, int(points)
[ "def", "_parse_entry", "(", "entry", ",", "limit", "=", "10", ")", ":", "entry", "=", "entry", ".", "split", "(", "\",\"", ")", "label", "=", "entry", "[", "0", "]", "points", "=", "limit", "if", "len", "(", "entry", ")", ">", "1", ":", "proc", "=", "float", "(", "entry", "[", "1", "]", ".", "strip", "(", ")", ")", "points", "=", "limit", "*", "proc", "return", "label", ",", "int", "(", "points", ")" ]
Finds both label and if provided, the points for ranking.
[ "Finds", "both", "label", "and", "if", "provided", "the", "points", "for", "ranking", "." ]
python
train
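The behaviour follows directly from the code above (assuming _parse_entry is in scope from genres.db): without a weight the label gets the full limit, and a trailing fraction scales that limit before truncation to int.

print(_parse_entry("rock"))                  # ('rock', 10)
print(_parse_entry("indie, 0.5"))            # ('indie', 5)
print(_parse_entry("jazz, 0.25", limit=20))  # ('jazz', 5)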
ddorn/GUI
GUI/math.py
https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/math.py#L109-L117
def normnorm(self): """ Return a vector normal to this one with a norm of one :return: V2 """ n = self.norm() return V2(-self.y / n, self.x / n)
[ "def", "normnorm", "(", "self", ")", ":", "n", "=", "self", ".", "norm", "(", ")", "return", "V2", "(", "-", "self", ".", "y", "/", "n", ",", "self", ".", "x", "/", "n", ")" ]
Return a vector normal to this one with a norm of one :return: V2
[ "Return", "a", "vector", "normal", "to", "this", "one", "with", "a", "norm", "of", "one" ]
python
train
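A worked example, assuming V2 from GUI.math is in scope: for v = (3, 4) the norm is 5, so the returned normal is (-4/5, 3/5), which has unit length and is perpendicular to v.

v = V2(3, 4)
n = v.normnorm()
print(n.x, n.y)           # -0.8 0.6
print(3 * n.x + 4 * n.y)  # 0.0 -- perpendicular to v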
pip-services3-python/pip-services3-commons-python
pip_services3_commons/reflect/RecursiveObjectWriter.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/RecursiveObjectWriter.py#L93-L107
def copy_properties(dest, src): """ Copies content of one object to another object by recursively reading all properties from source object and then recursively writing them to destination object. :param dest: a destination object to write properties to. :param src: a source object to read properties from """ if dest == None or src == None: return values = RecursiveObjectReader.get_properties(src) RecursiveObjectWriter.set_properties(dest, values)
[ "def", "copy_properties", "(", "dest", ",", "src", ")", ":", "if", "dest", "==", "None", "or", "src", "==", "None", ":", "return", "values", "=", "RecursiveObjectReader", ".", "get_properties", "(", "src", ")", "RecursiveObjectWriter", ".", "set_properties", "(", "dest", ",", "values", ")" ]
Copies content of one object to another object by recursively reading all properties from source object and then recursively writing them to destination object. :param dest: a destination object to write properties to. :param src: a source object to read properties from
[ "Copies", "content", "of", "one", "object", "to", "another", "object", "by", "recursively", "reading", "all", "properties", "from", "source", "object", "and", "then", "recursively", "writing", "them", "to", "destination", "object", "." ]
python
train
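A hedged usage sketch (the import path is inferred from the module location above): copy_properties reads every readable property of src and writes it onto dest, so two plain objects with matching attribute names are enough to see the effect.

from pip_services3_commons.reflect.RecursiveObjectWriter import RecursiveObjectWriter

class Source(object):
    def __init__(self):
        self.id = 1
        self.name = 'ABC'

class Dest(object):
    def __init__(self):
        self.id = None
        self.name = None

dest = Dest()
RecursiveObjectWriter.copy_properties(dest, Source())
print(dest.id, dest.name)  # 1 ABC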
numba/llvmlite
llvmlite/binding/targets.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/binding/targets.py#L47-L69
def get_host_cpu_features(): """ Returns a dictionary-like object indicating the CPU features for current architecture and whether they are enabled for this CPU. The key-value pairs are the feature name as string and a boolean indicating whether the feature is available. The returned value is an instance of ``FeatureMap`` class, which adds a new method ``.flatten()`` for returning a string suitable for use as the "features" argument to ``Target.create_target_machine()``. If LLVM has not implemented this feature or it fails to get the information, this function will raise a RuntimeError exception. """ with ffi.OutputString() as out: outdict = FeatureMap() if not ffi.lib.LLVMPY_GetHostCPUFeatures(out): raise RuntimeError("failed to get host cpu features.") flag_map = {'+': True, '-': False} content = str(out) if content: # protect against empty string for feat in content.split(','): if feat: # protect against empty feature outdict[feat[1:]] = flag_map[feat[0]] return outdict
[ "def", "get_host_cpu_features", "(", ")", ":", "with", "ffi", ".", "OutputString", "(", ")", "as", "out", ":", "outdict", "=", "FeatureMap", "(", ")", "if", "not", "ffi", ".", "lib", ".", "LLVMPY_GetHostCPUFeatures", "(", "out", ")", ":", "raise", "RuntimeError", "(", "\"failed to get host cpu features.\"", ")", "flag_map", "=", "{", "'+'", ":", "True", ",", "'-'", ":", "False", "}", "content", "=", "str", "(", "out", ")", "if", "content", ":", "# protect against empty string", "for", "feat", "in", "content", ".", "split", "(", "','", ")", ":", "if", "feat", ":", "# protect against empty feature", "outdict", "[", "feat", "[", "1", ":", "]", "]", "=", "flag_map", "[", "feat", "[", "0", "]", "]", "return", "outdict" ]
Returns a dictionary-like object indicating the CPU features for current architecture and whether they are enabled for this CPU. The key-value pairs are the feature name as string and a boolean indicating whether the feature is available. The returned value is an instance of ``FeatureMap`` class, which adds a new method ``.flatten()`` for returning a string suitable for use as the "features" argument to ``Target.create_target_machine()``. If LLVM has not implemented this feature or it fails to get the information, this function will raise a RuntimeError exception.
[ "Returns", "a", "dictionary", "-", "like", "object", "indicating", "the", "CPU", "features", "for", "current", "architecture", "and", "whether", "they", "are", "enabled", "for", "this", "CPU", ".", "The", "key", "-", "value", "pairs", "are", "the", "feature", "name", "as", "string", "and", "a", "boolean", "indicating", "whether", "the", "feature", "is", "available", ".", "The", "returned", "value", "is", "an", "instance", "of", "FeatureMap", "class", "which", "adds", "a", "new", "method", ".", "flatten", "()", "for", "returning", "a", "string", "suitable", "for", "use", "as", "the", "features", "argument", "to", "Target", ".", "create_target_machine", "()", "." ]
python
train
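get_host_cpu_features is exposed through llvmlite.binding; flatten() turns the returned FeatureMap into the "+feat,-feat" string accepted as the features argument of create_target_machine(). The printed values below are host-dependent examples, not fixed outputs.

import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()

features = llvm.get_host_cpu_features()
print(features.get('sse2'))     # True on most x86-64 hosts
print(features.flatten()[:40])  # e.g. '+64bit,+adx,+aes,...'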
inspirehep/harvesting-kit
harvestingkit/utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/utils.py#L376-L392
def convert_date_from_iso_to_human(value): """Convert a date-value to the ISO date standard for humans.""" try: year, month, day = value.split("-") except ValueError: # Not separated by "-". Space? try: year, month, day = value.split(" ") except ValueError: # What gives? OK, lets just return as is return value try: date_object = datetime(int(year), int(month), int(day)) except TypeError: return value return date_object.strftime("%d %b %Y")
[ "def", "convert_date_from_iso_to_human", "(", "value", ")", ":", "try", ":", "year", ",", "month", ",", "day", "=", "value", ".", "split", "(", "\"-\"", ")", "except", "ValueError", ":", "# Not separated by \"-\". Space?", "try", ":", "year", ",", "month", ",", "day", "=", "value", ".", "split", "(", "\" \"", ")", "except", "ValueError", ":", "# What gives? OK, lets just return as is", "return", "value", "try", ":", "date_object", "=", "datetime", "(", "int", "(", "year", ")", ",", "int", "(", "month", ")", ",", "int", "(", "day", ")", ")", "except", "TypeError", ":", "return", "value", "return", "date_object", ".", "strftime", "(", "\"%d %b %Y\"", ")" ]
Convert a date-value to the ISO date standard for humans.
[ "Convert", "a", "date", "-", "value", "to", "the", "ISO", "date", "standard", "for", "humans", "." ]
python
valid
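Examples of the three branches above (assuming the function is imported from harvestingkit.utils): "-" separated dates, space separated dates, and anything unparseable, which is returned unchanged.

print(convert_date_from_iso_to_human("2014-03-07"))  # '07 Mar 2014'
print(convert_date_from_iso_to_human("2014 03 07"))  # '07 Mar 2014'
print(convert_date_from_iso_to_human("March 2014"))  # 'March 2014' (unchanged)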
Jammy2211/PyAutoLens
autolens/model/profiles/mass_profiles.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/profiles/mass_profiles.py#L353-L356
def einstein_radius_rescaled(self): """Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles parameters""" return ((3 - self.slope) / (1 + self.axis_ratio)) * self.einstein_radius ** (self.slope - 1)
[ "def", "einstein_radius_rescaled", "(", "self", ")", ":", "return", "(", "(", "3", "-", "self", ".", "slope", ")", "/", "(", "1", "+", "self", ".", "axis_ratio", ")", ")", "*", "self", ".", "einstein_radius", "**", "(", "self", ".", "slope", "-", "1", ")" ]
Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles parameters
[ "Rescale", "the", "einstein", "radius", "by", "slope", "and", "axis_ratio", "to", "reduce", "its", "degeneracy", "with", "other", "mass", "-", "profiles", "parameters" ]
python
valid
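A worked instance of the formula above with hypothetical parameter values: for slope=2.0, axis_ratio=0.5 and einstein_radius=1.2 the prefactor is (3-2)/(1+0.5) = 2/3, so the rescaled radius is about 0.8.

slope, axis_ratio, einstein_radius = 2.0, 0.5, 1.2
rescaled = ((3 - slope) / (1 + axis_ratio)) * einstein_radius ** (slope - 1)
print(rescaled)  # ~0.8 (floating point gives 0.7999999999999999)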
Falkonry/falkonry-python-client
falkonryclient/service/falkonry.py
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/falkonry.py#L438-L446
def get_status(self, tracker_id): """ Get status of a task :param tracker_id: string """ url = '/app/track/' + tracker_id response = self.http.get(url) return response
[ "def", "get_status", "(", "self", ",", "tracker_id", ")", ":", "url", "=", "'/app/track/'", "+", "tracker_id", "response", "=", "self", ".", "http", ".", "get", "(", "url", ")", "return", "response" ]
Get status of a task :param tracker_id: string
[ "Get", "status", "of", "a", "task", ":", "param", "tracker_id", ":", "string" ]
python
train
touilleMan/marshmallow-mongoengine
marshmallow_mongoengine/conversion/fields.py
https://github.com/touilleMan/marshmallow-mongoengine/blob/21223700ea1f1d0209c967761e5c22635ee721e7/marshmallow_mongoengine/conversion/fields.py#L143-L155
def register_field(mongo_field_cls, marshmallow_field_cls, available_params=()): """ Bind a marshmallow field to its corresponding mongoengine field :param mongo_field_cls: Mongoengine Field :param marshmallow_field_cls: Marshmallow Field :param available_params: List of :class marshmallow_mongoengine.conversion.params.MetaParam: instances to import the mongoengine field config to marshmallow """ class Builder(MetaFieldBuilder): AVAILABLE_PARAMS = available_params MARSHMALLOW_FIELD_CLS = marshmallow_field_cls register_field_builder(mongo_field_cls, Builder)
[ "def", "register_field", "(", "mongo_field_cls", ",", "marshmallow_field_cls", ",", "available_params", "=", "(", ")", ")", ":", "class", "Builder", "(", "MetaFieldBuilder", ")", ":", "AVAILABLE_PARAMS", "=", "available_params", "MARSHMALLOW_FIELD_CLS", "=", "marshmallow_field_cls", "register_field_builder", "(", "mongo_field_cls", ",", "Builder", ")" ]
Bind a marshmallow field to its corresponding mongoengine field :param mongo_field_cls: Mongoengine Field :param marshmallow_field_cls: Marshmallow Field :param available_params: List of :class marshmallow_mongoengine.conversion.params.MetaParam: instances to import the mongoengine field config to marshmallow
[ "Bind", "a", "marshmallow", "field", "to", "its", "corresponding", "mongoengine", "field", ":", "param", "mongo_field_cls", ":", "Mongoengine", "Field", ":", "param", "marshmallow_field_cls", ":", "Marshmallow", "Field", ":", "param", "available_params", ":", "List", "of", ":", "class", "marshmallow_mongoengine", ".", "conversion", ".", "params", ".", "MetaParam", ":", "instances", "to", "import", "the", "mongoengine", "field", "config", "to", "marshmallow" ]
python
train
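A hedged sketch of binding a custom mongoengine field to a marshmallow field via register_field; the ColorField class is a made-up example, and the import path follows the module location above.

import mongoengine as me
import marshmallow as ma
from marshmallow_mongoengine.conversion.fields import register_field

class ColorField(me.StringField):
    """Hypothetical custom field used only for illustration."""
    pass

# After this call the converter knows to serialize ColorField as a plain string.
register_field(ColorField, ma.fields.String)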
Azure/azure-sdk-for-python
azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py#L778-L855
def add_face_from_stream( self, large_face_list_id, image, user_data=None, target_face=None, custom_headers=None, raw=False, callback=None, **operation_config): """Add a face to a large face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire. :param large_face_list_id: Id referencing a particular large face list. :type large_face_list_id: str :param image: An image stream. :type image: Generator :param user_data: User-specified data about the face for any purpose. The maximum length is 1KB. :type user_data: str :param target_face: A face rectangle to specify the target face to be added to a person in the format of "targetFace=left,top,width,height". E.g. "targetFace=10,10,100,100". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: PersistedFace or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.face.models.PersistedFace or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>` """ # Construct URL url = self.add_face_from_stream.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), 'largeFaceListId': self._serialize.url("large_face_list_id", large_face_list_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if user_data is not None: query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._client.stream_upload(image, callback) # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
[ "def", "add_face_from_stream", "(", "self", ",", "large_face_list_id", ",", "image", ",", "user_data", "=", "None", ",", "target_face", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "callback", "=", "None", ",", "*", "*", "operation_config", ")", ":", "# Construct URL", "url", "=", "self", ".", "add_face_from_stream", ".", "metadata", "[", "'url'", "]", "path_format_arguments", "=", "{", "'Endpoint'", ":", "self", ".", "_serialize", ".", "url", "(", "\"self.config.endpoint\"", ",", "self", ".", "config", ".", "endpoint", ",", "'str'", ",", "skip_quote", "=", "True", ")", ",", "'largeFaceListId'", ":", "self", ".", "_serialize", ".", "url", "(", "\"large_face_list_id\"", ",", "large_face_list_id", ",", "'str'", ",", "max_length", "=", "64", ",", "pattern", "=", "r'^[a-z0-9-_]+$'", ")", "}", "url", "=", "self", ".", "_client", ".", "format_url", "(", "url", ",", "*", "*", "path_format_arguments", ")", "# Construct parameters", "query_parameters", "=", "{", "}", "if", "user_data", "is", "not", "None", ":", "query_parameters", "[", "'userData'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"user_data\"", ",", "user_data", ",", "'str'", ",", "max_length", "=", "1024", ")", "if", "target_face", "is", "not", "None", ":", "query_parameters", "[", "'targetFace'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "\"target_face\"", ",", "target_face", ",", "'[int]'", ",", "div", "=", "','", ")", "# Construct headers", "header_parameters", "=", "{", "}", "header_parameters", "[", "'Accept'", "]", "=", "'application/json'", "header_parameters", "[", "'Content-Type'", "]", "=", "'application/octet-stream'", "if", "custom_headers", ":", "header_parameters", ".", "update", "(", "custom_headers", ")", "# Construct body", "body_content", "=", "self", ".", "_client", ".", "stream_upload", "(", "image", ",", "callback", ")", "# Construct and send request", "request", "=", "self", ".", "_client", ".", "post", "(", "url", ",", "query_parameters", ",", "header_parameters", ",", "body_content", ")", "response", "=", "self", ".", "_client", ".", "send", "(", "request", ",", "stream", "=", "False", ",", "*", "*", "operation_config", ")", "if", "response", ".", "status_code", "not", "in", "[", "200", "]", ":", "raise", "models", ".", "APIErrorException", "(", "self", ".", "_deserialize", ",", "response", ")", "deserialized", "=", "None", "if", "response", ".", "status_code", "==", "200", ":", "deserialized", "=", "self", ".", "_deserialize", "(", "'PersistedFace'", ",", "response", ")", "if", "raw", ":", "client_raw_response", "=", "ClientRawResponse", "(", "deserialized", ",", "response", ")", "return", "client_raw_response", "return", "deserialized" ]
Add a face to a large face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire. :param large_face_list_id: Id referencing a particular large face list. :type large_face_list_id: str :param image: An image stream. :type image: Generator :param user_data: User-specified data about the face for any purpose. The maximum length is 1KB. :type user_data: str :param target_face: A face rectangle to specify the target face to be added to a person in the format of "targetFace=left,top,width,height". E.g. "targetFace=10,10,100,100". If there is more than one face in the image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param callback: When specified, will be called with each chunk of data that is streamed. The callback should take two arguments, the bytes of the current chunk of data and the response object. If the data is uploading, response will be None. :type callback: Callable[Bytes, response=None] :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: PersistedFace or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.vision.face.models.PersistedFace or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
[ "Add", "a", "face", "to", "a", "large", "face", "list", ".", "The", "input", "face", "is", "specified", "as", "an", "image", "with", "a", "targetFace", "rectangle", ".", "It", "returns", "a", "persistedFaceId", "representing", "the", "added", "face", "and", "persistedFaceId", "will", "not", "expire", "." ]
python
test
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/collection.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/collection.py#L2359-L2454
def find_one_and_update(self, filter, update, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, **kwargs): """Finds a single document and updates it, returning either the original or the updated document. >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) {u'_id': 665, u'done': False, u'count': 25}} By default :meth:`find_one_and_update` returns the original version of the document before the update was applied. To return the updated version of the document instead, use the *return_document* option. >>> from pymongo import ReturnDocument >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) {u'_id': u'userid', u'seq': 1} You can limit the fields returned with the *projection* option. >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... return_document=ReturnDocument.AFTER) {u'seq': 2} The *upsert* option can be used to create the document if it doesn't already exist. >>> db.example.delete_many({}).deleted_count 1 >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... upsert=True, ... return_document=ReturnDocument.AFTER) {u'seq': 1} If multiple documents match *filter*, a *sort* can be applied. >>> for doc in db.test.find({'done': True}): ... print(doc) ... {u'_id': 665, u'done': True, u'result': {u'count': 26}} {u'_id': 701, u'done': True, u'result': {u'count': 17}} >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) {u'_id': 701, u'done': True, u'result': {u'count': 17}} :Parameters: - `filter`: A query that matches the document to update. - `update`: The update operations to apply. - `projection` (optional): A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. projection={'_id': False}). - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is updated. - `upsert` (optional): When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was updated, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Respects write concern. .. warning:: Starting in PyMongo 3.2, this command uses the :class:`~pymongo.write_concern.WriteConcern` of this :class:`~pymongo.collection.Collection` when connected to MongoDB >= 3.2. Note that using an elevated write concern with this command may be slower compared to using the default write concern. .. versionadded:: 3.0 """ common.validate_ok_for_update(update) kwargs['update'] = update return self.__find_and_modify(filter, projection, sort, upsert, return_document, **kwargs)
[ "def", "find_one_and_update", "(", "self", ",", "filter", ",", "update", ",", "projection", "=", "None", ",", "sort", "=", "None", ",", "upsert", "=", "False", ",", "return_document", "=", "ReturnDocument", ".", "BEFORE", ",", "*", "*", "kwargs", ")", ":", "common", ".", "validate_ok_for_update", "(", "update", ")", "kwargs", "[", "'update'", "]", "=", "update", "return", "self", ".", "__find_and_modify", "(", "filter", ",", "projection", ",", "sort", ",", "upsert", ",", "return_document", ",", "*", "*", "kwargs", ")" ]
Finds a single document and updates it, returning either the original or the updated document. >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) {u'_id': 665, u'done': False, u'count': 25}} By default :meth:`find_one_and_update` returns the original version of the document before the update was applied. To return the updated version of the document instead, use the *return_document* option. >>> from pymongo import ReturnDocument >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) {u'_id': u'userid', u'seq': 1} You can limit the fields returned with the *projection* option. >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... return_document=ReturnDocument.AFTER) {u'seq': 2} The *upsert* option can be used to create the document if it doesn't already exist. >>> db.example.delete_many({}).deleted_count 1 >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... upsert=True, ... return_document=ReturnDocument.AFTER) {u'seq': 1} If multiple documents match *filter*, a *sort* can be applied. >>> for doc in db.test.find({'done': True}): ... print(doc) ... {u'_id': 665, u'done': True, u'result': {u'count': 26}} {u'_id': 701, u'done': True, u'result': {u'count': 17}} >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) {u'_id': 701, u'done': True, u'result': {u'count': 17}} :Parameters: - `filter`: A query that matches the document to update. - `update`: The update operations to apply. - `projection` (optional): A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. projection={'_id': False}). - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is updated. - `upsert` (optional): When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was updated, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Respects write concern. .. warning:: Starting in PyMongo 3.2, this command uses the :class:`~pymongo.write_concern.WriteConcern` of this :class:`~pymongo.collection.Collection` when connected to MongoDB >= 3.2. Note that using an elevated write concern with this command may be slower compared to using the default write concern. .. versionadded:: 3.0
[ "Finds", "a", "single", "document", "and", "updates", "it", "returning", "either", "the", "original", "or", "the", "updated", "document", "." ]
python
train
softlayer/softlayer-python
SoftLayer/managers/ipsec.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/ipsec.py#L179-L187
def remove_remote_subnet(self, context_id, subnet_id): """Removes a remote subnet from a tunnel context. :param int context_id: The id-value representing the context instance. :param int subnet_id: The id-value representing the remote subnet. :return bool: True if remote subnet removal was successful. """ return self.context.removeCustomerSubnetFromNetworkTunnel(subnet_id, id=context_id)
[ "def", "remove_remote_subnet", "(", "self", ",", "context_id", ",", "subnet_id", ")", ":", "return", "self", ".", "context", ".", "removeCustomerSubnetFromNetworkTunnel", "(", "subnet_id", ",", "id", "=", "context_id", ")" ]
Removes a remote subnet from a tunnel context. :param int context_id: The id-value representing the context instance. :param int subnet_id: The id-value representing the remote subnet. :return bool: True if remote subnet removal was successful.
[ "Removes", "a", "remote", "subnet", "from", "a", "tunnel", "context", "." ]
python
train
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L739-L754
def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression. """ return (_global_error_suppressions.get(category, False) or linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
[ "def", "IsErrorSuppressedByNolint", "(", "category", ",", "linenum", ")", ":", "return", "(", "_global_error_suppressions", ".", "get", "(", "category", ",", "False", ")", "or", "linenum", "in", "_error_suppressions", ".", "get", "(", "category", ",", "set", "(", ")", ")", "or", "linenum", "in", "_error_suppressions", ".", "get", "(", "None", ",", "set", "(", ")", ")", ")" ]
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression.
[ "Returns", "true", "if", "the", "specified", "error", "category", "is", "suppressed", "on", "this", "line", "." ]
python
valid
chrisspen/burlap
burlap/vm.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vm.py#L644-L654
def respawn(name=None, group=None): """ Deletes and recreates one or more VM instances. """ if name is None: name = get_name() delete(name=name, group=group) instance = get_or_create(name=name, group=group) env.host_string = instance.public_dns_name
[ "def", "respawn", "(", "name", "=", "None", ",", "group", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "get_name", "(", ")", "delete", "(", "name", "=", "name", ",", "group", "=", "group", ")", "instance", "=", "get_or_create", "(", "name", "=", "name", ",", "group", "=", "group", ")", "env", ".", "host_string", "=", "instance", ".", "public_dns_name" ]
Deletes and recreates one or more VM instances.
[ "Deletes", "and", "recreates", "one", "or", "more", "VM", "instances", "." ]
python
valid
amperser/proselint
proselint/checks/misc/illogic.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/misc/illogic.py#L52-L60
def check_without_your_collusion(text): """Check the text itself.""" err = "misc.illogic.collusion" msg = "It's impossible to defraud yourself. Try 'acquiescence'." regex = "without your collusion" return existence_check( text, [regex], err, msg, require_padding=False, offset=-1)
[ "def", "check_without_your_collusion", "(", "text", ")", ":", "err", "=", "\"misc.illogic.collusion\"", "msg", "=", "\"It's impossible to defraud yourself. Try 'acquiescence'.\"", "regex", "=", "\"without your collusion\"", "return", "existence_check", "(", "text", ",", "[", "regex", "]", ",", "err", ",", "msg", ",", "require_padding", "=", "False", ",", "offset", "=", "-", "1", ")" ]
Check the text itself.
[ "Check", "the", "text", "itself", "." ]
python
train
aws/aws-encryption-sdk-python
doc/conf.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/doc/conf.py#L21-L27
def get_version(): """Reads the version (MAJOR.MINOR) from this module.""" release = get_release() split_version = release.split(".") if len(split_version) == 3: return ".".join(split_version[:2]) return release
[ "def", "get_version", "(", ")", ":", "release", "=", "get_release", "(", ")", "split_version", "=", "release", ".", "split", "(", "\".\"", ")", "if", "len", "(", "split_version", ")", "==", "3", ":", "return", "\".\"", ".", "join", "(", "split_version", "[", ":", "2", "]", ")", "return", "release" ]
Reads the version (MAJOR.MINOR) from this module.
[ "Reads", "the", "version", "(", "MAJOR", ".", "MINOR", ")", "from", "this", "module", "." ]
python
train
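get_release() comes from the same conf module, so the reduction is easiest to see on its own: a full MAJOR.MINOR.PATCH release is shortened to MAJOR.MINOR, anything else passes through unchanged (a sketch of the same splitting logic, not the module's own helper).

def shorten(release):
    parts = release.split(".")
    return ".".join(parts[:2]) if len(parts) == 3 else release

print(shorten("1.4.1"))     # '1.4'
print(shorten("1.4"))       # '1.4' (unchanged, not three parts)
print(shorten("2.0.0rc1"))  # '2.0' -- the rc suffix lives in the patch part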
gitpython-developers/GitPython
git/objects/submodule/base.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/submodule/base.py#L287-L308
def _write_git_file_and_module_config(cls, working_tree_dir, module_abspath): """Writes a .git file containing a (preferably) relative path to the actual git module repository. It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir :note: will overwrite existing files ! :note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed if it becomes one :param working_tree_dir: directory to write the .git file into :param module_abspath: absolute path to the bare repository """ git_file = osp.join(working_tree_dir, '.git') rela_path = osp.relpath(module_abspath, start=working_tree_dir) if is_win: if osp.isfile(git_file): os.remove(git_file) with open(git_file, 'wb') as fp: fp.write(("gitdir: %s" % rela_path).encode(defenc)) with GitConfigParser(osp.join(module_abspath, 'config'), read_only=False, merge_includes=False) as writer: writer.set_value('core', 'worktree', to_native_path_linux(osp.relpath(working_tree_dir, start=module_abspath)))
[ "def", "_write_git_file_and_module_config", "(", "cls", ",", "working_tree_dir", ",", "module_abspath", ")", ":", "git_file", "=", "osp", ".", "join", "(", "working_tree_dir", ",", "'.git'", ")", "rela_path", "=", "osp", ".", "relpath", "(", "module_abspath", ",", "start", "=", "working_tree_dir", ")", "if", "is_win", ":", "if", "osp", ".", "isfile", "(", "git_file", ")", ":", "os", ".", "remove", "(", "git_file", ")", "with", "open", "(", "git_file", ",", "'wb'", ")", "as", "fp", ":", "fp", ".", "write", "(", "(", "\"gitdir: %s\"", "%", "rela_path", ")", ".", "encode", "(", "defenc", ")", ")", "with", "GitConfigParser", "(", "osp", ".", "join", "(", "module_abspath", ",", "'config'", ")", ",", "read_only", "=", "False", ",", "merge_includes", "=", "False", ")", "as", "writer", ":", "writer", ".", "set_value", "(", "'core'", ",", "'worktree'", ",", "to_native_path_linux", "(", "osp", ".", "relpath", "(", "working_tree_dir", ",", "start", "=", "module_abspath", ")", ")", ")" ]
Writes a .git file containing a (preferably) relative path to the actual git module repository. It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir :note: will overwrite existing files ! :note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed if it becomes one :param working_tree_dir: directory to write the .git file into :param module_abspath: absolute path to the bare repository
[ "Writes", "a", ".", "git", "file", "containing", "a", "(", "preferably", ")", "relative", "path", "to", "the", "actual", "git", "module", "repository", ".", "It", "is", "an", "error", "if", "the", "module_abspath", "cannot", "be", "made", "into", "a", "relative", "path", "relative", "to", "the", "working_tree_dir", ":", "note", ":", "will", "overwrite", "existing", "files", "!", ":", "note", ":", "as", "we", "rewrite", "both", "the", "git", "file", "as", "well", "as", "the", "module", "configuration", "we", "might", "fail", "on", "the", "configuration", "and", "will", "not", "roll", "back", "changes", "done", "to", "the", "git", "file", ".", "This", "should", "be", "a", "non", "-", "issue", "but", "may", "easily", "be", "fixed", "if", "it", "becomes", "one", ":", "param", "working_tree_dir", ":", "directory", "to", "write", "the", ".", "git", "file", "into", ":", "param", "module_abspath", ":", "absolute", "path", "to", "the", "bare", "repository" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/zhao_2016.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/zhao_2016.py#L82-L102
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] C_SITE = self.SITE_COEFFS[imt] s_c, idx = self._get_site_classification(sites.vs30) sa_rock = (self.get_magnitude_scaling_term(C, rup) + self.get_sof_term(C, rup) + self.get_depth_term(C, rup) + self.get_distance_term(C, dists, rup)) sa_soil = self.add_site_amplification(C, C_SITE, sites, sa_rock, idx, rup) stddevs = self.get_stddevs(C, sites.vs30.shape, idx, stddev_types) return sa_soil, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# extracting dictionary of coefficients specific to required", "# intensity measure type.", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "C_SITE", "=", "self", ".", "SITE_COEFFS", "[", "imt", "]", "s_c", ",", "idx", "=", "self", ".", "_get_site_classification", "(", "sites", ".", "vs30", ")", "sa_rock", "=", "(", "self", ".", "get_magnitude_scaling_term", "(", "C", ",", "rup", ")", "+", "self", ".", "get_sof_term", "(", "C", ",", "rup", ")", "+", "self", ".", "get_depth_term", "(", "C", ",", "rup", ")", "+", "self", ".", "get_distance_term", "(", "C", ",", "dists", ",", "rup", ")", ")", "sa_soil", "=", "self", ".", "add_site_amplification", "(", "C", ",", "C_SITE", ",", "sites", ",", "sa_rock", ",", "idx", ",", "rup", ")", "stddevs", "=", "self", ".", "get_stddevs", "(", "C", ",", "sites", ".", "vs30", ".", "shape", ",", "idx", ",", "stddev_types", ")", "return", "sa_soil", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
COLORFULBOARD/revision
revision/client.py
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/client.py#L225-L233
def tmp_file_path(self): """ :return: :rtype: str """ return os.path.normpath(os.path.join( TMP_DIR, self.filename ))
[ "def", "tmp_file_path", "(", "self", ")", ":", "return", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "TMP_DIR", ",", "self", ".", "filename", ")", ")" ]
:return: :rtype: str
[ ":", "return", ":", ":", "rtype", ":", "str" ]
python
train
PythonCharmers/python-future
src/future/types/newbytes.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newbytes.py#L324-L332
def rindex(self, sub, *args): ''' S.rindex(sub [,start [,end]]) -> int Like S.rfind() but raise ValueError when the substring is not found. ''' pos = self.rfind(sub, *args) if pos == -1: raise ValueError('substring not found') return pos
[ "def", "rindex", "(", "self", ",", "sub", ",", "*", "args", ")", ":", "pos", "=", "self", ".", "rfind", "(", "sub", ",", "*", "args", ")", "if", "pos", "==", "-", "1", ":", "raise", "ValueError", "(", "'substring not found'", ")", "return", "pos" ]
S.rindex(sub [,start [,end]]) -> int Like S.rfind() but raise ValueError when the substring is not found.
[ "S", ".", "rindex", "(", "sub", "[", "start", "[", "end", "]]", ")", "-", ">", "int" ]
python
train
dhermes/bezier
src/bezier/surface.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L322-L352
def edges(self): """The edges of the surface. .. doctest:: surface-edges :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5 , 1.0, 0.1875, 0.625, 0.0], ... [0.0, -0.1875, 0.0, 0.5 , 0.625, 1.0], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> edge1, _, _ = surface.edges >>> edge1 <Curve (degree=2, dimension=2)> >>> edge1.nodes array([[ 0. , 0.5 , 1. ], [ 0. , -0.1875, 0. ]]) Returns: Tuple[~bezier.curve.Curve, ~bezier.curve.Curve, \ ~bezier.curve.Curve]: The edges of the surface. """ edge1, edge2, edge3 = self._get_edges() # NOTE: It is crucial that we return copies here. Since the edges # are cached, if they were mutable, callers could # inadvertently mutate the cached value. edge1 = edge1._copy() # pylint: disable=protected-access edge2 = edge2._copy() # pylint: disable=protected-access edge3 = edge3._copy() # pylint: disable=protected-access return edge1, edge2, edge3
[ "def", "edges", "(", "self", ")", ":", "edge1", ",", "edge2", ",", "edge3", "=", "self", ".", "_get_edges", "(", ")", "# NOTE: It is crucial that we return copies here. Since the edges", "# are cached, if they were mutable, callers could", "# inadvertently mutate the cached value.", "edge1", "=", "edge1", ".", "_copy", "(", ")", "# pylint: disable=protected-access", "edge2", "=", "edge2", ".", "_copy", "(", ")", "# pylint: disable=protected-access", "edge3", "=", "edge3", ".", "_copy", "(", ")", "# pylint: disable=protected-access", "return", "edge1", ",", "edge2", ",", "edge3" ]
The edges of the surface. .. doctest:: surface-edges :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5 , 1.0, 0.1875, 0.625, 0.0], ... [0.0, -0.1875, 0.0, 0.5 , 0.625, 1.0], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> edge1, _, _ = surface.edges >>> edge1 <Curve (degree=2, dimension=2)> >>> edge1.nodes array([[ 0. , 0.5 , 1. ], [ 0. , -0.1875, 0. ]]) Returns: Tuple[~bezier.curve.Curve, ~bezier.curve.Curve, \ ~bezier.curve.Curve]: The edges of the surface.
[ "The", "edges", "of", "the", "surface", "." ]
python
train
un33k/django-ipware
ipware/utils.py
https://github.com/un33k/django-ipware/blob/dc6b754137d1bb7d056ac206a6e0443aa3ed68dc/ipware/utils.py#L91-L100
def get_ip_info(ip_str): """ Given a string, it returns a tuple of (IP, Routable). """ ip = None is_routable_ip = False if is_valid_ip(ip_str): ip = ip_str is_routable_ip = is_public_ip(ip) return ip, is_routable_ip
[ "def", "get_ip_info", "(", "ip_str", ")", ":", "ip", "=", "None", "is_routable_ip", "=", "False", "if", "is_valid_ip", "(", "ip_str", ")", ":", "ip", "=", "ip_str", "is_routable_ip", "=", "is_public_ip", "(", "ip", ")", "return", "ip", ",", "is_routable_ip" ]
Given a string, it returns a tuple of (IP, Routable).
[ "Given", "a", "string", "it", "returns", "a", "tuple", "of", "(", "IP", "Routable", ")", "." ]
python
train
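is_valid_ip and is_public_ip live in the same ipware.utils module, so the helper returns (None, False) for junk input and flags private-range addresses as non-routable.

from ipware.utils import get_ip_info

print(get_ip_info('177.139.233.139'))  # ('177.139.233.139', True)
print(get_ip_info('10.0.0.5'))         # ('10.0.0.5', False) -- private range
print(get_ip_info('not-an-ip'))        # (None, False)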
log2timeline/dfvfs
dfvfs/vfs/tar_file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/tar_file_system.py#L165-L190
def GetTARInfoByPathSpec(self, path_spec): """Retrieves the TAR info for a path specification. Args: path_spec (PathSpec): a path specification. Returns: tarfile.TARInfo: TAR info or None if it does not exist. Raises: PathSpecError: if the path specification is incorrect. """ location = getattr(path_spec, 'location', None) if location is None: raise errors.PathSpecError('Path specification missing location.') if not location.startswith(self.LOCATION_ROOT): raise errors.PathSpecError('Invalid location in path specification.') if len(location) == 1: return None try: return self._tar_file.getmember(location[1:]) except KeyError: pass
[ "def", "GetTARInfoByPathSpec", "(", "self", ",", "path_spec", ")", ":", "location", "=", "getattr", "(", "path_spec", ",", "'location'", ",", "None", ")", "if", "location", "is", "None", ":", "raise", "errors", ".", "PathSpecError", "(", "'Path specification missing location.'", ")", "if", "not", "location", ".", "startswith", "(", "self", ".", "LOCATION_ROOT", ")", ":", "raise", "errors", ".", "PathSpecError", "(", "'Invalid location in path specification.'", ")", "if", "len", "(", "location", ")", "==", "1", ":", "return", "None", "try", ":", "return", "self", ".", "_tar_file", ".", "getmember", "(", "location", "[", "1", ":", "]", ")", "except", "KeyError", ":", "pass" ]
Retrieves the TAR info for a path specification. Args: path_spec (PathSpec): a path specification. Returns: tarfile.TARInfo: TAR info or None if it does not exist. Raises: PathSpecError: if the path specification is incorrect.
[ "Retrieves", "the", "TAR", "info", "for", "a", "path", "specification", "." ]
python
train
senaite/senaite.core
bika/lims/content/calculation.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/calculation.py#L237-L262
def getCalculationDependencies(self, flat=False, deps=None): """ Recursively calculates all dependencies of this calculation. The return value is a dictionary of dictionaries (of dictionaries...) {service_UID1: {service_UID2: {service_UID3: {}, service_UID4: {}, }, }, } set flat=True to get a simple list of AnalysisService objects """ if deps is None: deps = [] if flat is True else {} for service in self.getDependentServices(): calc = service.getCalculation() if calc: calc.getCalculationDependencies(flat, deps) if flat: deps.append(service) else: deps[service.UID()] = {} return deps
[ "def", "getCalculationDependencies", "(", "self", ",", "flat", "=", "False", ",", "deps", "=", "None", ")", ":", "if", "deps", "is", "None", ":", "deps", "=", "[", "]", "if", "flat", "is", "True", "else", "{", "}", "for", "service", "in", "self", ".", "getDependentServices", "(", ")", ":", "calc", "=", "service", ".", "getCalculation", "(", ")", "if", "calc", ":", "calc", ".", "getCalculationDependencies", "(", "flat", ",", "deps", ")", "if", "flat", ":", "deps", ".", "append", "(", "service", ")", "else", ":", "deps", "[", "service", ".", "UID", "(", ")", "]", "=", "{", "}", "return", "deps" ]
Recursively calculates all dependencies of this calculation. The return value is a dictionary of dictionaries (of dictionaries...) {service_UID1: {service_UID2: {service_UID3: {}, service_UID4: {}, }, }, } set flat=True to get a simple list of AnalysisService objects
[ "Recursively", "calculates", "all", "dependencies", "of", "this", "calculation", ".", "The", "return", "value", "is", "a", "dictionary", "of", "dictionaries", "(", "of", "dictionaries", "...", ")" ]
python
train
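A hedged sketch of the two return shapes, assuming calculation is a Calculation object with dependent services already configured: the default is a nested dict keyed by service UID, while flat=True yields a flat list of the AnalysisService objects themselves.

deps_tree = calculation.getCalculationDependencies()
# e.g. {'uid-ca': {'uid-mg': {}}, 'uid-na': {}}

deps_flat = calculation.getCalculationDependencies(flat=True)
# e.g. [<AnalysisService Mg>, <AnalysisService Ca>, <AnalysisService Na>]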
hydraplatform/hydra-base
hydra_base/lib/data.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/data.py#L853-L866
def check_dataset_in_collection(dataset_id, collection_id, **kwargs): """ Check whether a dataset is contained inside a collection :param dataset ID :param collection ID :returns 'Y' or 'N' """ _get_collection(collection_id) collection_item = _get_collection_item(collection_id, dataset_id) if collection_item is None: return 'N' else: return 'Y'
[ "def", "check_dataset_in_collection", "(", "dataset_id", ",", "collection_id", ",", "*", "*", "kwargs", ")", ":", "_get_collection", "(", "collection_id", ")", "collection_item", "=", "_get_collection_item", "(", "collection_id", ",", "dataset_id", ")", "if", "collection_item", "is", "None", ":", "return", "'N'", "else", ":", "return", "'Y'" ]
Check whether a dataset is contained inside a collection :param dataset ID :param collection ID :returns 'Y' or 'N'
[ "Check", "whether", "a", "dataset", "is", "contained", "inside", "a", "collection", ":", "param", "dataset", "ID", ":", "param", "collection", "ID", ":", "returns", "Y", "or", "N" ]
python
train
wavycloud/pyboto3
pyboto3/rds.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/rds.py#L6300-L6729
def modify_db_instance(DBInstanceIdentifier=None, AllocatedStorage=None, DBInstanceClass=None, DBSubnetGroupName=None, DBSecurityGroups=None, VpcSecurityGroupIds=None, ApplyImmediately=None, MasterUserPassword=None, DBParameterGroupName=None, BackupRetentionPeriod=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AllowMajorVersionUpgrade=None, AutoMinorVersionUpgrade=None, LicenseModel=None, Iops=None, OptionGroupName=None, NewDBInstanceIdentifier=None, StorageType=None, TdeCredentialArn=None, TdeCredentialPassword=None, CACertificateIdentifier=None, Domain=None, CopyTagsToSnapshot=None, MonitoringInterval=None, DBPortNumber=None, PubliclyAccessible=None, MonitoringRoleArn=None, DomainIAMRoleName=None, PromotionTier=None, EnableIAMDatabaseAuthentication=None): """ Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. See also: AWS API Documentation Examples This example immediately changes the specified settings for the specified DB instance. Expected Output: :example: response = client.modify_db_instance( DBInstanceIdentifier='string', AllocatedStorage=123, DBInstanceClass='string', DBSubnetGroupName='string', DBSecurityGroups=[ 'string', ], VpcSecurityGroupIds=[ 'string', ], ApplyImmediately=True|False, MasterUserPassword='string', DBParameterGroupName='string', BackupRetentionPeriod=123, PreferredBackupWindow='string', PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AllowMajorVersionUpgrade=True|False, AutoMinorVersionUpgrade=True|False, LicenseModel='string', Iops=123, OptionGroupName='string', NewDBInstanceIdentifier='string', StorageType='string', TdeCredentialArn='string', TdeCredentialPassword='string', CACertificateIdentifier='string', Domain='string', CopyTagsToSnapshot=True|False, MonitoringInterval=123, DBPortNumber=123, PubliclyAccessible=True|False, MonitoringRoleArn='string', DomainIAMRoleName='string', PromotionTier=123, EnableIAMDatabaseAuthentication=True|False ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] The DB instance identifier. This value is stored as a lowercase string. Constraints: Must be the identifier for an existing DB instance Must contain from 1 to 63 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens :type AllocatedStorage: integer :param AllocatedStorage: The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request. MySQL Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer MariaDB Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer PostgreSQL Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. 
Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer Oracle Default: Uses existing setting Valid Values: 10-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. SQL Server Cannot be modified. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. :type DBInstanceClass: string :param DBInstanceClass: The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action. Note that not all instance classes are available in all regions for all DB engines. Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request. Default: Uses existing setting Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large :type DBSubnetGroupName: string :param DBSubnetGroupName: The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance is not in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Updating the VPC for a DB Instance . Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Example: mySubnetGroup :type DBSecurityGroups: list :param DBSecurityGroups: A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens (string) -- :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: A list of EC2 VPC security groups to authorize on this DB instance. 
This change is asynchronously applied as soon as possible. Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens (string) -- :type ApplyImmediately: boolean :param ApplyImmediately: Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance. If this parameter is set to false , changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance , or the next failure reboot. Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied. Default: false :type MasterUserPassword: string :param MasterUserPassword: The new password for the DB instance master user. Can be any printable ASCII character except '/', ''', or '@'. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Default: Uses existing setting Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server). Note Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked. :type DBParameterGroupName: string :param DBParameterGroupName: The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window. Default: Uses existing setting Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance. :type BackupRetentionPeriod: integer :param BackupRetentionPeriod: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible. 
Default: Uses existing setting Constraints: Must be a value from 0 to 35 Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6 Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5 Cannot be set to 0 if the DB instance is a source to Read Replicas :type PreferredBackupWindow: string :param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: Must be in the format hh24:mi-hh24:mi Times should be in Universal Time Coordinated (UTC) Must not conflict with the preferred maintenance window Must be at least 30 minutes :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type MultiAZ: boolean :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Constraints: Cannot be specified if the DB instance is a Read Replica. :type EngineVersion: string :param EngineVersion: The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. For a list of valid engine versions, see CreateDBInstance . :type AllowMajorVersionUpgrade: boolean :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. 
An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version. :type LicenseModel: string :param LicenseModel: The license model for the DB instance. Valid values: license-included | bring-your-own-license | general-public-license :type Iops: integer :param Iops: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Default: Uses existing setting Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. SQL Server Setting the IOPS value for the SQL Server database engine is not supported. Type: Integer If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. :type OptionGroupName: string :param OptionGroupName: Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type NewDBInstanceIdentifier: string :param NewDBInstanceIdentifier: The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately to false. This value is stored as a lowercase string. 
Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens :type StorageType: string :param StorageType: Specifies the storage type to be associated with the DB instance. Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type TdeCredentialArn: string :param TdeCredentialArn: The ARN from the Key Store with which to associate the instance for TDE encryption. :type TdeCredentialPassword: string :param TdeCredentialPassword: The password for the given ARN from the Key Store in order to access the device. :type CACertificateIdentifier: string :param CACertificateIdentifier: Indicates the certificate that needs to be associated with the instance. :type Domain: string :param Domain: The Active Directory Domain to move the instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently only a Microsoft SQL Server instance can be created in a Active Directory Domain. :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false. :type MonitoringInterval: integer :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 :type DBPortNumber: integer :param DBPortNumber: The port number on which the database accepts connections. The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance. Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter. MySQL Default: 3306 Valid Values: 1150-65535 MariaDB Default: 3306 Valid Values: 1150-65535 PostgreSQL Default: 5432 Valid Values: 1150-65535 Type: Integer Oracle Default: 1521 Valid Values: 1150-65535 SQL Server Default: 1433 Valid Values: 1150-65535 except for 1434 , 3389 , 47001 , 49152 , and 49152 through 49156 . Amazon Aurora Default: 3306 Valid Values: 1150-65535 :type PubliclyAccessible: boolean :param PubliclyAccessible: Boolean value that indicates if the DB instance has a publicly resolvable DNS name. Set to True to make the DB instance Internet-facing with a publicly resolvable DNS name, which resolves to a public IP address. Set to False to make the DB instance internal with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be true in order for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter. Default: false :type MonitoringRoleArn: string :param MonitoringRoleArn: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess . For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring . 
If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. :type DomainIAMRoleName: string :param DomainIAMRoleName: The name of the IAM role to use when making API calls to the Directory Service. :type PromotionTier: integer :param PromotionTier: A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster . Default: 1 Valid Values: 0 - 15 :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Default: false :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False 
} } :returns: CreateDBInstance DeleteDBInstance ModifyDBInstance """ pass
[ "def", "modify_db_instance", "(", "DBInstanceIdentifier", "=", "None", ",", "AllocatedStorage", "=", "None", ",", "DBInstanceClass", "=", "None", ",", "DBSubnetGroupName", "=", "None", ",", "DBSecurityGroups", "=", "None", ",", "VpcSecurityGroupIds", "=", "None", ",", "ApplyImmediately", "=", "None", ",", "MasterUserPassword", "=", "None", ",", "DBParameterGroupName", "=", "None", ",", "BackupRetentionPeriod", "=", "None", ",", "PreferredBackupWindow", "=", "None", ",", "PreferredMaintenanceWindow", "=", "None", ",", "MultiAZ", "=", "None", ",", "EngineVersion", "=", "None", ",", "AllowMajorVersionUpgrade", "=", "None", ",", "AutoMinorVersionUpgrade", "=", "None", ",", "LicenseModel", "=", "None", ",", "Iops", "=", "None", ",", "OptionGroupName", "=", "None", ",", "NewDBInstanceIdentifier", "=", "None", ",", "StorageType", "=", "None", ",", "TdeCredentialArn", "=", "None", ",", "TdeCredentialPassword", "=", "None", ",", "CACertificateIdentifier", "=", "None", ",", "Domain", "=", "None", ",", "CopyTagsToSnapshot", "=", "None", ",", "MonitoringInterval", "=", "None", ",", "DBPortNumber", "=", "None", ",", "PubliclyAccessible", "=", "None", ",", "MonitoringRoleArn", "=", "None", ",", "DomainIAMRoleName", "=", "None", ",", "PromotionTier", "=", "None", ",", "EnableIAMDatabaseAuthentication", "=", "None", ")", ":", "pass" ]
Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. See also: AWS API Documentation Examples This example immediately changes the specified settings for the specified DB instance. Expected Output: :example: response = client.modify_db_instance( DBInstanceIdentifier='string', AllocatedStorage=123, DBInstanceClass='string', DBSubnetGroupName='string', DBSecurityGroups=[ 'string', ], VpcSecurityGroupIds=[ 'string', ], ApplyImmediately=True|False, MasterUserPassword='string', DBParameterGroupName='string', BackupRetentionPeriod=123, PreferredBackupWindow='string', PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AllowMajorVersionUpgrade=True|False, AutoMinorVersionUpgrade=True|False, LicenseModel='string', Iops=123, OptionGroupName='string', NewDBInstanceIdentifier='string', StorageType='string', TdeCredentialArn='string', TdeCredentialPassword='string', CACertificateIdentifier='string', Domain='string', CopyTagsToSnapshot=True|False, MonitoringInterval=123, DBPortNumber=123, PubliclyAccessible=True|False, MonitoringRoleArn='string', DomainIAMRoleName='string', PromotionTier=123, EnableIAMDatabaseAuthentication=True|False ) :type DBInstanceIdentifier: string :param DBInstanceIdentifier: [REQUIRED] The DB instance identifier. This value is stored as a lowercase string. Constraints: Must be the identifier for an existing DB instance Must contain from 1 to 63 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens :type AllocatedStorage: integer :param AllocatedStorage: The new storage capacity of the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless ApplyImmediately is set to true for this request. MySQL Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer MariaDB Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer PostgreSQL Default: Uses existing setting Valid Values: 5-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. Type: Integer Oracle Default: Uses existing setting Valid Values: 10-6144 Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. SQL Server Cannot be modified. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. 
Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. :type DBInstanceClass: string :param DBInstanceClass: The new compute and memory capacity of the DB instance. To determine the instance classes that are available for a particular DB engine, use the DescribeOrderableDBInstanceOptions action. Note that not all instance classes are available in all regions for all DB engines. Passing a value for this setting causes an outage during the change and is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request. Default: Uses existing setting Valid Values: db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge | db.m3.medium | db.m3.large | db.m3.xlarge | db.m3.2xlarge | db.m4.large | db.m4.xlarge | db.m4.2xlarge | db.m4.4xlarge | db.m4.10xlarge | db.r3.large | db.r3.xlarge | db.r3.2xlarge | db.r3.4xlarge | db.r3.8xlarge | db.t2.micro | db.t2.small | db.t2.medium | db.t2.large :type DBSubnetGroupName: string :param DBSubnetGroupName: The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance is not in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Updating the VPC for a DB Instance . Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Example: mySubnetGroup :type DBSecurityGroups: list :param DBSecurityGroups: A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens (string) -- :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible. Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens (string) -- :type ApplyImmediately: boolean :param ApplyImmediately: Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance. If this parameter is set to false , changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and will be applied on the next call to RebootDBInstance , or the next failure reboot. 
Review the table of parameters in Modifying a DB Instance and Using the Apply Immediately Parameter to see the impact that setting ApplyImmediately to true or false has for each modified parameter and to determine when the changes will be applied. Default: false :type MasterUserPassword: string :param MasterUserPassword: The new password for the DB instance master user. Can be any printable ASCII character except '/', ''', or '@'. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Default: Uses existing setting Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server). Note Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked. :type DBParameterGroupName: string :param DBParameterGroupName: The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window. Default: Uses existing setting Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance. :type BackupRetentionPeriod: integer :param BackupRetentionPeriod: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible. Default: Uses existing setting Constraints: Must be a value from 0 to 35 Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6 Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5 Cannot be set to 0 if the DB instance is a source to Read Replicas :type PreferredBackupWindow: string :param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: Must be in the format hh24:mi-hh24:mi Times should be in Universal Time Coordinated (UTC) Must not conflict with the preferred maintenance window Must be at least 30 minutes :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. 
If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type MultiAZ: boolean :param MultiAZ: Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Constraints: Cannot be specified if the DB instance is a Read Replica. :type EngineVersion: string :param EngineVersion: The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. For a list of valid engine versions, see CreateDBInstance . :type AllowMajorVersionUpgrade: boolean :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version. :type LicenseModel: string :param LicenseModel: The license model for the DB instance. Valid values: license-included | bring-your-own-license | general-public-license :type Iops: integer :param Iops: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. Default: Uses existing setting Constraints: Value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. SQL Server Setting the IOPS value for the SQL Server database engine is not supported. 
Type: Integer If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance will be available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance will be suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. :type OptionGroupName: string :param OptionGroupName: Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted. Permanent options, such as the TDE option for Oracle Advanced Security TDE, cannot be removed from an option group, and that option group cannot be removed from a DB instance once it is associated with a DB instance :type NewDBInstanceIdentifier: string :param NewDBInstanceIdentifier: The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately to false. This value is stored as a lowercase string. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens :type StorageType: string :param StorageType: Specifies the storage type to be associated with the DB instance. Valid values: standard | gp2 | io1 If you specify io1 , you must also include a value for the Iops parameter. Default: io1 if the Iops parameter is specified; otherwise standard :type TdeCredentialArn: string :param TdeCredentialArn: The ARN from the Key Store with which to associate the instance for TDE encryption. :type TdeCredentialPassword: string :param TdeCredentialPassword: The password for the given ARN from the Key Store in order to access the device. :type CACertificateIdentifier: string :param CACertificateIdentifier: Indicates the certificate that needs to be associated with the instance. :type Domain: string :param Domain: The Active Directory Domain to move the instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently only a Microsoft SQL Server instance can be created in a Active Directory Domain. :type CopyTagsToSnapshot: boolean :param CopyTagsToSnapshot: True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false. The default is false. 
:type MonitoringInterval: integer :param MonitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 :type DBPortNumber: integer :param DBPortNumber: The port number on which the database accepts connections. The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance. Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter. MySQL Default: 3306 Valid Values: 1150-65535 MariaDB Default: 3306 Valid Values: 1150-65535 PostgreSQL Default: 5432 Valid Values: 1150-65535 Type: Integer Oracle Default: 1521 Valid Values: 1150-65535 SQL Server Default: 1433 Valid Values: 1150-65535 except for 1434 , 3389 , 47001 , 49152 , and 49152 through 49156 . Amazon Aurora Default: 3306 Valid Values: 1150-65535 :type PubliclyAccessible: boolean :param PubliclyAccessible: Boolean value that indicates if the DB instance has a publicly resolvable DNS name. Set to True to make the DB instance Internet-facing with a publicly resolvable DNS name, which resolves to a public IP address. Set to False to make the DB instance internal with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be true in order for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter. Default: false :type MonitoringRoleArn: string :param MonitoringRoleArn: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess . For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring . If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. :type DomainIAMRoleName: string :param DomainIAMRoleName: The name of the IAM role to use when making API calls to the Directory Service. :type PromotionTier: integer :param PromotionTier: A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster . Default: 1 Valid Values: 0 - 15 :type EnableIAMDatabaseAuthentication: boolean :param EnableIAMDatabaseAuthentication: True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false. 
You can enable IAM database authentication for the following database engines For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Default: false :rtype: dict :return: { 'DBInstance': { 'DBInstanceIdentifier': 'string', 'DBInstanceClass': 'string', 'Engine': 'string', 'DBInstanceStatus': 'string', 'MasterUsername': 'string', 'DBName': 'string', 'Endpoint': { 'Address': 'string', 'Port': 123, 'HostedZoneId': 'string' }, 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'PreferredBackupWindow': 'string', 'BackupRetentionPeriod': 123, 'DBSecurityGroups': [ { 'DBSecurityGroupName': 'string', 'Status': 'string' }, ], 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'DBParameterGroups': [ { 'DBParameterGroupName': 'string', 'ParameterApplyStatus': 'string' }, ], 'AvailabilityZone': 'string', 'DBSubnetGroup': { 'DBSubnetGroupName': 'string', 'DBSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ], 'DBSubnetGroupArn': 'string' }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'DBInstanceClass': 'string', 'AllocatedStorage': 123, 'MasterUserPassword': 'string', 'Port': 123, 'BackupRetentionPeriod': 123, 'MultiAZ': True|False, 'EngineVersion': 'string', 'LicenseModel': 'string', 'Iops': 123, 'DBInstanceIdentifier': 'string', 'StorageType': 'string', 'CACertificateIdentifier': 'string', 'DBSubnetGroupName': 'string' }, 'LatestRestorableTime': datetime(2015, 1, 1), 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'ReadReplicaSourceDBInstanceIdentifier': 'string', 'ReadReplicaDBInstanceIdentifiers': [ 'string', ], 'ReadReplicaDBClusterIdentifiers': [ 'string', ], 'LicenseModel': 'string', 'Iops': 123, 'OptionGroupMemberships': [ { 'OptionGroupName': 'string', 'Status': 'string' }, ], 'CharacterSetName': 'string', 'SecondaryAvailabilityZone': 'string', 'PubliclyAccessible': True|False, 'StatusInfos': [ { 'StatusType': 'string', 'Normal': True|False, 'Status': 'string', 'Message': 'string' }, ], 'StorageType': 'string', 'TdeCredentialArn': 'string', 'DbInstancePort': 123, 'DBClusterIdentifier': 'string', 'StorageEncrypted': True|False, 'KmsKeyId': 'string', 'DbiResourceId': 'string', 'CACertificateIdentifier': 'string', 'DomainMemberships': [ { 'Domain': 'string', 'Status': 'string', 'FQDN': 'string', 'IAMRoleName': 'string' }, ], 'CopyTagsToSnapshot': True|False, 'MonitoringInterval': 123, 'EnhancedMonitoringResourceArn': 'string', 'MonitoringRoleArn': 'string', 'PromotionTier': 123, 'DBInstanceArn': 'string', 'Timezone': 'string', 'IAMDatabaseAuthenticationEnabled': True|False } } :returns: CreateDBInstance DeleteDBInstance ModifyDBInstance
[ "Modifies", "settings", "for", "a", "DB", "instance", ".", "You", "can", "change", "one", "or", "more", "database", "configuration", "parameters", "by", "specifying", "these", "parameters", "and", "the", "new", "values", "in", "the", "request", ".", "See", "also", ":", "AWS", "API", "Documentation", "Examples", "This", "example", "immediately", "changes", "the", "specified", "settings", "for", "the", "specified", "DB", "instance", ".", "Expected", "Output", ":", ":", "example", ":", "response", "=", "client", ".", "modify_db_instance", "(", "DBInstanceIdentifier", "=", "string", "AllocatedStorage", "=", "123", "DBInstanceClass", "=", "string", "DBSubnetGroupName", "=", "string", "DBSecurityGroups", "=", "[", "string", "]", "VpcSecurityGroupIds", "=", "[", "string", "]", "ApplyImmediately", "=", "True|False", "MasterUserPassword", "=", "string", "DBParameterGroupName", "=", "string", "BackupRetentionPeriod", "=", "123", "PreferredBackupWindow", "=", "string", "PreferredMaintenanceWindow", "=", "string", "MultiAZ", "=", "True|False", "EngineVersion", "=", "string", "AllowMajorVersionUpgrade", "=", "True|False", "AutoMinorVersionUpgrade", "=", "True|False", "LicenseModel", "=", "string", "Iops", "=", "123", "OptionGroupName", "=", "string", "NewDBInstanceIdentifier", "=", "string", "StorageType", "=", "string", "TdeCredentialArn", "=", "string", "TdeCredentialPassword", "=", "string", "CACertificateIdentifier", "=", "string", "Domain", "=", "string", "CopyTagsToSnapshot", "=", "True|False", "MonitoringInterval", "=", "123", "DBPortNumber", "=", "123", "PubliclyAccessible", "=", "True|False", "MonitoringRoleArn", "=", "string", "DomainIAMRoleName", "=", "string", "PromotionTier", "=", "123", "EnableIAMDatabaseAuthentication", "=", "True|False", ")", ":", "type", "DBInstanceIdentifier", ":", "string", ":", "param", "DBInstanceIdentifier", ":", "[", "REQUIRED", "]", "The", "DB", "instance", "identifier", ".", "This", "value", "is", "stored", "as", "a", "lowercase", "string", ".", "Constraints", ":", "Must", "be", "the", "identifier", "for", "an", "existing", "DB", "instance", "Must", "contain", "from", "1", "to", "63", "alphanumeric", "characters", "or", "hyphens", "First", "character", "must", "be", "a", "letter", "Cannot", "end", "with", "a", "hyphen", "or", "contain", "two", "consecutive", "hyphens" ]
python
train
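A hedged usage sketch for the entry above: calling ModifyDBInstance through the real boto3 RDS client rather than the stub. The region, instance identifier, and new settings are placeholder values, not taken from the dataset.

import boto3

rds = boto3.client('rds', region_name='us-east-1')   # placeholder region
response = rds.modify_db_instance(
    DBInstanceIdentifier='mydbinstance',   # placeholder identifier
    AllocatedStorage=60,                   # must be at least 10% greater than the current value
    DBInstanceClass='db.m4.large',
    BackupRetentionPeriod=7,
    ApplyImmediately=True,                 # apply now instead of waiting for the maintenance window
)
print(response['DBInstance']['PendingModifiedValues'])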
djgagne/hagelslag
hagelslag/evaluation/ObjectEvaluator.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ObjectEvaluator.py#L62-L87
def load_forecasts(self): """ Loads the forecast files and gathers the forecast information into pandas DataFrames. """ forecast_path = self.forecast_json_path + "/{0}/{1}/".format(self.run_date.strftime("%Y%m%d"), self.ensemble_member) forecast_files = sorted(glob(forecast_path + "*.json")) for forecast_file in forecast_files: file_obj = open(forecast_file) json_obj = json.load(file_obj) file_obj.close() track_id = json_obj['properties']["id"] obs_track_id = json_obj['properties']["obs_track_id"] forecast_hours = json_obj['properties']['times'] duration = json_obj['properties']['duration'] for f, feature in enumerate(json_obj['features']): area = np.sum(feature["properties"]["masks"]) step_id = track_id + "_{0:02d}".format(f) for model_type in self.model_types: for model_name in self.model_names[model_type]: prediction = feature['properties'][model_type + "_" + model_name.replace(" ", "-")] if model_type == "condition": prediction = [prediction] row = [track_id, obs_track_id, self.ensemble_name, self.ensemble_member, forecast_hours[f], f + 1, duration, area] + prediction self.forecasts[model_type][model_name].loc[step_id] = row
[ "def", "load_forecasts", "(", "self", ")", ":", "forecast_path", "=", "self", ".", "forecast_json_path", "+", "\"/{0}/{1}/\"", ".", "format", "(", "self", ".", "run_date", ".", "strftime", "(", "\"%Y%m%d\"", ")", ",", "self", ".", "ensemble_member", ")", "forecast_files", "=", "sorted", "(", "glob", "(", "forecast_path", "+", "\"*.json\"", ")", ")", "for", "forecast_file", "in", "forecast_files", ":", "file_obj", "=", "open", "(", "forecast_file", ")", "json_obj", "=", "json", ".", "load", "(", "file_obj", ")", "file_obj", ".", "close", "(", ")", "track_id", "=", "json_obj", "[", "'properties'", "]", "[", "\"id\"", "]", "obs_track_id", "=", "json_obj", "[", "'properties'", "]", "[", "\"obs_track_id\"", "]", "forecast_hours", "=", "json_obj", "[", "'properties'", "]", "[", "'times'", "]", "duration", "=", "json_obj", "[", "'properties'", "]", "[", "'duration'", "]", "for", "f", ",", "feature", "in", "enumerate", "(", "json_obj", "[", "'features'", "]", ")", ":", "area", "=", "np", ".", "sum", "(", "feature", "[", "\"properties\"", "]", "[", "\"masks\"", "]", ")", "step_id", "=", "track_id", "+", "\"_{0:02d}\"", ".", "format", "(", "f", ")", "for", "model_type", "in", "self", ".", "model_types", ":", "for", "model_name", "in", "self", ".", "model_names", "[", "model_type", "]", ":", "prediction", "=", "feature", "[", "'properties'", "]", "[", "model_type", "+", "\"_\"", "+", "model_name", ".", "replace", "(", "\" \"", ",", "\"-\"", ")", "]", "if", "model_type", "==", "\"condition\"", ":", "prediction", "=", "[", "prediction", "]", "row", "=", "[", "track_id", ",", "obs_track_id", ",", "self", ".", "ensemble_name", ",", "self", ".", "ensemble_member", ",", "forecast_hours", "[", "f", "]", ",", "f", "+", "1", ",", "duration", ",", "area", "]", "+", "prediction", "self", ".", "forecasts", "[", "model_type", "]", "[", "model_name", "]", ".", "loc", "[", "step_id", "]", "=", "row" ]
Loads the forecast files and gathers the forecast information into pandas DataFrames.
[ "Loads", "the", "forecast", "files", "and", "gathers", "the", "forecast", "information", "into", "pandas", "DataFrames", "." ]
python
train
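A minimal sketch of the loading pattern documented above, assuming the same per-track JSON layout (a 'properties' block with 'id' and 'times', plus a 'features' list); the column set is simplified to two fields and the model-prediction handling is omitted.

from glob import glob
import json

import pandas as pd

def load_forecast_frame(forecast_dir):
    # Simplified: one row per (track, step), indexed by "<track_id>_<step>".
    frame = pd.DataFrame(columns=["track_id", "forecast_hour"])
    for forecast_file in sorted(glob(forecast_dir + "/*.json")):
        with open(forecast_file) as file_obj:
            json_obj = json.load(file_obj)
        track_id = json_obj["properties"]["id"]
        forecast_hours = json_obj["properties"]["times"]
        for f, _feature in enumerate(json_obj["features"]):
            step_id = "{0}_{1:02d}".format(track_id, f)
            frame.loc[step_id] = [track_id, forecast_hours[f]]
    return frame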
budacom/trading-bots
trading_bots/bots/registry.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/bots/registry.py#L199-L254
def populate(self, installed_bots=None): """ Load bots. Import each bot module. It is thread-safe and idempotent, but not re-entrant. """ if self.ready: return # populate() might be called by two threads in parallel on servers # that create threads before initializing the WSGI callable. with self._lock: if self.ready: return # An RLock prevents other threads from entering this section. The # compare and set operation below is atomic. if self.loading: # Prevent re-entrant calls to avoid running AppConfig.ready() # methods twice. raise RuntimeError("populate() isn't re-entrant") self.loading = True # Phase 1: Initialize bots for entry in installed_bots or {}: if isinstance(entry, Bot): cls = entry entry = '.'.join([cls.__module__, cls.__name__]) bot_reg = BotRegistry.create(entry) if bot_reg.label in self.bots: raise ImproperlyConfigured( "Bot labels aren't unique, " "duplicates: %s" % bot_reg.label) self.bots[bot_reg.label] = bot_reg bot_reg.bots = self # Check for duplicate bot names. counts = Counter( bot_reg.name for bot_reg in self.bots.values()) duplicates = [ name for name, count in counts.most_common() if count > 1] if duplicates: raise ImproperlyConfigured( "Bot names aren't unique, " "duplicates: %s" % ", ".join(duplicates)) self.bots_ready = True # Phase 2: import config files for bot in self.bots.values(): bot.import_configs() self.configs_ready = True self.ready = True
[ "def", "populate", "(", "self", ",", "installed_bots", "=", "None", ")", ":", "if", "self", ".", "ready", ":", "return", "# populate() might be called by two threads in parallel on servers", "# that create threads before initializing the WSGI callable.", "with", "self", ".", "_lock", ":", "if", "self", ".", "ready", ":", "return", "# An RLock prevents other threads from entering this section. The", "# compare and set operation below is atomic.", "if", "self", ".", "loading", ":", "# Prevent re-entrant calls to avoid running AppConfig.ready()", "# methods twice.", "raise", "RuntimeError", "(", "\"populate() isn't re-entrant\"", ")", "self", ".", "loading", "=", "True", "# Phase 1: Initialize bots", "for", "entry", "in", "installed_bots", "or", "{", "}", ":", "if", "isinstance", "(", "entry", ",", "Bot", ")", ":", "cls", "=", "entry", "entry", "=", "'.'", ".", "join", "(", "[", "cls", ".", "__module__", ",", "cls", ".", "__name__", "]", ")", "bot_reg", "=", "BotRegistry", ".", "create", "(", "entry", ")", "if", "bot_reg", ".", "label", "in", "self", ".", "bots", ":", "raise", "ImproperlyConfigured", "(", "\"Bot labels aren't unique, \"", "\"duplicates: %s\"", "%", "bot_reg", ".", "label", ")", "self", ".", "bots", "[", "bot_reg", ".", "label", "]", "=", "bot_reg", "bot_reg", ".", "bots", "=", "self", "# Check for duplicate bot names.", "counts", "=", "Counter", "(", "bot_reg", ".", "name", "for", "bot_reg", "in", "self", ".", "bots", ".", "values", "(", ")", ")", "duplicates", "=", "[", "name", "for", "name", ",", "count", "in", "counts", ".", "most_common", "(", ")", "if", "count", ">", "1", "]", "if", "duplicates", ":", "raise", "ImproperlyConfigured", "(", "\"Bot names aren't unique, \"", "\"duplicates: %s\"", "%", "\", \"", ".", "join", "(", "duplicates", ")", ")", "self", ".", "bots_ready", "=", "True", "# Phase 2: import config files", "for", "bot", "in", "self", ".", "bots", ".", "values", "(", ")", ":", "bot", ".", "import_configs", "(", ")", "self", ".", "configs_ready", "=", "True", "self", ".", "ready", "=", "True" ]
Load bots. Import each bot module. It is thread-safe and idempotent, but not re-entrant.
[ "Load", "bots", ".", "Import", "each", "bot", "module", ".", "It", "is", "thread", "-", "safe", "and", "idempotent", "but", "not", "re", "-", "entrant", "." ]
python
train
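The populate() method above follows Django's app-registry pattern; the stripped-down sketch below keeps only the locking and re-entrancy guard, with the bot-specific bookkeeping replaced by a plain dict.

import threading

class MiniRegistry:
    def __init__(self):
        self._lock = threading.RLock()
        self.ready = False
        self.loading = False
        self.items = {}

    def populate(self, entries=None):
        if self.ready:                      # fast path: already populated
            return
        with self._lock:
            if self.ready:                  # another thread finished while we waited
                return
            if self.loading:                # same thread re-entered through the RLock
                raise RuntimeError("populate() isn't re-entrant")
            self.loading = True
            for entry in entries or ():
                self.items[entry] = entry
            self.ready = True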
titusjan/argos
argos/config/qtctis.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/qtctis.py#L54-L65
def createPenWidthCti(nodeName, defaultData=1.0, zeroValueText=None): """ Creates a FloatCti with defaults for configuring a QPen width. If specialValueZero is set, this string will be displayed when 0.0 is selected. If specialValueZero is None, the minValue will be 0.1 """ # A pen line width of zero indicates a cosmetic pen. This means that the pen width is # always drawn one pixel wide, independent of the transformation set on the painter. # Note that line widths other than 1 may be slow when anti aliasing is on. return FloatCti(nodeName, defaultData=defaultData, specialValueText=zeroValueText, minValue=0.1 if zeroValueText is None else 0.0, maxValue=100, stepSize=0.1, decimals=1)
[ "def", "createPenWidthCti", "(", "nodeName", ",", "defaultData", "=", "1.0", ",", "zeroValueText", "=", "None", ")", ":", "# A pen line width of zero indicates a cosmetic pen. This means that the pen width is", "# always drawn one pixel wide, independent of the transformation set on the painter.", "# Note that line widths other than 1 may be slow when anti aliasing is on.", "return", "FloatCti", "(", "nodeName", ",", "defaultData", "=", "defaultData", ",", "specialValueText", "=", "zeroValueText", ",", "minValue", "=", "0.1", "if", "zeroValueText", "is", "None", "else", "0.0", ",", "maxValue", "=", "100", ",", "stepSize", "=", "0.1", ",", "decimals", "=", "1", ")" ]
Creates a FloatCti with defaults for configuring a QPen width. If specialValueZero is set, this string will be displayed when 0.0 is selected. If specialValueZero is None, the minValue will be 0.1
[ "Creates", "a", "FloatCti", "with", "defaults", "for", "configuring", "a", "QPen", "width", "." ]
python
train
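A hedged usage sketch for createPenWidthCti; it assumes the argos package (and its Qt dependencies) is importable and only exercises the two documented modes: no zero shortcut versus a zeroValueText label for a cosmetic pen.

from argos.config.qtctis import createPenWidthCti

# Plain width config item: minimum is 0.1, so a cosmetic (zero-width) pen cannot be selected.
width_cti = createPenWidthCti('line width', defaultData=2.0)

# With zeroValueText set, 0.0 becomes selectable and is displayed as 'cosmetic'.
cosmetic_cti = createPenWidthCti('line width', defaultData=1.0, zeroValueText='cosmetic')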
robin900/gspread-dataframe
gspread_dataframe.py
https://github.com/robin900/gspread-dataframe/blob/b64fef7ec196bfed69362aa35c593f448830a735/gspread_dataframe.py#L59-L77
def _resize_to_minimum(worksheet, rows=None, cols=None): """ Resize the worksheet to guarantee a minimum size, either in rows, or columns, or both. Both rows and cols are optional. """ # get the current size current_cols, current_rows = ( worksheet.col_count, worksheet.row_count ) if rows is not None and rows <= current_rows: rows = None if cols is not None and cols <= current_cols: cols = None if cols is not None or rows is not None: worksheet.resize(rows, cols)
[ "def", "_resize_to_minimum", "(", "worksheet", ",", "rows", "=", "None", ",", "cols", "=", "None", ")", ":", "# get the current size", "current_cols", ",", "current_rows", "=", "(", "worksheet", ".", "col_count", ",", "worksheet", ".", "row_count", ")", "if", "rows", "is", "not", "None", "and", "rows", "<=", "current_rows", ":", "rows", "=", "None", "if", "cols", "is", "not", "None", "and", "cols", "<=", "current_cols", ":", "cols", "=", "None", "if", "cols", "is", "not", "None", "or", "rows", "is", "not", "None", ":", "worksheet", ".", "resize", "(", "rows", ",", "cols", ")" ]
Resize the worksheet to guarantee a minimum size, either in rows, or columns, or both. Both rows and cols are optional.
[ "Resize", "the", "worksheet", "to", "guarantee", "a", "minimum", "size", "either", "in", "rows", "or", "columns", "or", "both", "." ]
python
train
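A hedged usage sketch for the helper above; the service-account setup and spreadsheet name are placeholders, and _resize_to_minimum is a private helper, so calling it directly is only for illustration.

import gspread
from gspread_dataframe import _resize_to_minimum

gc = gspread.service_account()                       # assumes configured service-account credentials
worksheet = gc.open("example-sheet").sheet1          # placeholder spreadsheet name
_resize_to_minimum(worksheet, rows=1000, cols=26)    # no-op if the sheet is already at least this large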
grantmcconnaughey/Lintly
lintly/builds.py
https://github.com/grantmcconnaughey/Lintly/blob/73c1ee36740ac5bb2a32d3f24fca2a27f4d4e466/lintly/builds.py#L82-L97
def find_diff_violations(self, patch): """ Uses the diff for this build to find changed lines that also have violations. """ violations = collections.defaultdict(list) for line in patch.changed_lines: file_violations = self._all_violations.get(line['file_name']) if not file_violations: continue line_violations = [v for v in file_violations if v.line == line['line_number']] for v in line_violations: violations[line['file_name']].append(v) return violations
[ "def", "find_diff_violations", "(", "self", ",", "patch", ")", ":", "violations", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "line", "in", "patch", ".", "changed_lines", ":", "file_violations", "=", "self", ".", "_all_violations", ".", "get", "(", "line", "[", "'file_name'", "]", ")", "if", "not", "file_violations", ":", "continue", "line_violations", "=", "[", "v", "for", "v", "in", "file_violations", "if", "v", ".", "line", "==", "line", "[", "'line_number'", "]", "]", "for", "v", "in", "line_violations", ":", "violations", "[", "line", "[", "'file_name'", "]", "]", ".", "append", "(", "v", ")", "return", "violations" ]
Uses the diff for this build to find changed lines that also have violations.
[ "Uses", "the", "diff", "for", "this", "build", "to", "find", "changed", "lines", "that", "also", "have", "violations", "." ]
python
train
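A standalone sketch of the matching logic above, with the patch and violation objects replaced by plain dicts so it runs without Lintly installed.

import collections

def match_violations(changed_lines, all_violations):
    # Keep only violations that fall on lines changed in the diff, grouped by file.
    violations = collections.defaultdict(list)
    for line in changed_lines:
        for v in all_violations.get(line['file_name'], []):
            if v['line'] == line['line_number']:
                violations[line['file_name']].append(v)
    return violations

changed = [{'file_name': 'app.py', 'line_number': 10}]
found = match_violations(changed, {'app.py': [{'line': 10, 'code': 'E501'}]})
print(dict(found))   # {'app.py': [{'line': 10, 'code': 'E501'}]}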
mandeep/Travis-Encrypt
travis/cli.py
https://github.com/mandeep/Travis-Encrypt/blob/0dd2da1c71feaadcb84bdeb26827e6dfe1bd3b41/travis/cli.py#L53-L107
def cli(username, repository, path, password, deploy, env, clipboard, env_file): """Encrypt passwords and environment variables for use with Travis CI. Travis Encrypt requires as arguments the user's GitHub username and repository name. Once the arguments are passed, a password prompt will ask for the password that needs to be encrypted. The given password will then be encrypted via the PKCS1v15 padding scheme and printed to standard output. If the path to a .travis.yml file is given as an argument, the encrypted password is added to the .travis.yml file. """ key = retrieve_public_key('{}/{}' .format(username, repository)) if env_file: if path: config = load_travis_configuration(path) for env_var, value in dotenv_values(env_file).items(): encrypted_env = encrypt_key(key, value.encode()) config.setdefault('env', {}).setdefault('global', {})[env_var] = {'secure': encrypted_env} dump_travis_configuration(config, path) print('Encrypted variables from {} added to {}'.format(env_file, path)) else: print('\nPlease add the following to your .travis.yml:') for env_var, value in dotenv_values(env_file).items(): encrypted_env = encrypt_key(key, value.encode()) print("{}:\n secure: {}".format(env_var, encrypted_env)) else: encrypted_password = encrypt_key(key, password.encode()) if path: config = load_travis_configuration(path) if config is None: config = OrderedDict() if deploy: config.setdefault('deploy', {}).setdefault('password', {})['secure'] = encrypted_password elif env: try: config.setdefault('env', {}).setdefault('global', {})['secure'] = encrypted_password except TypeError: for item in config['env']['global']: if isinstance(item, dict) and 'secure' in item: item['secure'] = encrypted_password else: config.setdefault('password', {})['secure'] = encrypted_password dump_travis_configuration(config, path) print('Encrypted password added to {}' .format(path)) elif clipboard: pyperclip.copy(encrypted_password) print('\nThe encrypted password has been copied to your clipboard.') else: print('\nPlease add the following to your .travis.yml:\nsecure: {}' .format(encrypted_password))
[ "def", "cli", "(", "username", ",", "repository", ",", "path", ",", "password", ",", "deploy", ",", "env", ",", "clipboard", ",", "env_file", ")", ":", "key", "=", "retrieve_public_key", "(", "'{}/{}'", ".", "format", "(", "username", ",", "repository", ")", ")", "if", "env_file", ":", "if", "path", ":", "config", "=", "load_travis_configuration", "(", "path", ")", "for", "env_var", ",", "value", "in", "dotenv_values", "(", "env_file", ")", ".", "items", "(", ")", ":", "encrypted_env", "=", "encrypt_key", "(", "key", ",", "value", ".", "encode", "(", ")", ")", "config", ".", "setdefault", "(", "'env'", ",", "{", "}", ")", ".", "setdefault", "(", "'global'", ",", "{", "}", ")", "[", "env_var", "]", "=", "{", "'secure'", ":", "encrypted_env", "}", "dump_travis_configuration", "(", "config", ",", "path", ")", "print", "(", "'Encrypted variables from {} added to {}'", ".", "format", "(", "env_file", ",", "path", ")", ")", "else", ":", "print", "(", "'\\nPlease add the following to your .travis.yml:'", ")", "for", "env_var", ",", "value", "in", "dotenv_values", "(", "env_file", ")", ".", "items", "(", ")", ":", "encrypted_env", "=", "encrypt_key", "(", "key", ",", "value", ".", "encode", "(", ")", ")", "print", "(", "\"{}:\\n secure: {}\"", ".", "format", "(", "env_var", ",", "encrypted_env", ")", ")", "else", ":", "encrypted_password", "=", "encrypt_key", "(", "key", ",", "password", ".", "encode", "(", ")", ")", "if", "path", ":", "config", "=", "load_travis_configuration", "(", "path", ")", "if", "config", "is", "None", ":", "config", "=", "OrderedDict", "(", ")", "if", "deploy", ":", "config", ".", "setdefault", "(", "'deploy'", ",", "{", "}", ")", ".", "setdefault", "(", "'password'", ",", "{", "}", ")", "[", "'secure'", "]", "=", "encrypted_password", "elif", "env", ":", "try", ":", "config", ".", "setdefault", "(", "'env'", ",", "{", "}", ")", ".", "setdefault", "(", "'global'", ",", "{", "}", ")", "[", "'secure'", "]", "=", "encrypted_password", "except", "TypeError", ":", "for", "item", "in", "config", "[", "'env'", "]", "[", "'global'", "]", ":", "if", "isinstance", "(", "item", ",", "dict", ")", "and", "'secure'", "in", "item", ":", "item", "[", "'secure'", "]", "=", "encrypted_password", "else", ":", "config", ".", "setdefault", "(", "'password'", ",", "{", "}", ")", "[", "'secure'", "]", "=", "encrypted_password", "dump_travis_configuration", "(", "config", ",", "path", ")", "print", "(", "'Encrypted password added to {}'", ".", "format", "(", "path", ")", ")", "elif", "clipboard", ":", "pyperclip", ".", "copy", "(", "encrypted_password", ")", "print", "(", "'\\nThe encrypted password has been copied to your clipboard.'", ")", "else", ":", "print", "(", "'\\nPlease add the following to your .travis.yml:\\nsecure: {}'", ".", "format", "(", "encrypted_password", ")", ")" ]
Encrypt passwords and environment variables for use with Travis CI. Travis Encrypt requires as arguments the user's GitHub username and repository name. Once the arguments are passed, a password prompt will ask for the password that needs to be encrypted. The given password will then be encrypted via the PKCS1v15 padding scheme and printed to standard output. If the path to a .travis.yml file is given as an argument, the encrypted password is added to the .travis.yml file.
[ "Encrypt", "passwords", "and", "environment", "variables", "for", "use", "with", "Travis", "CI", "." ]
python
train
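The record above describes encrypting a secret with the repository's public key using PKCS1v15 padding. As a minimal sketch of that step (assuming a recent version of the cryptography library and that the public key is already available as PEM bytes; retrieve_public_key and encrypt_key themselves are not reproduced here):

import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding

def encrypt_secret(public_key_pem, secret):
    # Load the PEM-encoded RSA public key, encrypt with PKCS1v15 padding,
    # then base64-encode so the value can be pasted into .travis.yml.
    public_key = serialization.load_pem_public_key(public_key_pem)
    ciphertext = public_key.encrypt(secret.encode(), padding.PKCS1v15())
    return base64.b64encode(ciphertext).decode()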
saltstack/salt
salt/states/boto_secgroup.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_secgroup.py#L603-L663
def absent( name, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' Ensure a security group with the specified name does not exist. name Name of the security group. vpc_id The ID of the VPC to remove the security group from, if any. Exclusive with vpc_name. vpc_name The name of the VPC to remove the security group from, if any. Exclusive with vpc_name. .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name) if sg: if __opts__['test']: ret['comment'] = 'Security group {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_secgroup.delete'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name) if deleted: ret['changes']['old'] = {'secgroup': sg} ret['changes']['new'] = {'secgroup': None} ret['comment'] = 'Security group {0} deleted.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} security group.'.format(name) else: ret['comment'] = '{0} security group does not exist.'.format(name) return ret
[ "def", "absent", "(", "name", ",", "vpc_id", "=", "None", ",", "vpc_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "sg", "=", "__salt__", "[", "'boto_secgroup.get_config'", "]", "(", "name", "=", "name", ",", "group_id", "=", "None", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "vpc_id", "=", "vpc_id", ",", "vpc_name", "=", "vpc_name", ")", "if", "sg", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Security group {0} is set to be removed.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "deleted", "=", "__salt__", "[", "'boto_secgroup.delete'", "]", "(", "name", "=", "name", ",", "group_id", "=", "None", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "vpc_id", "=", "vpc_id", ",", "vpc_name", "=", "vpc_name", ")", "if", "deleted", ":", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "{", "'secgroup'", ":", "sg", "}", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "{", "'secgroup'", ":", "None", "}", "ret", "[", "'comment'", "]", "=", "'Security group {0} deleted.'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to delete {0} security group.'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'{0} security group does not exist.'", ".", "format", "(", "name", ")", "return", "ret" ]
Ensure a security group with the specified name does not exist. name Name of the security group. vpc_id The ID of the VPC to remove the security group from, if any. Exclusive with vpc_name. vpc_name The name of the VPC to remove the security group from, if any. Exclusive with vpc_id. .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0
[ "Ensure", "a", "security", "group", "with", "the", "specified", "name", "does", "not", "exist", "." ]
python
train
LogicalDash/LiSE
allegedb/allegedb/__init__.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/__init__.py#L972-L983
def new_multidigraph(self, name, data=None, **attr): """Return a new instance of type MultiDiGraph, initialized with the given data if provided. :arg name: a name for the graph :arg data: dictionary or NetworkX graph object providing initial state """ self._init_graph(name, 'MultiDiGraph') mdg = MultiDiGraph(self, name, data, **attr) self._graph_objs[name] = mdg return mdg
[ "def", "new_multidigraph", "(", "self", ",", "name", ",", "data", "=", "None", ",", "*", "*", "attr", ")", ":", "self", ".", "_init_graph", "(", "name", ",", "'MultiDiGraph'", ")", "mdg", "=", "MultiDiGraph", "(", "self", ",", "name", ",", "data", ",", "*", "*", "attr", ")", "self", ".", "_graph_objs", "[", "name", "]", "=", "mdg", "return", "mdg" ]
Return a new instance of type MultiDiGraph, initialized with the given data if provided. :arg name: a name for the graph :arg data: dictionary or NetworkX graph object providing initial state
[ "Return", "a", "new", "instance", "of", "type", "MultiDiGraph", "initialized", "with", "the", "given", "data", "if", "provided", "." ]
python
train
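A hedged usage sketch for the record above; the connection string and the graph attribute below are assumptions for illustration, not taken from the record.

from allegedb import ORM

# Assumed: ORM accepts an SQLAlchemy-style database string (an assumption,
# not shown in the record above).
orm = ORM('sqlite:///:memory:')
# Extra keyword arguments are passed through as attributes of the new graph.
roads = orm.new_multidigraph('roads', kind='transport')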
bcbio/bcbio-nextgen
scripts/utils/plink_to_vcf.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/plink_to_vcf.py#L119-L154
def fix_nonref_positions(in_file, ref_file): """Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed. """ ignore_chrs = ["."] ref2bit = twobit.TwoBitFile(open(ref_file)) out_file = in_file.replace("-raw.vcf", ".vcf") with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.rstrip("\r\n").split("\t") pos = int(parts[1]) # handle chr/non-chr naming if parts[0] not in ref2bit.keys() and parts[0].replace("chr", "") in ref2bit.keys(): parts[0] = parts[0].replace("chr", "") # handle X chromosome elif parts[0] not in ref2bit.keys() and parts[0] == "23": for test in ["X", "chrX"]: if test in ref2bit.keys(): parts[0] == test ref_base = None if parts[0] not in ignore_chrs: try: ref_base = ref2bit[parts[0]].get(pos-1, pos).upper() except Exception as msg: print "Skipping line. Failed to retrieve reference base for %s\n%s" % (str(parts), msg) parts = fix_vcf_line(parts, ref_base) if parts is not None: out_handle.write("\t".join(parts) + "\n") return out_file
[ "def", "fix_nonref_positions", "(", "in_file", ",", "ref_file", ")", ":", "ignore_chrs", "=", "[", "\".\"", "]", "ref2bit", "=", "twobit", ".", "TwoBitFile", "(", "open", "(", "ref_file", ")", ")", "out_file", "=", "in_file", ".", "replace", "(", "\"-raw.vcf\"", ",", "\".vcf\"", ")", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "out_handle", ".", "write", "(", "line", ")", "else", ":", "parts", "=", "line", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "pos", "=", "int", "(", "parts", "[", "1", "]", ")", "# handle chr/non-chr naming", "if", "parts", "[", "0", "]", "not", "in", "ref2bit", ".", "keys", "(", ")", "and", "parts", "[", "0", "]", ".", "replace", "(", "\"chr\"", ",", "\"\"", ")", "in", "ref2bit", ".", "keys", "(", ")", ":", "parts", "[", "0", "]", "=", "parts", "[", "0", "]", ".", "replace", "(", "\"chr\"", ",", "\"\"", ")", "# handle X chromosome", "elif", "parts", "[", "0", "]", "not", "in", "ref2bit", ".", "keys", "(", ")", "and", "parts", "[", "0", "]", "==", "\"23\"", ":", "for", "test", "in", "[", "\"X\"", ",", "\"chrX\"", "]", ":", "if", "test", "in", "ref2bit", ".", "keys", "(", ")", ":", "parts", "[", "0", "]", "==", "test", "ref_base", "=", "None", "if", "parts", "[", "0", "]", "not", "in", "ignore_chrs", ":", "try", ":", "ref_base", "=", "ref2bit", "[", "parts", "[", "0", "]", "]", ".", "get", "(", "pos", "-", "1", ",", "pos", ")", ".", "upper", "(", ")", "except", "Exception", "as", "msg", ":", "print", "\"Skipping line. Failed to retrieve reference base for %s\\n%s\"", "%", "(", "str", "(", "parts", ")", ",", "msg", ")", "parts", "=", "fix_vcf_line", "(", "parts", ",", "ref_base", ")", "if", "parts", "is", "not", "None", ":", "out_handle", ".", "write", "(", "\"\\t\"", ".", "join", "(", "parts", ")", "+", "\"\\n\"", ")", "return", "out_file" ]
Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed.
[ "Fix", "Genotyping", "VCF", "positions", "where", "the", "bases", "are", "all", "variants", "." ]
python
train
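fix_vcf_line is referenced but not included in the record above. A hypothetical sketch of the allele swap it presumably performs (column indices follow the VCF spec; the real helper's behaviour may differ):

def fix_vcf_line(parts, ref_base):
    # Hypothetical: parts[3] is REF and parts[4] is ALT in a VCF record.
    ref, alt = parts[3], parts[4]
    if ref_base is None or ref == ref_base:
        return parts                      # already consistent with the genome
    if alt == ref_base:
        parts[3], parts[4] = alt, ref     # reversed alleles: swap them back
        return parts
    return None                           # unresolvable; the caller skips the line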
iotile/coretools
transport_plugins/websocket/iotile_transport_websocket/generic/async_client.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/websocket/iotile_transport_websocket/generic/async_client.py#L89-L104
async def stop(self): """Stop this websocket client and disconnect from the server. This method is idempotent and may be called multiple times. If called when there is no active connection, it will simply return. """ if self._connection_task is None: return try: await self._connection_task.stop() finally: self._con = None self._connection_task = None self._manager.clear()
[ "async", "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_connection_task", "is", "None", ":", "return", "try", ":", "await", "self", ".", "_connection_task", ".", "stop", "(", ")", "finally", ":", "self", ".", "_con", "=", "None", "self", ".", "_connection_task", "=", "None", "self", ".", "_manager", ".", "clear", "(", ")" ]
Stop this websocket client and disconnect from the server. This method is idempotent and may be called multiple times. If called when there is no active connection, it will simply return.
[ "Stop", "this", "websocket", "client", "and", "disconnect", "from", "the", "server", "." ]
python
train
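The record above relies on stop() being idempotent. A minimal, self-contained illustration of that pattern with asyncio (not the real client class):

import asyncio

class Client:
    def __init__(self):
        # Stand-in for the long-running connection task.
        self._connection_task = asyncio.create_task(asyncio.sleep(3600))

    async def stop(self):
        if self._connection_task is None:
            return                        # already stopped: calling again is a no-op
        task, self._connection_task = self._connection_task, None
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass

async def main():
    client = Client()
    await client.stop()
    await client.stop()                   # safe to call twice

asyncio.run(main())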
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/reftrackwidget.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L59-L73
def select(self, ): """Store the selected taskfileinfo self.selected and accept the dialog :returns: None :rtype: None :raises: None """ s = self.browser.selected_indexes(self.browser.get_depth()-1) if not s: return i = s[0].internalPointer() if i: tfi = i.internal_data() self.selected = tfi self.accept()
[ "def", "select", "(", "self", ",", ")", ":", "s", "=", "self", ".", "browser", ".", "selected_indexes", "(", "self", ".", "browser", ".", "get_depth", "(", ")", "-", "1", ")", "if", "not", "s", ":", "return", "i", "=", "s", "[", "0", "]", ".", "internalPointer", "(", ")", "if", "i", ":", "tfi", "=", "i", ".", "internal_data", "(", ")", "self", ".", "selected", "=", "tfi", "self", ".", "accept", "(", ")" ]
Store the selected taskfileinfo in self.selected and accept the dialog. :returns: None :rtype: None :raises: None
[ "Store", "the", "selected", "taskfileinfo", "self", ".", "selected", "and", "accept", "the", "dialog" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/html5lib/treeadapters/sax.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/treeadapters/sax.py#L13-L50
def to_sax(walker, handler): """Call SAX-like content handler based on treewalker walker :arg walker: the treewalker to use to walk the tree to convert it :arg handler: SAX handler to use """ handler.startDocument() for prefix, namespace in prefix_mapping.items(): handler.startPrefixMapping(prefix, namespace) for token in walker: type = token["type"] if type == "Doctype": continue elif type in ("StartTag", "EmptyTag"): attrs = AttributesNSImpl(token["data"], unadjustForeignAttributes) handler.startElementNS((token["namespace"], token["name"]), token["name"], attrs) if type == "EmptyTag": handler.endElementNS((token["namespace"], token["name"]), token["name"]) elif type == "EndTag": handler.endElementNS((token["namespace"], token["name"]), token["name"]) elif type in ("Characters", "SpaceCharacters"): handler.characters(token["data"]) elif type == "Comment": pass else: assert False, "Unknown token type" for prefix, namespace in prefix_mapping.items(): handler.endPrefixMapping(prefix) handler.endDocument()
[ "def", "to_sax", "(", "walker", ",", "handler", ")", ":", "handler", ".", "startDocument", "(", ")", "for", "prefix", ",", "namespace", "in", "prefix_mapping", ".", "items", "(", ")", ":", "handler", ".", "startPrefixMapping", "(", "prefix", ",", "namespace", ")", "for", "token", "in", "walker", ":", "type", "=", "token", "[", "\"type\"", "]", "if", "type", "==", "\"Doctype\"", ":", "continue", "elif", "type", "in", "(", "\"StartTag\"", ",", "\"EmptyTag\"", ")", ":", "attrs", "=", "AttributesNSImpl", "(", "token", "[", "\"data\"", "]", ",", "unadjustForeignAttributes", ")", "handler", ".", "startElementNS", "(", "(", "token", "[", "\"namespace\"", "]", ",", "token", "[", "\"name\"", "]", ")", ",", "token", "[", "\"name\"", "]", ",", "attrs", ")", "if", "type", "==", "\"EmptyTag\"", ":", "handler", ".", "endElementNS", "(", "(", "token", "[", "\"namespace\"", "]", ",", "token", "[", "\"name\"", "]", ")", ",", "token", "[", "\"name\"", "]", ")", "elif", "type", "==", "\"EndTag\"", ":", "handler", ".", "endElementNS", "(", "(", "token", "[", "\"namespace\"", "]", ",", "token", "[", "\"name\"", "]", ")", ",", "token", "[", "\"name\"", "]", ")", "elif", "type", "in", "(", "\"Characters\"", ",", "\"SpaceCharacters\"", ")", ":", "handler", ".", "characters", "(", "token", "[", "\"data\"", "]", ")", "elif", "type", "==", "\"Comment\"", ":", "pass", "else", ":", "assert", "False", ",", "\"Unknown token type\"", "for", "prefix", ",", "namespace", "in", "prefix_mapping", ".", "items", "(", ")", ":", "handler", ".", "endPrefixMapping", "(", "prefix", ")", "handler", ".", "endDocument", "(", ")" ]
Call SAX-like content handler based on treewalker walker :arg walker: the treewalker to use to walk the tree to convert it :arg handler: SAX handler to use
[ "Call", "SAX", "-", "like", "content", "handler", "based", "on", "treewalker", "walker" ]
python
train
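A short usage sketch for the record above, driving to_sax with html5lib's tree walker and the standard library's XMLGenerator as the SAX handler (assumes the html5lib package is installed; the markup is a placeholder):

import io
from xml.sax.saxutils import XMLGenerator

import html5lib
from html5lib.treeadapters import sax

document = html5lib.parse("<p>Hello <b>world</b></p>")
walker = html5lib.getTreeWalker("etree")

out = io.StringIO()
handler = XMLGenerator(out)               # implements the *NS SAX callbacks
sax.to_sax(walker(document), handler)
print(out.getvalue())                     # serialized markup produced by the handler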
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L425-L460
def post_series_publish(self, id, **data): """ POST /series/:id/publish/ Publishes a repeating event series and all of its occurrences that are not already canceled or deleted. Once a date is cancelled it can still be uncancelled and can be viewed by the public. A deleted date cannot be undeleted and cannot by viewed by the public. In order for publish to be permitted, the event must have all necessary information, including a name and description, an organizer, at least one ticket, and valid payment options. This API endpoint will return argument errors for event fields that fail to validate the publish requirements. Returns a boolean indicating success or failure of the publish. field_error event.name MISSING Your event must have a name to be published. field_error event.start MISSING Your event must have a start date to be published. field_error event.end MISSING Your event must have an end date to be published. field_error event.start.timezone MISSING Your event start and end dates must have matching time zones to be published. field_error event.organizer MISSING Your event must have an organizer to be published. field_error event.currency MISSING Your event must have a currency to be published. field_error event.currency INVALID Your event must have a valid currency to be published. field_error event.tickets MISSING Your event must have at least one ticket to be published. field_error event.tickets.N.name MISSING All tickets must have names in order for your event to be published. The N will be the ticket class ID with the error. field_error event.tickets.N.quantity_total MISSING All non-donation tickets must have an available quantity value in order for your event to be published. The N will be the ticket class ID with the error. field_error event.tickets.N.cost MISSING All non-donation tickets must have a cost (which can be ``0.00`` for free tickets) in order for your event to be published. The N will be the ticket class ID with the error. .. _unpublish-series-by-id: """ return self.post("/series/{0}/publish/".format(id), data=data)
[ "def", "post_series_publish", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "post", "(", "\"/series/{0}/publish/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
POST /series/:id/publish/ Publishes a repeating event series and all of its occurrences that are not already canceled or deleted. Once a date is cancelled it can still be uncancelled and can be viewed by the public. A deleted date cannot be undeleted and cannot be viewed by the public. In order for publish to be permitted, the event must have all necessary information, including a name and description, an organizer, at least one ticket, and valid payment options. This API endpoint will return argument errors for event fields that fail to validate the publish requirements. Returns a boolean indicating success or failure of the publish. field_error event.name MISSING Your event must have a name to be published. field_error event.start MISSING Your event must have a start date to be published. field_error event.end MISSING Your event must have an end date to be published. field_error event.start.timezone MISSING Your event start and end dates must have matching time zones to be published. field_error event.organizer MISSING Your event must have an organizer to be published. field_error event.currency MISSING Your event must have a currency to be published. field_error event.currency INVALID Your event must have a valid currency to be published. field_error event.tickets MISSING Your event must have at least one ticket to be published. field_error event.tickets.N.name MISSING All tickets must have names in order for your event to be published. The N will be the ticket class ID with the error. field_error event.tickets.N.quantity_total MISSING All non-donation tickets must have an available quantity value in order for your event to be published. The N will be the ticket class ID with the error. field_error event.tickets.N.cost MISSING All non-donation tickets must have a cost (which can be ``0.00`` for free tickets) in order for your event to be published. The N will be the ticket class ID with the error. .. _unpublish-series-by-id:
[ "POST", "/", "series", "/", ":", "id", "/", "publish", "/", "Publishes", "a", "repeating", "event", "series", "and", "all", "of", "its", "occurrences", "that", "are", "not", "already", "canceled", "or", "deleted", ".", "Once", "a", "date", "is", "cancelled", "it", "can", "still", "be", "uncancelled", "and", "can", "be", "viewed", "by", "the", "public", ".", "A", "deleted", "date", "cannot", "be", "undeleted", "and", "cannot", "by", "viewed", "by", "the", "public", ".", "In", "order", "for", "publish", "to", "be", "permitted", "the", "event", "must", "have", "all", "necessary", "information", "including", "a", "name", "and", "description", "an", "organizer", "at", "least", "one", "ticket", "and", "valid", "payment", "options", ".", "This", "API", "endpoint", "will", "return", "argument", "errors", "for", "event", "fields", "that", "fail", "to", "validate", "the", "publish", "requirements", ".", "Returns", "a", "boolean", "indicating", "success", "or", "failure", "of", "the", "publish", ".", "field_error", "event", ".", "name", "MISSING", "Your", "event", "must", "have", "a", "name", "to", "be", "published", ".", "field_error", "event", ".", "start", "MISSING", "Your", "event", "must", "have", "a", "start", "date", "to", "be", "published", ".", "field_error", "event", ".", "end", "MISSING", "Your", "event", "must", "have", "an", "end", "date", "to", "be", "published", ".", "field_error", "event", ".", "start", ".", "timezone", "MISSING", "Your", "event", "start", "and", "end", "dates", "must", "have", "matching", "time", "zones", "to", "be", "published", ".", "field_error", "event", ".", "organizer", "MISSING", "Your", "event", "must", "have", "an", "organizer", "to", "be", "published", ".", "field_error", "event", ".", "currency", "MISSING", "Your", "event", "must", "have", "a", "currency", "to", "be", "published", ".", "field_error", "event", ".", "currency", "INVALID", "Your", "event", "must", "have", "a", "valid", "currency", "to", "be", "published", ".", "field_error", "event", ".", "tickets", "MISSING", "Your", "event", "must", "have", "at", "least", "one", "ticket", "to", "be", "published", ".", "field_error", "event", ".", "tickets", ".", "N", ".", "name", "MISSING", "All", "tickets", "must", "have", "names", "in", "order", "for", "your", "event", "to", "be", "published", ".", "The", "N", "will", "be", "the", "ticket", "class", "ID", "with", "the", "error", ".", "field_error", "event", ".", "tickets", ".", "N", ".", "quantity_total", "MISSING", "All", "non", "-", "donation", "tickets", "must", "have", "an", "available", "quantity", "value", "in", "order", "for", "your", "event", "to", "be", "published", ".", "The", "N", "will", "be", "the", "ticket", "class", "ID", "with", "the", "error", ".", "field_error", "event", ".", "tickets", ".", "N", ".", "cost", "MISSING", "All", "non", "-", "donation", "tickets", "must", "have", "a", "cost", "(", "which", "can", "be", "0", ".", "00", "for", "free", "tickets", ")", "in", "order", "for", "your", "event", "to", "be", "published", ".", "The", "N", "will", "be", "the", "ticket", "class", "ID", "with", "the", "error", ".", "..", "_unpublish", "-", "series", "-", "by", "-", "id", ":" ]
python
train
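A hedged call sketch for the record above; the OAuth token and series id are placeholders (assumptions), and the response content is whatever the Eventbrite API returns for the publish attempt:

from eventbrite import Eventbrite

client = Eventbrite('MY_OAUTH_TOKEN')                  # placeholder token
response = client.post_series_publish('1234567890')    # placeholder series id
print(response)                                        # expected to report publish success/failure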
SITools2/pySitools2_1.0
sitools2/core/pySitools2.py
https://github.com/SITools2/pySitools2_1.0/blob/acd13198162456ba401a0b923af989bb29feb3b6/sitools2/core/pySitools2.py#L373-L379
def __parseColumns(self): """Returns the list of columns related to the dataset.""" columns = [] if self.__dataItem.has_key('columnModel'): for column in self.__dataItem['columnModel']: columns.append(Column(column)) return columns
[ "def", "__parseColumns", "(", "self", ")", ":", "columns", "=", "[", "]", "if", "self", ".", "__dataItem", ".", "has_key", "(", "'columnModel'", ")", ":", "for", "column", "in", "self", ".", "__dataItem", "[", "'columnModel'", "]", ":", "columns", ".", "append", "(", "Column", "(", "column", ")", ")", "return", "columns" ]
Returns the list of columns related to the dataset.
[ "Returns", "the", "list", "of", "columns", "related", "to", "the", "dataset", "." ]
python
train
kwikteam/phy
phy/stats/ccg.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/stats/ccg.py#L19-L26
def _increment(arr, indices): """Increment some indices in a 1D vector of non-negative integers. Repeated indices are taken into account.""" arr = _as_array(arr) indices = _as_array(indices) bbins = np.bincount(indices) arr[:len(bbins)] += bbins return arr
[ "def", "_increment", "(", "arr", ",", "indices", ")", ":", "arr", "=", "_as_array", "(", "arr", ")", "indices", "=", "_as_array", "(", "indices", ")", "bbins", "=", "np", ".", "bincount", "(", "indices", ")", "arr", "[", ":", "len", "(", "bbins", ")", "]", "+=", "bbins", "return", "arr" ]
Increment some indices in a 1D vector of non-negative integers. Repeated indices are taken into account.
[ "Increment", "some", "indices", "in", "a", "1D", "vector", "of", "non", "-", "negative", "integers", ".", "Repeated", "indices", "are", "taken", "into", "account", "." ]
python
train
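The point of the record above is that np.bincount counts repeated indices, unlike plain fancy-index assignment. A small self-contained sketch of the same idea:

import numpy as np

def increment(arr, indices):
    # bincount counts every occurrence, so repeated indices add more than 1.
    counts = np.bincount(indices)
    arr[:len(counts)] += counts
    return arr

a = np.zeros(6, dtype=int)
print(increment(a, [1, 3, 3, 5]))   # [0 1 0 2 0 1]
# Note: a[[1, 3, 3, 5]] += 1 would only add 1 at index 3.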
benley/butcher
butcher/util.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/util.py#L91-L102
def flatten(listish): """Flatten an arbitrarily-nested list of strings and lists. Works for any subclass of basestring and any type of iterable. """ for elem in listish: if (isinstance(elem, collections.Iterable) and not isinstance(elem, basestring)): for subelem in flatten(elem): yield subelem else: yield elem
[ "def", "flatten", "(", "listish", ")", ":", "for", "elem", "in", "listish", ":", "if", "(", "isinstance", "(", "elem", ",", "collections", ".", "Iterable", ")", "and", "not", "isinstance", "(", "elem", ",", "basestring", ")", ")", ":", "for", "subelem", "in", "flatten", "(", "elem", ")", ":", "yield", "subelem", "else", ":", "yield", "elem" ]
Flatten an arbitrarily-nested list of strings and lists. Works for any subclass of basestring and any type of iterable.
[ "Flatten", "an", "arbitrarily", "-", "nested", "list", "of", "strings", "and", "lists", "." ]
python
train
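The record above is written for Python 2 (basestring, collections.Iterable). A Python 3 equivalent of the same recursive generator, with a usage example:

from collections.abc import Iterable

def flatten(listish):
    # Recurse into every iterable except strings, yielding leaves in order.
    for elem in listish:
        if isinstance(elem, Iterable) and not isinstance(elem, str):
            yield from flatten(elem)
        else:
            yield elem

print(list(flatten(["a", ["b", ["c", "d"]], ("e",)])))
# -> ['a', 'b', 'c', 'd', 'e']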
DataBiosphere/toil
src/toil/wdl/wdl_synthesis.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L560-L598
def write_function(self, job): ''' Writes out a python function for each WDL "task" object. Each python function is a unit of work written out as a string in preparation to being written out to a file. In WDL, each "job" is called a "task". Each WDL task is written out in multiple steps: 1: Header and inputs (e.g. 'def mapping(self, input1, input2)') 2: Log job name (e.g. 'job.fileStore.logToMaster('initialize_jobs')') 3: Create temp dir (e.g. 'tempDir = fileStore.getLocalTempDir()') 4: import filenames and use readGlobalFile() to get files from the jobStore 5: Reformat commandline variables (like converting to ' '.join(files)). 6: Commandline call using subprocess.Popen(). 7: Write the section returning the outputs. Also logs stats. :return: a giant string containing the meat of the job defs for the toil script. ''' # write the function header fn_section = self.write_function_header(job) # write out commandline keywords fn_section += self.write_function_cmdline(job) if self.needsdocker(job): # write a bash script to inject into the docker fn_section += self.write_function_bashscriptline(job) # write a call to the docker API fn_section += self.write_function_dockercall(job) else: # write a subprocess call fn_section += self.write_function_subprocesspopen() # write the outputs for the definition to return fn_section += self.write_function_outputreturn(job, docker=self.needsdocker(job)) return fn_section
[ "def", "write_function", "(", "self", ",", "job", ")", ":", "# write the function header", "fn_section", "=", "self", ".", "write_function_header", "(", "job", ")", "# write out commandline keywords", "fn_section", "+=", "self", ".", "write_function_cmdline", "(", "job", ")", "if", "self", ".", "needsdocker", "(", "job", ")", ":", "# write a bash script to inject into the docker", "fn_section", "+=", "self", ".", "write_function_bashscriptline", "(", "job", ")", "# write a call to the docker API", "fn_section", "+=", "self", ".", "write_function_dockercall", "(", "job", ")", "else", ":", "# write a subprocess call", "fn_section", "+=", "self", ".", "write_function_subprocesspopen", "(", ")", "# write the outputs for the definition to return", "fn_section", "+=", "self", ".", "write_function_outputreturn", "(", "job", ",", "docker", "=", "self", ".", "needsdocker", "(", "job", ")", ")", "return", "fn_section" ]
Writes out a python function for each WDL "task" object. Each python function is a unit of work written out as a string in preparation for being written out to a file. In WDL, each "job" is called a "task". Each WDL task is written out in multiple steps: 1: Header and inputs (e.g. 'def mapping(self, input1, input2)') 2: Log job name (e.g. 'job.fileStore.logToMaster('initialize_jobs')') 3: Create temp dir (e.g. 'tempDir = fileStore.getLocalTempDir()') 4: Import filenames and use readGlobalFile() to get files from the jobStore 5: Reformat commandline variables (like converting to ' '.join(files)). 6: Commandline call using subprocess.Popen(). 7: Write the section returning the outputs. Also logs stats. :return: a giant string containing the meat of the job defs for the toil script.
[ "Writes", "out", "a", "python", "function", "for", "each", "WDL", "task", "object", "." ]
python
train
jazzband/django-push-notifications
push_notifications/wns.py
https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L259-L322
def dict_to_xml_schema(data): """ Input a dictionary to be converted to xml. There should be only one key at the top level. The value must be a dict with (required) `children` key and (optional) `attrs` key. This will be called the `sub-element dictionary`. The `attrs` value must be a dictionary; each value will be added to the element's xml tag as attributes. e.g.: {"example": { "attrs": { "key1": "value1", ... }, ... }} would result in: <example key1="value1" key2="value2"></example> If the value is a dict it must contain one or more keys which will be used as the sub-element names. Each sub-element must have a value of a sub-element dictionary(see above) or a list of sub-element dictionaries. If the value is not a dict, it will be the value of the element. If the value is a list, multiple elements of the same tag will be created from each sub-element dict in the list. :param data: dict: Used to create an XML tree. e.g.: example_data = { "toast": { "attrs": { "launch": "param", "duration": "short", }, "children": { "visual": { "children": { "binding": { "attrs": {"template": "ToastText01"}, "children": { "text": [ { "attrs": {"id": "1"}, "children": "text1", }, { "attrs": {"id": "2"}, "children": "text2", }, ], }, }, }, }, }, }, } :return: ElementTree.Element """ for key, value in data.items(): root = _add_element_attrs(ET.Element(key), value.get("attrs", {})) children = value.get("children", None) if isinstance(children, dict): _add_sub_elements_from_dict(root, children) return root
[ "def", "dict_to_xml_schema", "(", "data", ")", ":", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "root", "=", "_add_element_attrs", "(", "ET", ".", "Element", "(", "key", ")", ",", "value", ".", "get", "(", "\"attrs\"", ",", "{", "}", ")", ")", "children", "=", "value", ".", "get", "(", "\"children\"", ",", "None", ")", "if", "isinstance", "(", "children", ",", "dict", ")", ":", "_add_sub_elements_from_dict", "(", "root", ",", "children", ")", "return", "root" ]
Input a dictionary to be converted to xml. There should be only one key at the top level. The value must be a dict with (required) `children` key and (optional) `attrs` key. This will be called the `sub-element dictionary`. The `attrs` value must be a dictionary; each value will be added to the element's xml tag as attributes. e.g.: {"example": { "attrs": { "key1": "value1", ... }, ... }} would result in: <example key1="value1" key2="value2"></example> If the value is a dict it must contain one or more keys which will be used as the sub-element names. Each sub-element must have a value of a sub-element dictionary(see above) or a list of sub-element dictionaries. If the value is not a dict, it will be the value of the element. If the value is a list, multiple elements of the same tag will be created from each sub-element dict in the list. :param data: dict: Used to create an XML tree. e.g.: example_data = { "toast": { "attrs": { "launch": "param", "duration": "short", }, "children": { "visual": { "children": { "binding": { "attrs": {"template": "ToastText01"}, "children": { "text": [ { "attrs": {"id": "1"}, "children": "text1", }, { "attrs": {"id": "2"}, "children": "text2", }, ], }, }, }, }, }, }, } :return: ElementTree.Element
[ "Input", "a", "dictionary", "to", "be", "converted", "to", "xml", ".", "There", "should", "be", "only", "one", "key", "at", "the", "top", "level", ".", "The", "value", "must", "be", "a", "dict", "with", "(", "required", ")", "children", "key", "and", "(", "optional", ")", "attrs", "key", ".", "This", "will", "be", "called", "the", "sub", "-", "element", "dictionary", "." ]
python
train
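A usage sketch for the record above with a trimmed-down toast payload (assumes the push_notifications package is importable; the expected output line is illustrative):

from xml.etree import ElementTree as ET
from push_notifications.wns import dict_to_xml_schema

data = {
    "toast": {
        "attrs": {"launch": "param"},
        "children": {
            "visual": {
                "children": {
                    "binding": {
                        "attrs": {"template": "ToastText01"},
                        "children": {
                            "text": [{"attrs": {"id": "1"}, "children": "hello"}],
                        },
                    },
                },
            },
        },
    },
}

root = dict_to_xml_schema(data)
print(ET.tostring(root).decode())
# <toast launch="param"><visual><binding template="ToastText01"><text id="1">hello</text></binding></visual></toast>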
iotile/coretools
transport_plugins/awsiot/iotile_transport_awsiot/device_adapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/device_adapter.py#L364-L381
def _find_connection(self, topic): """Attempt to find a connection id corresponding with a topic The device is found by assuming the topic ends in <slug>/[control|data]/channel Args: topic (string): The topic we received a message on Returns: int: The internal connect id (device slug) associated with this topic """ parts = topic.split('/') if len(parts) < 3: return None slug = parts[-3] return slug
[ "def", "_find_connection", "(", "self", ",", "topic", ")", ":", "parts", "=", "topic", ".", "split", "(", "'/'", ")", "if", "len", "(", "parts", ")", "<", "3", ":", "return", "None", "slug", "=", "parts", "[", "-", "3", "]", "return", "slug" ]
Attempt to find a connection id corresponding to a topic The device is found by assuming the topic ends in <slug>/[control|data]/channel Args: topic (string): The topic we received a message on Returns: string: The internal connection id (device slug) associated with this topic
[ "Attempt", "to", "find", "a", "connection", "id", "corresponding", "with", "a", "topic" ]
python
train
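A tiny self-contained sketch of the topic parsing in the record above; the topic string is a made-up example:

def find_slug(topic):
    # The slug is assumed to sit three segments from the end:
    # <slug>/[control|data]/<channel>
    parts = topic.split('/')
    if len(parts) < 3:
        return None
    return parts[-3]

print(find_slug("devices/d--0000-0000-0000-0001/data/streaming"))
# -> d--0000-0000-0000-0001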
MediaFire/mediafire-python-open-sdk
mediafire/uploader.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/uploader.py#L353-L363
def _upload_check(self, upload_info, resumable=False): """Wrapper around upload/check""" return self._api.upload_check( filename=upload_info.name, size=upload_info.size, hash_=upload_info.hash_info.file, folder_key=upload_info.folder_key, filedrop_key=upload_info.filedrop_key, path=upload_info.path, resumable=resumable )
[ "def", "_upload_check", "(", "self", ",", "upload_info", ",", "resumable", "=", "False", ")", ":", "return", "self", ".", "_api", ".", "upload_check", "(", "filename", "=", "upload_info", ".", "name", ",", "size", "=", "upload_info", ".", "size", ",", "hash_", "=", "upload_info", ".", "hash_info", ".", "file", ",", "folder_key", "=", "upload_info", ".", "folder_key", ",", "filedrop_key", "=", "upload_info", ".", "filedrop_key", ",", "path", "=", "upload_info", ".", "path", ",", "resumable", "=", "resumable", ")" ]
Wrapper around upload/check
[ "Wrapper", "around", "upload", "/", "check" ]
python
train
NuGrid/NuGridPy
nugridpy/data_plot.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/data_plot.py#L1540-L1935
def abu_chart(self, cycle, mass_range=None ,ilabel=True, imlabel=True, imlabel_fontsize=8, imagic=False, boxstable=True, lbound=(-12, 0), plotaxis=[0, 0, 0, 0], show=True, color_map='jet', ifig=None,data_provided=False,thedata=None, savefig=False,drawfig=None,drawax=None,mov=False, path=None): ''' Plots an abundance chart Parameters ---------- cycle : string, integer or list The cycle we are looking in. If it is a list of cycles, this method will then do a plot for each of these cycles and save them all to a file. mass_range : list, optional A 1x2 array containing the lower and upper mass range. If this is an instance of abu_vector this will only plot isotopes that have an atomic mass within this range. This will throw an error if this range does not make sence ie [45,2] if None, it will plot over the entire range. The default is None. ilabel : boolean, optional Elemental labels off/on. The default is True. imlabel : boolean, optional Label for isotopic masses off/on. The default is True. imlabel_fontsize : integer, optional Fontsize for isotopic mass labels. The default is 8. imagic : boolean, optional Turn lines for magic numbers off/on. The default is False. boxstable : boolean, optional Plot the black boxes around the stable elements. The defaults is True. lbound : tuple, optional Boundaries for colour spectrum ploted. The default is (-12,0). plotaxis : list, optional Set axis limit. If [0, 0, 0, 0] the complete range in (N,Z) will be plotted. It equates to [xMin, xMax, Ymin, Ymax]. The default is [0, 0, 0, 0]. show : boolean, optional Boolean of if the plot should be displayed. Useful with saving multiple plots using abu_chartMulti. The default is True. color_map : string, optional Color map according to choices in matplotlib (e.g. www.scipy.org/Cookbook/Matplotlib/Show_colormaps). The default is 'jet'. ifig : integer, optional Figure number, if ifig is None it wiil be set to the cycle number. The defaults is None. savefig : boolean, optional Whether or not to save the figure. The default is False drawfig, drawax, mov : optional, not necessary for user to set these variables The figure and axes containers to be drawn on, and whether or not a movie is being made (only True when se.movie is called, which sets mov to True automatically path: path where to save figure ''' if ifig == None and not mov: ifig=cycle if type(cycle)==type([]): self.abu_chartMulti(cycle, mass_range,ilabel,imlabel,imlabel_fontsize,imagic,boxstable,\ lbound,plotaxis,color_map, path=path) return plotType=self._classTest() if mass_range!=None and mass_range[0]>mass_range[1]: raise IOError("Please input a proper mass range") if plotType=='se': if not data_provided: cycle=self.se.findCycle(cycle) # nin=zeros(len(self.se.A)) # zin=zeros(len(self.se.Z)) yin=self.get(cycle, 'iso_massf') isom=self.se.isomeric_states masses = self.se.get(cycle,'mass') else: cycle=cycle # why so serious? yin=thedata[0] isom=self.se.isomeric_states masses = thedata[1] # for i in xrange(len(nin)): # zin[i]=self.se.Z[i] # nin[i]=self.se.A[i]-zin[i] # SJONES implicit loop instead: zin=array([el for el in self.se.Z]) nin=array([el for el in self.se.A])-zin #Test if the mass cell order is inverted #and hence mass[-1] the center. 
if masses[0]>masses[-1]: #invert print('Inverted order of mass cells will be taken into account.') yin=yin[::-1] masses=masses[::-1] if mass_range != None: # trim out only the zones needed: tmpyps=[] masses.sort() # SJ: not sure why this sort if necessary # for i in xrange(len(masses)): # if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\ # (masses[i]==mass_range[0] or masses[i]==mass_range[1]): # tmpyps.append(yin[i]) # yin=tmpyps # find lower and upper indices and slice instead: idxl=np.abs(masses-mass_range[0]).argmin() if masses[idxl] < mass_range[0]: idxl+=1 idxu=np.abs(masses-mass_range[1]).argmin() if masses[idxu] > mass_range[1]: idxu-=1 yin=yin[idxl:idxu+1] #tmp=zeros(len(yin[0])) #for i in xrange(len(yin)): # for j in xrange(len(yin[i])): # tmp[j]+=yin[i][j] tmp2=sum(yin,axis=0) # SJONES sum along axis instead of nested loop tmp=old_div(tmp2,len(yin)) yin=tmp elif plotType=='PPN': ain=self.get('A',cycle) zin=self.get('Z',cycle) nin=ain-zin yin=self.get('ABUNDANCE_MF',cycle) isom=self.get('ISOM',cycle) if mass_range != None: tmpA=[] tmpZ=[] tmpIsom=[] tmpyps=[] for i in range(len(nin)): if (ain[i] >mass_range[0] and ain[i]<mass_range[1])\ or (ain[i]==mass_range[0] or ain[i]==mass_range[1]): tmpA.append(nin[i]) tmpZ.append(zin[i]) tmpIsom.append(isom[i]) tmpyps.append(yin[i]) zin=tmpZ nin=tmpA yin=tmpyps isom=tmpIsom else: raise IOError("This method, abu_chart, is not supported by this class") # in case we call from ipython -pylab, turn interactive on at end again turnoff=False if not show: try: ioff() turnoff=True except NameError: turnoff=False nnmax = int(max(nin))+1 nzmax = int(max(zin))+1 nzycheck = zeros([nnmax,nzmax,3]) for i in range(len(nin)): if isom[i]==1: ni = int(nin[i]) zi = int(zin[i]) nzycheck[ni,zi,0] = 1 nzycheck[ni,zi,1] = yin[i] ####################################################################### # elemental names: elname(i) is the name of element with Z=i elname=self.elements_names #### create plot ## define axis and plot style (colormap, size, fontsize etc.) 
if plotaxis==[0,0,0,0]: xdim=10 ydim=6 else: dx = plotaxis[1]-plotaxis[0] dy = plotaxis[3]-plotaxis[2] ydim = 6 xdim = ydim*dx/dy params = {'axes.labelsize': 12, 'text.fontsize': 12, 'legend.fontsize': 12, 'xtick.labelsize': 12, 'ytick.labelsize': 12, 'text.usetex': True} #pl.rcParams.update(params) #May cause Error, someting to do with tex if mov: fig=drawfig fig.set_size_inches(xdim,ydim) artists=[] else: fig=pl.figure(ifig,figsize=(xdim,ydim),dpi=100) axx = 0.10 axy = 0.10 axw = 0.85 axh = 0.8 if mov: ax=drawax else: ax=pl.axes([axx,axy,axw,axh]) # Tick marks xminorlocator = MultipleLocator(1) xmajorlocator = MultipleLocator(5) ax.xaxis.set_major_locator(xmajorlocator) ax.xaxis.set_minor_locator(xminorlocator) yminorlocator = MultipleLocator(1) ymajorlocator = MultipleLocator(5) ax.yaxis.set_major_locator(ymajorlocator) ax.yaxis.set_minor_locator(yminorlocator) # color map choice for abundances cmapa = cm.get_cmap(name=color_map) # color map choice for arrows cmapr = cm.autumn # if a value is below the lower limit its set to white cmapa.set_under(color='w') cmapr.set_under(color='w') # set value range for abundance colors (log10(Y)) norma = colors.Normalize(vmin=lbound[0],vmax=lbound[1]) # set x- and y-axis scale aspect ratio to 1 ax.set_aspect('equal') #print time,temp and density on top temp = ' '#'%8.3e' %ff['temp'] time = ' '#'%8.3e' %ff['time'] dens = ' '#'%8.3e' %ff['dens'] #May cause Error, someting to do with tex ''' #box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \ # + dens + ' g/cm$^{3}$', textprops=dict(color="k")) anchored_box = AnchoredOffsetbox(loc=3, child=box1, pad=0., frameon=False, bbox_to_anchor=(0., 1.02), bbox_transform=ax.transAxes, borderpad=0., ) ax.add_artist(anchored_box) ''' ## Colour bar plotted patches = [] color = [] for i in range(nzmax): for j in range(nnmax): if nzycheck[j,i,0]==1: xy = j-0.5,i-0.5 rect = Rectangle(xy,1,1,) # abundance yab = nzycheck[j,i,1] if yab == 0: yab=1e-99 col =log10(yab) patches.append(rect) color.append(col) p = PatchCollection(patches, cmap=cmapa, norm=norma) p.set_array(array(color)) p.set_zorder(1) if mov: artist1=ax.add_collection(p) artists.append(artist1) else: ax.add_collection(p) if not mov: cb = pl.colorbar(p) # colorbar label cb.set_label('log$_{10}$(X)') # plot file name graphname = 'abundance-chart'+str(cycle) # Add black frames for stable isotopes if boxstable: for i in range(len(self.stable_el)): if i == 0: continue tmp = self.stable_el[i] try: zz= self.elements_names.index(tmp[0]) #charge except: continue for j in range(len(tmp)): if j == 0: continue nn = int(tmp[j]) #atomic mass nn=nn-zz xy = nn-0.5,zz-0.5 rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=3.) 
rect.set_zorder(2) ax.add_patch(rect) # decide which array to take for label positions iarr = 0 # plot element labels if ilabel: for z in range(nzmax): try: nmin = min(argwhere(nzycheck[:,z,iarr]))[0]-1 ax.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',\ fontsize='x-small',clip_on=True) except ValueError: continue # plot mass numbers if imlabel: for z in range(nzmax): for n in range(nnmax): a = z+n if nzycheck[n,z,iarr]==1: ax.text(n,z,a,horizontalalignment='center',verticalalignment='center',\ fontsize=imlabel_fontsize,clip_on=True) # plot lines at magic numbers if imagic: ixymagic=[2, 8, 20, 28, 50, 82, 126] nmagic = len(ixymagic) for magic in ixymagic: if magic<=nzmax: try: xnmin = min(argwhere(nzycheck[:,magic,iarr]))[0] xnmax = max(argwhere(nzycheck[:,magic,iarr]))[0] line = ax.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-') except ValueError: dummy=0 if magic<=nnmax: try: yzmin = min(argwhere(nzycheck[magic,:,iarr]))[0] yzmax = max(argwhere(nzycheck[magic,:,iarr]))[0] line = ax.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-') except ValueError: dummy=0 # set axis limits if plotaxis==[0,0,0,0]: xmax=max(nin) ymax=max(zin) ax.axis([-0.5,xmax+0.5,-0.5,ymax+0.5]) else: ax.axis(plotaxis) # set x- and y-axis label ax.set_xlabel('neutron number (A-Z)') ax.set_ylabel('proton number Z') if not mov: pl.title('Isotopic Chart for cycle '+str(int(cycle))) if savefig: if path is not None: graphname = os.path.join(path, graphname) fig.savefig(graphname) print(graphname,'is done') if show: pl.show() if turnoff: ion() if mov: return p,artists else: return
[ "def", "abu_chart", "(", "self", ",", "cycle", ",", "mass_range", "=", "None", ",", "ilabel", "=", "True", ",", "imlabel", "=", "True", ",", "imlabel_fontsize", "=", "8", ",", "imagic", "=", "False", ",", "boxstable", "=", "True", ",", "lbound", "=", "(", "-", "12", ",", "0", ")", ",", "plotaxis", "=", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "show", "=", "True", ",", "color_map", "=", "'jet'", ",", "ifig", "=", "None", ",", "data_provided", "=", "False", ",", "thedata", "=", "None", ",", "savefig", "=", "False", ",", "drawfig", "=", "None", ",", "drawax", "=", "None", ",", "mov", "=", "False", ",", "path", "=", "None", ")", ":", "if", "ifig", "==", "None", "and", "not", "mov", ":", "ifig", "=", "cycle", "if", "type", "(", "cycle", ")", "==", "type", "(", "[", "]", ")", ":", "self", ".", "abu_chartMulti", "(", "cycle", ",", "mass_range", ",", "ilabel", ",", "imlabel", ",", "imlabel_fontsize", ",", "imagic", ",", "boxstable", ",", "lbound", ",", "plotaxis", ",", "color_map", ",", "path", "=", "path", ")", "return", "plotType", "=", "self", ".", "_classTest", "(", ")", "if", "mass_range", "!=", "None", "and", "mass_range", "[", "0", "]", ">", "mass_range", "[", "1", "]", ":", "raise", "IOError", "(", "\"Please input a proper mass range\"", ")", "if", "plotType", "==", "'se'", ":", "if", "not", "data_provided", ":", "cycle", "=", "self", ".", "se", ".", "findCycle", "(", "cycle", ")", "# nin=zeros(len(self.se.A))", "# zin=zeros(len(self.se.Z))", "yin", "=", "self", ".", "get", "(", "cycle", ",", "'iso_massf'", ")", "isom", "=", "self", ".", "se", ".", "isomeric_states", "masses", "=", "self", ".", "se", ".", "get", "(", "cycle", ",", "'mass'", ")", "else", ":", "cycle", "=", "cycle", "# why so serious?", "yin", "=", "thedata", "[", "0", "]", "isom", "=", "self", ".", "se", ".", "isomeric_states", "masses", "=", "thedata", "[", "1", "]", "# for i in xrange(len(nin)):", "# zin[i]=self.se.Z[i]", "# nin[i]=self.se.A[i]-zin[i]", "# SJONES implicit loop instead:", "zin", "=", "array", "(", "[", "el", "for", "el", "in", "self", ".", "se", ".", "Z", "]", ")", "nin", "=", "array", "(", "[", "el", "for", "el", "in", "self", ".", "se", ".", "A", "]", ")", "-", "zin", "#Test if the mass cell order is inverted", "#and hence mass[-1] the center.", "if", "masses", "[", "0", "]", ">", "masses", "[", "-", "1", "]", ":", "#invert", "print", "(", "'Inverted order of mass cells will be taken into account.'", ")", "yin", "=", "yin", "[", ":", ":", "-", "1", "]", "masses", "=", "masses", "[", ":", ":", "-", "1", "]", "if", "mass_range", "!=", "None", ":", "# trim out only the zones needed:", "tmpyps", "=", "[", "]", "masses", ".", "sort", "(", ")", "# SJ: not sure why this sort if necessary", "# for i in xrange(len(masses)):", "# if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\\", "# (masses[i]==mass_range[0] or masses[i]==mass_range[1]):", "# tmpyps.append(yin[i])", "# yin=tmpyps", "# find lower and upper indices and slice instead:", "idxl", "=", "np", ".", "abs", "(", "masses", "-", "mass_range", "[", "0", "]", ")", ".", "argmin", "(", ")", "if", "masses", "[", "idxl", "]", "<", "mass_range", "[", "0", "]", ":", "idxl", "+=", "1", "idxu", "=", "np", ".", "abs", "(", "masses", "-", "mass_range", "[", "1", "]", ")", ".", "argmin", "(", ")", "if", "masses", "[", "idxu", "]", ">", "mass_range", "[", "1", "]", ":", "idxu", "-=", "1", "yin", "=", "yin", "[", "idxl", ":", "idxu", "+", "1", "]", "#tmp=zeros(len(yin[0]))", "#for i in xrange(len(yin)):", "# for j in xrange(len(yin[i])):", "# 
tmp[j]+=yin[i][j]", "tmp2", "=", "sum", "(", "yin", ",", "axis", "=", "0", ")", "# SJONES sum along axis instead of nested loop", "tmp", "=", "old_div", "(", "tmp2", ",", "len", "(", "yin", ")", ")", "yin", "=", "tmp", "elif", "plotType", "==", "'PPN'", ":", "ain", "=", "self", ".", "get", "(", "'A'", ",", "cycle", ")", "zin", "=", "self", ".", "get", "(", "'Z'", ",", "cycle", ")", "nin", "=", "ain", "-", "zin", "yin", "=", "self", ".", "get", "(", "'ABUNDANCE_MF'", ",", "cycle", ")", "isom", "=", "self", ".", "get", "(", "'ISOM'", ",", "cycle", ")", "if", "mass_range", "!=", "None", ":", "tmpA", "=", "[", "]", "tmpZ", "=", "[", "]", "tmpIsom", "=", "[", "]", "tmpyps", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "nin", ")", ")", ":", "if", "(", "ain", "[", "i", "]", ">", "mass_range", "[", "0", "]", "and", "ain", "[", "i", "]", "<", "mass_range", "[", "1", "]", ")", "or", "(", "ain", "[", "i", "]", "==", "mass_range", "[", "0", "]", "or", "ain", "[", "i", "]", "==", "mass_range", "[", "1", "]", ")", ":", "tmpA", ".", "append", "(", "nin", "[", "i", "]", ")", "tmpZ", ".", "append", "(", "zin", "[", "i", "]", ")", "tmpIsom", ".", "append", "(", "isom", "[", "i", "]", ")", "tmpyps", ".", "append", "(", "yin", "[", "i", "]", ")", "zin", "=", "tmpZ", "nin", "=", "tmpA", "yin", "=", "tmpyps", "isom", "=", "tmpIsom", "else", ":", "raise", "IOError", "(", "\"This method, abu_chart, is not supported by this class\"", ")", "# in case we call from ipython -pylab, turn interactive on at end again", "turnoff", "=", "False", "if", "not", "show", ":", "try", ":", "ioff", "(", ")", "turnoff", "=", "True", "except", "NameError", ":", "turnoff", "=", "False", "nnmax", "=", "int", "(", "max", "(", "nin", ")", ")", "+", "1", "nzmax", "=", "int", "(", "max", "(", "zin", ")", ")", "+", "1", "nzycheck", "=", "zeros", "(", "[", "nnmax", ",", "nzmax", ",", "3", "]", ")", "for", "i", "in", "range", "(", "len", "(", "nin", ")", ")", ":", "if", "isom", "[", "i", "]", "==", "1", ":", "ni", "=", "int", "(", "nin", "[", "i", "]", ")", "zi", "=", "int", "(", "zin", "[", "i", "]", ")", "nzycheck", "[", "ni", ",", "zi", ",", "0", "]", "=", "1", "nzycheck", "[", "ni", ",", "zi", ",", "1", "]", "=", "yin", "[", "i", "]", "#######################################################################", "# elemental names: elname(i) is the name of element with Z=i", "elname", "=", "self", ".", "elements_names", "#### create plot", "## define axis and plot style (colormap, size, fontsize etc.)", "if", "plotaxis", "==", "[", "0", ",", "0", ",", "0", ",", "0", "]", ":", "xdim", "=", "10", "ydim", "=", "6", "else", ":", "dx", "=", "plotaxis", "[", "1", "]", "-", "plotaxis", "[", "0", "]", "dy", "=", "plotaxis", "[", "3", "]", "-", "plotaxis", "[", "2", "]", "ydim", "=", "6", "xdim", "=", "ydim", "*", "dx", "/", "dy", "params", "=", "{", "'axes.labelsize'", ":", "12", ",", "'text.fontsize'", ":", "12", ",", "'legend.fontsize'", ":", "12", ",", "'xtick.labelsize'", ":", "12", ",", "'ytick.labelsize'", ":", "12", ",", "'text.usetex'", ":", "True", "}", "#pl.rcParams.update(params) #May cause Error, someting to do with tex", "if", "mov", ":", "fig", "=", "drawfig", "fig", ".", "set_size_inches", "(", "xdim", ",", "ydim", ")", "artists", "=", "[", "]", "else", ":", "fig", "=", "pl", ".", "figure", "(", "ifig", ",", "figsize", "=", "(", "xdim", ",", "ydim", ")", ",", "dpi", "=", "100", ")", "axx", "=", "0.10", "axy", "=", "0.10", "axw", "=", "0.85", "axh", "=", "0.8", "if", "mov", ":", "ax", "=", "drawax", "else", ":", "ax", 
"=", "pl", ".", "axes", "(", "[", "axx", ",", "axy", ",", "axw", ",", "axh", "]", ")", "# Tick marks", "xminorlocator", "=", "MultipleLocator", "(", "1", ")", "xmajorlocator", "=", "MultipleLocator", "(", "5", ")", "ax", ".", "xaxis", ".", "set_major_locator", "(", "xmajorlocator", ")", "ax", ".", "xaxis", ".", "set_minor_locator", "(", "xminorlocator", ")", "yminorlocator", "=", "MultipleLocator", "(", "1", ")", "ymajorlocator", "=", "MultipleLocator", "(", "5", ")", "ax", ".", "yaxis", ".", "set_major_locator", "(", "ymajorlocator", ")", "ax", ".", "yaxis", ".", "set_minor_locator", "(", "yminorlocator", ")", "# color map choice for abundances", "cmapa", "=", "cm", ".", "get_cmap", "(", "name", "=", "color_map", ")", "# color map choice for arrows", "cmapr", "=", "cm", ".", "autumn", "# if a value is below the lower limit its set to white", "cmapa", ".", "set_under", "(", "color", "=", "'w'", ")", "cmapr", ".", "set_under", "(", "color", "=", "'w'", ")", "# set value range for abundance colors (log10(Y))", "norma", "=", "colors", ".", "Normalize", "(", "vmin", "=", "lbound", "[", "0", "]", ",", "vmax", "=", "lbound", "[", "1", "]", ")", "# set x- and y-axis scale aspect ratio to 1", "ax", ".", "set_aspect", "(", "'equal'", ")", "#print time,temp and density on top", "temp", "=", "' '", "#'%8.3e' %ff['temp']", "time", "=", "' '", "#'%8.3e' %ff['time']", "dens", "=", "' '", "#'%8.3e' %ff['dens']", "#May cause Error, someting to do with tex", "'''\n #box1 = TextArea(\"t : \" + time + \" s~~/~~T$_{9}$ : \" + temp + \"~~/~~$\\\\rho_{b}$ : \" \\\n # + dens + ' g/cm$^{3}$', textprops=dict(color=\"k\"))\n anchored_box = AnchoredOffsetbox(loc=3,\n child=box1, pad=0.,\n frameon=False,\n bbox_to_anchor=(0., 1.02),\n bbox_transform=ax.transAxes,\n borderpad=0.,\n )\n ax.add_artist(anchored_box)\n '''", "## Colour bar plotted", "patches", "=", "[", "]", "color", "=", "[", "]", "for", "i", "in", "range", "(", "nzmax", ")", ":", "for", "j", "in", "range", "(", "nnmax", ")", ":", "if", "nzycheck", "[", "j", ",", "i", ",", "0", "]", "==", "1", ":", "xy", "=", "j", "-", "0.5", ",", "i", "-", "0.5", "rect", "=", "Rectangle", "(", "xy", ",", "1", ",", "1", ",", ")", "# abundance", "yab", "=", "nzycheck", "[", "j", ",", "i", ",", "1", "]", "if", "yab", "==", "0", ":", "yab", "=", "1e-99", "col", "=", "log10", "(", "yab", ")", "patches", ".", "append", "(", "rect", ")", "color", ".", "append", "(", "col", ")", "p", "=", "PatchCollection", "(", "patches", ",", "cmap", "=", "cmapa", ",", "norm", "=", "norma", ")", "p", ".", "set_array", "(", "array", "(", "color", ")", ")", "p", ".", "set_zorder", "(", "1", ")", "if", "mov", ":", "artist1", "=", "ax", ".", "add_collection", "(", "p", ")", "artists", ".", "append", "(", "artist1", ")", "else", ":", "ax", ".", "add_collection", "(", "p", ")", "if", "not", "mov", ":", "cb", "=", "pl", ".", "colorbar", "(", "p", ")", "# colorbar label", "cb", ".", "set_label", "(", "'log$_{10}$(X)'", ")", "# plot file name", "graphname", "=", "'abundance-chart'", "+", "str", "(", "cycle", ")", "# Add black frames for stable isotopes", "if", "boxstable", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "stable_el", ")", ")", ":", "if", "i", "==", "0", ":", "continue", "tmp", "=", "self", ".", "stable_el", "[", "i", "]", "try", ":", "zz", "=", "self", ".", "elements_names", ".", "index", "(", "tmp", "[", "0", "]", ")", "#charge", "except", ":", "continue", "for", "j", "in", "range", "(", "len", "(", "tmp", ")", ")", ":", "if", "j", "==", "0", ":", "continue", "nn", "=", 
"int", "(", "tmp", "[", "j", "]", ")", "#atomic mass", "nn", "=", "nn", "-", "zz", "xy", "=", "nn", "-", "0.5", ",", "zz", "-", "0.5", "rect", "=", "Rectangle", "(", "xy", ",", "1", ",", "1", ",", "ec", "=", "'k'", ",", "fc", "=", "'None'", ",", "fill", "=", "'False'", ",", "lw", "=", "3.", ")", "rect", ".", "set_zorder", "(", "2", ")", "ax", ".", "add_patch", "(", "rect", ")", "# decide which array to take for label positions", "iarr", "=", "0", "# plot element labels", "if", "ilabel", ":", "for", "z", "in", "range", "(", "nzmax", ")", ":", "try", ":", "nmin", "=", "min", "(", "argwhere", "(", "nzycheck", "[", ":", ",", "z", ",", "iarr", "]", ")", ")", "[", "0", "]", "-", "1", "ax", ".", "text", "(", "nmin", ",", "z", ",", "elname", "[", "z", "]", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "'x-small'", ",", "clip_on", "=", "True", ")", "except", "ValueError", ":", "continue", "# plot mass numbers", "if", "imlabel", ":", "for", "z", "in", "range", "(", "nzmax", ")", ":", "for", "n", "in", "range", "(", "nnmax", ")", ":", "a", "=", "z", "+", "n", "if", "nzycheck", "[", "n", ",", "z", ",", "iarr", "]", "==", "1", ":", "ax", ".", "text", "(", "n", ",", "z", ",", "a", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "imlabel_fontsize", ",", "clip_on", "=", "True", ")", "# plot lines at magic numbers", "if", "imagic", ":", "ixymagic", "=", "[", "2", ",", "8", ",", "20", ",", "28", ",", "50", ",", "82", ",", "126", "]", "nmagic", "=", "len", "(", "ixymagic", ")", "for", "magic", "in", "ixymagic", ":", "if", "magic", "<=", "nzmax", ":", "try", ":", "xnmin", "=", "min", "(", "argwhere", "(", "nzycheck", "[", ":", ",", "magic", ",", "iarr", "]", ")", ")", "[", "0", "]", "xnmax", "=", "max", "(", "argwhere", "(", "nzycheck", "[", ":", ",", "magic", ",", "iarr", "]", ")", ")", "[", "0", "]", "line", "=", "ax", ".", "plot", "(", "[", "xnmin", ",", "xnmax", "]", ",", "[", "magic", ",", "magic", "]", ",", "lw", "=", "3.", ",", "color", "=", "'r'", ",", "ls", "=", "'-'", ")", "except", "ValueError", ":", "dummy", "=", "0", "if", "magic", "<=", "nnmax", ":", "try", ":", "yzmin", "=", "min", "(", "argwhere", "(", "nzycheck", "[", "magic", ",", ":", ",", "iarr", "]", ")", ")", "[", "0", "]", "yzmax", "=", "max", "(", "argwhere", "(", "nzycheck", "[", "magic", ",", ":", ",", "iarr", "]", ")", ")", "[", "0", "]", "line", "=", "ax", ".", "plot", "(", "[", "magic", ",", "magic", "]", ",", "[", "yzmin", ",", "yzmax", "]", ",", "lw", "=", "3.", ",", "color", "=", "'r'", ",", "ls", "=", "'-'", ")", "except", "ValueError", ":", "dummy", "=", "0", "# set axis limits", "if", "plotaxis", "==", "[", "0", ",", "0", ",", "0", ",", "0", "]", ":", "xmax", "=", "max", "(", "nin", ")", "ymax", "=", "max", "(", "zin", ")", "ax", ".", "axis", "(", "[", "-", "0.5", ",", "xmax", "+", "0.5", ",", "-", "0.5", ",", "ymax", "+", "0.5", "]", ")", "else", ":", "ax", ".", "axis", "(", "plotaxis", ")", "# set x- and y-axis label", "ax", ".", "set_xlabel", "(", "'neutron number (A-Z)'", ")", "ax", ".", "set_ylabel", "(", "'proton number Z'", ")", "if", "not", "mov", ":", "pl", ".", "title", "(", "'Isotopic Chart for cycle '", "+", "str", "(", "int", "(", "cycle", ")", ")", ")", "if", "savefig", ":", "if", "path", "is", "not", "None", ":", "graphname", "=", "os", ".", "path", ".", "join", "(", "path", ",", "graphname", ")", "fig", ".", "savefig", "(", "graphname", ")", "print", "(", "graphname", ",", "'is done'", 
")", "if", "show", ":", "pl", ".", "show", "(", ")", "if", "turnoff", ":", "ion", "(", ")", "if", "mov", ":", "return", "p", ",", "artists", "else", ":", "return" ]
Plots an abundance chart Parameters ---------- cycle : string, integer or list The cycle we are looking in. If it is a list of cycles, this method will then do a plot for each of these cycles and save them all to a file. mass_range : list, optional A 1x2 array containing the lower and upper mass range. If this is an instance of abu_vector this will only plot isotopes that have an atomic mass within this range. This will throw an error if this range does not make sense, i.e. [45,2]. If None, it will plot over the entire range. The default is None. ilabel : boolean, optional Elemental labels off/on. The default is True. imlabel : boolean, optional Label for isotopic masses off/on. The default is True. imlabel_fontsize : integer, optional Fontsize for isotopic mass labels. The default is 8. imagic : boolean, optional Turn lines for magic numbers off/on. The default is False. boxstable : boolean, optional Plot the black boxes around the stable elements. The default is True. lbound : tuple, optional Boundaries for the colour spectrum plotted. The default is (-12,0). plotaxis : list, optional Set axis limits. If [0, 0, 0, 0] the complete range in (N,Z) will be plotted. It equates to [xMin, xMax, yMin, yMax]. The default is [0, 0, 0, 0]. show : boolean, optional Whether the plot should be displayed. Useful when saving multiple plots using abu_chartMulti. The default is True. color_map : string, optional Color map according to choices in matplotlib (e.g. www.scipy.org/Cookbook/Matplotlib/Show_colormaps). The default is 'jet'. ifig : integer, optional Figure number; if ifig is None it will be set to the cycle number. The default is None. savefig : boolean, optional Whether or not to save the figure. The default is False. drawfig, drawax, mov : optional, not necessary for user to set these variables The figure and axes containers to be drawn on, and whether or not a movie is being made (only True when se.movie is called, which sets mov to True automatically). path : path where to save the figure.
[ "Plots", "an", "abundance", "chart" ]
python
train
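A hedged usage sketch for the abundance-chart routine documented above. The module, class, and method names (nugridpy, nugridse.se, abu_chart) and the data path are assumptions for illustration; only the keyword arguments come from the parameter list above.

from nugridpy import nugridse as mp   # assumed package and module names

pt = mp.se('/path/to/H5_out')            # assumed data directory
pt.abu_chart(100,                        # cycle to plot
             plotaxis=[0, 80, 0, 60],    # [xMin, xMax, yMin, yMax] in (N, Z)
             ilabel=True, imlabel=False,
             lbound=(-12, 0), savefig=False)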
gem/oq-engine
openquake/commands/plot.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/plot.py#L120-L140
def make_figure_source_geom(extractors, what): """ Extract the geometry of a given sources Example: http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3 """ import matplotlib.pyplot as plt fig = plt.figure() [ex] = extractors sitecol = ex.get('sitecol') geom_by_src = vars(ex.get(what)) ax = fig.add_subplot(1, 1, 1) ax.grid(True) ax.set_xlabel('Source') bmap = basemap('cyl', sitecol) for src, geom in geom_by_src.items(): if src != 'array': bmap.plot(geom['lon'], geom['lat'], label=src) bmap.plot(sitecol['lon'], sitecol['lat'], 'x') ax.legend() return plt
[ "def", "make_figure_source_geom", "(", "extractors", ",", "what", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "fig", "=", "plt", ".", "figure", "(", ")", "[", "ex", "]", "=", "extractors", "sitecol", "=", "ex", ".", "get", "(", "'sitecol'", ")", "geom_by_src", "=", "vars", "(", "ex", ".", "get", "(", "what", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "ax", ".", "grid", "(", "True", ")", "ax", ".", "set_xlabel", "(", "'Source'", ")", "bmap", "=", "basemap", "(", "'cyl'", ",", "sitecol", ")", "for", "src", ",", "geom", "in", "geom_by_src", ".", "items", "(", ")", ":", "if", "src", "!=", "'array'", ":", "bmap", ".", "plot", "(", "geom", "[", "'lon'", "]", ",", "geom", "[", "'lat'", "]", ",", "label", "=", "src", ")", "bmap", ".", "plot", "(", "sitecol", "[", "'lon'", "]", ",", "sitecol", "[", "'lat'", "]", ",", "'x'", ")", "ax", ".", "legend", "(", ")", "return", "plt" ]
Extract the geometry of the given sources. Example: http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
[ "Extract", "the", "geometry", "of", "a", "given", "sources", "Example", ":", "http", ":", "//", "127", ".", "0", ".", "0", ".", "1", ":", "8800", "/", "v1", "/", "calc", "/", "30", "/", "extract", "/", "source_geom", "/", "1", "2", "3" ]
python
train
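A sketch of how the figure helper above might be driven. The Extractor import path and the calculation id are assumptions; the extract key 'source_geom/1,2,3' is taken from the URL in the docstring.

from openquake.calculators.extract import Extractor   # assumed import path

ex = Extractor(30)                                     # datastore of calculation #30 (illustrative id)
plt = make_figure_source_geom([ex], 'source_geom/1,2,3')
plt.show()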
cloudtools/stacker
stacker/actions/build.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/actions/build.py#L41-L59
def should_update(stack): """Tests whether a stack should be submitted for updates to CF. Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be updated, return True. """ if stack.locked: if not stack.force: logger.debug("Stack %s locked and not in --force list. " "Refusing to update.", stack.name) return False else: logger.debug("Stack %s locked, but is in --force " "list.", stack.name) return True
[ "def", "should_update", "(", "stack", ")", ":", "if", "stack", ".", "locked", ":", "if", "not", "stack", ".", "force", ":", "logger", ".", "debug", "(", "\"Stack %s locked and not in --force list. \"", "\"Refusing to update.\"", ",", "stack", ".", "name", ")", "return", "False", "else", ":", "logger", ".", "debug", "(", "\"Stack %s locked, but is in --force \"", "\"list.\"", ",", "stack", ".", "name", ")", "return", "True" ]
Tests whether a stack should be submitted for updates to CF. Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be updated, return True.
[ "Tests", "whether", "a", "stack", "should", "be", "submitted", "for", "updates", "to", "CF", "." ]
python
train
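A small illustration of the locked/--force behaviour. The real argument is a stacker.stack.Stack; the namedtuple below only mimics the attributes the function reads.

from collections import namedtuple
from stacker.actions.build import should_update

FakeStack = namedtuple('FakeStack', 'name locked force')

print(should_update(FakeStack('vpc', locked=False, force=False)))  # True: unlocked stacks always update
print(should_update(FakeStack('vpc', locked=True, force=False)))   # False: locked and not forced
print(should_update(FakeStack('vpc', locked=True, force=True)))    # True: locked but in the --force list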
quora/qcore
qcore/asserts.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/asserts.py#L104-L130
def assert_eq(expected, actual, message=None, tolerance=None, extra=None): """Raises an AssertionError if expected != actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is larger than the tolerance. """ if tolerance is None: assert expected == actual, _assert_fail_message( message, expected, actual, "!=", extra ) else: assert isinstance(tolerance, _number_types), ( "tolerance parameter to assert_eq must be a number: %r" % tolerance ) assert isinstance(expected, _number_types) and isinstance( actual, _number_types ), ( "parameters must be numbers when tolerance is specified: %r, %r" % (expected, actual) ) diff = abs(expected - actual) assert diff <= tolerance, _assert_fail_message( message, expected, actual, "is more than %r away from" % tolerance, extra )
[ "def", "assert_eq", "(", "expected", ",", "actual", ",", "message", "=", "None", ",", "tolerance", "=", "None", ",", "extra", "=", "None", ")", ":", "if", "tolerance", "is", "None", ":", "assert", "expected", "==", "actual", ",", "_assert_fail_message", "(", "message", ",", "expected", ",", "actual", ",", "\"!=\"", ",", "extra", ")", "else", ":", "assert", "isinstance", "(", "tolerance", ",", "_number_types", ")", ",", "(", "\"tolerance parameter to assert_eq must be a number: %r\"", "%", "tolerance", ")", "assert", "isinstance", "(", "expected", ",", "_number_types", ")", "and", "isinstance", "(", "actual", ",", "_number_types", ")", ",", "(", "\"parameters must be numbers when tolerance is specified: %r, %r\"", "%", "(", "expected", ",", "actual", ")", ")", "diff", "=", "abs", "(", "expected", "-", "actual", ")", "assert", "diff", "<=", "tolerance", ",", "_assert_fail_message", "(", "message", ",", "expected", ",", "actual", ",", "\"is more than %r away from\"", "%", "tolerance", ",", "extra", ")" ]
Raises an AssertionError if expected != actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is larger than the tolerance.
[ "Raises", "an", "AssertionError", "if", "expected", "!", "=", "actual", "." ]
python
train
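Two quick calls showing the exact and the tolerance-based comparison paths of assert_eq.

from qcore.asserts import assert_eq

assert_eq('abc', 'abc')                 # equal values pass
assert_eq(1.0, 1.004, tolerance=0.01)   # passes: |1.0 - 1.004| <= 0.01
# assert_eq(1.0, 1.1, tolerance=0.01)   # would raise AssertionError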
PMEAL/OpenPNM
openpnm/utils/petsc.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/utils/petsc.py#L49-L97
def _initialize_A(self): r""" This method creates the petsc sparse coefficients matrix from the OpenPNM scipy one. The method also equally decomposes the matrix at certain rows into different blocks (each block contains all the columns) and distributes them over the pre-assigned cores for parallel computing. The method can be used in serial. """ # Matrix of coefficients size self.m, self.n = (self.A).shape # Create a petsc sparse matrix self.petsc_A = PETSc.Mat() self.petsc_A.create(PETSc.COMM_WORLD) self.petsc_A.setSizes([self.m, self.n]) self.petsc_A.setType('aij') # sparse self.petsc_A.setUp() # Pre-allocate memory for the coefficients matrix. # Optional, but needed in the case where the # matrix does not already exist. # self.petsc_A.setPreallocationNNZ([ \ # sp.sparse.csr_matrix.getnnz(A,axis=0)]) # Loop over owned block of rows on this processor # and insert entry values (for parallel computing). self.Istart, self.Iend = self.petsc_A.getOwnershipRange() # Assign values to the coefficients matrix from the scipy # sparse csr one: petscMat = \ # PETSc.Mat().createAIJ(size=existingMat.shape, csr= \ # (existingMat.indptr,existingMat.indices,existingMat.data)) size_tmp = self.A.shape csr1 = (self.A.indptr[self.Istart:self.Iend+1] - self.A.indptr[self.Istart]) ind1 = self.A.indptr[self.Istart] ind2 = self.A.indptr[self.Iend] csr2 = self.A.indices[ind1:ind2] csr3 = self.A.data[ind1:ind2] self.petsc_A = PETSc.Mat().createAIJ(size=size_tmp, csr=(csr1, csr2, csr3)) # Communicate off-processor values # and setup internal data structures # for performing parallel operations self.petsc_A.assemblyBegin() self.petsc_A.assemblyEnd()
[ "def", "_initialize_A", "(", "self", ")", ":", "# Matrix of coefficients size", "self", ".", "m", ",", "self", ".", "n", "=", "(", "self", ".", "A", ")", ".", "shape", "# Create a petsc sparse matrix", "self", ".", "petsc_A", "=", "PETSc", ".", "Mat", "(", ")", "self", ".", "petsc_A", ".", "create", "(", "PETSc", ".", "COMM_WORLD", ")", "self", ".", "petsc_A", ".", "setSizes", "(", "[", "self", ".", "m", ",", "self", ".", "n", "]", ")", "self", ".", "petsc_A", ".", "setType", "(", "'aij'", ")", "# sparse", "self", ".", "petsc_A", ".", "setUp", "(", ")", "# Pre-allocate memory for the coefficients matrix.", "# Optional, but needed in the case where the", "# matrix does not already exist.", "# self.petsc_A.setPreallocationNNZ([ \\", "# sp.sparse.csr_matrix.getnnz(A,axis=0)])", "# Loop over owned block of rows on this processor", "# and insert entry values (for parallel computing).", "self", ".", "Istart", ",", "self", ".", "Iend", "=", "self", ".", "petsc_A", ".", "getOwnershipRange", "(", ")", "# Assign values to the coefficients matrix from the scipy", "# sparse csr one: petscMat = \\", "# PETSc.Mat().createAIJ(size=existingMat.shape, csr= \\", "# (existingMat.indptr,existingMat.indices,existingMat.data))", "size_tmp", "=", "self", ".", "A", ".", "shape", "csr1", "=", "(", "self", ".", "A", ".", "indptr", "[", "self", ".", "Istart", ":", "self", ".", "Iend", "+", "1", "]", "-", "self", ".", "A", ".", "indptr", "[", "self", ".", "Istart", "]", ")", "ind1", "=", "self", ".", "A", ".", "indptr", "[", "self", ".", "Istart", "]", "ind2", "=", "self", ".", "A", ".", "indptr", "[", "self", ".", "Iend", "]", "csr2", "=", "self", ".", "A", ".", "indices", "[", "ind1", ":", "ind2", "]", "csr3", "=", "self", ".", "A", ".", "data", "[", "ind1", ":", "ind2", "]", "self", ".", "petsc_A", "=", "PETSc", ".", "Mat", "(", ")", ".", "createAIJ", "(", "size", "=", "size_tmp", ",", "csr", "=", "(", "csr1", ",", "csr2", ",", "csr3", ")", ")", "# Communicate off-processor values", "# and setup internal data structures", "# for performing parallel operations", "self", ".", "petsc_A", ".", "assemblyBegin", "(", ")", "self", ".", "petsc_A", ".", "assemblyEnd", "(", ")" ]
r""" This method creates the petsc sparse coefficients matrix from the OpenPNM scipy one. The method also equally decomposes the matrix at certain rows into different blocks (each block contains all the columns) and distributes them over the pre-assigned cores for parallel computing. The method can be used in serial.
[ "r", "This", "method", "creates", "the", "petsc", "sparse", "coefficients", "matrix", "from", "the", "OpenPNM", "scipy", "one", ".", "The", "method", "also", "equally", "decomposes", "the", "matrix", "at", "certain", "rows", "into", "different", "blocks", "(", "each", "block", "contains", "all", "the", "columns", ")", "and", "distributes", "them", "over", "the", "pre", "-", "assigned", "cores", "for", "parallel", "computing", ".", "The", "method", "can", "be", "used", "in", "serial", "." ]
python
train
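The core step above, wrapping a SciPy CSR matrix as a PETSc AIJ matrix, can be sketched outside the OpenPNM class as follows. It assumes petsc4py is installed and initialised; on some builds the index arrays may need casting to PETSc's integer type.

import scipy.sparse as sp
from petsc4py import PETSc

A = sp.random(100, 100, density=0.05, format='csr')   # illustrative coefficient matrix
petsc_A = PETSc.Mat().createAIJ(size=A.shape,
                                csr=(A.indptr, A.indices, A.data))
petsc_A.assemblyBegin()   # communicate off-processor values
petsc_A.assemblyEnd()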
mikedh/trimesh
trimesh/exchange/ply.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/ply.py#L289-L419
def elements_to_kwargs(elements, fix_texture, image): """ Given an elements data structure, extract the keyword arguments that a Trimesh object constructor will expect. Parameters ------------ elements: OrderedDict object, with fields and data loaded Returns ----------- kwargs: dict, with keys for Trimesh constructor. eg: mesh = trimesh.Trimesh(**kwargs) """ kwargs = {'metadata': {'ply_raw': elements}} vertices = np.column_stack([elements['vertex']['data'][i] for i in 'xyz']) if not util.is_shape(vertices, (-1, 3)): raise ValueError('Vertices were not (n,3)!') try: face_data = elements['face']['data'] except (KeyError, ValueError): # some PLY files only include vertices face_data = None faces = None # what keys do in-the-wild exporters use for vertices index_names = ['vertex_index', 'vertex_indices'] texcoord = None if util.is_shape(face_data, (-1, (3, 4))): faces = face_data elif isinstance(face_data, dict): # get vertex indexes for i in index_names: if i in face_data: faces = face_data[i] break # if faces have UV coordinates defined use them if 'texcoord' in face_data: texcoord = face_data['texcoord'] elif isinstance(face_data, np.ndarray): face_blob = elements['face']['data'] # some exporters set this name to 'vertex_index' # and some others use 'vertex_indices' but we really # don't care about the name unless there are multiple if len(face_blob.dtype.names) == 1: name = face_blob.dtype.names[0] elif len(face_blob.dtype.names) > 1: # loop through options for i in face_blob.dtype.names: if i in index_names: name = i break # get faces faces = face_blob[name]['f1'] try: texcoord = face_blob['texcoord']['f1'] except (ValueError, KeyError): # accessing numpy arrays with named fields # incorrectly is a ValueError pass # PLY stores texture coordinates per- face which is # slightly annoying, as we have to then figure out # which vertices have the same position but different UV expected = (faces.shape[0], faces.shape[1] * 2) if (image is not None and texcoord is not None and texcoord.shape == expected): # vertices with the same position but different # UV coordinates can't be merged without it # looking like it went through a woodchipper # in- the- wild PLY comes with things merged that # probably shouldn't be so disconnect vertices if fix_texture: # reshape to correspond with flattened faces uv = texcoord.reshape((-1, 2)) # round UV to OOM 10^4 as they are pixel coordinates # and more precision is not necessary or desirable search = np.column_stack(( vertices[faces.reshape(-1)], (uv * 1e4).round())) # find vertices which have the same position AND UV unique, inverse = grouping.unique_rows(search) # set vertices, faces, and UV to the new values vertices = search[:, :3][unique] faces = inverse.reshape((-1, 3)) uv = uv[unique] else: # don't alter vertices, UV will look like crap # if it was exported with vertices merged uv = np.zeros((len(vertices), 2)) uv[faces.reshape(-1)] = texcoord.reshape((-1, 2)) # create the visuals object for the texture kwargs['visual'] = visual.texture.TextureVisuals( uv=uv, image=image) # kwargs for Trimesh or PointCloud kwargs.update({'faces': faces, 'vertices': vertices}) # if both vertex and face color are defined pick the one # with the most going on colors = [] signal = [] if kwargs['faces'] is not None: f_color, f_signal = element_colors(elements['face']) colors.append({'face_colors': f_color}) signal.append(f_signal) if kwargs['vertices'] is not None: v_color, v_signal = element_colors(elements['vertex']) colors.append({'vertex_colors': v_color}) 
signal.append(v_signal) # add the winning colors to the result kwargs.update(colors[np.argmax(signal)]) return kwargs
[ "def", "elements_to_kwargs", "(", "elements", ",", "fix_texture", ",", "image", ")", ":", "kwargs", "=", "{", "'metadata'", ":", "{", "'ply_raw'", ":", "elements", "}", "}", "vertices", "=", "np", ".", "column_stack", "(", "[", "elements", "[", "'vertex'", "]", "[", "'data'", "]", "[", "i", "]", "for", "i", "in", "'xyz'", "]", ")", "if", "not", "util", ".", "is_shape", "(", "vertices", ",", "(", "-", "1", ",", "3", ")", ")", ":", "raise", "ValueError", "(", "'Vertices were not (n,3)!'", ")", "try", ":", "face_data", "=", "elements", "[", "'face'", "]", "[", "'data'", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "# some PLY files only include vertices", "face_data", "=", "None", "faces", "=", "None", "# what keys do in-the-wild exporters use for vertices", "index_names", "=", "[", "'vertex_index'", ",", "'vertex_indices'", "]", "texcoord", "=", "None", "if", "util", ".", "is_shape", "(", "face_data", ",", "(", "-", "1", ",", "(", "3", ",", "4", ")", ")", ")", ":", "faces", "=", "face_data", "elif", "isinstance", "(", "face_data", ",", "dict", ")", ":", "# get vertex indexes", "for", "i", "in", "index_names", ":", "if", "i", "in", "face_data", ":", "faces", "=", "face_data", "[", "i", "]", "break", "# if faces have UV coordinates defined use them", "if", "'texcoord'", "in", "face_data", ":", "texcoord", "=", "face_data", "[", "'texcoord'", "]", "elif", "isinstance", "(", "face_data", ",", "np", ".", "ndarray", ")", ":", "face_blob", "=", "elements", "[", "'face'", "]", "[", "'data'", "]", "# some exporters set this name to 'vertex_index'", "# and some others use 'vertex_indices' but we really", "# don't care about the name unless there are multiple", "if", "len", "(", "face_blob", ".", "dtype", ".", "names", ")", "==", "1", ":", "name", "=", "face_blob", ".", "dtype", ".", "names", "[", "0", "]", "elif", "len", "(", "face_blob", ".", "dtype", ".", "names", ")", ">", "1", ":", "# loop through options", "for", "i", "in", "face_blob", ".", "dtype", ".", "names", ":", "if", "i", "in", "index_names", ":", "name", "=", "i", "break", "# get faces", "faces", "=", "face_blob", "[", "name", "]", "[", "'f1'", "]", "try", ":", "texcoord", "=", "face_blob", "[", "'texcoord'", "]", "[", "'f1'", "]", "except", "(", "ValueError", ",", "KeyError", ")", ":", "# accessing numpy arrays with named fields", "# incorrectly is a ValueError", "pass", "# PLY stores texture coordinates per- face which is", "# slightly annoying, as we have to then figure out", "# which vertices have the same position but different UV", "expected", "=", "(", "faces", ".", "shape", "[", "0", "]", ",", "faces", ".", "shape", "[", "1", "]", "*", "2", ")", "if", "(", "image", "is", "not", "None", "and", "texcoord", "is", "not", "None", "and", "texcoord", ".", "shape", "==", "expected", ")", ":", "# vertices with the same position but different", "# UV coordinates can't be merged without it", "# looking like it went through a woodchipper", "# in- the- wild PLY comes with things merged that", "# probably shouldn't be so disconnect vertices", "if", "fix_texture", ":", "# reshape to correspond with flattened faces", "uv", "=", "texcoord", ".", "reshape", "(", "(", "-", "1", ",", "2", ")", ")", "# round UV to OOM 10^4 as they are pixel coordinates", "# and more precision is not necessary or desirable", "search", "=", "np", ".", "column_stack", "(", "(", "vertices", "[", "faces", ".", "reshape", "(", "-", "1", ")", "]", ",", "(", "uv", "*", "1e4", ")", ".", "round", "(", ")", ")", ")", "# find vertices which have the same 
position AND UV", "unique", ",", "inverse", "=", "grouping", ".", "unique_rows", "(", "search", ")", "# set vertices, faces, and UV to the new values", "vertices", "=", "search", "[", ":", ",", ":", "3", "]", "[", "unique", "]", "faces", "=", "inverse", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "uv", "=", "uv", "[", "unique", "]", "else", ":", "# don't alter vertices, UV will look like crap", "# if it was exported with vertices merged", "uv", "=", "np", ".", "zeros", "(", "(", "len", "(", "vertices", ")", ",", "2", ")", ")", "uv", "[", "faces", ".", "reshape", "(", "-", "1", ")", "]", "=", "texcoord", ".", "reshape", "(", "(", "-", "1", ",", "2", ")", ")", "# create the visuals object for the texture", "kwargs", "[", "'visual'", "]", "=", "visual", ".", "texture", ".", "TextureVisuals", "(", "uv", "=", "uv", ",", "image", "=", "image", ")", "# kwargs for Trimesh or PointCloud", "kwargs", ".", "update", "(", "{", "'faces'", ":", "faces", ",", "'vertices'", ":", "vertices", "}", ")", "# if both vertex and face color are defined pick the one", "# with the most going on", "colors", "=", "[", "]", "signal", "=", "[", "]", "if", "kwargs", "[", "'faces'", "]", "is", "not", "None", ":", "f_color", ",", "f_signal", "=", "element_colors", "(", "elements", "[", "'face'", "]", ")", "colors", ".", "append", "(", "{", "'face_colors'", ":", "f_color", "}", ")", "signal", ".", "append", "(", "f_signal", ")", "if", "kwargs", "[", "'vertices'", "]", "is", "not", "None", ":", "v_color", ",", "v_signal", "=", "element_colors", "(", "elements", "[", "'vertex'", "]", ")", "colors", ".", "append", "(", "{", "'vertex_colors'", ":", "v_color", "}", ")", "signal", ".", "append", "(", "v_signal", ")", "# add the winning colors to the result", "kwargs", ".", "update", "(", "colors", "[", "np", ".", "argmax", "(", "signal", ")", "]", ")", "return", "kwargs" ]
Given an elements data structure, extract the keyword arguments that a Trimesh object constructor will expect. Parameters ------------ elements: OrderedDict object, with fields and data loaded Returns ----------- kwargs: dict, with keys for Trimesh constructor. eg: mesh = trimesh.Trimesh(**kwargs)
[ "Given", "an", "elements", "data", "structure", "extract", "the", "keyword", "arguments", "that", "a", "Trimesh", "object", "constructor", "will", "expect", "." ]
python
train
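In normal use this parser is reached through trimesh's generic loader rather than called directly; a minimal sketch (the file name is a placeholder):

import trimesh

# Loading a .ply file routes through the PLY parser, which internally uses
# elements_to_kwargs() to assemble the Trimesh constructor arguments.
mesh = trimesh.load('model.ply')
print(mesh.vertices.shape, mesh.faces.shape)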
alvarogzp/telegram-bot-framework
bot/action/standard/userinfo.py
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/userinfo.py#L26-L30
def get_instance(cls, state): """:rtype: UserStorageHandler""" if cls.instance is None: cls.instance = UserStorageHandler(state) return cls.instance
[ "def", "get_instance", "(", "cls", ",", "state", ")", ":", "if", "cls", ".", "instance", "is", "None", ":", "cls", ".", "instance", "=", "UserStorageHandler", "(", "state", ")", "return", "cls", ".", "instance" ]
:rtype: UserStorageHandler
[ ":", "rtype", ":", "UserStorageHandler" ]
python
train
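get_instance above is a lazy singleton accessor; a generic, self-contained sketch of the same pattern:

class LazySingleton:
    instance = None

    def __init__(self, state):
        self.state = state

    @classmethod
    def get_instance(cls, state):
        # create the shared object on first use, then keep returning it
        if cls.instance is None:
            cls.instance = cls(state)
        return cls.instance

a = LazySingleton.get_instance('state-a')
b = LazySingleton.get_instance('ignored')   # same object as a
assert a is b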
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L1691-L1710
def get_machine_group(self, project_name, group_name): """ get machine group in a project Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type group_name: string :param group_name: the group name to get :return: GetMachineGroupResponse :raise: LogException """ headers = {} params = {} resource = "/machinegroups/" + group_name (resp, headers) = self._send("GET", project_name, None, resource, params, headers) return GetMachineGroupResponse(resp, headers)
[ "def", "get_machine_group", "(", "self", ",", "project_name", ",", "group_name", ")", ":", "headers", "=", "{", "}", "params", "=", "{", "}", "resource", "=", "\"/machinegroups/\"", "+", "group_name", "(", "resp", ",", "headers", ")", "=", "self", ".", "_send", "(", "\"GET\"", ",", "project_name", ",", "None", ",", "resource", ",", "params", ",", "headers", ")", "return", "GetMachineGroupResponse", "(", "resp", ",", "headers", ")" ]
get machine group in a project Unsuccessful operation will cause a LogException. :type project_name: string :param project_name: the Project name :type group_name: string :param group_name: the group name to get :return: GetMachineGroupResponse :raise: LogException
[ "get", "machine", "group", "in", "a", "project", "Unsuccessful", "opertaion", "will", "cause", "an", "LogException", ".", ":", "type", "project_name", ":", "string", ":", "param", "project_name", ":", "the", "Project", "name", ":", "type", "group_name", ":", "string", ":", "param", "group_name", ":", "the", "group", "name", "to", "get", ":", "return", ":", "GetMachineGroupResponse", ":", "raise", ":", "LogException" ]
python
train
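A hedged call sketch for get_machine_group. The endpoint, credentials, project and group names are placeholders, and the response's log_print helper is assumed from the SDK's usual response interface.

from aliyun.log.logclient import LogClient   # module path taken from the record

client = LogClient('cn-hangzhou.log.aliyuncs.com', '<access_key_id>', '<access_key_secret>')
resp = client.get_machine_group('my-project', 'my-machine-group')
resp.log_print()   # assumed convenience method for dumping the response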
KartikTalwar/Duolingo
duolingo.py
https://github.com/KartikTalwar/Duolingo/blob/0f7e9a0d4bfa864ade82890fca3789679ef38bee/duolingo.py#L237-L255
def get_languages(self, abbreviations=False): """ Get praticed languages. :param abbreviations: Get language as abbreviation or not :type abbreviations: bool :return: List of languages :rtype: list of str """ data = [] for lang in self.user_data.languages: if lang['learning']: if abbreviations: data.append(lang['language']) else: data.append(lang['language_string']) return data
[ "def", "get_languages", "(", "self", ",", "abbreviations", "=", "False", ")", ":", "data", "=", "[", "]", "for", "lang", "in", "self", ".", "user_data", ".", "languages", ":", "if", "lang", "[", "'learning'", "]", ":", "if", "abbreviations", ":", "data", ".", "append", "(", "lang", "[", "'language'", "]", ")", "else", ":", "data", ".", "append", "(", "lang", "[", "'language_string'", "]", ")", "return", "data" ]
Get practiced languages. :param abbreviations: Get language as abbreviation or not :type abbreviations: bool :return: List of languages :rtype: list of str
[ "Get", "praticed", "languages", "." ]
python
train
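A short usage sketch; the credentials are placeholders and the constructor signature is assumed from the library's typical usage.

from duolingo import Duolingo

lingo = Duolingo('username', 'password')        # placeholder credentials; logs in on construction
print(lingo.get_languages())                    # e.g. ['French', 'German']
print(lingo.get_languages(abbreviations=True))  # e.g. ['fr', 'de']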
pystorm/pystorm
pystorm/bolt.py
https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L496-L503
def _batch_entry_run(self): """The inside of ``_batch_entry``'s infinite loop. Separated out so it can be properly unit tested. """ time.sleep(self.secs_between_batches) with self._batch_lock: self.process_batches()
[ "def", "_batch_entry_run", "(", "self", ")", ":", "time", ".", "sleep", "(", "self", ".", "secs_between_batches", ")", "with", "self", ".", "_batch_lock", ":", "self", ".", "process_batches", "(", ")" ]
The inside of ``_batch_entry``'s infinite loop. Separated out so it can be properly unit tested.
[ "The", "inside", "of", "_batch_entry", "s", "infinite", "loop", "." ]
python
train
sidecars/python-quickbooks
quickbooks/mixins.py
https://github.com/sidecars/python-quickbooks/blob/4cb2b6da46423bad8b32b85d87f9a97b698144fd/quickbooks/mixins.py#L211-L228
def query(cls, select, qb=None): """ :param select: QBO SQL query select statement :param qb: :return: Returns list """ if not qb: qb = QuickBooks() json_data = qb.query(select) obj_list = [] if cls.qbo_object_name in json_data["QueryResponse"]: for item_json in json_data["QueryResponse"][cls.qbo_object_name]: obj_list.append(cls.from_json(item_json)) return obj_list
[ "def", "query", "(", "cls", ",", "select", ",", "qb", "=", "None", ")", ":", "if", "not", "qb", ":", "qb", "=", "QuickBooks", "(", ")", "json_data", "=", "qb", ".", "query", "(", "select", ")", "obj_list", "=", "[", "]", "if", "cls", ".", "qbo_object_name", "in", "json_data", "[", "\"QueryResponse\"", "]", ":", "for", "item_json", "in", "json_data", "[", "\"QueryResponse\"", "]", "[", "cls", ".", "qbo_object_name", "]", ":", "obj_list", ".", "append", "(", "cls", ".", "from_json", "(", "item_json", ")", ")", "return", "obj_list" ]
:param select: QBO SQL query select statement :param qb: :return: Returns list
[ ":", "param", "select", ":", "QBO", "SQL", "query", "select", "statement", ":", "param", "qb", ":", ":", "return", ":", "Returns", "list" ]
python
train
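A hedged sketch of calling the query classmethod through a model class. The Customer import and the OAuth1-style client constructor arguments are assumptions about this fork; all values are placeholders.

from quickbooks import QuickBooks
from quickbooks.objects.customer import Customer

qb = QuickBooks(sandbox=True,
                consumer_key='...', consumer_secret='...',
                access_token='...', access_token_secret='...',
                company_id='...')
active = Customer.query("SELECT * FROM Customer WHERE Active = true", qb=qb)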
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L240-L269
def security_rules_list(security_group, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 List security rules within a network security group. :param security_group: The network security group to query. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rules_list testnsg testgroup ''' netconn = __utils__['azurearm.get_client']('network', **kwargs) try: secrules = netconn.security_rules.list( network_security_group_name=security_group, resource_group_name=resource_group ) result = __utils__['azurearm.paged_object_to_list'](secrules) except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} return result
[ "def", "security_rules_list", "(", "security_group", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "secrules", "=", "netconn", ".", "security_rules", ".", "list", "(", "network_security_group_name", "=", "security_group", ",", "resource_group_name", "=", "resource_group", ")", "result", "=", "__utils__", "[", "'azurearm.paged_object_to_list'", "]", "(", "secrules", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
.. versionadded:: 2019.2.0 List security rules within a network security group. :param security_group: The network security group to query. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rules_list testnsg testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
rochacbruno/manage
manage/auto_import.py
https://github.com/rochacbruno/manage/blob/e904c451862f036f4be8723df5704a9844103c74/manage/auto_import.py#L13-L21
def import_submodules(name, submodules=None): """Import all submodules for a package/module name""" sys.path.insert(0, name) if submodules: for submodule in submodules: import_string('{0}.{1}'.format(name, submodule)) else: for item in pkgutil.walk_packages([name]): import_string('{0}.{1}'.format(name, item[1]))
[ "def", "import_submodules", "(", "name", ",", "submodules", "=", "None", ")", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "name", ")", "if", "submodules", ":", "for", "submodule", "in", "submodules", ":", "import_string", "(", "'{0}.{1}'", ".", "format", "(", "name", ",", "submodule", ")", ")", "else", ":", "for", "item", "in", "pkgutil", ".", "walk_packages", "(", "[", "name", "]", ")", ":", "import_string", "(", "'{0}.{1}'", ".", "format", "(", "name", ",", "item", "[", "1", "]", ")", ")" ]
Import all submodules for a package/module name
[ "Import", "all", "submodules", "for", "a", "package", "/", "module", "name" ]
python
train
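Illustrative calls; the package name is hypothetical. Note the function treats name both as a sys.path entry and as the import prefix, so it is meant for a package directory reachable from the working directory.

from manage.auto_import import import_submodules

import_submodules('commands')                               # walk and import every submodule
import_submodules('commands', submodules=['db', 'serve'])   # or only the listed ones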
NicolasLM/spinach
spinach/brokers/redis.py
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L100-L118
def enqueue_jobs(self, jobs: Iterable[Job]): """Enqueue a batch of jobs.""" jobs_to_queue = list() for job in jobs: if job.should_start: job.status = JobStatus.QUEUED else: job.status = JobStatus.WAITING jobs_to_queue.append(job.serialize()) if jobs_to_queue: self._run_script( self._enqueue_job, self._to_namespaced(NOTIFICATIONS_KEY), self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)), self.namespace, self._to_namespaced(FUTURE_JOBS_KEY), *jobs_to_queue )
[ "def", "enqueue_jobs", "(", "self", ",", "jobs", ":", "Iterable", "[", "Job", "]", ")", ":", "jobs_to_queue", "=", "list", "(", ")", "for", "job", "in", "jobs", ":", "if", "job", ".", "should_start", ":", "job", ".", "status", "=", "JobStatus", ".", "QUEUED", "else", ":", "job", ".", "status", "=", "JobStatus", ".", "WAITING", "jobs_to_queue", ".", "append", "(", "job", ".", "serialize", "(", ")", ")", "if", "jobs_to_queue", ":", "self", ".", "_run_script", "(", "self", ".", "_enqueue_job", ",", "self", ".", "_to_namespaced", "(", "NOTIFICATIONS_KEY", ")", ",", "self", ".", "_to_namespaced", "(", "RUNNING_JOBS_KEY", ".", "format", "(", "self", ".", "_id", ")", ")", ",", "self", ".", "namespace", ",", "self", ".", "_to_namespaced", "(", "FUTURE_JOBS_KEY", ")", ",", "*", "jobs_to_queue", ")" ]
Enqueue a batch of jobs.
[ "Enqueue", "a", "batch", "of", "jobs", "." ]
python
train
metagriffin/asset
asset/plugin.py
https://github.com/metagriffin/asset/blob/f2c5e599cd4688f82216d4b5cfa87aab96d8bb8c/asset/plugin.py#L112-L184
def plugins(group, spec=None): # TODO: share this documentation with `../doc/plugin.rst`... ''' Returns a `PluginSet` object for the specified setuptools-style entrypoint `group`. This is just a wrapper around `pkg_resources.iter_entry_points` that allows the plugins to sort and override themselves. The optional `spec` parameter controls how and what plugins are loaded. If it is ``None`` or the special value ``'*'``, then the normal plugin loading will occur, i.e. all registered plugins will be loaded and their self-declared ordering and dependencies will be applied. Otherwise, the `spec` is taken as a comma- or whitespace-separated list of plugins to load. In this mode, the `spec` can either specify an exact list of plugins to load, in the specified order, referred to as an "absolute" spec. Otherwise, it is a "relative" spec, which indicates that it only adjusts the standard registered plugin loading. A spec is a list of either absolute or relative instructions, and they cannot be mixed. In either mode, a plugin is identified either by name for registered plugins (e.g. ``foo``), or by fully-qualified Python module and symbol name for unregistered plugins (e.g. ``package.module.symbol``). Plugins in an absolute spec are loaded in the order specified and can be optionally prefixed with the following special characters: * ``'?'`` : the specified plugin should be loaded if available. If it is not registered, cannot be found, or cannot be loaded, then it is ignored (a DEBUG log message will be emitted, however). Plugins in a relative spec are always prefixed with at least one of the following special characters: * ``'-'`` : removes the specified plugin; this does not affect plugin ordering, it only removes the plugin from the loaded list. If the plugin does not exist, no error is thrown. * ``'+'`` : adds or requires the specified plugin to the loaded set. If the plugin is not a named/registered plugin, then it will be loaded as an asset-symbol, i.e. a Python-dotted module and symbol name. If the plugin does not exist or cannot be loaded, this will throw an error. It does not affect the plugin ordering of registered plugins. * ``'/'`` : the plugin name is taken as a regular expression that will be used to match plugin names and it must terminate in a slash. Note that this must be the **last** element in the spec list. Examples: * ``'*'`` : load all registered plugins. * ``'foo,bar'`` : load the "foo" plugin, then the "bar" plugin. * ``'foo,?bar'`` : load the "foo" plugin and if the "bar" plugin exists, load it too. * ``'-zig'`` : load all registered plugins except the "zig" plugin. * ``'+pkg.foo.bar'`` : load all registered plugins and then load the "pkg.foo.bar" Python symbol. * ``'pkg.foo.bar'`` : load only the "pkg.foo.bar" Python symbol. ''' pspec = _parse_spec(spec) plugs = list(_get_registered_plugins(group, pspec)) plugs += list(_get_unregistered_plugins(group, plugs, pspec)) return PluginSet(group, spec, list(_sort_plugins(group, plugs, pspec, spec)))
[ "def", "plugins", "(", "group", ",", "spec", "=", "None", ")", ":", "# TODO: share this documentation with `../doc/plugin.rst`...", "pspec", "=", "_parse_spec", "(", "spec", ")", "plugs", "=", "list", "(", "_get_registered_plugins", "(", "group", ",", "pspec", ")", ")", "plugs", "+=", "list", "(", "_get_unregistered_plugins", "(", "group", ",", "plugs", ",", "pspec", ")", ")", "return", "PluginSet", "(", "group", ",", "spec", ",", "list", "(", "_sort_plugins", "(", "group", ",", "plugs", ",", "pspec", ",", "spec", ")", ")", ")" ]
Returns a `PluginSet` object for the specified setuptools-style entrypoint `group`. This is just a wrapper around `pkg_resources.iter_entry_points` that allows the plugins to sort and override themselves. The optional `spec` parameter controls how and what plugins are loaded. If it is ``None`` or the special value ``'*'``, then the normal plugin loading will occur, i.e. all registered plugins will be loaded and their self-declared ordering and dependencies will be applied. Otherwise, the `spec` is taken as a comma- or whitespace-separated list of plugins to load. In this mode, the `spec` can either specify an exact list of plugins to load, in the specified order, referred to as an "absolute" spec. Otherwise, it is a "relative" spec, which indicates that it only adjusts the standard registered plugin loading. A spec is a list of either absolute or relative instructions, and they cannot be mixed. In either mode, a plugin is identified either by name for registered plugins (e.g. ``foo``), or by fully-qualified Python module and symbol name for unregistered plugins (e.g. ``package.module.symbol``). Plugins in an absolute spec are loaded in the order specified and can be optionally prefixed with the following special characters: * ``'?'`` : the specified plugin should be loaded if available. If it is not registered, cannot be found, or cannot be loaded, then it is ignored (a DEBUG log message will be emitted, however). Plugins in a relative spec are always prefixed with at least one of the following special characters: * ``'-'`` : removes the specified plugin; this does not affect plugin ordering, it only removes the plugin from the loaded list. If the plugin does not exist, no error is thrown. * ``'+'`` : adds or requires the specified plugin to the loaded set. If the plugin is not a named/registered plugin, then it will be loaded as an asset-symbol, i.e. a Python-dotted module and symbol name. If the plugin does not exist or cannot be loaded, this will throw an error. It does not affect the plugin ordering of registered plugins. * ``'/'`` : the plugin name is taken as a regular expression that will be used to match plugin names and it must terminate in a slash. Note that this must be the **last** element in the spec list. Examples: * ``'*'`` : load all registered plugins. * ``'foo,bar'`` : load the "foo" plugin, then the "bar" plugin. * ``'foo,?bar'`` : load the "foo" plugin and if the "bar" plugin exists, load it too. * ``'-zig'`` : load all registered plugins except the "zig" plugin. * ``'+pkg.foo.bar'`` : load all registered plugins and then load the "pkg.foo.bar" Python symbol. * ``'pkg.foo.bar'`` : load only the "pkg.foo.bar" Python symbol.
[ "Returns", "a", "PluginSet", "object", "for", "the", "specified", "setuptools", "-", "style", "entrypoint", "group", ".", "This", "is", "just", "a", "wrapper", "around", "pkg_resources", ".", "iter_entry_points", "that", "allows", "the", "plugins", "to", "sort", "and", "override", "themselves", "." ]
python
train
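A couple of call sketches mirroring the spec examples in the docstring. The entrypoint group is illustrative, and the helper is assumed to be exposed as asset.plugins at package level.

import asset

plugset = asset.plugins('myapp.renderers')              # all registered plugins, self-ordered
plugset = asset.plugins('myapp.renderers', '-zig')      # relative spec: everything except "zig"
plugset = asset.plugins('myapp.renderers', 'foo,?bar')  # absolute spec: "foo", then "bar" if available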
jleclanche/fireplace
fireplace/game.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/game.py#L193-L201
def queue_actions(self, source, actions, event_args=None): """ Queue a list of \a actions for processing from \a source. Triggers an aura refresh afterwards. """ source.event_args = event_args ret = self.trigger_actions(source, actions) source.event_args = None return ret
[ "def", "queue_actions", "(", "self", ",", "source", ",", "actions", ",", "event_args", "=", "None", ")", ":", "source", ".", "event_args", "=", "event_args", "ret", "=", "self", ".", "trigger_actions", "(", "source", ",", "actions", ")", "source", ".", "event_args", "=", "None", "return", "ret" ]
Queue a list of \a actions for processing from \a source. Triggers an aura refresh afterwards.
[ "Queue", "a", "list", "of", "\\", "a", "actions", "for", "processing", "from", "\\", "a", "source", ".", "Triggers", "an", "aura", "refresh", "afterwards", "." ]
python
train
joyent/python-manta
manta/auth.py
https://github.com/joyent/python-manta/blob/f68ef142bdbac058c981e3b28e18d77612f5b7c6/manta/auth.py#L246-L310
def ssh_key_info_from_key_data(key_id, priv_key=None): """Get/load SSH key info necessary for signing. @param key_id {str} Either a private ssh key fingerprint, e.g. 'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to an ssh private key file (like ssh's IdentityFile config option). @param priv_key {str} Optional. SSH private key file data (PEM format). @return {dict} with these keys: - type: "agent" - signer: Crypto signer class (a PKCS#1 v1.5 signer for RSA keys) - fingerprint: key md5 fingerprint - algorithm: See ALGO_FROM_SSH_KEY_TYPE for supported list. - ... some others added by `load_ssh_key()` """ if FINGERPRINT_RE.match(key_id) and priv_key: key_info = {"fingerprint": key_id, "priv_key": priv_key} else: # Otherwise, we attempt to load necessary details from ~/.ssh. key_info = load_ssh_key(key_id) # Load a key signer. key = None try: key = serialization.load_pem_private_key( key_info["priv_key"], password=None, backend=default_backend()) except TypeError as ex: log.debug("could not import key without passphrase (will " "try with passphrase): %s", ex) if "priv_key_path" in key_info: prompt = "Passphrase [%s]: " % key_info["priv_key_path"] else: prompt = "Passphrase: " for i in range(3): passphrase = getpass(prompt) if not passphrase: break try: key = serialization.load_pem_private_key( key_info["priv_key"], password=passphrase, backend=default_backend()) except ValueError: continue else: break if not key: details = "" if "priv_key_path" in key_info: details = " (%s)" % key_info["priv_key_path"] raise MantaError("could not import key" + details) # If load_ssh_key() wasn't run, set the algorithm here. if 'algorithm' not in key_info: if isinstance(key, ec.EllipticCurvePrivateKey): key_info['algorithm'] = ECDSA_ALGO_FROM_KEY_SIZE[str(key.key_size)] elif isinstance(key, rsa.RSAPrivateKey): key_info['algorithm'] = RSA_STR else: raise MantaError("Unsupported key type for: {}".format(key_id)) key_info["signer"] = key key_info["type"] = "ssh_key" return key_info
[ "def", "ssh_key_info_from_key_data", "(", "key_id", ",", "priv_key", "=", "None", ")", ":", "if", "FINGERPRINT_RE", ".", "match", "(", "key_id", ")", "and", "priv_key", ":", "key_info", "=", "{", "\"fingerprint\"", ":", "key_id", ",", "\"priv_key\"", ":", "priv_key", "}", "else", ":", "# Otherwise, we attempt to load necessary details from ~/.ssh.", "key_info", "=", "load_ssh_key", "(", "key_id", ")", "# Load a key signer.", "key", "=", "None", "try", ":", "key", "=", "serialization", ".", "load_pem_private_key", "(", "key_info", "[", "\"priv_key\"", "]", ",", "password", "=", "None", ",", "backend", "=", "default_backend", "(", ")", ")", "except", "TypeError", "as", "ex", ":", "log", ".", "debug", "(", "\"could not import key without passphrase (will \"", "\"try with passphrase): %s\"", ",", "ex", ")", "if", "\"priv_key_path\"", "in", "key_info", ":", "prompt", "=", "\"Passphrase [%s]: \"", "%", "key_info", "[", "\"priv_key_path\"", "]", "else", ":", "prompt", "=", "\"Passphrase: \"", "for", "i", "in", "range", "(", "3", ")", ":", "passphrase", "=", "getpass", "(", "prompt", ")", "if", "not", "passphrase", ":", "break", "try", ":", "key", "=", "serialization", ".", "load_pem_private_key", "(", "key_info", "[", "\"priv_key\"", "]", ",", "password", "=", "passphrase", ",", "backend", "=", "default_backend", "(", ")", ")", "except", "ValueError", ":", "continue", "else", ":", "break", "if", "not", "key", ":", "details", "=", "\"\"", "if", "\"priv_key_path\"", "in", "key_info", ":", "details", "=", "\" (%s)\"", "%", "key_info", "[", "\"priv_key_path\"", "]", "raise", "MantaError", "(", "\"could not import key\"", "+", "details", ")", "# If load_ssh_key() wasn't run, set the algorithm here.", "if", "'algorithm'", "not", "in", "key_info", ":", "if", "isinstance", "(", "key", ",", "ec", ".", "EllipticCurvePrivateKey", ")", ":", "key_info", "[", "'algorithm'", "]", "=", "ECDSA_ALGO_FROM_KEY_SIZE", "[", "str", "(", "key", ".", "key_size", ")", "]", "elif", "isinstance", "(", "key", ",", "rsa", ".", "RSAPrivateKey", ")", ":", "key_info", "[", "'algorithm'", "]", "=", "RSA_STR", "else", ":", "raise", "MantaError", "(", "\"Unsupported key type for: {}\"", ".", "format", "(", "key_id", ")", ")", "key_info", "[", "\"signer\"", "]", "=", "key", "key_info", "[", "\"type\"", "]", "=", "\"ssh_key\"", "return", "key_info" ]
Get/load SSH key info necessary for signing. @param key_id {str} Either a private ssh key fingerprint, e.g. 'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to an ssh private key file (like ssh's IdentityFile config option). @param priv_key {str} Optional. SSH private key file data (PEM format). @return {dict} with these keys: - type: "agent" - signer: Crypto signer class (a PKCS#1 v1.5 signer for RSA keys) - fingerprint: key md5 fingerprint - algorithm: See ALGO_FROM_SSH_KEY_TYPE for supported list. - ... some others added by `load_ssh_key()`
[ "Get", "/", "load", "SSH", "key", "info", "necessary", "for", "signing", "." ]
python
train
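A hedged sketch of loading signing info; the key path is a placeholder, and passing a fingerprint requires also supplying the PEM data.

from manta.auth import ssh_key_info_from_key_data

key_info = ssh_key_info_from_key_data('/home/user/.ssh/id_rsa')   # placeholder key path
print(key_info['fingerprint'], key_info['algorithm'])

# Alternatively: ssh_key_info_from_key_data('b3:f0:a1:...:d4:bc', priv_key=pem_data)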
mosdef-hub/mbuild
mbuild/utils/conversion.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/utils/conversion.py#L4-L23
def RB_to_OPLS(c0, c1, c2, c3, c4, c5): """Converts Ryckaert-Bellemans type dihedrals to OPLS type. Parameters ---------- c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol) Returns ------- opls_coeffs : np.array, shape=(4,) Array containing the OPLS dihedrals coeffs f1, f2, f3, and f4 (in kcal/mol) """ f1 = (-1.5 * c3) - (2 * c1) f2 = c0 + c1 + c3 f3 = -0.5 * c3 f4 = -0.25 * c4 return np.array([f1, f2, f3, f4])
[ "def", "RB_to_OPLS", "(", "c0", ",", "c1", ",", "c2", ",", "c3", ",", "c4", ",", "c5", ")", ":", "f1", "=", "(", "-", "1.5", "*", "c3", ")", "-", "(", "2", "*", "c1", ")", "f2", "=", "c0", "+", "c1", "+", "c3", "f3", "=", "-", "0.5", "*", "c3", "f4", "=", "-", "0.25", "*", "c4", "return", "np", ".", "array", "(", "[", "f1", ",", "f2", ",", "f3", ",", "f4", "]", ")" ]
Converts Ryckaert-Bellemans type dihedrals to OPLS type. Parameters ---------- c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol) Returns ------- opls_coeffs : np.array, shape=(4,) Array containing the OPLS dihedrals coeffs f1, f2, f3, and f4 (in kcal/mol)
[ "Converts", "Ryckaert", "-", "Bellemans", "type", "dihedrals", "to", "OPLS", "type", "." ]
python
train
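A quick worked call; the Ryckaert-Bellemans coefficients below are illustrative numbers in kcal/mol, not taken from a specific force field table.

from mbuild.utils.conversion import RB_to_OPLS

c0, c1, c2, c3, c4, c5 = 2.0071, -4.0119, 0.2710, 6.2880, 0.0, 0.0
f = RB_to_OPLS(c0, c1, c2, c3, c4, c5)
# f1 = -1.5*c3 - 2*c1 = -1.4082, f2 = c0 + c1 + c3 = 4.2832,
# f3 = -0.5*c3 = -3.1440,        f4 = -0.25*c4 = 0.0
print(f)   # -> approximately [-1.4082, 4.2832, -3.144, -0.]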
rodluger/everest
everest/inject.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/inject.py#L28-L334
def Inject(ID, inj_model='nPLD', t0=None, per=None, dur=0.1, depth=0.001, mask=False, trn_win=5, poly_order=3, make_fits=False, **kwargs): ''' Run one of the :py:obj:`everest` models with injected transits and attempt to recover the transit depth at the end with a simple linear regression with a polynomial baseline. The depth is stored in the :py:obj:`inject` attribute of the model (a dictionary) as :py:obj:`rec_depth`. A control injection is also performed, in which the transits are injected into the de-trended data; the recovered depth in the control run is stored in :py:obj:`inject` as :py:obj:`rec_depth_control`. :param int ID: The target id :param str inj_model: The name of the :py:obj:`everest` model to run. \ Default `"nPLD"` :param float t0: The transit ephemeris in days. Default is to draw from \ the uniform distributon [0., :py:obj:`per`) :param float per: The injected planet period in days. Default is to draw \ from the uniform distribution [2, 10] :param float dur: The transit duration in days. Must be in the range \ [0.05, 0.5]. Default 0.1 :param float depth: The fractional transit depth. Default 0.001 :param bool mask: Explicitly mask the in-transit cadences when computing \ the PLD model? Default :py:obj:`False` :param float trn_win: The size of the transit window in units of the \ transit duration :param int poly_order: The order of the polynomial used to fit the \ continuum ''' # Randomize the planet params if per is None: a = 3. b = 10. per = a + (b - a) * np.random.random() if t0 is None: t0 = per * np.random.random() # Get the actual class _model = eval(inj_model) inject = {'t0': t0, 'per': per, 'dur': dur, 'depth': depth, 'mask': mask, 'poly_order': poly_order, 'trn_win': trn_win} # Define the injection class class Injection(_model): ''' The :py:obj:`Injection` class is a special subclass of a user-selected :py:obj:`everest` model. See :py:func:`Inject` for more details. ''' def __init__(self, *args, **kwargs): ''' ''' self.inject = kwargs.pop('inject', None) self.parent_class = kwargs.pop('parent_class', None) self.kwargs = kwargs super(Injection, self).__init__(*args, **kwargs) @property def name(self): ''' ''' if self.inject['mask']: maskchar = 'M' else: maskchar = 'U' return '%s_Inject_%s%g' % (self.parent_class, maskchar, self.inject['depth']) def load_tpf(self): ''' Loads the target pixel files and injects transits at the pixel level. ''' # Load the TPF super(Injection, self).load_tpf() log.info("Injecting transits...") # Inject the transits into the regular data transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) for i in range(self.fpix.shape[1]): self.fpix[:, i] *= transit_model self.fraw = np.sum(self.fpix, axis=1) if self.inject['mask']: self.transitmask = np.array(list(set(np.concatenate( [self.transitmask, np.where(transit_model < 1.)[0]]))), dtype=int) # Update the PLD normalization self.get_norm() def recover_depth(self): ''' Recovers the injected transit depth from the long cadence data with a simple LLS solver. The results are all stored in the :py:obj:`inject` attribute of the model. 
''' # Control run transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) kwargs = dict(self.kwargs) kwargs.update({'clobber': False}) control = eval(self.parent_class)( self.ID, is_parent=True, **kwargs) control.fraw *= transit_model # Get params log.info("Recovering transit depth...") t0 = self.inject['t0'] per = self.inject['per'] dur = self.inject['dur'] depth = self.inject['depth'] trn_win = self.inject['trn_win'] poly_order = self.inject['poly_order'] for run, tag in zip([self, control], ['', '_control']): # Compute the model mask = np.array( list(set(np.concatenate([run.badmask, run.nanmask]))), dtype=int) flux = np.delete(run.flux / np.nanmedian(run.flux), mask) time = np.delete(run.time, mask) transit_model = (Transit(time, t0=t0, per=per, dur=dur, depth=depth) - 1) / depth # Count the transits t0 += np.ceil((time[0] - dur - t0) / per) * per ttimes0 = np.arange(t0, time[-1] + dur, per) tinds = [] for tt in ttimes0: # Get indices for this chunk inds = np.where(np.abs(time - tt) < trn_win * dur / 2.)[0] # Ensure there's a transit in this chunk, and that # there are enough points for the polynomial fit if np.any(transit_model[inds] < 0.) and \ len(inds) > poly_order: tinds.append(inds) # Our design matrix sz = (poly_order + 1) * len(tinds) X = np.empty((0, 1 + sz), dtype=float) Y = np.array([], dtype=float) T = np.array([], dtype=float) # Loop over all transits for i, inds in enumerate(tinds): # Get the transit model trnvec = transit_model[inds].reshape(-1, 1) # Normalize the time array t = time[inds] t = (t - t[0]) / (t[-1] - t[0]) # Cumulative arrays T = np.append(T, time[inds]) Y = np.append(Y, flux[inds]) # Polynomial vector polyvec = np.array( [t ** o for o in range(0, poly_order + 1)]).T # Update the design matrix with this chunk lzeros = np.zeros((len(t), i * (poly_order + 1))) rzeros = np.zeros( (len(t), sz - (i + 1) * (poly_order + 1))) chunk = np.hstack((trnvec, lzeros, polyvec, rzeros)) X = np.vstack((X, chunk)) # Get the relative depth A = np.dot(X.T, X) B = np.dot(X.T, Y) C = np.linalg.solve(A, B) rec_depth = C[0] # Get the uncertainties sig = 1.4826 * \ np.nanmedian(np.abs(flux - np.nanmedian(flux)) ) / np.nanmedian(flux) cov = sig ** 2 * np.linalg.solve(A, np.eye(A.shape[0])) err = np.sqrt(np.diag(cov)) rec_depth_err = err[0] # Store the results self.inject.update( {'rec_depth%s' % tag: rec_depth, 'rec_depth_err%s' % tag: rec_depth_err}) # Store the detrended, folded data D = (Y - np.dot(C[1:], X[:, 1:].T) + np.nanmedian(Y)) / np.nanmedian(Y) T = (T - t0 - per / 2.) % per - per / 2. self.inject.update( {'fold_time%s' % tag: T, 'fold_flux%s' % tag: D}) def plot_final(self, ax): ''' Plots the injection recovery results. 
''' from mpl_toolkits.axes_grid.inset_locator import inset_axes ax.axis('off') ax1 = inset_axes(ax, width="47%", height="100%", loc=6) ax2 = inset_axes(ax, width="47%", height="100%", loc=7) # Plot the recovered folded transits ax1.plot(self.inject['fold_time'], self.inject['fold_flux'], 'k.', alpha=0.3) x = np.linspace(np.min(self.inject['fold_time']), np.max( self.inject['fold_time']), 500) try: y = Transit( x, t0=0., per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['rec_depth']) except: # Log the error, and carry on exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): l = line.replace('\n', '') log.error(l) y = np.ones_like(x) * np.nan ax1.plot(x, y, 'r-') ax1.annotate('INJECTED', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax1.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax1.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax1.margins(0, None) ax1.ticklabel_format(useOffset=False) # Plot the recovered folded transits (control) ax2.plot(self.inject['fold_time_control'], self.inject['fold_flux_control'], 'k.', alpha=0.3) x = np.linspace(np.min(self.inject['fold_time_control']), np.max( self.inject['fold_time_control']), 500) try: y = Transit( x, t0=0., per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['rec_depth_control']) except: # Log the error, and carry on exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): l = line.replace('\n', '') log.error(l) y = np.ones_like(x) * np.nan ax2.plot(x, y, 'r-') ax2.annotate('CONTROL', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax2.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth_control']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.margins(0, None) ax2.ticklabel_format(useOffset=False) N = int(0.995 * len(self.inject['fold_flux_control'])) hi, lo = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])][[N, -N]] fsort = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])] pad = (hi - lo) * 0.2 ylim = (lo - 2 * pad, hi + pad) ax2.set_ylim(ylim) ax1.set_ylim(ylim) ax2.set_yticklabels([]) for tick in ax1.get_xticklabels() + ax1.get_yticklabels() + \ ax2.get_xticklabels(): tick.set_fontsize(5) def finalize(self): ''' Calls the depth recovery routine at the end of the de-trending step. ''' super(Injection, self).finalize() self.recover_depth() return Injection(ID, inject=inject, parent_class=inj_model, make_fits=make_fits, **kwargs)
[ "def", "Inject", "(", "ID", ",", "inj_model", "=", "'nPLD'", ",", "t0", "=", "None", ",", "per", "=", "None", ",", "dur", "=", "0.1", ",", "depth", "=", "0.001", ",", "mask", "=", "False", ",", "trn_win", "=", "5", ",", "poly_order", "=", "3", ",", "make_fits", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Randomize the planet params", "if", "per", "is", "None", ":", "a", "=", "3.", "b", "=", "10.", "per", "=", "a", "+", "(", "b", "-", "a", ")", "*", "np", ".", "random", ".", "random", "(", ")", "if", "t0", "is", "None", ":", "t0", "=", "per", "*", "np", ".", "random", ".", "random", "(", ")", "# Get the actual class", "_model", "=", "eval", "(", "inj_model", ")", "inject", "=", "{", "'t0'", ":", "t0", ",", "'per'", ":", "per", ",", "'dur'", ":", "dur", ",", "'depth'", ":", "depth", ",", "'mask'", ":", "mask", ",", "'poly_order'", ":", "poly_order", ",", "'trn_win'", ":", "trn_win", "}", "# Define the injection class", "class", "Injection", "(", "_model", ")", ":", "'''\n The :py:obj:`Injection` class is a special subclass of a\n user-selected :py:obj:`everest` model.\n See :py:func:`Inject` for more details.\n\n '''", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "'''\n\n '''", "self", ".", "inject", "=", "kwargs", ".", "pop", "(", "'inject'", ",", "None", ")", "self", ".", "parent_class", "=", "kwargs", ".", "pop", "(", "'parent_class'", ",", "None", ")", "self", ".", "kwargs", "=", "kwargs", "super", "(", "Injection", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "@", "property", "def", "name", "(", "self", ")", ":", "'''\n\n '''", "if", "self", ".", "inject", "[", "'mask'", "]", ":", "maskchar", "=", "'M'", "else", ":", "maskchar", "=", "'U'", "return", "'%s_Inject_%s%g'", "%", "(", "self", ".", "parent_class", ",", "maskchar", ",", "self", ".", "inject", "[", "'depth'", "]", ")", "def", "load_tpf", "(", "self", ")", ":", "'''\n Loads the target pixel files and injects transits at the pixel level.\n\n '''", "# Load the TPF", "super", "(", "Injection", ",", "self", ")", ".", "load_tpf", "(", ")", "log", ".", "info", "(", "\"Injecting transits...\"", ")", "# Inject the transits into the regular data", "transit_model", "=", "Transit", "(", "self", ".", "time", ",", "t0", "=", "self", ".", "inject", "[", "'t0'", "]", ",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'depth'", "]", ")", "for", "i", "in", "range", "(", "self", ".", "fpix", ".", "shape", "[", "1", "]", ")", ":", "self", ".", "fpix", "[", ":", ",", "i", "]", "*=", "transit_model", "self", ".", "fraw", "=", "np", ".", "sum", "(", "self", ".", "fpix", ",", "axis", "=", "1", ")", "if", "self", ".", "inject", "[", "'mask'", "]", ":", "self", ".", "transitmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "transitmask", ",", "np", ".", "where", "(", "transit_model", "<", "1.", ")", "[", "0", "]", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "# Update the PLD normalization", "self", ".", "get_norm", "(", ")", "def", "recover_depth", "(", "self", ")", ":", "'''\n Recovers the injected transit depth from the long\n cadence data with a simple LLS solver.\n The results are all stored in the :py:obj:`inject`\n attribute of the model.\n\n '''", "# Control run", "transit_model", "=", "Transit", "(", "self", ".", "time", ",", "t0", "=", "self", ".", "inject", "[", "'t0'", "]", 
",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'depth'", "]", ")", "kwargs", "=", "dict", "(", "self", ".", "kwargs", ")", "kwargs", ".", "update", "(", "{", "'clobber'", ":", "False", "}", ")", "control", "=", "eval", "(", "self", ".", "parent_class", ")", "(", "self", ".", "ID", ",", "is_parent", "=", "True", ",", "*", "*", "kwargs", ")", "control", ".", "fraw", "*=", "transit_model", "# Get params", "log", ".", "info", "(", "\"Recovering transit depth...\"", ")", "t0", "=", "self", ".", "inject", "[", "'t0'", "]", "per", "=", "self", ".", "inject", "[", "'per'", "]", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", "depth", "=", "self", ".", "inject", "[", "'depth'", "]", "trn_win", "=", "self", ".", "inject", "[", "'trn_win'", "]", "poly_order", "=", "self", ".", "inject", "[", "'poly_order'", "]", "for", "run", ",", "tag", "in", "zip", "(", "[", "self", ",", "control", "]", ",", "[", "''", ",", "'_control'", "]", ")", ":", "# Compute the model", "mask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "run", ".", "badmask", ",", "run", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "flux", "=", "np", ".", "delete", "(", "run", ".", "flux", "/", "np", ".", "nanmedian", "(", "run", ".", "flux", ")", ",", "mask", ")", "time", "=", "np", ".", "delete", "(", "run", ".", "time", ",", "mask", ")", "transit_model", "=", "(", "Transit", "(", "time", ",", "t0", "=", "t0", ",", "per", "=", "per", ",", "dur", "=", "dur", ",", "depth", "=", "depth", ")", "-", "1", ")", "/", "depth", "# Count the transits", "t0", "+=", "np", ".", "ceil", "(", "(", "time", "[", "0", "]", "-", "dur", "-", "t0", ")", "/", "per", ")", "*", "per", "ttimes0", "=", "np", ".", "arange", "(", "t0", ",", "time", "[", "-", "1", "]", "+", "dur", ",", "per", ")", "tinds", "=", "[", "]", "for", "tt", "in", "ttimes0", ":", "# Get indices for this chunk", "inds", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "time", "-", "tt", ")", "<", "trn_win", "*", "dur", "/", "2.", ")", "[", "0", "]", "# Ensure there's a transit in this chunk, and that", "# there are enough points for the polynomial fit", "if", "np", ".", "any", "(", "transit_model", "[", "inds", "]", "<", "0.", ")", "and", "len", "(", "inds", ")", ">", "poly_order", ":", "tinds", ".", "append", "(", "inds", ")", "# Our design matrix", "sz", "=", "(", "poly_order", "+", "1", ")", "*", "len", "(", "tinds", ")", "X", "=", "np", ".", "empty", "(", "(", "0", ",", "1", "+", "sz", ")", ",", "dtype", "=", "float", ")", "Y", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "float", ")", "T", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "float", ")", "# Loop over all transits", "for", "i", ",", "inds", "in", "enumerate", "(", "tinds", ")", ":", "# Get the transit model", "trnvec", "=", "transit_model", "[", "inds", "]", ".", "reshape", "(", "-", "1", ",", "1", ")", "# Normalize the time array", "t", "=", "time", "[", "inds", "]", "t", "=", "(", "t", "-", "t", "[", "0", "]", ")", "/", "(", "t", "[", "-", "1", "]", "-", "t", "[", "0", "]", ")", "# Cumulative arrays", "T", "=", "np", ".", "append", "(", "T", ",", "time", "[", "inds", "]", ")", "Y", "=", "np", ".", "append", "(", "Y", ",", "flux", "[", "inds", "]", ")", "# Polynomial vector", "polyvec", "=", "np", ".", "array", "(", "[", "t", "**", "o", "for", "o", "in", "range", "(", "0", ",", "poly_order", "+", "1", ")", 
"]", ")", ".", "T", "# Update the design matrix with this chunk", "lzeros", "=", "np", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "i", "*", "(", "poly_order", "+", "1", ")", ")", ")", "rzeros", "=", "np", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "sz", "-", "(", "i", "+", "1", ")", "*", "(", "poly_order", "+", "1", ")", ")", ")", "chunk", "=", "np", ".", "hstack", "(", "(", "trnvec", ",", "lzeros", ",", "polyvec", ",", "rzeros", ")", ")", "X", "=", "np", ".", "vstack", "(", "(", "X", ",", "chunk", ")", ")", "# Get the relative depth", "A", "=", "np", ".", "dot", "(", "X", ".", "T", ",", "X", ")", "B", "=", "np", ".", "dot", "(", "X", ".", "T", ",", "Y", ")", "C", "=", "np", ".", "linalg", ".", "solve", "(", "A", ",", "B", ")", "rec_depth", "=", "C", "[", "0", "]", "# Get the uncertainties", "sig", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "flux", "-", "np", ".", "nanmedian", "(", "flux", ")", ")", ")", "/", "np", ".", "nanmedian", "(", "flux", ")", "cov", "=", "sig", "**", "2", "*", "np", ".", "linalg", ".", "solve", "(", "A", ",", "np", ".", "eye", "(", "A", ".", "shape", "[", "0", "]", ")", ")", "err", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "cov", ")", ")", "rec_depth_err", "=", "err", "[", "0", "]", "# Store the results", "self", ".", "inject", ".", "update", "(", "{", "'rec_depth%s'", "%", "tag", ":", "rec_depth", ",", "'rec_depth_err%s'", "%", "tag", ":", "rec_depth_err", "}", ")", "# Store the detrended, folded data", "D", "=", "(", "Y", "-", "np", ".", "dot", "(", "C", "[", "1", ":", "]", ",", "X", "[", ":", ",", "1", ":", "]", ".", "T", ")", "+", "np", ".", "nanmedian", "(", "Y", ")", ")", "/", "np", ".", "nanmedian", "(", "Y", ")", "T", "=", "(", "T", "-", "t0", "-", "per", "/", "2.", ")", "%", "per", "-", "per", "/", "2.", "self", ".", "inject", ".", "update", "(", "{", "'fold_time%s'", "%", "tag", ":", "T", ",", "'fold_flux%s'", "%", "tag", ":", "D", "}", ")", "def", "plot_final", "(", "self", ",", "ax", ")", ":", "'''\n Plots the injection recovery results.\n\n '''", "from", "mpl_toolkits", ".", "axes_grid", ".", "inset_locator", "import", "inset_axes", "ax", ".", "axis", "(", "'off'", ")", "ax1", "=", "inset_axes", "(", "ax", ",", "width", "=", "\"47%\"", ",", "height", "=", "\"100%\"", ",", "loc", "=", "6", ")", "ax2", "=", "inset_axes", "(", "ax", ",", "width", "=", "\"47%\"", ",", "height", "=", "\"100%\"", ",", "loc", "=", "7", ")", "# Plot the recovered folded transits", "ax1", ".", "plot", "(", "self", ".", "inject", "[", "'fold_time'", "]", ",", "self", ".", "inject", "[", "'fold_flux'", "]", ",", "'k.'", ",", "alpha", "=", "0.3", ")", "x", "=", "np", ".", "linspace", "(", "np", ".", "min", "(", "self", ".", "inject", "[", "'fold_time'", "]", ")", ",", "np", ".", "max", "(", "self", ".", "inject", "[", "'fold_time'", "]", ")", ",", "500", ")", "try", ":", "y", "=", "Transit", "(", "x", ",", "t0", "=", "0.", ",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'rec_depth'", "]", ")", "except", ":", "# Log the error, and carry on", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "l", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "error", "(", "l", ")", "y", "=", "np", ".", "ones_like", "(", "x", ")", "*", "np", ".", "nan", "ax1", ".", "plot", "(", 
"x", ",", "y", ",", "'r-'", ")", "ax1", ".", "annotate", "(", "'INJECTED'", ",", "xy", "=", "(", "0.98", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ",", "alpha", "=", "0.5", ",", "fontweight", "=", "'bold'", ")", "ax1", ".", "annotate", "(", "'True depth:\\nRecovered depth:'", ",", "xy", "=", "(", "0.02", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax1", ".", "annotate", "(", "'%.6f\\n%.6f'", "%", "(", "self", ".", "inject", "[", "'depth'", "]", ",", "self", ".", "inject", "[", "'rec_depth'", "]", ")", ",", "xy", "=", "(", "0.4", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax1", ".", "margins", "(", "0", ",", "None", ")", "ax1", ".", "ticklabel_format", "(", "useOffset", "=", "False", ")", "# Plot the recovered folded transits (control)", "ax2", ".", "plot", "(", "self", ".", "inject", "[", "'fold_time_control'", "]", ",", "self", ".", "inject", "[", "'fold_flux_control'", "]", ",", "'k.'", ",", "alpha", "=", "0.3", ")", "x", "=", "np", ".", "linspace", "(", "np", ".", "min", "(", "self", ".", "inject", "[", "'fold_time_control'", "]", ")", ",", "np", ".", "max", "(", "self", ".", "inject", "[", "'fold_time_control'", "]", ")", ",", "500", ")", "try", ":", "y", "=", "Transit", "(", "x", ",", "t0", "=", "0.", ",", "per", "=", "self", ".", "inject", "[", "'per'", "]", ",", "dur", "=", "self", ".", "inject", "[", "'dur'", "]", ",", "depth", "=", "self", ".", "inject", "[", "'rec_depth_control'", "]", ")", "except", ":", "# Log the error, and carry on", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "l", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "error", "(", "l", ")", "y", "=", "np", ".", "ones_like", "(", "x", ")", "*", "np", ".", "nan", "ax2", ".", "plot", "(", "x", ",", "y", ",", "'r-'", ")", "ax2", ".", "annotate", "(", "'CONTROL'", ",", "xy", "=", "(", "0.98", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ",", "alpha", "=", "0.5", ",", "fontweight", "=", "'bold'", ")", "ax2", ".", "annotate", "(", "'True depth:\\nRecovered depth:'", ",", "xy", "=", "(", "0.02", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax2", ".", "annotate", "(", "'%.6f\\n%.6f'", "%", "(", "self", ".", "inject", "[", "'depth'", "]", ",", "self", ".", "inject", "[", "'rec_depth_control'", "]", ")", ",", "xy", "=", "(", "0.4", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "6", ",", "color", "=", "'r'", ")", "ax2", ".", "margins", "(", "0", ",", "None", ")", "ax2", ".", "ticklabel_format", "(", "useOffset", "=", "False", ")", "N", "=", "int", "(", "0.995", "*", "len", "(", "self", ".", "inject", "[", "'fold_flux_control'", "]", ")", ")", "hi", ",", "lo", "=", "self", ".", "inject", "[", "'fold_flux_control'", "]", "[", "np", ".", "argsort", "(", "self", ".", "inject", "[", "'fold_flux_control'", "]", 
")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "fsort", "=", "self", ".", "inject", "[", "'fold_flux_control'", "]", "[", "np", ".", "argsort", "(", "self", ".", "inject", "[", "'fold_flux_control'", "]", ")", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.2", "ylim", "=", "(", "lo", "-", "2", "*", "pad", ",", "hi", "+", "pad", ")", "ax2", ".", "set_ylim", "(", "ylim", ")", "ax1", ".", "set_ylim", "(", "ylim", ")", "ax2", ".", "set_yticklabels", "(", "[", "]", ")", "for", "tick", "in", "ax1", ".", "get_xticklabels", "(", ")", "+", "ax1", ".", "get_yticklabels", "(", ")", "+", "ax2", ".", "get_xticklabels", "(", ")", ":", "tick", ".", "set_fontsize", "(", "5", ")", "def", "finalize", "(", "self", ")", ":", "'''\n Calls the depth recovery routine at the end\n of the de-trending step.\n\n '''", "super", "(", "Injection", ",", "self", ")", ".", "finalize", "(", ")", "self", ".", "recover_depth", "(", ")", "return", "Injection", "(", "ID", ",", "inject", "=", "inject", ",", "parent_class", "=", "inj_model", ",", "make_fits", "=", "make_fits", ",", "*", "*", "kwargs", ")" ]
Run one of the :py:obj:`everest` models with injected transits and attempt to recover the transit depth at the end with a simple linear regression with a polynomial baseline. The depth is stored in the :py:obj:`inject` attribute of the model (a dictionary) as :py:obj:`rec_depth`. A control injection is also performed, in which the transits are injected into the de-trended data; the recovered depth in the control run is stored in :py:obj:`inject` as :py:obj:`rec_depth_control`.

    :param int ID: The target id
    :param str inj_model: The name of the :py:obj:`everest` model to run. \
Default `"nPLD"`
    :param float t0: The transit ephemeris in days. Default is to draw from \
the uniform distribution [0., :py:obj:`per`)
    :param float per: The injected planet period in days. Default is to draw \
from the uniform distribution [3, 10]
    :param float dur: The transit duration in days. Must be in the range \
[0.05, 0.5]. Default 0.1
    :param float depth: The fractional transit depth. Default 0.001
    :param bool mask: Explicitly mask the in-transit cadences when computing \
the PLD model? Default :py:obj:`False`
    :param float trn_win: The size of the transit window in units of the \
transit duration
    :param int poly_order: The order of the polynomial used to fit the \
continuum
[ "Run", "one", "of", "the", ":", "py", ":", "obj", ":", "everest", "models", "with", "injected", "transits", "and", "attempt", "to", "recover", "the", "transit", "depth", "at", "the", "end", "with", "a", "simple", "linear", "regression", "with", "a", "polynomial", "baseline", ".", "The", "depth", "is", "stored", "in", "the", ":", "py", ":", "obj", ":", "inject", "attribute", "of", "the", "model", "(", "a", "dictionary", ")", "as", ":", "py", ":", "obj", ":", "rec_depth", ".", "A", "control", "injection", "is", "also", "performed", "in", "which", "the", "transits", "are", "injected", "into", "the", "de", "-", "trended", "data", ";", "the", "recovered", "depth", "in", "the", "control", "run", "is", "stored", "in", ":", "py", ":", "obj", ":", "inject", "as", ":", "py", ":", "obj", ":", "rec_depth_control", "." ]
python
train
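The recover_depth method in the everest record above solves a linear least-squares problem whose design matrix stacks a unit-depth transit template next to per-transit polynomial baselines; the coefficient on the template column is the recovered depth. Below is a minimal standalone sketch of that idea with a single baseline and a synthetic box-shaped transit. It is not taken from the everest source; the function name, the box template, and the numbers are illustrative only.

# Sketch of the normal-equations depth fit used by recover_depth (assumed
# simplification: one polynomial baseline instead of one per transit chunk).
import numpy as np

def recover_depth_sketch(time, flux, template, poly_order=3):
    # `template` is the unit-depth transit shape, (transit_model - 1) / depth,
    # so the fitted coefficient on that column equals the depth itself.
    t = (time - time[0]) / (time[-1] - time[0])          # normalize time to [0, 1]
    polyvec = np.array([t ** o for o in range(poly_order + 1)]).T
    X = np.hstack((template.reshape(-1, 1), polyvec))     # [transit | baseline]
    A = np.dot(X.T, X)
    B = np.dot(X.T, flux)
    coeffs = np.linalg.solve(A, B)                        # linear least squares
    return coeffs[0]                                      # recovered depth

time = np.linspace(0.0, 1.0, 500)
template = -((time > 0.45) & (time < 0.55)).astype(float)   # unit-depth box transit
flux = 1.0 + 0.002 * time + 1e-3 * template                 # slope plus injected depth of 1e-3
print(recover_depth_sketch(time, flux, template))           # ~0.001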
theislab/scvelo
scvelo/tools/rank_velocity_genes.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/rank_velocity_genes.py#L39-L52
def select_groups(adata, groups='all', key='louvain'): """Get subset of groups in adata.obs[key]. """ strings_to_categoricals(adata) if isinstance(groups, list) and isinstance(groups[0], int): groups = [str(n) for n in groups] categories = adata.obs[key].cat.categories groups_masks = np.array([categories[i] == adata.obs[key].values for i, name in enumerate(categories)]) if groups == 'all': groups = categories.values else: groups_ids = [np.where(categories.values == name)[0][0] for name in groups] groups_masks = groups_masks[groups_ids] groups = categories[groups_ids].values return groups, groups_masks
[ "def", "select_groups", "(", "adata", ",", "groups", "=", "'all'", ",", "key", "=", "'louvain'", ")", ":", "strings_to_categoricals", "(", "adata", ")", "if", "isinstance", "(", "groups", ",", "list", ")", "and", "isinstance", "(", "groups", "[", "0", "]", ",", "int", ")", ":", "groups", "=", "[", "str", "(", "n", ")", "for", "n", "in", "groups", "]", "categories", "=", "adata", ".", "obs", "[", "key", "]", ".", "cat", ".", "categories", "groups_masks", "=", "np", ".", "array", "(", "[", "categories", "[", "i", "]", "==", "adata", ".", "obs", "[", "key", "]", ".", "values", "for", "i", ",", "name", "in", "enumerate", "(", "categories", ")", "]", ")", "if", "groups", "==", "'all'", ":", "groups", "=", "categories", ".", "values", "else", ":", "groups_ids", "=", "[", "np", ".", "where", "(", "categories", ".", "values", "==", "name", ")", "[", "0", "]", "[", "0", "]", "for", "name", "in", "groups", "]", "groups_masks", "=", "groups_masks", "[", "groups_ids", "]", "groups", "=", "categories", "[", "groups_ids", "]", ".", "values", "return", "groups", ",", "groups_masks" ]
Get subset of groups in adata.obs[key].
[ "Get", "subset", "of", "groups", "in", "adata", ".", "obs", "[", "key", "]", "." ]
python
train
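The group-selection logic in the scvelo record above builds one boolean mask per category and then restricts those masks to the requested group names. Here is a standalone sketch of that masking step using a plain pandas categorical Series in place of adata.obs[key]; the labels and requested groups are made up for illustration.

# Sketch of the per-category mask construction in select_groups, without AnnData.
import numpy as np
import pandas as pd

labels = pd.Series(['0', '1', '0', '2', '1'], dtype='category')   # stand-in for adata.obs[key]
categories = labels.cat.categories
groups_masks = np.array([categories[i] == labels.values
                         for i, _ in enumerate(categories)])       # one row of booleans per category

requested = ['0', '2']                                              # groups='all' would keep every category
groups_ids = [np.where(categories.values == name)[0][0] for name in requested]
print(categories[groups_ids].values)          # ['0' '2']
print(groups_masks[groups_ids].astype(int))   # [[1 0 1 0 0]
                                              #  [0 0 0 1 0]]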
jrderuiter/pybiomart
src/pybiomart/dataset.py
https://github.com/jrderuiter/pybiomart/blob/7802d45fe88549ab0512d6f37f815fc43b172b39/src/pybiomart/dataset.py#L88-L92
def filters(self): """List of filters available for the dataset.""" if self._filters is None: self._filters, self._attributes = self._fetch_configuration() return self._filters
[ "def", "filters", "(", "self", ")", ":", "if", "self", ".", "_filters", "is", "None", ":", "self", ".", "_filters", ",", "self", ".", "_attributes", "=", "self", ".", "_fetch_configuration", "(", ")", "return", "self", ".", "_filters" ]
List of filters available for the dataset.
[ "List", "of", "filters", "available", "for", "the", "dataset", "." ]
python
train
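The filters property in the pybiomart record above is a lazy-loading cache: the first access fetches and stores the dataset configuration, and later accesses reuse it. A minimal sketch of the same pattern follows; the stubbed _fetch_configuration and the example filter/attribute names stand in for the real BioMart request.

# Sketch of the cached-property pattern behind Dataset.filters.
class LazyDataset:
    def __init__(self):
        self._filters = None
        self._attributes = None

    def _fetch_configuration(self):
        # Placeholder for the network call that parses the dataset configuration.
        return {'chromosome_name': '...'}, {'ensembl_gene_id': '...'}

    @property
    def filters(self):
        # Fetch once, then serve the cached result on every later access.
        if self._filters is None:
            self._filters, self._attributes = self._fetch_configuration()
        return self._filters

ds = LazyDataset()
print(ds.filters)   # first access triggers the (stubbed) fetch
print(ds.filters)   # second access is served from the cache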
spyder-ide/spyder
spyder/plugins/editor/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L1831-L1847
def print_file(self): """Print current file""" editor = self.get_current_editor() filename = self.get_current_filename() printer = Printer(mode=QPrinter.HighResolution, header_font=self.get_plugin_font('printer_header')) printDialog = QPrintDialog(printer, editor) if editor.has_selected_text(): printDialog.setOption(QAbstractPrintDialog.PrintSelection, True) self.redirect_stdio.emit(False) answer = printDialog.exec_() self.redirect_stdio.emit(True) if answer == QDialog.Accepted: self.starting_long_process(_("Printing...")) printer.setDocName(filename) editor.print_(printer) self.ending_long_process()
[ "def", "print_file", "(", "self", ")", ":", "editor", "=", "self", ".", "get_current_editor", "(", ")", "filename", "=", "self", ".", "get_current_filename", "(", ")", "printer", "=", "Printer", "(", "mode", "=", "QPrinter", ".", "HighResolution", ",", "header_font", "=", "self", ".", "get_plugin_font", "(", "'printer_header'", ")", ")", "printDialog", "=", "QPrintDialog", "(", "printer", ",", "editor", ")", "if", "editor", ".", "has_selected_text", "(", ")", ":", "printDialog", ".", "setOption", "(", "QAbstractPrintDialog", ".", "PrintSelection", ",", "True", ")", "self", ".", "redirect_stdio", ".", "emit", "(", "False", ")", "answer", "=", "printDialog", ".", "exec_", "(", ")", "self", ".", "redirect_stdio", ".", "emit", "(", "True", ")", "if", "answer", "==", "QDialog", ".", "Accepted", ":", "self", ".", "starting_long_process", "(", "_", "(", "\"Printing...\"", ")", ")", "printer", ".", "setDocName", "(", "filename", ")", "editor", ".", "print_", "(", "printer", ")", "self", ".", "ending_long_process", "(", ")" ]
Print current file
[ "Print", "current", "file" ]
python
train
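The print_file method in the Spyder record above follows the standard Qt print flow: build a high-resolution printer, show a QPrintDialog, and send the editor's document to the printer only if the user accepts. A reduced sketch of that flow outside Spyder is shown below; it assumes a qtpy (Qt5) environment with a display, and the widget contents and document name are placeholders.

# Sketch of the Qt print flow, without the Spyder plugin machinery.
import sys
from qtpy.QtWidgets import QApplication, QDialog, QPlainTextEdit
from qtpy.QtPrintSupport import QPrintDialog, QPrinter

app = QApplication(sys.argv)
editor = QPlainTextEdit("print me")          # stand-in for the current editor widget

printer = QPrinter(QPrinter.HighResolution)
dialog = QPrintDialog(printer, editor)
if dialog.exec_() == QDialog.Accepted:       # only print if the user confirms the dialog
    printer.setDocName("untitled.py")
    editor.print_(printer)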
qacafe/cdrouter.py
cdrouter/devices.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/devices.py#L312-L321
def connect(self, id): # pylint: disable=invalid-name,redefined-builtin """Open proxy connection to a device's management interface. :param id: Device ID as an int. :return: :class:`devices.Connection <devices.Connection>` object :rtype: devices.Connection """ schema = ConnectionSchema() resp = self.service.post(self.base+str(id)+'/connect/') return self.service.decode(schema, resp)
[ "def", "connect", "(", "self", ",", "id", ")", ":", "# pylint: disable=invalid-name,redefined-builtin", "schema", "=", "ConnectionSchema", "(", ")", "resp", "=", "self", ".", "service", ".", "post", "(", "self", ".", "base", "+", "str", "(", "id", ")", "+", "'/connect/'", ")", "return", "self", ".", "service", ".", "decode", "(", "schema", ",", "resp", ")" ]
Open proxy connection to a device's management interface. :param id: Device ID as an int. :return: :class:`devices.Connection <devices.Connection>` object :rtype: devices.Connection
[ "Open", "proxy", "connection", "to", "a", "device", "s", "management", "interface", "." ]
python
train
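The connect method in the cdrouter.py record above issues a POST to /devices/<id>/connect/ and decodes the response into a Connection object. A hypothetical usage sketch follows; the host URL and token are placeholders, a live CDRouter server would be required, and the CDRouter(...) constructor call is an assumption rather than something shown in this record.

# Hypothetical client-side usage of DevicesService.connect.
from cdrouter import CDRouter

service = CDRouter('http://cdrouter.example.com', token='<api-token>')   # placeholder host and token
conn = service.devices.connect(1)    # POST /devices/1/connect/
print(conn)                          # devices.Connection object describing the proxy session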