Columns: text (string, length 89-104k), code_tokens (list), avg_line_len (float64, 7.91-980), score (float64, 0-630).
def auth_required(self, view):
    """Decorator which checks that the user is authenticated.

    Wraps a Flask view and blocks unauthenticated requests.

    :param view: Flask's view function
    """
    @functools.wraps(view)
    def decorated(*args, **kwargs):
        log.info("Trying to get access to protected resource: '%s'", view.__name__)
        if request.method == 'POST':
            token = request.form['token']
            if self.development or self.authenticated(token):
                return view(*args, **kwargs)
            else:
                log.warning("User has not been authorized to get access to resource: %s", view.__name__)
        else:
            log.warning("Bad request type! Expected 'POST', actual '%s'", request.method)
        return abort(403)
    return decorated
avg_line_len: 37.782609, score: 22.782609
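A minimal usage sketch (hypothetical names: app is the Flask application and auth is an instance of the class defining auth_required; the route must accept POST because the decorator reads the token from request.form):

@app.route('/protected', methods=['POST'])  # hypothetical route
@auth.auth_required
def protected():
    return 'secret payload'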
def getaddrspec(self):
    """Parse an RFC 2822 addr-spec."""
    aslist = []

    self.gotonext()
    while self.pos < len(self.field):
        preserve_ws = True
        if self.field[self.pos] == '.':
            if aslist and not aslist[-1].strip():
                aslist.pop()
            aslist.append('.')
            self.pos += 1
            preserve_ws = False
        elif self.field[self.pos] == '"':
            aslist.append('"%s"' % quote(self.getquote()))
        elif self.field[self.pos] in self.atomends:
            if aslist and not aslist[-1].strip():
                aslist.pop()
            break
        else:
            aslist.append(self.getatom())
        ws = self.gotonext()
        if preserve_ws and ws:
            aslist.append(ws)

    if self.pos >= len(self.field) or self.field[self.pos] != '@':
        return EMPTYSTRING.join(aslist)

    aslist.append('@')
    self.pos += 1
    self.gotonext()
    return EMPTYSTRING.join(aslist) + self.getdomain()
avg_line_len: 33.53125, score: 14.53125
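This appears to be the address parser behind the standard library's email utilities; assuming the stdlib variant, the same logic can be exercised through email.utils.parseaddr:

from email.utils import parseaddr

name, addrspec = parseaddr('Jane Doe <jane.doe@example.org>')
print(name)      # 'Jane Doe'
print(addrspec)  # 'jane.doe@example.org'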
def _euler_to_q(self, euler):
    """
    Create q array from euler angles

    :param euler: array [roll, pitch, yaw] in rad
    :returns: array q which represents a quaternion [w, x, y, z]
    """
    assert len(euler) == 3
    phi = euler[0]
    theta = euler[1]
    psi = euler[2]
    c_phi_2 = np.cos(phi / 2)
    s_phi_2 = np.sin(phi / 2)
    c_theta_2 = np.cos(theta / 2)
    s_theta_2 = np.sin(theta / 2)
    c_psi_2 = np.cos(psi / 2)
    s_psi_2 = np.sin(psi / 2)
    q = np.zeros(4)
    q[0] = (c_phi_2 * c_theta_2 * c_psi_2 +
            s_phi_2 * s_theta_2 * s_psi_2)
    q[1] = (s_phi_2 * c_theta_2 * c_psi_2 -
            c_phi_2 * s_theta_2 * s_psi_2)
    q[2] = (c_phi_2 * s_theta_2 * c_psi_2 +
            s_phi_2 * c_theta_2 * s_psi_2)
    q[3] = (c_phi_2 * c_theta_2 * s_psi_2 -
            s_phi_2 * s_theta_2 * c_psi_2)
    return q
avg_line_len: 35.692308, score: 9.230769
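A standalone restatement of the same ZYX (roll-pitch-yaw) formula for a quick sanity check; zero Euler angles should yield the identity quaternion [1, 0, 0, 0]:

import numpy as np

def euler_to_q(euler):
    # free-function copy of the formula above
    phi, theta, psi = euler
    c_phi, s_phi = np.cos(phi / 2), np.sin(phi / 2)
    c_th, s_th = np.cos(theta / 2), np.sin(theta / 2)
    c_psi, s_psi = np.cos(psi / 2), np.sin(psi / 2)
    return np.array([
        c_phi * c_th * c_psi + s_phi * s_th * s_psi,  # w
        s_phi * c_th * c_psi - c_phi * s_th * s_psi,  # x
        c_phi * s_th * c_psi + s_phi * c_th * s_psi,  # y
        c_phi * c_th * s_psi - s_phi * s_th * c_psi,  # z
    ])

print(euler_to_q([0.0, 0.0, 0.0]))  # [1. 0. 0. 0.]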
def _product(*args, **kwds):
    """
    Generate the Cartesian product of the lists given as arguments.

    Adapted from the itertools.product documentation.
    """
    # list() is needed on Python 3, where map() returns an iterator
    pools = list(map(tuple, args)) * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [x + [y] for x in result for y in pool]
    return result
avg_line_len: 25.25, score: 16.75
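The pure-Python version should agree with itertools.product itself (modulo returning lists instead of tuples):

import itertools

expected = [list(t) for t in itertools.product([1, 2], ['a', 'b'])]
assert _product([1, 2], ['a', 'b']) == expected
print(_product([1, 2], ['a', 'b']))  # [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]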
def predict_fixation_duration(durations, angles, length_diffs, dataset=None, params=None):
    """
    Fits a non-linear piecewise regression to fixation durations for a fixmat.

    Returns corrected fixation durations.
    """
    if dataset is None:
        dataset = np.ones(durations.shape)
    corrected_durations = np.nan * np.ones(durations.shape)
    for i, ds in enumerate(np.unique(dataset)):
        e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y)
        v0 = [120, 220.0, -.1, 0.5, .1, .1]
        id_ds = dataset == ds
        idnan = (~np.isnan(angles)) & (~np.isnan(durations)) & (~np.isnan(length_diffs))
        v, s = leastsq(e, v0,
                       args=(angles[idnan & id_ds],
                             durations[idnan & id_ds],
                             length_diffs[idnan & id_ds]),
                       maxfev=10000)
        corrected_durations[id_ds] = (durations[id_ds] -
                                      leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v))
        if params is not None:
            params['v' + str(i)] = v
            params['s' + str(i)] = s
    return corrected_durations
avg_line_len: 38.866667, score: 13.8
def update_config_mode(self, prompt):  # pylint: disable=no-self-use
    """Update config mode based on the prompt analysis."""
    mode = 'global'
    if prompt:
        if 'config' in prompt:
            mode = 'config'
        elif 'admin' in prompt:
            mode = 'admin'
    self.log("Mode: {}".format(mode))
    return mode
avg_line_len: 32.818182, score: 14.454545
def reply(self):
    """
    Reply to the selected item.

    This is a utility method and should not be bound to a key directly.

    Item type:
        Submission - add a top level comment
        Comment - add a comment reply
        Message - reply to a private message
    """
    data = self.get_selected_item()
    if data['type'] == 'Submission':
        body = data['text']
        description = 'submission'
        reply = data['object'].add_comment
    elif data['type'] in ('Comment', 'InboxComment'):
        body = data['body']
        description = 'comment'
        reply = data['object'].reply
    elif data['type'] == 'Message':
        body = data['body']
        description = 'private message'
        reply = data['object'].reply
    else:
        self.term.flash()
        return

    # Construct the text that will be displayed in the editor file.
    # The post body will be commented out and added for reference
    lines = [' |' + line for line in body.split('\n')]
    content = '\n'.join(lines)
    comment_info = docs.REPLY_FILE.format(
        author=data['author'],
        type=description,
        content=content)

    with self.term.open_editor(comment_info) as comment:
        if not comment:
            self.term.show_notification('Canceled')
            return

        with self.term.loader('Posting {}'.format(description), delay=0):
            reply(comment)
            # Give reddit time to process the submission
            time.sleep(2.0)

        if self.term.loader.exception is None:
            self.reload_page()
        else:
            raise TemporaryFileError()
avg_line_len: 34.27451, score: 15.058824
def _collect_parameters(parameter_names, args, kwargs, defaults):
    """Creates a dictionary mapping parameter names to their values in the method call.

    :param parameter_names: The method's parameter names
    :type parameter_names: list[string]
    :param args: *args passed into the method
    :type args: list[object]
    :param kwargs: **kwargs passed into the method
    :type kwargs: dict[string, object]
    :param defaults: The method's default values
    :type defaults: list[object]
    :return: Dictionary mapping parameter names to values
    :rtype: dict[string, object]
    """
    parameters = {}
    if defaults is not None:
        zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
        for name, default in zipped_defaults:
            parameters[name] = default
    for name, value in zip(parameter_names, args):
        parameters[name] = value
    for name, value in kwargs.items():
        parameters[name] = value
    return parameters
avg_line_len: 43.833333, score: 11.541667
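Worked example: reconstructing the bound arguments of a call like f(1, 2, c=10) against a signature f(a, b, c=3):

params = _collect_parameters(['a', 'b', 'c'], (1, 2), {'c': 10}, (3,))
print(params)  # {'c': 10, 'a': 1, 'b': 2} - the default for c is overridden by kwargs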
def det_dataset(eb, passband, dataid, comp, time):
    """
    Since RV datasets can have values related to each component in phoebe2
    but are component-specific in phoebe1, it is important to determine
    which dataset to add parameters to. This function will do that.

    eb - bundle
    rvpt - relevant phoebe 1 parameters
    """
    rvs = eb.get_dataset(kind='rv').datasets
    # first check to see if there are currently any RV datasets
    if dataid == 'Undefined':
        dataid = None
    # if len(rvs) == 0:
    # if there isn't, we add one - the easy part
    try:
        eb._check_label(dataid)
        rv_dataset = eb.add_dataset('rv', dataset=dataid, times=[])
    except ValueError:
        logger.warning("The name picked for the radial velocity curve is forbidden. Applying default name instead")
        rv_dataset = eb.add_dataset('rv', times=[])
    # else:
    #     #now we have to determine if we add to an existing dataset or make a new one
    #     rvs = eb.get_dataset(kind='rv').datasets
    #     found = False
    #     #set the component of the companion
    #
    #     if comp == 'primary':
    #         comp_o = 'primary'
    #     else:
    #         comp_o = 'secondary'
    #     for x in rvs:
    #         test_dataset = eb.get_dataset(x, check_visible=False)
    #
    #         if len(test_dataset.get_value(qualifier='rvs', component=comp_o, check_visible=False)) == 0:
    #             # so at least it has an empty spot; now check against filter and length
    #             # removing reference to time_o. If there are no rvs there should be no times
    #             # time_o = test_dataset.get_value('times', component=comp_o)
    #             passband_o = test_dataset.get_value('passband')
    #
    #             # if np.all(time_o == time) and (passband == passband_o):
    #             if (passband == passband_o):
    #                 rv_dataset = test_dataset
    #                 found = True
    #
    #     if not found:
    #         try:
    #             eb._check_label(dataid)
    #             rv_dataset = eb.add_dataset('rv', dataset=dataid, times=[])
    #         except ValueError:
    #             logger.warning("The name picked for the lightcurve is forbidden. Applying default name instead")
    #             rv_dataset = eb.add_dataset('rv', times=[])
    return rv_dataset
avg_line_len: 36.741935, score: 28.387097
def word_tokenize(text):
    """
    Split string `text` into word tokens using the Penn Treebank rules
    """
    for (regexp, replacement) in RULES1:
        text = sub(regexp, replacement, text)

    # add extra space to make things easier
    text = " " + text + " "

    for (regexp, replacement) in RULES2:
        text = sub(regexp, replacement, text)

    for regexp in CONTRACTIONS:
        text = sub(regexp, r"\1 \2 ", text)

    # split and return
    return text.split()
avg_line_len: 33.357143, score: 8.5
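The rule tables are module globals not shown here; a toy stand-in is enough to make the function runnable (illustrative only - the real Penn Treebank tables are far larger):

from re import sub

RULES1 = [(r'([,.!?;])', r' \1 ')]                   # split punctuation off words
RULES2 = [(r'\s{2,}', ' ')]                          # collapse repeated whitespace
CONTRACTIONS = [r"\b(can)(not)\b", r"(\w+)(n't)\b"]  # each needs two groups

print(word_tokenize("Don't panic, okay?"))
# ['Do', "n't", 'panic', ',', 'okay', '?']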
def _expectation(p, mean, none, kern, feat, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
        - K_{.,} :: Linear kernel

    or the equivalent for MarkovGaussian

    :return: NxDxM
    """
    return tf.matrix_transpose(expectation(p, (kern, feat), mean))
avg_line_len: 29.8, score: 12.6
def set_frequencies(self, freq, rg=None, setMaxfeq=True, setMinfreq=True, setSpeed=True):
    '''
    Set core frequencies.

    freq: int frequency in KHz
    rg: list or range of cores
    setMaxfeq: set the maximum frequency, defaults to True
    setMinfreq: set the minimum frequency, defaults to True
    setSpeed: set the scaling frequency, defaults to True
    '''
    to_change = self.__get_ranges("online")
    if type(rg) == int:
        rg = [rg]
    if rg:
        to_change = set(rg) & set(self.__get_ranges("online"))
    for cpu in to_change:
        if setSpeed:
            fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_setspeed")
            self.__write_cpu_file(fpath, str(freq).encode())
        if setMinfreq:
            fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_min_freq")
            self.__write_cpu_file(fpath, str(freq).encode())
        if setMaxfeq:
            fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_max_freq")
            self.__write_cpu_file(fpath, str(freq).encode())
avg_line_len: 44.625, score: 22.125
def _proxy(self):
    """
    Generate an instance context for the instance; the context is capable of
    performing various actions. All instance actions are proxied to the context.

    :returns: AssetContext for this AssetInstance
    :rtype: twilio.rest.serverless.v1.service.asset.AssetContext
    """
    if self._context is None:
        self._context = AssetContext(
            self._version,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )
    return self._context
avg_line_len: 37.933333, score: 17.666667
def append_stream(self, streamid, stream, encoding=None):
    """append a file to search for similarities"""
    if encoding is None:
        readlines = stream.readlines
    else:
        readlines = decoding_stream(stream, encoding).readlines
    try:
        self.linesets.append(
            LineSet(
                streamid,
                readlines(),
                self.ignore_comments,
                self.ignore_docstrings,
                self.ignore_imports,
            )
        )
    except UnicodeDecodeError:
        pass
avg_line_len: 32.944444, score: 13.166667
def a_torispherical(D, f, k):
    r'''Calculates depth of a torispherical head according to [1]_.

    .. math::
        a = a_1 + a_2

    .. math::
        \alpha = \sin^{-1}\frac{1-2k}{2(f-k)}

    .. math::
        a_1 = fD(1-\cos\alpha)

    .. math::
        a_2 = kD\cos\alpha

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    f : float
        Dish-radius parameter; fD = dish radius [1/m]
    k : float
        Knuckle-radius parameter; kD = knuckle radius [1/m]

    Returns
    -------
    a : float
        Depth of head [m]

    Examples
    --------
    Example from [1]_.

    >>> a_torispherical(D=96., f=0.9, k=0.2)
    25.684268924767125

    References
    ----------
    .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
       2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
    '''
    alpha = asin((1 - 2*k)/(2*(f - k)))
    a1 = f*D*(1 - cos(alpha))
    a2 = k*D*cos(alpha)
    return a1 + a2
avg_line_len: 21.5, score: 24.863636
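Reproducing the docstring example; asin and cos are assumed to come from the math module in the original source:

from math import asin, cos

print(a_torispherical(D=96., f=0.9, k=0.2))  # 25.684268924767125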
def update_job(self, job_id, build=None, custom_data=None,
               name=None, passed=None, public=None, tags=None):
    """Edit an existing job."""
    method = 'PUT'
    endpoint = '/rest/v1/{}/jobs/{}'.format(self.client.sauce_username, job_id)
    data = {}
    if build is not None:
        data['build'] = build
    if custom_data is not None:
        data['custom-data'] = custom_data
    if name is not None:
        data['name'] = name
    if passed is not None:
        data['passed'] = passed
    if public is not None:
        data['public'] = public
    if tags is not None:
        data['tags'] = tags
    body = json.dumps(data)
    return self.client.request(method, endpoint, body=body)
avg_line_len: 38.714286, score: 12.761905
def from_message(cls, message):
    """
    Create a public blob from a network `.Message`.

    Specifically, a cert-bearing pubkey auth packet, because by definition
    OpenSSH-style certificates 'are' their own network representation.
    """
    type_ = message.get_text()
    return cls(type_=type_, blob=message.asbytes())
avg_line_len: 38.888889, score: 17.555556
def add_requirement(
    self,
    install_req,  # type: InstallRequirement
    parent_req_name=None,  # type: Optional[str]
    extras_requested=None  # type: Optional[Iterable[str]]
):
    # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]  # noqa: E501
    """Add install_req as a requirement to install.

    :param parent_req_name: The name of the requirement that needed this
        added. The name is used because when multiple unnamed requirements
        resolve to the same name, we could otherwise end up with dependency
        links that point outside the Requirements set. parent_req must
        already be added. Note that None implies that this is a user
        supplied requirement, vs an inferred one.
    :param extras_requested: an iterable of extras used to evaluate the
        environment markers.
    :return: Additional requirements to scan. That is either [] if
        the requirement is not applicable, or [install_req] if the
        requirement is applicable and has just been added.
    """
    name = install_req.name

    # If the markers do not match, ignore this requirement.
    if not install_req.match_markers(extras_requested):
        logger.info(
            "Ignoring %s: markers '%s' don't match your environment",
            name, install_req.markers,
        )
        return [], None

    # If the wheel is not supported, raise an error.
    # Should check this after filtering out based on environment markers to
    # allow specifying different wheels based on the environment/OS, in a
    # single requirements file.
    if install_req.link and install_req.link.is_wheel:
        wheel = Wheel(install_req.link.filename)
        if self.check_supported_wheels and not wheel.supported():
            raise InstallationError(
                "%s is not a supported wheel on this platform." %
                wheel.filename
            )

    # This next bit is really a sanity check.
    assert install_req.is_direct == (parent_req_name is None), (
        "a direct req shouldn't have a parent and also, "
        "a non direct req should have a parent"
    )

    # Unnamed requirements are scanned again and the requirement won't be
    # added as a dependency until after scanning.
    if not name:
        # url or path requirement w/o an egg fragment
        self.unnamed_requirements.append(install_req)
        return [install_req], None

    try:
        existing_req = self.get_requirement(name)
    except KeyError:
        existing_req = None

    has_conflicting_requirement = (
        parent_req_name is None and
        existing_req and
        not existing_req.constraint and
        existing_req.extras == install_req.extras and
        existing_req.req.specifier != install_req.req.specifier
    )
    if has_conflicting_requirement:
        raise InstallationError(
            "Double requirement given: %s (already in %s, name=%r)"
            % (install_req, existing_req, name)
        )

    # When no existing requirement exists, add the requirement as a
    # dependency and it will be scanned again after.
    if not existing_req:
        self.requirements[name] = install_req
        # FIXME: what about other normalizations? E.g., _ vs. -?
        if name.lower() != name:
            self.requirement_aliases[name.lower()] = name
        # We'd want to rescan this requirement later
        return [install_req], install_req

    # Assume there's no need to scan, and that we've already
    # encountered this for scanning.
    if install_req.constraint or not existing_req.constraint:
        return [], existing_req

    does_not_satisfy_constraint = (
        install_req.link and not (
            existing_req.link and
            install_req.link.path == existing_req.link.path
        )
    )
    if does_not_satisfy_constraint:
        self.reqs_to_cleanup.append(install_req)
        raise InstallationError(
            "Could not satisfy constraints for '%s': "
            "installation from path or url cannot be "
            "constrained to a version" % name,
        )

    # If we're now installing a constraint, mark the existing
    # object for real installation.
    existing_req.constraint = False
    existing_req.extras = tuple(sorted(
        set(existing_req.extras) | set(install_req.extras)
    ))
    logger.debug(
        "Setting %s extras to: %s",
        existing_req, existing_req.extras,
    )
    # Return the existing requirement for addition to the parent and
    # scanning again.
    return [existing_req], existing_req
avg_line_len: 42.094828, score: 19.715517
def get_comments_for_reference_on_date(self, reference_id, from_, to):
    """Pass through to provider CommentLookupSession.get_comments_for_reference_on_date"""
    # Implemented from azosid template for -
    # osid.relationship.RelationshipLookupSession.get_relationships_for_source_on_date_template
    if self._can('lookup'):
        return self._provider_session.get_comments_for_reference_on_date(reference_id, from_, to)
    self._check_lookup_conditions()  # raises PermissionDenied
    query = self._query_session.get_comment_query()
    query.match_source_id(reference_id, match=True)
    query.match_date(from_, to, match=True)
    return self._try_harder(query)
avg_line_len: 64, score: 21.090909
def handle_basic_container_args(options, parser=None):
    """Handle the options specified by add_basic_container_args().
    @return: a dict that can be used as kwargs for the ContainerExecutor constructor
    """
    dir_modes = {}
    error_fn = parser.error if parser else sys.exit

    def handle_dir_mode(path, mode):
        path = os.path.abspath(path)
        if not os.path.isdir(path):
            error_fn(
                "Cannot specify directory mode for '{}' because it does not exist or is no directory."
                .format(path))
        if path in dir_modes:
            error_fn("Cannot specify multiple directory modes for '{}'.".format(path))
        dir_modes[path] = mode

    for path in options.hidden_dir:
        handle_dir_mode(path, DIR_HIDDEN)
    for path in options.read_only_dir:
        handle_dir_mode(path, DIR_READ_ONLY)
    for path in options.overlay_dir:
        handle_dir_mode(path, DIR_OVERLAY)
    for path in options.full_access_dir:
        handle_dir_mode(path, DIR_FULL_ACCESS)

    if options.keep_tmp:
        if "/tmp" in dir_modes and not dir_modes["/tmp"] == DIR_FULL_ACCESS:
            error_fn("Cannot specify both --keep-tmp and --hidden-dir /tmp.")
        dir_modes["/tmp"] = DIR_FULL_ACCESS
    elif not "/tmp" in dir_modes:
        dir_modes["/tmp"] = DIR_HIDDEN

    if not "/" in dir_modes:
        dir_modes["/"] = DIR_OVERLAY
    if not "/run" in dir_modes:
        dir_modes["/run"] = DIR_HIDDEN

    if options.container_system_config:
        if options.network_access:
            logging.warning("The container configuration disables DNS, "
                            "host lookups will fail despite --network-access. "
                            "Consider using --keep-system-config.")
    else:
        # /etc/resolv.conf is necessary for DNS lookups and on many systems is a symlink
        # to either /run/resolvconf/resolv.conf or /run/systemd/resolve/sub-resolve.conf,
        # so we keep that directory accessible as well.
        if not "/run/resolvconf" in dir_modes and os.path.isdir("/run/resolvconf"):
            dir_modes["/run/resolvconf"] = DIR_READ_ONLY
        if not "/run/systemd/resolve" in dir_modes and os.path.isdir("/run/systemd/resolve"):
            dir_modes["/run/systemd/resolve"] = DIR_READ_ONLY

    return {
        'network_access': options.network_access,
        'container_tmpfs': options.tmpfs,
        'container_system_config': options.container_system_config,
        'dir_modes': dir_modes,
    }
avg_line_len: 42.086207, score: 19.241379
def _sounds_re(include_erhua=False):
    """Sounds are syllables + tones"""
    tone = '[1-5]'
    optional_final_erhua = '|r\\b' if include_erhua else ''
    pattern = '({}{}{})'.format(_joined_syllables_re(), tone, optional_final_erhua)
    return re.compile(pattern, re.IGNORECASE)
avg_line_len: 39.857143, score: 19
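A runnable sketch with a stub for the _joined_syllables_re helper (assumed to return an alternation of pinyin syllables; two syllables are enough to demonstrate the pattern):

import re

def _joined_syllables_re():
    return '(?:zhong|guo)'  # stub; the real helper joins the full syllable list

print(_sounds_re().findall('Zhong1guo2'))  # ['Zhong1', 'guo2']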
def run(items):
    """Perform detection of structural variations with Manta.
    """
    paired = vcfutils.get_paired(items)
    data = paired.tumor_data if paired else items[0]
    work_dir = _sv_workdir(data)
    variant_file = _get_out_file(work_dir, paired)
    if not utils.file_exists(variant_file):
        with file_transaction(data, work_dir) as tx_work_dir:
            utils.safe_makedir(tx_work_dir)
            tx_workflow_file = _prep_config(items, paired, tx_work_dir)
            _run_workflow(items, paired, tx_workflow_file, tx_work_dir)
    assert utils.file_exists(variant_file), "Manta finished without output file %s" % variant_file
    variant_file = shared.annotate_with_depth(variant_file, items)
    out = []
    upload_counts = collections.defaultdict(int)
    for data in items:
        if "break-point-inspector" in dd.get_tools_on(data):
            if paired and paired.normal_bam and paired.tumor_name == dd.get_sample_name(data):
                variant_file = _run_break_point_inspector(data, variant_file, paired, work_dir)
        if "sv" not in data:
            data["sv"] = []
        final_vcf = shared.finalize_sv(variant_file, data, items)
        vc = {"variantcaller": "manta",
              "do_upload": upload_counts[final_vcf] == 0,  # only upload a single file per batch
              "vrn_file": final_vcf}
        evidence_bam = _get_evidence_bam(work_dir, data)
        if evidence_bam:
            vc["read_evidence"] = evidence_bam
        data["sv"].append(vc)
        upload_counts[final_vcf] += 1
        out.append(data)
    return out
avg_line_len: 47.30303, score: 18.69697
def transform(self, y):
    """Transform labels to normalized encoding.

    Parameters
    ----------
    y : array-like of shape [n_samples]
        Target values.

    Returns
    -------
    y : array-like of shape [n_samples]
    """
    check_is_fitted(self, 'classes_')

    y = column_or_1d(y, warn=True)

    classes = np.unique(y)
    _check_numpy_unicode_bug(classes)
    if len(np.intersect1d(classes, self.classes_)) < len(classes):
        diff = np.setdiff1d(classes, self.classes_)
        raise ValueError("y contains new labels: %s" % str(diff))
    return np.searchsorted(self.classes_, y)
avg_line_len: 31.190476, score: 16.52381
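This looks like scikit-learn's LabelEncoder.transform; assuming so, typical use:

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
le.fit(['paris', 'tokyo', 'paris'])
print(le.transform(['tokyo', 'paris']))  # [1 0]
# le.transform(['amsterdam'])  # would raise ValueError (unseen label)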
def on_activity_lifecycle_changed(self, change):
    """If the app pauses without pausing the barcode scanner
    the camera can't be reopened. So we must do it here.
    """
    d = self.declaration
    if d.active:
        if change['value'] == 'paused':
            self.widget.pause(now=True)
        elif change['value'] == 'resumed':
            self.widget.resume()
avg_line_len: 40.5, score: 8.1
def trigger_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/triggers#create-trigger"
    api_path = "/api/v2/triggers"
    return self.call(api_path, method="POST", data=data, **kwargs)
avg_line_len: 57.5, score: 19
def generate(self, signature_data):
    """Takes data and returns a signature

    :arg dict signature_data: data to use to generate a signature

    :returns: ``Result`` instance
    """
    result = Result()

    for rule in self.pipeline:
        rule_name = rule.__class__.__name__
        try:
            if rule.predicate(signature_data, result):
                rule.action(signature_data, result)
        except Exception as exc:
            if self.error_handler:
                self.error_handler(
                    signature_data,
                    exc_info=sys.exc_info(),
                    extra={'rule': rule_name}
                )
            result.info(rule_name, 'Rule failed: %s', exc)

    return result
avg_line_len: 29.074074, score: 18.037037
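A rule only needs predicate(signature_data, result) and action(signature_data, result); a hypothetical minimal rule compatible with the loop above (the Result API beyond .info() is assumed):

class CrashTypeRule:
    # hypothetical rule: fires only when the input carries a 'crash_type' key
    def predicate(self, signature_data, result):
        return 'crash_type' in signature_data

    def action(self, signature_data, result):
        result.info(self.__class__.__name__, 'saw crash type %s',
                    signature_data['crash_type'])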
def read_key(pysam_alignment_record):
    '''
    Given a `pysam.AlignedSegment` instance, return the attributes
    identifying the *read* it comes from (not the alignment). There may be
    more than one alignment for a read, e.g. chimeric and secondary
    alignments.
    '''
    return (
        pysam_alignment_record.query_name,
        pysam_alignment_record.is_duplicate,
        pysam_alignment_record.is_read1,
        pysam_alignment_record.is_read2,
    )
avg_line_len: 37.75, score: 19.75
def start_threads(self):
    """Spawn threads for URL checking and status printing."""
    if self.config["status"]:
        t = status.Status(self, self.config["status_wait_seconds"])
        t.start()
        self.threads.append(t)
    if self.config["maxrunseconds"]:
        t = interrupt.Interrupt(self.config["maxrunseconds"])
        t.start()
        self.threads.append(t)
    num = self.config["threads"]
    if num > 0:
        for dummy in range(num):
            t = checker.Checker(self.urlqueue, self.logger, self.add_request_session)
            self.threads.append(t)
            t.start()
    else:
        self.request_sessions[thread.get_ident()] = new_request_session(self.config, self.cookies)
        checker.check_urls(self.urlqueue, self.logger)
avg_line_len: 43.421053, score: 17.368421
def set_properties(self, properties, **kwargs):
    """
    :param properties: Property names and values given as key-value pairs of strings
    :type properties: dict

    Given key-value pairs in *properties* for property names and values,
    the properties are set on the analysis for the given property names.
    Any property with a value of :const:`None` indicates the property will
    be deleted.

    .. note:: Any existing properties not mentioned in *properties* are
       not modified by this method.
    """
    dxpy.api.analysis_set_properties(self._dxid, {"properties": properties}, **kwargs)
avg_line_len: 40, score: 24.875
def convert_saturation(self, saturation):
    """
    Convert the saturation from decimal percent (0.0-1.0)
    to byte representation for use in commands.

    :param saturation: The saturation in decimal percent (0.0-1.0).
        1.0 is the maximum saturation, where no white LEDs will be on.
        0.0 is no saturation.
    :return: The saturation in byte representation.
    """
    saturation_inverted = 1 - saturation
    return math.ceil(saturation_inverted * self.MAX_SATURATION)
avg_line_len: 42.916667, score: 17.916667
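Worked example of the mapping, assuming a hypothetical MAX_SATURATION of 128:

import math

saturation = 0.25
print(math.ceil((1 - saturation) * 128))  # 96; full saturation (1.0) gives 0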
def _reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):  # pylint: disable=unused-argument
    """Computes `log(sum(exp(input_tensor)))` along the specified axis."""
    try:
        return scipy_special.logsumexp(
            input_tensor, axis=_astuple(axis), keepdims=keepdims)
    except NotImplementedError:
        # We offer a non-SP version just in case SP isn't installed, because
        # logsumexp is often used.
        m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
        y = input_tensor - m
        y = np.exp(y, out=y)
        return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))
avg_line_len: 50.75, score: 22.166667
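The fallback branch is the standard max-shift trick; shown directly, subtracting the maximum keeps exp() from overflowing without changing the result:

import numpy as np

x = np.array([1000.0, 1000.0])
naive = np.log(np.sum(np.exp(x)))            # overflows: inf
m = np.max(x)
stable = m + np.log(np.sum(np.exp(x - m)))   # 1000.6931471805599
print(naive, stable)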
def output_files(self):
    """Returns list of output files from this rule, relative to buildroot.

    In this case it's simple (for now) - the output files are enumerated
    in the rule definition.
    """
    outs = [os.path.join(self.address.repo, self.address.path, x)
            for x in self.params['outs']]
    return outs
avg_line_len: 39.111111, score: 18.111111
def _matchremove_simple_endings(self, word):
    """Remove the noun, adjective, adverb word endings"""
    was_stemmed = False

    # noun, adjective, and adverb word endings sorted by charlen, then alph
    simple_endings = ['ibus', 'ius', 'ae', 'am', 'as', 'em', 'es', 'ia',
                      'is', 'nt', 'os', 'ud', 'um', 'us', 'a', 'e', 'i',
                      'o', 'u']

    for ending in simple_endings:
        if word.endswith(ending):
            word = re.sub(r'{0}$'.format(ending), '', word)
            was_stemmed = True
            break

    return word, was_stemmed
[ "def", "_matchremove_simple_endings", "(", "self", ",", "word", ")", ":", "was_stemmed", "=", "False", "# noun, adjective, and adverb word endings sorted by charlen, then alph", "simple_endings", "=", "[", "'ibus'", ",", "'ius'", ",", "'ae'", ",", "'am'", ",", "'as'", ",", "'em'", ",", "'es'", ",", "'ia'", ",", "'is'", ",", "'nt'", ",", "'os'", ",", "'ud'", ",", "'um'", ",", "'us'", ",", "'a'", ",", "'e'", ",", "'i'", ",", "'o'", ",", "'u'", "]", "for", "ending", "in", "simple_endings", ":", "if", "word", ".", "endswith", "(", "ending", ")", ":", "word", "=", "re", ".", "sub", "(", "r'{0}$'", ".", "format", "(", "ending", ")", ",", "''", ",", "word", ")", "was_stemmed", "=", "True", "break", "return", "word", ",", "was_stemmed" ]
32.060606
12.575758
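A self-contained sketch of the same first-match stemming loop, with a couple of Latin examples:

import re

# endings sorted by length then alphabetically, as in the method above
simple_endings = ['ibus', 'ius', 'ae', 'am', 'as', 'em', 'es', 'ia', 'is',
                  'nt', 'os', 'ud', 'um', 'us', 'a', 'e', 'i', 'o', 'u']

def strip_ending(word):
    for ending in simple_endings:
        if word.endswith(ending):
            # only the first (longest) matching ending is removed
            return re.sub(r'{0}$'.format(ending), '', word), True
    return word, False

print(strip_ending('amicus'))    # ('amic', True)  -- strips 'us'
print(strip_ending('rosis'))     # ('ros', True)   -- strips 'is'
print(strip_ending('et'))        # ('et', False)   -- no ending matched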
def get_logger(context=None, name=None): """Return a logger for *context*. Return a :class:`ContextLogger` instance. The instance implements the standard library's :class:`logging.Logger` interface. """ # Many class instances have their own logger. Share them to save memory if # possible, i.e. when *context* is not set. if name is None: name = _logger_name if context is None and name in _logger_dict: return _logger_dict[name] if context is not None and not isinstance(context, six.string_types): context = util.objref(context) logger = logging.getLogger(name) logger = ContextLogger(logger, context) if context is None: _logger_dict[name] = logger return logger
[ "def", "get_logger", "(", "context", "=", "None", ",", "name", "=", "None", ")", ":", "# Many class instances have their own logger. Share them to save memory if", "# possible, i.e. when *context* is not set.", "if", "name", "is", "None", ":", "name", "=", "_logger_name", "if", "context", "is", "None", "and", "name", "in", "_logger_dict", ":", "return", "_logger_dict", "[", "name", "]", "if", "context", "is", "not", "None", "and", "not", "isinstance", "(", "context", ",", "six", ".", "string_types", ")", ":", "context", "=", "util", ".", "objref", "(", "context", ")", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", "=", "ContextLogger", "(", "logger", ",", "context", ")", "if", "context", "is", "None", ":", "_logger_dict", "[", "name", "]", "=", "logger", "return", "logger" ]
38.526316
14.210526
def authenticate(self): """ Authenticate against the HP Cloud Identity Service. This is the first step in any hpcloud.com session, although this method is automatically called when accessing higher-level methods/attributes. **Examples of Credentials Configuration** - Bare minimum for authentication using HP API keys: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo - With multiple *compute* availability zones activated, the region must also be specified (due to current limitations in the OpenStack client libraries): .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo region_name: az-1.region-a.geo-1 - Using ``username`` and ``password`` is also allowed, but discouraged: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 username: farley.mowat password: NeverCryW0lf When both API keys and ``username+password`` are specified, the API keys are used. """ log.info("Authenticating to HP Cloud...") creds = self.creds access_key_id = creds.get('access_key_id', '') secret_access_key = creds.get('secret_access_key', '') # prefer api key + secret key, but fallback to username + password if access_key_id and secret_access_key: self.nova_client.client.os_access_key_id = access_key_id self.nova_client.client.os_secret_key = secret_access_key self.nova_client.authenticate()
[ "def", "authenticate", "(", "self", ")", ":", "log", ".", "info", "(", "\"Authenticating to HP Cloud...\"", ")", "creds", "=", "self", ".", "creds", "access_key_id", "=", "creds", ".", "get", "(", "'access_key_id'", ",", "''", ")", "secret_access_key", "=", "creds", ".", "get", "(", "'secret_access_key'", ",", "''", ")", "# prefer api key + secret key, but fallback to username + password", "if", "access_key_id", "and", "secret_access_key", ":", "self", ".", "nova_client", ".", "client", ".", "os_access_key_id", "=", "access_key_id", "self", ".", "nova_client", ".", "client", ".", "os_secret_key", "=", "secret_access_key", "self", ".", "nova_client", ".", "authenticate", "(", ")" ]
38.862069
23.172414
def v1_url_associations(tags, url): '''Retrieve associations for a given URL. The associations returned have the exact same structure as defined in the ``v1_tag_associate`` route with one addition: a ``tag`` field contains the full tag name for the association. ''' url = urllib.unquote_plus(url.decode('utf-8')).strip() return {'associations': tags.assocs_by_url(url)}
[ "def", "v1_url_associations", "(", "tags", ",", "url", ")", ":", "url", "=", "urllib", ".", "unquote_plus", "(", "url", ".", "decode", "(", "'utf-8'", ")", ")", ".", "strip", "(", ")", "return", "{", "'associations'", ":", "tags", ".", "assocs_by_url", "(", "url", ")", "}" ]
43.333333
20.666667
def prune_mechanism_by_data(graph, key: Optional[str] = None) -> None: """Remove all leaves and source nodes that don't have weights. Is a thin wrapper around :func:`remove_unweighted_leaves` and :func:`remove_unweighted_sources` :param graph: A BEL graph :param key: The key in the node data dictionary representing the experimental data. Defaults to :data:`pybel_tools.constants.WEIGHT`. Equivalent to: >>> remove_unweighted_leaves(graph) >>> remove_unweighted_sources(graph) """ remove_unweighted_leaves(graph, key=key) remove_unweighted_sources(graph, key=key)
[ "def", "prune_mechanism_by_data", "(", "graph", ",", "key", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "remove_unweighted_leaves", "(", "graph", ",", "key", "=", "key", ")", "remove_unweighted_sources", "(", "graph", ",", "key", "=", "key", ")" ]
37.4375
22.125
def get_natural_key_info(cls): """ Derive natural key from first unique_together definition, noting which fields are related objects vs. regular fields. """ fields = cls.get_natural_key_def() info = [] for name in fields: field = cls._meta.get_field(name) rel_to = None if hasattr(field, 'rel'): rel_to = field.rel.to if field.rel else None elif hasattr(field, 'remote_field'): if field.remote_field: rel_to = field.remote_field.model else: rel_to = None info.append((name, rel_to)) return info
[ "def", "get_natural_key_info", "(", "cls", ")", ":", "fields", "=", "cls", ".", "get_natural_key_def", "(", ")", "info", "=", "[", "]", "for", "name", "in", "fields", ":", "field", "=", "cls", ".", "_meta", ".", "get_field", "(", "name", ")", "rel_to", "=", "None", "if", "hasattr", "(", "field", ",", "'rel'", ")", ":", "rel_to", "=", "field", ".", "rel", ".", "to", "if", "field", ".", "rel", "else", "None", "elif", "hasattr", "(", "field", ",", "'remote_field'", ")", ":", "if", "field", ".", "remote_field", ":", "rel_to", "=", "field", ".", "remote_field", ".", "model", "else", ":", "rel_to", "=", "None", "info", ".", "append", "(", "(", "name", ",", "rel_to", ")", ")", "return", "info" ]
36.210526
11.263158
def encrypt(self, key, iv="", cek="", **kwargs): """ Produces a JWE as defined in RFC7516 using RSA algorithms. :param key: RSA key :param iv: Initialization vector :param cek: Content master key :param kwargs: Extra keyword arguments :return: An encrypted JWE """ _msg = as_bytes(self.msg) if "zip" in self: if self["zip"] == "DEF": _msg = zlib.compress(_msg) else: raise ParameterError("Zip has unknown value: %s" % self["zip"]) kwarg_cek = cek or None _enc = self["enc"] iv = self._generate_iv(_enc, iv) cek = self._generate_key(_enc, cek) self["cek"] = cek logger.debug("cek: %s, iv: %s" % ([c for c in cek], [c for c in iv])) _encrypt = RSAEncrypter(self.with_digest).encrypt _alg = self["alg"] if kwarg_cek: jwe_enc_key = '' elif _alg == "RSA-OAEP": jwe_enc_key = _encrypt(cek, key, 'pkcs1_oaep_padding') elif _alg == "RSA-OAEP-256": jwe_enc_key = _encrypt(cek, key, 'pkcs1_oaep_256_padding') elif _alg == "RSA1_5": jwe_enc_key = _encrypt(cek, key) else: raise NotSupportedAlgorithm(_alg) jwe = JWEnc(**self.headers()) try: _auth_data = kwargs['auth_data'] except KeyError: _auth_data = jwe.b64_encode_header() ctxt, tag, key = self.enc_setup(_enc, _msg, key=cek, iv=iv, auth_data=_auth_data) return jwe.pack(parts=[jwe_enc_key, iv, ctxt, tag])
[ "def", "encrypt", "(", "self", ",", "key", ",", "iv", "=", "\"\"", ",", "cek", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "_msg", "=", "as_bytes", "(", "self", ".", "msg", ")", "if", "\"zip\"", "in", "self", ":", "if", "self", "[", "\"zip\"", "]", "==", "\"DEF\"", ":", "_msg", "=", "zlib", ".", "compress", "(", "_msg", ")", "else", ":", "raise", "ParameterError", "(", "\"Zip has unknown value: %s\"", "%", "self", "[", "\"zip\"", "]", ")", "kwarg_cek", "=", "cek", "or", "None", "_enc", "=", "self", "[", "\"enc\"", "]", "iv", "=", "self", ".", "_generate_iv", "(", "_enc", ",", "iv", ")", "cek", "=", "self", ".", "_generate_key", "(", "_enc", ",", "cek", ")", "self", "[", "\"cek\"", "]", "=", "cek", "logger", ".", "debug", "(", "\"cek: %s, iv: %s\"", "%", "(", "[", "c", "for", "c", "in", "cek", "]", ",", "[", "c", "for", "c", "in", "iv", "]", ")", ")", "_encrypt", "=", "RSAEncrypter", "(", "self", ".", "with_digest", ")", ".", "encrypt", "_alg", "=", "self", "[", "\"alg\"", "]", "if", "kwarg_cek", ":", "jwe_enc_key", "=", "''", "elif", "_alg", "==", "\"RSA-OAEP\"", ":", "jwe_enc_key", "=", "_encrypt", "(", "cek", ",", "key", ",", "'pkcs1_oaep_padding'", ")", "elif", "_alg", "==", "\"RSA-OAEP-256\"", ":", "jwe_enc_key", "=", "_encrypt", "(", "cek", ",", "key", ",", "'pkcs1_oaep_256_padding'", ")", "elif", "_alg", "==", "\"RSA1_5\"", ":", "jwe_enc_key", "=", "_encrypt", "(", "cek", ",", "key", ")", "else", ":", "raise", "NotSupportedAlgorithm", "(", "_alg", ")", "jwe", "=", "JWEnc", "(", "*", "*", "self", ".", "headers", "(", ")", ")", "try", ":", "_auth_data", "=", "kwargs", "[", "'auth_data'", "]", "except", "KeyError", ":", "_auth_data", "=", "jwe", ".", "b64_encode_header", "(", ")", "ctxt", ",", "tag", ",", "key", "=", "self", ".", "enc_setup", "(", "_enc", ",", "_msg", ",", "key", "=", "cek", ",", "iv", "=", "iv", ",", "auth_data", "=", "_auth_data", ")", "return", "jwe", ".", "pack", "(", "parts", "=", "[", "jwe_enc_key", ",", "iv", ",", "ctxt", ",", "tag", "]", ")" ]
31.627451
18.254902
def merge_subtokens(doc, label="subtok"): """Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens """ merger = Matcher(doc.vocab) merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}]) matches = merger(doc) spans = [doc[start : end + 1] for _, start, end in matches] with doc.retokenize() as retokenizer: for span in spans: retokenizer.merge(span) return doc
[ "def", "merge_subtokens", "(", "doc", ",", "label", "=", "\"subtok\"", ")", ":", "merger", "=", "Matcher", "(", "doc", ".", "vocab", ")", "merger", ".", "add", "(", "\"SUBTOK\"", ",", "None", ",", "[", "{", "\"DEP\"", ":", "label", ",", "\"op\"", ":", "\"+\"", "}", "]", ")", "matches", "=", "merger", "(", "doc", ")", "spans", "=", "[", "doc", "[", "start", ":", "end", "+", "1", "]", "for", "_", ",", "start", ",", "end", "in", "matches", "]", "with", "doc", ".", "retokenize", "(", ")", "as", "retokenizer", ":", "for", "span", "in", "spans", ":", "retokenizer", ".", "merge", "(", "span", ")", "return", "doc" ]
34.529412
15
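A hedged usage sketch, assuming a spaCy v2-style pipeline and a parser that emits 'subtok' dependencies (the German model name below is an assumption for illustration):

import spacy
from spacy.pipeline.functions import merge_subtokens

nlp = spacy.load("de_core_news_sm")            # assumed model with subtok deps
nlp.add_pipe(merge_subtokens, after="parser")  # v2-style add_pipe with a function
doc = nlp("Das ist ein Beispiel.")
print([t.text for t in doc])                   # subtoken spans now single tokens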
def pull(self, platform=None): """ Pull the image digest. Args: platform (str): The platform to pull the image for. Default: ``None`` Returns: (:py:class:`Image`): A reference to the pulled image. """ repository, _ = parse_repository_tag(self.image_name) return self.collection.pull(repository, tag=self.id, platform=platform)
[ "def", "pull", "(", "self", ",", "platform", "=", "None", ")", ":", "repository", ",", "_", "=", "parse_repository_tag", "(", "self", ".", "image_name", ")", "return", "self", ".", "collection", ".", "pull", "(", "repository", ",", "tag", "=", "self", ".", "id", ",", "platform", "=", "platform", ")" ]
31.384615
20.769231
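A hedged docker-py-style usage sketch (the `get_registry_data` entry point and registry access are assumptions about the surrounding client API):

import docker

client = docker.from_env()
data = client.images.get_registry_data("alpine")   # resolves name -> digest
image = data.pull(platform="linux/amd64")          # pulls that exact digest
print(image.id)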
def apply_filters(self, query, filters): """ Apply user-specified filters to the query """ assert isinstance(query, peewee.Query) assert isinstance(filters, dict)
[ "def", "apply_filters", "(", "self", ",", "query", ",", "filters", ")", ":", "assert", "isinstance", "(", "query", ",", "peewee", ".", "Query", ")", "assert", "isinstance", "(", "filters", ",", "dict", ")" ]
32.166667
1.833333
def set_state(self, state=None, **kwargs): """ Set the view state of the camera. Should be a dict (or kwargs) as returned by get_state. It can be an incomplete dict, in which case only the specified properties are set. Parameters ---------- state : dict The camera state. **kwargs : dict State properties given as keyword arguments. """ D = state or {} D.update(kwargs) for key, val in D.items(): if key not in self._state_props: raise KeyError('Not a valid camera state property %r' % key) setattr(self, key, val)
[ "def", "set_state", "(", "self", ",", "state", "=", "None", ",", "*", "*", "kwargs", ")", ":", "D", "=", "state", "or", "{", "}", "D", ".", "update", "(", "kwargs", ")", "for", "key", ",", "val", "in", "D", ".", "items", "(", ")", ":", "if", "key", "not", "in", "self", ".", "_state_props", ":", "raise", "KeyError", "(", "'Not a valid camera state property %r'", "%", "key", ")", "setattr", "(", "self", ",", "key", ",", "val", ")" ]
31.8
16.25
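A minimal self-contained stand-in for the pattern (the `Camera` class here is invented for illustration; only `set_state` mirrors the method above):

class Camera:
    _state_props = ('center', 'scale_factor')

    def __init__(self):
        self.center = (0.0, 0.0)
        self.scale_factor = 1.0

    def set_state(self, state=None, **kwargs):
        D = state or {}
        D.update(kwargs)
        for key, val in D.items():
            if key not in self._state_props:
                raise KeyError('Not a valid camera state property %r' % key)
            setattr(self, key, val)

cam = Camera()
cam.set_state({'center': (1.0, 2.0)}, scale_factor=3.0)  # dict and kwargs mix
print(cam.center, cam.scale_factor)                      # (1.0, 2.0) 3.0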
def routing(routes, request): """Route matching helper.""" # strip trailing slashes from request path path = request.path.strip('/') # iterate through routes to match args = {} for name, route in routes.items(): if route['path'] == '^': # this section exists because regex doesn't work for null character as desired if path == '': match = [True] else: match = [] else: match = re.findall(route['path'], path) if match: # found the matching url, iterate through variables to pass data # check if method exists if not request.method in route['method']: raise TornMethodNotAllowed values = match[0] # in form of tuples if type(values) != bool: for i in range(len(route['variables'])): # if value is blank, check if default exists and pass it instead if type(values) == str: args[route['variables'][i]] = values else: if not values[i] and route['variables'][i] in route['defaults']: values[i] = route['defaults'][route['variables'][i]] args[route['variables'][i]] = values[i] # we have the variables we need, args, path, controller return { 'kwargs' : args, 'controller' : route['controller'] } raise TornNotFoundError
[ "def", "routing", "(", "routes", ",", "request", ")", ":", "# strip trailing slashes from request path", "path", "=", "request", ".", "path", ".", "strip", "(", "'/'", ")", "# iterate through routes to match", "args", "=", "{", "}", "for", "name", ",", "route", "in", "routes", ".", "items", "(", ")", ":", "if", "route", "[", "'path'", "]", "==", "'^'", ":", "# this section exists because regex doesn't work for null character as desired", "if", "path", "==", "''", ":", "match", "=", "[", "True", "]", "else", ":", "match", "=", "[", "]", "else", ":", "match", "=", "re", ".", "findall", "(", "route", "[", "'path'", "]", ",", "path", ")", "if", "match", ":", "# found the matching url, iterate through variables to pass data", "# check if method exists", "if", "not", "request", ".", "method", "in", "route", "[", "'method'", "]", ":", "raise", "TornMethodNotAllowed", "values", "=", "match", "[", "0", "]", "# in form of tuples", "if", "type", "(", "values", ")", "!=", "bool", ":", "for", "i", "in", "range", "(", "len", "(", "route", "[", "'variables'", "]", ")", ")", ":", "# if value is blank, check if default exists and pass it instead", "if", "type", "(", "values", ")", "==", "str", ":", "args", "[", "route", "[", "'variables'", "]", "[", "i", "]", "]", "=", "values", "else", ":", "if", "not", "values", "[", "i", "]", "and", "route", "[", "'variables'", "]", "[", "i", "]", "in", "route", "[", "'defaults'", "]", ":", "values", "[", "i", "]", "=", "route", "[", "'defaults'", "]", "[", "route", "[", "'variables'", "]", "[", "i", "]", "]", "args", "[", "route", "[", "'variables'", "]", "[", "i", "]", "]", "=", "values", "[", "i", "]", "# we have the variables we need, args, path, controller", "return", "{", "'kwargs'", ":", "args", ",", "'controller'", ":", "route", "[", "'controller'", "]", "}", "raise", "TornNotFoundError" ]
38.756098
19.04878
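A hedged walk-through of the matching logic, assuming `routing` and its Torn* exceptions are in scope; the route table and request object are invented for illustration:

import re

class FakeRequest:
    def __init__(self, path, method):
        self.path, self.method = path, method

routes = {
    'user': {
        'path': r'users/(\w+)$',   # one capture group -> findall returns strings
        'method': ['GET'],
        'variables': ['username'],
        'defaults': {},
        'controller': 'UserController',
    },
}
result = routing(routes, FakeRequest('/users/alice/', 'GET'))
print(result)  # {'kwargs': {'username': 'alice'}, 'controller': 'UserController'}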
def save_filter(name, filt, full=None, path='filters'): r"""Save DLF-filter and inversion output to plain text files.""" # First we'll save the filter using its internal routine. # This will create the directory ./filters if it doesn't exist already. filt.tofile(path) # If full, we store the inversion output if full: # Get file name path = os.path.abspath(path) if len(name.split('.')) == 2: suffix = '.gz' else: suffix = '' fullfile = os.path.join(path, name.split('.')[0]+'_full.txt' + suffix) # Get number of spacing and shift values nspace, nshift = full[3].shape # Create header header = 'Full inversion output from empymod.fdesign.design\n' header += 'Line 11: Nr of spacing values\n' header += 'Line 12: Nr of shift values\n' header += 'Line 13: Best spacing value\n' header += 'Line 14: Best shift value\n' header += 'Line 15: Min amplitude or max offset\n' header += 'Lines 16-{}: Spacing matrix '.format(nspace+15) header += '({} x {})\n'.format(nspace, nshift) header += 'Lines {}-{}: Spacing matrix '.format(nspace+16, 2*nspace+15) header += '({} x {})\n'.format(nspace, nshift) header += 'Lines {}-{}: Spacing '.format(2*nspace+16, 3*nspace+15) header += 'matrix ({} x {})\n'.format(nspace, nshift) header += 'Line {}: Integer: 0: min amp, 1: max r'.format(3*nspace+16) # Create arrays; put single values in arrays of nshift values nr_spacing = np.r_[nspace, np.zeros(nshift-1)] nr_shift = np.r_[nshift, np.zeros(nshift-1)] best_spacing = np.r_[full[0][0], np.zeros(nshift-1)] best_shift = np.r_[full[0][1], np.zeros(nshift-1)] min_value = np.r_[np.atleast_1d(full[1]), np.zeros(nshift-1)] min_max = np.r_[full[4], np.zeros(nshift-1)] # Collect all in one array fullsave = np.vstack((nr_spacing, nr_shift, best_spacing, best_shift, min_value, full[2][0], full[2][1], full[3], min_max)) # Save array np.savetxt(fullfile, fullsave, header=header)
[ "def", "save_filter", "(", "name", ",", "filt", ",", "full", "=", "None", ",", "path", "=", "'filters'", ")", ":", "# First we'll save the filter using its internal routine.", "# This will create the directory ./filters if it doesn't exist already.", "filt", ".", "tofile", "(", "path", ")", "# If full, we store the inversion output", "if", "full", ":", "# Get file name", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "len", "(", "name", ".", "split", "(", "'.'", ")", ")", "==", "2", ":", "suffix", "=", "'.gz'", "else", ":", "suffix", "=", "''", "fullfile", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'_full.txt'", "+", "suffix", ")", "# Get number of spacing and shift values", "nspace", ",", "nshift", "=", "full", "[", "3", "]", ".", "shape", "# Create header", "header", "=", "'Full inversion output from empymod.fdesign.design\\n'", "header", "+=", "'Line 11: Nr of spacing values\\n'", "header", "+=", "'Line 12: Nr of shift values\\n'", "header", "+=", "'Line 13: Best spacing value\\n'", "header", "+=", "'Line 14: Best shift value\\n'", "header", "+=", "'Line 15: Min amplitude or max offset\\n'", "header", "+=", "'Lines 16-{}: Spacing matrix '", ".", "format", "(", "nspace", "+", "15", ")", "header", "+=", "'({} x {})\\n'", ".", "format", "(", "nspace", ",", "nshift", ")", "header", "+=", "'Lines {}-{}: Spacing matrix '", ".", "format", "(", "nspace", "+", "16", ",", "2", "*", "nspace", "+", "15", ")", "header", "+=", "'({} x {})\\n'", ".", "format", "(", "nspace", ",", "nshift", ")", "header", "+=", "'Lines {}-{}: Spacing '", ".", "format", "(", "2", "*", "nspace", "+", "16", ",", "3", "*", "nspace", "+", "15", ")", "header", "+=", "'matrix ({} x {})\\n'", ".", "format", "(", "nspace", ",", "nshift", ")", "header", "+=", "'Line {}: Integer: 0: min amp, 1: max r'", ".", "format", "(", "3", "*", "nspace", "+", "16", ")", "# Create arrays; put single values in arrays of nshift values", "nr_spacing", "=", "np", ".", "r_", "[", "nspace", ",", "np", ".", "zeros", "(", "nshift", "-", "1", ")", "]", "nr_shift", "=", "np", ".", "r_", "[", "nshift", ",", "np", ".", "zeros", "(", "nshift", "-", "1", ")", "]", "best_spacing", "=", "np", ".", "r_", "[", "full", "[", "0", "]", "[", "0", "]", ",", "np", ".", "zeros", "(", "nshift", "-", "1", ")", "]", "best_shift", "=", "np", ".", "r_", "[", "full", "[", "0", "]", "[", "1", "]", ",", "np", ".", "zeros", "(", "nshift", "-", "1", ")", "]", "min_value", "=", "np", ".", "r_", "[", "np", ".", "atleast_1d", "(", "full", "[", "1", "]", ")", ",", "np", ".", "zeros", "(", "nshift", "-", "1", ")", "]", "min_max", "=", "np", ".", "r_", "[", "full", "[", "4", "]", ",", "np", ".", "zeros", "(", "nshift", "-", "1", ")", "]", "# Collect all in one array", "fullsave", "=", "np", ".", "vstack", "(", "(", "nr_spacing", ",", "nr_shift", ",", "best_spacing", ",", "best_shift", ",", "min_value", ",", "full", "[", "2", "]", "[", "0", "]", ",", "full", "[", "2", "]", "[", "1", "]", ",", "full", "[", "3", "]", ",", "min_max", ")", ")", "# Save array", "np", ".", "savetxt", "(", "fullfile", ",", "fullsave", ",", "header", "=", "header", ")" ]
39.654545
22.963636
def _cmd_opts_solver(self, cmd_name): """Scan options related to one command and enrich _opt_cmds.""" sections = self.sections_list(cmd_name) cmd_dict = self._opt_cmds[cmd_name] if cmd_name else self._opt_bare for sct in reversed(sections): for opt, opt_meta in self._conf[sct].def_.items(): if not opt_meta.cmd_arg: continue if opt not in cmd_dict: cmd_dict[opt] = sct else: warnings.warn( 'Command <{0}>: {1}.{2} shadowed by {3}.{2}'.format( cmd_name, sct, opt, cmd_dict[opt]), error.LoamWarning, stacklevel=4)
[ "def", "_cmd_opts_solver", "(", "self", ",", "cmd_name", ")", ":", "sections", "=", "self", ".", "sections_list", "(", "cmd_name", ")", "cmd_dict", "=", "self", ".", "_opt_cmds", "[", "cmd_name", "]", "if", "cmd_name", "else", "self", ".", "_opt_bare", "for", "sct", "in", "reversed", "(", "sections", ")", ":", "for", "opt", ",", "opt_meta", "in", "self", ".", "_conf", "[", "sct", "]", ".", "def_", ".", "items", "(", ")", ":", "if", "not", "opt_meta", ".", "cmd_arg", ":", "continue", "if", "opt", "not", "in", "cmd_dict", ":", "cmd_dict", "[", "opt", "]", "=", "sct", "else", ":", "warnings", ".", "warn", "(", "'Command <{0}>: {1}.{2} shadowed by {3}.{2}'", ".", "format", "(", "cmd_name", ",", "sct", ",", "opt", ",", "cmd_dict", "[", "opt", "]", ")", ",", "error", ".", "LoamWarning", ",", "stacklevel", "=", "4", ")" ]
48.4
12.2
def next(self, verifyPad=False): """Manually iterate through the data loaded in the Instrument object. Bounds of iteration and iteration type (day/file) are set by the `bounds` attribute. Note ---- If there were no previous calls to load, then the first day (default) or file will be loaded. """ if self._iter_type == 'date': if self.date is not None: idx, = np.where(self._iter_list == self.date) if (len(idx) == 0): raise StopIteration('File list is empty. Nothing to be done.') elif idx[-1]+1 >= len(self._iter_list): raise StopIteration('Outside the set date boundaries.') else: idx += 1 self.load(date=self._iter_list[idx[0]], verifyPad=verifyPad) else: self.load(date=self._iter_list[0], verifyPad=verifyPad) elif self._iter_type == 'file': if self._fid is not None: first = self.files.get_index(self._iter_list[0]) last = self.files.get_index(self._iter_list[-1]) if (self._fid < first) | (self._fid+1 > last): raise StopIteration('Outside the set file boundaries.') else: self.load(fname=self._iter_list[self._fid+1-first], verifyPad=verifyPad) else: self.load(fname=self._iter_list[0], verifyPad=verifyPad)
[ "def", "next", "(", "self", ",", "verifyPad", "=", "False", ")", ":", "if", "self", ".", "_iter_type", "==", "'date'", ":", "if", "self", ".", "date", "is", "not", "None", ":", "idx", ",", "=", "np", ".", "where", "(", "self", ".", "_iter_list", "==", "self", ".", "date", ")", "if", "(", "len", "(", "idx", ")", "==", "0", ")", ":", "raise", "StopIteration", "(", "'File list is empty. Nothing to be done.'", ")", "elif", "idx", "[", "-", "1", "]", "+", "1", ">=", "len", "(", "self", ".", "_iter_list", ")", ":", "raise", "StopIteration", "(", "'Outside the set date boundaries.'", ")", "else", ":", "idx", "+=", "1", "self", ".", "load", "(", "date", "=", "self", ".", "_iter_list", "[", "idx", "[", "0", "]", "]", ",", "verifyPad", "=", "verifyPad", ")", "else", ":", "self", ".", "load", "(", "date", "=", "self", ".", "_iter_list", "[", "0", "]", ",", "verifyPad", "=", "verifyPad", ")", "elif", "self", ".", "_iter_type", "==", "'file'", ":", "if", "self", ".", "_fid", "is", "not", "None", ":", "first", "=", "self", ".", "files", ".", "get_index", "(", "self", ".", "_iter_list", "[", "0", "]", ")", "last", "=", "self", ".", "files", ".", "get_index", "(", "self", ".", "_iter_list", "[", "-", "1", "]", ")", "if", "(", "self", ".", "_fid", "<", "first", ")", "|", "(", "self", ".", "_fid", "+", "1", ">", "last", ")", ":", "raise", "StopIteration", "(", "'Outside the set file boundaries.'", ")", "else", ":", "self", ".", "load", "(", "fname", "=", "self", ".", "_iter_list", "[", "self", ".", "_fid", "+", "1", "-", "first", "]", ",", "verifyPad", "=", "verifyPad", ")", "else", ":", "self", ".", "load", "(", "fname", "=", "self", ".", "_iter_list", "[", "0", "]", ",", "verifyPad", "=", "verifyPad", ")" ]
41.72973
20.810811
def get_font_path(self): """Return the current font path as a list of strings.""" r = request.GetFontPath(display = self.display) return r.paths
[ "def", "get_font_path", "(", "self", ")", ":", "r", "=", "request", ".", "GetFontPath", "(", "display", "=", "self", ".", "display", ")", "return", "r", ".", "paths" ]
41.25
12.25
def annotate(abf): """stamp the bottom with file info.""" msg="SWHLab %s "%str(swhlab.VERSION) msg+="ID:%s "%abf.ID msg+="CH:%d "%abf.channel msg+="PROTOCOL:%s "%abf.protoComment msg+="COMMAND: %d%s "%(abf.holding,abf.units) msg+="GENERATED:%s "%'{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()) pylab.annotate(msg,(.001,.001),xycoords='figure fraction',ha='left', va='bottom',color='#999999',family='monospace',size=8, weight='bold') if abf.nADC>1: msg="Ch %d/%d"%(abf.channel+1,abf.nADC) pylab.annotate(msg,(.01,.99),xycoords='figure fraction',ha='left', va='top',color='#FF0000',family='monospace',size=12, weight='bold')
[ "def", "annotate", "(", "abf", ")", ":", "msg", "=", "\"SWHLab %s \"", "%", "str", "(", "swhlab", ".", "VERSION", ")", "msg", "+=", "\"ID:%s \"", "%", "abf", ".", "ID", "msg", "+=", "\"CH:%d \"", "%", "abf", ".", "channel", "msg", "+=", "\"PROTOCOL:%s \"", "%", "abf", ".", "protoComment", "msg", "+=", "\"COMMAND: %d%s \"", "%", "(", "abf", ".", "holding", ",", "abf", ".", "units", ")", "msg", "+=", "\"GENERATED:%s \"", "%", "'{0:%Y-%m-%d %H:%M:%S}'", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "pylab", ".", "annotate", "(", "msg", ",", "(", ".001", ",", ".001", ")", ",", "xycoords", "=", "'figure fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ",", "color", "=", "'#999999'", ",", "family", "=", "'monospace'", ",", "size", "=", "8", ",", "weight", "=", "'bold'", ")", "if", "abf", ".", "nADC", ">", "1", ":", "msg", "=", "\"Ch %d/%d\"", "%", "(", "abf", ".", "channel", "+", "1", ",", "abf", ".", "nADC", ")", "pylab", ".", "annotate", "(", "msg", ",", "(", ".01", ",", ".99", ")", ",", "xycoords", "=", "'figure fraction'", ",", "ha", "=", "'left'", ",", "va", "=", "'top'", ",", "color", "=", "'#FF0000'", ",", "family", "=", "'monospace'", ",", "size", "=", "12", ",", "weight", "=", "'bold'", ")" ]
46.9375
16.9375
def _get(self, obj): ''' Internal implementation of instance attribute access for the ``BasicPropertyDescriptor`` getter. If the value has been explicitly set by a user, return that value. Otherwise, return the default. Args: obj (HasProps) : the instance to get a value of this property for Returns: object Raises: RuntimeError If the |HasProps| instance has not yet been initialized, or if this descriptor is on a class that is not a |HasProps|. ''' if not hasattr(obj, '_property_values'): raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" % (self.name, obj.__class__.__name__)) if self.name not in obj._property_values: return self._get_default(obj) else: return obj._property_values[self.name]
[ "def", "_get", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_property_values'", ")", ":", "raise", "RuntimeError", "(", "\"Cannot get a property value '%s' from a %s instance before HasProps.__init__\"", "%", "(", "self", ".", "name", ",", "obj", ".", "__class__", ".", "__name__", ")", ")", "if", "self", ".", "name", "not", "in", "obj", ".", "_property_values", ":", "return", "self", ".", "_get_default", "(", "obj", ")", "else", ":", "return", "obj", ".", "_property_values", "[", "self", ".", "name", "]" ]
35.296296
27.148148
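A self-contained miniature of the set-or-default descriptor protocol (names here are invented; only the `_property_values` lookup mirrors the getter above):

class PropertyDescriptor:
    def __init__(self, name, default):
        self.name, self.default = name, default

    def __get__(self, obj, owner=None):
        if not hasattr(obj, '_property_values'):
            raise RuntimeError('instance not initialized')
        if self.name not in obj._property_values:
            return self.default                  # never set: fall back to default
        return obj._property_values[self.name]   # explicitly set: return that value

    def __set__(self, obj, value):
        obj._property_values[self.name] = value

class Model:
    width = PropertyDescriptor('width', 100)
    def __init__(self):
        self._property_values = {}

m = Model()
print(m.width)  # 100, the default
m.width = 250
print(m.width)  # 250, the explicitly set value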
def show_external_release_file(root, request): """ Download a release file from the download URL in its package information. Must be used with :func:`pyshop.helpers.download.renderer_factory` to download the release file. :return: download information :rtype: dict """ session = DBSession() settings = request.registry.settings whlify = asbool(settings.get('pyshop.mirror.wheelify', '0')) release = Release.by_id(session, int(request.matchdict['release_id'])) filename = (release.whlify_download_url_file if whlify else release.download_url_file) rv = {'url': release.download_url, 'filename': filename, 'original': release.download_url_file, 'whlify': whlify } release.downloads += 1 release.package.downloads += 1 session.add(release.package) session.add(release) request.response.date = datetime.datetime.utcnow() return rv
[ "def", "show_external_release_file", "(", "root", ",", "request", ")", ":", "session", "=", "DBSession", "(", ")", "settings", "=", "request", ".", "registry", ".", "settings", "whlify", "=", "asbool", "(", "settings", ".", "get", "(", "'pyshop.mirror.wheelify'", ",", "'0'", ")", ")", "release", "=", "Release", ".", "by_id", "(", "session", ",", "int", "(", "request", ".", "matchdict", "[", "'release_id'", "]", ")", ")", "filename", "=", "(", "release", ".", "whlify_download_url_file", "if", "whlify", "else", "release", ".", "download_url_file", ")", "rv", "=", "{", "'url'", ":", "release", ".", "download_url", ",", "'filename'", ":", "filename", ",", "'original'", ":", "release", ".", "download_url_file", ",", "'whlify'", ":", "whlify", "}", "release", ".", "downloads", "+=", "1", "release", ".", "package", ".", "downloads", "+=", "1", "session", ".", "add", "(", "release", ".", "package", ")", "session", ".", "add", "(", "release", ")", "request", ".", "response", ".", "date", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "return", "rv" ]
31
18.333333
def _run_pyroma(setup_file, show_lint_files): """Run pyroma.""" from pyroma import projectdata, ratings from prospector.message import Message, Location _debug_linter_status("pyroma", setup_file, show_lint_files) return_dict = dict() data = projectdata.get_data(os.getcwd()) all_tests = ratings.ALL_TESTS for test in [mod() for mod in [t.__class__ for t in all_tests]]: if test.test(data) is False: class_name = test.__class__.__name__ key = _Key(setup_file, 0, class_name) loc = Location(setup_file, None, None, 0, 0) msg = test.message() return_dict[key] = Message("pyroma", class_name, loc, msg) return return_dict
[ "def", "_run_pyroma", "(", "setup_file", ",", "show_lint_files", ")", ":", "from", "pyroma", "import", "projectdata", ",", "ratings", "from", "prospector", ".", "message", "import", "Message", ",", "Location", "_debug_linter_status", "(", "\"pyroma\"", ",", "setup_file", ",", "show_lint_files", ")", "return_dict", "=", "dict", "(", ")", "data", "=", "projectdata", ".", "get_data", "(", "os", ".", "getcwd", "(", ")", ")", "all_tests", "=", "ratings", ".", "ALL_TESTS", "for", "test", "in", "[", "mod", "(", ")", "for", "mod", "in", "[", "t", ".", "__class__", "for", "t", "in", "all_tests", "]", "]", ":", "if", "test", ".", "test", "(", "data", ")", "is", "False", ":", "class_name", "=", "test", ".", "__class__", ".", "__name__", "key", "=", "_Key", "(", "setup_file", ",", "0", ",", "class_name", ")", "loc", "=", "Location", "(", "setup_file", ",", "None", ",", "None", ",", "0", ",", "0", ")", "msg", "=", "test", ".", "message", "(", ")", "return_dict", "[", "key", "]", "=", "Message", "(", "\"pyroma\"", ",", "class_name", ",", "loc", ",", "msg", ")", "return", "return_dict" ]
35.652174
15
def fetch_token( self, token_url, code=None, authorization_response=None, body="", auth=None, username=None, password=None, method="POST", force_querystring=False, timeout=None, headers=None, verify=True, proxies=None, include_client_id=None, client_secret=None, **kwargs ): """Generic method for fetching an access token from the token endpoint. If you are using the MobileApplicationClient you will want to use `token_from_fragment` instead of `fetch_token`. The current implementation enforces the RFC guidelines. :param token_url: Token endpoint URL, must use HTTPS. :param code: Authorization code (used by WebApplicationClients). :param authorization_response: Authorization response URL, the callback URL of the request back to you. Used by WebApplicationClients instead of code. :param body: Optional application/x-www-form-urlencoded body to include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by `requests`. :param username: Username required by LegacyApplicationClients to appear in the request body. :param password: Password required by LegacyApplicationClients to appear in the request body. :param method: The HTTP method used to make the request. Defaults to POST, but may also be GET. Other methods should be added as needed. :param force_querystring: If True, force the request body to be sent in the querystring instead. :param timeout: Timeout of the request in seconds. :param headers: Dict to default request headers with. :param verify: Verify SSL certificate. :param proxies: The `proxies` argument is passed onto `requests`. :param include_client_id: Should the request body include the `client_id` parameter. Default is `None`, which will attempt to autodetect. This can be forced to always include (True) or never include (False). :param client_secret: The `client_secret` paired to the `client_id`. This is generally required unless provided in the `auth` tuple. If the value is `None`, it will be omitted from the request, however if the value is an empty string, an empty string will be sent. :param kwargs: Extra parameters to include in the token request. :return: A token dict """ if not is_secure_transport(token_url): raise InsecureTransportError() if not code and authorization_response: self._client.parse_request_uri_response( authorization_response, state=self._state ) code = self._client.code elif not code and isinstance(self._client, WebApplicationClient): code = self._client.code if not code: raise ValueError( "Please supply either code or " "authorization_response parameters." ) # Earlier versions of this library build an HTTPBasicAuth header out of # `username` and `password`. The RFC states, however these attributes # must be in the request body and not the header. # If an upstream server is not spec compliant and requires them to # appear as an Authorization header, supply an explicit `auth` header # to this function. # This check will allow for empty strings, but not `None`. # # References # 4.3.2 - Resource Owner Password Credentials Grant # https://tools.ietf.org/html/rfc6749#section-4.3.2 if isinstance(self._client, LegacyApplicationClient): if username is None: raise ValueError( "`LegacyApplicationClient` requires both the " "`username` and `password` parameters." ) if password is None: raise ValueError( "The required parameter `username` was supplied, " "but `password` was not."
) # merge username and password into kwargs for `prepare_request_body` if username is not None: kwargs["username"] = username if password is not None: kwargs["password"] = password # is an auth explicitly supplied? if auth is not None: # if we're dealing with the default of `include_client_id` (None): # we will assume the `auth` argument is for an RFC compliant server # and we should not send the `client_id` in the body. # This approach allows us to still force the client_id by submitting # `include_client_id=True` along with an `auth` object. if include_client_id is None: include_client_id = False # otherwise we may need to create an auth header else: # since we don't have an auth header, we MAY need to create one # it is possible that we want to send the `client_id` in the body # if so, `include_client_id` should be set to True # otherwise, we will generate an auth header if include_client_id is not True: client_id = self.client_id if client_id: log.debug( 'Encoding `client_id` "%s" with `client_secret` ' "as Basic auth credentials.", client_id, ) client_secret = client_secret if client_secret is not None else "" auth = requests.auth.HTTPBasicAuth(client_id, client_secret) if include_client_id: # this was pulled out of the params # it needs to be passed into prepare_request_body if client_secret is not None: kwargs["client_secret"] = client_secret body = self._client.prepare_request_body( code=code, body=body, redirect_uri=self.redirect_uri, include_client_id=include_client_id, **kwargs ) headers = headers or { "Accept": "application/json", "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8", } self.token = {} request_kwargs = {} if method.upper() == "POST": request_kwargs["params" if force_querystring else "data"] = dict( urldecode(body) ) elif method.upper() == "GET": request_kwargs["params"] = dict(urldecode(body)) else: raise ValueError("The method kwarg must be POST or GET.") r = self.request( method=method, url=token_url, timeout=timeout, headers=headers, auth=auth, verify=verify, proxies=proxies, **request_kwargs ) log.debug("Request to fetch token completed with status %s.", r.status_code) log.debug("Request url was %s", r.request.url) log.debug("Request headers were %s", r.request.headers) log.debug("Request body was %s", r.request.body) log.debug("Response headers were %s and content %s.", r.headers, r.text) log.debug( "Invoking %d token response hooks.", len(self.compliance_hook["access_token_response"]), ) for hook in self.compliance_hook["access_token_response"]: log.debug("Invoking hook %s.", hook) r = hook(r) self._client.parse_request_body_response(r.text, scope=self.scope) self.token = self._client.token log.debug("Obtained token %s.", self.token) return self.token
[ "def", "fetch_token", "(", "self", ",", "token_url", ",", "code", "=", "None", ",", "authorization_response", "=", "None", ",", "body", "=", "\"\"", ",", "auth", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "method", "=", "\"POST\"", ",", "force_querystring", "=", "False", ",", "timeout", "=", "None", ",", "headers", "=", "None", ",", "verify", "=", "True", ",", "proxies", "=", "None", ",", "include_client_id", "=", "None", ",", "client_secret", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "is_secure_transport", "(", "token_url", ")", ":", "raise", "InsecureTransportError", "(", ")", "if", "not", "code", "and", "authorization_response", ":", "self", ".", "_client", ".", "parse_request_uri_response", "(", "authorization_response", ",", "state", "=", "self", ".", "_state", ")", "code", "=", "self", ".", "_client", ".", "code", "elif", "not", "code", "and", "isinstance", "(", "self", ".", "_client", ",", "WebApplicationClient", ")", ":", "code", "=", "self", ".", "_client", ".", "code", "if", "not", "code", ":", "raise", "ValueError", "(", "\"Please supply either code or \"", "\"authorization_response parameters.\"", ")", "# Earlier versions of this library build an HTTPBasicAuth header out of", "# `username` and `password`. The RFC states, however these attributes", "# must be in the request body and not the header.", "# If an upstream server is not spec compliant and requires them to", "# appear as an Authorization header, supply an explicit `auth` header", "# to this function.", "# This check will allow for empty strings, but not `None`.", "#", "# Refernences", "# 4.3.2 - Resource Owner Password Credentials Grant", "# https://tools.ietf.org/html/rfc6749#section-4.3.2", "if", "isinstance", "(", "self", ".", "_client", ",", "LegacyApplicationClient", ")", ":", "if", "username", "is", "None", ":", "raise", "ValueError", "(", "\"`LegacyApplicationClient` requires both the \"", "\"`username` and `password` parameters.\"", ")", "if", "password", "is", "None", ":", "raise", "ValueError", "(", "\"The required paramter `username` was supplied, \"", "\"but `password` was not.\"", ")", "# merge username and password into kwargs for `prepare_request_body`", "if", "username", "is", "not", "None", ":", "kwargs", "[", "\"username\"", "]", "=", "username", "if", "password", "is", "not", "None", ":", "kwargs", "[", "\"password\"", "]", "=", "password", "# is an auth explicitly supplied?", "if", "auth", "is", "not", "None", ":", "# if we're dealing with the default of `include_client_id` (None):", "# we will assume the `auth` argument is for an RFC compliant server", "# and we should not send the `client_id` in the body.", "# This approach allows us to still force the client_id by submitting", "# `include_client_id=True` along with an `auth` object.", "if", "include_client_id", "is", "None", ":", "include_client_id", "=", "False", "# otherwise we may need to create an auth header", "else", ":", "# since we don't have an auth header, we MAY need to create one", "# it is possible that we want to send the `client_id` in the body", "# if so, `include_client_id` should be set to True", "# otherwise, we will generate an auth header", "if", "include_client_id", "is", "not", "True", ":", "client_id", "=", "self", ".", "client_id", "if", "client_id", ":", "log", ".", "debug", "(", "'Encoding `client_id` \"%s\" with `client_secret` '", "\"as Basic auth credentials.\"", ",", "client_id", ",", ")", "client_secret", "=", "client_secret", "if", "client_secret", "is", 
"not", "None", "else", "\"\"", "auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "client_id", ",", "client_secret", ")", "if", "include_client_id", ":", "# this was pulled out of the params", "# it needs to be passed into prepare_request_body", "if", "client_secret", "is", "not", "None", ":", "kwargs", "[", "\"client_secret\"", "]", "=", "client_secret", "body", "=", "self", ".", "_client", ".", "prepare_request_body", "(", "code", "=", "code", ",", "body", "=", "body", ",", "redirect_uri", "=", "self", ".", "redirect_uri", ",", "include_client_id", "=", "include_client_id", ",", "*", "*", "kwargs", ")", "headers", "=", "headers", "or", "{", "\"Accept\"", ":", "\"application/json\"", ",", "\"Content-Type\"", ":", "\"application/x-www-form-urlencoded;charset=UTF-8\"", ",", "}", "self", ".", "token", "=", "{", "}", "request_kwargs", "=", "{", "}", "if", "method", ".", "upper", "(", ")", "==", "\"POST\"", ":", "request_kwargs", "[", "\"params\"", "if", "force_querystring", "else", "\"data\"", "]", "=", "dict", "(", "urldecode", "(", "body", ")", ")", "elif", "method", ".", "upper", "(", ")", "==", "\"GET\"", ":", "request_kwargs", "[", "\"params\"", "]", "=", "dict", "(", "urldecode", "(", "body", ")", ")", "else", ":", "raise", "ValueError", "(", "\"The method kwarg must be POST or GET.\"", ")", "r", "=", "self", ".", "request", "(", "method", "=", "method", ",", "url", "=", "token_url", ",", "timeout", "=", "timeout", ",", "headers", "=", "headers", ",", "auth", "=", "auth", ",", "verify", "=", "verify", ",", "proxies", "=", "proxies", ",", "*", "*", "request_kwargs", ")", "log", ".", "debug", "(", "\"Request to fetch token completed with status %s.\"", ",", "r", ".", "status_code", ")", "log", ".", "debug", "(", "\"Request url was %s\"", ",", "r", ".", "request", ".", "url", ")", "log", ".", "debug", "(", "\"Request headers were %s\"", ",", "r", ".", "request", ".", "headers", ")", "log", ".", "debug", "(", "\"Request body was %s\"", ",", "r", ".", "request", ".", "body", ")", "log", ".", "debug", "(", "\"Response headers were %s and content %s.\"", ",", "r", ".", "headers", ",", "r", ".", "text", ")", "log", ".", "debug", "(", "\"Invoking %d token response hooks.\"", ",", "len", "(", "self", ".", "compliance_hook", "[", "\"access_token_response\"", "]", ")", ",", ")", "for", "hook", "in", "self", ".", "compliance_hook", "[", "\"access_token_response\"", "]", ":", "log", ".", "debug", "(", "\"Invoking hook %s.\"", ",", "hook", ")", "r", "=", "hook", "(", "r", ")", "self", ".", "_client", ".", "parse_request_body_response", "(", "r", ".", "text", ",", "scope", "=", "self", ".", "scope", ")", "self", ".", "token", "=", "self", ".", "_client", ".", "token", "log", ".", "debug", "(", "\"Obtained token %s.\"", ",", "self", ".", "token", ")", "return", "self", ".", "token" ]
43.111111
21.851852
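Typical web-application-flow usage via requests_oauthlib's OAuth2Session (the URLs and credentials below are placeholders):

from requests_oauthlib import OAuth2Session

oauth = OAuth2Session('client-id', redirect_uri='https://example.com/callback')
token = oauth.fetch_token(
    'https://provider.example.com/oauth/token',      # must be HTTPS
    authorization_response='https://example.com/callback?code=abc&state=xyz',
    client_secret='client-secret',                   # paired with client_id in Basic auth
)
print(token.get('access_token'))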
def set_frequency(self, pin, frequency_hz): """Set frequency (in Hz) of PWM output on specified pin.""" if pin not in self.pwm: raise ValueError('Pin {0} is not configured as a PWM. Make sure to first call start for the pin.'.format(pin)) self.pwm[pin].ChangeFrequency(frequency_hz)
[ "def", "set_frequency", "(", "self", ",", "pin", ",", "frequency_hz", ")", ":", "if", "pin", "not", "in", "self", ".", "pwm", ":", "raise", "ValueError", "(", "'Pin {0} is not configured as a PWM. Make sure to first call start for the pin.'", ".", "format", "(", "pin", ")", ")", "self", ".", "pwm", "[", "pin", "]", ".", "ChangeFrequency", "(", "frequency_hz", ")" ]
63
21.2
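A hedged hardware sketch, assuming an Adafruit_GPIO-style PWM wrapper and BCM pin 18 (both are assumptions for illustration):

import Adafruit_GPIO.PWM as PWM   # assumed wrapper exposing this adapter

pwm = PWM.get_platform_pwm()
pwm.start(18, 50.0, frequency_hz=200)  # the pin must be started first
pwm.set_frequency(18, 400)             # otherwise this raises ValueError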
def read_block(self, address): """Read 32 bytes from the weather station. If the read fails for any reason, :obj:`None` is returned. :param address: address to read from. :type address: int :return: the data from the weather station. :rtype: list(int) """ buf = [ self.ReadCommand, address // 256, address % 256, self.EndMark, self.ReadCommand, address // 256, address % 256, self.EndMark, ] if not self.dev.write_data(buf): return None return self.dev.read_data(32)
[ "def", "read_block", "(", "self", ",", "address", ")", ":", "buf", "=", "[", "self", ".", "ReadCommand", ",", "address", "//", "256", ",", "address", "%", "256", ",", "self", ".", "EndMark", ",", "self", ".", "ReadCommand", ",", "address", "//", "256", ",", "address", "%", "256", ",", "self", ".", "EndMark", ",", "]", "if", "not", "self", ".", "dev", ".", "write_data", "(", "buf", ")", ":", "return", "None", "return", "self", ".", "dev", ".", "read_data", "(", "32", ")" ]
23.925926
18.481481
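The 8-byte command buffer is just the 4-byte read request repeated twice, with the address split big-endian in the middle. A standalone sketch (0xA1 and 0x20 are assumed protocol constants, for illustration only):

ReadCommand, EndMark = 0xA1, 0x20   # assumed values, for illustration only

def make_read_buf(address):
    return [ReadCommand, address // 256, address % 256, EndMark] * 2

print([hex(b) for b in make_read_buf(0x01F0)])
# ['0xa1', '0x1', '0xf0', '0x20', '0xa1', '0x1', '0xf0', '0x20']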
def install(**kwargs): """setup entry point""" if USE_SETUPTOOLS: if "--force-manifest" in sys.argv: sys.argv.remove("--force-manifest") packages = [modname] + get_packages(join(base_dir, "pylint"), modname) if USE_SETUPTOOLS: if install_requires: kwargs["install_requires"] = install_requires kwargs["dependency_links"] = dependency_links kwargs["entry_points"] = { "console_scripts": [ "pylint = pylint:run_pylint", "epylint = pylint:run_epylint", "pyreverse = pylint:run_pyreverse", "symilar = pylint:run_symilar", ] } kwargs["packages"] = packages cmdclass = {"install_lib": MyInstallLib, "build_py": build_py} if easy_install_lib: cmdclass["easy_install"] = easy_install return setup( name=distname, version=__pkginfo__["version"], license=__pkginfo__["license"], description=__pkginfo__["description"], long_description=long_description, author=__pkginfo__["author"], author_email=__pkginfo__["author_email"], url=__pkginfo__["web"], scripts=ensure_scripts(scripts), classifiers=__pkginfo__["classifiers"], data_files=data_files, ext_modules=ext_modules, cmdclass=cmdclass, extras_require=extras_require, test_suite="test", python_requires=">=3.4.*", setup_requires=["pytest-runner"], tests_require=["pytest"], **kwargs )
[ "def", "install", "(", "*", "*", "kwargs", ")", ":", "if", "USE_SETUPTOOLS", ":", "if", "\"--force-manifest\"", "in", "sys", ".", "argv", ":", "sys", ".", "argv", ".", "remove", "(", "\"--force-manifest\"", ")", "packages", "=", "[", "modname", "]", "+", "get_packages", "(", "join", "(", "base_dir", ",", "\"pylint\"", ")", ",", "modname", ")", "if", "USE_SETUPTOOLS", ":", "if", "install_requires", ":", "kwargs", "[", "\"install_requires\"", "]", "=", "install_requires", "kwargs", "[", "\"dependency_links\"", "]", "=", "dependency_links", "kwargs", "[", "\"entry_points\"", "]", "=", "{", "\"console_scripts\"", ":", "[", "\"pylint = pylint:run_pylint\"", ",", "\"epylint = pylint:run_epylint\"", ",", "\"pyreverse = pylint:run_pyreverse\"", ",", "\"symilar = pylint:run_symilar\"", ",", "]", "}", "kwargs", "[", "\"packages\"", "]", "=", "packages", "cmdclass", "=", "{", "\"install_lib\"", ":", "MyInstallLib", ",", "\"build_py\"", ":", "build_py", "}", "if", "easy_install_lib", ":", "cmdclass", "[", "\"easy_install\"", "]", "=", "easy_install", "return", "setup", "(", "name", "=", "distname", ",", "version", "=", "__pkginfo__", "[", "\"version\"", "]", ",", "license", "=", "__pkginfo__", "[", "\"license\"", "]", ",", "description", "=", "__pkginfo__", "[", "\"description\"", "]", ",", "long_description", "=", "long_description", ",", "author", "=", "__pkginfo__", "[", "\"author\"", "]", ",", "author_email", "=", "__pkginfo__", "[", "\"author_email\"", "]", ",", "url", "=", "__pkginfo__", "[", "\"web\"", "]", ",", "scripts", "=", "ensure_scripts", "(", "scripts", ")", ",", "classifiers", "=", "__pkginfo__", "[", "\"classifiers\"", "]", ",", "data_files", "=", "data_files", ",", "ext_modules", "=", "ext_modules", ",", "cmdclass", "=", "cmdclass", ",", "extras_require", "=", "extras_require", ",", "test_suite", "=", "\"test\"", ",", "python_requires", "=", "\">=3.4.*\"", ",", "setup_requires", "=", "[", "\"pytest-runner\"", "]", ",", "tests_require", "=", "[", "\"pytest\"", "]", ",", "*", "*", "kwargs", ")" ]
35.744186
11.674419
def get_request_body(self): """ Decodes the request body and returns it. :return: the decoded request body as a :class:`dict` instance. :raises: :class:`tornado.web.HTTPError` if the body cannot be decoded (415) or if decoding fails (400) """ if self._request_body is None: content_type_str = self.request.headers.get( 'Content-Type', 'application/octet-stream') LOGGER.debug('decoding request body of type %s', content_type_str) content_type = headers.parse_content_type(content_type_str) try: selected, requested = algorithms.select_content_type( [content_type], _content_types.values()) except errors.NoMatch: raise web.HTTPError( 415, 'cannot decode content type %s', content_type_str, reason='Unexpected content type') handler = _content_handlers[str(selected)] try: self._request_body = handler.unpack_bytes( self.request.body, encoding=content_type.parameters.get('charset'), ) except ValueError as error: raise web.HTTPError( 400, 'failed to decode content body - %r', error, reason='Content body decode failure') return self._request_body
[ "def", "get_request_body", "(", "self", ")", ":", "if", "self", ".", "_request_body", "is", "None", ":", "content_type_str", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "'application/octet-stream'", ")", "LOGGER", ".", "debug", "(", "'decoding request body of type %s'", ",", "content_type_str", ")", "content_type", "=", "headers", ".", "parse_content_type", "(", "content_type_str", ")", "try", ":", "selected", ",", "requested", "=", "algorithms", ".", "select_content_type", "(", "[", "content_type", "]", ",", "_content_types", ".", "values", "(", ")", ")", "except", "errors", ".", "NoMatch", ":", "raise", "web", ".", "HTTPError", "(", "415", ",", "'cannot decoded content type %s'", ",", "content_type_str", ",", "reason", "=", "'Unexpected content type'", ")", "handler", "=", "_content_handlers", "[", "str", "(", "selected", ")", "]", "try", ":", "self", ".", "_request_body", "=", "handler", ".", "unpack_bytes", "(", "self", ".", "request", ".", "body", ",", "encoding", "=", "content_type", ".", "parameters", ".", "get", "(", "'charset'", ")", ",", ")", "except", "ValueError", "as", "error", ":", "raise", "web", ".", "HTTPError", "(", "400", ",", "'failed to decode content body - %r'", ",", "error", ",", "reason", "=", "'Content body decode failure'", ")", "return", "self", ".", "_request_body" ]
44.34375
18.03125
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters={}): """Start a running pipeline. The API waits for the pipeline to be fully started. Args: url (str): the host url in the form 'http://host:port/'. pipeline_id (str): the ID of the exported pipeline. auth (tuple): a tuple of username and password. runtime_parameters (dict): the desired runtime parameters for the pipeline. verify_ssl (bool): whether to verify ssl certificates Returns: dict: the response json """ start_result = requests.post(url + '/' + pipeline_id + '/start', headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters) start_result.raise_for_status() logging.info('Pipeline start requested.') poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl) logging.info("Pipeline started.") return start_result.json()
[ "def", "start_pipeline", "(", "url", ",", "pipeline_id", ",", "auth", ",", "verify_ssl", ",", "runtime_parameters", "=", "{", "}", ")", ":", "start_result", "=", "requests", ".", "post", "(", "url", "+", "'/'", "+", "pipeline_id", "+", "'/start'", ",", "headers", "=", "X_REQ_BY", ",", "auth", "=", "auth", ",", "verify", "=", "verify_ssl", ",", "json", "=", "runtime_parameters", ")", "start_result", ".", "raise_for_status", "(", ")", "logging", ".", "info", "(", "'Pipeline start requested.'", ")", "poll_pipeline_status", "(", "STATUS_RUNNING", ",", "url", ",", "pipeline_id", ",", "auth", ",", "verify_ssl", ")", "logging", ".", "info", "(", "\"Pipeline started.\"", ")", "return", "start_result", ".", "json", "(", ")" ]
44.909091
26.772727
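A hedged call sketch, assuming `start_pipeline` is in scope and a local StreamSets-style REST endpoint (the host URL, pipeline ID, and credentials are placeholders):

resp = start_pipeline(
    'http://localhost:18630/rest/v1/pipeline',   # placeholder host URL
    'mypipeline__abc123',                        # placeholder pipeline ID
    auth=('admin', 'admin'),
    verify_ssl=False,
    runtime_parameters={'BATCH_SIZE': 100},
)
print(resp)  # the response json returned once the pipeline reports RUNNING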
def set_current_ns( ns_name: str, module: types.ModuleType = None, ns_var_name: str = NS_VAR_NAME, ns_var_ns: str = NS_VAR_NS, ) -> Var: """Set the value of the dynamic variable `*ns*` in the current thread.""" symbol = sym.Symbol(ns_name) ns = Namespace.get_or_create(symbol, module=module) ns_var_sym = sym.Symbol(ns_var_name, ns=ns_var_ns) ns_var = Maybe(Var.find(ns_var_sym)).or_else_raise( lambda: RuntimeException( f"Dynamic Var {sym.Symbol(ns_var_name, ns=ns_var_ns)} not bound!" ) ) ns_var.push_bindings(ns) logger.debug(f"Setting {ns_var_sym} to {ns}") return ns_var
[ "def", "set_current_ns", "(", "ns_name", ":", "str", ",", "module", ":", "types", ".", "ModuleType", "=", "None", ",", "ns_var_name", ":", "str", "=", "NS_VAR_NAME", ",", "ns_var_ns", ":", "str", "=", "NS_VAR_NS", ",", ")", "->", "Var", ":", "symbol", "=", "sym", ".", "Symbol", "(", "ns_name", ")", "ns", "=", "Namespace", ".", "get_or_create", "(", "symbol", ",", "module", "=", "module", ")", "ns_var_sym", "=", "sym", ".", "Symbol", "(", "ns_var_name", ",", "ns", "=", "ns_var_ns", ")", "ns_var", "=", "Maybe", "(", "Var", ".", "find", "(", "ns_var_sym", ")", ")", ".", "or_else_raise", "(", "lambda", ":", "RuntimeException", "(", "f\"Dynamic Var {sym.Symbol(ns_var_name, ns=ns_var_ns)} not bound!\"", ")", ")", "ns_var", ".", "push_bindings", "(", "ns", ")", "logger", ".", "debug", "(", "f\"Setting {ns_var_sym} to {ns}\"", ")", "return", "ns_var" ]
35.444444
16.611111
def _do_ffts(detector, stream, Nc): """ Perform ffts on data, detector and denominator boxcar :type detector: eqcorrscan.core.subspace.Detector :param detector: Detector object for doing detecting :type stream: list of obspy.core.stream.Stream :param stream: List of streams processed according to detector :type Nc: int :param Nc: Number of channels in data. 1 for non-multiplexed :return: list of time-reversed detector(s) in freq domain :rtype: list :return: list of squared data stream(s) in freq domain :rtype: list :return: list of data stream(s) in freq domain :return: detector-length boxcar in freq domain :rtype: numpy.ndarray :return: length of detector :rtype: int :return: length of data :rtype: int """ min_fftlen = int(stream[0][0].data.shape[0] + detector.data[0].shape[0] - Nc) fftlen = scipy.fftpack.next_fast_len(min_fftlen) mplen = stream[0][0].data.shape[0] ulen = detector.data[0].shape[0] num_st_fd = [np.fft.rfft(tr.data, n=fftlen) for tr in stream[0]] denom_st_fd = [np.fft.rfft(np.square(tr.data), n=fftlen) for tr in stream[0]] # Frequency domain of boxcar w = np.fft.rfft(np.ones(detector.data[0].shape[0]), n=fftlen) # This should go into the detector object as in Detex detector_fd = [] for dat_mat in detector.data: detector_fd.append(np.array([np.fft.rfft(col[::-1], n=fftlen) for col in dat_mat.T])) return detector_fd, denom_st_fd, num_st_fd, w, ulen, mplen
[ "def", "_do_ffts", "(", "detector", ",", "stream", ",", "Nc", ")", ":", "min_fftlen", "=", "int", "(", "stream", "[", "0", "]", "[", "0", "]", ".", "data", ".", "shape", "[", "0", "]", "+", "detector", ".", "data", "[", "0", "]", ".", "shape", "[", "0", "]", "-", "Nc", ")", "fftlen", "=", "scipy", ".", "fftpack", ".", "next_fast_len", "(", "min_fftlen", ")", "mplen", "=", "stream", "[", "0", "]", "[", "0", "]", ".", "data", ".", "shape", "[", "0", "]", "ulen", "=", "detector", ".", "data", "[", "0", "]", ".", "shape", "[", "0", "]", "num_st_fd", "=", "[", "np", ".", "fft", ".", "rfft", "(", "tr", ".", "data", ",", "n", "=", "fftlen", ")", "for", "tr", "in", "stream", "[", "0", "]", "]", "denom_st_fd", "=", "[", "np", ".", "fft", ".", "rfft", "(", "np", ".", "square", "(", "tr", ".", "data", ")", ",", "n", "=", "fftlen", ")", "for", "tr", "in", "stream", "[", "0", "]", "]", "# Frequency domain of boxcar", "w", "=", "np", ".", "fft", ".", "rfft", "(", "np", ".", "ones", "(", "detector", ".", "data", "[", "0", "]", ".", "shape", "[", "0", "]", ")", ",", "n", "=", "fftlen", ")", "# This should go into the detector object as in Detex", "detector_fd", "=", "[", "]", "for", "dat_mat", "in", "detector", ".", "data", ":", "detector_fd", ".", "append", "(", "np", ".", "array", "(", "[", "np", ".", "fft", ".", "rfft", "(", "col", "[", ":", ":", "-", "1", "]", ",", "n", "=", "fftlen", ")", "for", "col", "in", "dat_mat", ".", "T", "]", ")", ")", "return", "detector_fd", ",", "denom_st_fd", ",", "num_st_fd", ",", "w", ",", "ulen", ",", "mplen" ]
39.097561
15.292683
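The core trick in `_do_ffts` is that time-reversing the detector columns before the forward FFT turns frequency-domain multiplication into a sliding cross-correlation. A minimal self-contained sketch of that identity, using the same `next_fast_len`/`rfft` calls on toy arrays rather than the eqcorrscan objects:

import numpy as np
import scipy.fftpack

data = np.random.randn(1000)
template = np.random.randn(100)

fftlen = scipy.fftpack.next_fast_len(data.size + template.size - 1)
data_fd = np.fft.rfft(data, n=fftlen)
templ_fd = np.fft.rfft(template[::-1], n=fftlen)  # reversed, as in _do_ffts

# Valid part of the sliding dot product between template and data
corr = np.fft.irfft(data_fd * templ_fd, n=fftlen)[template.size - 1:data.size]

# Agrees with the direct time-domain computation
direct = np.correlate(data, template, mode='valid')
assert np.allclose(corr, direct)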
def set_environment_variable(self, key, val): """ Sets a variable if that variable is not already set """ if self.get_environment_variable(key) in [None, val]: self.__dict__['environment_variables'][key] = val else: raise Contradiction("Could not set environment variable %s" % (key))
[ "def", "set_environment_variable", "(", "self", ",", "key", ",", "val", ")", ":", "if", "self", ".", "get_environment_variable", "(", "key", ")", "in", "[", "None", ",", "val", "]", ":", "self", ".", "__dict__", "[", "'environment_variables'", "]", "[", "key", "]", "=", "val", "else", ":", "raise", "Contradiction", "(", "\"Could not set environment variable %s\"", "%", "(", "key", ")", ")" ]
54.5
19
def checkCursor(self):
        'Keep cursor in bounds of data and screen.'
        # keep cursor within actual available rowset
        if self.nRows == 0 or self.cursorRowIndex <= 0:
            self.cursorRowIndex = 0
        elif self.cursorRowIndex >= self.nRows:
            self.cursorRowIndex = self.nRows-1

        if self.cursorVisibleColIndex <= 0:
            self.cursorVisibleColIndex = 0
        elif self.cursorVisibleColIndex >= self.nVisibleCols:
            self.cursorVisibleColIndex = self.nVisibleCols-1

        if self.topRowIndex <= 0:
            self.topRowIndex = 0
        elif self.topRowIndex > self.nRows-1:
            self.topRowIndex = self.nRows-1

        # (x,y) is relative cell within screen viewport
        x = self.cursorVisibleColIndex - self.leftVisibleColIndex
        y = self.cursorRowIndex - self.topRowIndex + 1  # header

        # check bounds, scroll if necessary
        if y < 1:
            self.topRowIndex = self.cursorRowIndex
        elif y > self.nVisibleRows:
            self.topRowIndex = self.cursorRowIndex-self.nVisibleRows+1

        if x <= 0:
            self.leftVisibleColIndex = self.cursorVisibleColIndex
        else:
            while True:
                if self.leftVisibleColIndex == self.cursorVisibleColIndex:  # not much more we can do
                    break
                self.calcColLayout()
                mincolidx, maxcolidx = min(self.visibleColLayout.keys()), max(self.visibleColLayout.keys())
                if self.cursorVisibleColIndex < mincolidx:
                    self.leftVisibleColIndex -= max((self.cursorVisibleColIndex - mincolidx)//2, 1)
                    continue
                elif self.cursorVisibleColIndex > maxcolidx:
                    self.leftVisibleColIndex += max((maxcolidx - self.cursorVisibleColIndex)//2, 1)
                    continue

                cur_x, cur_w = self.visibleColLayout[self.cursorVisibleColIndex]
                if cur_x+cur_w < self.vd.windowWidth:  # current columns fit entirely on screen
                    break
                self.leftVisibleColIndex += 1
[ "def", "checkCursor", "(", "self", ")", ":", "# keep cursor within actual available rowset", "if", "self", ".", "nRows", "==", "0", "or", "self", ".", "cursorRowIndex", "<=", "0", ":", "self", ".", "cursorRowIndex", "=", "0", "elif", "self", ".", "cursorRowIndex", ">=", "self", ".", "nRows", ":", "self", ".", "cursorRowIndex", "=", "self", ".", "nRows", "-", "1", "if", "self", ".", "cursorVisibleColIndex", "<=", "0", ":", "self", ".", "cursorVisibleColIndex", "=", "0", "elif", "self", ".", "cursorVisibleColIndex", ">=", "self", ".", "nVisibleCols", ":", "self", ".", "cursorVisibleColIndex", "=", "self", ".", "nVisibleCols", "-", "1", "if", "self", ".", "topRowIndex", "<=", "0", ":", "self", ".", "topRowIndex", "=", "0", "elif", "self", ".", "topRowIndex", ">", "self", ".", "nRows", "-", "1", ":", "self", ".", "topRowIndex", "=", "self", ".", "nRows", "-", "1", "# (x,y) is relative cell within screen viewport", "x", "=", "self", ".", "cursorVisibleColIndex", "-", "self", ".", "leftVisibleColIndex", "y", "=", "self", ".", "cursorRowIndex", "-", "self", ".", "topRowIndex", "+", "1", "# header", "# check bounds, scroll if necessary", "if", "y", "<", "1", ":", "self", ".", "topRowIndex", "=", "self", ".", "cursorRowIndex", "elif", "y", ">", "self", ".", "nVisibleRows", ":", "self", ".", "topRowIndex", "=", "self", ".", "cursorRowIndex", "-", "self", ".", "nVisibleRows", "+", "1", "if", "x", "<=", "0", ":", "self", ".", "leftVisibleColIndex", "=", "self", ".", "cursorVisibleColIndex", "else", ":", "while", "True", ":", "if", "self", ".", "leftVisibleColIndex", "==", "self", ".", "cursorVisibleColIndex", ":", "# not much more we can do", "break", "self", ".", "calcColLayout", "(", ")", "mincolidx", ",", "maxcolidx", "=", "min", "(", "self", ".", "visibleColLayout", ".", "keys", "(", ")", ")", ",", "max", "(", "self", ".", "visibleColLayout", ".", "keys", "(", ")", ")", "if", "self", ".", "cursorVisibleColIndex", "<", "mincolidx", ":", "self", ".", "leftVisibleColIndex", "-=", "max", "(", "(", "self", ".", "cursorVisibleColIndex", "-", "mincolid", ")", "//", "2", ",", "1", ")", "continue", "elif", "self", ".", "cursorVisibleColIndex", ">", "maxcolidx", ":", "self", ".", "leftVisibleColIndex", "+=", "max", "(", "(", "maxcolidx", "-", "self", ".", "cursorVisibleColIndex", ")", "//", "2", ",", "1", ")", "continue", "cur_x", ",", "cur_w", "=", "self", ".", "visibleColLayout", "[", "self", ".", "cursorVisibleColIndex", "]", "if", "cur_x", "+", "cur_w", "<", "self", ".", "vd", ".", "windowWidth", ":", "# current columns fit entirely on screen", "break", "self", ".", "leftVisibleColIndex", "+=", "1" ]
44.042553
22.340426
def process_request( self, path: str, request_headers: Headers ) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]: """ Intercept the HTTP request and return an HTTP response if needed. ``request_headers`` is a :class:`~websockets.http.Headers` instance. If this method returns ``None``, the WebSocket handshake continues. If it returns a status code, headers and a response body, that HTTP response is sent and the connection is closed. The HTTP status must be a :class:`~http.HTTPStatus`. HTTP headers must be a :class:`~websockets.http.Headers` instance, a :class:`~collections.abc.Mapping`, or an iterable of ``(name, value)`` pairs. The HTTP response body must be :class:`bytes`. It may be empty. This method may be overridden to check the request headers and set a different status, for example to authenticate the request and return ``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``. It can be declared as a function or as a coroutine because such authentication checks are likely to require network requests. It may also be overridden by passing a ``process_request`` argument to the :class:`WebSocketServerProtocol` constructor or the :func:`serve` function. """ if self._process_request is not None: return self._process_request(path, request_headers) return None
[ "def", "process_request", "(", "self", ",", "path", ":", "str", ",", "request_headers", ":", "Headers", ")", "->", "Union", "[", "Optional", "[", "HTTPResponse", "]", ",", "Awaitable", "[", "Optional", "[", "HTTPResponse", "]", "]", "]", ":", "if", "self", ".", "_process_request", "is", "not", "None", ":", "return", "self", ".", "_process_request", "(", "path", ",", "request_headers", ")", "return", "None" ]
42.057143
29.428571
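As the docstring describes, a `process_request` callable can short-circuit the handshake by returning a status/headers/body triple. A hedged sketch of such a hook, following that contract (the header check and response body are illustrative, not part of the library):

import http

async def reject_unauthenticated(path, request_headers):
    # Returning (status, headers, body) sends that HTTP response and
    # closes the connection; returning None continues the WS handshake.
    if "Authorization" not in request_headers:
        return http.HTTPStatus.UNAUTHORIZED, [], b"Missing credentials\n"
    return None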
def addMeal(self, date, category, name, notes=None, prices=None):
        """ This is the main helper, it adds a meal to the
            canteen. The following data are needed:

            :param datetime.date date: Date for the meal
            :param str category: Name of the meal category
            :param str name: Meal name.
            :raises ValueError: if the meal name is empty or longer than 250 characters
            :raises ValueError: if the price role is unknown
            :raises ValueError: if the category name is empty
            :raises ValueError: if the note list contains an empty note
            :raises TypeError: if the price value is not an integer

            Additionally, the following data are also supported:

            :param notes: List of notes
            :type notes: list
            :param prices: Price of the meal; Every key must be a string for the
                 role of the persons who can use this tariff; The value is the
                 price in Euro Cents, The site of the OpenMensa project offers
                 more detailed information.
            :type prices: dict"""
        # check name:
        if not len(name):
            raise ValueError('Meal names must not be empty')
        if len(name) > 250:
            raise ValueError('Meal names must be shorter than 251 characters')
        # check category:
        if not len(category):
            raise ValueError('Category names must not be empty')
        # process notes
        if notes:
            for note in notes:
                if not len(note):
                    raise ValueError('Note must not be empty. Leave it out if not needed')
        # process prices:
        if prices is None:
            prices = {}
        else:
            for role in prices:
                if role not in self.allowed_price_roles:
                    raise ValueError('Unknown price role "%s"' % role)
                if not isinstance(prices[role], int):
                    raise TypeError('Unsupported price type - expected integer')
        # ensure we have an entry for this date
        date = self._handleDate(date)
        if date not in self._days:
            self._days[date] = OrderedDict()
        # ensure we have a category element for this category
        if category not in self._days[date]:
            self._days[date][category] = []
        # add meal into category:
        self._days[date][category].append((name, notes or [], prices))
[ "def", "addMeal", "(", "self", ",", "date", ",", "category", ",", "name", ",", "notes", "=", "None", ",", "prices", "=", "None", ")", ":", "# check name:", "if", "not", "len", "(", "name", ")", ":", "raise", "ValueError", "(", "'Meal names must not be empty'", ")", "if", "len", "(", "name", ")", ">", "250", ":", "raise", "ValueError", "(", "'Meal names must be shorter than 251 characters'", ")", "# check category:", "if", "not", "len", "(", "category", ")", ":", "raise", "ValueError", "(", "'Category names must not be empty'", ")", "# process notes", "if", "notes", ":", "for", "note", "in", "notes", ":", "if", "not", "len", "(", "note", ")", ":", "raise", "ValueError", "(", "'Note must not be empty. Left it out, if not needed'", ")", "# process prices:", "if", "prices", "is", "None", ":", "prices", "=", "{", "}", "else", ":", "for", "role", "in", "prices", ":", "if", "role", "not", "in", "self", ".", "allowed_price_roles", ":", "raise", "ValueError", "(", "'Unknown price role \"%s\"'", "%", "role", ")", "if", "not", "isinstance", "(", "prices", "[", "role", "]", ",", "int", ")", ":", "raise", "TypeError", "(", "'Unsupport price type - expect integer'", ")", "# ensure we have an entry for this date", "date", "=", "self", ".", "_handleDate", "(", "date", ")", "if", "date", "not", "in", "self", ".", "_days", ":", "self", ".", "_days", "[", "date", "]", "=", "OrderedDict", "(", ")", "# ensure we have a category element for this category", "if", "category", "not", "in", "self", ".", "_days", "[", "date", "]", ":", "self", ".", "_days", "[", "date", "]", "[", "category", "]", "=", "[", "]", "# add meal into category:", "self", ".", "_days", "[", "date", "]", "[", "category", "]", ".", "append", "(", "(", "name", ",", "notes", "or", "[", "]", ",", "prices", ")", ")" ]
44.740741
19.388889
def load_vstr(buf, pos): """Load bytes prefixed by vint length""" slen, pos = load_vint(buf, pos) return load_bytes(buf, slen, pos)
[ "def", "load_vstr", "(", "buf", ",", "pos", ")", ":", "slen", ",", "pos", "=", "load_vint", "(", "buf", ",", "pos", ")", "return", "load_bytes", "(", "buf", ",", "slen", ",", "pos", ")" ]
35
6
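`load_vstr` only composes two helpers defined elsewhere in the module. A self-contained sketch with assumed implementations — the real varint encoding may differ, little-endian base-128 is a guess here:

def load_vint(buf, pos):
    # Assumed little-endian base-128 varint: 7 payload bits per byte,
    # high bit set on all but the final byte.
    result, shift = 0, 0
    while True:
        b = buf[pos]; pos += 1
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            return result, pos
        shift += 7

def load_bytes(buf, length, pos):
    return buf[pos:pos + length], pos + length

def load_vstr(buf, pos):
    """Load bytes prefixed by vint length"""
    slen, pos = load_vint(buf, pos)
    return load_bytes(buf, slen, pos)

# b'\x05hello' decodes to (b'hello', 6): length byte, then 5 payload bytes
assert load_vstr(b'\x05hello', 0) == (b'hello', 6)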
def _matrix_grad(q, h, h_dx, t, t_prime): ''' Returns the gradient with respect to a single variable''' N = len(q) W = np.zeros([N, N]) Wprime = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) Wprime[i, i] = \ 0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)]) tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)]) grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \ + (q - t).T.dot(Wprime).dot(q - t) return grad
[ "def", "_matrix_grad", "(", "q", ",", "h", ",", "h_dx", ",", "t", ",", "t_prime", ")", ":", "N", "=", "len", "(", "q", ")", "W", "=", "np", ".", "zeros", "(", "[", "N", ",", "N", "]", ")", "Wprime", "=", "np", ".", "zeros", "(", "[", "N", ",", "N", "]", ")", "for", "i", "in", "range", "(", "N", ")", ":", "W", "[", "i", ",", "i", "]", "=", "0.5", "*", "(", "h", "[", "min", "(", "i", "+", "1", ",", "N", "-", "1", ")", "]", "-", "h", "[", "max", "(", "i", "-", "1", ",", "0", ")", "]", ")", "Wprime", "[", "i", ",", "i", "]", "=", "0.5", "*", "(", "h_dx", "[", "min", "(", "i", "+", "1", ",", "N", "-", "1", ")", "]", "-", "h_dx", "[", "max", "(", "i", "-", "1", ",", "0", ")", "]", ")", "tgrad", "=", "np", ".", "array", "(", "[", "t_prime", "[", "i", "]", "*", "h_dx", "[", "i", "]", "for", "i", "in", "np", ".", "arange", "(", "N", ")", "]", ")", "grad", "=", "2.0", "*", "(", "q", "-", "t", ")", ".", "T", ".", "dot", "(", "W", ")", ".", "dot", "(", "-", "1.0", "*", "tgrad", ")", "+", "(", "q", "-", "t", ")", ".", "T", ".", "dot", "(", "Wprime", ")", ".", "dot", "(", "q", "-", "t", ")", "return", "grad" ]
29.823529
21.823529
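In symbols, `_matrix_grad` differentiates a weighted squared error in which both the target curve and the trapezoid-style weight matrix depend on the variable. Writing the objective as E(x) and reading the dotted quantities as the entrywise derivatives assembled from `h_dx`, the product rule yields exactly the two terms in the code:

\[
E(x) = (q - t(x))^{\top} W(x)\,(q - t(x)), \qquad
\frac{\partial E}{\partial x}
  = -2\,(q - t)^{\top} W\,\dot{t}
  + (q - t)^{\top} \dot{W}\,(q - t),
\quad
\dot{t}_i = t'_i\,\frac{\partial h_i}{\partial x}.
\]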
def get_target_temperature(self):
        """ Returns the current target temperature.
        Attention: Returns None if the value can't be queried or is unknown.
        """
        value = self.box.homeautoswitch("gethkrtsoll", self.actor_id)
        self.target_temperature = self.__get_temp(value)
        return self.target_temperature
[ "def", "get_target_temperature", "(", "self", ")", ":", "value", "=", "self", ".", "box", ".", "homeautoswitch", "(", "\"gethkrtsoll\"", ",", "self", ".", "actor_id", ")", "self", ".", "target_temperature", "=", "self", ".", "__get_temp", "(", "value", ")", "return", "self", ".", "target_temperature" ]
42.5
12
def gettext_lazy(message, domain=DEFAULT_DOMAIN): """Mark a message as translatable, but delay the translation until the message is used. Sometimes, there are some messages that need to be translated, but the translation can't be done at the point the message itself is written. For example, the names of the fields in a Model can't be translated at the point they are written, otherwise the translation would be done when the file is imported, long before a user even connects. To avoid this, `gettext_lazy` should be used. For example: .. code-block:: python from zengine.lib.translation import gettext_lazy, InstalledLocale from pyoko import model, fields class User(model.Model): name = fields.String(gettext_lazy('User Name')) print(User.name.title) 'User Name' InstalledLocale.install_language('tr') print(User.name.title) 'Kullanıcı Adı' Args: message (basestring, unicode): The input message. domain (basestring): The domain of the message. Defaults to 'messages', which is the domain where all application messages should be located. Returns: unicode: The translated message, with the translation itself being delayed until the text is actually used. """ return LazyProxy(gettext, message, domain=domain, enable_cache=False)
[ "def", "gettext_lazy", "(", "message", ",", "domain", "=", "DEFAULT_DOMAIN", ")", ":", "return", "LazyProxy", "(", "gettext", ",", "message", ",", "domain", "=", "domain", ",", "enable_cache", "=", "False", ")" ]
41.969697
26.515152
def add_child(self, child): """Children are GFFFeatures and are defined when added. This is done to avoid memory overheads that may be incurred by GFF files that have millions of rows. """ child_id = getattr(child, 'id', None) if child_id: if not hasattr(self, 'children'): self.children = {} if child_id not in self.children: self.children[child_id] = child
[ "def", "add_child", "(", "self", ",", "child", ")", ":", "child_id", "=", "getattr", "(", "child", ",", "'id'", ",", "None", ")", "if", "child_id", ":", "if", "not", "hasattr", "(", "self", ",", "'children'", ")", ":", "self", ".", "children", "=", "{", "}", "if", "child_id", "not", "in", "self", ".", "children", ":", "self", ".", "children", "[", "child_id", "]", "=", "child" ]
40.454545
11.818182
def log_likelihood(self, ts): """ Returns the log likelihood of the parameters on the given time series. Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf """ likelihood = self._jmodel.logLikelihood(_py2java(self._ctx, Vectors.dense(ts))) return _java2py(self._ctx, likelihood)
[ "def", "log_likelihood", "(", "self", ",", "ts", ")", ":", "likelihood", "=", "self", ".", "_jmodel", ".", "logLikelihood", "(", "_py2java", "(", "self", ".", "_ctx", ",", "Vectors", ".", "dense", "(", "ts", ")", ")", ")", "return", "_java2py", "(", "self", ".", "_ctx", ",", "likelihood", ")" ]
42.375
20.375
def splits(self):
        """
        Split one DataStruct by code into N DataStructs, one per code
        """
        return list(map(lambda x: self.select_code(x), self.code))
[ "def", "splits", "(", "self", ")", ":", "return", "list", "(", "map", "(", "lambda", "x", ":", "self", ".", "select_code", "(", "x", ")", ",", "self", ".", "code", ")", ")" ]
29.2
10
def get_cluster_placement_group(self): """ Return the placement group, create it if it doesn't yet exist. (needed for cluster type instances). """ placement_group_name = 'pg-%s' % (self._name) try: self.ec2.create_placement_group(placement_group_name, strategy = 'cluster') except: pass return placement_group_name
[ "def", "get_cluster_placement_group", "(", "self", ")", ":", "placement_group_name", "=", "'pg-%s'", "%", "(", "self", ".", "_name", ")", "try", ":", "self", ".", "ec2", ".", "create_placement_group", "(", "placement_group_name", ",", "strategy", "=", "'cluster'", ")", "except", ":", "pass", "return", "placement_group_name" ]
33.166667
15.666667
def redirect_ext(to, params_=None, anchor_=None, permanent_=False, args=None, kwargs=None):
    """
    Advanced redirect which can include GET parameters and an anchor.
    """
    if permanent_:
        redirect_class = HttpResponsePermanentRedirect
    else:
        redirect_class = HttpResponseRedirect
    return redirect_class(resolve_url_ext(to, params_, anchor_, args, kwargs))
[ "def", "redirect_ext", "(", "to", ",", "params_", "=", "None", ",", "anchor_", "=", "None", ",", "permanent_", "=", "False", ",", "args", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "permanent_", ":", "redirect_class", "=", "HttpResponsePermanentRedirect", "else", ":", "redirect_class", "=", "HttpResponseRedirect", "return", "redirect_class", "(", "resolve_url_ext", "(", "to", ",", "params_", ",", "anchor_", ",", "args", ",", "kwargs", ")", ")" ]
41.777778
20.888889
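The helper's value is in the URL it builds. The GET-parameter and anchor handling can be sketched standalone — this mirrors what a `resolve_url_ext`-style function would append to an already-resolved URL; the function name below is an assumption, not the library's API:

from urllib.parse import urlencode

def extend_url(url, params=None, anchor=None):
    # Append ?key=value pairs and a #fragment to a resolved URL
    if params:
        url += '?' + urlencode(params)
    if anchor:
        url += '#' + anchor
    return url

assert extend_url('/articles/42/', {'page': 2}, 'comments') == \
    '/articles/42/?page=2#comments'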
def supported_operations(self): """ All operations supported by the camera. """ return tuple(op for op in backend.CAM_OPS if self._abilities.operations & op)
[ "def", "supported_operations", "(", "self", ")", ":", "return", "tuple", "(", "op", "for", "op", "in", "backend", ".", "CAM_OPS", "if", "self", ".", "_abilities", ".", "operations", "&", "op", ")" ]
47.75
8.5
def delete(ctx):
    """Delete build job.

    Uses [Caching](/references/polyaxon-cli/#caching)

    Example:

    \b
    ```bash
    $ polyaxon build delete
    ```

    \b
    ```bash
    $ polyaxon build -b 2 delete
    ```
    """
    user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
    if not click.confirm("Are you sure you want to delete build job `{}`".format(_build)):
        click.echo('Exiting without deleting build job.')
        sys.exit(1)

    try:
        response = PolyaxonClient().build_job.delete_build(
            user, project_name, _build)
        # Purge caching
        BuildJobManager.purge()
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not delete job `{}`.'.format(_build))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)

    if response.status_code == 204:
        Printer.print_success("Build job `{}` was deleted successfully".format(_build))
[ "def", "delete", "(", "ctx", ")", ":", "user", ",", "project_name", ",", "_build", "=", "get_build_or_local", "(", "ctx", ".", "obj", ".", "get", "(", "'project'", ")", ",", "ctx", ".", "obj", ".", "get", "(", "'build'", ")", ")", "if", "not", "click", ".", "confirm", "(", "\"Are sure you want to delete build job `{}`\"", ".", "format", "(", "_build", ")", ")", ":", "click", ".", "echo", "(", "'Existing without deleting build job.'", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "response", "=", "PolyaxonClient", "(", ")", ".", "build_job", ".", "delete_build", "(", "user", ",", "project_name", ",", "_build", ")", "# Purge caching", "BuildJobManager", ".", "purge", "(", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ":", "Printer", ".", "print_error", "(", "'Could not delete job `{}`.'", ".", "format", "(", "_build", ")", ")", "Printer", ".", "print_error", "(", "'Error message `{}`.'", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "if", "response", ".", "status_code", "==", "204", ":", "Printer", ".", "print_success", "(", "\"Build job `{}` was deleted successfully\"", ".", "format", "(", "_build", ")", ")" ]
29.352941
26.735294
def tokenize(self, text): """Tokenize given string `text`, and return a list of tokens. Raises :class:`~textparser.TokenizeError` on failure. This method should only be called by :func:`~textparser.Parser.parse()`, but may very well be overridden if the default implementation does not match the parser needs. """ names, specs = self._unpack_token_specs() keywords = self.keywords() tokens, re_token = tokenize_init(specs) for mo in re.finditer(re_token, text, re.DOTALL): kind = mo.lastgroup if kind == 'SKIP': pass elif kind != 'MISMATCH': value = mo.group(kind) if value in keywords: kind = value if kind in names: kind = names[kind] tokens.append(Token(kind, value, mo.start())) else: raise TokenizeError(text, mo.start()) return tokens
[ "def", "tokenize", "(", "self", ",", "text", ")", ":", "names", ",", "specs", "=", "self", ".", "_unpack_token_specs", "(", ")", "keywords", "=", "self", ".", "keywords", "(", ")", "tokens", ",", "re_token", "=", "tokenize_init", "(", "specs", ")", "for", "mo", "in", "re", ".", "finditer", "(", "re_token", ",", "text", ",", "re", ".", "DOTALL", ")", ":", "kind", "=", "mo", ".", "lastgroup", "if", "kind", "==", "'SKIP'", ":", "pass", "elif", "kind", "!=", "'MISMATCH'", ":", "value", "=", "mo", ".", "group", "(", "kind", ")", "if", "value", "in", "keywords", ":", "kind", "=", "value", "if", "kind", "in", "names", ":", "kind", "=", "names", "[", "kind", "]", "tokens", ".", "append", "(", "Token", "(", "kind", ",", "value", ",", "mo", ".", "start", "(", ")", ")", ")", "else", ":", "raise", "TokenizeError", "(", "text", ",", "mo", ".", "start", "(", ")", ")", "return", "tokens" ]
29.294118
18.911765
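The scanner above leans on `re.finditer` with named alternation groups and a trailing MISMATCH catch-all. A stripped-down, self-contained version of the same pattern (`tokenize_init` and the `Token` class are stand-ins here):

import re
from collections import namedtuple

Token = namedtuple('Token', 'kind value offset')
specs = [('NUMBER', r'\d+'), ('NAME', r'\w+'),
         ('SKIP', r'\s+'), ('MISMATCH', r'.')]
re_token = '|'.join('(?P<%s>%s)' % pair for pair in specs)

tokens = []
for mo in re.finditer(re_token, 'x 42 y', re.DOTALL):
    kind = mo.lastgroup          # name of the alternative that matched
    if kind == 'SKIP':
        continue
    if kind == 'MISMATCH':
        raise ValueError('bad input at %d' % mo.start())
    tokens.append(Token(kind, mo.group(kind), mo.start()))

# tokens == [Token('NAME', 'x', 0), Token('NUMBER', '42', 2), Token('NAME', 'y', 5)]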
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs): """ Load subtitle file from given path. Arguments: path (str): Path to subtitle file. encoding (str): Character encoding of input file. Defaults to UTF-8, you may need to change this. format_ (str): Optional, forces use of specific parser (eg. `"srt"`, `"ass"`). Otherwise, format is detected automatically from file contents. This argument should be rarely needed. fps (float): Framerate for frame-based formats (MicroDVD), for other formats this argument is ignored. Framerate might be detected from the file, in which case you don't need to specify it here (when given, this argument overrides autodetection). kwargs: Extra options for the parser. Returns: SSAFile Raises: IOError UnicodeDecodeError pysubs2.exceptions.UnknownFPSError pysubs2.exceptions.UnknownFormatIdentifierError pysubs2.exceptions.FormatAutodetectionError Note: pysubs2 may autodetect subtitle format and/or framerate. These values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps` attributes. Example: >>> subs1 = pysubs2.load("subrip-subtitles.srt") >>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976) """ with open(path, encoding=encoding) as fp: return cls.from_file(fp, format_, fps=fps, **kwargs)
[ "def", "load", "(", "cls", ",", "path", ",", "encoding", "=", "\"utf-8\"", ",", "format_", "=", "None", ",", "fps", "=", "None", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "path", ",", "encoding", "=", "encoding", ")", "as", "fp", ":", "return", "cls", ".", "from_file", "(", "fp", ",", "format_", ",", "fps", "=", "fps", ",", "*", "*", "kwargs", ")" ]
39.95122
23.170732
def _repeat_iter(input_iter): """Iterate over the input iter values. Then repeat the last value indefinitely. This is useful to repeat seed values when an insufficient number of seeds are provided. E.g. KISS(1) effectively becomes KISS(1, 1, 1, 1), rather than (if we just used default values) KISS(1, default-value, default-value, default-value) It is better to repeat the last seed value, rather than just using default values. Given two generators seeded with an insufficient number of seeds, repeating the last seed value means their states are more different from each other, with less correlation between their generated outputs. """ last_value = None for value in input_iter: last_value = value yield value if last_value is not None: while True: yield last_value
[ "def", "_repeat_iter", "(", "input_iter", ")", ":", "last_value", "=", "None", "for", "value", "in", "input_iter", ":", "last_value", "=", "value", "yield", "value", "if", "last_value", "is", "not", "None", ":", "while", "True", ":", "yield", "last_value" ]
42
22.9
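A quick check of the repeat-last-value behaviour described above, assuming `_repeat_iter` from the previous row is in scope; `islice` caps the otherwise infinite tail:

from itertools import islice

assert list(islice(_repeat_iter([1, 2, 3]), 6)) == [1, 2, 3, 3, 3, 3]
assert list(_repeat_iter([])) == []   # no seeds in, no seeds out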
def _update_labels(self, label, crop_box, height, width): """Convert labels according to crop box""" xmin = float(crop_box[0]) / width ymin = float(crop_box[1]) / height w = float(crop_box[2]) / width h = float(crop_box[3]) / height out = label.copy() out[:, (1, 3)] -= xmin out[:, (2, 4)] -= ymin out[:, (1, 3)] /= w out[:, (2, 4)] /= h out[:, 1:5] = np.maximum(0, out[:, 1:5]) out[:, 1:5] = np.minimum(1, out[:, 1:5]) coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:]) valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]) valid = np.logical_and(valid, coverage > self.min_eject_coverage) valid = np.where(valid)[0] if valid.size < 1: return None out = out[valid, :] return out
[ "def", "_update_labels", "(", "self", ",", "label", ",", "crop_box", ",", "height", ",", "width", ")", ":", "xmin", "=", "float", "(", "crop_box", "[", "0", "]", ")", "/", "width", "ymin", "=", "float", "(", "crop_box", "[", "1", "]", ")", "/", "height", "w", "=", "float", "(", "crop_box", "[", "2", "]", ")", "/", "width", "h", "=", "float", "(", "crop_box", "[", "3", "]", ")", "/", "height", "out", "=", "label", ".", "copy", "(", ")", "out", "[", ":", ",", "(", "1", ",", "3", ")", "]", "-=", "xmin", "out", "[", ":", ",", "(", "2", ",", "4", ")", "]", "-=", "ymin", "out", "[", ":", ",", "(", "1", ",", "3", ")", "]", "/=", "w", "out", "[", ":", ",", "(", "2", ",", "4", ")", "]", "/=", "h", "out", "[", ":", ",", "1", ":", "5", "]", "=", "np", ".", "maximum", "(", "0", ",", "out", "[", ":", ",", "1", ":", "5", "]", ")", "out", "[", ":", ",", "1", ":", "5", "]", "=", "np", ".", "minimum", "(", "1", ",", "out", "[", ":", ",", "1", ":", "5", "]", ")", "coverage", "=", "self", ".", "_calculate_areas", "(", "out", "[", ":", ",", "1", ":", "]", ")", "*", "w", "*", "h", "/", "self", ".", "_calculate_areas", "(", "label", "[", ":", ",", "1", ":", "]", ")", "valid", "=", "np", ".", "logical_and", "(", "out", "[", ":", ",", "3", "]", ">", "out", "[", ":", ",", "1", "]", ",", "out", "[", ":", ",", "4", "]", ">", "out", "[", ":", ",", "2", "]", ")", "valid", "=", "np", ".", "logical_and", "(", "valid", ",", "coverage", ">", "self", ".", "min_eject_coverage", ")", "valid", "=", "np", ".", "where", "(", "valid", ")", "[", "0", "]", "if", "valid", ".", "size", "<", "1", ":", "return", "None", "out", "=", "out", "[", "valid", ",", ":", "]", "return", "out" ]
41.809524
14.190476
def libvlc_media_get_duration(p_md): '''Get duration (in ms) of media descriptor object item. @param p_md: media descriptor object. @return: duration of media item or -1 on error. ''' f = _Cfunctions.get('libvlc_media_get_duration', None) or \ _Cfunction('libvlc_media_get_duration', ((1,),), None, ctypes.c_longlong, Media) return f(p_md)
[ "def", "libvlc_media_get_duration", "(", "p_md", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_get_duration'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_get_duration'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_longlong", ",", "Media", ")", "return", "f", "(", "p_md", ")" ]
42.555556
15.666667
def get_webhook(self): """Get the current callback URL if it exists. :return: The currently set webhook """ api = self._get_api(mds.NotificationsApi) return Webhook(api.get_webhook())
[ "def", "get_webhook", "(", "self", ")", ":", "api", "=", "self", ".", "_get_api", "(", "mds", ".", "NotificationsApi", ")", "return", "Webhook", "(", "api", ".", "get_webhook", "(", ")", ")" ]
31.142857
10
def check_data(self, *args, **kwargs): """Check whether the plotter of this plot method can visualize the data """ plotter_cls = self.plotter_cls da_list = self._project_plotter._da.psy.to_interactive_list() return plotter_cls.check_data( da_list.all_names, da_list.all_dims, da_list.is_unstructured)
[ "def", "check_data", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "plotter_cls", "=", "self", ".", "plotter_cls", "da_list", "=", "self", ".", "_project_plotter", ".", "_da", ".", "psy", ".", "to_interactive_list", "(", ")", "return", "plotter_cls", ".", "check_data", "(", "da_list", ".", "all_names", ",", "da_list", ".", "all_dims", ",", "da_list", ".", "is_unstructured", ")" ]
49.428571
9.714286
def is_identifier(string): """Check if string could be a valid python identifier :param string: string to be tested :returns: True if string can be a python identifier, False otherwise :rtype: bool """ matched = PYTHON_IDENTIFIER_RE.match(string) return bool(matched) and not keyword.iskeyword(string)
[ "def", "is_identifier", "(", "string", ")", ":", "matched", "=", "PYTHON_IDENTIFIER_RE", ".", "match", "(", "string", ")", "return", "bool", "(", "matched", ")", "and", "not", "keyword", ".", "iskeyword", "(", "string", ")" ]
35.777778
15.333333
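A self-contained variant for experimentation. The module-level PYTHON_IDENTIFIER_RE is not shown in this row, so the pattern below is an assumption (ASCII-only, unlike `str.isidentifier`):

import keyword
import re

PYTHON_IDENTIFIER_RE = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')  # assumed pattern

def is_identifier(string):
    matched = PYTHON_IDENTIFIER_RE.match(string)
    return bool(matched) and not keyword.iskeyword(string)

assert is_identifier('foo_bar')
assert not is_identifier('class')   # keyword
assert not is_identifier('2fast')   # starts with a digit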
def determine_frame_positions(self): """Record the file pointer position of each frame""" self.rewind_file() with ignored(struct.error): while True: pointer_position = self.blob_file.tell() length = struct.unpack('<i', self.blob_file.read(4))[0] self.blob_file.seek(length - 4, 1) self.frame_positions.append(pointer_position) self.rewind_file() log.info("Found {0} frames.".format(len(self.frame_positions)))
[ "def", "determine_frame_positions", "(", "self", ")", ":", "self", ".", "rewind_file", "(", ")", "with", "ignored", "(", "struct", ".", "error", ")", ":", "while", "True", ":", "pointer_position", "=", "self", ".", "blob_file", ".", "tell", "(", ")", "length", "=", "struct", ".", "unpack", "(", "'<i'", ",", "self", ".", "blob_file", ".", "read", "(", "4", ")", ")", "[", "0", "]", "self", ".", "blob_file", ".", "seek", "(", "length", "-", "4", ",", "1", ")", "self", ".", "frame_positions", ".", "append", "(", "pointer_position", ")", "self", ".", "rewind_file", "(", ")", "log", ".", "info", "(", "\"Found {0} frames.\"", ".", "format", "(", "len", "(", "self", ".", "frame_positions", ")", ")", ")" ]
46.818182
14.818182
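A self-contained illustration of the length-prefixed frame scan above, using an in-memory file with two fake frames (the `<i` length field counts itself, hence the `length - 4` skip):

import io
import struct

blob = io.BytesIO()
for payload in (b'AAAA', b'BBBBBBBB'):
    frame = struct.pack('<i', len(payload) + 4) + payload  # length incl. itself
    blob.write(frame)
blob.seek(0)

positions = []
while True:
    pos = blob.tell()
    header = blob.read(4)
    if len(header) < 4:              # end of file reached
        break
    length = struct.unpack('<i', header)[0]
    blob.seek(length - 4, 1)         # skip the rest of the frame
    positions.append(pos)

assert positions == [0, 8]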
def to_naf(self):
        """
        Converts a KAF object to NAF (in memory). You will have to use the method dump later to save it as a new NAF file
        """
        if self.type == 'KAF':
            self.root.tag = self.type = 'NAF'

        ## Convert the header
        if self.header is not None:
            self.header.to_naf()

        ## Convert the token layer
        if self.text_layer is not None:
            self.text_layer.to_naf()

        ## Convert the term layer
        if self.term_layer is not None:
            self.term_layer.to_naf()

        ## Convert the chunk layer
        if self.chunk_layer is not None:
            self.chunk_layer.to_naf()

        ## Convert the entity layer
        if self.entity_layer is not None:
            self.entity_layer.to_naf()

        ## Convert the features layer
        ## There is no feature layer defined in NAF, but we assumed
        ## that if defined it will be following the same rules
        if self.features_layer is not None:
            self.features_layer.to_naf()

        ## Convert the opinion layer
        if self.opinion_layer is not None:
            self.opinion_layer.to_naf()

        ## Convert the constituency layer
        ## This layer is exactly the same in KAF/NAF
        if self.constituency_layer is not None:
            self.constituency_layer.to_naf()    #Does nothing...

        ## Convert the dependency layer
        ## It is not defined on KAF so we assume both will be similar
        if self.dependency_layer is not None:
            self.dependency_layer.to_naf()      #Does nothing...

        if self.coreference_layer is not None:
            self.coreference_layer.to_naf()

        ## Convert the temporalRelations layer
        ## It is not defined on KAF so we assume both will be similar
        if self.temporalRelations_layer is not None:
            self.temporalRelations_layer.to_naf()      #Does nothing...

        ## Convert the causalRelations layer
        ## It is not defined on KAF so we assume both will be similar
        if self.causalRelations_layer is not None:
            self.causalRelations_layer.to_naf()      #Does nothing...

        ## Convert the factuality layer
        ## It is not defined on KAF so we assume both will be similar
        if self.factuality_layer is not None:
            self.factuality_layer.to_naf()      #Does nothing...

        ## Convert the markable layer
        ## It is not defined on KAF so we assume both will be similar
        if self.markable_layer is not None:
            self.markable_layer.to_naf()      #Changes identifier attribute nothing else...

        ## Convert the attribution_layer
        ## It is not defined on KAF so we assume both will be similar
        if self.attribution_layer is not None:
            self.attribution_layer.to_naf()
[ "def", "to_naf", "(", "self", ")", ":", "if", "self", ".", "type", "==", "'KAF'", ":", "self", ".", "root", ".", "tag", "=", "self", ".", "type", "=", "'NAF'", "## Convert the header", "if", "self", ".", "header", "is", "not", "None", ":", "self", ".", "header", ".", "to_naf", "(", ")", "## Convert the token layer", "if", "self", ".", "text_layer", "is", "not", "None", ":", "self", ".", "text_layer", ".", "to_naf", "(", ")", "## Convert the term layer", "if", "self", ".", "term_layer", "is", "not", "None", ":", "self", ".", "term_layer", ".", "to_naf", "(", ")", "## Convert the chunk layer", "if", "self", ".", "chunk_layer", "is", "not", "None", ":", "self", ".", "chunk_layer", ".", "to_naf", "(", ")", "## Convert the entity layer", "if", "self", ".", "entity_layer", "is", "not", "None", ":", "self", ".", "entity_layer", ".", "to_naf", "(", ")", "## Convert the features layer", "## There is no feature layer defined in NAF, but we assumed", "## that is defined will be followin the same rules", "if", "self", ".", "features_layer", "is", "not", "None", ":", "self", ".", "features_layer", ".", "to_naf", "(", ")", "##Convert the opinion layer", "if", "self", ".", "opinion_layer", "is", "not", "None", ":", "self", ".", "opinion_layer", ".", "to_naf", "(", ")", "## Convert the constituency layer", "## This layer is exactly the same in KAF/NAF", "if", "self", ".", "constituency_layer", "is", "not", "None", ":", "self", ".", "constituency_layer", ".", "to_naf", "(", ")", "#Does nothing...", "## Convert the dedepency layer", "## It is not defined on KAF so we assume both will be similar", "if", "self", ".", "dependency_layer", "is", "not", "None", ":", "self", ".", "dependency_layer", ".", "to_naf", "(", ")", "#Does nothing...", "if", "self", ".", "coreference_layer", "is", "not", "None", ":", "self", ".", "coreference_layer", ".", "to_naf", "(", ")", "## Convert the temporalRelations layer", "## It is not defined on KAF so we assume both will be similar", "if", "self", ".", "temporalRelations_layer", "is", "not", "None", ":", "self", ".", "temporalRelations_layer", ".", "to_naf", "(", ")", "#Does nothing...", "## Convert the causalRelations layer", "## It is not defined on KAF so we assume both will be similar", "if", "self", ".", "causalRelations_layer", "is", "not", "None", ":", "self", ".", "causalRelations_layer", ".", "to_naf", "(", ")", "#Does nothing...", "## Convert the factuality layer", "## It is not defined on KAF so we assume both will be similar", "if", "self", ".", "factuality_layer", "is", "not", "None", ":", "self", ".", "factuality_layer", ".", "to_naf", "(", ")", "#Does nothing...", "## Convert the markable layer", "## It is not defined on KAF so we assume both will be similar", "if", "self", ".", "markable_layer", "is", "not", "None", ":", "self", ".", "markable_layer", ".", "to_naf", "(", ")", "#Changes identifier attribute nothing else...", "## Convert the attribution_layer", "## It is not defined on KAF so we assme both will be similar", "if", "self", ".", "attribution_layer", "is", "not", "None", ":", "self", ".", "attribution_layer", ".", "to_naf", "(", ")" ]
34.5875
19.0875
def _override_payload(self, payload): """ This function transforms the payload into a new format using the self.override_payload property. """ if self.override_payload: old_payload = payload def get_value(data, key): try: parent_key, nested_key = key.split(".", 1) return get_value(data.get(parent_key, {}), nested_key) except ValueError: return data.get(key, key) def set_values(data): for key, value in data.items(): if isinstance(value, dict): set_values(value) else: data[key] = get_value(old_payload, value) payload = deepcopy(self.override_payload) set_values(payload) return payload
[ "def", "_override_payload", "(", "self", ",", "payload", ")", ":", "if", "self", ".", "override_payload", ":", "old_payload", "=", "payload", "def", "get_value", "(", "data", ",", "key", ")", ":", "try", ":", "parent_key", ",", "nested_key", "=", "key", ".", "split", "(", "\".\"", ",", "1", ")", "return", "get_value", "(", "data", ".", "get", "(", "parent_key", ",", "{", "}", ")", ",", "nested_key", ")", "except", "ValueError", ":", "return", "data", ".", "get", "(", "key", ",", "key", ")", "def", "set_values", "(", "data", ")", ":", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "set_values", "(", "value", ")", "else", ":", "data", "[", "key", "]", "=", "get_value", "(", "old_payload", ",", "value", ")", "payload", "=", "deepcopy", "(", "self", ".", "override_payload", ")", "set_values", "(", "payload", ")", "return", "payload" ]
33.538462
15.461538
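The dotted-key lookup is the interesting part of the method above. A standalone walk-through with a toy payload (keys and values are made up): dotted keys pull nested values out of the old payload, and unknown keys fall back to themselves.

old_payload = {'user': {'name': 'ada', 'id': 7}, 'event': 'login'}
override = {'who': 'user.name', 'what': 'event'}

def get_value(data, key):
    try:
        parent_key, nested_key = key.split('.', 1)
        return get_value(data.get(parent_key, {}), nested_key)
    except ValueError:          # no dot left: plain lookup
        return data.get(key, key)

new_payload = {k: get_value(old_payload, v) for k, v in override.items()}
assert new_payload == {'who': 'ada', 'what': 'login'}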
def install_language(language): """Install translation service routines into default namespace.""" translator = get_translator(default_domain, default_directory, languages=[get_lang(language)], fallback=True) do_unicode = True translator.install(do_unicode)
[ "def", "install_language", "(", "language", ")", ":", "translator", "=", "get_translator", "(", "default_domain", ",", "default_directory", ",", "languages", "=", "[", "get_lang", "(", "language", ")", "]", ",", "fallback", "=", "True", ")", "do_unicode", "=", "True", "translator", ".", "install", "(", "do_unicode", ")" ]
46
12.333333
def color_ramp(self, size): """Generate a color ramp for the current screen height.""" color = PALETTE.get(self.option.palette, {}) color = color.get(self.term.colors, None) color_ramp = [] if color is not None: ratio = len(color) / float(size) for i in range(int(size)): color_ramp.append(self.term.color(color[int(ratio * i)])) return color_ramp
[ "def", "color_ramp", "(", "self", ",", "size", ")", ":", "color", "=", "PALETTE", ".", "get", "(", "self", ".", "option", ".", "palette", ",", "{", "}", ")", "color", "=", "color", ".", "get", "(", "self", ".", "term", ".", "colors", ",", "None", ")", "color_ramp", "=", "[", "]", "if", "color", "is", "not", "None", ":", "ratio", "=", "len", "(", "color", ")", "/", "float", "(", "size", ")", "for", "i", "in", "range", "(", "int", "(", "size", ")", ")", ":", "color_ramp", ".", "append", "(", "self", ".", "term", ".", "color", "(", "color", "[", "int", "(", "ratio", "*", "i", ")", "]", ")", ")", "return", "color_ramp" ]
38.727273
14.181818
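The index arithmetic maps each screen row onto a palette entry by a fixed ratio. A standalone check with a toy 4-colour palette and a screen height of 10:

color = ['c0', 'c1', 'c2', 'c3']
size = 10
ratio = len(color) / float(size)
ramp = [color[int(ratio * i)] for i in range(int(size))]
assert ramp == ['c0', 'c0', 'c0', 'c1', 'c1', 'c2', 'c2', 'c2', 'c3', 'c3']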
def tickets(self, extra_params=None): """ A User's tickets across all available spaces """ tickets = [] for space in self.api.spaces(): tickets += filter( lambda ticket: ticket.get('assigned_to_id', None) == self['id'], space.tickets(extra_params=extra_params) ) return tickets
[ "def", "tickets", "(", "self", ",", "extra_params", "=", "None", ")", ":", "tickets", "=", "[", "]", "for", "space", "in", "self", ".", "api", ".", "spaces", "(", ")", ":", "tickets", "+=", "filter", "(", "lambda", "ticket", ":", "ticket", ".", "get", "(", "'assigned_to_id'", ",", "None", ")", "==", "self", "[", "'id'", "]", ",", "space", ".", "tickets", "(", "extra_params", "=", "extra_params", ")", ")", "return", "tickets" ]
33.727273
13.363636
def F1(self, thresholds=None, train=False, valid=False, xval=False):
        """
        Get the F1 values for a set of thresholds for the models explored.

        If all are False (default), then return the training metric value.
        If more than one option is set to True, then return a dictionary of metrics where
        the keys are "train", "valid", and "xval".

        :param List thresholds: If None, then the thresholds in this set of metrics will be used.
        :param bool train: If True, return the F1 value for the training data.
        :param bool valid: If True, return the F1 value for the validation data.
        :param bool xval: If True, return the F1 value for each of the cross-validated splits.

        :returns: Dictionary of model keys to F1 values
        """
        return {model.model_id: model.F1(thresholds, train, valid, xval) for model in self.models}
[ "def", "F1", "(", "self", ",", "thresholds", "=", "None", ",", "train", "=", "False", ",", "valid", "=", "False", ",", "xval", "=", "False", ")", ":", "return", "{", "model", ".", "model_id", ":", "model", ".", "F1", "(", "thresholds", ",", "train", ",", "valid", ",", "xval", ")", "for", "model", "in", "self", ".", "models", "}" ]
55.9375
31.0625
def clean_worksheet(wks, gfile_id, wks_name, credentials):
    """Clean a worksheet by overwriting all of its existing values with empty strings."""
    values = wks.get_all_values()
    if values:
        df_ = pd.DataFrame(index=range(len(values)),
                           columns=range(len(values[0])))
        df_ = df_.fillna('')
        wks = upload(df_, gfile_id, wks_name=wks_name,
                     col_names=False, row_names=False, clean=False,
                     credentials=credentials)
    return wks
[ "def", "clean_worksheet", "(", "wks", ",", "gfile_id", ",", "wks_name", ",", "credentials", ")", ":", "values", "=", "wks", ".", "get_all_values", "(", ")", "if", "values", ":", "df_", "=", "pd", ".", "DataFrame", "(", "index", "=", "range", "(", "len", "(", "values", ")", ")", ",", "columns", "=", "range", "(", "len", "(", "values", "[", "0", "]", ")", ")", ")", "df_", "=", "df_", ".", "fillna", "(", "''", ")", "wks", "=", "upload", "(", "df_", ",", "gfile_id", ",", "wks_name", "=", "wks_name", ",", "col_names", "=", "False", ",", "row_names", "=", "False", ",", "clean", "=", "False", ",", "credentials", "=", "credentials", ")", "return", "wks" ]
36.583333
17
def compile_for_aexec(source, filename="<aexec>", mode="single",
                      dont_imply_dedent=False, local={}):
    """Return a list of (coroutine object, abstract syntax tree)."""
    flags = ast.PyCF_ONLY_AST
    if dont_imply_dedent:
        flags |= codeop.PyCF_DONT_IMPLY_DEDENT
    if compat.PY35:
        # Avoid a syntax error by wrapping code with `async def`
        indented = '\n'.join(line and ' ' * 4 + line
                             for line in source.split('\n'))
        coroutine = CORO_DEF + '\n' + indented + '\n'
        interactive = compile(coroutine, filename, mode, flags).body[0]
        # Check EOF errors
        try:
            compile(source, filename, mode, flags)
        except SyntaxError as exc:
            if exc.msg == 'unexpected EOF while parsing':
                raise
    else:
        interactive = compile(source, filename, mode, flags)
    return [make_tree(statement, filename, mode)
            for statement in interactive.body]
[ "def", "compile_for_aexec", "(", "source", ",", "filename", "=", "\"<aexec>\"", ",", "mode", "=", "\"single\"", ",", "dont_imply_dedent", "=", "False", ",", "local", "=", "{", "}", ")", ":", "flags", "=", "ast", ".", "PyCF_ONLY_AST", "if", "dont_imply_dedent", ":", "flags", "|=", "codeop", ".", "PyCF_DONT_IMPLY_DEDENT", "if", "compat", ".", "PY35", ":", "# Avoid a syntax error by wrapping code with `async def`", "indented", "=", "'\\n'", ".", "join", "(", "line", "and", "' '", "*", "4", "+", "line", "for", "line", "in", "source", ".", "split", "(", "'\\n'", ")", ")", "coroutine", "=", "CORO_DEF", "+", "'\\n'", "+", "indented", "+", "'\\n'", "interactive", "=", "compile", "(", "coroutine", ",", "filename", ",", "mode", ",", "flags", ")", ".", "body", "[", "0", "]", "# Check EOF errors", "try", ":", "compile", "(", "source", ",", "filename", ",", "mode", ",", "flags", ")", "except", "SyntaxError", "as", "exc", ":", "if", "exc", ".", "msg", "==", "'unexpected EOF while parsing'", ":", "raise", "else", ":", "interactive", "=", "compile", "(", "source", ",", "filename", ",", "mode", ",", "flags", ")", "return", "[", "make_tree", "(", "statement", ",", "filename", ",", "mode", ")", "for", "statement", "in", "interactive", ".", "body", "]" ]
44.045455
16.045455
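The indent-and-wrap step is easy to see in isolation. CORO_DEF is not shown in this row, so an `async def` header of that shape is assumed below; wrapping is what makes top-level `await` parse:

import ast

CORO_DEF = "async def _aexec():"   # assumed; the real constant is not shown
source = "x = await fetch()"       # top-level await: invalid as a plain module

indented = '\n'.join(line and ' ' * 4 + line for line in source.split('\n'))
coroutine = CORO_DEF + '\n' + indented + '\n'

tree = ast.parse(coroutine)        # parses once wrapped in the async def
assert isinstance(tree.body[0], ast.AsyncFunctionDef)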
def delete(sql, *args, **kwargs):
    """Deletes and commits with a delete sql statement"""
    assert "delete" in sql.lower(), 'This function requires a delete statement, provided: {}'.format(sql)
    CoyoteDb.execute_and_commit(sql, *args, **kwargs)
[ "def", "delete", "(", "sql", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "\"delete\"", "in", "sql", ".", "lower", "(", ")", ",", "'This function requires a delete statement, provided: {}'", ".", "format", "(", "sql", ")", "CoyoteDb", ".", "execute_and_commit", "(", "sql", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
65.25
23.25
def accuracy(self, outputs): '''Build a Theano expression for computing the accuracy of graph output. Parameters ---------- outputs : dict of Theano expressions A dictionary mapping network output names to Theano expressions representing the outputs of a computation graph. Returns ------- acc : Theano expression A Theano expression representing the accuracy of the output compared to the target data. ''' output = outputs[self.output_name] predict = TT.argmax(output, axis=-1) correct = TT.eq(predict, self._target) acc = correct.mean() if self._weights is not None: acc = (self._weights * correct).sum() / self._weights.sum() return acc
[ "def", "accuracy", "(", "self", ",", "outputs", ")", ":", "output", "=", "outputs", "[", "self", ".", "output_name", "]", "predict", "=", "TT", ".", "argmax", "(", "output", ",", "axis", "=", "-", "1", ")", "correct", "=", "TT", ".", "eq", "(", "predict", ",", "self", ".", "_target", ")", "acc", "=", "correct", ".", "mean", "(", ")", "if", "self", ".", "_weights", "is", "not", "None", ":", "acc", "=", "(", "self", ".", "_weights", "*", "correct", ")", ".", "sum", "(", ")", "/", "self", ".", "_weights", ".", "sum", "(", ")", "return", "acc" ]
36
20.545455
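A NumPy analogue of the weighted-accuracy expression built above, with the weights acting as a mask over, say, padded timesteps:

import numpy as np

predict = np.array([2, 1, 0, 2])
target = np.array([2, 0, 0, 2])
weights = np.array([1.0, 1.0, 0.0, 2.0])   # e.g. zero weight on padding

correct = (predict == target).astype(float)
acc = (weights * correct).sum() / weights.sum()   # == 3.0 / 4.0
assert acc == 0.75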
def make_codon_list(protein_seq, template_dna=None, include_stop=True): """ Return a list of codons that would be translated to the given protein sequence. Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer "optimal" codons. """ codon_list = [] if template_dna is None: template_dna = [] # Reverse translate each codon, preferring (in order): # 1. The codon with the most similarity to the template codon. # 2. The codon with the highest natural usage. for i, res in enumerate(protein_seq.upper()): try: template_codon = template_dna[3*i:3*i+3] except IndexError: template_codon = '---' # Already sorted by natural codon usage possible_codons = dna.ecoli_reverse_translate[res] # Sort by similarity. Note that this is a stable sort. possible_codons.sort( key=lambda x: dna.num_mutations(x, template_codon)) # Pick the best codon. codon_list.append(possible_codons[0]) # Make sure the sequence ends with a stop codon. last_codon = codon_list[-1] stop_codons = dna.ecoli_reverse_translate['.'] if include_stop and last_codon not in stop_codons: codon_list.append(stop_codons[0]) return codon_list
[ "def", "make_codon_list", "(", "protein_seq", ",", "template_dna", "=", "None", ",", "include_stop", "=", "True", ")", ":", "codon_list", "=", "[", "]", "if", "template_dna", "is", "None", ":", "template_dna", "=", "[", "]", "# Reverse translate each codon, preferring (in order):", "# 1. The codon with the most similarity to the template codon.", "# 2. The codon with the highest natural usage.", "for", "i", ",", "res", "in", "enumerate", "(", "protein_seq", ".", "upper", "(", ")", ")", ":", "try", ":", "template_codon", "=", "template_dna", "[", "3", "*", "i", ":", "3", "*", "i", "+", "3", "]", "except", "IndexError", ":", "template_codon", "=", "'---'", "# Already sorted by natural codon usage", "possible_codons", "=", "dna", ".", "ecoli_reverse_translate", "[", "res", "]", "# Sort by similarity. Note that this is a stable sort.", "possible_codons", ".", "sort", "(", "key", "=", "lambda", "x", ":", "dna", ".", "num_mutations", "(", "x", ",", "template_codon", ")", ")", "# Pick the best codon.", "codon_list", ".", "append", "(", "possible_codons", "[", "0", "]", ")", "# Make sure the sequence ends with a stop codon.", "last_codon", "=", "codon_list", "[", "-", "1", "]", "stop_codons", "=", "dna", ".", "ecoli_reverse_translate", "[", "'.'", "]", "if", "include_stop", "and", "last_codon", "not", "in", "stop_codons", ":", "codon_list", ".", "append", "(", "stop_codons", "[", "0", "]", ")", "return", "codon_list" ]
33.868421
21.763158
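The selection hinges on Python's sort being stable: candidates arrive ordered by codon usage, so ties on mutation count keep the higher-usage codon first. A toy illustration with hypothetical leucine codons; `dna.num_mutations` is assumed to be a plain Hamming distance:

candidates = ['CTG', 'TTA', 'CTC']   # assumed usage order, most common first
template_codon = 'CTC'

def num_mutations(a, b):
    # Hamming distance between two equal-length codons
    return sum(x != y for x, y in zip(a, b))

candidates.sort(key=lambda c: num_mutations(c, template_codon))
assert candidates == ['CTC', 'CTG', 'TTA']   # exact template match wins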
def visit(self, node, abort=abort_visit): """Visit a node.""" method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, abort) return visitor(node)
[ "def", "visit", "(", "self", ",", "node", ",", "abort", "=", "abort_visit", ")", ":", "method", "=", "'visit_'", "+", "node", ".", "__class__", ".", "__name__", "visitor", "=", "getattr", "(", "self", ",", "method", ",", "abort", ")", "return", "visitor", "(", "node", ")" ]
38.6
6
def _next_lexem(self, lexem_type, source_code, source_code_size):
        """Return next readable lexem of given type in source_code.
        If no value can be found, the neutral_value will be used"""
        # define reader as a lexem extractor
        def reader(seq, block_size):
            identificator = ''
            for char in seq:
                if len(identificator) == block_size:
                    yield self.table_values[lexem_type][identificator]
                    identificator = ''
                identificator += char
        lexem_reader = reader(source_code, self.idnt_values_size[lexem_type])
        lexem = None
        time_out = 0
        while lexem is None and time_out < 2*source_code_size:
            lexem = next(lexem_reader)
            time_out += 1
        # here we have found a lexem
        return lexem
[ "def", "_next_lexem", "(", "self", ",", "lexem_type", ",", "source_code", ",", "source_code_size", ")", ":", "# define reader as a lexem extractor", "def", "reader", "(", "seq", ",", "block_size", ")", ":", "identificator", "=", "''", "for", "char", "in", "source_code", ":", "if", "len", "(", "identificator", ")", "==", "self", ".", "idnt_values_size", "[", "lexem_type", "]", ":", "yield", "self", ".", "table_values", "[", "lexem_type", "]", "[", "identificator", "]", "identificator", "=", "''", "identificator", "+=", "char", "lexem_reader", "=", "reader", "(", "source_code", ",", "self", ".", "idnt_values_size", ")", "lexem", "=", "None", "time_out", "=", "0", "while", "lexem", "==", "None", "and", "time_out", "<", "2", "*", "source_code_size", ":", "lexem", "=", "next", "(", "lexem_reader", ")", "time_out", "+=", "1", "# here we have found a lexem", "return", "lexem" ]
44.842105
12.947368
def update_check(package_name, package_version, bypass_cache=False, url=None, **extra_data): """Convenience method that outputs to stdout if an update is available.""" checker = UpdateChecker(url) checker.bypass_cache = bypass_cache result = checker.check(package_name, package_version, **extra_data) if result: print(result)
[ "def", "update_check", "(", "package_name", ",", "package_version", ",", "bypass_cache", "=", "False", ",", "url", "=", "None", ",", "*", "*", "extra_data", ")", ":", "checker", "=", "UpdateChecker", "(", "url", ")", "checker", ".", "bypass_cache", "=", "bypass_cache", "result", "=", "checker", ".", "check", "(", "package_name", ",", "package_version", ",", "*", "*", "extra_data", ")", "if", "result", ":", "print", "(", "result", ")" ]
45.375
16.375
def attribute_map_get(self, address, route_dist=None, route_family=RF_VPN_V4): """This method gets in-bound filters of the specified neighbor. ``address`` specifies the IP address of the neighbor. ``route_dist`` specifies route distinguisher that has attribute_maps. ``route_family`` specifies route family of the VRF. This parameter must be one of the following. - RF_VPN_V4 (default) = 'ipv4' - RF_VPN_V6 = 'ipv6' Returns a list object containing an instance of AttributeMap """ if route_family not in SUPPORTED_VRF_RF: raise ValueError('Unsupported route_family: %s' % route_family) func_name = 'neighbor.attribute_map.get' param = { neighbors.IP_ADDRESS: address, } if route_dist is not None: param[vrfs.ROUTE_DISTINGUISHER] = route_dist param[vrfs.VRF_RF] = route_family return call(func_name, **param)
[ "def", "attribute_map_get", "(", "self", ",", "address", ",", "route_dist", "=", "None", ",", "route_family", "=", "RF_VPN_V4", ")", ":", "if", "route_family", "not", "in", "SUPPORTED_VRF_RF", ":", "raise", "ValueError", "(", "'Unsupported route_family: %s'", "%", "route_family", ")", "func_name", "=", "'neighbor.attribute_map.get'", "param", "=", "{", "neighbors", ".", "IP_ADDRESS", ":", "address", ",", "}", "if", "route_dist", "is", "not", "None", ":", "param", "[", "vrfs", ".", "ROUTE_DISTINGUISHER", "]", "=", "route_dist", "param", "[", "vrfs", ".", "VRF_RF", "]", "=", "route_family", "return", "call", "(", "func_name", ",", "*", "*", "param", ")" ]
34.172414
20.655172
def liste_campagnes(self, campagne=None):
        """
        List the measurement campaigns and their associated stations

        Parameters:
        campagne: if given, only list the stations belonging to that campaign

        """
        condition = ""
        if campagne:
            condition = "WHERE NOM_COURT_CM='%s' """ % campagne
        _sql = """SELECT
        NOM_COURT_CM AS CAMPAGNE,
        IDENTIFIANT AS STATION,
        LIBELLE AS LIBELLE_CM,
        DATEDEB AS DEBUT,
        DATEFIN AS FIN
        FROM CAMPMES
        INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)
        INNER JOIN STATION USING (NOM_COURT_SIT)
        %s ORDER BY DATEDEB DESC""" % condition
        return psql.read_sql(_sql, self.conn)
[ "def", "liste_campagnes", "(", "self", ",", "campagne", "=", "None", ")", ":", "condition", "=", "\"\"", "if", "campagne", ":", "condition", "=", "\"WHERE NOM_COURT_CM='%s' \"", "\"\"", "%", "campagne", "_sql", "=", "\"\"\"SELECT\n NOM_COURT_CM AS CAMPAGNE,\n IDENTIFIANT AS STATION,\n LIBELLE AS LIBELLE_CM,\n DATEDEB AS DEBUT,\n DATEFIN AS FIN\n FROM CAMPMES\n INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)\n INNER JOIN STATION USING (NOM_COURT_SIT)\n %s ORDER BY DATEDEB DESC\"\"\"", "%", "condition", "return", "psql", ".", "read_sql", "(", "_sql", ",", "self", ".", "conn", ")" ]
30.521739
14.869565