content: string (35 to 762k characters)
sha1: string (40 characters)
id: int64 (0 to 3.66M)
def get_eval(appdir, config): """Get an Evaluation object given the configured `GlobalConfig`. """ return core.Evaluation(appdir, config.client, config.reps, config.test_reps, config.simulate)
89b1a7bbbbbf936b622c90635a54ab6517b7bc65
3,653,478
def load_queue_from_disk(filename): """ Load the old queue from disk when started. Old messages that weren't posted yet are read from the queue and processed. """ if os.path.exists(filename): log.msg("Loading queue from %s" % filename) try: with closing(open(filename, 'r')) as fp: data = pickle.load(fp) return data except IOError, e: log.err() backup_filename = "%s.%s" % ( filename, datetime.utcnow().strftime("%Y%m%d_%H%M%S") ) shutil.copyfile(filename, backup_filename) log.err("Couldn't load queue from %s, backed it up to %s" % ( filename, backup_filename )) # return an empty queue, start from scratch. return []
b2641c7c4ad58e683b856d82825f7bd71ec00f91
3,653,479
def ask_ok(title="Confirm", message=""): """Ask the user to confirm something via an ok-cancel question. Parameters: title (str): the text to show as the window title. message (str): the message to show in the body of the dialog. Returns: bool: Whether the user selected "OK". """ if not isinstance(title, string_types): raise TypeError("ask_ok() title must be a string.") if not isinstance(message, string_types): raise TypeError("ask_ok() message must be a string.") return _get_app().ask_ok(title, message)
43e88f56219715a4f292ab6021d08d1e1fbc44de
3,653,480
def indexate(points): """ Create an array of unique points and indexes into this array. Arguments: points: A sequence of 3-tuples Returns: An array of indices and a sequence of unique 3-tuples. """ pd = {} indices = tuple(pd.setdefault(tuple(p), len(pd)) for p in points) pt = sorted([(v, k) for k, v in pd.items()], key=lambda x: x[0]) unique = tuple(i[1] for i in pt) return indices, unique
f78ef40ea9bf6cfe427d366026b633fbb67016a2
3,653,481
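A minimal usage sketch for the `indexate` function above, assuming it is defined in scope; duplicate points collapse onto a single index, and the unique tuples come back in order of first appearance.

# Three points, the first and third identical.
points = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
indices, unique = indexate(points)
print(indices)  # (0, 1, 0)
print(unique)   # ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))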
import ray def get_handle(endpoint_name, relative_slo_ms=None, absolute_slo_ms=None, missing_ok=False): """Retrieve RayServeHandle for service endpoint to invoke it from Python. Args: endpoint_name (str): A registered service endpoint. relative_slo_ms(float): Specify relative deadline in milliseconds for queries fired using this handle. (Default: None) absolute_slo_ms(float): Specify absolute deadline in milliseconds for queries fired using this handle. (Default: None) missing_ok (bool): If true, skip the check for the endpoint existence. It can be useful when the endpoint has not been registered. Returns: RayServeHandle """ if not missing_ok: assert endpoint_name in ray.get( master_actor.get_all_endpoints.remote()) return RayServeHandle( ray.get(master_actor.get_router.remote())[0], endpoint_name, relative_slo_ms, absolute_slo_ms, )
9db603fb9f0069a328f3fce86c2b56eec719dd21
3,653,482
def create_symbolic_controller(states, inputs): """Returns a dictionary with keys that are the joint torque inputs and the values are the controller expressions. This can be used to convert the symbolic equations of motion from 0 = f(x', x, u, t) to a closed loop form 0 = f(x', x, t). Parameters ---------- states : sequence of len 2 * (n + 1) The SymPy time dependent functions for the system states where n is the number of links. inputs : sequence of len n The SymPy time dependent functions for the system joint torque inputs (should not include the lateral force). Returns ------- controller_dict : dictionary Maps joint torques to control expressions. gain_symbols : list of SymPy Symbols The symbols used in the gain matrix. xeq : list of SymPy Symbols The symbols for the equilibrium point. """ num_states = len(states) num_inputs = len(inputs) xeq = sym.Matrix([x.__class__.__name__ + '_eq' for x in states]) K = sym.Matrix(num_inputs, num_states, lambda i, j: sym.Symbol('k_{}{}'.format(i, j))) x = sym.Matrix(states) T = sym.Matrix(inputs) gain_symbols = [k for k in K] # T = K * (xeq - x) -> 0 = T - K * (xeq - x) controller_dict = sym.solve(T - K * (xeq - x), inputs) return controller_dict, gain_symbols, xeq
98d8cc545e6b70dce6161ef6c14d8bc12e0dfe77
3,653,483
def is_gene_name(instance): """This SHOULD check a webservice at HGNC/MGI for validation, but for now this just always returns True.""" ignored(instance) return True
a8a5b4047e8d0d8e70280f54365adf7a5eec20ee
3,653,484
import re def install_package_family(pkg): """ :param pkg: image file name, e.g. asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin :return: device_type of the installed image, e.g. asr900 """ img_dev = None m = re.search(r'(asr\d+)\w*', pkg) if m: img_dev = m.group(1) return img_dev
b344d51ae426e167dbd2397ab93cbf8707b01496
3,653,485
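A usage sketch for `install_package_family` above (assuming it is in scope); the regex extracts the leading "asrNNN" family token from an image file name and returns None when no such token is present.

print(install_package_family("asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin"))  # asr900
print(install_package_family("some-other-image.bin"))  # None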
def get_dendritic_mask_path_from_sessid(maindir, sessid, runtype="prod", check=True): """ get_dendritic_mask_path_from_sessid(maindir, sessid) Returns path to dendritic mask file for the specified session. Required args: - maindir (str): main directory - sessid (int) : session ID Optional args: - runtype (str) : "prod" (production) or "pilot" data default: "prod" - check (bool) : if True, checks whether the files in the output dictionary exist default: True Returns: - maskfile (str): full path name of the extract masks hdf5 file """ sessdir, mouse_dir = get_sess_dir_path(maindir, sessid, runtype) mouseid = get_mouseid(sessdir, mouse_dir) expid = get_expid(sessdir) maskfile = get_dendritic_mask_path( maindir, sessid, expid, mouseid, runtype, mouse_dir, check) return maskfile
3dafdc661f933f93fdfdfa9d7279649ce0d08b01
3,653,486
def abbn_min_vol(): """ Real Name: b'"Ab-bn min vol"' Original Eqn: b'25.6' Units: b'' Limits: (None, None) Type: constant b'' """ return 25.6
9fdde32cf832354b9bda9fe23ab000da66205d60
3,653,487
def clear(self: Client, player: str = None, item_name: str = None, data: int = None, max_count: int = None) -> str: """Clears items from player inventory, including items being dragged by the player. Bedrock Edition implementation. """ return self.run('clear', player, item_name, data, max_count)
3b7975b80f08c1f44c1a49b0a973586859f949bf
3,653,488
def load_glove_embeddings(dim, vocab): """ Load GloVe embedding vectors for all words in our vocabulary. https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/ Parameters ---------- dim : int Dimension of GloVe embeddings. Can be 50, 100, 200 and 300. vocab : dict Dictionary mapping words to index. Returns ------- embeddings_index : dict A dictionary that maps word to embedding vector. """ embeddings_index = dict() lower_dict = [word.lower() for word in vocab.keys()] with open('glove.6B/glove.6B.'+str(dim)+'d.txt', 'r', encoding="utf-8") as f: for line in f: values = line.split() word = values[0] coefs = asarray(values[1:], dtype='float32') # use only low case? GloVe seems to use only low case, but what about NER? if word in vocab: embeddings_index[vocab[word]] = coefs # maybe Word get same embedding as word? elif word in lower_dict: try: embeddings_index[vocab[word.title()]] = coefs except KeyError: continue return embeddings_index
63bf52b86efbb20ade43d144fd674bebd8111901
3,653,489
def check_vat_number(vat_number, country_code=None): """Check if a VAT number is valid. If possible, the VAT number will be checked against available registries. :param vat_number: VAT number to validate. :param country_code: Optional country code. Should be supplied if known, as there is no guarantee that naively entered VAT numbers contain the correct alpha-2 country code prefix for EU countries just as not all non-EU countries have a reliable country code prefix. Default ``None`` prompting detection. :returns: a :class:`VatNumberCheckResult` instance containing the result for the full VAT number check. """ # Decompose the VAT number. vat_number, country_code = decompose_vat_number(vat_number, country_code) if not vat_number or not country_code: return VatNumberCheckResult(False, [ '> Unable to decompose VAT number, resulted in %r and %r' % (vat_number, country_code) ]) # Test the VAT number format. format_result = is_vat_number_format_valid(vat_number, country_code) if format_result is not True: return VatNumberCheckResult(format_result, [ '> VAT number validation failed: %r' % (format_result) ]) # Attempt to check the VAT number against a registry. if country_code not in VAT_REGISTRIES: return VatNumberCheckResult() return VAT_REGISTRIES[country_code].check_vat_number(vat_number, country_code)
142a2dce1def90beed2a222b67f47e9458f97ea0
3,653,490
def argextrema(y, separate=True): """ Deprecated in favor of argrel{min|max} in scipy.signal to get separate extrema in about the same CPU time. If you need a list of all relative extrema in order, using this with separate=False takes about half the time of combining the scipy functions with searchsorted. Returns the indices of the local extrema of a series. When consecutive points at an extreme have the same value, the index of the first is returned. """ delta = y[1:] - y[:-1] pos_neg = np.zeros(len(delta), np.int8) pos_neg[delta > 0] = 1 pos_neg[delta < 0] = -1 curve_sign = pos_neg[1:] - pos_neg[:-1] if separate: argmax = np.nonzero(curve_sign < 0)[0] + 1 argmin = np.nonzero(curve_sign > 0)[0] + 1 return argmin, argmax else: argext = np.nonzero(curve_sign != 0)[0] + 1 return argext
709c045d608c35c3af5ca29131da8629716a07d5
3,653,491
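A usage sketch for `argextrema` above, assuming numpy is imported as np (as the function body requires) and the function is in scope; the series below has a local maximum at index 2 and a local minimum at index 4.

import numpy as np

y = np.array([0.0, 1.0, 2.0, 1.0, 0.0, 1.0, 2.0])
argmin, argmax = argextrema(y)
print(argmin)  # [4]
print(argmax)  # [2]
print(argextrema(y, separate=False))  # [2 4]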
from typing import Union def examine_normal_mode(r_mol: RDKitMol, p_mol: RDKitMol, ts_xyz: np.array, disp: np.array, amplitude: Union[float, list] = 0.25, weights: Union[bool, np.array] = True, verbose: bool = True, as_factors: bool = True): """ Examine a TS's imaginary frequency given a known reactant complex and a product complex. The function checks whether the bond changes correspond to the most significant change in the normal mode. The reactant and product complexes need to be atom mapped. Args: r_mol ('RDKitMol'): the reactant complex. p_mol ('RDKitMol'): the product complex. ts_xyz (np.array): The xyz coordinates of the transition state. It should have a size of N x 3. disp (np.array): The displacement of the normal mode. It should have a size of N x 3. amplitude (float): The amplitude of the motion. Defaults to 0.25. weights (bool or np.array): If ``True``, use sqrt(atom mass) as a scaling factor for the displacement. If ``False``, use identity weights. If an N x 1 ``np.array`` is provided, it is used directly as the weights. Weighting addresses the concern that light atoms (e.g., H) tend to have larger motions than heavier atoms. verbose (bool): Whether to print detailed information. Defaults to ``True``. as_factors (bool): Whether to return the factor values instead of a boolean judgment. Defaults to ``True``. Returns: - bool: ``True`` if the examination passes, ``False`` otherwise. - list: If ``as_factors == True``, the two factors are returned instead. """ # Analyze connectivity broken, formed, changed = get_all_changing_bonds(r_mol, p_mol) reacting_bonds = broken + formed + changed # Generate weights if isinstance(weights, bool) and weights: atom_masses = np.array(r_mol.GetAtomMasses()).reshape(-1, 1) weights = np.sqrt(atom_masses) elif isinstance(weights, bool) and not weights: weights = np.ones((ts_xyz.shape[0], 1)) # Generate conformer instances according to the displacement xyzs = ts_xyz - amplitude * disp * weights, ts_xyz + amplitude * disp * weights r_copy = r_mol.Copy(); r_copy.SetPositions(xyzs[0]) p_copy = p_mol.Copy(); p_copy.SetPositions(xyzs[1]) r_conf, p_conf = r_copy.GetConformer(), p_copy.GetConformer() # Calculate bond distance change formed_and_broken_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond)) for bond in broken + formed] changed_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond)) for bond in changed] other_bonds_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond)) for bond in r_copy.GetBondsAsTuples() if bond not in reacting_bonds] # Bonds that are formed or broken in the reaction are expected to have relatively large changes; # bonds that only change their bond order may have a smaller factor. # In this function, we only use the larger factor as a check. # The smaller factor is less deterministic, considering the change in # other bonds due to the change of atom hybridization or bond conjugation. baseline = np.max(other_bonds_diff) std = np.std(other_bonds_diff) larger_factor = (np.min(formed_and_broken_diff) - baseline) / std if changed_diff: # There might be no bond that only changes its order smaller_factor = (np.min(changed_diff) - baseline) / std else: smaller_factor = 0 if verbose: print(f'The min. bond distance change for bonds that are broken or formed' f' is {np.min(formed_and_broken_diff)} A and is {larger_factor:.1f} STD off the baseline.') if changed_diff: print(f'The min. bond distance change for bonds that are changed' f' is {np.min(changed_diff)} A and is {smaller_factor:.1f} STD off the baseline.') if as_factors: return larger_factor, smaller_factor if larger_factor > 3: return True return False
96fc2f4153dd231756a88e46ee608a0f54d6dabc
3,653,492
def generate_sprites(factor_dist, num_sprites=1): """Create callable that samples sprites from a factor distribution. Args: factor_dist: The factor distribution from which to sample. Should be an instance of factor_distributions.AbstractDistribution. num_sprites: Int or callable returning int. Number of sprites to generate per call. Returns: _generate: Callable that returns a list of Sprites. """ def _generate(): n = num_sprites() if callable(num_sprites) else num_sprites sprites = [sprite.Sprite(**factor_dist.sample()) for _ in range(n)] return sprites return _generate
8c09b3fe9916d0d8bc4094d62de3910de800f835
3,653,493
import warnings def recode_from_index_mapper(meta, series, index_mapper, append): """ Convert a {value: logic} map to a {value: index} map. This function takes a mapper of {key: logic} entries and resolves the logic statements using the given meta/data to return a mapper of {key: index}. The indexes returned can be used on data to isolate the cases described by arbitrarily complex logical statements. Parameters ---------- meta : dict Quantipy meta document. series : pandas.Series The series in which the recoded data will be stored and returned. index_mapper : dict A mapper of {key: index} append : bool Should the new recodd data be appended to items already found in series? If False, data from series (where found) will overwrite whatever was found for that item in ds1 instead. Returns ------- series : pandas.Series The series in which the recoded data will be stored and returned. """ qtype = meta['columns'][series.name]['type'] if qtype in ['delimited set']: if series.dtype in ['int64', 'float64']: not_null = series.notnull() if len(not_null) > 0: series.loc[not_null] = series.loc[not_null].map(str) + ';' if index_mapper: cols = [str(c) for c in sorted(index_mapper.keys())] else: vals = meta['columns'][series.name]['values'] codes = [c['value'] for c in vals] cols = [str(c) for c in codes] ds = pd.DataFrame(0, index=series.index, columns=cols) for key, idx in index_mapper.iteritems(): ds[str(key)].loc[idx] = 1 ds2 = condense_dichotomous_set(ds) org_name = series.name series = join_delimited_set_series(series, ds2, append) ## Remove potential duplicate values if series.dropna().empty: warn_msg = 'Could not recode {}, found empty data column dependency!'.format(org_name) warnings.warn(warn_msg) return series ds = series.str.get_dummies(';') # Make sure columns are in numeric order ds.columns = [int(float(c)) for c in ds.columns] cols = sorted(ds.columns.tolist()) ds = ds[cols] ds.columns = [str(i) for i in ds.columns] # Reconstruct the dichotomous set series = condense_dichotomous_set(ds) elif qtype in ['single', 'int', 'float']: for key, idx in index_mapper.iteritems(): series.loc[idx] = key else: raise TypeError( "Can't recode '{col}'. Recoding for '{typ}' columns is not" " yet supported.".format(col=series.name, typ=qtype) ) return series
e8d2afc8536f552e2af277b60af47f8b8c07d961
3,653,494
def get_variables(): """Loads ODAHU config as Robot variable """ return {'CONFIG': {var: getattr(config, var) for var in config.ALL_VARIABLES}}
78ae110fdbe2837df00b06e47132b0ceda3648dd
3,653,495
import string def is_number(char: Text) -> bool: """Checks if char is number. Returns Boolean.""" return char in string.digits
4bec510537057c8f6a48f35c6d0b6d9f300c00b7
3,653,496
def sliceData(data, slicebox=[None,None,None,None]): """ Sum 2d data along both axes and return 1d datasets **Inputs** data (sans2d) : data in slicebox (range?:xy): region over which to integrate (in data coordinates) **Returns** xout (sans1d) : xslice yout (sans1d) : yslice 2018-04-20 Brian Maranville """ if slicebox is None: slicebox = [None, None, None, None] xmin, xmax, ymin, ymax = slicebox res = data.copy() if data.qx is None or data.qy is None: # then use pixels xslice = slice(int(np.ceil(xmin)) if xmin is not None else None, int(np.floor(xmax)) if xmax is not None else None) yslice = slice(int(np.ceil(ymin)) if ymin is not None else None, int(np.floor(ymax)) if ymax is not None else None) x_in = np.arange(data.data.x.shape[0]) y_in = np.arange(data.data.x.shape[1]) x_out = x_in[xslice] y_out = y_in[yslice] dx = np.zeros_like(x_out) dy = np.zeros_like(y_out) else: # then use q-values qxmin = data.qx_min if data.qx_min is not None else data.qx.min() qxmax = data.qx_max if data.qx_max is not None else data.qx.max() qx_in = np.linspace(qxmin, qxmax, data.data.x.shape[0]) qymin = data.qy_min if data.qy_min is not None else data.qy.min() qymax = data.qy_max if data.qy_max is not None else data.qy.max() qy_in = np.linspace(qymin, qymax, data.data.x.shape[1]) xslice = slice(get_index(qx_in, xmin), get_index(qx_in, xmax)) yslice = slice(get_index(qy_in, ymin), get_index(qy_in, ymax)) x_out = qx_in[xslice] y_out = qy_in[yslice] dx = np.zeros_like(x_out) dy = np.zeros_like(y_out) dataslice = (xslice, yslice) x_sum = uncertainty.sum(data.data[dataslice], axis=1) y_sum = uncertainty.sum(data.data[dataslice], axis=0) x_output = Sans1dData(x_out, x_sum.x, dx=dx, dv=x_sum.variance, xlabel=data.xlabel, vlabel="I", xunits="", vunits="neutrons", metadata=data.metadata) y_output = Sans1dData(y_out, y_sum.x, dx=dy, dv=y_sum.variance, xlabel=data.ylabel, vlabel="I", xunits="", vunits="neutrons", metadata=data.metadata) return x_output, y_output
1d30a500a29c1803eb6982bb7442f9e328e3f245
3,653,497
def GetChangeUrl(host, change): """Given a Gerrit host name and change ID, returns a URL for the change.""" return '%s://%s/a/changes/%s' % (GERRIT_PROTOCOL, host, change)
61ff03daa28b22ca88ab2b2f67ec18ab9617c691
3,653,498
def recipe_clone_message(recipe): """ Renders the recipe clone message. """ return dict(recipe=recipe)
09728b431966b12415861a212f2cb85af475dc37
3,653,500
def read_expression_file(file): """Reads a file with the expression profiles.""" D = [] genes = [] with open(file) as fp: firstline = fp.readline() classes = [c.strip() for c in firstline.split("\t")[1:]] for line in fp.readlines(): items = [w.strip() for w in line.split("\t")] genes.append(items[0]) D.append([int(x) for x in items[1:]]) class_a = classes[0] C = [int(c == class_a) for c in classes] D = np.array(D) return genes, D, C
aa3465855eb75a731801660e8f7b22091aae0a36
3,653,501
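A usage sketch for `read_expression_file` above, assuming numpy is available as np and the function is in scope; it writes a small tab-separated file in the expected layout (a header row of class labels, then one row per gene) to a temporary location and reads it back.

import os
import tempfile

content = "gene\tA\tA\tB\ngene1\t1\t2\t3\ngene2\t4\t5\t6\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(content)
genes, D, C = read_expression_file(tmp.name)
os.remove(tmp.name)
print(genes)  # ['gene1', 'gene2']
print(D)      # [[1 2 3] [4 5 6]]
print(C)      # [1, 1, 0]  -> 1 marks samples of the first class ('A')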
def train(X, Y, n_h, num_iterations=10000, print_cost=False): """ Define the neural network model, combining the previous operations. Args: X: input values Y: ground-truth values n_h: hidden layer size / number of nodes num_iterations: number of training iterations print_cost: if set to True, print the cost every 1000 iterations Return: parameters: parameters learned by the model, used for prediction """ np.random.seed(3) n_x = layer_sizes(X, Y)[0] n_y = layer_sizes(X, Y)[2] # Initialize parameters from n_x, n_h, n_y and extract W1, b1, W2, b2 parameters = initialize_parameters(n_x, n_h, n_y) W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] for i in range(0, num_iterations): # Forward propagation. Input: "X, parameters". Output: "A2, cache". A2, cache = forward_propagation(X, parameters) # Cost computation. Input: "A2, Y, parameters". Output: "cost". cost = calculate_cost(A2, Y, parameters) # Backward propagation. Input: "parameters, cache, X, Y". Output: "grads". grads = backward_propagation(parameters, cache, X, Y) # Parameter update. Input: "parameters, grads". Output: "parameters". parameters = update_parameters(parameters, grads) # Print the cost every 1000 iterations if print_cost and i % 1000 == 0: print("Cost after iteration %i: %f" % (i, cost)) return parameters
57efdadd744b9801227da87aed8ca458e2990c5c
3,653,502
def get_drawdowns(cum_returns): """ Computes the drawdowns of the cumulative returns. Parameters ---------- cum_returns : Series or DataFrame, required a Series or DataFrame of cumulative returns Returns ------- Series or DataFrame """ cum_returns = cum_returns[cum_returns.notnull()] highwater_marks = cum_returns.expanding().max() drawdowns = cum_returns/highwater_marks - 1 return drawdowns
1f4da9e405b8b4f8a691b09e42e479cd6fdec3ae
3,653,503
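A usage sketch for `get_drawdowns` above, assuming pandas is imported as pd; the input is a series of cumulative growth factors, and the dip on the third day shows up as a -10% drawdown from the running high-water mark.

import pandas as pd

cum_returns = pd.Series([1.00, 1.10, 0.99, 1.21],
                        index=pd.date_range("2021-01-01", periods=4))
print(get_drawdowns(cum_returns).round(3))
# 2021-01-01    0.0
# 2021-01-02    0.0
# 2021-01-03   -0.1
# 2021-01-04    0.0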
def calc_recipe_quantity_ratio( first_month: str, first_recipe: str, second_recipe: str, file_name: str, second_month: str = None) -> float: """ A function which calculates the ratio of quantities between two recipes, optionally across two months. :param first_month: str :param first_recipe: str :param second_recipe: str :param file_name: str :param second_month: str :return: ratio: float """ if first_month not in VALID_MONTH: raise ValueError("Date must be one of %s." % VALID_MONTH) elif first_recipe not in VALID_RECIPE or second_recipe not in VALID_RECIPE: raise ValueError("Recipe must be one of %s." % VALID_RECIPE) else: if second_month is None: second_month: str = first_month first_quantity: int = calc_month_quantity_by_recipe(first_month, first_recipe, file_name) second_quantity: int = calc_month_quantity_by_recipe(second_month, second_recipe, file_name) ratio = round(first_quantity / second_quantity, 2) return ratio
284fd4c010c933523967a11f774fc7e220198e7f
3,653,504
def teacher_add_to_db(): """Adds a teacher to database Returns: Redirect: Redirects to teachers list route """ if request.method == "POST": fet_name = request.form["fet_name"] fullname = request.form["fullname"] teacher_email = request.form["t_email"] try: teacher_obj = Teacher(teacher_email=teacher_email, fet_name=fet_name, fullname=fullname) db.session.add(teacher_obj) db.session.commit() flash(("Teacher {} added successfully.".format(fet_name)), category="success") return redirect(url_for('teacher_list')), 302 except Exception as e: flash("Exception: {}".format(str(e)), category="danger") return redirect(url_for("teacher_list")), 302
224f95f00e88dce00d21406883dd5655ed9e8fbd
3,653,505
def authorize(app_id, channel_id, team_id): """Just double check if this app is invoked from the expected app/channel/team""" if app_id != SLACK_APP_ID: return f"app ID {app_id}" if team_id not in SLACK_TEAM_IDS: return f"team ID {team_id}" if channel_id not in SLACK_CHANNEL_IDS: return f"channel ID {channel_id}"
ff1f43a2073e7a0a54bda709b41fce02475c48ec
3,653,506
import random def deal_one_card(): """ returns a random card from the deck """ cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10] return random.choice(cards)
e8836c6569ed5c9e48043c9b750e730c42781a14
3,653,507
def grey_pal(start=0.2, end=0.8): """ Utility for creating continuous grey scale palette Parameters ---------- start : float grey value at low end of palette end : float grey value at high end of palette Returns ------- out : function Continuous color palette that takes a single :class:`int` parameter ``n`` and returns ``n`` equally spaced colors. Examples -------- >>> palette = grey_pal() >>> palette(5) ['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc'] """ gamma = 2.2 ends = ((0.0, start, start), (1.0, end, end)) cdict = {'red': ends, 'green': ends, 'blue': ends} grey_cmap = mcolors.LinearSegmentedColormap('grey', cdict) def continuous_grey_palette(n): # The grey scale points are linearly separated in # gamma encoded space x = np.linspace(start**gamma, end**gamma, n) # Map points onto the [0, 1] palette domain vals = (x ** (1./gamma) - start) / (end - start) return ratios_to_colors(vals, grey_cmap) return continuous_grey_palette
f38295a48120a3e17b000276797bfec78a644749
3,653,508
from typing import Optional from typing import Union from typing import Any from typing import Dict def get_parameter_value_and_validate_return_type( domain: Optional[Domain] = None, parameter_reference: Optional[Union[Any, str]] = None, expected_return_type: Optional[Union[type, tuple]] = None, variables: Optional[ParameterContainer] = None, parameters: Optional[Dict[str, ParameterContainer]] = None, ) -> Optional[Any]: """ This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.) or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value. """ if isinstance(parameter_reference, dict): parameter_reference = safe_deep_copy(data=parameter_reference) parameter_reference = get_parameter_value( domain=domain, parameter_reference=parameter_reference, variables=variables, parameters=parameters, ) if expected_return_type is not None: if not isinstance(parameter_reference, expected_return_type): raise ge_exceptions.ProfilerExecutionError( message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \ (value of type "{str(type(parameter_reference))}" was encountered). """ ) return parameter_reference
be1ab4c90942d69083b765194baa494658265275
3,653,511
def add_leaf_to_edge(t): """ Returns a `Shape` instance with a new root; both a new leaf and the input `Shape` pend from it. :param t: `Shape` instance. :return: `Shape` instance. """ return Shape([Shape.LEAF, t])
18d4a383dcc2873e506677f76501c76e36b99ac7
3,653,512
def create_simulation(parameter_values=None, experiment=None, make_inputs=False): """ Create a PyBaMM simulation set up for interation with liionpack Parameters ---------- parameter_values : :class:`pybamm.ParameterValues` The default is None. experiment : :class:`pybamm.Experiment` The default is None. make_inputs : bool, optional Changes "Current function [A]" and "Total heat transfer coefficient [W.m-2.K-1]" to be inputs that are controlled by liionpack. The default is False. Returns ------- sim : :class:`pybamm.Simulation` A simulation that can be solved individually or passed into the liionpack solve method """ # Create the pybamm model model = pybamm.lithium_ion.SPMe( options={ "thermal": "lumped", } ) # Set up parameter values if parameter_values is None: chemistry = pybamm.parameter_sets.Chen2020 parameter_values = pybamm.ParameterValues(chemistry=chemistry) # Change the current function and heat transfer coefficient to be # inputs controlled by the external circuit if make_inputs: parameter_values.update( { "Current function [A]": "[input]", "Total heat transfer coefficient [W.m-2.K-1]": "[input]", }, ) # Set up solver and simulation solver = pybamm.CasadiSolver(mode="safe") sim = pybamm.Simulation( model=model, experiment=experiment, parameter_values=parameter_values, solver=solver, ) return sim
0faf24958440e42b64ef8cc82b9ec478d4899dd2
3,653,513
def _logistic_loss_and_grad(w, X, y, alpha, mask, sample_weight=None): """Computes the logistic loss and gradient. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. alpha : float Regularization parameter. alpha is equal to 1 / C. mask : array-like, shape (n_features), (n_classes, n_features) optional Masking array for coef. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- out : float Logistic loss. grad : ndarray, shape (n_features,) or (n_features + 1,) Logistic gradient. """ n_samples, n_features = X.shape if mask is not None: w[:n_features] *= mask grad = np.empty_like(w) w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(n_samples) # Logistic loss is the negative of the log of the logistic function. out = -np.sum(sample_weight * log_logistic(yz)) / n_samples out += .5 * alpha * np.dot(w, w) z = expit(yz) z0 = sample_weight * (z - 1) * y grad[:n_features] = (safe_sparse_dot(X.T, z0) / n_samples) + alpha * w if mask is not None: grad[:n_features] *= mask # Case where we fit the intercept. if grad.shape[0] > n_features: grad[-1] = z0.sum() / n_samples return out, grad
39a05f090807a6eb83000e6daf5f3719bbbe9aa1
3,653,514
import requests def verify_status_code(request_response: requests.Response) -> tuple: """Verify the status code of the post request to the search url and raise exceptions if the code is unexpected :type request_response: requests.Response :return: """ if request_response.status_code == 200: return STATUS_CODE_OK, '' elif request_response.status_code == 429: if 'user\'s rate limit' in request_response.text: msg = "Search rate limit reached" return STATUS_CODE_REPEAT, msg if 'limit of 150 searches' in request_response.text: raise DailyLimitReachedException('Daily search limit for unregistered users reached') elif 'limit of 300 searches' in request_response.text: raise DailyLimitReachedException('Daily search limit for basic users reached') else: raise DailyLimitReachedException('Daily search limit reached') elif request_response.status_code == 403: raise InvalidOrWrongApiKeyException("Invalid or wrong API key") elif request_response.status_code == 413: msg = "Payload too large, skipping file" return STATUS_CODE_SKIP, msg else: msg = "Unknown status code: {0:d}".format(request_response.status_code) return STATUS_CODE_REPEAT, msg
b5f686dfe11d7fd4bd9d5f32ccd51df9f2322a13
3,653,515
def instability_product_graphs(gra): """ Determine if the species has look for functional group attachments that could cause molecule instabilities """ # Build graphs for the detection scheme rad_grp_dct = radical_group_dct(gra) # Check for instability causing functional groups prd_gras = () for atm, grps in rad_grp_dct.items(): if atm in instab_fgrps.DCT: fgrps, prds = instab_fgrps.DCT[atm] for grp in grps: grp_ich = automol.graph.inchi(grp) if grp_ich in fgrps: # If instability found, determine prod of the instability prd_ich = prds[fgrps.index(grp_ich)] prd_geo = automol.inchi.geometry(prd_ich) prd_gra = automol.geom.graph(prd_geo) prd_gras = radical_dissociation_prods( gra, prd_gra) break return prd_gras
42aba79627bd6343bd2e99bca9a099d1047a9f5c
3,653,516
def build_pdb_rmsd_matrix(pdb_paths, pdb_diff_path=None): """ Returns rmsd difference matrix for multiple pdb files. Returns rmsd_list (3-item list), pdb_comp_amount (int). Optional with pdb_diff_path return pdb_diff_comp(int). """ # make 3 column list or ndarray for x, y = (pdb1-n * pdb1-n) and z = rmsd diff rmsd_list = [[], [], []] # get rmsd difference between each pdb file in nested loop and append for pdb0 in pdb_paths: # compare 2 different sets of pdb files if pdb_diff_path != None: for pdb1 in pdb_diff_path: # append to x (col 0) pdb in outer loop rmsd_list[0].append(pdb_paths.index(pdb0) + 1) # append to y (col 1) pdb in inner loop rmsd_list[1].append(pdb_diff_path.index(pdb1) + 1) # find and append to z (col 2) rmsd value between pdb0 and pdb1 rmsd = rmsd_diff_calc(pdb0, pdb1) #print(f"\n For PDB-A = {pdb0} and PDB-B = {pdb1} : RMSD = {rmsd}") rmsd_list[2].append(rmsd) elif pdb_diff_path == None: for pdb1 in pdb_paths: # append to x (col 0) pdb in outer loop rmsd_list[0].append(pdb_paths.index(pdb0) + 1) # append to y (col 1) pdb in inner loop rmsd_list[1].append(pdb_paths.index(pdb1) + 1) # find and append to z (col 2) rmsd value between pdb0 and pdb1 rmsd = rmsd_diff_calc(pdb0, pdb1) rmsd_list[2].append(rmsd) # amount of pdb files to compare to each other pdb_comp_amount = len(pdb_paths) if pdb_diff_path == None: return rmsd_list, pdb_comp_amount elif pdb_diff_path !=None: pdb_diff_comp = len(pdb_diff_path) return rmsd_list, pdb_comp_amount, pdb_diff_comp
fe00efa6a853d0e77ae52c21267376eca25a110c
3,653,517
from typing import Optional def expected_response(y: np.ndarray, w: np.ndarray, policy: np.ndarray, mu: Optional[np.ndarray]=None, ps: Optional[np.ndarray]=None) -> float: """Estimate expected response. Parameters ---------- y: array-like of shape = (n_samples) Observed target values. w: array-like of shape = shape = (n_samples) Treatment assignment variables. policy: array-like of shape = (n_samples) Estimated treatment policy. mu: array-like of shape = (n_samples, n_trts), optional Estimated potential outcomes. ps: array-like of shape = (n_samples, n_trts), optional Estimated propensity scores. Returns ------- expected_response: float Estimated expected_response. """ mu = np.zeros((w.shape[0], np.unique(w).shape[0])) if mu is None else mu ps = pd.get_dummies(w).mean(axis=0).values if ps is None else ps indicator = np.array(w == policy, dtype=int) expected_response = np.mean(mu[np.arange(w.shape[0]), policy] + (y - mu[np.arange(w.shape[0]), policy]) * indicator / ps[w]) return expected_response
ffa6938914480f236d8b0aff7ebc993a2e714682
3,653,519
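A usage sketch for `expected_response` above, assuming numpy as np and pandas as pd are available (the defaults for mu and ps rely on them); two treatments (0/1), a policy that always assigns treatment 1, and no estimated outcomes or propensities, so mu defaults to zeros and ps to the empirical treatment shares.

import numpy as np

y = np.array([1.0, 0.0, 2.0, 1.0])      # observed outcomes
w = np.array([1, 0, 1, 0])               # assigned treatments
policy = np.array([1, 1, 1, 1])          # policy being evaluated
print(expected_response(y, w, policy))   # 1.5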
def get_type_for_field(field: Field) -> type: """ For optional fields, the field type_ is a :class:`typing.Union`, of ``NoneType`` and the actual type. Here we extract the "actual" type from a Union with None """ if not field.sub_fields: return field.type_ for f in field.sub_fields: if f.type_ != type(None): # noqa return f.type_ raise Exception(f"No type found for field: {field}")
2d0163b6e92bcd67c2aaee60d7088cc70e9bd09b
3,653,520
from typing import List def read_program_data(program: List[str]) -> int: """Read program data from port computer system. Args: program (List[str]): the program code containing masks and memory Returns: int: sum of all values in memory """ memory = defaultdict(int) for line in program: if line.startswith('mask'): _, mask = line.split(' = ') ones = remove_leading_zeroes( [1 if c == '1' else 0 for c in mask] ) floating = remove_leading_zeroes( [1 if c == 'X' else 0 for c in mask] ) mask_len = 36 # This is hard-coded currently and may change # if this problem is used in a new context. else: address, value = [int(n) for n in MEM_RE.match(line).groups()] address = [int(a) for a in bin(address)[2:]] if len(address) < mask_len: address = add_leading_zeroes(address, mask_len - len(address)) try: if 1 in ones: address = mask_values(ones, address, 1) except TypeError: pass if 1 in floating: addresses = mask_floating(floating, address) for address in addresses: address = int(''.join([str(a) for a in address]), base=2) memory[address] = value return sum(memory.values())
7852d13f1de9cc04ee4170a10807b49ed2592905
3,653,521
def get_mgr_worker_msg(comm, status=None): """Get message to worker from manager. """ status = status or MPI.Status() comm.probe(source=0, tag=MPI.ANY_TAG, status=status) tag = status.Get_tag() if tag in [STOP_TAG, PERSIS_STOP]: return tag, None, None Work = comm.recv(buf=None, source=0, tag=MPI.ANY_TAG, status=status) calc_in = comm.recv(buf=None, source=0) return tag, Work, calc_in
76bf2be4707052ab5ea0f447822208eccb19f9ef
3,653,522
import time def retry(exceptions, tries=4, delay=3, backoff=2, logger=None): """ Retry calling the decorated function using an exponential backoff. Args: exceptions: The exception to check. may be a tuple of exceptions to check. tries: Number of times to try (not retry) before giving up. delay: Initial delay between retries in seconds. backoff: Backoff multiplier (e.g. value of 2 will double the delay each retry). logger: Logger to use. If None, print. """ def deco_retry(f): @wraps(f) def f_retry(*args, **kwargs): mtries, mdelay = tries, delay while mtries > 1: try: return f(*args, **kwargs) except exceptions as e: msg = '{}, Retrying in {} seconds...'.format(e, mdelay) if logger: logger.warning(msg) else: print(msg) time.sleep(mdelay) mtries -= 1 mdelay *= backoff return f(*args, **kwargs) return f_retry # true decorator return deco_retry
8c0917ad45b2c000ced926f0457b9b9aebbc4543
3,653,523
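A usage sketch for the `retry` decorator above; note the snippet uses @wraps(f) but only imports time, so `from functools import wraps` is assumed to be present. The wrapped function fails twice and then succeeds, and the decorator prints a retry message for each failure before returning the result.

_attempts = {"n": 0}

@retry(ValueError, tries=4, delay=0.01, backoff=2)
def flaky():
    _attempts["n"] += 1
    if _attempts["n"] < 3:
        raise ValueError("not ready yet")
    return "ok"

print(flaky())         # prints two retry messages, then: ok
print(_attempts["n"])  # 3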
def load_dictionary(dicttimestamp, server='postgres-cns-myaura'): """ Load dictionary from database Args: dicttimestamp (string): the version of dictionary (ex: 20210131) server (string): the server name in db_config.ini Returns: tuple (termdictparser, pandas.DataFrame): A TermDictParser and a pandas dataframe containing the dictionary. """ print('--- Loading {server:s} dictionary ({dicttimestamp:s}) ---'.format(server=server, dicttimestamp=dicttimestamp)) # if 'postgres' in server: engine = db.connectToPostgreSQL(server=server) tablename = 'dictionaries.dict_%s' % (dicttimestamp) sql = """ SELECT d.id, COALESCE(d.id_parent,d.id) AS id_parent, d.dictionary, d.token, COALESCE(p.token, d.token) as parent, d.type, d.source, d.id_original, COALESCE(p.id_original, d.id_original) as id_original_parent FROM %s d LEFT JOIN %s p ON d.id_parent = p.id WHERE d.enabled > 0""" % (tablename, tablename) elif 'mysql' in server: engine = db.connectToMySQL(server=server) tablename = 'dict_%s' % (dicttimestamp) sql = """ SELECT d.id, IFNULL(d.id_parent,d.id) AS id_parent, d.dictionary, d.token, IFNULL(p.token, d.token) as parent, d.type, d.source, d.id_original, IFNULL(p.id_original, d.id_original) as id_original_parent FROM %s d LEFT JOIN %s p ON d.id_parent = p.id WHERE d.enabled = True""" % (tablename, tablename) else: raise TypeError("Invalid server name. The name of the server must contain either a 'mysql' or 'postgress' string.") df = pd.read_sql(sql, engine, index_col='id') return df
63df416815386c1bf4d6a820c98490ae5a6e4d08
3,653,524
def part1(data): """ >>> part1(((20, 30), (-10, -5))) 45 >>> part1(INPUT) 13203 """ target_x, target_y = data best = None for dx in range(1, max(target_x) + 1): for dy in range(0, - min(target_y) + 1): hit_target, height = trajectory(target_x, target_y, dx, dy) if hit_target: if best is None: best = height else: best = max(best, height) return best
293dd006caa20471cc849c1366f0610594279b8b
3,653,525
def get_neighbors(p, exclude_p=True, shape=None, nNeighbors=1, get_indices=False, direction=None, get_mask=False): """Determine pixel coordinates of neighboring pixels. Includes also all pixels that neighbor diagonally. Parameters ---------- p : tuple Gives the coordinates (y, x) of the central pixel exclude_p : boolean Whether or not to exclude the pixel with position p from the resulting list. shape : tuple Describes the dimensions of the total array (NAXIS2, NAXIS1). Returns ------- neighbors: numpy.ndarray Contains all pixel coordinates of the neighboring pixels [[y1, x1], [y2, x2], ...] Adapted from: https://stackoverflow.com/questions/34905274/how-to-find-the-neighbors-of-a-cell-in-an-ndarray """ ndim = len(p) n = nNeighbors*2 + 1 # generate an (m, ndims) array containing all combinations of 0, 1, 2 offset_idx = np.indices((n,) * ndim).reshape(ndim, -1).T # use these to index into np.array([-1, 0, 1]) to get offsets lst = list(range(-(nNeighbors), nNeighbors + 1)) offsets = np.r_[lst].take(offset_idx) if direction == 'horizontal': indices = np.where(offsets[:, 0] == 0) elif direction == 'vertical': indices = np.where(offsets[:, 1] == 0) elif direction == 'diagonal_ul': indices = np.where(offsets[:, 0] == offsets[:, 1]) elif direction == 'diagonal_ur': indices = np.where(offsets[:, 0] == -offsets[:, 1]) if direction is not None: offsets = offsets[indices] # optional: exclude offsets of 0, 0, ..., 0 (i.e. p itself) if exclude_p: offsets = offsets[np.any(offsets, 1)] neighbours = p + offsets # apply offsets to p # optional: exclude out-of-bounds indices if shape is not None: valid = np.all((neighbours < np.array(shape)) & (neighbours >= 0), axis=1) neighbours = neighbours[valid] if get_mask: return valid if get_indices: indices_neighbours = np.array([]) for neighbour in neighbours: indices_neighbours = np.append( indices_neighbours, np.ravel_multi_index(neighbour, shape)).astype('int') return indices_neighbours return neighbours
4dc68ed4f44667253c4bdb114a0d3034a65ef725
3,653,526
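A usage sketch for `get_neighbors` above, assuming numpy is available as np; it returns the 8-connected neighbours of the corner pixel (0, 0) of a 3x3 array, with out-of-bounds coordinates removed.

print(get_neighbors((0, 0), shape=(3, 3)))
# [[0 1]
#  [1 0]
#  [1 1]]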
import torch def make_coordinate_grid(spatial_size, type): """ Create a meshgrid [-1,1] x [-1,1] of given spatial_size. """ h, w = spatial_size x = torch.arange(w).type(type) y = torch.arange(h).type(type) x = (2 * (x / (w - 1)) - 1) y = (2 * (y / (h - 1)) - 1) yy = y.view(-1, 1).repeat(1, w) xx = x.view(1, -1).repeat(h, 1) meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2) return meshed
0bbbd2f0e0d588b58feebce19b3f2fd9c84934d8
3,653,527
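A usage sketch for `make_coordinate_grid` above (torch is imported in the snippet); a 2x3 grid where each entry holds an (x, y) pair, with both axes normalised to [-1, 1].

import torch

grid = make_coordinate_grid((2, 3), torch.float32)
print(grid.shape)   # torch.Size([2, 3, 2])
print(grid[0, 0])   # tensor([-1., -1.])
print(grid[1, 2])   # tensor([1., 1.])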
def add_wrong_column(data_frame): """ Adds wrong column to dataframe :params dataframe data_frame: :returns dataframe: """ new_df = data_frame.copy() new_df['Ducks'] = 0 return new_df
0f3ae838c0975e8021cfeee258576afac75072c5
3,653,528
def p2l(X, Y, D, tol, inputTransform): """ Computes the Procrustean point-line registration between X and Y+nD with anisotropic Scaling, where X is a mxn matrix, m is typically 3 Y is a mxn matrix denoting line origin, same dimension as X D is a mxn normalized matrix denoting line direction R is a mxm rotation matrix, A is a mxm diagonal scaling matrix, and t is a mx1 translation vector Q is a mxn fiducial on line that is closest to X after registration fre is the fiducial localization error based on the Majorization Principle """ [m,n] = X.shape err = np.Infinity E_old = 1000000 * np.ones((m,n)) e = np.ones((1,n)) # intialization Q = Y # normalize the line orientation just in case Dir = D/np.linalg.norm(D, ord=2,axis=0,keepdims=True) while err > tol: [R, t, A] = AOPA_Major(X, Q, tol) E = Q-np.matmul(R,np.matmul(A,X))-np.matmul(t,e) # project point to line Q = Y+Dir*np.tile(np.einsum('ij,ij->j',np.matmul(R,np.matmul(A,X))+np.matmul(t,e)-Y,Dir),(m,1)) err = np.linalg.norm(E-E_old) E_old = E E = Q - np.matmul(R, np.matmul(A,X)) - np.matmul(t,e) # calculate fiducial registration error fre = np.sum(np.linalg.norm(E,ord=2,axis=0,keepdims=True))/X.shape[1] lps2ras = np.diag([-1, -1, 1, 1]) data = np.eye(4) data[0:3, 3] = t.T data[:3, :3] = np.dot(R, A) data = np.dot(data, lps2ras) transform_matrix = vtk.vtkMatrix4x4() dimensions = len(data) - 1 for row in range(dimensions): for col in range(dimensions + 1): transform_matrix.SetElement(row, col, data[(row, col)]) inputTransform.SetMatrixTransformToParent(transform_matrix) return [R,t,A,Q,fre, inputTransform]
5b7b1143fd7dbbeae6767f8ef5f71464eb6220a0
3,653,529
def config_database(db_name): """ Create a database in sqlite3 :param db_name: The name of the file for the database :return: A database objetc and his connections object """ db = Database() connection = db.create_connection(db_name) db.create_table(connection) return db, connection
42602f32a3cca0dfbbc791973acbf6279af7cde3
3,653,530
from pathlib import Path def parse_main_argument(argument, export_folder): """Function parsing the main_argument argument. Returns a dataframe containing the search terms (or the urls if main_argument is a youtube file.""" # File or string if Path(argument).is_file(): is_file = True argument_file_content = open(argument).read() # File of urls or search terms is_spotify = ( "spotify" in argument_file_content and argument_file_content.startswith("http") ) is_deezer = ( "deezer" in argument_file_content and argument_file_content.startswith("http") ) is_youtube = ( "youtu" in argument_file_content and argument_file_content.startswith("http") ) else: is_file = False is_spotify = "spotify" in argument is_deezer = "deezer" in argument # would be equivalent to argument youtube_url, doesn't exist is_youtube = False if is_spotify: if is_file: terms = extract_terms_from_file(argument) df = get_spotify_songs(terms) logger.info("Reading file containing spotify urls at %s.", argument) else: terms = extract_terms_from_arg(argument) df = get_spotify_songs(terms) logger.info("Reading spotify urls %s.", argument) elif is_deezer: if is_file: terms = extract_terms_from_file(argument) df = get_deezer_songs(terms) logger.info("Reading file containing deezer urls at %s.", argument) else: terms = extract_terms_from_arg(argument) df = get_deezer_songs(terms) logger.info("Reading deezer urls %s.", argument) elif is_youtube: if is_file: df = pd.read_csv(argument, sep="\t", header=None, names=["url"]) logger.info("Reading file containing youtube urls at %s.", argument) else: if is_file: df = pd.read_csv(argument, sep="\t", header=None, names=["title"]) logger.info("Reading file containing search terms at %s.", argument) else: df = pd.DataFrame( [x.strip() for x in argument.split(",")], columns=["title"] ) logger.info("Reading search terms %s.", argument) return df
f3cbd81e3fe98333fa2a4ad04f746c291dc9138a
3,653,531
from datetime import datetime def validate_auth_header(headers): """Validate and decode auth token in request headers. This helper function is used in each of the below wrappers, and is responsible to validate the format of the `Authorization` header where the Lowball token is supposed to reside. Requirements for successful validation: 1. The current app must have a working auth database 2. The `Authorization` header __must__ be present in the headers 3. That header value __must__ be of the format `Bearer <token>`. The header value is split on the space character, and if the header value is properly formatted, this should result in a data structure that looks like ["Bearer", "<token>"]. If after splitting the header value on the space, the length of the resulting structure is not __exactly__ two, then the header is considered improperly formatted. 4. The token must be able to be decoded by the `Authentication.decode_token` method 5. The token cannot be expired. 6. The token must match a token that is in the application authentication database __exactly__ :param headers: Headers from request made to Lowball application :type headers: werkzeug.Headers :return: decoded token data :rtype: Token """ if current_app.auth_db is None: raise NoAuthenticationDatabaseException if "Authorization" not in headers: raise NoAuthHeaderException auth_header = headers["Authorization"].split(" ") if len(auth_header) < 2 or auth_header[0] != "Bearer": raise InvalidAuthHeaderException token = auth_header[1] decoded = current_app.authenticator.decode_token(token) g.client_data = decoded if datetime.datetime.utcnow() > decoded.expiration: raise ExpiredTokenException database_token = current_app.auth_db.lookup_token(decoded.token_id) if database_token != decoded: raise InvalidTokenException return decoded
be75c33767a43f1482417277d6a41f887b26f388
3,653,532
def shared_random_seed(): """All workers must call this function, otherwise it will deadlock. """ seed = np.random.randint(2 ** 31) all_seeds = all_gather(seed) return all_seeds[0]
bdf636ddc24defd13339c20fed0bb5896c35400e
3,653,533
def _version(base): """Get a chronological version from git or PKG-INFO Args: base (dict): state Returns: str: Chronological version "yyyymmdd.hhmmss" str: git sha if available """ v1 = _version_from_pkg_info(base) v2, sha = _version_from_git(base) if v1: if v2: return (v1, None) if float(v1) > float(v2) else (v2, sha) return v1, None if v2: return v2, sha raise ValueError('Must have a git repo or a source distribution')
2d5b5e08fe44386347541634643e28e86cda5a44
3,653,534
def average_link_distance_segment(D,stop=-1,qmax=1,verbose=0): """ Average link clustering based on a pairwise distance matrix. Parameters ---------- D: a (n,n) distance matrix between some items stop=-1: stopping criterion, i.e. distance threshold at which further merges are forbidden By default, all merges are performed qmax = 1; the number of desired clusters (in the limit of stop) verbose=0, verbosity level Returns ------- u: a labelling of the graph vertices according to the criterion cost the cost of each merge step during the clustering procedure Note ---- this method has not been optimized """ n = D.shape[0] if D.shape[1]!=n: raise ValueError, "non -square distance matrix" if stop==-1: stop = np.infty t = average_link_distance(D,verbose) if verbose: t.plot() u1 = np.zeros(n, np.int) u2 = np.zeros(n, np.int) if stop>=0: u1 = t.partition(stop) if qmax>0: u2 = t.split(qmax) if u1.max()<u2.max(): u = u2 else: u = u1 cost = t.get_height() cost = cost[t.isleaf()==False] return u,cost
1be3da149a5ceb99ab94980b94caec0c42edb096
3,653,535
def _process_get_set_Operand(column, reply): """Process reply for functions zGetOperand and zSetOperand""" rs = reply.rstrip() if column == 1: # ensure that it is a string ... as it is supposed to return the operand if isinstance(_regressLiteralType(rs), str): return str(rs) else: return -1 elif column in (2,3): # if there is a comment, it will be in column 2 #return int(float(rs)) return _regressLiteralType(rs) else: return float(rs)
b63de89b480ab263eb49c04dba47befb8bbe0997
3,653,536
def generic_laplace(input, derivative2, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """Multi-dimensional Laplace filter using a provided second derivative function. Args: input (cupy.ndarray): The input array. derivative2 (callable): Function or other callable with the following signature that is called once per axis:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an ``int`` from ``0`` to the number of dimensions, and ``mode``, ``cval``, ``extra_arguments``, ``extra_keywords`` are the values given to this function. output (cupy.ndarray, dtype or None): The array in which to place the output. Default is is same dtype as the input. mode (str): The array borders are handled according to the given mode (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, ``'wrap'``). Default is ``'reflect'``. cval (scalar): Value to fill past edges of input if mode is ``'constant'``. Default is ``0.0``. extra_arguments (sequence, optional): Sequence of extra positional arguments to pass to ``derivative2``. extra_keywords (dict, optional): dict of extra keyword arguments to pass ``derivative2``. Returns: cupy.ndarray: The result of the filtering. .. seealso:: :func:`scipy.ndimage.generic_laplace` .. note:: When the output data type is integral (or when no output is provided and input is integral) the results may not perfectly match the results from SciPy due to floating-point rounding of intermediate results. """ if extra_keywords is None: extra_keywords = {} ndim = input.ndim modes = _util._fix_sequence_arg(mode, ndim, 'mode', _util._check_mode) output = _util._get_output(output, input) if ndim == 0: output[...] = input return output derivative2(input, 0, output, modes[0], cval, *extra_arguments, **extra_keywords) if ndim > 1: tmp = _util._get_output(output.dtype, input) for i in range(1, ndim): derivative2(input, i, tmp, modes[i], cval, *extra_arguments, **extra_keywords) output += tmp return output
bc58a7ca79b551f4cba4c4c61855e027d666f2a0
3,653,537
import functools def as_keras_metric(method): """ from https://stackoverflow.com/questions/43076609/how-to-calculate-precision-and-recall-in-keras """ @functools.wraps(method) def wrapper(self, args, **kwargs): """ Wrapper for turning tensorflow metrics into keras metrics """ value, update_op = method(self, args, **kwargs) tf.keras.backend.get_session().run(tf.local_variables_initializer()) with tf.control_dependencies([update_op]): value = tf.identity(value) return value return wrapper
17a6a6e39a25576215e0b426779df2ccac48e9b4
3,653,538
def do_get_video_capture_job(port_output_name: str = 'RAW') -> str: """ Function for configure the image retrieval job from video camera. :param port_output_name: name you want to use for raw image in the application :return: output image port name """ output_raw_port_name = transform_port_name_lvl(name=port_output_name, lvl=PYRAMID_LEVEL.LEVEL_0) output_raw_port_size = transform_port_size_lvl(lvl=PYRAMID_LEVEL.LEVEL_0, rgb=True) input_port_list = None main_func_list = [output_raw_port_name] output_port_list = [(output_raw_port_name, output_raw_port_size, 'B', True)] job_name = job_name_create(action='Get image camera video frame') d = create_dictionary_element(job_module='get_image', job_name=job_name, input_ports=input_port_list, init_func_name='init_func', init_func_param=None, main_func_name='main_func_video_camera', main_func_param=main_func_list, output_ports=output_port_list) jobs_dict.append(d) return port_output_name
b7965cde58e6b562d289cc58ac25edfd400c3b8a
3,653,539
def vgg8_S(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(make_layers(cfg['YS']), final_filter=256, **kwargs) return model
ec3824dbccbbca804ec5160c551bc7171a9b3866
3,653,541
from typing import IO def createWords(word_str): """Cn Mandarin sentence to Cn Mandarin Words list""" pre_func = IO.readList(r'docs/pre_punctuation.txt')[1:] lat_func = IO.readList(r'docs/post_punctuation.txt')[1:] en_letters = IO.readList(r'docs/special_English_letters.txt')[1:] words = [] j = 0 tmp_word = '' while j < len(word_str): find_pre_func = 0 while j < len(word_str) and word_str[j] in pre_func: tmp_word += word_str[j] find_pre_func = 1 j += 1 if (u'\u9fa5' >= word_str[j] >= u'\u4e00') or word_str[j] in en_letters: if find_pre_func: tmp_word += word_str[j] else: tmp_word = word_str[j] j = j + 1 while j < len(word_str) and word_str[j] in lat_func: tmp_word += word_str[j] j = j + 1 words.append(tmp_word) tmp_word = '' return words
7d7aad411773550e6a884539a1160218499dea73
3,653,542
def get_calib_driver(calib_dir: str): """ Create left/right charuco point detectors and load calibration images from directory. """ reference_image = cv2.imread("tests/data/2020_01_20_storz/pattern_4x4_19x26_5_4_with_inset_9x14.png") minimum_points = 50 number_of_squares = [19, 26] square_tag_sizes = [5, 4] filter_markers = True number_of_chessboard_squares = [9, 14] chessboard_square_size = 3 chessboard_id_offset = 500 left_pd = \ charuco_pd.CharucoPlusChessboardPointDetector( reference_image, minimum_number_of_points=minimum_points, number_of_charuco_squares=number_of_squares, size_of_charuco_squares=square_tag_sizes, charuco_filtering=filter_markers, number_of_chessboard_squares=number_of_chessboard_squares, chessboard_square_size=chessboard_square_size, chessboard_id_offset=chessboard_id_offset ) right_pd = \ charuco_pd.CharucoPlusChessboardPointDetector( reference_image, minimum_number_of_points=minimum_points, number_of_charuco_squares=number_of_squares, size_of_charuco_squares=square_tag_sizes, charuco_filtering=filter_markers, number_of_chessboard_squares=number_of_chessboard_squares, chessboard_square_size=chessboard_square_size, chessboard_id_offset=chessboard_id_offset ) calibration_driver = sc.StereoVideoCalibrationDriver(left_pd, right_pd, minimum_points) for i in range(3): l_img, r_img, chessboard, scope = lcu.get_calib_data(calib_dir, i) calibration_driver.grab_data(l_img, r_img, scope, chessboard) return calibration_driver
1c0b859327fefa983c68b2f70909f5d1ca8108cd
3,653,543
def stop_loading() -> dict: """Force the page stop all navigations and pending resource fetches.""" return {"method": "Page.stopLoading", "params": {}}
fd46497cee6a87ca0b00cc6ceed487655361d896
3,653,544
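A usage sketch for `stop_loading` above; the returned dict is shaped like a Chrome DevTools Protocol command, and a caller would typically attach a request id and send the JSON over the browser's debugging websocket (the transport is assumed and not shown here).

import json

command = stop_loading()
command["id"] = 1  # hypothetical request id chosen by the caller
print(json.dumps(command))  # {"method": "Page.stopLoading", "params": {}, "id": 1}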
def drop_duplicates(df): """Drop duplicate rows and reindex. Args: df (pd.DataFrame): Dataframe. Returns: pd.DataFrame: Dataframe with the replaced value. Examples: >>> df = pd.DataFrame({'letters':['b','b','c'], 'numbers':[2,2,3]}) >>> drop_duplicates(df) letters numbers 0 b 2 1 c 3 """ return df.drop_duplicates().reset_index(drop=True)
517d9faf09267df72def3fa7b90b0f59d819d660
3,653,545
def make_otf( psf, outpath=None, dzpsf=0.1, dxpsf=0.1, wavelength=520, na=1.25, nimm=1.3, otf_bgrd=None, krmax=0, fixorigin=10, cleanup_otf=False, max_otf_size=60000, **kwargs ): """ Generate a radially averaged OTF file from a PSF file Args: psf (str): Filepath of 3D PSF TIF outpath (str): Destination filepath for the output OTF (default: appends "_otf.tif" to filename) dzpsf (float): Z-step size in microns (default: {0.1}) dxpsf (float): XY-Pixel size in microns (default: {0.1}) wavelength (int): Emission wavelength in nm (default: {520}) na (float): Numerical Aperture (default: {1.25}) nimm (float): Refractive indez of immersion medium (default: {1.3}) otf_bgrd (int, None): Background to subtract. "None" = autodetect. (default: {None}) krmax (int): pixels outside this limit will be zeroed (overwriting estimated value from NA and NIMM) (default: {0}) fixorigin (int): for all kz, extrapolate using pixels kr=1 to this pixel to get value for kr=0 (default: {10}) cleanup_otf (bool): clean-up outside OTF support (default: {False}) max_otf_size (int): make sure OTF is smaller than this many bytes. Deconvolution may fail if the OTF is larger than 60KB (default: 60000) Returns: str: Path of output file """ if outpath is None: outpath = psf.replace(".tif", "_otf.tif") if otf_bgrd and isinstance(otf_bgrd, (int, float)): bUserBackground = True background = float(otf_bgrd) else: bUserBackground = False background = 0.0 with CappedPSF(psf, max_otf_size) as _psf: shared_makeotf( str.encode(_psf.path), str.encode(outpath), wavelength, dzpsf, fixorigin, bUserBackground, background, na, nimm, dxpsf, krmax, cleanup_otf, ) return outpath
ad3fbdbea7562f766c53f26a82880f41002c893f
3,653,547
import unicodedata


def is_number(input_string):
    """If input_string contains only a number, return the corresponding numeric
    value; otherwise return input_string with surrounding double quotes stripped.
    """
    try:
        return float(input_string)
    except ValueError:
        pass
    try:
        return unicodedata.numeric(input_string)
    except (TypeError, ValueError):
        pass
    return input_string.strip('"')
2b435b1f23c8764e0ff6bf741678db91bb4a5b23
3,653,549
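A brief usage sketch for the `is_number` helper above; the inputs are made up, and the commented results follow from the float(), unicodedata.numeric(), and strip('"') fallbacks in that order.

print(is_number("3.14"))    # 3.14, parsed by float()
print(is_number("½"))       # 0.5, handled by unicodedata.numeric()
print(is_number('"text"'))  # 'text', falls through and strips the surrounding double quotes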
def latest_active(name, at_time=None, **kwargs):  # pylint: disable=unused-argument
    """
    Initiate a reboot if the running kernel is not the latest one installed.

    .. note::

        This state does not install any patches. It only compares the running
        kernel version number to other kernel versions also installed in the
        system. If the running version is not the latest one installed, this
        state will reboot the system.

        See :py:func:`kernelpkg.upgrade <salt.modules.kernelpkg_linux_yum.upgrade>` and
        :py:func:`~salt.states.kernelpkg.latest_installed`
        for ways to install new kernel packages.

        This module does not attempt to understand or manage boot loader
        configurations. It is possible to have a new kernel installed, but a
        boot loader configuration that will never activate it. For this reason,
        it would not be advisable to schedule this state to run automatically.

        Because this state function may cause the system to reboot, it may be
        preferable to move it to the very end of the state run.
        See :py:func:`~salt.states.kernelpkg.latest_wait` for a waitable state
        that can be called with the `listen` requisite.

    name
        Arbitrary name for the state. Does not affect behavior.

    at_time
        The wait time in minutes before the system will be rebooted.
    """
    active = __salt__["kernelpkg.active"]()
    latest = __salt__["kernelpkg.latest_installed"]()
    ret = {"name": name}

    if __salt__["kernelpkg.needs_reboot"]():

        ret["comment"] = (
            "The system will be booted to activate " "kernel: {0}"
        ).format(latest)

        if __opts__["test"]:
            ret["result"] = None
            ret["changes"] = {"kernel": {"old": active, "new": latest}}
        else:
            __salt__["system.reboot"](at_time=at_time)
            ret["result"] = True
            ret["changes"] = {"kernel": {"old": active, "new": latest}}

    else:
        ret["result"] = True
        ret["comment"] = (
            "The latest installed kernel package " "is active: {0}"
        ).format(active)
        ret["changes"] = {}

    return ret
449819b4abb43b062514f0c8356f3fcd992198f7
3,653,550
import logging

import boto3
from botocore.exceptions import ClientError


def upload_file(file_name, bucket, object_name):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name (key) to store the file under
    :return: True if file was uploaded, else False
    """
    s3_client = boto3.client('s3')
    try:
        s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        logging.error(e)
        return False
    return True
3db16ca5b2136995f4772dafa3778b54e5e41e5c
3,653,551
def get_conf(bs_info, client_config, genesis_time, setup_oracle=None, setup_poet=None, args=None): """ get_conf gather specification information into one ContainerSpec object :param bs_info: DeploymentInfo, bootstrap info :param client_config: DeploymentInfo, client info :param genesis_time: string, genesis time as set in suite specification file :param setup_oracle: string, oracle ip :param setup_poet: string, poet ip :param args: list of strings, arguments for appendage in specification :return: ContainerSpec """ genesis_time_delta = get_genesis_time_delta(genesis_time) client_args = {} if 'args' not in client_config else client_config['args'] # append client arguments if args is not None: for arg in args: client_args[arg] = args[arg] # create a new container spec with client configuration cspec = ContainerSpec(cname='client', specs=client_config) # append oracle configuration if setup_oracle: client_args['oracle_server'] = 'http://{0}:{1}'.format(setup_oracle, conf.ORACLE_SERVER_PORT) # append poet configuration if setup_poet: client_args['poet_server'] = '{0}:{1}'.format(setup_poet, conf.POET_SERVER_PORT) bootnodes = node_string(bs_info['key'], bs_info['pod_ip'], conf.BOOTSTRAP_PORT, conf.BOOTSTRAP_PORT) cspec.append_args(bootnodes=bootnodes, genesis_time=genesis_time_delta.isoformat('T', 'seconds')) # append client config to ContainerSpec if len(client_args) > 0: cspec.append_args(**client_args) return cspec
c2d27fa1922be786b0afa9212864465e14ee3de7
3,653,552
def zeropad(tr, starttime, endtime):
    """
    Zeropads an obspy.Trace so as to cover the time window specified by
    `starttime` and `endtime`

    Parameters
    ----------
    tr : obspy.Trace

    starttime, endtime : obspy.UTCDateTime

    Returns
    -------
    trace : obspy.Trace
        Zeropadded copy of the input trace.
    """
    trace = Trace()
    for key, value in tr.stats.items():
        if key not in ['endtime', 'npts']:
            trace.stats[key] = value
    fs = tr.stats.sampling_rate
    samples_before = int((tr.stats.starttime - starttime) * fs)
    samples_after = int((endtime - tr.stats.endtime) * fs)
    data = tr.data
    if samples_before > 0:
        trace.stats.starttime = tr.stats.starttime - ((samples_before+1) / fs)
        data = np.concatenate((np.zeros(samples_before+1), data))
    if samples_after > 0:
        data = np.concatenate((data, np.zeros(samples_after+1)))
    trace.data = data
    return trace
417c17d8dea148f6534f204d064ba13665ede597
3,653,553
import time
import random


def deepwalk(G, _filepath, o=1, num_walks_node=10, walk_length=80,
             representation_size=128, window_size=5,):
    """Run DeepWalk on graph G: sample truncated random walks and train a
    skip-gram (Word2Vec) model to produce node embeddings. The case where the
    walk corpus exceeds memory is not handled.
    """
    output = _filepath + G.name

    print("Walking...")
    time_start = time.time()
    walks = gu.build_deepwalk_corpus(G, num_paths=num_walks_node, path_length=walk_length,
                                     alpha=0, rand=random.Random(0))  # alpha = 0: do not go back
    time_end = time.time()
    print('Walking time cost:', time_end - time_start)

    print("Training...")
    time_start = time.time()
    # with negative sampling: 5 (default)
    model = Word2Vec(walks, size=representation_size, window=window_size, min_count=0, sg=1,
                     workers=cpu_count())
    time_end = time.time()
    print('Training vectors time cost:', time_end - time_start)

    if o == 1:
        model.wv.save_word2vec_format(output + '.dw.emb')
    else:
        model.wv.save_word2vec_format(output + '0.dw.emb')
    return time_end - time_start
a5b77e6485d29a6a08ca25aa9ea06507a7fae076
3,653,554
def logship_status(host):
    """Report log shipping restore delta and latency"""
    crit = warn = 0
    msg = ''
    sql = """SELECT secondary_server, secondary_database, primary_server, primary_database,
    last_restored_date, DATEDIFF(mi, last_restored_date, GETDATE()) last_restored_delta,
    last_restored_latency, restore_threshold
    FROM msdb..log_shipping_monitor_secondary"""
    rows = execute_sql(host, sql)
    if type(rows) is dict:
        return rows
    for row in rows:
        if row.last_restored_delta >= row.restore_threshold:
            warn += 1
            msg += "Srv:%s DB:%s Restore delta %s exceeds threshold of %s\n" % (
                row.primary_server, row.primary_database, row.last_restored_delta, row.restore_threshold)
        if row.last_restored_latency >= row.restore_threshold:
            crit += 1
            msg += "Srv:%s DB:%s Latency of %s exceeds threshold of %s\n" % (
                row.primary_server, row.primary_database, row.last_restored_latency, row.restore_threshold)
        if row.last_restored_delta < row.restore_threshold and row.last_restored_latency < row.restore_threshold:
            msg += "Srv:%s DB:%s Latency:%s Restore delta:%s\n" % (
                row.primary_server, row.primary_database, row.last_restored_latency, row.last_restored_delta)
    if crit > 0:
        code = 'CRITICAL'
        msg = 'Log shipping CRITICAL\n' + msg
    elif warn > 0:
        code = 'WARNING'
        msg = 'Log shipping warning\n' + msg
    else:
        code = 'OK'
        msg = 'Log shipping OK\n' + msg
    return {'code': code, 'msg': msg}
7b0ea3282d66dd7d354d4b6d14d54bfd826d85d9
3,653,555
def dice(y_true, y_pred): """ Attention: y_true can be weighted to modify learning therefore apply sign to get back to labels y_pred have to be rounded to nearest integer to obtain labels. """ smooth = 1. y_true_f = y_true.flatten() y_pred_f = y_pred.flatten() intersection = np.sum(y_true_f * y_pred_f) return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
205d7cf0f09f702d7905d42c0dfbbd16738ed1e8
3,653,556
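A minimal worked example for the `dice` function above, assuming NumPy arrays of binary labels; with an intersection of 2 and sums of 3 and 2, the smoothed score is (2*2 + 1) / (3 + 2 + 1) = 5/6 ≈ 0.833.

import numpy as np

y_true = np.array([1, 1, 1, 0, 0])
y_pred = np.array([1, 1, 0, 1, 0])
# intersection = 2, sum(y_true) = 3, sum(y_pred) = 2
print(dice(y_true, y_pred))  # ≈ 0.833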
def get_default_language(): """ Returns the default language code based on the data from LANGUAGES.json. """ for language_code, language_data in MEDICINE_LANGUAGE_DATA.items(): if 'DEFAULT' in language_data: if language_data['DEFAULT']: return language_code return 'en'
5bbfbc1060e52a4db957afd0eb8b45cdb60036f4
3,653,558
def triple_triple(r, p=qt.QH([1, 0, 0, 0])):
    """Use three triple products for rotations and boosts."""
    # Note: 'qtype' provides a record of what algebraic operations were done to create a quaternion.
    return triple_sandwich(r, p).add(triple_2_on_1(r, p), qtype="triple_triple")
58b529faa97fae29fcc5b481263c0c84af2ddca2
3,653,559
def _pinv_trunc(x, miss): """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" u, s, v = linalg.svd(x, full_matrices=False) # Eigenvalue truncation varexp = np.cumsum(s) varexp /= varexp[-1] n = np.where(varexp >= (1.0 - miss))[0][0] + 1 logger.info(' Truncating at %d/%d components to omit less than %g ' '(%0.2g)' % (n, len(s), miss, 1. - varexp[n - 1])) s = 1. / s[:n] inv = ((u[:, :n] * s) @ v[:n]).T return inv, n
fca4f8b7e118c88ed7be37553ede09275f8d06ec
3,653,560
def mog_loglike(x, means, icovs, dets, pis): """ compute the log likelihood according to a mixture of gaussians with means = [mu0, mu1, ... muk] icovs = [C0^-1, ..., CK^-1] dets = [|C0|, ..., |CK|] pis = [pi1, ..., piK] (sum to 1) at locations given by x = [x1, ..., xN] """ xx = np.atleast_2d(x) centered = xx[:,:,np.newaxis] - means.T[np.newaxis,:,:] solved = np.einsum('ijk,lji->lki', icovs, centered) logprobs = -0.5*np.sum(solved * centered, axis=1) - np.log(2*np.pi) - 0.5*np.log(dets) + np.log(pis) logprob = scpm.logsumexp(logprobs, axis=1) if len(x.shape) == 1: return logprob[0] else: return logprob
e907c642e664188cb838e89b889257ded2a5aed9
3,653,561
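A small sketch of calling `mog_loglike` above on a two-component 2-D mixture; the means, covariances, and weights are toy values chosen only to match the expected argument shapes (means is K x D, icovs and dets come from the component covariances, pis sums to 1).

import numpy as np

means = np.array([[0.0, 0.0], [3.0, 3.0]])          # (K, D)
covs = np.array([np.eye(2), 2.0 * np.eye(2)])        # (K, D, D)
icovs = np.array([np.linalg.inv(c) for c in covs])   # inverse covariances
dets = np.array([np.linalg.det(c) for c in covs])    # determinants
pis = np.array([0.6, 0.4])                           # mixture weights
x = np.array([[0.5, -0.5], [2.5, 3.5]])              # (N, D) query points
print(mog_loglike(x, means, icovs, dets, pis))       # one log-likelihood per row of x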
from typing import Sequence def align_chunks(array: da.core.Array, scale_factors: Sequence[int]) -> da.core.Array: """ Ensure that all chunks are divisible by scale_factors """ new_chunks = {} for idx, factor in enumerate(scale_factors): aligned = aligned_coarsen_chunks(array.chunks[idx], factor) if aligned != array.chunks[idx]: new_chunks[idx] = aligned if new_chunks: array = array.rechunk(new_chunks) return array
df8b845f10bc4a8fa72e1da53d655d25f73d971d
3,653,562
def getWordScore(word): """ Computes the score of a word (no bingo bonus is added). word: The word to score (a string). returns: score of the word. """ if len(word) == HAND_SIZE: score = 50 else: score = 0 for letter in word: score = score + SCRABBLE_LETTER_VALUES[letter] return score
5d848b5ef5bb0e77d75a866300d9a87b557b5b1b
3,653,563
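A hedged usage sketch for `getWordScore` above: the function depends on module-level `HAND_SIZE` and `SCRABBLE_LETTER_VALUES` globals that are not shown here, so the example defines minimal stand-ins purely for illustration.

# Stand-in globals (assumed values, not taken from the original module)
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {'c': 3, 'a': 1, 't': 1}

print(getWordScore('cat'))  # 3 + 1 + 1 = 5; no 50-point bonus since len('cat') != HAND_SIZE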
import requests
import json


def get_lang_list(source_text, key=None, print_meta_data=False):
    """
    Inputs: source_text - source text as a string
            key - google api key, needed or the function will raise an error

    returns list of language identifiers
    """
    # set up url request to google translate api
    if not key:
        raise Exception("You don't have a key")

    url_shell = 'https://www.googleapis.com/language/translate/v2/detect?key={0}&q={1}'
    url = url_shell.format(key, source_text)
    response = requests.get(url)
    lang_json = json.loads(response.text)
    source_lang = lang_json['data']['detections'][0][0]['language']
    # if print_meta_data:
    #     print 'Is detection reliable: {0}'.format(data_dict['data']['detections']['isReliable'])
    #     print 'Confidence: {0}'.format(data_dict['data']['detections']['confidence'])
    #
    return source_lang
720c3c9252535e82881411fa345734d984350537
3,653,564
def single_value_rnn_regressor(num_units,
                               sequence_feature_columns,
                               context_feature_columns=None,
                               cell_type='basic_rnn',
                               num_rnn_layers=1,
                               optimizer_type='SGD',
                               learning_rate=0.1,
                               momentum=None,
                               gradient_clipping_norm=5.0,
                               input_keep_probability=None,
                               output_keep_probability=None,
                               model_dir=None,
                               config=None,
                               feature_engineering_fn=None):
  """Create a RNN `Estimator` that predicts single values.

  The input function passed to this `Estimator` optionally contains keys
  `RNNKeys.SEQUENCE_LENGTH_KEY`. The value corresponding to
  `RNNKeys.SEQUENCE_LENGTH_KEY` must be vector of size `batch_size` where entry
  `n` corresponds to the length of the `n`th sequence in the batch. The
  sequence length feature is required for batches of varying sizes. It will be
  used to calculate loss and evaluation metrics. If
  `RNNKeys.SEQUENCE_LENGTH_KEY` is not included, all sequences are assumed to
  have length equal to the size of dimension 1 of the input to the RNN.

  In order to specify an initial state, the input function must include keys
  `STATE_PREFIX_i` for all `0 <= i < n` where `n` is the number of nested
  elements in `cell.state_size`. The input function must contain values for all
  state components or none of them. If none are included, then the default
  (zero) state is used as an initial state. See the documentation for
  `dict_to_state_tuple` and `state_tuple_to_dict` for further details.

  The `predict()` method of the `Estimator` returns a dictionary with keys
  `RNNKeys.PREDICTIONS_KEY` and `STATE_PREFIX_i` for `0 <= i < n` where `n` is
  the number of nested elements in `cell.state_size`. The value keyed by
  `RNNKeys.PREDICTIONS_KEY` has shape `[batch_size, padded_length]`. Here,
  `padded_length` is the largest value in the `RNNKeys.SEQUENCE_LENGTH`
  `Tensor` passed as input. Entry `[i, j]` is the prediction associated with
  sequence `i` and time step `j`.

  Args:
    num_units: The size of the RNN cells. This argument has no effect if
      `cell_type` is an instance of `RNNCell`.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn,' 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers. Leave this at its default value 1
      if passing a `cell_type` that is already a MultiRNNCell.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer` or a string. Strings must be
      one of 'Adagrad', 'Momentum' or 'SGD'.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`.
Please check `model_fn` for a definition of features and labels. Returns: An initialized `Estimator`. """ cell = _to_rnn_cell(cell_type, num_units, num_rnn_layers) target_column = layers.regression_target() if optimizer_type == 'Momentum': optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum) dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn( cell=cell, target_column=target_column, problem_type=ProblemType.REGRESSION, prediction_type=PredictionType.SINGLE_VALUE, optimizer=optimizer_type, sequence_feature_columns=sequence_feature_columns, context_feature_columns=context_feature_columns, learning_rate=learning_rate, gradient_clipping_norm=gradient_clipping_norm, input_keep_probability=input_keep_probability, output_keep_probability=output_keep_probability, name='SingleValueRnnRegressor') return estimator.Estimator(model_fn=dynamic_rnn_model_fn, model_dir=model_dir, config=config, feature_engineering_fn=feature_engineering_fn)
4290d8b4e5ea069f58b7fc5a5734c16133b1a614
3,653,565
import uuid def token(): """ Return a unique 32-char write-token """ return str(uuid.uuid4().hex)
f7dc5725cc1d11ee0ab9471d141a89178fa3d07c
3,653,566
from inspect import currentframe, getmodule


def _get_caller_caller_module_name():
    """Return name of module which calls the function from which this function is invoked"""
    frame = currentframe().f_back.f_back
    return getmodule(frame).__name__
4199207922db40424e1a4fa56ee662209de06830
3,653,567
from typing import Tuple def percentile(x: np.ndarray, percentile: float = 99) -> Tuple[float, float]: """Get the (low, high) limit for the series by only including the data within the given percentile. For example, if percentile is 99, (1st percentile, 99th percentile) will be returned. Also, if percentile is 1, (1st percentile, 99th percentile) will be returned. Args: x: the series percentile: the percentile, beyond which to exclude data. Returns: (low, high) percentiles of series """ percentile = max(percentile, 100 - percentile) high = np.percentile(x, percentile) low = np.percentile(x, 100 - percentile) return (low, high)
00a7c6561432da84f878985b1e3dba942d4ec478
3,653,568
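A short usage sketch for the `percentile` helper above, assuming NumPy; note that passing 99 or 1 produces the same symmetric (low, high) limits.

import numpy as np

x = np.arange(101, dtype=float)   # 0, 1, ..., 100
low, high = percentile(x, 99)     # identical to percentile(x, 1)
print(low, high)                  # 1.0 99.0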
from pathlib import Path def get_versions_data( hidden=None, is_unreleased=None, find_latest_release=None, sort_key=None, labels=None, suffix_latest_release=' (latest release)', suffix_unreleased=' (dev)', find_downloads=None, ): """Get the versions data, to be serialized to json.""" if hidden is None: hidden = [] if is_unreleased is None: is_unreleased = _is_unreleased if find_latest_release is None: find_latest_release = _find_latest_release if find_downloads is None: find_downloads = _find_downloads if sort_key is None: sort_key = parse_version if labels is None: labels = {} folders = sorted( [ str(f) for f in Path().iterdir() if ( f.is_dir() and not str(f).startswith('.') and not str(f).startswith('_') ) ], key=sort_key, ) labels = {folder: labels.get(folder, str(folder)) for folder in folders} versions = [] unreleased = [] for folder in folders: if folder not in hidden: versions.append(folder) if is_unreleased(folder): unreleased.append(folder) labels[folder] += suffix_unreleased latest_release = find_latest_release( [f for f in versions if f not in unreleased] ) outdated = [] if latest_release is not None: labels[latest_release] += suffix_latest_release outdated = [ folder for folder in versions if (folder != latest_release and folder not in unreleased) ] versions_data = { # list of *all* folders 'folders': folders, # # folder => labels for every folder in "Versions" 'labels': labels, # # list folders that appear in "Versions" 'versions': versions, # # list of folders that do not appear in "Versions" 'hidden': hidden, # # list of folders that should warn & point to latest release 'outdated': outdated, # # list of dev-folders that should warn & point to latest release 'unreleased': unreleased, # # the latest stable release folder 'latest_release': latest_release, # # folder => list of (label, file) 'downloads': {folder: find_downloads(folder) for folder in folders}, } return versions_data
4232013fe403b3de54df571e13b881077145b61f
3,653,569
from wheel_filename import InvalidFilenameError, parse_wheel_filename import json from datetime import datetime def wheels( package_name: str = Argument(..., help="The name of the package to show wheel info for"), version: str = Argument( None, help="The version of the package to show info for, defaults to latest, can be omitted if using package_name==version", ), supported_only: bool = Option(False, help="Only show wheels supported on the current platform"), ): """See detailed information about all the wheels of a release of a package""" if not version and "==" in package_name: package_name, _, version = package_name.partition("==") url = f"{base_url}/pypi/{quote(package_name)}{f'/{quote(version)}' if version else ''}/json" with console.status("Getting data from PyPI"): response = session.get(url) if response.status_code != 200: if response.status_code == 404: rich.print("[red]:no_entry_sign: Project or version not found[/]") rich.print(f"[orange]:grey_exclamation: Some error occured. response code {response.status_code}[/]") raise typer.Exit() parsed_data = json.loads(response.text) from packaging.version import parse as parse_version # pylint: disable=import-outside-toplevel from rich.text import Text # pylint: disable=import-outside-toplevel # def is_wheel_supported(wheel_name): # try: # tag = parse_tag("-".join(wheel_name.split("-")[2:])) # except Exception as e: # return "white" # if not tag: # return "white" # else: # if list(tag)[-1] in sys_tags(): # return "green" # else: # return "red" data = parsed_data["urls"] from itertools import cycle # pylint: disable=import-outside-toplevel colors = cycle(["green", "blue", "magenta", "cyan", "yellow", "red"]) wheel_panels = [] if supported_only: from packaging.tags import parse_tag, sys_tags # pylint: disable=import-outside-toplevel def is_wheel_supported(wheel): try: parsed_wheel_file = parse_wheel_filename(wheel["filename"]) except InvalidFilenameError: return True for tag in parsed_wheel_file.tag_triples(): if any(tag in sys_tags() for tag in list(parse_tag(tag))): return True return False data = filter(is_wheel_supported, data) from datetime import timezone # pylint: disable=import-outside-toplevel for wheel in data: wheel_name = Text(wheel["filename"]) # Maybe use the regex in https://github.com/jwodder/wheel-filename/blob/master/src/wheel_filename/__init__.py#L45-L53 wheel_name.highlight_regex( r"^(?P<distribution>\w+)-(?P<version>[A-Za-z0-9\.\-]+)(?P<build_tag>-\w{0,3})?-(?P<python_tag>[a-z]{2}[0-9]{0,3})-(?P<abi_tag>\w+)-(?P<platform_tag>.+)(?P<file_extension>\.whl)$", style_prefix="wheel.", ) wheel_panels.append( Panel( "\n".join( filter( None, [ f"[blue]Comment:[/] {wheel['comment_text']}" if wheel["comment_text"] else None, f"[magenta]Has Signature[/]: {wheel['has_sig']}", f"[cyan]Package Type:[/] {wheel['packagetype']}", f"[green]Requires Python:[/] {wheel['requires_python']}" if not wheel["requires_python"] is None else None, f"[yellow]Size:[/] {humanize.naturalsize(wheel['size'], binary=True)}", f"[bright_cyan]Yanked Reason[/]: {wheel['yanked_reason']}" if wheel["yanked"] else None, f"[red]Upload Time[/]: {humanize.naturaltime(utc_to_local(datetime.strptime(wheel['upload_time_iso_8601'], '%Y-%m-%dT%H:%M:%S.%fZ'), timezone.utc))}", ], ) ), title=f"[white]{wheel_name}[/]" if not wheel_name.plain.endswith(".whl") else wheel_name, border_style=next(colors), ) ) from rich.columns import Columns # pylint: disable=import-outside-toplevel console.print(Columns(wheel_panels))
f57b577426dbd24b86d9eaa91b13c0b048d22afe
3,653,571
def extract_pvdata(h5file, timestamp, pvnames=None):
    """
    Extract a snapshot (PV values) nearest to a timestamp from a BSA HDF5 file.

    Parameters
    ----------
    h5file: str
        BSA HDF5 file with data that includes the timestamp

    timestamp: datetime-like, str, int, float
        This must be localized (not naive time).

    Returns
    -------
    pvdata: dict
        Dict of pvname:value

    found_timestamp : pd.Timestamp
        The exact time that the data was tagged at

    See Also
    --------
    bsa_snapshot
    """
    timestamp = pd.Timestamp(timestamp).tz_convert('UTC')  # Convert to UTC

    with h5py.File(h5file) as h5:
        # Use pandas to get the nearest time
        s = h5['secondsPastEpoch'][:, 0]
        ns = h5['nanoseconds'][:, 0]
        df = pd.DataFrame({'s': s, 'ns': ns})
        df['time'] = pd.to_datetime(df['s'], unit='s', utc=True) + pd.to_timedelta(df['ns'], unit='nanoseconds')

        # Assure that the time is in here
        assert timestamp <= df.time.iloc[-1]
        assert timestamp >= df.time.iloc[0]

        # Search for the nearest time
        ix = df.time.searchsorted(timestamp)
        found_timestamp = df['time'].iloc[ix]

        # form snapshot dict
        pvdata = {}

        # Return everything
        if pvnames is None:
            pvnames = list(h5)

        for pvname in pvnames:
            if pvname in h5:
                pvdata[pvname] = np.squeeze(h5[pvname][ix])
            else:
                pvdata[pvname] = None

    return pvdata, found_timestamp
cd62b347735c97e962c8eec01d4344b4fb4e63f9
3,653,573
import functools def decorate_func_with_plugin_arg(f): """Decorate a function that takes a plugin as an argument. A "plugin" is a pair of simulation and postprocess plugins. The decorator expands this pair. """ @functools.wraps(f) def wrapper(self, plugins_tuple): return f(self, plugins_tuple[0], plugins_tuple[1]) return wrapper
e90c86bfd6c3cada33c867d26ed64da3cac6f9c4
3,653,574
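A minimal sketch of applying the decorator above; the `Runner` class and the plugin strings are hypothetical and only illustrate how a (simulation, postprocess) pair is unpacked into two separate arguments.

class Runner:
    @decorate_func_with_plugin_arg
    def run(self, sim_plugin, post_plugin):
        # callers pass one pair, the decorated method receives two arguments
        return "{} -> {}".format(sim_plugin, post_plugin)

print(Runner().run(("simulate", "postprocess")))  # simulate -> postprocess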
from datetime import datetime def datestr(date=None): """Convert timestamps to strings in a predefined format """ if date is None: date = datetime.utcnow() if isinstance(date, str): date = parse_time(date) return date.strftime("%y-%m-%d %H:%M:%S")
36505b926eef6aaa5bcbae011ba90931c20a5067
3,653,575
def init(): """Connect to the keyboard, switch all lights off""" global bufferC # Buffer with the full key/lights mapping global device device=hid.device() # 0x17cc: Native Instruments. 0x1410: KK S88 MK1 device.open(0x17cc, pid) device.write([0xa0]) bufferC = [0x00] * numkeys notes_off() return True
0edc2085cbd6b48fef85d5492e4093551a15aac1
3,653,577
def compute_acc_bin(conf_thresh_lower, conf_thresh_upper, conf, pred, true): """ # Computes accuracy and average confidence for bin Args: conf_thresh_lower (float): Lower Threshold of confidence interval conf_thresh_upper (float): Upper Threshold of confidence interval conf (numpy.ndarray): list of confidences pred (numpy.ndarray): list of predictions true (numpy.ndarray): list of true labels Returns: (accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin and number of elements in bin. """ filtered_tuples = [x for x in zip(pred, true, conf) if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper] if len(filtered_tuples) < 1: return 0, 0, 0 else: correct = len([x for x in filtered_tuples if x[0] == x[1]]) # How many correct labels len_bin = len(filtered_tuples) # How many elements falls into given bin avg_conf = sum([x[2] for x in filtered_tuples]) / len_bin # Avg confidence of BIN accuracy = float(correct) / len_bin # accuracy of BIN return accuracy, avg_conf, len_bin
eb338800751de635e6b72213254287554cd34dc0
3,653,579
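A small worked example for `compute_acc_bin` above: three of the four predictions have confidence in (0.5, 1.0], two of those match the true labels, so the bin reports accuracy 2/3, average confidence (0.9 + 0.8 + 0.6) / 3, and 3 elements.

import numpy as np

conf = np.array([0.9, 0.8, 0.6, 0.4])
pred = np.array([1, 0, 1, 1])
true = np.array([1, 1, 1, 0])

acc, avg_conf, n = compute_acc_bin(0.5, 1.0, conf, pred, true)
print(acc, avg_conf, n)  # ≈ 0.667, ≈ 0.767, 3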
import importlib


def multi_backend_test(globals_dict,
                       relative_module_name,
                       backends=('jax', 'tensorflow'),
                       test_case=None):
  """Multi-backend test decorator.

  The end goal of this decorator is that the decorated test case is removed,
  and replaced with a set of new test cases that have been rewritten to use one
  or more backends. E.g., a test case named `Test` will by default be rewritten
  to `Test_jax` and `Test_tensorflow`, which use the JAX and TensorFlow
  backends, respectively.

  The decorator works by using the dynamic rewrite system to rewrite imports of
  the module the test is defined in, and inserting the appropriately renamed
  test cases into the `globals()` dictionary of the original module. A
  side-effect of this is that the global code inside the module is run `1 +
  len(backends)` times, so avoid doing anything expensive there. This does mean
  that the original module needs to be in a runnable state, i.e., when it uses
  symbols from `backend`, those must be actually present in the literal
  `backend` module.

  A subtle point about what this decorator does in the rewritten modules: the
  rewrite system changes the behavior of this decorator to act as a passthrough
  to avoid infinite rewriting loops.

  Args:
    globals_dict: Python dictionary of strings to symbols. Set this to the
      value of `globals()`.
    relative_module_name: Python string. The module name of the module where
      the decorated test resides relative to `fun_mc`. You must not use
      `__name__` for this as that is set to a defective value of `__main__`
      which is sufficiently abnormal that the rewrite system does not work on
      it.
    backends: Python iterable of strings. Which backends to test with.
    test_case: The actual test case to decorate.

  Returns:
    None, to delete the original test case.
  """
  if test_case is None:
    return lambda test_case: multi_backend_test(  # pylint: disable=g-long-lambda
        globals_dict=globals_dict,
        relative_module_name=relative_module_name,
        test_case=test_case)

  if BACKEND is not None:
    return test_case

  if relative_module_name == '__main__':
    raise ValueError(
        'module_name should be written out manually, not by passing __name__.')

  # This assumes `test_util` is one level deep inside of `fun_mc`. If we
  # move it, we'd change the `-1` to equal the (negative) nesting level.
  root_name_comps = __name__.split('.')[:-1]
  relative_module_name_comps = relative_module_name.split('.')

  # Register the rewrite hooks.
  importlib.import_module('.'.join(root_name_comps + ['backends', 'rewrite']))

  new_test_case_names = []
  for backend in backends:
    new_module_name_comps = (
        root_name_comps + ['dynamic', 'backend_{}'.format(backend)] +
        relative_module_name_comps)
    # Rewrite the module.
    new_module = importlib.import_module('.'.join(new_module_name_comps))

    # Subclass the test case so that we can rename it (absl uses the class name
    # in its UI).
    base_new_test = getattr(new_module, test_case.__name__)
    new_test = type('{}_{}'.format(test_case.__name__, backend),
                    (base_new_test,), {})
    new_test_case_names.append(new_test.__name__)
    globals_dict[new_test.__name__] = new_test

  # We deliberately return None to delete the original test case from the
  # original module.
1006e2bc983f7821138ab27b6d2465055a275c0d
3,653,580
import random


def _get_typed_array():
  """Generates a TypedArray constructor.

  There are nine types of TypedArrays and TypedArray has four constructors.

  Types:
    * Int8Array
    * Int16Array
    * Int32Array
    * Uint8Array
    * Uint16Array
    * Uint32Array
    * Uint8ClampedArray
    * Float32Array
    * Float64Array

  Constructors:
    * new TypedArray(length)
    * new TypedArray(typedArray)
    * new TypedArray(object)
    * new TypedArray(buffer)

  Returns:
    A string made up of a randomly chosen type and argument type from the
    lists above.
  """
  array_type = random.choice([
      'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array',
      'Uint32Array', 'Uint8ClampedArray', 'Float32Array', 'Float64Array'
  ])

  # Choose an argument type at random.
  arguments = random.choice([
      # length e.g. 293
      # We choose 2**10 as the upper boundary because the max length allowed
      # by WebBluetooth is 2**10.
      lambda: utils.UniformExpoInteger(0, 10),
      # typedArray e.g. new Uint8Array([1,2,3])
      _get_typed_array,
      # object e.g. [1,2,3]
      lambda: _get_array_of_random_ints(max_length=1000, max_value=2**64),
      # buffer e.g. new Uint8Array(10).buffer
      lambda: _get_typed_array() + '.buffer',
  ])

  return 'new {array_type}({arguments})'.format(
      array_type=array_type, arguments=arguments())
31eea5c66689584ff38eb8edad3a15231f7cd438
3,653,581
def _is_valid_requirement(requirement: str) -> bool:
    """Returns True if the `requirements.txt` line is valid."""
    is_invalid = (
        not requirement or  # Empty line
        requirement.startswith('#') or  # Comment
        requirement.startswith('-r ')  # Filter the `-r requirements.txt`
    )
    return not is_invalid
73b8ad139329698ad334b230cb04976db4ec05ba
3,653,582
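A quick sketch of filtering requirements lines with `_is_valid_requirement` above; the file contents are made up for illustration.

lines = [
    "numpy>=1.20",
    "# a comment",
    "",
    "-r other-requirements.txt",
    "pandas",
]
print([line for line in lines if _is_valid_requirement(line)])  # ['numpy>=1.20', 'pandas']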
from kivy.clock import mainthread
from kivy.app import App
import threading


def execute(cmd):
    """Execute an arbitrary string of Python code in the app context."""
    _result = [None]
    _event = threading.Event()

    @mainthread
    def _real_execute():
        app = App.get_running_app()
        idmap = {"app": app}
        try:
            exec(cmd, idmap)
        except Exception as e:
            _result[:] = [u"{}".format(e)]
        _event.set()

    _real_execute()
    _event.wait()
    return _result[0]
2ec1850487d854a074dd60642ff38a7ec9fc7e97
3,653,583
import six def is_scalar(element): """An `is_atomic` criterion. Returns `True` for scalar elements. Scalar elements are : strings and any object that is not one of: collections.Sequence, collections.Mapping, set, or attrs object. ``` import nifty_nesting as nest flat = nest.flatten([1, [2, 3]], is_atomic=is_scalar) assert flat == [1, 2, 3] ``` Arguments: element: The element to check. Returns: `True` if the element is a scalar, else `False`. """ if isinstance(element, six.string_types): return True if is_attrs_object(element): return False if is_sequence(element) or is_set(element): return False if is_mapping(element): return False return True
07f280822a6167ab951942f6e2479476ceec2dc5
3,653,584
from typing import Union from typing import Sequence def wrap_singleton_string(item: Union[Sequence, str]): """ Wrap a single string as a list. """ if isinstance(item, str): # Can't check if iterable, because a string is an iterable of # characters, which is not what we want. return [item] return item
6e0946fee8fddd23631ff66d405dce2ae8a15fa6
3,653,585
def view_milestone_history(request, chosen_year=None): """ http://127.0.0.1:8000/milestones/by-columns/ :param request: :return: """ (chosen_year, basic_query) = get_basic_milestone_history_query(chosen_year) milestones = basic_query.order_by('due_on') open_closed_cnts = get_issue_counts_query_base().values('open_issues', 'closed_issues') num_open_issues = sum(x['open_issues'] for x in open_closed_cnts) num_closed_issues = sum( x['closed_issues'] for x in open_closed_cnts) mmo = MilestoneMonthOrganizer(milestones) #mmo.show() #return HttpResponse('ok') sorted_repos = mmo.get_sorted_repos() if sorted_repos and len(sorted_repos) > 0: last_retrieval_time = sorted_repos[0].last_retrieval_time else: last_retrieval_time = None d = {} d['page_title'] = 'Previous Milestones for %s' % chosen_year d['is_milestone_history_all'] = True d['chosen_year'] = chosen_year d['last_retrieval_time'] = last_retrieval_time d['sorted_repos'] = sorted_repos d['organized_months'] = mmo.get_organized_months(descending_order=True) d['NO_DUE_DATE'] = RepoMilestoneMonthsOrganizer.NO_DUE_DATE d['milestone_count'] = milestones.count() d['num_open_issues'] = num_open_issues d['num_closed_issues'] = num_closed_issues d['hide_description'] = True #print(d) return render_to_response('milestones/view_history_multi_column.html'\ , d\ , context_instance=RequestContext(request))
1d40c08701e9088682e9e0663c1538956b22770c
3,653,586
def M_absolute_bol(lum):
    """Computes the absolute bolometric magnitude

    Parameters
    ----------
    lum : `float/array`
        luminosity in solar luminosities

    Returns
    -------
    M_bol : `float/array`
        absolute bolometric magnitude
    """
    log_lum = np.log10(lum)
    M_bol = 4.75 - 2.7 * log_lum
    return M_bol
dd3209fd6c91a7b1b51f43a7a15f9c5eaccd740d
3,653,587
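A worked example for `M_absolute_bol` above, using the coefficients exactly as written in the function: for L = 100 L_sun, log10(L) = 2, so M_bol = 4.75 - 2.7 * 2 = -0.65.

import numpy as np

print(M_absolute_bol(100.0))                   # ≈ -0.65
print(M_absolute_bol(np.array([1.0, 100.0])))  # ≈ [ 4.75 -0.65]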
def codes_index_get_double(indexid, key):
    # type: (cffi.FFI.CData, bytes) -> T.List[float]
    """
    Get the list of double values associated to a key.
    The index must be created with such a key (possibly together with other
    keys).

    :param bytes key: the keyword whose list of values has to be retrieved

    :rtype: List(float)
    """
    size = codes_index_get_size(indexid, key)
    values = ffi.new('double[]', size)
    size_p = ffi.new('size_t *', size)
    check_return(lib.codes_index_get_double)(indexid, key, values, size_p)
    return list(values)
9a0c2c27f917ecfe63ad1f6a797aa152928d294c
3,653,588
from typing import Optional def lemmatize( nlp: Optional[Language] = None, name="lemmatize" ) -> ops.base.SpacyBasedOperation: """Helper function to return SpacyBasedOperation for lemmatizing. This operation returns a stream.DataStream where each item is a string after being lemmatized. Parameters ---------- nlp : Optional[spacy.language.Language] spacy's language model or None. If None then by default `en_core_web_sm` spacy model is loaded name : Optional[str] name of this operation Returns ------- out : SpacyBasedOperation """ return ops.base.SpacyBasedOperation(nlp=nlp, process_doc_fn=_lemmatize, name=name,)
797efa35320cf4b4e5e1176d1fbbcee13bbaa884
3,653,589