content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
from pathlib import Path
import os
import zipfile

def main(args):
    """Main entry point"""
    args.archive = expand_path(args.archive)
    args.files = expand_path(args.files)

    def additions(search_path):
        """Generate a list of (lpath, arcname) for writing to zip-file"""
        aname = Path(args.archive).stem
        for root, _, files in os.walk(search_path):
            for fname in files:
                fpath = os.path.join(root, fname)
                arcname = fpath.replace(
                    search_path, "/".join([aname, 'subprojects'])
                )
                yield fpath, arcname

    with zipfile.ZipFile(args.archive, 'a') as zfile:
        listing = zfile.namelist()
        for lpath, arcname in additions(args.files):
            if arcname in listing:
                print(f"skipping: {lpath} {arcname}")
                continue
            zfile.write(lpath, arcname)

    return 0
f4ba59a73a3c0491829df87dad79ece7a6fcdfbc
3,657,200
import time

def execution_duration(fun):
    """
    Calculates the duration the function 'fun' takes to execute.

    execution_duration returns a wrapper function to which you pass your arguments.

    Example: execution_duration(my_function)(my_first_param, my_second_param)

    The result of the wrapper function will be a tuple, where the first value is the
    return value of your function and the second is the execution time in seconds
    expressed as a float.
    """
    def wrapper(*args, **kwargs):
        t1 = time.time()
        result = fun(*args, **kwargs)
        exec_dur = time.time() - t1
        return result, exec_dur

    return wrapper
b824ce8e1448a65bd932ec8344b1976d2a86dd09
3,657,201
def return_origin_and_destination():
    """Return origin and destination from session's waypoints key."""
    waypoints = session['waypoints']
    if len(waypoints) <= 1:
        return 'Please enter at least 2 destinations for your trip.'
    else:
        origin = session['waypoints'][0]
        destination = session['waypoints'][-1]
        data = {
            "origin": origin,
            "destination": destination
        }
        return jsonify(data)
db8764fc32fe1367f303fa44b9c5c0c113a8c9ee
3,657,202
def attempt_move(piece):
    """
    Attempts to make a move if the target coordinate is a legal move.

    Returns:
        True if the move is made, False otherwise
    """
    x, y = pygame.mouse.get_pos()
    x = x // 100
    y = y // 100
    if (piece is not None) and (x, y) in piece.legal_moves:
        piece.move(the_board, x, y)
        initialize_moves()
        update_moves()
        return True
    return False
36c2b7764f6bb13765cf2eed7270f90f1cb338d1
3,657,203
def give(user_id, text, group):
    """construct a message to be sent that mentions a user,
    which is surprisingly complicated with GroupMe"""
    nickname = group.members().filter(user_id=user_id).first.nickname
    mention = attachments.Mentions([user_id], [[0, len(nickname)+1]]).as_dict()
    message = '@{} {}'.format(nickname, text)
    return (message, mention)
f9d36042b3ab5a2681fe065ac935321d8d398085
3,657,204
def integrate_audio_feat(features, audio_h5, mxm2msd):
    """ """
    # TODO: this part should be moved to MFCC feature extraction
    #       and stored in the feature file for better integrity
    n_coeffs = 40
    audio_feat_cols = (
        ['mean_mfcc{:d}'.format(i) for i in range(n_coeffs)]
        + ['var_mfcc{:d}'.format(i) for i in range(n_coeffs)]
        + ['mean_dmfcc{:d}'.format(i) for i in range(n_coeffs)]
        + ['var_dmfcc{:d}'.format(i) for i in range(n_coeffs)]
        + ['mean_ddmfcc{:d}'.format(i) for i in range(n_coeffs)]
        + ['var_ddmfcc{:d}'.format(i) for i in range(n_coeffs)]
    )
    with h5py.File(audio_h5, 'r') as hf:
        tid2row = {tid: i for i, tid in enumerate(hf['tids'][:])}
        feats = []
        for mxmid in corpus.ids:
            tid = mxm2msd[mxmid]
            if tid in tid2row:
                feats.append(hf['feature'][tid2row[tid]][None])
            else:
                feats.append(np.zeros((1, len(audio_feat_cols))))
        audio_feat = np.concatenate(feats, axis=0)
        # idx = [tid2row[mxm2msd[mxmid]] for mxmid in corpus.ids]
        # audio_feat = hf['feature'][idx]

    features['audio'] = TextFeature(
        'mfcc', corpus.ids, audio_feat, audio_feat_cols
    )
    return features
f346cf69a0a4b1aef0fc7dc7b7b603e02952ae6b
3,657,205
def make_annotation_loader_factory():
    """Generate a factory function for constructing annotation loaders.

    Invoke the returned factory function by passing the name of the annotation
    loader class you want to construct, followed by the parameters for the
    constructor as named arguments
    (e.g., factory('FourCornersCSV', annotations_file=...))
    """
    return AnnotationLoaderLoader().loader.make_object_factory()
70e6d9834a903a614a41510b6d97b62c3d1d5b3f
3,657,206
def test_arma():
    """arma, check that rho is correct (appendix 10.A) and reproduce figure 10.2"""
    a, b, rho = arma_estimate(marple_data, 20, 20, 40)
    psd = arma2psd(A=a, B=b, rho=rho, NFFT=None)
    psd = arma2psd(A=a, B=b, rho=rho)
    try:
        psd = arma2psd(A=None, B=None, rho=rho)
        assert False
    except:
        assert True
    return psd
b1db09017fe060746ae1b503315bfaa6f3a44a58
3,657,207
from typing import Union

def chunks_lists_to_tuples(level: Union[list, int, float]) -> Union[tuple, int, float]:
    """Convert a recursive list of lists of ints into a tuple of tuples of ints.

    This is a helper function needed because MongoDB automatically converts
    tuples to lists, but the dask constructor wants the chunks defined strictly
    as tuples.

    e.g.

    - input: ``[[1, 2], [3, 4]]``
    - output: ``((1, 2), (3, 4))``

    .. note::
       float data type is supported to allow for NaN-sized dask chunks
    """
    if isinstance(level, list):
        return tuple(chunks_lists_to_tuples(i) for i in level)
    if isinstance(level, (int, float)):
        return level
    raise TypeError(level)
49cc7923211d50fdf6a386016af12b80a2f821df
3,657,208
def oid_pattern_specificity(pattern):
    # type: (str) -> Tuple[int, Tuple[int, ...]]
    """Return a measure of the specificity of an OID pattern.

    Suitable for use as a key function when sorting OID patterns.
    """
    wildcard_key = -1  # Must be less than all digits, so that e.g. '1.*' is less specific than '1.n' for n = 0...9.

    parts = tuple(
        wildcard_key if digit == '*' else int(digit)
        for digit in pattern.lstrip('.').split('.')
    )

    return (
        len(parts),  # Shorter OIDs are less specific than longer OIDs, regardless of their contents.
        parts,  # For same-length OIDs, compare their contents (integer parts).
    )
7d1b4304791076fca42add7a8b9aeb31f85359f9
3,657,209
def extract_entities(text, json_={}):
    """
    Extract entities from a given text using metamap and generate a json,
    preserving info regarding the sentence of each entity that was found.
    For the time being, we preserve both concepts and the entities related to them.

    Input:
        - text: str, a piece of text or sentence
        - json_: dict, sometimes the json to be returned is given to us to be
          enriched. Defaults to an empty json_.
    Output:
        - json_: dict, json with fields text, sents, concepts and entities
          containing the final results
    """
    json_['text'] = text
    # Tokenize the text
    sents = sent_tokenize(text)
    json_['sents'] = [{'sent_id': i, 'sent_text': sent} for i, sent in enumerate(sents)]
    json_['concepts'], _ = mmap_extract(text)
    json_['entities'] = {}
    for i, sent in enumerate(json_['sents']):
        ents = metamap_ents(sent)
        json_['entities'][sent['sent_id']] = ents
    return json_
15f8b88e430c451a517f11b661aa1c57a93288fe
3,657,210
def gaul_as_df(gaul_path):
    """
    Load the Gaussian list output by PyBDSF as a pd.DataFrame

    Args:
        gaul_path (`str`): Path to Gaussian list (.gaul file)
    """
    gaul_df = pd.read_csv(
        gaul_path,
        skiprows=6,
        names=GAUL_COLUMNS,
        delim_whitespace=True,
    )

    return gaul_df
806f8c386344c5380109705b053b89a82db62e66
3,657,211
def normalize_matrix(mat, dim=3, p=2):
    """Normalize matrix.

    Args:
        mat: matrix
        dim: dimension
        p: p value for norm

    Returns:
        normalized matrix
    """
    mat_divided = F.normalize(mat, p=p, dim=dim)
    return mat_divided
35ac155a51818d2b93fc12a0c91ce35c0dfd9fe2
3,657,212
from typing import List
import math

def species_to_parameters(species_ids: List[str],
                          sbml_model: 'libsbml.Model') -> List[str]:
    """
    Turn a SBML species into parameters and replace species references
    inside the model instance.

    :param species_ids:
        List of SBML species ID to convert to parameters with the same ID as
        the replaced species.

    :param sbml_model:
        SBML model to modify

    :return:
        List of IDs of species which have been converted to parameters
    """
    transformables = []

    for species_id in species_ids:
        species = sbml_model.getSpecies(species_id)

        if species.getHasOnlySubstanceUnits():
            logger.warning(
                f"Ignoring {species.getId()} which has only substance units."
                " Conversion not yet implemented.")
            continue

        if math.isnan(species.getInitialConcentration()):
            logger.warning(
                f"Ignoring {species.getId()} which has no initial "
                "concentration. Amount conversion not yet implemented.")
            continue

        transformables.append(species_id)

    # Must not remove species while iterating over getListOfSpecies()
    for species_id in transformables:
        species = sbml_model.removeSpecies(species_id)
        par = sbml_model.createParameter()
        par.setId(species.getId())
        par.setName(species.getName())
        par.setConstant(True)
        par.setValue(species.getInitialConcentration())
        par.setUnits(species.getUnits())

    # Remove from reactants and products
    for reaction in sbml_model.getListOfReactions():
        for species_id in transformables:
            # loop, since removeX only removes one instance
            while reaction.removeReactant(species_id):
                # remove from reactants
                pass
            while reaction.removeProduct(species_id):
                # remove from products
                pass
            while reaction.removeModifier(species_id):
                # remove from modifiers
                pass

    return transformables
a7cb9df992bad98584124320bc485aa978495050
3,657,213
import warnings

def gaussian_filter_cv(array: np.ndarray, sigma) -> np.ndarray:
    """
    Apply a Gaussian filter to a raster that may contain NaNs, using OpenCV's implementation.
    Arguments are for now hard-coded to be identical to scipy.

    N.B: kernel_size is set automatically based on sigma

    :param array: the input array to be filtered.
    :param sigma: the sigma of the Gaussian kernel

    :returns: the filtered array (same shape as input)
    """
    # Check that array dimension is 2, or can be squeezed to 2D
    orig_shape = array.shape
    if len(orig_shape) == 2:
        pass
    elif len(orig_shape) == 3:
        if orig_shape[0] == 1:
            array = array.squeeze()
        else:
            raise NotImplementedError("Case of array of dimension 3 not implemented")
    else:
        raise ValueError(
            f"Invalid array shape given: {orig_shape}. Expected 2D or 3D array"
        )

    # In case array does not contain NaNs, use OpenCV's gaussian filter directly
    # With kernel size (0, 0), i.e. set to default, and borderType=BORDER_REFLECT, the output is equivalent to scipy
    if np.count_nonzero(np.isnan(array)) == 0:
        gauss = cv.GaussianBlur(array, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)

    # If array contain NaNs, need a more sophisticated approach
    # Inspired by https://stackoverflow.com/a/36307291
    else:
        # Run filter on a copy with NaNs set to 0
        array_no_nan = array.copy()
        array_no_nan[np.isnan(array)] = 0
        gauss_no_nan = cv.GaussianBlur(array_no_nan, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)
        del array_no_nan

        # Mask of NaN values
        nan_mask = 0 * array.copy() + 1
        nan_mask[np.isnan(array)] = 0
        gauss_mask = cv.GaussianBlur(nan_mask, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)
        del nan_mask

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="invalid value encountered")
            gauss = gauss_no_nan / gauss_mask

    return gauss.reshape(orig_shape)
f39223111ff6624756491b37c32b7162ae8f3e5c
3,657,214
import inspect
import functools

def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    argspec = inspect.getargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)

        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        update_instance_cache_with_nw_info(self, context, instance,
                                           nw_info=res)

        # return the original function's return value
        return res
    return wrapper
6ca9449f1ae222052f89da9a8baa611b42b47fe4
3,657,215
import time
import os
import logging

def get_timestamped_export_dir(export_dir_base):
    """Builds a path to a new subdirectory within the base directory.

    Each export is written into a new subdirectory named using the
    current time.  This guarantees monotonically increasing version
    numbers even across multiple runs of the pipeline.
    The timestamp used is the number of seconds since epoch UTC.

    Args:
      export_dir_base: A string containing a directory to write the exported
        graph and checkpoints.
    Returns:
      The full path of the new subdirectory (which is not actually created yet).

    Raises:
      RuntimeError: if repeated attempts fail to obtain a unique timestamped
        directory name.
    """
    attempts = 0
    while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
        export_timestamp = int(time.time())

        export_dir = os.path.join(
            compat.as_bytes(export_dir_base),
            compat.as_bytes(str(export_timestamp)))
        if not gfile.Exists(export_dir):
            # Collisions are still possible (though extremely unlikely): this
            # directory is not actually created yet, but it will be almost
            # instantly on return from this function.
            return export_dir
        time.sleep(1)
        attempts += 1
        logging.warn(
            'Export directory {} already exists; retrying (attempt {}/{})'.format(
                export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
    raise RuntimeError('Failed to obtain a unique export directory name after '
                       '{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
f4848f97f6176aa990a8a40de42b92e2e74d46a3
3,657,216
import sys

def calculate_density(temp, pressure):
    """Returns density in g/cm^3"""
    if (temp < 161.40):
        raise ValueError("Solid phase!")

    if (temp < 289.7):
        VaporP_bar = pow(10, 4.0519 - 667.16 / temp)
    else:
        VaporP_bar = sys.float_info.max

    if (pressure < VaporP_bar):
        raise ValueError("Gas phase!")

    density = 2.9970938084691329e2 * np.exp(-8.2598864714323525e-2 * temp) - \
        1.8801286589442915e6 * np.exp(
            -((temp - 4.0820251276172212e2) / 2.7863170223154846e1)**2) - \
        5.4964506351743057e3 * np.exp(
            -((temp - 6.3688597345042672e2) / 1.1225818853661815e2)**2) + \
        8.3450538370682614e2 * np.exp(
            -((temp + 4.8840568924597342e1) / 7.3804147172071107e3)**2) \
        - 8.3086310405942265e2

    return density
e5222e4f552a9f4b82ea13be874985406e4a3b2f
3,657,217
def cross_validation(df, K, hyperparameters):
    """
    Perform cross validation on a dataset.

    :param df: pandas.DataFrame
    :param K: int
    :param hyperparameters: dict
    """
    train_indices = list(df.sample(frac=1).index)
    k_folds = np.array_split(train_indices, K)
    if K == 1:
        K = 2

    rmse_list = []
    for i in range(len(k_folds)):
        training_folds = [fold for j, fold in enumerate(k_folds) if j != i]
        training_indices = np.concatenate(training_folds)
        x_train, y_train = df.iloc[training_indices, 1:], df.iloc[training_indices, :1]
        x_validation, y_validation = df.iloc[k_folds[i], 1:], df.iloc[k_folds[i], :1]

        dtrain = xgb.DMatrix(data=x_train, label=y_train)
        dvalidation = xgb.DMatrix(data=x_validation, label=y_validation)

        model = xgb.train(
            params=hyperparameters,
            dtrain=dtrain,
            evals=[(dtrain, "train"), (dvalidation, "validation")],
        )
        eval_results = model.eval(dvalidation)
        rmse_list.append(float(eval_results.split("eval-rmse:")[1]))

    return rmse_list, model
72cdf91efa029eb8c029eb84d596057d13a7c515
3,657,218
from typing import List
from typing import Dict

def solve_cities(cities: List, gdps: List, sick: List, total_capacity: int,
                 value_r=0, weight_r=0, num_reads=1, verbose=False) -> Dict:
    """
    Solves problem: "Which cities should I shut down in order to stay within
    healthcare resources constraints while maximizing overall GDP"

    parameters:
        cities - list of city names
        gdps - corresponding list of GDP per city
        sick - corresponding number of sick people per city
        total_capacity - max capacity for sick people summed over all cities
        num_reads - number of samples to take
        verbose - whether to print out best result

    returns:
        (dict) - list of dictionaries with individual results and selected
                 attributes sorted in order of least energy first
    """
    if sum(sick) < total_capacity:
        print("Warning in solve_cities: Total number of sick people is less " +
              "than total capacity. There's no knapsack problem to solve!")

    bqm = knapsack_bqm(cities, gdps, sick, total_capacity,
                       value_r=value_r, weight_r=weight_r)

    sampler = LeapHybridSampler()
    samplesets = [sampler.sample(bqm) for _ in range(num_reads)]

    df = pd.DataFrame({'city': cities, 'gdp': gdps, 'sick': sick})
    df = df.set_index('city')

    solution_set = []
    for sampleset in samplesets:
        open_cities = []
        closed_cities = []
        for k, v in sampleset.first.sample.items():
            if k in cities:
                if v == 1:
                    open_cities.append(k)
                else:
                    closed_cities.append(k)
        solution_set.append({
            'open_cities': open_cities,
            'closed_cities': closed_cities,
            'energy': sampleset.first.energy,
            'salvaged_gdp': sum(df.loc[open_cities]['gdp'])
            + sum(df.loc[closed_cities]['gdp']) * value_r,
            'used_capacity': int(round(sum(df.loc[open_cities]['sick'])))
        })

    # do sorting from lowest to highest energy
    if num_reads > 1:
        energies = [solution['energy'] for solution in solution_set]
        solution_set = [x for _, x in sorted(zip(energies, solution_set))]

    if verbose:
        print('BEST SOLUTION')
        print('Open cities')
        print(solution_set[0]['open_cities'])
        print('\n')
        print('Closed cities')
        print(solution_set[0]['closed_cities'])
        print('\n')
        total_gdp = sum(df['gdp'])
        salvaged_gdp = solution_set[0]['salvaged_gdp']
        print(
            f'Salvaged GDP: {salvaged_gdp} ({(100*salvaged_gdp/total_gdp):.1f}%)')
        used_capacity = solution_set[0]['used_capacity']
        print(
            f'Used up hospital capacity: {used_capacity:d} of {total_capacity} ({(100*used_capacity/total_capacity):.1f}%)')

    return solution_set
52bef06069ee6975fbc5dea50cbb44349c96b9db
3,657,219
def catalog():
    """Render the mapping catalog page."""
    if request.args.get(EQUIVALENT_TO):
        mappings = current_app.manager.get_mappings_by_type(EQUIVALENT_TO)
        message = Markup("<h4>You are now visualizing the catalog of equivalent mappings</h4>")
        flash(message)
    elif request.args.get(IS_PART_OF):
        mappings = current_app.manager.get_mappings_by_type(IS_PART_OF)
        message = Markup("<h4>You are now visualizing the catalog of hierarchical mappings</h4>")
        flash(message)
    else:
        mappings = current_app.manager.get_all_mappings()

    return render_template(
        'curation/catalog.html',
        STYLED_NAMES=STYLED_NAMES,
        mappings=mappings,
        all='all'
    )
b28904fff79b978225eda1bb3ed4f6e04c817737
3,657,220
import logging

def detect_counterexample(algorithm, test_epsilon, default_kwargs={},
                          event_search_space=None, databases=None,
                          event_iterations=100000, detect_iterations=500000,
                          cores=0, loglevel=logging.INFO):
    """
    :param algorithm: The algorithm to test for.
    :param test_epsilon: The privacy budget to test for, can either be a number or a tuple/list.
    :param default_kwargs: The default arguments the algorithm needs except the first Queries argument.
    :param event_search_space: The search space for event selector to reduce search time, optional.
    :param databases: The databases to run for detection, optional.
    :param event_iterations: The iterations for event selector to run, default is 100000.
    :param detect_iterations: The iterations for detector to run, default is 500000.
    :param cores: The cores to utilize, 0 means auto-detection.
    :param loglevel: The loglevel for logging package.
    :return: [(epsilon, p, d1, d2, kwargs, event)] The epsilon-p pairs along with databases/arguments/selected event.
    """
    logging.basicConfig(level=loglevel)
    logger.info('Starting to find counter example on algorithm {} with test epsilon {}\n'
                .format(algorithm.__name__, test_epsilon))
    logger.info('\nExtra arguments:\n'
                'default_kwargs: {}\n'
                'event_search_space: {}\n'
                'databases: {}\n'
                'cores:{}\n'.format(default_kwargs, event_search_space, databases, cores))

    if databases is not None:
        d1, d2 = databases
        kwargs = generate_arguments(algorithm, d1, d2, default_kwargs=default_kwargs)
        input_list = ((d1, d2, kwargs),)
    else:
        input_list = generate_databases(algorithm, 5, default_kwargs=default_kwargs)

    result = []

    test_epsilon = (test_epsilon, ) if isinstance(test_epsilon, (int, float)) else test_epsilon

    pool = None
    if cores == 0:
        pool = mp.Pool(mp.cpu_count())
    elif cores != 1:
        pool = mp.Pool(cores)

    try:
        for i, epsilon in enumerate(test_epsilon):
            d1, d2, kwargs, event = select_event(algorithm, input_list, epsilon, event_iterations,
                                                 search_space=event_search_space, process_pool=pool)

            # fix the database and arguments if selected for performance
            input_list = ((d1, d2, kwargs),) if len(input_list) > 1 else input_list

            p1, _ = hypothesis_test(algorithm, d1, d2, kwargs, event, epsilon,
                                    detect_iterations, process_pool=pool)

            result.append((epsilon, p1, d1, d2, kwargs, event))

            print('Epsilon: {} | p-value: {:5.3f} | Event: {} | {:5.1f}%'
                  .format(epsilon, p1, event, float(i + 1) / len(test_epsilon) * 100))
            logger.debug('D1: {} | D2: {} | kwargs: {}'.format(d1, d2, kwargs))
    finally:
        if pool is not None:
            pool.close()
        else:
            pass

    return result
4a4fecd74743895abd9bc1c6ab617736758d5c64
3,657,221
def apply_inverse_rot_to_vec(rot, vec):
    """Multiply the inverse of a rotation matrix by a vector."""
    # Inverse rotation is just transpose
    return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2],
            rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2],
            rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]]
1108ac6caa30b3562a2af1bcc83e1c1a1bfd8d4d
3,657,222
def gsl_blas_dsdot(*args, **kwargs):
    """gsl_blas_dsdot(gsl_vector_float const * X, gsl_vector_float const * Y) -> int"""
    return _gslwrap.gsl_blas_dsdot(*args, **kwargs)
6b8f45a773fca936913b2653df9ab8c96f1e974a
3,657,223
def cost(weights):
    """Cost function which tends to zero when A |x> tends to |b>."""
    p_global_ground = global_ground(weights)
    p_ancilla_ground = ancilla_ground(weights)
    p_cond = p_global_ground / p_ancilla_ground
    return 1 - p_cond
3b33292c63b42d110efe0d7cbe4dae85f095472f
3,657,224
import time

def runOptimization(
    cfg,
    optimize_cfg,
    n_iter=20,
    split_runs=1,
    model_runs=1,
    filename="optimize_result",
):
    """Optimize the model parameter using hyperopt.

    The model parameters are optimized using the evaluations on validation dataset.

    Args:
        cfg(dict): configuration data
        optimize_cfg(dict): configuration for optimization
        n_iter(int): the number of iterations for sequential optimization
        split_runs(int): the number of runs for different dataset-split random seeds.
        model_runs(int): the number of runs for different model-initialization random seeds.
        filename(string): a file-name for logging
    """

    def objective(space):
        print(space)
        newcfg = {**cfg}
        for k in space.keys():
            if k in newcfg and type(newcfg[k]) == dict:
                newcfg[k] = {**space[k]}
            else:
                newcfg[k] = space[k]
        print(newcfg, cfg)
        result = runEvaluation(
            newcfg, split_runs=split_runs, model_runs=model_runs
        )

        opt_result = {
            "loss": result["val_rmse"][0],
            "loss_variance": result["val_rmse"][1] ** 2,
            "true_loss": result["test_rmse"][0],
            "true_loss_variance": result["test_rmse"][1] ** 2,
            "status": STATUS_OK,
            "eval_time": time.time(),
            "data": result,
            "space": space,
        }
        return opt_result

    trials = Trials()
    best = fmin(
        objective,
        optimize_cfg,
        algo=tpe.suggest,
        max_evals=n_iter,
        trials=trials,
    )

    valid_trial = [t for t in trials if t["result"]["status"] == STATUS_OK]
    losses_argmin = np.argmin(
        [float(trial["result"]["loss"]) for trial in valid_trial]
    )
    print([float(trial["result"]["loss"]) for trial in valid_trial])
    best_trial = valid_trial[losses_argmin]
    best_result = best_trial["result"]["data"]
    print(best, best_trial["result"]["space"], space_eval(optimize_cfg, best))

    ret = {
        "best": best,
        "n_iter": n_iter,
        "split_runs": split_runs,
        "model_runs": model_runs,
        "result": best_result,
        "optimize_confg": optimize_cfg,
        "config": cfg,
    }

    ret_str = ConfigEncoder.dumps(ret)
    with open(f"{filename}.json", "w") as fp:
        fp.write(ret_str)
    print(ret)

    return ret
ef2e2c85f5b0b8f6889da49ed1e964d432bb1886
3,657,225
def _capabilities_for_entity(config, entity):
    """Return an _EntityCapabilities appropriate for given entity.

    raises _UnknownEntityDomainError if the given domain is unsupported.
    """
    if entity.domain not in _CAPABILITIES_FOR_DOMAIN:
        raise _UnknownEntityDomainError()
    return _CAPABILITIES_FOR_DOMAIN[entity.domain](config, entity)
5fe541778ede415020377a0c989fa47ad2ae4d05
3,657,226
import os

import click

def check_missing_files(client):
    """Find missing files listed in datasets."""
    missing = defaultdict(list)

    for path, dataset in client.datasets.items():
        for file in dataset.files:
            filepath = (path.parent / file)
            if not filepath.exists():
                missing[str(
                    path.parent.relative_to(client.renku_datasets_path)
                )].append(
                    os.path.normpath(str(filepath.relative_to(client.path)))
                )

    if not missing:
        return True

    click.secho(
        WARNING + 'There are missing files in datasets.'
        # '\n  (use "renku dataset clean <name>" to clean them)'
    )

    for dataset, files in missing.items():
        click.secho(
            '\n\t' + click.style(dataset, fg='yellow') +
            ':\n\t  ' +
            '\n\t  '.join(click.style(path, fg='red') for path in files)
        )

    return False
f3a167e72871ef05baa217a7d10dfe4be113da21
3,657,227
def apply_torsion(nodes, suffix=""):
    """ Torsion energy in nodes. """
    if (
        "phases%s" % suffix in nodes.data
        and "periodicity%s" % suffix in nodes.data
    ):
        return {
            "u%s" % suffix: esp.mm.torsion.periodic_torsion(
                x=nodes.data["x"],
                k=nodes.data["k%s" % suffix],
                phases=nodes.data["phases%s" % suffix],
                periodicity=nodes.data["periodicity%s" % suffix],
            )
        }

    else:
        return {
            "u%s" % suffix: esp.mm.torsion.periodic_torsion(
                x=nodes.data["x"],
                k=nodes.data["k%s" % suffix],
            )
        }
37310291ddb769587d9d14a8dedcda3b528a78f3
3,657,228
import decimal

def parse_summary_table(doc):
    """
    Parse the etree doc for summarytable, returns::

        [{'channel': unicode,
          'impressions': int,
          'clicks': int,
          'ctr': decimal.Decimal,
          'ecpm': decimal.Decimal,
          'earnings': decimal.Decimal}]
    """
    for t in doc.findall('.//table'):
        if t.attrib.get('id') == 'summarytable':
            break
    else:
        raise ValueError("summary table not found")

    res = []
    FIELDS = ['channel', 'requests', 'responses', 'impressions', 'clicks',
              'ctr', 'ecpm', 'earnings']
    for row in t.findall('.//tr'):
        celltext = []
        for c in row.findall('td'):
            tail = ''
            # adsense inserts an empty span if a row has a period in it, so
            # get the children and find the tail element to append to the text
            if c.find('a') and c.find('a').getchildren():
                tail = c.find('a').getchildren()[0].tail or ''
            celltext.append('%s%s' % ((c.text or c.findtext('a') or '').strip(), tail.strip()))

        if len(celltext) != 8:
            continue

        try:
            value_cols = map(parse_decimal, celltext[1:])
        except decimal.InvalidOperation:
            continue

        res.append(dict(zip(FIELDS, [celltext[0]] + value_cols)))

    return res
7d188478dc5539b4c8020af09cb052140def63c9
3,657,229
import math

def tileset_info(hitile_path):
    """
    Get the tileset info for a hitile file.

    Parameters
    ----------
    hitile_path: string
        The path to the hitile file

    Returns
    -------
    tileset_info: {'min_pos': [],
                   'max_pos': [],
                   'tile_size': 1024,
                   'max_zoom': 7}
    """
    hdf_file = h5py.File(hitile_path, "r")

    d = hdf_file["meta"]

    if "min-pos" in d.attrs:
        min_pos = d.attrs["min-pos"]
    else:
        min_pos = 0

    if "max-pos" in d.attrs:
        max_pos = d.attrs["max-pos"]
    else:
        max_pos = d.attrs["max-length"]

    return {
        "max_pos": [int(max_pos)],
        "min_pos": [int(min_pos)],
        "max_width": 2 ** math.ceil(math.log(max_pos - min_pos) / math.log(2)),
        "max_zoom": int(d.attrs["max-zoom"]),
        "tile_size": int(d.attrs["tile-size"]),
    }
3ea467898e15ac6aca21c219398aa1249b795e55
3,657,230
def delete_user():
    """
    Deletes the current user's account.
    """
    DB.session.delete(current_user)
    DB.session.commit()
    flash("Account deleted", 'success')
    return redirect('/login')
bc22d6287738c676cec3c780a9fb01513ddd5530
3,657,231
def setup_code_gen(no_of_accessories):
    """ Generate setup code """
    try:
        invalid_setup_codes = ['00000000', '11111111', '22222222', '33333333', '44444444', '55555555',
                               '66666666', '77777777', '88888888', '99999999', '12345678', '87654321']
        setup_code_created = []
        for _ in range(no_of_accessories):
            setup_code = ''
            # random generate setup_code
            for _ in range(8):
                random_num = str(random.randint(0, 9))
                setup_code += random_num
            # generate again till valid
            while setup_code in invalid_setup_codes:
                setup_code = ''
                for _ in range(8):
                    random_num = str(random.randint(0, 9))
                    setup_code += random_num
            # Check if the setup code has valid format
            if (len(setup_code) != 8) or (not setup_code.isdigit()):
                print "\nSetup code generated should be 8 numbers without any '-' in between. Eg. 11122333 \n"
                raise SystemExit(1)
            # Add the hyphen (-) in the PIN for salt-verifier generation. So, 11122333 will become 111-22-333
            setup_code = setup_code[:3] + '-' + setup_code[3:5] + '-' + setup_code[5:]
            setup_code_created.append(setup_code)
        return setup_code_created
    except StandardError as std_err:
        print std_err
    except:
        raise
253272cc27de1ead05d12f2e1798d91a3c4571dd
3,657,232
def letter_difference(letter_1: str, letter_2: str) -> int:
    """
    Return the difference in value between letter_1 and letter_2
    """
    assert len(letter_1) == 1
    assert len(letter_2) == 1
    diff = letter_to_value[letter_2] - letter_to_value[letter_1]
    if diff > 13:
        diff -= 27
    return diff
66d88efff92acebef06275d244d560ca5071e974
3,657,233
import requests

def refresh_access_token(request):
    """Updates `accessToken` in request cookies (not in browser cookies)
    using `refreshToken`.
    """
    try:
        refresh_token = request.COOKIES['refreshToken']
        url = urljoin(settings.TIT_API_HOST, '/api/auth/token/refresh/')
        response = requests.post(url, {'refresh': refresh_token})
        result = response.json()
        request.COOKIES['accessToken'] = result['access']
        return True
    except (KeyError, requests.HTTPError, ):
        """Refresh token doesn't exist in cookies or response from
        TIT API returned error status code.
        """
        return False
378f4129fa9cc6af8cc961560e3e4063fcd0495b
3,657,234
def ngrams(string, n=3, punctuation=PUNCTUATION, **kwargs):
    """ Returns a list of n-grams (tuples of n successive words) from the given string.
        Punctuation marks are stripped from words.
    """
    s = string
    s = s.replace(".", " .")
    s = s.replace("?", " ?")
    s = s.replace("!", " !")
    s = [w.strip(punctuation) for w in s.split()]
    s = [w.strip() for w in s if w.strip()]
    return [tuple(s[i:i + n]) for i in range(len(s) - n + 1)]
1e0e99f01c8aa46f4c44cca02d9bdb2b1c52d4c5
3,657,235
def binstringToBitList(binstring):
    """Converts a string of '0's and '1's to a list of 0's and 1's"""
    bitList = []
    for bit in binstring:
        bitList.append(int(bit))
    return bitList
d8ff10651d9fc2d02aba3b4a57a0a768032783b7
3,657,236
def file_revisions(request, repo_id):
    """List file revisions in file version history page.
    """
    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    # perm check
    if check_folder_permission(request, repo_id, '/') is None:
        raise Http404

    return render_file_revisions(request, repo_id)
c9ec0e1c159a4efdd8f4c3287cb9d8339ba9a9d2
3,657,237
def textctrl_info_t_get_tabsize(*args):
    """
    textctrl_info_t_get_tabsize(self) -> unsigned int
    """
    return _ida_kernwin.textctrl_info_t_get_tabsize(*args)
7aff89906aebacb3664a73d26f52dd4317031790
3,657,238
def node_is_hidden(node_name):
    """
    Returns whether or not given node is hidden
    :param node_name: str
    :return: bool
    """
    if python.is_string(node_name):
        return not maya.cmds.getAttr('{}.visibility'.format(node_name))

    return not maya.cmds.getAttr('{}.visibility'.format(node.get_name(node_name)))
0927d2424b64b9b81b52ced335823963d7ec9fe2
3,657,239
import torch

def generate_patch_grid_from_normalized_LAF(img: torch.Tensor,
                                            LAF: torch.Tensor,
                                            PS: int = 32) -> torch.Tensor:
    """Helper function for affine grid generation.

    Args:
        img: image tensor of shape :math:`(B, CH, H, W)`.
        LAF: laf with shape :math:`(B, N, 2, 3)`.
        PS: patch size to be extracted.

    Returns:
        grid
    """
    raise_error_if_laf_is_not_valid(LAF)
    B, N, _, _ = LAF.size()
    num, ch, h, w = img.size()

    # norm, then renorm is needed for allowing detection on one resolution
    # and extraction at arbitrary other
    LAF_renorm = denormalize_laf(LAF, img)

    grid = F.affine_grid(LAF_renorm.view(B * N, 2, 3),
                         [B * N, ch, PS, PS],
                         align_corners=False)  # type: ignore
    grid[..., :, 0] = 2.0 * grid[..., :, 0].clone() / float(w) - 1.0
    grid[..., :, 1] = 2.0 * grid[..., :, 1].clone() / float(h) - 1.0
    return grid
288572e4ff8577a8bd664732c79408b71ec58c0d
3,657,240
from typing import Union
from typing import Tuple

def _resolve_condition_operands(
    left_operand: Union[str, pipeline_channel.PipelineChannel],
    right_operand: Union[str, pipeline_channel.PipelineChannel],
) -> Tuple[str, str]:
    """Resolves values and PipelineChannels for condition operands.

    Args:
        left_operand: The left operand of a condition expression.
        right_operand: The right operand of a condition expression.

    Returns:
        A tuple of the resolved operands values:
        (left_operand_value, right_operand_value).
    """
    # Pre-scan the operand to get the type of constant value if there's any.
    # The value_type can be used to backfill missing PipelineChannel.channel_type.
    value_type = None
    for value_or_reference in [left_operand, right_operand]:
        if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
            parameter_type = type_utils.get_parameter_type(
                value_or_reference.channel_type)
            if parameter_type in [
                    pipeline_spec_pb2.ParameterType.STRUCT,
                    pipeline_spec_pb2.ParameterType.LIST,
                    pipeline_spec_pb2.ParameterType.PARAMETER_TYPE_ENUM_UNSPECIFIED,
            ]:
                input_name = _additional_input_name_for_pipeline_channel(
                    value_or_reference)
                raise ValueError('Conditional requires scalar parameter values'
                                 ' for comparison. Found input "{}" of type {}'
                                 ' in pipeline definition instead.'.format(
                                     input_name, value_or_reference.channel_type))

    parameter_types = set()
    for value_or_reference in [left_operand, right_operand]:
        if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
            parameter_type = type_utils.get_parameter_type(
                value_or_reference.channel_type)
        else:
            parameter_type = type_utils.get_parameter_type(
                type(value_or_reference).__name__)

        parameter_types.add(parameter_type)

    if len(parameter_types) == 2:
        # Two different types being compared. The only possible types are
        # String, Boolean, Double and Integer. We'll promote the other type
        # using the following precedence:
        # String > Boolean > Double > Integer
        if pipeline_spec_pb2.ParameterType.STRING in parameter_types:
            canonical_parameter_type = pipeline_spec_pb2.ParameterType.STRING
        elif pipeline_spec_pb2.ParameterType.BOOLEAN in parameter_types:
            canonical_parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
        else:
            # Must be a double and int, promote to double.
            assert pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE in parameter_types, \
                'Types: {} [{} {}]'.format(parameter_types, left_operand, right_operand)
            assert pipeline_spec_pb2.ParameterType.NUMBER_INTEGER in parameter_types, \
                'Types: {} [{} {}]'.format(parameter_types, left_operand, right_operand)
            canonical_parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
    elif len(parameter_types) == 1:
        # Both operands are the same type.
        canonical_parameter_type = parameter_types.pop()
    else:
        # Probably shouldn't happen.
        raise ValueError('Unable to determine operand types for'
                         ' "{}" and "{}"'.format(left_operand, right_operand))

    operand_values = []
    for value_or_reference in [left_operand, right_operand]:
        if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
            input_name = _additional_input_name_for_pipeline_channel(
                value_or_reference)
            operand_value = "inputs.parameter_values['{input_name}']".format(
                input_name=input_name)
            parameter_type = type_utils.get_parameter_type(
                value_or_reference.channel_type)
            if parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER:
                operand_value = 'int({})'.format(operand_value)
        elif isinstance(value_or_reference, str):
            operand_value = "'{}'".format(value_or_reference)
            parameter_type = pipeline_spec_pb2.ParameterType.STRING
        elif isinstance(value_or_reference, bool):
            # Booleans need to be compared as 'true' or 'false' in CEL.
            operand_value = str(value_or_reference).lower()
            parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
        elif isinstance(value_or_reference, int):
            operand_value = str(value_or_reference)
            parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
        else:
            assert isinstance(value_or_reference, float), value_or_reference
            operand_value = str(value_or_reference)
            parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE

        if parameter_type != canonical_parameter_type:
            # Type-cast to so CEL does not complain.
            if canonical_parameter_type == pipeline_spec_pb2.ParameterType.STRING:
                assert parameter_type in [
                    pipeline_spec_pb2.ParameterType.BOOLEAN,
                    pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
                    pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
                ]
                operand_value = "'{}'".format(operand_value)
            elif canonical_parameter_type == pipeline_spec_pb2.ParameterType.BOOLEAN:
                assert parameter_type in [
                    pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
                    pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
                ]
                operand_value = 'true' if int(operand_value) == 0 else 'false'
            else:
                assert canonical_parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
                assert parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
                operand_value = 'double({})'.format(operand_value)

        operand_values.append(operand_value)

    return tuple(operand_values)
fde07e14af8f9ae610cfcd64e6a3f2219f0ee8e9
3,657,241
import json
import os
import random
import base64
import zlib

def format_record(test_record):
    """Create a properly formatted Kinesis, S3, or SNS record.

    Supports a dictionary or string based data record.  Reads in
    event templates from the test/integration/templates folder.

    Args:
        test_record: Test record metadata dict with the following structure:
            data - string or dict of the raw data
            description - a string describing the test that is being performed
            trigger - bool of if the record should produce an alert
            source - which stream/s3 bucket originated the data
            service - which aws service originated the data
            compress (optional) - if the payload needs to be gzip compressed or not

    Returns:
        dict in the format of the specific service
    """
    service = test_record['service']
    source = test_record['source']
    compress = test_record.get('compress')

    data_type = type(test_record['data'])
    if data_type == dict:
        data = json.dumps(test_record['data'])
    elif data_type in (unicode, str):
        data = test_record['data']
    else:
        LOGGER_CLI.info('Invalid data type: %s', type(test_record['data']))
        return

    # Get the template file for this particular service
    template_path = os.path.join(DIR_TEMPLATES, '{}.json'.format(service))
    with open(template_path, 'r') as service_template:
        try:
            template = json.load(service_template)
        except ValueError as err:
            LOGGER_CLI.error('Error loading %s.json: %s', service, err)
            return

    if service == 's3':
        # Set the S3 object key to a random value for testing
        test_record['key'] = ('{:032X}'.format(random.randrange(16**32)))
        template['s3']['object']['key'] = test_record['key']
        template['s3']['object']['size'] = len(data)
        template['s3']['bucket']['arn'] = 'arn:aws:s3:::{}'.format(source)
        template['s3']['bucket']['name'] = source

        # Create the mocked s3 object in the designated bucket with the random key
        put_mocked_s3_object(source, test_record['key'], data)

    elif service == 'kinesis':
        if compress:
            kinesis_data = base64.b64encode(zlib.compress(data))
        else:
            kinesis_data = base64.b64encode(data)

        template['kinesis']['data'] = kinesis_data
        template['eventSourceARN'] = 'arn:aws:kinesis:us-east-1:111222333:stream/{}'.format(source)

    elif service == 'sns':
        template['Sns']['Message'] = data
        template['EventSubscriptionArn'] = 'arn:aws:sns:us-east-1:111222333:{}'.format(source)

    else:
        LOGGER_CLI.info('Invalid service %s', service)
        return

    return template
719d398353c50ec2fdb48db933b0322027f744c1
3,657,242
def int_to_bitstr(int_value: int) -> str:
    """
    A function which returns its bit representation as a string.

    Arguments:
        int_value (int) - The int value we want to get the bit representation for.

    Return:
        str - The string representation of the bits required to form the int.
    """
    return bin(int_value)[2:]
cafbf151ce0404081a0a8e1327d85e61ea7ddc52
3,657,243
def target_reached(effect):
    """target amount has been reached (100% or more)"""
    if not effect.instance.target:
        return False
    return effect.instance.amount_raised >= effect.instance.target
0101cd9c3c51a1e03ba7cfd8844c3821a156e2fe
3,657,244
def resid_mask(ints, wfs_map=read_map(wfs_file), act_map=read_map(act_file), num_aps=236):
    """
    Returns the locations of the valid actuators in the actuator array

    resids: Nx349 residual wavefront array (microns)
    ints: Nx304 intensity array (any units)
    N: Number of timestamps
    """
    # Check inputs
    N = ints.shape[0]  # Num timestamps

    # Aggregate intensities over all timestamps
    med_ints = np.median(ints, axis=0)

    # Fill WFS map with aggregated intensities
    int_map = wfs_map.copy()
    int_map[np.where(int_map == 1)] = med_ints

    # Find lenslets with greatest intensity
    idxs = np.flip(np.argsort(int_map, axis=None))[:num_aps]  # flat idxs of sort
    idxs = np.unravel_index(idxs, wfs_map.shape)  # 2D idxs of sort

    # Mask for good sub-ap values
    good_aps = np.zeros(wfs_map.shape, dtype=int)
    good_aps[idxs] = 1
    good_aps = good_aps * wfs_map  # Just in case

    # Mask for good actuator values
    good_acts = np.pad(good_aps, ((1, 1), (1, 1)))
    good_acts = (good_acts[1:, 1:] | good_acts[1:, :-1]
                 | good_acts[:-1, :-1] | good_acts[:-1, 1:]) * act_map

    return good_acts
98c818db8d2d5040c5a20857693f3f3116ab8e13
3,657,245
import requests

def session():
    """Sets up a HTTP session with a retry policy."""
    s = requests.Session()
    retries = Retry(total=5, backoff_factor=0.5)
    s.mount("http://", HTTPAdapter(max_retries=retries))
    return s
d5cb89f04017718983834a0b4008972f393f56ae
3,657,246
def fit(df, methodtype='hc', scoretype='bic', black_list=None, white_list=None,
        bw_list_method='enforce', max_indegree=None, epsilon=1e-4, max_iter=1e6,
        verbose=3):
    """Structure learning fit model.

    Description
    -----------
    Search strategies for structure learning.
    The search space of DAGs is super-exponential in the number of variables and
    the above scoring functions allow for local maxima.

    To learn model structure (a DAG) from a data set, there are three broad techniques:
        1. Score-based structure learning (BIC/BDeu/K2 score; exhaustive search, hill climb/tabu search)
            a. exhaustivesearch
            b. hillclimbsearch
        2. Constraint-based structure learning (PC)
            a. chi-square test
        3. Hybrid structure learning (the combination of both techniques) (MMHC)

    Score-based Structure Learning.
    This approach construes model selection as an optimization task. It has two building blocks:
    a scoring function sD:->R that maps models to a numerical score, based on how well they
    fit a given data set D, and a search strategy to traverse the search space of possible
    models M and select a model with optimal score.
    Commonly used scoring functions to measure the fit between model and data are Bayesian
    Dirichlet scores such as BDeu or K2 and the Bayesian Information Criterion (BIC, also
    called MDL). As before, BDeu is dependent on an equivalent sample size.

    Parameters
    ----------
    df : pd.DataFrame()
        Input dataframe.
    methodtype : str, (default : 'hc')
        String Search strategy for structure_learning.
        'hc' or 'hillclimbsearch' (default)
        'ex' or 'exhaustivesearch'
        'cs' or 'constraintsearch'
    scoretype : str, (default : 'bic')
        Scoring function for the search spaces.
        'bic', 'k2', 'bdeu'
    black_list : List or None, (default : None)
        If a list of edges is provided as black_list, they are excluded from the
        search and the resulting model will not contain any of those edges.
        The default is None. Works only in case of methodtype='hc'.
        See also parameter: `bw_list_method`
    white_list : List or None, (default : None)
        If a list of edges is provided as white_list, the search is limited to
        those edges. The resulting model will then only contain edges that are
        in white_list. The default is None. Works only in case of methodtype='hc'.
        See also parameter: `bw_list_method`
    bw_list_method : str, (default : 'enforce')
        'enforce' : A list of edges can optionally be passed as `black_list` or
        `white_list` to exclude those edges or to limit the search. This option
        is limited to only methodtype='hc'.
        'filter' : Filter the dataframe based on `black_list` or `white_list`.
        Filtering can be done for every methodtype/scoretype.
    max_indegree : int, (default : None)
        If provided and unequal None, the procedure only searches among models
        where all nodes have at most max_indegree parents.
        (only in case of methodtype='hc')
    epsilon: float (default: 1e-4)
        Defines the exit condition. If the improvement in score is less than
        `epsilon`, the learned model is returned. (only in case of methodtype='hc')
    max_iter: int (default: 1e6)
        The maximum number of iterations allowed. Returns the learned model when
        the number of iterations is greater than `max_iter`.
        (only in case of methodtype='hc')
    verbose : int, (default : 3)
        Print progress to screen.
        0: NONE, 1: ERROR, 2: WARNING, 3: INFO (default), 4: DEBUG, 5: TRACE

    Returns
    -------
    dict with model.

    Examples
    --------
    >>> import bnlearn as bn
    >>>
    >>> # Load asia DAG
    >>> model = bn.import_DAG('asia')
    >>>
    >>> # plot ground truth
    >>> G = bn.plot(model)
    >>>
    >>> # Sampling
    >>> df = bn.sampling(model, n=10000)
    >>>
    >>> # Structure learning of sampled dataset
    >>> model_sl = bn.structure_learning.fit(df, methodtype='hc', scoretype='bic')
    >>>
    >>> # Plot based on structure learning of sampled data
    >>> bn.plot(model_sl, pos=G['pos'])
    >>>
    >>> # Compare networks and make plot
    >>> bn.compare_networks(model, model_sl, pos=G['pos'])
    """
    assert isinstance(pd.DataFrame(), type(df)), 'df must be of type pd.DataFrame()'
    assert (scoretype=='bic') | (scoretype=='k2') | (scoretype=='bdeu'), 'scoretype must be string: "bic", "k2" or "bdeu"'
    assert (methodtype=='hc') | (methodtype=='ex') | (methodtype=='cs') | (methodtype=='exhaustivesearch') | (methodtype=='hillclimbsearch') | (methodtype=='constraintsearch'), 'Methodtype string is invalid'  # noqa

    if isinstance(white_list, str): white_list = [white_list]
    if isinstance(black_list, str): black_list = [black_list]
    if (white_list is not None) and len(white_list)==0: white_list = None
    if (black_list is not None) and len(black_list)==0: black_list = None
    if (bw_list_method is None): bw_list_method='enforce'

    config = {}
    config['verbose'] = verbose
    config['method'] = methodtype
    config['scoring'] = scoretype
    config['black_list'] = black_list
    config['white_list'] = white_list
    config['bw_list_method'] = bw_list_method
    config['max_indegree'] = max_indegree
    config['epsilon'] = epsilon
    config['max_iter'] = max_iter

    # Show warnings
    # PGMPY_VER = version.parse(pgmpy.__version__)>version.parse("0.1.9")  # Can be removed if pgmpy >v0.1.9
    # if (not PGMPY_VER) and ((black_list is not None) or (white_list is not None)):
    #     if config['verbose']>=2: print('[bnlearn] >Warning: black_list and white_list only works for pgmpy > v0.1.9')  # Can be removed if pgmpy >v0.1.9
    if df.shape[1]>10 and df.shape[1]<15:
        if config['verbose']>=2: print('[bnlearn] >Warning: Computing DAG with %d nodes can take a very long time!' %(df.shape[1]))
    if (black_list is not None) and methodtype!='hc':
        if config['verbose']>=2: print('[bnlearn] >Warning: blacklist only works in case of methodtype="hc"')
    if (white_list is not None) and methodtype!='hc':
        if config['verbose']>=2: print('[bnlearn] >Warning: white_list only works in case of methodtype="hc"')
    if (max_indegree is not None) and methodtype!='hc':
        if config['verbose']>=2: print('[bnlearn] >Warning: max_indegree only works in case of methodtype="hc"')

    if config['verbose']>=3: print('[bnlearn] >Computing best DAG using [%s]' %(config['method']))

    # Make sure columns are of type string
    df.columns = df.columns.astype(str)
    # Filter on white_list and black_list
    df = _white_black_list(df, white_list, black_list, bw_list_method=config['bw_list_method'], verbose=verbose)

    # ExhaustiveSearch can be used to compute the score for every DAG and returns the best-scoring one:
    if config['method']=='ex' or config['method']=='exhaustivesearch':
        """The first property makes exhaustive search intractable for all but very small networks,
        the second prohibits efficient local optimization algorithms to always find the optimal
        structure. Thus, identifying the ideal structure is often not tractable.
        Despite this bad news, heuristic search strategies often yield good results
        if only a few nodes are involved (read: less than 5)."""
        if (df.shape[1]>15) and (config['verbose']>=3):
            print('[bnlearn] >Warning: Structure learning with more than 15 nodes is computationally not feasible with exhaustivesearch. Use hillclimbsearch or constraintsearch instead!!')  # noqa
        out = _exhaustivesearch(df, scoretype=config['scoring'], verbose=config['verbose'])

    # HillClimbSearch
    if config['method']=='hc' or config['method']=='hillclimbsearch':
        out = _hillclimbsearch(df,
                               scoretype=config['scoring'],
                               black_list=config['black_list'],
                               white_list=config['white_list'],
                               max_indegree=config['max_indegree'],
                               bw_list_method=config['bw_list_method'],
                               epsilon=config['epsilon'],
                               max_iter=config['max_iter'],
                               verbose=config['verbose'],
                               )

    # Constraint-based Structure Learning
    if config['method']=='cs' or config['method']=='constraintsearch':
        """Constraint-based Structure Learning
        A different, but quite straightforward approach to build a DAG from data is this:
        identify independencies in the data set using hypothesis tests, then construct a
        DAG (pattern) according to the identified (conditional) independencies.
        Independencies in the data can be identified using chi2 conditional independence tests."""
        out = _constraintsearch(df, verbose=config['verbose'])

    # Setup similarity matrix
    adjmat = _dag2adjmat(out['model'])

    # adjmat = pd.DataFrame(data=False, index=out['model'].nodes(), columns=out['model'].nodes()).astype('Bool')
    # # Fill adjmat with edges
    # edges = out['model'].edges()
    # for edge in edges:
    #     adjmat.loc[edge[0], edge[1]]=True
    # adjmat.index.name = 'source'
    # adjmat.columns.name = 'target'

    # Store
    out['adjmat'] = adjmat
    out['config'] = config

    # return
    return(out)
865dd11638a8e9444818678b3e8d83f90647ce9b
3,657,247
def handle_index():
    """
    Handles the index page: prepares the HTML and sends it back to the client.
    :return:
    """
    return render_template("index.html")
eaaa2c3028983c1e5ed29a45fe6ff3db0a8a7482
3,657,248
def get_polynomial_coefficients(degree=5):
    """
    Return a list with coefficient names,
    [1 x y x^2 xy y^2 x^3 ...]
    """
    names = ["1"]
    for exp in range(1, degree + 1):  # 0, ..., degree
        for x_exp in range(exp, -1, -1):
            y_exp = exp - x_exp
            if x_exp == 0:
                x_str = ""
            elif x_exp == 1:
                x_str = r"$x$"
            else:
                x_str = rf"$x^{x_exp}$"
            if y_exp == 0:
                y_str = ""
            elif y_exp == 1:
                y_str = r"$y$"
            else:
                y_str = rf"$y^{y_exp}$"
            names.append(x_str + y_str)
    return names
9369841215045e925a3453b83be9dc49c9be7b92
3,657,249
from typing import Dict
import pickle

def _get_configured_credentials() -> Dict[str, bytes]:
    """
    Get the encrypted credentials stored in disk
    """
    path = get_credentials_path()

    credentials: Dict[str, bytes]

    with open(path, "rb") as file_handle:
        credentials = pickle.load(file_handle)

        if len(credentials) == 0:
            raise ConfigurationError(
                "You have not setup your credentials yet. "
                "Please do so by using 'omigami credentials-helper' CLI functionality and try again."
            )

        if not all(key in ["k", "u", "p"] for key in credentials.keys()):
            raise ConfigurationError(
                "Something seems wrong with your credentials. "
                "Please, run 'omigami credentials-helper --unset' to remove them and then set them again."
            )

    return credentials
824aeb18f4e5bed609008c6594d86666502b2339
3,657,250
def open(request: HttpRequest, *args, **kwargs) -> HttpResponse:
    """
    Create a temporary project from a single source.

    This view allows for all users, including anonymous users, to create a
    temporary project which they can later save as a permanent project if they
    wish. It aims to be a quick way to start a project and preview publishing
    of a file.

    TODO: See https://github.com/stencila/hub/pull/552 for more todos
    """
    if request.method == "GET":
        # TODO: If a GET request attempt to get source from end of URL or a query parameter
        return render(request, "projects/open.html")

    if request.method == "POST":
        viewset = ProjectsViewSet.init("create", request, args, kwargs)
        serializer = viewset.get_serializer(data=dict(temporary=True, public=True))
        serializer.is_valid(raise_exception=True)
        project = serializer.create(serializer.validated_data)

        url = request.POST.get("url")
        if url:
            Source.from_address(url, project=project, path="main")

        file = request.FILES.get("file")
        if file:
            UploadSource.objects.create(project=project, path=file.name, file=file)

        # TODO: Make the source the project's main file. How to do before pulling it?
        # TODO: Create a newer simpler job preview page, that is visible to
        # anon users and redirect to that instead of to the project overview page
        # job = source.pull()

        return redir("ui-projects-retrieve", "temp", project.name)

    raise Http404
f9c98eb829ec8d46b889c04308c4ea59fc2c4eb6
3,657,251
def kabsch_rotate(P, Q):
    """
    Rotate matrix P unto matrix Q using Kabsch algorithm
    """
    U = kabsch(P, Q)

    # Rotate P
    P = np.dot(P, U)
    return P
2be9c94901b27205ec4720b16ab6e81f34b1c6d6
3,657,252
def matrix(
    odoo=ODOO_VERSIONS, pg=PG_VERSIONS, odoo_skip=frozenset(), pg_skip=frozenset()
):
    """All possible combinations.

    We compute the variable matrix here instead of in ``.travis.yml`` because
    this generates faster builds, given the scripts found in ``hooks``
    directory are already multi-version-build aware.
    """
    return map(
        dict,
        product(
            product(("ODOO_MINOR",), ODOO_VERSIONS & odoo - odoo_skip),
            product(("DB_VERSION",), PG_VERSIONS & pg - pg_skip),
        ),
    )
034e791d2e10e9f691df3e6c458722549c59a89a
3,657,253
import copy

def iterate_pagerank(corpus, damping_factor):
    """
    Return PageRank values for each page by iteratively updating
    PageRank values until convergence.

    Return a dictionary where keys are page names, and values are
    their estimated PageRank value (a value between 0 and 1). All
    PageRank values should sum to 1.
    """
    # Initialize a dict with {"page": 1/n} for all pages in corpus
    new_dist = dict([(page, 1 / len(corpus)) for page in corpus])
    finished = False

    while not finished:
        # Make copy before changing
        prev_dist = copy.deepcopy(new_dist)
        for page in corpus:
            # Run the iterative algorithm on each page
            new_dist[page] = iter_algorithm(damping_factor, len(corpus), page, corpus, new_dist)

        # If any page has a difference over .001 from the previous run, the while loop will continue
        for pg in new_dist:
            finished = True
            if abs(prev_dist[pg] - new_dist[pg]) > 0.001:
                finished = False
                break

    return new_dist
bc51bd946d8fc617222303ffe30507695ee5ae33
3,657,254
def user_enabled(inst, opt):
    """
    Check whether the option is enabled.

    :param inst: instance from content object init
    :param opt: Option to be checked
    :return: True if enabled, False if disabled or not present
    """
    return opt in inst.settings and inst.settings[opt]
3b2a5a1534ff779178eb4bd6b839b66c0b07864f
3,657,255
async def get_buttons_data(client: Client, message: Message):
    """ Get callback_data and urls of all the inline buttons of the message you replied to. """
    reply_message = message.reply_to_message
    if reply_message and reply_message.reply_markup:
        if reply_message.reply_markup.inline_keyboard:
            row_lines = []
            for i, row in enumerate(reply_message.reply_markup.inline_keyboard):
                row_buttons = []
                for button in row:
                    if button.callback_data:
                        data = button.callback_data
                    elif button.url:
                        data = button.url
                    else:
                        continue
                    row_buttons.append(f"<i>{quote_html(button.text)}:</i> <code>{quote_html(data)}</code>")
                buttons = "\n".join(row_buttons)
                row_lines.append(f"<b>Row {i + 1}:</b>\n{buttons}")
            if row_lines:
                clean_time = 20
                await message.edit_text("\n\n".join(row_lines))
            else:
                clean_time = 4
                await message.edit_text("There is no any callback_data or url button inside this keyboard.")
            return await clean_up(client, message.chat.id, message.message_id, clear_after=clean_time)
    await message.edit_text("Reply to a message containing an inline keyboard to extract callback_data and urls.")
    await clean_up(client, message.chat.id, message.message_id, clear_after=4)
6567455d95781515fc6236bf867a042ba550736f
3,657,256
def update_document(
    *, db_session: Session = Depends(get_db), document_id: PrimaryKey, document_in: DocumentUpdate
):
    """Update a document."""
    document = get(db_session=db_session, document_id=document_id)
    if not document:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=[{"msg": "The document with this id does not exist."}],
        )

    document = update(db_session=db_session, document=document, document_in=document_in)
    return document
15dc23aeb10950da19a6732c43d7675447f6a45c
3,657,257
def from_jabsorb(request, seems_raw=False):
    """
    Transforms a jabsorb request into a more Python data model (converts maps
    and lists)

    :param request: Data coming from Jabsorb
    :param seems_raw: Set it to True if the given data seems to already have
                      been parsed (no Java class hint). If True, the lists
                      will be kept as lists instead of being converted to
                      tuples.
    :return: A Python representation of the given data
    """
    if isinstance(request, (tuple, set, frozenset)):
        # Special case : JSON arrays (Python lists)
        return type(request)(from_jabsorb(element) for element in request)

    elif isinstance(request, list):
        # Check if we were a list or a tuple
        if seems_raw:
            return list(from_jabsorb(element) for element in request)
        else:
            return tuple(from_jabsorb(element) for element in request)

    elif isinstance(request, dict):
        # Dictionary
        java_class = request.get(JAVA_CLASS)
        json_class = request.get(JSON_CLASS)
        seems_raw = not java_class and not json_class

        if java_class:
            # Java Map ?
            if JAVA_MAPS_PATTERN.match(java_class) is not None:
                return HashableDict((from_jabsorb(key), from_jabsorb(value))
                                    for key, value in request["map"].items())

            # Java List ?
            elif JAVA_LISTS_PATTERN.match(java_class) is not None:
                return HashableList(from_jabsorb(element) for element in request["list"])

            # Java Set ?
            elif JAVA_SETS_PATTERN.match(java_class) is not None:
                return HashableSet(from_jabsorb(element) for element in request["set"])

        # Any other case
        result = AttributeMap((from_jabsorb(key), from_jabsorb(value, seems_raw))
                              for key, value in request.items())

        # Keep JSON class information as is
        if json_class:
            result[JSON_CLASS] = json_class

        return result

    elif not _is_builtin(request):
        # Bean
        for attr in dir(request):
            # Only convert public fields
            if not attr[0] == '_':
                # Field conversion
                setattr(request, attr, from_jabsorb(getattr(request, attr)))

        return request

    else:
        # Any other case
        return request
78e8eefc0b234a5b6cd09cebff76e5cb716b54c2
3,657,258
def write_board_to_svg_file(board, file_name, hex_edge=50, hex_offset=0, board_padding=None,
                            pointy_top=True, trim_board=True, style=None):
    """
    Writes given board to a svg file of given name.

    :param board: 2 dimensional list of fields, each represented as a number
    :param file_name: name of the output file
    :param hex_edge: length of hexagon's side (in pixels)
    :param hex_offset: distance between side of one hexagon and its neighbour (in pixels)
    :param board_padding: padding of the board (in pixels)
    :param pointy_top: specifies if hexagons should be pointy topped or flat topped
    :param trim_board: if True, fields with a value 0 will be removed during transformation
    :param style: css style (as string)
    """
    if board_padding is None:
        board_padding = hex_edge

    styles = ['.board { fill: white } .hex-field { fill: white; stroke: black } .hex-field-0 { fill: black }']
    if style is not None:
        styles.append(style)

    hexagons = transform_board_into_hexagons(board, hex_edge, hex_offset, pointy_top, trim_board)
    min_x, min_y, max_x, max_y = calculate_bounding_box(hexagons)

    offset = (board_padding - min_x, board_padding - min_y)
    hexagons = move_hexagons_by_offset(hexagons, offset)

    board_size = (2 * board_padding + max_x - min_x, 2 * board_padding + max_y - min_y)

    svg_image = create_svg_image(styles, board_size, hexagons)
    svg_image.saveas(file_name)
    return svg_image
4cbf895a4a91e0434e31fdad459c3354927c5e2b
3,657,259
def ensure_conf(app):
    """
    Ensure for the given app that the redbeat_conf attribute is set to an
    instance of the RedBeatConfig class.
    """
    name = 'redbeat_conf'
    app = app_or_default(app)
    try:
        config = getattr(app, name)
    except AttributeError:
        config = RedBeatConfig(app)
        setattr(app, name, config)
    return config
673680aafbc4d76b1ae7f7740e53fd7f54740acf
3,657,260
def check_if_process_present(string_to_find):
    """Checks if process runs on machine

    Parameters:
    string_to_find (string): process we want to find

    Returns:
    found (bool): True if found process running
    """
    output = check_output(["ps", "-ax"], universal_newlines=True)
    if string_to_find in output:
        return True
    else:
        return False
3a153e2160000ec1c9d4c0c28d1631179f9e88c3
3,657,261
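# Illustrative call of check_if_process_present() above; "python" is just an
# example process name to search for in the `ps -ax` output.
if check_if_process_present("python"):
    print("found a running python process")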
def consulta_dicionario(nivel):
    """
    Input: parameter with the selected level (easy, medium, hard)
    Task: determine which dictionary should be consulted
    Output: dictionary parameters (text, gaps, answer key)
    """
    nivel_dicionario = nivel
    if nivel_dicionario == 'facil':
        texto = dicionario_nivel_facil['texto']
        lacunas = dicionario_nivel_facil['lacunas']
        gabarito = dicionario_nivel_facil['gabarito']
    elif nivel_dicionario == 'medio':
        texto = dicionario_nivel_medio['texto']
        lacunas = dicionario_nivel_medio['lacunas']
        gabarito = dicionario_nivel_medio['gabarito']
    elif nivel_dicionario == 'dificil':
        texto = dicionario_nivel_dificil['texto']
        lacunas = dicionario_nivel_dificil['lacunas']
        gabarito = dicionario_nivel_dificil['gabarito']
    return texto, lacunas, gabarito
1453298f791cca010f75abad9d0d37a28c1c8ae5
3,657,262
from typing import List


def stats_check(
    main_table: Table,
    compare_table: Table,
    checks: List[OutlierCheck] = [],
    max_rows_returned: int = 100,
):
    """
    :param main_table: main table
    :type main_table: table object
    :param compare_table: table to be compared
    :type compare_table: table object
    :param checks: check class object, which represent boolean expression
    :type checks: Check
    :param max_rows_returned: number of row returned if the check fails.
    :type max_rows_returned: int
    """
    return AgnosticStatsCheck(
        main_table=main_table,
        compare_table=compare_table,
        checks=checks,
        max_rows_returned=max_rows_returned,
    )
39f0c0b2bad74a7878453a3fe11def36f1971a5f
3,657,263
async def get_collectible_name(collectible_id: int, db: AsyncSession = Depends(get_db_session)):
    """Gets the collectible name"""
    result = await destiny_items.get_collectible(db=db, collectible_id=collectible_id)
    return NameModel(name=result.name) if result else NameModel(name=None)
18fd3856d5145a004cf20343b5e8782a13a35845
3,657,264
def prime_factors(n):
    """
    Return the set of distinct prime factors of n

    :param n: int
    :return: set
    """
    all_factors = set()
    t = n
    # factor out all powers of 2
    while t % 2 == 0:
        t //= 2
        all_factors.add(2)
    # check the odd divisors greater than 2
    d = 3
    while d * d <= t:
        while not t % d:
            t //= d
            all_factors.add(d)
        d += 2
    # whatever remains greater than 1 is itself prime
    if t > 1:
        all_factors.add(t)
    return all_factors
09aad44a7b04492c225447eaa15590fa630a43cd
3,657,265
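# Illustrative check for prime_factors() above: 360 = 2**3 * 3**2 * 5, so its
# distinct prime factors are expected to be {2, 3, 5}.
assert prime_factors(360) == {2, 3, 5}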
def calculate_central_age(Ns, Ni, zeta, seZeta, rhod, Nd, sigma=0.15):
    """Function to calculate central age."""

    Ns = np.array(Ns)
    Ni = np.array(Ni)

    # We just replace 0 counts with a low value, the age will be rounded to
    # 2 decimals. That should take care of the zero count issue.
    Ns = np.where(Ns == 0, 1e-10, Ns)  # Do this to account for 0 track counts
    Ni = np.where(Ni == 0, 1e-10, Ni)  # Do this to account for 0 track counts

    # Calculate mj
    LAMBDA = 1.55125e-4
    G = 0.5
    m = Ns + Ni
    p = Ns / m

    theta = np.sum(Ns) / np.sum(m)

    for i in range(0, 30):
        w = m / (theta * (1 - theta) + (m - 1) * theta**2 * (1 - theta)**2 * sigma**2)
        sigma = sigma * np.sqrt(np.sum(w**2 * (p - theta)**2) / np.sum(w))
        theta = np.sum(w * p) / np.sum(w)

    t = (1.0 / LAMBDA) * np.log(
        1.0 + G * LAMBDA * zeta * rhod * (theta) / (1.0 - theta))
    se = np.sqrt(1 / (theta**2 * (1.0 - theta)**2 * np.sum(w)) + 1.0 / Nd +
                 (seZeta / zeta)**2) * t

    return {"Central": np.round(t, 2), "se": np.round(se, 2),
            "sigma": np.round(sigma, 2)}
29b360ce1df7cfa376a86b989fb7899137d110cc
3,657,266
import requests


def _get_cross_reference_token(auth_cookie: str) -> str:
    """Gets a new cross reference token affiliated with the Roblox auth cookie.

    :param auth_cookie: Your Roblox authentication cookie.
    :return: A fresh cross reference token.
    """
    session: requests.Session = _get_session(auth_cookie)

    response: requests.Response = session.post("https://auth.roblox.com/v2/logout")
    try:
        token = response.headers["x-csrf-token"]
    except KeyError:
        raise Exception("Please specify a valid auth cookie")

    return token
63041ddeb31ccdc0a72a721d000968588580e816
3,657,267
def erase_not_displayed(client):
    """Erase all non-displayed models from memory.

    Args:
        client (obj): creopyson Client.

    Returns:
        None
    """
    return client._creoson_post("file", "erase_not_displayed")
c3981fcce00b5d5440fcbdbe8781e9e6229a8fa7
3,657,268
def reset_position_for_friends_image_details_from_voter(voter, twitter_profile_image_url_https,
                                                        facebook_profile_image_url_https):
    """
    Reset all position image urls in PositionForFriends from we vote image details
    :param voter:
    :param twitter_profile_image_url_https:
    :param facebook_profile_image_url_https:
    :return:
    """
    position_list_manager = PositionListManager()
    position_manager = PositionManager()
    stance_we_are_looking_for = ANY_STANCE
    friends_vs_public = FRIENDS_ONLY
    speaker_image_url_https = None
    reset_all_position_image_urls_results = []

    if positive_value_exists(twitter_profile_image_url_https):
        speaker_image_url_https = twitter_profile_image_url_https
    elif positive_value_exists(facebook_profile_image_url_https):
        speaker_image_url_https = facebook_profile_image_url_https

    positions_for_voter_results = position_list_manager.retrieve_all_positions_for_voter(
        voter.id, voter.we_vote_id, stance_we_are_looking_for, friends_vs_public)

    if positions_for_voter_results['position_list_found']:
        friends_position_list = positions_for_voter_results['position_list']
        for position_object in friends_position_list:
            reset_position_image_urls_results = position_manager.reset_position_image_details(
                position_object, speaker_image_url_https=speaker_image_url_https)
            reset_all_position_image_urls_results.append(reset_position_image_urls_results)

    results = {
        'success': True,
        'reset_all_position_results': reset_all_position_image_urls_results
    }
    return results
e2483bf781029a7481dead0a168775b1a9223978
3,657,269
def get_analysis(panda_data):
    """
    Get Analysis of CSV Data
    :param panda_data: Panda dataframes
    :return: panda data frames
    """
    # Create objects for analysis
    sentiment_object = SentimentConfig.sentiment_object
    ner_object = SentimentConfig.ner_object

    # Get list of sentences
    list = panda_data['text'].to_list()

    sentiment_result = np.array([sentiment_object.get_sentiment(i) for i in list])
    panda_data["Positive Score"] = sentiment_result[:, 2]
    panda_data["Negative Score"] = sentiment_result[:, 0]
    panda_data["Neutral Score"] = sentiment_result[:, 1]
    panda_data["Sentiment Result"] = sentiment_result[:, 3]

    # NER Data Analysis Added
    ner_result = np.array([ner_object.get_ner(i) for i in list])
    panda_data["Entity Result"] = ner_result

    # Adjective Analysis Added
    adjective_result = np.array([ner_object.get_adjectives(i) for i in list])
    panda_data["Adjective Result"] = adjective_result

    return panda_data
014bc1543f67dfe561bce62d9b5e5f974b28db2a
3,657,270
def create_coordinate_string_dict():
    """The 31 cord patterns."""
    w = 120
    h = 120
    return {
        47: (0, 0),
        57: (1*-w, 0),
        58: (2*-w, 0),
        16: (4*-w, 0),
        35: (5*-w, 0),
        36: (6*-w, 0),

        38: (0, 1*-h),
        13: (1*-w, 1*-h),
        14: (2*-w, 1*-h),
        15: (3*-w, 1*-h),
        25: (4*-w, 1*-h),
        17: (5*-w, 1*-h),
        27: (6*-w, 1*-h),
        37: (7*-w, 1*-h),

        1357: (0, 2*-h),
        1571: (1*-w, 2*-h),
        7135: (2*-w, 2*-h),
        3583: (4*-w, 2*-h),
        274: (5*-w, 2*-h),
        1361: (6*-w, 2*-h),

        1371: (0, 3*-h),
        15037: (1*-w, 3*-h),
        3573: (2*-w, 3*-h),
        416: (4*-w, 3*-h),
        258: (6*-w, 3*-h),

        1753: (0, 4*-h),
        1351: (1*-w, 4*-h),
        3175: (2*-w, 4*-h),
        2572: (4*-w, 4*-h),
        638: (5*-w, 4*-h),
        1471: (6*-w, 4*-h),
    }
4abc2b246345569780db2dc9f6ef71c56ae86528
3,657,271
def all_but_ast(check):
    """Only passes AST to check."""
    def _check_wrapper(contents, ast, **kwargs):
        """Wrap check and passes the AST to it."""
        del contents
        del kwargs

        return check(ast)

    return _check_wrapper
71f3e3b8649a3a9885ded7eec248894cca8083c4
3,657,272
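# Illustrative use of all_but_ast() above: wrap a check that only needs the
# AST, then call it with the full (contents, ast, **kwargs) signature.
count_nodes = all_but_ast(lambda ast: len(ast))
assert count_nodes("file contents", [1, 2, 3], path="example.py") == 3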
import readline


def get_history_items():
    """ Get all history items """
    return [
        readline.get_history_item(i)
        for i in xrange(1, readline.get_current_history_length() + 1)
    ]
b3600ca6581c11a46c2ea92d82b9d5aefcded49b
3,657,273
from typing import List
from pathlib import Path
from typing import Tuple


def generate_nucmer_commands(
    filenames: List[Path],
    outdir: Path = Path("."),
    nucmer_exe: Path = pyani_config.NUCMER_DEFAULT,
    filter_exe: Path = pyani_config.FILTER_DEFAULT,
    maxmatch: bool = False,
) -> Tuple[List, List]:
    """Return list of NUCmer command-lines for ANIm.

    :param filenames: a list of paths to input FASTA files
    :param outdir: path to output directory
    :param nucmer_exe: location of the nucmer binary
    :param filter_exe: location of the delta-filter binary
    :param maxmatch: Boolean flag indicating to use NUCmer's -maxmatch option

    The first element returned is a list of NUCmer commands, and the second a
    corresponding list of delta_filter_wrapper.py commands. The NUCmer commands
    should each be run before the corresponding delta-filter command.

    TODO: This return value needs to be reworked as a collection.

    Loop over all FASTA files generating NUCmer command lines for each
    pairwise comparison.
    """
    nucmer_cmdlines, delta_filter_cmdlines = [], []
    filenames = sorted(filenames)  # enforce ordering of filenames
    for idx, fname1 in enumerate(filenames[:-1]):
        for fname2 in filenames[idx + 1:]:
            ncmd, dcmd = construct_nucmer_cmdline(
                fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
            )
            nucmer_cmdlines.append(ncmd)
            delta_filter_cmdlines.append(dcmd)
    return (nucmer_cmdlines, delta_filter_cmdlines)
28923f86762d73fcdd0a4f9681da09b6ab8368cb
3,657,274
def normalize(*args):
    """Scale a sequence of occurrences into probabilities that sum up to 1."""
    total = sum(args)
    return [arg / total for arg in args]
49b0f998fe58b2c85da5a993e542d91bb5dd5382
3,657,275
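# Illustrative use of normalize() above: three raw counts become
# probabilities that sum to 1.
probs = normalize(2, 3, 5)
assert probs == [0.2, 0.3, 0.5] and abs(sum(probs) - 1.0) < 1e-12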
import os


def get_CZI_zstack(filename, frame, channel, filepath=None, img_info=None):
    """
    Obtains a single z-stack from a 3D imaging time-series for a specified
    time and channel.

    Parameters
    ----------
    filename : str
        Name of the file from which to retrieve the z-stack.
    frame : int
        The temporal slice of the image series from which to retrieve the z-stack.
    channel : int
        The channel from which to retrieve the z-stack.
    filepath : str, optional
        Path to the file.
    img_info : tuple of ints, optional
        5-tuple containing lengths of the `X`, `Y`, `Z` (spatial), `T` (temporal)
        dimensions of the image series, and the number of channels, `num_channels`.
        E.g. (sizeX,sizeY,sizeZ,sizeT,num_channels). See output of get_CZI_metadata().
        Pass these pre-computed values for increased speed in batch processing.

    Returns
    -------
    zstack : numpy.ndarray, or None
        Z-stack of the image series specified by the desired `frame`; contains
        3 spatial dimensions. If loading is unsuccessful, `None` is returned.
    """
    # prepare file name, check that file exists
    if not (filepath is None):
        czi_image = os.path.join(filepath, filename)
    else:
        czi_image = filename

    if not os.path.exists(czi_image):
        return None

    # retrieve image dimensions, and number of channels
    if img_info is None:
        (sizeX, sizeY, sizeZ, sizeT, num_channels), _ = get_CZI_metadata(filename, filepath=filepath)
    else:
        assert len(img_info) == 5
        (sizeX, sizeY, sizeZ, sizeT, num_channels) = img_info

    # make sure frame and channel are in bounds
    assert frame < sizeT
    assert channel < num_channels

    # initialize array and load z-stack
    zstack = np.zeros((sizeZ, sizeY, sizeX))
    with bioformats.ImageReader(czi_image) as reader:
        for z in range(sizeZ):
            zstack[z, :, :] = reader.read(t=frame, z=z, c=channel)

    return zstack
dbbfaf7f9cc50c99cd025236dff2480be2ac6017
3,657,276
import requests


def _make_request(
    resource: str,
    from_currency_code: str,
    to_currency_code: str,
    timestamp: int,
    access_token: str,
    exchange_code: str,
    num_records: int,
    api_version: str
) -> requests.Response:
    """
    API documentation for cryptocompare can be found at
    https://min-api.cryptocompare.com/documentation
    """
    base_url = f"https://min-api.cryptocompare.com/data/{api_version}/{resource}"
    params = {
        "fsym": from_currency_code,
        "tsym": to_currency_code,
        "e": exchange_code,
        "limit": num_records,
        "toTs": timestamp,
        "api_key": access_token
    }
    return requests.get(base_url, params=params)
4da7c3cab42b742b106fafb4c1585e6ecb250121
3,657,277
def dependencies_order_of_build(target_contract, dependencies_map):
    """ Return an ordered list of contracts that is sufficient to successfully
    deploy the target contract.

    Note: This function assumes that the `dependencies_map` is an acyclic graph.
    """
    if len(dependencies_map) == 0:
        return [target_contract]

    if target_contract not in dependencies_map:
        raise ValueError('no dependencies defined for {}'.format(target_contract))

    order = [target_contract]
    todo = list(dependencies_map[target_contract])

    while len(todo):
        target_contract = todo.pop(0)
        target_pos = len(order)

        for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all its dependencies
            if dependency in order:
                target_pos = order.index(dependency)
            else:
                todo.append(dependency)

        order.insert(target_pos, target_contract)

    order.reverse()
    return order
e5a67247aad8b37c29e274eac80f28414f59427f
3,657,278
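# Illustrative call of dependencies_order_of_build() above with a hypothetical
# acyclic dependency map: Token depends on SafeMath, so SafeMath comes first.
order = dependencies_order_of_build('Token', {'Token': ['SafeMath'], 'SafeMath': []})
assert order == ['SafeMath', 'Token']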
def projective_error_function(params, args):
    """
    :param params:
    :param args:
    :return:
    """
    # f, cx, cy, k0, k1
    project_params = params[0:5]
    f, cx, cy, k0, k1 = project_params

    K = eye(3, 3)
    K[0, 0] = f
    K[1, 1] = f
    K[0, 2] = k0
    K[1, 2] = k1

    model, image = args
    tp = params[5:]
    _, R, t = transform(tp, model)
    Rt = np.c_[R, t.transpose()]

    # Reconstruct camera matrix
    P = K @ Rt

    # Project
    X = np.zeros((4, len(model[0])))
    X[0:3] = model
    X[3] = 1

    PX = P @ X
    image_star = PX[0:2] / PX[2]

    dataShape = image.shape
    nData = dataShape[0] * dataShape[1]
    imagevec = image.reshape(1, nData)[0]
    image_star_vec = image_star.reshape(1, nData)[0]

    return imagevec - image_star_vec
5b1fe8265478a379d91178474e392797798c9c0f
3,657,279
import warnings


def _transform_masks(y, transform, data_format=None, **kwargs):
    """Based on the transform key, apply a transform function to the masks.

    Refer to :mod:`deepcell.utils.transform_utils` for more information about
    available transforms. Caution for unknown transform keys.

    Args:
        y (numpy.array): Labels of ``ndim`` 4 or 5
        transform (str): Name of the transform, one of
            ``{"deepcell", "disc", "watershed", None}``.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        kwargs (dict): Optional transform keyword arguments.

    Returns:
        numpy.array: the output of the given transform function on ``y``.

    Raises:
        ValueError: Rank of ``y`` is not 4 or 5.
        ValueError: Channel dimension of ``y`` is not 1.
        ValueError: ``transform`` is invalid value.
    """
    valid_transforms = {
        'deepcell',  # deprecated for "pixelwise"
        'pixelwise',
        'disc',
        'watershed',  # deprecated for "outer-distance"
        'watershed-cont',  # deprecated for "outer-distance"
        'inner-distance',
        'outer-distance',
        'centroid',  # deprecated for "inner-distance"
        'fgbg'
    }

    if data_format is None:
        data_format = K.image_data_format()

    if y.ndim not in {4, 5}:
        raise ValueError('`labels` data must be of ndim 4 or 5. Got', y.ndim)

    channel_axis = 1 if data_format == 'channels_first' else -1

    if y.shape[channel_axis] != 1:
        raise ValueError('Expected channel axis to be 1 dimension. Got',
                         y.shape[1 if data_format == 'channels_first' else -1])

    if isinstance(transform, str):
        transform = transform.lower()
        if transform not in valid_transforms and transform is not None:
            raise ValueError('`{}` is not a valid transform'.format(transform))

    if transform in {'pixelwise', 'deepcell'}:
        if transform == 'deepcell':
            warnings.warn('The `{}` transform is deprecated. Please use the '
                          '`pixelwise` transform instead.'.format(transform),
                          DeprecationWarning)
        dilation_radius = kwargs.pop('dilation_radius', None)
        separate_edge_classes = kwargs.pop('separate_edge_classes', False)

        edge_class_shape = 4 if separate_edge_classes else 3

        if data_format == 'channels_first':
            shape = tuple([y.shape[0]] + [edge_class_shape] + list(y.shape[2:]))
        else:
            shape = tuple(list(y.shape[0:-1]) + [edge_class_shape])

        # using uint8 since should only be 4 unique values.
        y_transform = np.zeros(shape, dtype=np.uint8)

        for batch in range(y_transform.shape[0]):
            if data_format == 'channels_first':
                mask = y[batch, 0, ...]
            else:
                mask = y[batch, ..., 0]

            y_transform[batch] = transform_utils.pixelwise_transform(
                mask, dilation_radius, data_format=data_format,
                separate_edge_classes=separate_edge_classes)

    elif transform in {'outer-distance', 'watershed', 'watershed-cont'}:
        if transform in {'watershed', 'watershed-cont'}:
            warnings.warn('The `{}` transform is deprecated. Please use the '
                          '`outer-distance` transform instead.'.format(transform),
                          DeprecationWarning)
        by_frame = kwargs.pop('by_frame', True)
        bins = kwargs.pop('distance_bins', None)

        distance_kwargs = {
            'bins': bins,
            'erosion_width': kwargs.pop('erosion_width', 0),
        }

        # If using 3d transform, pass in scale arg
        if y.ndim == 5 and not by_frame:
            distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])

        if data_format == 'channels_first':
            shape = tuple([y.shape[0]] + list(y.shape[2:]))
        else:
            shape = y.shape[0:-1]
        y_transform = np.zeros(shape, dtype=K.floatx())

        if y.ndim == 5:
            if by_frame:
                _distance_transform = transform_utils.outer_distance_transform_movie
            else:
                _distance_transform = transform_utils.outer_distance_transform_3d
        else:
            _distance_transform = transform_utils.outer_distance_transform_2d

        for batch in range(y_transform.shape[0]):
            if data_format == 'channels_first':
                mask = y[batch, 0, ...]
            else:
                mask = y[batch, ..., 0]
            y_transform[batch] = _distance_transform(mask, **distance_kwargs)

        y_transform = np.expand_dims(y_transform, axis=-1)

        if bins is not None:
            # convert to one hot notation
            # uint8's max value of 255 seems like a generous limit for binning.
            y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
        if data_format == 'channels_first':
            y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)

    elif transform in {'inner-distance', 'centroid'}:
        if transform == 'centroid':
            warnings.warn('The `{}` transform is deprecated. Please use the '
                          '`inner-distance` transform instead.'.format(transform),
                          DeprecationWarning)
        by_frame = kwargs.pop('by_frame', True)
        bins = kwargs.pop('distance_bins', None)

        distance_kwargs = {
            'bins': bins,
            'erosion_width': kwargs.pop('erosion_width', 0),
            'alpha': kwargs.pop('alpha', 0.1),
            'beta': kwargs.pop('beta', 1)
        }

        # If using 3d transform, pass in scale arg
        if y.ndim == 5 and not by_frame:
            distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])

        if data_format == 'channels_first':
            shape = tuple([y.shape[0]] + list(y.shape[2:]))
        else:
            shape = y.shape[0:-1]
        y_transform = np.zeros(shape, dtype=K.floatx())

        if y.ndim == 5:
            if by_frame:
                _distance_transform = transform_utils.inner_distance_transform_movie
            else:
                _distance_transform = transform_utils.inner_distance_transform_3d
        else:
            _distance_transform = transform_utils.inner_distance_transform_2d

        for batch in range(y_transform.shape[0]):
            if data_format == 'channels_first':
                mask = y[batch, 0, ...]
            else:
                mask = y[batch, ..., 0]
            y_transform[batch] = _distance_transform(mask, **distance_kwargs)

        y_transform = np.expand_dims(y_transform, axis=-1)

        if distance_kwargs['bins'] is not None:
            # convert to one hot notation
            # uint8's max value of 255 seems like a generous limit for binning.
            y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
        if data_format == 'channels_first':
            y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)

    elif transform == 'disc' or transform is None:
        dtype = K.floatx() if transform == 'disc' else np.int32
        y_transform = to_categorical(y.squeeze(channel_axis), dtype=dtype)
        if data_format == 'channels_first':
            y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)

    elif transform == 'fgbg':
        y_transform = np.where(y > 1, 1, y)
        # convert to one hot notation
        if data_format == 'channels_first':
            y_transform = np.rollaxis(y_transform, 1, y.ndim)
        # using uint8 since should only be 2 unique values.
        y_transform = to_categorical(y_transform, dtype=np.uint8)
        if data_format == 'channels_first':
            y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)

    return y_transform
278615037d78b35cacb641eca89918010cf9b2fd
3,657,280
import os


def root_dir():
    """ Returns root directory for this project """
    return os.path.dirname(os.path.realpath(__file__ + '/..'))
c9346df7838dd0a528613a7069c55d910373fe86
3,657,281
import os
import json


def query_subgraph(seeds, genes_top, output_path):  # pylint: disable=too-many-locals
    """
    This function queries the data, writes the resulting subgraph and returns
    a dictionary containing the number of nodes and edges.

    seeds: list
    genes_top: dict whose keys are genes and values are their ranks
    """
    genes_list = list(genes_top.keys())
    genes = set(seeds + genes_list)
    seeds_set = frozenset(seeds)

    # Produce the induced subgraph of genes in all networks.
    nodes_raw, edges_raw = query_sqlite(genes)

    # Only keep the nodes of interest.
    nodes = [node for node in nodes_raw if normalized_node_id(node["_id"]) in genes]
    edges = [
        edge
        for edge in edges_raw
        if (
            normalized_node_id(edge["_from"]) in genes
            and normalized_node_id(edge["_to"]) in genes
        )
    ]

    def node_is_seed(node_id):
        return normalized_node_id(node_id) in seeds_set

    # graph data
    cytoscape_nodes = [
        dict(
            data=cytoscape_node(
                node,
                genes_top.get(normalized_node_id(node["_id"])),
                seed=node_is_seed(node["_id"]),
            )
        )
        for node in nodes
    ]
    cytoscape_edges = [dict(data=cytoscape_edge(edge)) for edge in edges]
    cytoscape_data = dict(
        nodes=cytoscape_nodes,
        edges=cytoscape_edges,
    )
    cytoscape_path = os.path.join(output_path, "graph.json")
    with open(cytoscape_path, "w") as cytoscape_json:
        cytoscape_json.write(json.dumps(cytoscape_data))

    # graph metadata
    return dict(
        nodes=len(nodes),
        edges=len(edges),
    )
eace2fb32ec6488e3ad096f9570e3eefe97d042a
3,657,282
def get_cur_version():
    """ Get current apk version string """
    pkg_name = cur_activity.getPackageName()
    return str(
        cur_activity.getPackageManager().getPackageInfo(
            pkg_name, 0).versionName)
015d3368238edc10344c633d9cc491c79569f5f6
3,657,283
def checkpointload(checkpointfile):
    """Loads an hyperoptimizer checkpoint from file

    Returns a list of tuples (params, loss) referring to previous
    hyperoptimization trials
    """
    try:
        with open(checkpointfile, "rb") as f:
            return pkl.load(f)
    except (FileNotFoundError, EOFError):
        return []
906df00fcb209c979fd57c49a426f4c752b45753
3,657,284
from time import time
from array import array
import hmac
import base64
import os


def encrypt_password(password, key):
    """ Encrypts the password using the given key.

    Args:
        password (str): password to be encrypted
        key (str): key to be used to encrypt the password
    """
    h_hash = get_sha_hash
    uhash = h_hash(','.join(str(x) for x in
                            [repr(time()), repr(os.getpid()),
                             repr(len(password)), password, key]))[:16]
    k_enc, k_auth = h_hash('enc' + key + uhash), h_hash('auth' + key + uhash)
    pwd_len = len(password)
    password_stream = array('L', password + '0000'[pwd_len & 3:])
    x_key = expand_key(k_enc, pwd_len + 4)
    for i_cnt in range(len(password_stream)):
        password_stream[i_cnt] = password_stream[i_cnt] ^ x_key[i_cnt]
    cipher_t = uhash + password_stream.tostring()[:pwd_len]
    auth = hmac.new(cipher_t, k_auth).digest()
    encrypt_str = cipher_t + auth[:8]
    encoded_str = base64.encodestring(encrypt_str)
    encrypted_password = encoded_str.rstrip('\n')
    return encrypted_password
c267cfbd59f1859f999602f19c85af8eba47421b
3,657,285
import os


def prepare_test_data(datapath):
    """ Wrapper function to load the test dataset """
    print("Loading and encoding the test dataset")

    depth_test = np.array(pd.read_csv(os.path.join(datapath, 'test_depth.txt'), sep="\t", header=None))
    depth_test = depth_test.reshape(depth_test.shape[0], depth_test.shape[1], 1)

    exp_test = np.array(pd.read_csv(os.path.join(datapath, 'test_expression.txt'), sep="\t", header=None))
    exp_test = exp_test.reshape(exp_test.shape[0], exp_test.shape[1], 1)

    time_test = np.array(pd.read_csv(os.path.join(datapath, 'test_ref.txt'), sep="\t", header=None))
    time_test = time_test.reshape(time_test.shape[0], time_test.shape[1], 1)

    foldchange_test = np.array(pd.read_csv(os.path.join(datapath, 'test_foldchange.txt'), sep="\t", header=None))
    foldchange_test = foldchange_test.reshape(foldchange_test.shape[0], foldchange_test.shape[1], 1)

    weight_test = time_test * foldchange_test

    seq_test, y_test = load_sequence_data(datapath, 'test_sequences.csv')
    test_bed = pr.read_bed(os.path.join(datapath, "test_tiles.bed"), as_df=True)

    print('Test labels shape:', y_test.shape)
    print('Test features shape:', depth_test.shape, seq_test.shape, exp_test.shape, weight_test.shape)

    return depth_test, exp_test, weight_test, seq_test, y_test, test_bed
e34eb64d2297b5cd8194af292abfb022722d0885
3,657,286
def get_case_color_marker(case):
    """Get color and marker based on case."""
    black_o = ("#000000", "o")
    teal_D = ("#469990", "D")
    orange_s = ("#de9f16", "s")
    purple_v = ("#802f99", "v")

    bs = case["batch_size"]
    sub = case["subsampling"]
    mc = case["mc_samples"]

    if sub is None and mc == 0:  # only bs
        mapping = {2: purple_v, 8: orange_s, 32: teal_D, 128: black_o}
        try:
            return mapping[bs]
        except KeyError:
            warn(f"Could not map bs={bs} to color-marker-pair. Returning (black, o)")
            return black_o

    if sub is not None and mc == 0:  # only bs & sub
        return teal_D
    if sub is None and mc != 0:  # only bs & mc
        return orange_s
    if sub is not None and mc != 0:  # bs, sub & mc
        return purple_v
4a42fc784b9034e3996753bc6da18fbfebc66b16
3,657,287
def clean_integer_score(x):
    """Converts x from potentially a float or string into a clean integer,
    and replace NA and NP values with one string character"""
    try:
        x = str(int(float(x)))
    except Exception as exc:
        if isinstance(x, basestring):
            pass
        else:
            raise
        x = x.lower().strip()
    return 'A' if x == 'na (not assesible)' else 'P' if x == 'np (not performed)' else x
9ff2c911653421d51738bdb1bf8f381d3aa59820
3,657,288
def do_stuff2():
    """This is not right."""
    (first, second) = 1, 2, 3
    return first + second
dbb6f503e73cc0365dfa20fca54bebb174fcdadd
3,657,289
def rule_block_distributor(rule_param, src_cortical_area, dst_cortical_area, src_neuron_id, z_offset):
    """
    This rule helps to take a set of unique inputs from one cortical area and develop synaptic projections that can
    lead to a comprehensive set of unique connections that covers all the combinations of the input values.

    Note: This function is designed for the corner case of the destination cortical area being 1 dimensional in
    z direction
    """
    # todo: generalize this function so it takes the direction of the source and destination cortical areas as input
    candidate_list = list()
    block_list = blocks.z_block_refs(cortical_area=dst_cortical_area, x_ref=0, y_ref=0)
    source_x_depth = runtime_data.genome['blueprint'][src_cortical_area]['neuron_params']['block_boundaries'][0]

    for offset in range(source_x_depth):
        for block_ref in block_list:
            if blocks.block_ref_2_id(block_ref)[2] // (2 ** offset) % 2 == 0:
                for neuron in blocks.neurons_in_the_block(cortical_area=dst_cortical_area, block_ref=block_ref):
                    candidate_list.append(neuron)
    return candidate_list
b9201bfce68dc22b47bd20fc687b20f78ef1a08e
3,657,290
def get_extra(item_container):
    """ Returns the first matching image_url """
    if item_container.item.extra != '':
        return get_extra_data(item_container)
    item_container = item_container.get_parent()
    while item_container.item.app.name == 'dmsEduFolder':
        if item_container.item.extra != '':
            return get_extra_data(item_container)
        item_container = item_container.get_parent()
    if item_container.item.app.name != 'dmsEduFolder':
        return None
c9ac2ad65d05d13deaad8a6031f2f9ee8ab8aee4
3,657,291
import torch


def pt_accuracy(output, target, topk=(1,)):
    """Compute the accuracy over the k top predictions for the specified values of k."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
5c0ddcd57163987b00e09a6677ddc41928810874
3,657,292
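# Illustrative use of pt_accuracy() above with dummy logits and targets
# (two samples, two classes); both predictions are correct, so top-1 is 100%.
logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
targets = torch.tensor([1, 0])
top1, = pt_accuracy(logits, targets, topk=(1,))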
from typing import Counter


def compute_phasing_counts(variant_to_read_names_dict):
    """
    Parameters
    ----------
    variant_to_read_names_dict : dict
        Dictionary mapping varcode.Variant to set of read names

    Returns
    -------
    Dictionary from variant to Counter(Variant)
    """
    read_names_to_variants = defaultdict(set)
    for variant, read_names in variant_to_read_names_dict.items():
        for read_name in read_names:
            read_names_to_variants[read_name].add(variant)

    # now count up how many reads are shared between pairs of variants
    phasing_counts = defaultdict(Counter)
    for variant, read_names in variant_to_read_names_dict.items():
        for read_name in read_names:
            for other_variant in read_names_to_variants[read_name]:
                if variant != other_variant:
                    phasing_counts[variant][other_variant] += 1
    return phasing_counts
ba13a6d6c76e018cb1072e9fba635aad5593437b
3,657,293
def _inputs_and_vae(hparams):
    """Constructs a VAE."""
    obs_encoder = codec.MLPObsEncoder(hparams)
    obs_decoder = codec.MLPObsDecoder(
        hparams,
        codec.BernoulliDecoder(squeeze_input=True),
        param_size=1)
    inputs = context_mod.EncodeObserved(obs_encoder)
    vae = vae_mod.make(hparams, obs_encoder, obs_decoder)
    return inputs, vae
0e6af8a7d17312f99435426907a0dca062981225
3,657,294
def read_grid_hdf5(filepath, name):
    """Read a grid from HDF5 file.

    Parameters
    ----------
    filepath : string or pathlib.Path object
        Path of the HDF5 file.
    name : string
        Name of the grid.

    Returns
    -------
    x : numpy.ndarray
        The x-coordinates along a gridline in the x-direction.
    y : numpy.ndarray
        The y-coordinates along a gridline in the y-direction.
    z : numpy.ndarray
        The z-coordinates along a gridline in the z-direction.
    """
    f = h5py.File(str(filepath), 'r')
    dim = len(f[name])
    x, y, z = f[name]['x'][:], f[name]['y'][:], None
    if dim == 3:
        z = f[name]['z'][:]
    f.close()
    if z is None or len(z) == 1:
        return x, y
    return x, y, z
f9ad79da36cfa24028562cdaeefba8ca9b48e572
3,657,295
def string_to_version(verstring):
    """
    Return a tuple of (epoch, version, release) from a version string

    This function replaces rpmUtils.miscutils.stringToVersion, see
    https://bugzilla.redhat.com/1364504
    """
    # is there an epoch?
    components = verstring.split(':')
    if len(components) > 1:
        epoch = components[0]
    else:
        epoch = 0

    # version and release are always in the last component
    remaining = components[-1].split('-')
    version = remaining[0]
    release = remaining[1]

    return (epoch, version, release)
4dca34ce0f30e66eff6dad784cd684fb39b665ee
3,657,296
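# Illustrative split performed by string_to_version() above:
# "1:2.3-4" -> epoch "1", version "2.3", release "4".
assert string_to_version('1:2.3-4') == ('1', '2.3', '4')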
import xml


def cff2provn(filename):
    """Parse cml xml file and return a prov bundle object"""
    #filename = "/Users/fariba/Desktop/UCI/freesurfer/scripts/meta-MC-SCA-023_tp1.cml"
    tree = xml.dom.minidom.parse(filename)
    collections = tree.documentElement

    g = prov.ProvBundle()
    g.add_namespace(xsd)
    g.add_namespace(dcterms)
    g.add_namespace(cml)

    url_entity = g.entity(cml[get_id()])
    url_entity.add_extra_attributes({prov.PROV['type']: nidm['nidm:ConnectomeFileFormat'],
                                     prov.PROV['location']: prov.Literal(filename, prov.XSD['String'])})

    cml_collection = g.collection(cml[get_id()])
    cml_collection.add_extra_attributes(
        {prov.PROV['type']: cml['connectome'],
         prov.PROV['label']: filename})
    g.wasDerivedFrom(cml_collection, url_entity)

    # get species, subject_name, and subject_timepoint
    species = tree.getElementsByTagName('cml:species')[0].toxml()
    species = species.replace('<cml:species>', '').replace('</cml:species>', '')

    tp = ''
    sub = ''
    tags = collections.getElementsByTagName("cml:tag")
    for t in tags:
        if t.attributes['key'].value == 'subject_name':
            sub = t.toxml()
        if t.attributes['key'].value == 'subject_timepoint':
            tp = t.toxml()
    sub = sub.replace('<cml:tag key="subject_name">', '').replace('</cml:tag>', '')
    tp = tp.replace('<cml:tag key="subject_timepoint">', '').replace('</cml:tag>', '')
    #print species + " " + sub + " " + tp
    cml_meta = g.entity(cml[get_id()])
    cml_meta.add_extra_attributes(
        {prov.PROV['type']: cml['connectome-meta'], cml['species']: species,
         cml['timepoint']: tp, cml['subject_name']: sub})
    g.hadMember(cml_collection, cml_meta)

    volumes = collections.getElementsByTagName("cml:connectome-volume")
    c = 0
    for v in volumes:
        c = c + 1
        #print v.getAttribute("src") + " " + v.getAttribute("dtype") + " " + v.getAttribute("name") + " " + v.getAttribute("fileformat")
        #print v.attributes['fileformat'].value
        dtype = v.getAttribute('dtype')
        src = v.getAttribute('src')
        name = v.getAttribute('name')
        fileformat = v.getAttribute('fileformat')
        cml_volume = g.entity(cml[get_id()])
        cml_volume.add_extra_attributes(
            {prov.PROV['type']: cml['connectome-volume'], cml['dtype']: dtype,
             cml['src']: src, cml['name']: name, cml['fileformat']: fileformat})
        g.hadMember(cml_collection, cml_volume)

    tracks = collections.getElementsByTagName("cml:connectome-track")
    c = 0
    for t in tracks:
        c = c + 1
        #print t.getAttribute("src") + " " + t.getAttribute("dtype") + " " + t.getAttribute("name") + " " + t.getAttribute("fileformat")
        dtype = t.getAttribute('dtype')
        src = t.getAttribute('src')
        name = t.getAttribute('name')
        fileformat = t.getAttribute('fileformat')
        cml_track = g.entity(cml[get_id()])
        cml_track.add_extra_attributes(
            {prov.PROV['type']: cml['connectome-track'], cml['dtype']: dtype,
             cml['src']: src, cml['name']: name, cml['fileformat']: fileformat})
        g.hadMember(cml_collection, cml_track)

    networks = collections.getElementsByTagName("cml:connectome-network")
    c = 0
    for n in networks:
        c = c + 1
        #print n.getAttribute("src") + " " + n.getAttribute("dtype") + " " + n.getAttribute("name") + " " + n.getAttribute("fileformat")
        dtype = n.getAttribute('dtype')
        src = n.getAttribute('src')
        name = n.getAttribute('name')
        fileformat = n.getAttribute('fileformat')
        cml_network = g.entity(cml[get_id()])
        cml_network.add_extra_attributes(
            {prov.PROV['type']: cml['connectome-network'], cml['dtype']: dtype,
             cml['src']: src, cml['name']: name, cml['fileformat']: fileformat})
        g.hadMember(cml_collection, cml_network)

    surfaces = collections.getElementsByTagName("cml:connectome-surface")
    c = 0
    for s in surfaces:
        c = c + 1
        #print s.getAttribute("src") + " " + s.getAttribute("dtype") + " " + s.getAttribute("name") + " " + s.getAttribute("fileformat")
        dtype = s.getAttribute('dtype')
        src = s.getAttribute('src')
        name = s.getAttribute('name')
        fileformat = s.getAttribute('fileformat')
        cml_surface = g.entity(cml[get_id()])
        cml_surface.add_extra_attributes(
            {prov.PROV['type']: cml['connectome-surface'], cml['dtype']: dtype,
             cml['src']: src, cml['name']: name, cml['fileformat']: fileformat})
        g.hadMember(cml_collection, cml_surface)

    data = collections.getElementsByTagName("cml:connectome-data")
    c = 0
    for d in data:
        c = c + 1
        #print d.getAttribute("src") + " " + d.getAttribute("dtype") + " " + d.getAttribute("name") + " " + d.getAttribute("fileformat")
        dtype = d.getAttribute('dtype')
        src = d.getAttribute('src')
        name = d.getAttribute('name')
        cml_data = g.entity(cml[get_id()])
        cml_data.add_extra_attributes(
            {prov.PROV['type']: cml['connectome-data'], cml['dtype']: dtype,
             cml['src']: src, cml['name']: name, cml['fileformat']: fileformat})
        g.hadMember(cml_collection, cml_data)

    return g
c8e44bd627173a17d45eadcc5ab73062c4e27ff6
3,657,297
def optional(idx, *args):
    """A converter for functions having optional arguments.

    The index to the last non-optional parameter is specified and a list of
    types for optional arguments follows.
    """
    return lambda ctx, typespecs: _optional_imp(ctx, typespecs[idx], args)
e5491588090beaf4730f18c1b54193104a8f62be
3,657,298
def calc_plot_ROC(y1, y2):
    """
    Take two distributions and plot the ROC curve if you used the difference
    in those distributions as a binary classifier.

    :param y1:
    :param y2:
    :return:
    """
    y_score = np.concatenate([y1, y2])
    y_true = np.concatenate([np.zeros(len(y1)), np.ones(len(y2))])
    return plot_ROC(y_true, y_score)
428aa54ebe92ff1df6df4ebcae800a0b692f09d5
3,657,299