def validate_dtype(dtype_in):
    """
    Input is an argument representing one or more datatypes.

    Per column; the number of columns has to match the number of columns in the csv file:
        dtype = [pa.int32(), pa.int32(), pa.int32(), pa.int32()]
        dtype = {'__columns__': [pa.int32(), pa.int32(), pa.int32(), pa.int32()]}

    Default:
        dtype_in = pa.int32()
        dtype_out = {'__default__': pa.int32()}

    Not yet supported:
        Default, optional column overwrite:
            dtype_in = {'__default__': pa.int32(), '__columns__': {'colname': pa.int32()}}
            dtype_out = raise ValueError
            dtype_in = {'colname': pa.int32()}
            dtype_out = raise ValueError
    """
    if dtype_in is None:
        # use default datatype
        dtype_in = pa.float32()
    argtype = type(dtype_in)
    valid_types = _dtypes_from_arrow()
    if argtype is pa.DataType:
        if dtype_in not in valid_types:
            raise ValueError('Not supporting type: ' + str(dtype_in))
        return {'__default__': valid_types[dtype_in]}
    if argtype is dict:
        raise ValueError('Not yet supported dict')
    if argtype is list and len(dtype_in) > 0:
        matches = [dtype in valid_types for dtype in dtype_in]
        if False in matches:
            mismatches = ['%s(column:%d)' % (dtype_in[j], j)
                          for j in range(len(matches)) if not matches[j]]
            raise ValueError('List contains unsupported datatype: ' + ','.join(mismatches))
        if len(set(dtype_in)) == 1:
            # all list members are of the same type
            return {'__default__': valid_types[dtype_in[0]]}
        return {'__columns__': [valid_types[dtype] for dtype in dtype_in]}
    raise ValueError('No input to match datatypes')
5,351,800
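A minimal usage sketch for validate_dtype above; it assumes pyarrow is installed and that the module's _dtypes_from_arrow helper is in scope (the mapped values shown are only illustrative):

import pyarrow as pa

# A uniform list collapses to a single default type:
#   validate_dtype([pa.int32(), pa.int32()]) -> {'__default__': <mapped int32 type>}
# A mixed list yields a per-column mapping:
#   validate_dtype([pa.int32(), pa.float32()]) -> {'__columns__': [<int32>, <float32>]}
# Dicts are rejected for now:
#   validate_dtype({'colname': pa.int32()})   -> raises ValueError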
def get_secret(name):
    """Load a secret from file or env

    Either provide ``{name}_FILE`` or ``{name}`` in the environment to
    configure the value for ``{name}``.
    """
    try:
        with open(os.environ[name + "_FILE"]) as secret_file:
            return secret_file.read().strip()
    except (FileNotFoundError, PermissionError, KeyError):
        try:
            return os.environ[name]
        except KeyError:
            if os.path.basename(sys.argv[0]) == 'sphinx-build':
                # We won't have nor need secrets when building docs
                return None
            raise ValueError(
                f"Missing secrets: configure {name} or {name}_FILE to contain or point at secret"
            ) from None
5,351,801
def cache(
        cache_class: Callable[[], base_cache.BaseCache[T]],
        serializer: Callable[[], cache_serializer.CacheSerializer],
        conditional: Callable[[List[Any], Dict[str, Any]], bool] = _always_true):
    """
    cache
    =====

    parameters:
        cache_class (Callable[[], base_cache.BaseCache])
        serializer (Callable[[], cache_serializer.CacheSerializer])
        conditional (Callable[[List[Any], Dict[str, Any]], bool])

    Decorator that caches function results using the provided class. The class
    must be a subclass of base_cache, providing get and set methods with
    appropriate signatures.

    An optional conditional can be passed, which receives the *args and
    **kwargs of the called function. This function determines whether to
    cache or to always recompute, based on whether it returns True or False.
    """
    serializer_instance = serializer()
    cache_instance = cache_class()
    return curry(_wrapper, cache_instance, serializer_instance, conditional)
5,351,802
def words2chars(images, labels, gaplines):
    """ Transform word images with gaplines into individual chars """
    # Total number of chars
    length = sum([len(l) for l in labels])

    imgs = np.empty(length, dtype=object)
    newLabels = []
    height = images[0].shape[0]
    idx = 0

    for i, gaps in enumerate(gaplines):
        for pos in range(len(gaps) - 1):
            imgs[idx] = images[i][0:height, gaps[pos]:gaps[pos + 1]]
            newLabels.append(char2idx(labels[i][pos]))
            idx += 1

    print("Loaded chars from words:", length)
    return imgs, newLabels
5,351,803
def general_barplot_with_error(means, stds):
    """
    Barplot, meant for fixation position and entropy.
    x: fixation position. y: mean (and stdev)

    means: dictionary, with x labels as keys
    stds: dictionary, with x labels as keys
    """
    fig, ax = plt.subplots()
    # convert dict views to lists so matplotlib can consume them
    plt.errorbar(list(means.keys()), list(means.values()),
                 yerr=list(stds.values()), linewidth=2, elinewidth=0.4)
    plt.show()
5,351,804
def show_all_archives():
    """Displays meta data for all archives."""
    archives = Archive.select()
    template = "{: <32} {: <64} {: <64} {: <32} {: <32}"
    header = [
        "Name",
        "Source Path",
        "Destination Path",
        "Key Pair Name",
        "Timestamp",
    ]
    print(template.format(*header))
    for archive in archives:
        row = [
            archive.name[:32],
            archive.src_path[:64],
            archive.dst_path[:64],
            archive.key_pair.name,
            str(archive.timestamp),
        ]
        print(template.format(*row))
5,351,805
def add_custom_subparser(subparsers):
    """ Add subparser for customized stock picks """
    custom = subparsers.add_parser(
        "custom",
        help="Analyze set of user defined pages"
    )
    custom.add_argument(
        "pages",
        nargs="*",
        help="Symbol:Wikipage to analyze, eg: AAPL:Apple_Inc MSFT:Microsoft"
    )
5,351,806
def create_spark_session(spark_jars: str) -> SparkSession:
    """
    Create Spark session

    :param spark_jars: Hadoop-AWS JARs
    :return: SparkSession
    """
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", spark_jars) \
        .appName("Sparkify ETL") \
        .getOrCreate()
    return spark
5,351,807
def make_expired(request, pk):
    """ Mark the queue number's status as expired (missed the call) """
    try:
        reg = Registration.objects.get(pk=pk)
    except Registration.DoesNotExist:
        return Response('registration not found', status=status.HTTP_404_NOT_FOUND)

    data = {
        'status': REGISTRATION_STATUS_EXPIRED
    }
    serializer = RegistrationSerializer(reg, data=data, partial=True)
    if serializer.is_valid():
        reg = serializer.save()
        reg.end_time = datetime.datetime.now()
        reg.save()
        # notify the nth customer in line that their table is ready
        _notify_ready(reg.table_type)
        return Response(serializer.data, status=status.HTTP_200_OK)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
5,351,808
def run_optimization(mesh, target_evals, out_path, params=OptimizationParams()):
    """Run the optimization."""
    # Create the output directories
    os.makedirs(f"{out_path}/ply", exist_ok=True)
    os.makedirs(f"{out_path}/txt", exist_ok=True)

    # Unpack the mesh
    [Xopt, TRIV, n, m, Ik, Ih, Ik_k, Ih_k, Tpi, Txi, Tni,
     iM, Windices, Ael, Bary, bound_edges, ord_list] = mesh

    # Save the initial embedding
    save_ply(Xopt, TRIV, "%s/ply/initial.ply" % out_path)

    # Save the target eigenvalue sequence
    np.savetxt("%s/txt/target.txt" % out_path, target_evals.cpu().detach().numpy())

    iterations = []
    for nevals in params.evals:
        step = 0
        while step < params.steps - 1:
            # Prepare the mesh
            mesh = prepare_mesh(Xopt, TRIV)
            # Unpack the mesh
            [Xori, TRIV, n, m, Ik, Ih, Ik_k, Ih_k, Tpi, Txi, Tni,
             iM, Windices, Ael, Bary, bound_edges, ord_list] = mesh
            # Initialize the model
            graph = initialize(mesh, step=step)
            tic()
            # Start iteration
            for step in range(step + 1, params.steps):
                # Recompute triangulation
                if step % params.remesh_step == 0:
                    print("RECOMPUTING TRIANGULATION at step %d" % step)
                    break
                try:
                    # Alternate optimization of inner and boundary vertices
                    if int(step / 10) % 2 == 0:
                        # Optimize over inner points
                        er, ee, Xopt_t = forward(
                            "inner", "train", graph, mesh,
                            target_evals, nevals, step, params)
                    else:
                        # Optimize over boundary points
                        er, ee, Xopt_t = forward(
                            "bound", "train", graph, mesh,
                            target_evals, nevals, step, params)
                    iterations.append((step, nevals, er, ee, int(step / 10) % 2))

                    if (step % params.checkpoint == 0
                            or step == params.steps - 1
                            or step == 1):
                        toc()
                        tic()
                        # Perform a forward pass in eval mode
                        (cost, cost_evals, cost_vcL, cost_vcW, decay, flip, evout) = forward(
                            "bound", "eval", graph, mesh, target_evals, nevals, step)
                        print(
                            "Iter %d, cost: %f (evals cost: %f (%f) (%f), "
                            "smoothness weight: %f). Flip: %d"
                            % (step, cost, cost_evals, cost_vcL, cost_vcW,
                               decay, np.sum(flip < 0)))

                        # Save the current embedding
                        save_ply(Xopt, TRIV,
                                 "%s/ply/evals_%d_iter_%06d.ply" % (out_path, nevals, step))
                        # Save the current eigenvalue sequence
                        np.savetxt("%s/txt/evals_%d_iter_%06d.txt" % (out_path, nevals, step),
                                   evout)
                        # Save the training progress statistics
                        np.savetxt("%s/iterations.txt" % (out_path), iterations)

                    # Early stopping
                    if ee < params.min_eval_loss:
                        step = params.steps
                        print("Minimum eigenvalues loss reached")
                        break
                except KeyboardInterrupt:
                    step = params.steps
                    break
                except:
                    print(sys.exc_info())
                    ee = float("nan")

                if ee != ee:
                    # If nan (something went wrong) with the spectral decomposition,
                    # perturbate the last valid state and start over
                    print("iter %d. Perturbating initial condition" % step)
                    Xopt = (Xopt
                            + (np.random.rand(np.shape(Xopt)[0], np.shape(Xopt)[1]) - 0.5)
                            * 1e-3)
                    graph.global_step = step
                else:
                    Xopt = Xopt_t
                    graph.global_step += 1

            if step < params.steps - 1:
                [Xopt, TRIV] = resample(Xopt, TRIV)
5,351,809
def register():
    """Registers all signature key managers in the Python registry."""
    tink_bindings.register()

    for key_type_identifier in ('EcdsaPrivateKey', 'Ed25519PrivateKey',
                                'RsaSsaPssPrivateKey', 'RsaSsaPkcs1PrivateKey',):
        type_url = 'type.googleapis.com/google.crypto.tink.' + key_type_identifier
        key_manager = core.PrivateKeyManagerCcToPyWrapper(
            tink_bindings.PublicKeySignKeyManager.from_cc_registry(type_url),
            _public_key_sign.PublicKeySign, _PublicKeySignCcToPyWrapper)
        core.Registry.register_key_manager(key_manager, new_key_allowed=True)

    for key_type_identifier in ('EcdsaPublicKey', 'Ed25519PublicKey',
                                'RsaSsaPssPublicKey', 'RsaSsaPkcs1PublicKey',):
        type_url = 'type.googleapis.com/google.crypto.tink.' + key_type_identifier
        key_manager = core.KeyManagerCcToPyWrapper(
            tink_bindings.PublicKeyVerifyKeyManager.from_cc_registry(type_url),
            _public_key_verify.PublicKeyVerify, _PublicKeyVerifyCcToPyWrapper)
        core.Registry.register_key_manager(key_manager, new_key_allowed=True)

    core.Registry.register_primitive_wrapper(_signature_wrapper.PublicKeySignWrapper())
    core.Registry.register_primitive_wrapper(_signature_wrapper.PublicKeyVerifyWrapper())
5,351,810
def cmd_add(opts):
    """Add one or more existing Docker containers to a Blockade group
    """
    config = load_config(opts.config)
    b = get_blockade(config, opts)
    b.add_container(opts.containers)
5,351,811
def word2vec_similarity(segmented_topics, accumulator, with_std=False, with_support=False):
    """For each topic segmentation, compute average cosine similarity using a
    :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`.

    Parameters
    ----------
    segmented_topics : list of lists of (int, `numpy.ndarray`)
        Output from the :func:`~gensim.topic_coherence.segmentation.s_one_set`.
    accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or
        :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator`
        Word occurrence accumulator.
    with_std : bool, optional
        True to also include standard deviation across topic segment sets
        in addition to the mean coherence for each topic.
    with_support : bool, optional
        True to also include support across topic segments. The support is
        defined as the number of pairwise similarity comparisons used to
        compute the overall topic coherence.

    Returns
    -------
    list of (float[, float[, int]])
        Cosine word2vec similarities per topic (with std/support if `with_std`, `with_support`).

    Examples
    --------
    .. sourcecode:: pycon

        >>> import numpy as np
        >>> from gensim.corpora.dictionary import Dictionary
        >>> from gensim.topic_coherence import indirect_confirmation_measure
        >>> from gensim.topic_coherence import text_analysis
        >>>
        >>> # create segmentation
        >>> segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]]
        >>>
        >>> # create accumulator
        >>> dictionary = Dictionary()
        >>> dictionary.id2token = {1: 'fake', 2: 'tokens'}
        >>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary)
        >>> _ = accumulator.accumulate([['fake', 'tokens'], ['tokens', 'fake']], 5)
        >>>
        >>> # should be (0.726752426218 0.00695475919227)
        >>> mean, std = indirect_confirmation_measure.word2vec_similarity(segmentation, accumulator, with_std=True)[0]

    """
    topic_coherences = []
    total_oov = 0

    for topic_index, topic_segments in enumerate(segmented_topics):
        segment_sims = []
        num_oov = 0
        for w_prime, w_star in topic_segments:
            if not hasattr(w_prime, '__iter__'):
                w_prime = [w_prime]
            if not hasattr(w_star, '__iter__'):
                w_star = [w_star]

            try:
                segment_sims.append(accumulator.ids_similarity(w_prime, w_star))
            except ZeroDivisionError:
                num_oov += 1

        if num_oov > 0:
            total_oov += 1
            logger.warning(
                "%d terms for topic %d are not in word2vec model vocabulary",
                num_oov, topic_index)
        topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))

    if total_oov > 0:
        logger.warning("%d topics have terms not in the word2vec model vocabulary", total_oov)
    return topic_coherences
5,351,812
def remote(self, privilege, grant_target_name, user_name, node=None):
    """Check that user is only able to create a table from a remote source
    when they have the necessary privilege.
    """
    exitcode, message = errors.not_enough_privileges(name=user_name)

    if node is None:
        node = self.context.node

    with Scenario("Remote source without privilege"):
        table_name = f'table_{getuid()}'

        with Given("The user has table privilege"):
            node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}")

        with When("I grant the user NONE privilege"):
            node.query(f"GRANT NONE TO {grant_target_name}")

        with And("I grant the user USAGE privilege"):
            node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")

        with Then("I check the user can't use the Remote source"):
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')",
                       settings=[("user", user_name)], exitcode=exitcode, message=message)

    with Scenario("Remote source with privilege"):
        with When(f"I grant {privilege}"):
            node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")

        with Then("I check the user can use the Remote source"):
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')",
                       settings=[("user", user_name)], exitcode=42, message='Exception: Storage')

    with Scenario("Remote source with revoked privilege"):
        with When(f"I grant {privilege}"):
            node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")

        with And(f"I revoke {privilege}"):
            node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")

        with Then("I check the user cannot use the Remote source"):
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')",
                       settings=[("user", user_name)], exitcode=exitcode, message=message)
5,351,813
def test_vecenv_terminal_obs(vec_env_class, vec_env_wrapper):
    """Test that 'terminal_observation' gets added to info dict upon termination."""
    step_nums = [i + 5 for i in range(N_ENVS)]
    vec_env = vec_env_class([functools.partial(StepEnv, n) for n in step_nums])

    if vec_env_wrapper is not None:
        if vec_env_wrapper == VecFrameStack:
            vec_env = vec_env_wrapper(vec_env, n_stack=2)
        else:
            vec_env = vec_env_wrapper(vec_env)

    zero_acts = np.zeros((N_ENVS,), dtype="int")
    prev_obs_b = vec_env.reset()
    for step_num in range(1, max(step_nums) + 1):
        obs_b, _, done_b, info_b = vec_env.step(zero_acts)
        assert len(obs_b) == N_ENVS
        assert len(done_b) == N_ENVS
        assert len(info_b) == N_ENVS
        env_iter = zip(prev_obs_b, obs_b, done_b, info_b, step_nums)
        for prev_obs, obs, done, info, final_step_num in env_iter:
            assert done == (step_num == final_step_num)
            if not done:
                assert "terminal_observation" not in info
            else:
                terminal_obs = info["terminal_observation"]

                # do some rough ordering checks that should work for all
                # wrappers, including VecNormalize
                assert np.all(prev_obs < terminal_obs)
                assert np.all(obs < prev_obs)

                if not isinstance(vec_env, VecNormalize):
                    # more precise tests that we can't do with VecNormalize
                    # (which changes observation values)
                    assert np.all(prev_obs + 1 == terminal_obs)
                    assert np.all(obs == 0)

        prev_obs_b = obs_b

    vec_env.close()
5,351,814
def addGroupsToKey(server, activation_key, groups):
    """
    Add server groups to an activation key

    CLI Example:

    .. code-block:: bash

        salt-run spacewalk.addGroupsToKey spacewalk01.domain.com 1-my-key '[group1, group2]'
    """
    try:
        client, key = _get_session(server)
    except Exception as exc:  # pylint: disable=broad-except
        err_msg = "Exception raised when connecting to spacewalk server ({}): {}".format(
            server, exc
        )
        log.error(err_msg)
        return {"Error": err_msg}

    all_groups = client.systemgroup.listAllGroups(key)
    groupIds = []
    for group in all_groups:
        if group["name"] in groups:
            groupIds.append(group["id"])

    if client.activationkey.addServerGroups(key, activation_key, groupIds) == 1:
        return {activation_key: groups}
    else:
        return {activation_key: "Failed to add groups to activation key"}
5,351,815
def get_user_for_delete():
    """Query for Users table."""
    delete_user = Users.query \
        .get(DELETE_USER_ID)
    return delete_user
5,351,816
def station_code_from_duids(duids: List[str]) -> Optional[str]:
    """
    Derives a station code from a list of duids

    ex.

    BARRON1,BARRON2 => BARRON
    OSBAG,OSBAG => OSBAG
    """
    if not isinstance(duids, list):
        return None

    if not duids:
        # covers None-like and empty lists
        return None

    duids_uniq = list(set(duids))

    common = findcommonstart(duids_uniq)

    if not common:
        return None

    # strip last character if we have one
    if is_single_number(common[-1]):
        common = common[:-1]

    if common.endswith("_"):
        common = common[:-1]

    if len(common) > 2:
        return common

    return None
5,351,817
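A quick sketch of the derivation above, with stand-in implementations for the module's helpers findcommonstart and is_single_number (the real ones live elsewhere in the codebase):

from os.path import commonprefix

def findcommonstart(strings):  # stand-in for the module's helper
    return commonprefix(strings)

def is_single_number(ch):  # stand-in: is the character a digit?
    return ch.isdigit()

# station_code_from_duids(["BARRON1", "BARRON2"]) -> "BARRON"
# station_code_from_duids(["OSBAG", "OSBAG"])     -> "OSBAG"
# station_code_from_duids(["A1", "B2"])           -> None (no common prefix)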
def act_mmp_3d(out_mmps, target_id):
    """Function to attribute 3D coordinates to the activity only molecule.
    Uses MMFF and maximises shape overlap with existing fragment.
    Takes a list of mmps and a Target
    Returns None"""
    print("Generating 3D conformations")
    # First of all make sure the protein exists to put all these artificial
    # coordinates on
    my_target = Target.objects.get(pk=target_id)
    new_protein = Protein()
    new_protein.code = my_target.title + "ChEMBL"
    new_protein.target_id = my_target
    try:
        new_protein.validate_unique()
        new_protein.save()
    except ValidationError:
        new_protein = Protein.objects.get(code=my_target.title + "ChEMBL")
    # Now let's loop through the MMPs and overlay the act mol onto the other one
    tot = len(out_mmps)
    old = -1
    for i, ans in enumerate(out_mmps):
        # Print the progress
        if i * 100 // tot != old:
            old = i * 100 // tot
            sys.stdout.write("\r%d%% complete..." % old)
            sys.stdout.flush()
        # Pull out the molecules and the fragments; mol1 is in 3D, mol2 is an
        # activity molecule
        mol1 = Chem.MolFromMolBlock(str(ans[0].sdf_info))
        # Set the protonation state here
        new_s = set_pH(str(ans[1].cmpd_id.smiles))
        if Chem.MolFromSmiles(new_s) is None:
            continue
        mol2 = Chem.MolFromSmiles(new_s)
        frag2 = ans[3]
        activity = ans[1]
        s_context = str(ans[4])
        # Split the context into its fragments
        my_frags = s_context.split(".")
        # If it's a single cut
        if len(my_frags) == 1:
            context = canonicalise_context(Chem.MolFromSmiles(s_context))
        elif len(my_frags) == 2 or len(my_frags) == 3:
            # If it is a double or triple split ignore
            continue
        else:
            sys.stderr.write("ERROR MORE THAN THREE FRAGMENTS...")
            sys.exit()
        # Now filter out if it's too small
        if context.GetNumHeavyAtoms() < 3:
            continue
        # Now find the best core, using make_smarts_from_frag to make a
        # relevant smarts pattern to do substructure subs
        smarts_mol = Chem.MolFromSmarts(
            make_smarts_from_frag(Chem.MolToSmiles(context, isomericSmiles=True)))
        core_mol = find_core(mol1, mol2, smarts_mol)
        # Check there is only one option. If there are two then we have a
        # problem. With the current implementation we can only handle one.
        if len(mol2.GetSubstructMatches(context)) > 1:
            sys.stderr.write("ERROR TWO POSSIBLE MATCHES")
            sys.stderr.write("ERROR " + Chem.MolToSmiles(context))
            sys.stderr.write("ERROR " + Chem.MolToSmiles(mol2))
            continue
        # Now the function to do the constraining
        try:
            mol2 = AllChem.ConstrainedEmbed(mol2, core_mol)
        except ValueError:
            # If it is a nitrile it needs extra sanitisation before embedding
            Chem.SanitizeMol(core_mol)
            try:
                mol2 = AllChem.ConstrainedEmbed(mol2, core_mol)
            except ValueError:
                try:
                    # Setting the mol as a molblock and reading back in
                    # can fix this issue
                    mol2 = Chem.MolFromMolBlock(Chem.MolToMolBlock(mol2))
                    core_mol = Chem.MolFromMolBlock(Chem.MolToMolBlock(core_mol))
                    mol2 = AllChem.ConstrainedEmbed(mol2, core_mol)
                except ValueError:
                    # Except when it doesn't, so print out this
                    sys.stderr.write(Chem.MolToMolBlock(mol2))
                    sys.stderr.write(Chem.MolToMolBlock(core_mol))
                    sys.stderr.write("ERROR MOL ONE:\n" + Chem.MolToSmiles(mol1))
                    sys.stderr.write("ERROR MOL TWO:\n" + Chem.MolToSmiles(mol2))
                    sys.stderr.write("ERROR CORE MOL:\n" + Chem.MolToSmiles(core_mol))
                    sys.stderr.write("ERROR CONTEXT:\n" + Chem.MolToSmiles(context))
                    sys.stderr.write("ERROR CANNOT CONSTRAINED EMBED MOLECULE")
                    continue
        # If the molecule is the same as the core then why bother
        if Chem.MolToSmiles(mol2, isomericSmiles=True) == Chem.MolToSmiles(core_mol, isomericSmiles=True):
            continue
        # NOW FILTER ON SHAPE FOR THE BEST ONE
        out_confs = generate_conformations(mol2, core_mol, num_confs=30,
                                           num_fails=30, max_iters=30, ff="MMFF")
        if out_confs is None:
            continue
        my_mols = [Chem.MolFromMolBlock(x[0]) for x in out_confs]
        mols = []
        for conf in my_mols:
            # Now calculate the shape distance
            mols.append((AllChem.ShapeTanimotoDist(conf, mol1, ignoreHs=True), conf))
        mols = sorted(mols, key=lambda x: x[0])
        # Now pick the best (lowest) one
        mol2 = mols[0][1]
        # # code to find the most energetically favourable one - if several have
        # if len(mols) > 1:
        #     # If the molecules are identical in terms of shapeprotrudedist
        #     # then use energy to get the value
        #     if mols[0][0] == mols[1][0]:
        #         # Check there aren't more like this
        #         samemols = [x for x in mols if x[0] == mols[0][0]]
        #         mineng = 100000
        #         # Check the energy
        #         for smol in samemols:
        #             mysmol = smol[1]
        #             # Calculate the energy
        #             try:
        #                 mmff_mol = Chem.MolFromMolBlock(Chem.MolToMolBlock(mysmol), sanitize=False)
        #                 myff = Chem.rdForceFieldHelpers.SetupMMFFForceField(mmff_mol, mmffVerbosity=0)
        #                 ff = AllChem.MMFFGetMoleculeForceField(mysmol, myff, confId=0)
        #             # Because the newer version of RDKit has this difference
        #             except AttributeError:
        #                 ff = AllChem.MMFFGetMoleculeForceField(mysmol, AllChem.MMFFGetMoleculeProperties(mysmol))
        #             if ff.CalcEnergy() < mineng:
        #                 mineng = ff.CalcEnergy()
        #                 mol2 = mysmol
        # Derive the fragments
        me = Chem.MolFromSmarts(make_smarts_from_frag(s_context))
        frag_out2 = AllChem.DeleteSubstructs(mol2, me)
        # Clean up the fragments
        frag_out2, fr_none = clean_up_frags(frag_out2)
        # Now add the coordinates for the fragment to the fragment and add the
        # molecule linker -> the fact that the molecule is linked to an
        # activity protein singles it out as being Activity first
        make_new_3d_frag(frag2, activity, ans[0], frag_out2, new_protein, mol2)
    old = 100
    sys.stdout.write("\r%d%% complete..." % old)
    sys.stdout.flush()
    print("Completed generating all 3D coordinates")
5,351,818
async def count_to(number):
    """ counts to n in async manner"""
    async for i in some_iter_func(number):
        print(i)
5,351,819
def erosion(image, selem, out=None, shift_x=False, shift_y=False):
    """Return greyscale morphological erosion of an image.

    Morphological erosion sets a pixel at (i,j) to the minimum over all pixels
    in the neighborhood centered at (i,j). Erosion shrinks bright regions and
    enlarges dark regions.

    Parameters
    ----------
    image : ndarray
        Image array.
    selem : ndarray
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : ndarray
        The array to store the result of the morphology. If None is
        passed, a new array will be allocated.
    shift_x, shift_y : bool
        shift structuring element about center point. This only affects
        eccentric structuring elements (i.e. selem with even numbered sides).

    Returns
    -------
    eroded : uint8 array
        The result of the morphological erosion.

    Examples
    --------
    >>> # Erosion shrinks bright regions
    >>> import numpy as np
    >>> from skimage.morphology import square
    >>> bright_square = np.array([[0, 0, 0, 0, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 1, 1, 1, 0],
    ...                           [0, 0, 0, 0, 0]], dtype=np.uint8)
    >>> erosion(bright_square, square(3))
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]], dtype=uint8)

    """
    if image is out:
        raise NotImplementedError("In-place erosion not supported!")
    image = img_as_ubyte(image)
    selem = img_as_ubyte(selem)
    return cmorph._erode(image, selem, out=out,
                         shift_x=shift_x, shift_y=shift_y)
5,351,820
def NE(x=None, y=None):
    """
    Compares two values and returns:
        true when the values are not equivalent.
        false when the values are equivalent.

    See https://docs.mongodb.com/manual/reference/operator/aggregation/ne/
    for more details

    :param x: first value or expression
    :param y: second value or expression
    :return: Aggregation operator
    """
    if x is None and y is None:
        return {'$ne': []}
    return {'$ne': [x, y]}
5,351,821
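A small usage sketch for NE inside an aggregation pipeline (the field names here are hypothetical):

# NE(3, 4)  -> {'$ne': [3, 4]}   (evaluates server-side to true, since 3 != 4)
# NE()      -> {'$ne': []}
pipeline = [
    {'$project': {'price_changed': NE('$price', '$oldPrice')}},
]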
def TCPs_from_tc(type_constraint):
    """
    Take type_constraint(type_param_str, allowed_type_strs)
    and return list of TypeConstraintParam
    """
    tys = type_constraint.allowed_type_strs  # Get all ONNX types
    # Convert to Knossos and uniquify
    tys = set(onnxType_to_Type_with_mangler(ty) for ty in tys)
    return [TypeConstraintParam(type_constraint.type_param_str, ty) for ty in tys]
5,351,822
def test_phone_search(yelp_fusion, kwargs, exception_raised):
    """ test GET /businesses/search/phone

    Args:
        yelp_fusion (yelp.YelpFusion): a YelpFusion object
    """
    if exception_raised:
        with pytest.raises(RuntimeError):
            yelp_fusion.phone_search(**kwargs)
    else:
        assert yelp_fusion.phone_search(**kwargs) is not None
5,351,823
def builtin_unshelve(self, context, looping=False):
    """Modify the stack: ( a ... -- ... a )."""
    a = context.dequeue()
    context.push(a)
5,351,824
def invoke_windowsError():
    """
    Raised when a Windows-specific error occurs or when the error number
    does not correspond to an errno value.
    (Only available on Windows systems)
    """
    try:
        a = open("unexistant.txt", 'r')
    except WindowsError as e:
        print("WindowsError encountered")
        print(e)
5,351,825
def writeJavascriptLibrary(html):
    """Write a block of Javascript code."""
    html.write("""<!-- Javascript functions to hide/display folder content -->
<script type="text/javascript">
<!-- to hide script contents from old browsers

// Pick loggraph
// Hides all loggraphs and then shows just the one of
// interest
function pick_loggraph(name,i,nloggraphs) {
  hide_all_loggraphs(name,nloggraphs);
  show_loggraph(name,i);
}

// Function to display a loggraph associated with a log
function show_loggraph(name,i) {
  var loggraph_name = name + "_loggraph_" + i;
  var loggraph_title = name + "_loggraph_title_" + i;
  showElement(loggraph_name);
  // Also make the title link bold
  var obj = document.getElementById(loggraph_title);
  obj.style.fontWeight = "bold";
}

// Function to hide a loggraph associated with a log
function hide_loggraph(name,i) {
  var loggraph_name = name + "_loggraph_" + i;
  var loggraph_title = name + "_loggraph_title_" + i;
  hideElement(loggraph_name);
  // Also make the title link normal
  var obj = document.getElementById(loggraph_title);
  obj.style.fontWeight = "normal";
}

// Function to hide all loggraphs associated with a log
function hide_all_loggraphs(name,n) {
  // Loop over all graphs up to n and hide each one
  for (var i=0; i<n; i++){
    hide_loggraph(name,i);
  }
}

// Function to open the complete log file from the
// hidden state
function open_full_logfile(name) {
  // Show the entire logfile
  var classname = name + "_logfile";
  setDisplayByClass(classname,"block");
  // Show the controls for toggling between
  // summary and complete views
  classname = name + "_logfile_open_controls";
  setDisplayByClass(classname,"block");
  // Hide the controls for accessing the logfile
  // when it's hidden
  classname = name + "_logfile_closed_controls";
  setDisplayByClass(classname,"none");
  // Show the full view
  show_full_logfile(name);
}

// Function to open the log file summary from the
// hidden state
function open_summary_logfile(name) {
  // Show the entire logfile
  var classname = name + "_logfile";
  setDisplayByClass(classname,"block");
  // Show the controls for toggling between
  // summary and complete views
  classname = name + "_logfile_open_controls";
  setDisplayByClass(classname,"block");
  // Hide the controls for accessing the logfile
  // when it's hidden
  classname = name + "_logfile_closed_controls";
  setDisplayByClass(classname,"none");
  // Show the summary view
  show_only_summary(name);
}

// Function to hide the complete log file
function close_logfile(name) {
  // Hide the entire logfile
  var classname = name + "_logfile";
  setDisplayByClass(classname,"none");
  // Hide the controls for toggling between
  // summary and complete views
  classname = name + "_logfile_open_controls";
  setDisplayByClass(classname,"none");
  // Show the controls for accessing the logfile
  // when it's hidden
  classname = name + "_logfile_closed_controls";
  setDisplayByClass(classname,"block");
}

// Function to show only summary for a program log
function show_only_summary(name) {
  // Hide everything that isn't a summary
  // i.e. all the elements that belong to
  // classes ending with "_non_summary"
  var classname = name + "_non_summary";
  setDisplayByClass(classname,"none");
  // Now deal with control elements
  // Hide all controls that offer the option of
  // showing the summary only
  classname = name + "_show_summary_control";
  setDisplayByClass(classname,"none");
  // Show all controls that offer the option of
  // showing the full log file
  classname = name + "_show_full_logfile_control";
  setDisplayByClass(classname,"block");
}

// Function to show full version of a program log
function show_full_logfile(name) {
  // Show all the associated elements that
  // have class ending with "_non_summary"
  var classname = name + "_non_summary";
  setDisplayByClass(classname,"block");
  // Now deal with control elements
  // Show all controls that offer the option of
  // showing the summary only
  classname = name + "_show_summary_control";
  setDisplayByClass(classname,"block");
  // Hide all controls that offer the option of
  // showing the full log file
  classname = name + "_show_full_logfile_control";
  setDisplayByClass(classname,"none");
}

// Open the view of a logfile fragment
function open_fragment(n) {
  var closed_classname = "fragment_closed_" + n;
  setDisplayByClass(closed_classname,"none");
  var open_classname = "fragment_open_" + n;
  setDisplayByClass(open_classname,"block");
}

// Close the view of a logfile fragment
function close_fragment(n) {
  var closed_classname = "fragment_open_" + n;
  setDisplayByClass(closed_classname,"none");
  var open_classname = "fragment_closed_" + n;
  setDisplayByClass(open_classname,"block");
}

// General function to reveal a specific element
// Specify the id of an element and its display
// style will be changed to "block"
function showElement(name) {
  // This changes the display style to be "block"
  var obj = document.getElementById(name);
  obj.style.display = "block";
}

// General function to hide a specific element
// Specify the id of an element and its display
// style will be changed to "none"
function hideElement(name) {
  // This changes the display style to be "none"
  var obj = document.getElementById(name);
  obj.style.display = "none";
}

// General function to set the display property for all elements
// with a specific class
// This is able to deal with elements that belong to multiple
// classes
function setDisplayByClass(classname,value) {
  // Get all elements in the document
  var elements = document.getElementsByTagName("*");
  // For each element look for the "class" attribute
  for (var i = 0; i < elements.length; i++) {
    var node = elements.item(i);
    // First try to get the class attribute using 'class'
    // This seems to work on Firefox 2.* and 1.5
    var classes = node.getAttribute('class');
    if (classes == null) {
      // If the attribute is null then try using the
      // 'className' attribute instead
      // This works for IE7 and IE6
      classes = node.getAttribute('className');
    }
    if (classes != null) {
      classes = classes.split(" ");
      for (var k in classes) {
        if (classes[k] == classname) {
          node.style.display = value;
        }
      }
    }
  }
}

// end hiding contents from old browsers -->
</script>
""")
    return
5,351,826
def _randomde(allgenes, allfolds, size):
    """Randomly select genes from the allgenes array and fold changes from
    the allfolds array. Size argument indicates how many to draw.

    Parameters
    ----------
    allgenes : numpy array
        numpy array with all the genes expressed in the cells where de is generated
    allfolds : numpy array
        an array of fold changes from which the simulation should draw
    size : int
        number of non-zero weights (typically number of DE genes)

    Returns
    -------
    pandas.DataFrame
        DataFrame with randomly chosen genes and weights.
    """
    rdgenes = np.random.choice(allgenes, size, replace=False)
    rdfolds = np.random.choice(allfolds, size, replace=False)
    rdDF = pd.DataFrame({'id': rdgenes, 'weights': rdfolds})
    return rdDF
5,351,827
def add_disc_rew(seg, gamma):
    """
    Discount the reward of the generated batch of trajectories.
    """
    new = np.append(seg['new'], 1)
    rew = seg['rew']

    n_ep = len(seg['ep_rets'])
    n_samp = len(rew)

    seg['ep_disc_ret'] = ep_disc_ret = np.empty(n_ep, 'float32')
    seg['disc_rew'] = disc_rew = np.empty(n_samp, 'float32')

    discounter = 0
    ret = 0.
    i = 0
    for t in range(n_samp):
        disc_rew[t] = rew[t] * gamma ** discounter
        ret += disc_rew[t]

        if new[t + 1]:
            discounter = 0
            ep_disc_ret[i] = ret
            i += 1
            ret = 0.
        else:
            discounter += 1
5,351,828
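A small worked check of the discounting above, assuming a single three-step episode ('new' marks episode starts):

import numpy as np

seg = {
    'new': np.array([1, 0, 0]),       # one episode of three steps
    'rew': np.array([1.0, 1.0, 1.0]),
    'ep_rets': [3.0],                 # one undiscounted episode return
}
add_disc_rew(seg, gamma=0.5)
print(seg['disc_rew'])     # [1.   0.5  0.25]
print(seg['ep_disc_ret'])  # [1.75]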
def test_md041_good_heading_top_level_setext():
    """
    Test to make sure we get the expected behavior after scanning a good file from the
    test/resources/rules/md041 directory that has a top-level setext heading.
    """
    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "scan",
        "test/resources/rules/md041/good_heading_top_level_setext.md",
    ]

    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(
        arguments=supplied_arguments, suppress_first_line_heading_rule=False
    )

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
5,351,829
def retrieve(resource_url, filename=None, verbose=True):
    """
    Copy the given resource to a local file.  If no filename is
    specified, then use the URL's filename.  If there is already a
    file named C{filename}, then raise a C{ValueError}.

    @type resource_url: C{str}
    @param resource_url: A URL specifying where the resource should be
        loaded from.  The default protocol is C{"nltk:"}, which searches
        for the file in the NLTK data package.
    """
    if filename is None:
        if resource_url.startswith('file:'):
            # use the URL's basename (filename is still None at this point)
            filename = os.path.split(resource_url)[-1]
        else:
            filename = re.sub(r'(^\w+:)?.*/', '', resource_url)
    if os.path.exists(filename):
        raise ValueError("%r already exists!" % filename)

    if verbose:
        print('Retrieving %r, saving to %r' % (resource_url, filename))

    # Open the input & output streams.
    infile = _open(resource_url)
    outfile = open(filename, 'wb')

    # Copy infile -> outfile, using 64k blocks.
    while True:
        s = infile.read(1024 * 64)  # 64k blocks.
        outfile.write(s)
        if not s:
            break

    # Close both files.
    infile.close()
    outfile.close()
5,351,830
def XCor(spectra, mask_l, mask_h, mask_w, vel, lbary_ltopo, vel_width=30,
         vel_step=0.3, start_order=0, spec_order=9, iv_order=10, sn_order=8,
         max_vel_rough=300.):
    """
    Calculates the cross-correlation function for a Coralie Spectra
    """
    # speed of light, km/s
    c = 2.99792458E5

    # loop over orders
    norders = spectra.shape[1]

    # determine minimum velocities
    vel_min = vel - vel_width
    vel_max = vel + vel_width
    N = int(np.ceil((2 * vel_width) / vel_step))

    Xcor_full = np.zeros((N, norders + 1))
    sn = np.zeros(norders)
    nlines_used = np.zeros(norders)

    velocities = vel_min + np.arange(N) * vel_step

    Xcor_full[:, 0] = velocities

    weight = 0.0
    mask_middle = 0.5 * (mask_l + mask_h)
    W = np.zeros(norders)

    vwt = 300
    for j in range(start_order, norders):
        t1 = time.time()
        LL = np.where(spectra[spec_order, j, :] != 0)
        if len(LL[0]) > 0:
            x1 = np.min(LL)
            x2 = np.max(LL)
            w1 = np.argmin(np.absolute(spectra[0, j, :] - spectra[0, j, x1]))
            w2 = np.argmin(np.absolute(spectra[0, j, :] - spectra[0, j, x2]))
            l1_0 = spectra[0, j, w1] / lbary_ltopo
            l2_0 = spectra[0, j, w2] / lbary_ltopo
            ww1 = np.argmin(np.abs(spectra[0, j, :] - l1_0 * (1 + (31 + max_vel_rough) / c)))
            ww2 = np.argmin(np.abs(spectra[0, j, :] - l2_0 * (1 - (31 + max_vel_rough) / c)))
            # should not happen, but hey, just in case...
            if ww1 < w1:
                ww1 = w1
            if ww2 > w2:
                ww2 = w2
            l1 = spectra[0, j, ww1]
            l2 = spectra[0, j, ww2]
            II = np.where((mask_l > l1) & (mask_h < l2))
            # if len(II[0]) > 0:
            #     print j, II[0][0], II[0][-1]
            nlu = len(II[0])
            nlines_used[j] = nlu
            snw1 = int(0.25 * spectra.shape[2])
            snw2 = int(0.75 * spectra.shape[2])
            if nlu > 0:
                # calculate median S/N
                # median_sn = np.median(spectra[5, j, w1:w2] * np.sqrt(spectra[6, j, w1:w2]))
                median_sn = np.median(spectra[sn_order, j, snw1:snw2])
                sn[j] = median_sn
                S = spectra[spec_order, j, w1:w2]
                # iv = spectra[iv_order, j, w1:w2]
                signal2noise = spectra[sn_order, j, w1:w2]
                snwa = np.zeros(N)
                for k in range(N):
                    # print k
                    Xcor_full[k, j + 1], snw = CCF.ccfcos(
                        mask_l[II], mask_h[II], spectra[0, j, w1:w2], S,
                        mask_w[II], signal2noise, vel_min + k * vel_step)
                    snwa[k] = snw
                    if np.isnan(Xcor_full[k, j + 1]):
                        Xcor_full[k, j + 1] = Xcor_full[k - 1, j + 1]
                        snwa[k] = snwa[k - 1]
                    # if k == 182 and j == 35:
                    #     print mask_l[II], mask_h[II], spectra[0, j, w1:w2], S, mask_w[II], signal2noise, vel_min + k*vel_step
                    #     for z in range(len(mask_l[II])):
                    #         III = np.where((spectra[0, j, w1:w2] >= mask_l[II][z]) & (spectra[0, j, w1:w2] <= mask_h[II][z]))[0]
                    #         print spectra[0, j, w1:w2][III], S[III]
                    #     print Xcor_full[k, j + 1]
                    #     print snw
                    #     print gfd
                xc_weight = np.median(snwa)
                Xcor_full[:, j + 1] /= snwa  # xc_weight
                W[j] = xc_weight

    return velocities, Xcor_full, sn, nlines_used, W
5,351,831
def marks_details(request, pk):
    """
    Display details for a given Mark
    """
    # Check permission
    if not has_access(request):
        raise PermissionDenied

    # Get context
    context = get_base_context(request)

    # Get object
    mark = get_object_or_404(Mark, pk=pk)
    mark.category_clean = mark.get_category_display()
    context['mark'] = mark

    # Get users connected to the mark
    context['mark_users'] = mark.given_to.all()

    # AJAX
    if request.method == 'POST':
        # is_ajax is a method on Django's request object, so it must be called
        if request.is_ajax() and 'action' in request.POST:
            resp = {'status': 200}
            context, resp = _handle_mark_detail(request, context, resp)

            # Set mark
            resp['mark'] = {
                'last_changed_date': context['mark'].last_changed_date.strftime("%Y-%m-%d"),
                'last_changed_by': context['mark'].last_changed_by.get_full_name(),
            }

            # Return ajax
            return HttpResponse(json.dumps(resp), status=resp['status'])

    # Render view
    return render(request, 'marks/dashboard/marks_details.html', context)
5,351,832
def clean_text(text):
    """
    text: a string

    return: modified initial string
    """
    text = BeautifulSoup(text, "lxml").text  # HTML decoding
    text = text.lower()  # lowercase text
    # replace REPLACE_BY_SPACE_RE symbols by space in text
    text = REPLACE_BY_SPACE_RE.sub(' ', text)
    # delete symbols which are in BAD_SYMBOLS_RE from text
    text = BAD_SYMBOLS_RE.sub('', text)
    # delete stopwords from text
    text = ' '.join(word for word in text.split() if word not in STOPWORDS)
    return text
5,351,833
def get_input_device(config):
    """ Create the InputDevice instance and handle errors """
    dev_path = config['flirc_device_path']
    logging.debug('get_input_device() dev_path: %s', dev_path)
    try:
        input_device = InputDevice(dev_path)
        return input_device
    except FileNotFoundError as exception:
        logging.error('Error opening device path %s', dev_path)
        logging.error('Error was: %s', exception)
        logging.error('FLIRC is likely not attached or the device path (FLIRC_DEV_PATH) is wrong')
        sys.exit(1)
5,351,834
def _simpsons_interaction(data, groups):
    """
    Calculation of Simpson's Interaction index

    Parameters
    ----------
    data : a pandas DataFrame

    groups : list of strings.
        The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
        Simpson's Interaction Index

    core_data : a pandas DataFrame
        A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Equation 1 of page 37 of Reardon, Sean F., and Glenn Firebaugh.
    "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Simpson's interaction index (I) can be simply interpreted as the probability
    that two individuals chosen at random and independently from the population
    will be found to not belong to the same group.

    Higher values mean less segregation.

    Simpson's Concentration + Simpson's Interaction = 1

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    data = _nan_handle(core_data)

    df = np.array(core_data)

    Pk = df.sum(axis=0) / df.sum()

    I = (Pk * (1 - Pk)).sum()

    return I, core_data, groups
5,351,835
def find_includes(armnn_include_env: str = INCLUDE_ENV_NAME):
    """Searches for ArmNN includes.

    Args:
        armnn_include_env(str): Environmental variable to use as path.

    Returns:
        list: A list of paths to include.
    """
    armnn_include_path = os.getenv(armnn_include_env)
    if armnn_include_path is not None and os.path.exists(armnn_include_path):
        armnn_include_path = [armnn_include_path]
    else:
        armnn_include_path = ['/usr/local/include', '/usr/include']
    return armnn_include_path
5,351,836
def read_missing_oids(oid_lines):
    """
    Parse lines into oids.

    >>> list(read_missing_oids([
    ...     "!!! Users 0 ?", "POSKeyError: foo",
    ...     "!!! Users 0 ?",
    ...     "!!! Users 1 ?",
    ...     "bad xref name, 1", "bad db",
    ... ]))
    [0, 1]
    """
    result = OidSet()
    for line in oid_lines:
        if line.startswith('bad') or ':' in line:
            continue
        if line.startswith('!!!'):
            # zc.zodbdgc output. bad OID is always the
            # third field.
            try:
                oid = int(line.split()[2])
            except (ValueError, IndexError):
                logger.info("Malformed zc.zodbdgc input: %s", line)
                continue
            result.add(oid)
        else:
            # Just an int
            try:
                oid = int(line)
            except ValueError:
                logger.info("Malformed input: %s", line)
            else:
                result.add(oid)
    if oid_lines != sys.stdin:
        oid_lines.close()
    return result
5,351,837
def run(target='192.168.1.1', ports=[21, 22, 23, 25, 80, 110, 111, 135, 139, 443, 445, 554,
                                     993, 995, 1433, 1434, 3306, 3389, 8000, 8008, 8080, 8888]):
    """
    Run a portscan against a target hostname/IP address

    `Optional`
    :param str target:   Valid IPv4 address
    :param list ports:   Port numbers to scan on target host

    :returns: Online targets & open ports as key-value pairs in a nested
        dictionary object in JSON format
    """
    global tasks
    global threads
    global results
    if not util.ipv4(target):
        raise ValueError("target is not a valid IPv4 address")
    if _ping(target):
        for port in ports:
            tasks.put_nowait((_scan, (target, port)))
        for i in range(1, tasks.qsize()):
            threads['portscan-%d' % i] = _threader()
        for t in threads:
            threads[t].join()
        return json.dumps(results[target])
    else:
        return "Target offline"
5,351,838
def go():
    """Runs the sample."""
    obj = Sample()
    obj.exec()
5,351,839
def check_factorial_validity(token, value):
    """
    Checks whether the factorial call is in limit.

    Parameters
    ----------
    token : ``Token``
        The parent token.
    value : `int` or `float`
        The value to use factorial on.

    Raises
    ------
    EvaluationError
        - Factorial is only allowed for integral values.
        - Factorial not defined for negative values.
        - Operation over factorial limit is disallowed.
    """
    if (not isinstance(value, int)) and (not value.is_integer()):
        raise EvaluationError(
            token.array, token.start, token.end,
            f'Factorial only accepts integral values: factorial({value!r})',
        )

    if value < 0:
        raise EvaluationError(
            token.array, token.start, token.end,
            f'Factorial is not defined for negative values: factorial({value!r})',
        )

    if value > LIMIT_FACTORIAL_MAX:
        raise EvaluationError(
            token.array, token.start, token.end,
            f'Factorial over {LIMIT_FACTORIAL_MAX} is disallowed: factorial({value!r})',
        )
5,351,840
def subsequent_mask(size: int):
    """
    Mask out subsequent positions (to prevent attending to future positions)
    Transformer helper function.

    :param size: size of mask (2nd and 3rd dim)
    :return: Tensor with 0s and 1s of shape (1, size, size)
    """
    mask = np.triu(np.ones((1, size, size)), k=1).astype("uint8")
    return torch.from_numpy(mask) == 0
5,351,841
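A quick look at the mask for size 3 -- position i may attend to positions up to and including i:

print(subsequent_mask(3))
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])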
def _get_dist_class(
    policy: Policy, config: TrainerConfigDict, action_space: gym.spaces.Space
) -> Type[TorchDistributionWrapper]:
    """Helper function to return a dist class based on config and action space.

    Args:
        policy (Policy): The policy for which to return the action dist class.
        config (TrainerConfigDict): The Trainer's config dict.
        action_space (gym.spaces.Space): The action space used.

    Returns:
        Type[TorchDistributionWrapper]: A torch distribution class.
    """
    if hasattr(policy, "dist_class") and policy.dist_class is not None:
        return policy.dist_class
    elif config["model"].get("custom_action_dist"):
        action_dist_class, _ = ModelCatalog.get_action_dist(
            action_space, config["model"], framework="torch"
        )
        return action_dist_class
    elif isinstance(action_space, Discrete):
        return TorchCategorical
    elif isinstance(action_space, Simplex):
        return TorchDirichlet
    else:
        assert isinstance(action_space, Box)
        if config["normalize_actions"]:
            return (
                TorchSquashedGaussian
                if not config["_use_beta_distribution"]
                else TorchBeta
            )
        else:
            return TorchDiagGaussian
5,351,842
def timeexec(fct, number, repeat):
    """
    Measures the time for a given expression.

    :param fct: function to measure (as a string)
    :param number: number of times to run the expression (and then divide by
        this number to get an average)
    :param repeat: number of times to repeat the computation of the above average
    :return: dictionary
    """
    rep = timeit_repeat(fct, number=number, repeat=repeat)
    ave = sum(rep) / (number * repeat)
    std = (sum((x / number - ave)**2 for x in rep) / repeat)**0.5
    fir = rep[0] / number
    fir3 = sum(rep[:3]) / (3 * number)
    las3 = sum(rep[-3:]) / (3 * number)
    rep.sort()
    mini = rep[len(rep) // 20] / number
    maxi = rep[-len(rep) // 20] / number
    return dict(average=ave, deviation=std, first=fir, first3=fir3,
                last3=las3, repeat=repeat, min5=mini, max5=maxi, run=number)
5,351,843
def is_bst(root):
    """
    checks if binary tree is binary search tree
    """
    def is_bst_util(root, min_value, max_value):
        """ binary search tree check utility function """
        if root is None:
            return True
        if (root.data >= min_value and root.data < max_value
                and is_bst_util(root.left, min_value, root.data)
                and is_bst_util(root.right, root.data, max_value)):
            return True
        return False

    return is_bst_util(root, -sys.maxsize - 1, sys.maxsize)
5,351,844
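A minimal check of is_bst with a hand-built node class (the function itself only assumes .data/.left/.right attributes):

import sys

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

root = Node(8, Node(3, Node(1), Node(6)), Node(10))
print(is_bst(root))              # True
print(is_bst(Node(8, Node(9))))  # False: 9 in the left subtree violates the bound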
def regexify(w, tags):
    """Convert a single component of a decomposition rule from Weizenbaum
    notation to regex.

    Parameters
    ----------
    w : str
        Component of a decomposition rule.
    tags : dict
        Tags to consider when converting to regex.

    Returns
    -------
    w : str
        Component of a decomposition rule converted to regex form.
    """
    # 0 means "an indefinite number of words"
    if w == '0':
        w = '.*'
    # A positive non-zero integer means "this specific amount of words"
    elif w.isnumeric() and int(w) > 0:
        w = r'(?:\b\w+\b[\s\r\n]*){' + w + '}'
    # A word starting with @ signifies a tag
    elif w[0] == "@":
        # Get tag name
        tag_name = w[1:].lower()
        w = tag_to_regex(tag_name, tags)
    else:
        # Add word boundaries to match on a whole word basis
        w = r'\b' + w + r'\b'
    return w
5,351,845
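How the Weizenbaum cases above translate for the plain (non-tag) inputs:

# regexify('0', {})     -> '.*'                             (any number of words)
# regexify('2', {})     -> '(?:\\b\\w+\\b[\\s\\r\\n]*){2}'  (exactly two words)
# regexify('hello', {}) -> '\\bhello\\b'                    (whole-word match)
# '@family' would be routed through tag_to_regex('family', tags).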
def test_handling_unexpected_exception(testdata_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
    """Test that an unexpected exception during partial object validation is handled as an error."""
    element_str = 'catalog'
    full_path = testdata_dir / 'json/minimal_catalog.json'
    command_str = f'trestle partial-object-validate -f {str(full_path)} -e {element_str}'
    monkeypatch.setattr(sys, 'argv', command_str.split())
    monkeypatch.setattr(
        'trestle.core.commands.partial_object_validate.PartialObjectValidate.partial_object_validate',
        test_utils.patch_raise_exception
    )
    rc = Trestle().run()
    assert rc > 0
5,351,846
def test_subgrid_error():
    """Capture errors in the subgrid specification"""
    with pytest.raises(SystemExit):
        # j0 outside grid
        g = Grid(filename=grid_file, subgrid=(20, 150, 30, 222))
    with pytest.raises(SystemExit):
        # i0 > i1
        g = Grid(filename=grid_file, subgrid=(50, 20, 30, 170))
5,351,847
def row_dot_product(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """
    Returns a vectorized dot product between the rows of a and b

    :param a: An array of shape (N, M) or (M, ) (or a shape that can be broadcast to (N, M))
    :param b: An array of shape (N, M) or (M, ) (or a shape that can be broadcast to (N, M))
    :return: A vector of shape (N, ) whose elements are the dot product of rows a, b
    """
    return np.einsum('ij,ij->i', np.atleast_2d(a), np.atleast_2d(b))
5,351,848
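The einsum above is equivalent to a row-by-row dot product; a quick check:

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
print(row_dot_product(a, b))  # [17 53]  (1*5+2*6, 3*7+4*8)
print(row_dot_product(np.array([1, 2]), np.array([5, 6])))  # [17], 1-D inputs are promoted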
def pmg_pickle_dump(obj, filobj, **kwargs):
    """
    Dump an object to a pickle file using PmgPickler.

    Args:
        obj: Object to dump.
        filobj: File-like object
        \\*\\*kwargs: Any of the keyword arguments supported by PmgPickler
    """
    return PmgPickler(filobj, **kwargs).dump(obj)
5,351,849
def condition_header(header, needed_keys=None):
    """Return a dictionary of all `needed_keys` from `header` after passing
    their values through the CRDS value conditioner.
    """
    header = {key.upper(): val for (key, val) in header.items()}
    if not needed_keys:
        needed_keys = header.keys()
    else:
        needed_keys = [key.upper() for key in needed_keys]
    conditioned = {key: condition_value(header[key]) for key in needed_keys}
    return conditioned
5,351,850
def check_is_proba(entry: Union[float, int], name: str = None):
    """Check whether the number is non-negative and less than or equal to 1."""
    if name is None:
        name = 'Probabilities'
    if type(entry) not in [float, int]:
        raise TypeError('{} must be floats (or ints if 0 or 1).'.format(name))
    if entry < 0 or entry > 1:
        raise ValueError('{} must have value between 0 and 1.'.format(name))
5,351,851
def get_generic_path_information(paths, stat_prefix=""):
    """
    Get an OrderedDict with a bunch of statistic names and values.
    """
    statistics = OrderedDict()
    returns = [sum(path["rewards"]) for path in paths]

    # rewards = np.vstack([path["rewards"] for path in paths])
    rewards = np.concatenate([path["rewards"] for path in paths])
    statistics.update(
        create_stats_ordered_dict(
            "Rewards", rewards, stat_prefix=stat_prefix, always_show_all_stats=True
        )
    )
    statistics.update(
        create_stats_ordered_dict(
            "Returns", returns, stat_prefix=stat_prefix, always_show_all_stats=True
        )
    )
    # print(paths[0]["env_infos"])
    if "is_success" in paths[0]["env_infos"][0].keys():
        acc_sum = [(np.sum([x['is_success'] for x in path["env_infos"]]) > 0).astype(float)
                   for path in paths]
        acc = np.sum(acc_sum) * 1.0 / len(paths)
        statistics.update(
            create_stats_ordered_dict(
                "Success Num", np.sum(acc_sum), stat_prefix=stat_prefix,
                always_show_all_stats=True
            )
        )
        statistics.update(
            create_stats_ordered_dict(
                "Traj Num", len(paths), stat_prefix=stat_prefix,
                always_show_all_stats=True
            )
        )
        statistics.update(
            create_stats_ordered_dict(
                "Success Rate", acc, stat_prefix=stat_prefix,
                always_show_all_stats=True
            )
        )
    actions = [path["actions"] for path in paths]
    # if isinstance(actions[0][0], np.ndarray):
    #     actions = np.vstack([path["actions"] for path in paths])
    # else:
    #     actions = np.hstack([path["actions"] for path in paths])
    statistics.update(
        create_stats_ordered_dict(
            "Actions", actions, stat_prefix=stat_prefix, always_show_all_stats=True
        )
    )
    statistics.update(
        create_stats_ordered_dict(
            "Ep. Len.",
            np.array([len(path["terminals"]) for path in paths]),
            stat_prefix=stat_prefix,
            always_show_all_stats=True,
        )
    )
    statistics["Num Paths"] = len(paths)

    return statistics
5,351,852
def pad_images(images, nlayers):
    """
    In Unet, every layer the dimension gets divided by 2 in the encoder path.
    Therefore the image size should be divisible by 2^nlayers.
    """
    import math
    import numpy as np
    divisor = 2**nlayers
    # unpack as (z, x, y); avoid shadowing the nlayers argument
    z, x, y = images.shape
    x_pad = int((math.ceil(x / float(divisor)) * divisor) - x)
    y_pad = int((math.ceil(y / float(divisor)) * divisor) - y)
    padded_image = np.pad(images, ((0, 0), (0, x_pad), (0, y_pad)),
                          'constant', constant_values=(0, 0))
    return padded_image
5,351,853
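A shape check for the padding above with two layers (divisor 4):

import numpy as np

stack = np.zeros((5, 30, 30))       # (z, x, y)
padded = pad_images(stack, nlayers=2)
print(padded.shape)                  # (5, 32, 32): 30 -> next multiple of 4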
def remove_measurements(measurements, model_dict, params=None):
    """Remove measurements from a model specification.

    If provided, a params DataFrame is also reduced correspondingly.

    Args:
        measurements (str or list): Name(s) of the measurement(s) to remove.
        model_dict (dict): The model specification. See: :ref:`model_specs`.
        params (pandas.DataFrame or None): The params DataFrame for the full model.

    Returns:
        dict: The reduced model dictionary
        pandas.DataFrame: The reduced parameter DataFrame (only if params is
            not None)

    """
    out = deepcopy(model_dict)
    for factor in model_dict["factors"]:
        full = model_dict["factors"][factor]["measurements"]
        reduced = [_remove_from_list(meas_list, measurements) for meas_list in full]
        out["factors"][factor]["measurements"] = reduced

        norminfo = model_dict["factors"][factor].get("normalizations", {})
        if "loadings" in norminfo:
            out["factors"][factor]["normalizations"][
                "loadings"
            ] = _remove_measurements_from_normalizations(
                measurements, norminfo["loadings"]
            )
        if "intercepts" in norminfo:
            out["factors"][factor]["normalizations"][
                "intercepts"
            ] = _remove_measurements_from_normalizations(
                measurements, norminfo["intercepts"]
            )

    if params is not None:
        out_params = _reduce_params(params, out)
        out = (out, out_params)
    return out
5,351,854
def make_output(platform, output):
    """Return libraries list"""
    libraries = parse_libs()
    # OS-specific classpath separator
    sep = {"win32": ";", "linux": ":", "darwin": ":"}
    out_lib = str()

    # Generate libraries list
    for lib in libraries:
        out_lib = out_lib + "$MC_DIR/libraries/{0}".format(lib) + sep[platform]
    out_lib = out_lib + "$MC_DIR/versions/$GAME_VERSION/$GAME_VERSION.jar"

    # Replace for OS shell variable symbol
    if platform == "win32":
        out_lib = out_lib.replace("$MC_DIR", "%MC_DIR%")
        out_lib = out_lib.replace("$GAME_VERSION", "%GAME_VERSION%")

    if output == "tty":
        click.echo(out_lib)
        if platform == "win32":
            print("\nWindows generate libraries list complete!")
        elif platform in ("linux", "darwin"):
            print("\nUnix generate libraries list complete!")
    elif output == "txt":
        with open("./libs.txt", "w", encoding="utf-8") as f:
            f.write(out_lib)
        if platform == "win32":
            print("\nWindows generate libraries list complete!\n"
                  "See libs.txt file.")
        elif platform in ("linux", "darwin"):
            print("\nUnix generate libraries list complete!\n"
                  "See libs.txt file.")
5,351,855
def _output(command: Sequence[str]) -> None:
    """Helper function. Prints start/finish and runs the command in between."""
    print(f'========== {command[0]} starting ==========')
    subprocess.run(command)
    print(f'========== {command[0]} finished ==========')
5,351,856
def good2Go(SC, L, CC, STR):
    """
    Check, if all input is correct and runnable
    """
    if SC == 1 and L == 1 and CC == 1 and STR == 1:
        return True
    else:
        print(SC, L, CC, STR)
        return False
5,351,857
def __validate_tweet_name(tweet_name: str, error_msg: str) -> str:
    """Validate the tweet's name.

    Parameters
    ----------
    tweet_name : str
        Tweet's name.
    error_msg : str
        Error message to display for an invalid name.

    Returns
    -------
    str
        Validated tweet name.

    Raises
    ------
    InvalidTweetName
        Raised for invalid tweet names.
    """
    if tweet_name == "":
        raise InvalidTweetName(error_msg)
    else:
        return tweet_name
5,351,858
def convert_event(obj):
    """
    :type obj: :class:`sir.schema.modelext.CustomEvent`
    """
    event = models.event(id=obj.gid, name=obj.name)
    if obj.comment:
        event.set_disambiguation(obj.comment)
    if obj.type is not None:
        event.set_type(obj.type.name)
        event.set_type_id(obj.type.gid)
    lifespan = convert_life_span(obj.begin_date, obj.end_date, obj.ended)
    if lifespan.get_begin() is not None or lifespan.get_end() is not None:
        event.set_life_span(lifespan)
    if obj.time is not None:
        event.set_time(datetime_to_string(obj.time))
    if obj.area_links:
        event.add_relation_list(convert_event_area_relation_list(obj.area_links))
    if obj.artist_links:
        event.add_relation_list(convert_artist_relation_list(obj.artist_links))
    if obj.place_links:
        event.add_relation_list(convert_place_relation_list(obj.place_links))
    if obj.aliases:
        event.set_alias_list(convert_alias_list(obj.aliases))
    if obj.tags:
        event.set_tag_list(convert_tag_list(obj.tags))
    return event
5,351,859
def get_pr_review_status(pr: PullRequestDetails, per_page: int = 100) -> Any:
    """
    References:
        https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
    """
    url = (f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
           f"/pulls/{pr.pull_id}/reviews"
           f"?per_page={per_page};access_token={pr.repo.access_token}")
    response = requests.get(url)

    if response.status_code != 200:
        raise RuntimeError(
            'Get review failed. Code: {}. Content: {}.'.format(
                response.status_code, response.content))

    return json.JSONDecoder().decode(response.content.decode())
5,351,860
def make_sph_model(filename):
    """reads a spherical model text file and generates interpolated values

    Args:
        filename:

    Returns:
        model:
    """
    M = np.loadtxt(filename,
                   dtype={'names': ('rcurve', 'potcurve', 'dpotcurve'),
                          'formats': ('f4', 'f4', 'f4')},
                   skiprows=1)
    model = spherical_model()
    model.rcurve = M['rcurve']
    # interpolate the potential and its derivative with cubic splines
    model.potcurve = UnivariateSpline(model.rcurve, M['potcurve'], k=3)
    model.dpotcurve = UnivariateSpline(model.rcurve, M['dpotcurve'], k=3)
    return model
5,351,861
def list_dir(filepath):
    """List the files in the directory"""
    return sorted(os.path.join(filepath, x) for x in os.listdir(filepath))
5,351,862
def minimum(x, y):
    """
    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64,
        int32, int64.
    y : tensor
        A Tensor. Must have the same type as x.

    Returns
    -------
    A Tensor. Has the same type as x
    """
    return pd.minimum(x, y)
5,351,863
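A minimal usage sketch, assuming `pd` is an alias for `paddle` (which provides an element-wise `paddle.minimum`):

import paddle as pd

x = pd.to_tensor([1.0, 5.0, 2.0])
y = pd.to_tensor([2.0, 3.0, 2.0])
print(pd.minimum(x, y))  # element-wise minimum: [1.0, 3.0, 2.0]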
def seconds_to_timestamp(seconds): """ Convert from seconds to a timestamp """ minutes, seconds = divmod(float(seconds), 60) hours, minutes = divmod(minutes, 60) return "%02d:%02d:%06.3f" % (hours, minutes, seconds)
5,351,864
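A quick worked example; the nested divmod peels off whole minutes, then whole hours, and the format string zero-pads each field:

print(seconds_to_timestamp(3661.5))  # "01:01:01.500"
print(seconds_to_timestamp(59.25))   # "00:00:59.250"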
def query(querystring: str, db: tsdb.Database, **kwargs):
    """
    Perform query *querystring* on the database *db*.

    Note: currently only 'select' queries are supported.

    Args:
        querystring (str): TSQL query string
        db (:class:`delphin.tsdb.Database`): database to query over
        kwargs: keyword arguments passed to the more specific query
            function (e.g., :func:`select`)
    Example:
        >>> list(tsql.query('select i-id where i-length < 4', db))
        [[142], [1061]]
    """
    queryobj = _parse_query(querystring)

    if queryobj['type'] in ('select', 'retrieve'):
        return _select(
            queryobj['projection'],
            queryobj['relations'],
            queryobj['condition'],
            db,
            record_class=kwargs.get('record_class', None))
    else:
        # not really a syntax error; replace with TSQLError or something
        # when the proper exception class exists
        raise TSQLSyntaxError(queryobj['type'] + ' queries are not supported',
                              text=querystring)
5,351,865
def rename_SI_cols(dataset, removenans=True):
    """
    names columns of Spectro Inlets data like EC-Lab and PyExpLabSys+cinfdata
    name them.
    """
    if isinstance(dataset, dict):
        print("WARNING!!! The use of dataset dictionaries is no longer supported!!!")
        data = dataset
    else:
        data = dataset.data

    data_cols = data["data_cols"].copy()
    # ^ to avoid changing the size of a set during iteration
    for col_0 in data_cols:
        col = "test"
        if get_type(col_0, dataset=dataset) != "SI":
            continue
        if re.search("^C[0-9]+", col_0):
            try:
                mass = re.search(r"M[0-9]+", col_0).group()
            except AttributeError:
                print("Can't rename SI col " + col_0)
                continue
            if "Time" in col_0:
                col = mass + "-x"
            else:
                col = mass + "-y"
            col_type = "MS"
        elif re.search("^pot", col_0):
            for c0, c in [
                ("Time", "time/s"),
                ("Voltage", "Ewe/V"),
                ("Current", "I/mA"),
                ("Cycle", "cycle number"),
            ]:
                if c0 in col_0:
                    col = c
                    # print('col = ' + col + ', col_0 = ' + col_0) # debugging
                    break
            else:
                print("Can't rename SI col " + col_0)
                continue
            col_type = "EC"
        else:
            # print('Not renaming SI col ' + col_0)
            continue
        data[col] = data[col_0].copy()
        data["col_types"][col] = col_type
        data["data_cols"].add(col)
        print(col_0 + " copied to " + col)

    if removenans:
        remove_nans(data)
    data["data_type"] = "combined"
5,351,866
def TTF_SizeUTF8(font, text, w, h): """Calculates the size of a UTF8-encoded string rendered with a given font. See :func:`TTF_SizeText` for more info. Args: font (:obj:`TTF_Font`): The font object to use. text (bytes): A UTF8-encoded bytestring of text for which the rendered surface size should be calculated. w (byref(:obj:`~ctypes.c_int`)): A pointer to an integer in which to store the calculated surface width (in pixels). h (byref(:obj:`~ctypes.c_int`)): A pointer to an integer in which to store the calculated surface height (in pixels). Returns: int: 0 on success, or -1 on error (e.g. if a glyph is not found in the font). """ return _funcs["TTF_SizeUTF8"](font, text, w, h)
5,351,867
def get_mse(y_true, y_hat): """ Return the mean squared error between the ground truth and the prediction :param y_true: ground truth :param y_hat: prediction :return: mean squared error """ return np.mean(np.square(y_true - y_hat))
5,351,868
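Usage sketch with NumPy arrays (the two inputs must broadcast against each other):

import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_hat = np.array([1.0, 2.0, 5.0])
print(get_mse(y_true, y_hat))  # (0 + 0 + 4) / 3 = 1.333...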
def generate_v2_token(username, version, client_ip, issued_at_timestamp, email=''):
    """Creates the JSON Web Token with a new schema

    :Returns: String

    :param username: The name of person who the token identifies
    :type username: String

    :param version: The version number for the token
    :type version: Integer/String

    :param client_ip: The IP of machine the client used to request a token.
    :type client_ip: String

    :param issued_at_timestamp: The Unix epoch timestamp at which the token was issued.
    :type issued_at_timestamp: Integer/Float

    :param email: The email address associated with a user.
    :type email: String
    """
    claims = {'exp' : issued_at_timestamp + const.AUTH_TOKEN_TIMEOUT,
              'iat' : issued_at_timestamp,
              'iss' : const.VLAB_URL,
              'username' : username,
              'version' : version,
              'client_ip' : client_ip,
              'email' : email,
             }
    return jwt.encode(claims, const.AUTH_TOKEN_SECRET, algorithm=const.AUTH_TOKEN_ALGORITHM)
5,351,869
def choose(n, k):
    """return n choose k

    resilient (though not immune) to integer overflow"""
    if n == 1:
        # optimize by far most-common case
        return 1
    # integer division keeps the result an int in Python 3
    return fact_div(n, max(k, n - k)) // math.factorial(min(k, n - k))
5,351,870
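Assuming the helper fact_div(n, k) computes n!/k! (which is what the identity below requires), the call reduces to the usual binomial formula:

# choose(5, 2) -> fact_div(5, 3) // 2!  ->  (5*4) // 2  ->  10
print(choose(5, 2))  # 10
print(choose(1, 0))  # 1 (fast path)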
def part_one(puzzle_input: List[str]) -> int: """Find the highest seat ID on the plane""" return max(boarding_pass_to_seat_id(line) for line in puzzle_input)
5,351,871
def readbit(val, bitidx):
    """ Return bit *bitidx* of word *val* as 0 or 1 """
    return int((val & (1 << bitidx)) != 0)
5,351,872
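For example, probing individual bits of 0b1010:

print(readbit(0b1010, 1))  # 1 -- bit 1 is set
print(readbit(0b1010, 2))  # 0 -- bit 2 is clear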
def main():
    """
    pred_dict : dict. {'img1.png': [array, array], ...}
        array: [x1, y1, x2, y2, conf] in bordered, borderless order
    gt_dict: dict. {{'img1.png': [array, array], ...}}
        array: [x1, y1, x2, y2]
    """
    with open(pred_pickle, 'rb') as f:
        pred_dict = pickle.load(f)
    with open(gt_pickle, 'rb') as f:
        gt_dict = pickle.load(f)

    ious = [0.6, 0.7, 0.8, 0.9]
    print('\n')
    for iou in ious:
        images_result = get_images_results(gt_dict, pred_dict, conf_thr=conf_thr, iou_thr=iou, mode='all')
        precision, recall, f1 = calc_precision_recall_f1(images_result)
        print('[iou=%f]' % iou)
        print(images_result)
        print(f'precision: {precision}, recall: {recall}, f1: {f1}')
        print('\n')
5,351,873
def mdtf_rename_img(varlist,conv,img_dir): """Rename figure files to use variable names from settings file, not conventions.""" for f in img_dir.iterdir(): f_name = str(f.name) if not f_name.startswith("."): for pod_var in varlist: standard_name = varlist[pod_var]["standard_name"] dim_len = len(varlist[pod_var]["dimensions"]) conv_var = conv.lookup_by_standard_name(standard_name,dim_len,suppress_warning=True) if conv_var is not None: if "scalar_coordinates" in varlist[pod_var]: try: conv_var += str(varlist[pod_var]["scalar_coordinates"]["lev"]) except KeyError: conv_var += str(varlist[pod_var]["scalar_coordinates"]["plev"]) if ("_"+conv_var+".png" in f_name): f_new = img_dir/f_name.replace(conv_var,pod_var) f.rename(f_new)
5,351,874
def test_immediate_datafuture(): """Test DataFuture string representation, for AppFutures launched with parent """ import time fu = echo_slow_message("Hello world", sleep=1, outputs=["hello.1.txt"]) d_fu = fu.outputs[0] time.sleep(0.1) state_2 = d_fu.__str__() print("State_2 : ", state_2, "Fu:", fu.parent) assert "running" in state_2, "DataFuture should now be running" d_fu.result() state_3 = d_fu.__str__() print("State_3 : ", state_3, "Fu:", fu.parent) assert "finished" in state_3, "DataFuture should now be finished"
5,351,875
def matrix_bombing_plan(m): """ This method calculates sum of the matrix by trying every possible position of the bomb and returns a dictionary. Dictionary's keys are the positions of the bomb and values are the sums of the matrix after the damage """ matrix = deepcopy(m) rows = len(m) columns = len(m[0]) d = {} for x in range(0, rows): for y in range(0, columns): p = (x, y) neighbours = find_neighbour(matrix, (x, y)) d[p] = sum_matrix(neighbours) return d
5,351,876
def coord_to_gtp(coord, board_size):
    """ From 1d coord (0 for position 0,0 on the board) to A1 """
    if coord == board_size ** 2:
        return "pass"
    # GTP column letters skip "I"
    return "{}{}".format("ABCDEFGHJKLMNOPQRSTUVWXYZ"[int(coord % board_size)],\
                          int(board_size - coord // board_size))
5,351,877
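With the corrected letter table (GTP columns skip "I"), a few spot checks on a 19x19 board:

print(coord_to_gtp(0, 19))    # "A19" -- top-left corner
print(coord_to_gtp(360, 19))  # "T1"  -- bottom-right corner
print(coord_to_gtp(361, 19))  # "pass"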
def test_alternative_clustering_method(ClusterModel):
    """
    Test that users can supply an alternative clustering method as dependency injection
    """

    def clusterer(X: np.ndarray, k: int, another_test_arg):
        """
        Function to wrap a sklearn model as a clusterer for OptimalK
        The first two arguments are always the data matrix and k; any extra
        arguments can be supplied via ``clusterer_kwargs``.
        """
        m = ClusterModel()
        m.fit(X)
        assert another_test_arg == "test"
        return m.cluster_centers_, m.predict(X)

    optimalk = OptimalK(
        n_jobs=-1,
        parallel_backend="joblib",
        clusterer=clusterer,
        clusterer_kwargs={"another_test_arg": "test"},
    )
    X, y = make_blobs(n_samples=50, n_features=2, centers=3)

    n_clusters = optimalk(X, n_refs=3, cluster_array=np.arange(1, 5))
    assert isinstance(n_clusters, int)
5,351,878
def load_dataset(spfile, twfile):
    """Loads dataset given the span file and the tweets file

    Arguments:
        spfile {string} -- path to span file
        twfile {string} -- path to tweets file

    Returns:
        dict -- dictionary of tweet-id to Tweet object
    """
    tw_int_map = {}
    for line in open(twfile, 'r'):
        twid = get_basename_without_extension(line.strip('\n'))  # ANTONIO
        tweet = Tweet(twid)
        if twid in tw_int_map:
            log.warning("Possible duplicate %s", twid)
        tw_int_map[twid] = tweet
    # Load annotations
    for line in open(spfile, 'r'):
        parts = [x.strip() for x in line.split("\t")]
        if len(parts) != 5:
            log.warning("Tab-delimited field count not correct: %s", len(parts))
            continue
        twid, start, end, atype, prof = parts
        if twid == "tweet_id":
            continue
        if twid in tw_int_map:
            tweet = tw_int_map[twid]
        else:
            log.warning("Invalid tweetid %s not found.", twid)
            continue
        valid_labels = ["PROTEINAS", "NORMALIZABLES", "UNCLEAR", "NO-NORMALIZABLES"]
        if atype in valid_labels:
            ann = Ann(prof.strip(), atype, start, end)
            tweet.anns.append(ann)
        tweet.has_ann = (tweet.has_ann or atype in valid_labels)
    num_anns = sum([len(x.anns) for _, x in tw_int_map.items()])
    log.info("Loaded dataset %s tweets. %s annotations.", len(tw_int_map), num_anns)
    return tw_int_map
5,351,879
def nameof(var, *more_vars,
           # *, keyword only argument, supported with python3.8+
           frame: int = 1,
           vars_only: bool = True) -> Union[str, Tuple[str]]:
    """Get the names of the variables passed in

    Examples:
        >>> a = 1
        >>> nameof(a) # 'a'

        >>> b = 2
        >>> nameof(a, b) # ('a', 'b')

        >>> x = lambda: None
        >>> x.y = 1
        >>> nameof(x.y, full=True) # 'x.y'

    Note:
        This function works with the environments where source code is
        available, in other words, the callee's node can be retrieved by
        `executing`. In some cases, for example, running code from python
        shell/REPL or from `exec`/`eval`, we try to fetch the variable name
        from the bytecode. This requires only a single variable name is passed
        to this function and no keyword arguments, meaning that getting full
        names of attribute calls is not supported in such cases.

    Args:
        var: The variable to retrieve the name of
        *more_vars: Other variables to retrieve the names of
        frame: The depth of the frame this function is called from, counting
            any wrappers of it. `frame=1` means no wrappers.
            Note that calls from standard libraries are ignored.
            Also note that the wrapper has to have the same signature as this one.
        vars_only: Whether only allow variables/attributes as arguments or
            any expressions. If `True`, then the sources of the arguments
            will be returned.

    Returns:
        The names/sources of variables/expressions passed in.
        If a single argument is passed, return the name/source of it.
        If multiple variables are passed, return a tuple of their
        names/sources.
        If the argument is an attribute (e.g. `a.b`) and `vars_only` is
        `False`, only `"b"` will be returned. Set `vars_only` to `True` to
        get `"a.b"`.

    Raises:
        VarnameRetrievingError: When the callee's node cannot be retrieved or
            trying to retrieve the full name of non attribute series calls.
    """
    # Frame is anyway used in get_node
    frameobj = IgnoreList.create(
        ignore_lambda=False,
        ignore_varname=False
    ).get_frame(frame)

    node = get_node_by_frame(frameobj, raise_exc=True)
    if not node:
        # We can't retrieve the node by executing.
        # It can be due to running code from python/shell, exec/eval or
        # other environments where sourcecode cannot be reached
        # make sure we keep it simple (only single variable passed and no
        # full passed) to use bytecode_nameof
        #
        # We don't have to check keyword arguments here, as the instruction
        # will then be CALL_FUNCTION_KW.
        if not more_vars:
            return bytecode_nameof(frameobj.f_code, frameobj.f_lasti)

        # We are anyway raising exceptions, no worries about additional burden
        # of frame retrieval again

        source = frameobj.f_code.co_filename
        if source == '<stdin>':
            raise VarnameRetrievingError(
                "Are you trying to call nameof in REPL/python shell? "
                "In such a case, nameof can only be called with single "
                "argument and no keyword arguments."
            )
        if source == '<string>':
            raise VarnameRetrievingError(
                "Are you trying to call nameof from exec/eval? "
                "In such a case, nameof can only be called with single "
                "argument and no keyword arguments."
            )
        raise VarnameRetrievingError(
            "Source code unavailable, nameof can only retrieve the name of "
            "a single variable, and argument `full` should not be specified."
        )

    return argname(
        var, *more_vars,
        func=nameof,
        frame=frame,
        vars_only=vars_only,
        pos_only=True
    )
5,351,880
def ui_save_newspaper_text_to_disk(dir_name, newspaper_text_by_date): """Saves data to disk at current working directory and prints UI messages. Makes new directory named by the `dir_name` parameter, adding a suffix to avoid naming collisions. Does not merge issues from current call into an equally named directory already present on disk. Args: dir_name (str): Name of directory in which to save newspaper text files. Creates directory if it doesn't exist. Adds unique suffix to `dir_name` if a directory with the name already exists. newspaper_text_by_date (dict): Dict where keys are filenames and values are text file contents to save. Returns: Nothing. Has side-effects on filesystem. Raises: IOError: Bubbles up from `lccn_to_disk()` if writing files fails. FileNotFoundError: Bubbles up from `lccn_to_disk()` if supplied `dir_name` doesn't exist. """ # Create a directory in current working directory with recursive naming to # avoid collisions dir_name = makedirs_with_rename(dir_name) # Write to disk in created directory number_of_files_written = lccn_to_disk(dir_name, newspaper_text_by_date) # Show filesystem changes to user print('{} file(s) written to disk'.format(number_of_files_written)) real_path = os.path.join(os.getcwd(), dir_name) print('Data saved to: `{}`'.format(real_path))
5,351,881
def cache_get_filepath(key): """Returns cache path.""" return os.path.join(settings.CACHE_PATH, key)
5,351,882
def scorer(func):
    """This function is a decorator for a scoring function.
    This is a hack to get around self being passed as the first
    argument to the scoring function."""
    def wrapped(a, b=None):
        if b is not None:
            return func(b)
        return func(a)
    return wrapped
5,351,883
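A sketch of the intended use (the class and function names here are illustrative): the decorated function is written without self, and the wrapper discards the instance when the method is called bound:

class Model:
    @scorer
    def score(text):           # no self on purpose; scorer absorbs it
        return len(text)

m = Model()
print(m.score("abc"))          # wrapped(m, "abc") -> func("abc") -> 3
print(Model.score("abc"))      # wrapped("abc")    -> func("abc") -> 3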
def print_stats(yards):
    """
    This function prints the final stats after a skier has crashed.
    """
    print()
    print("You skied a total of", yards, "yards!")
    #print("Want to take another shot?")
    print()
    return 0
5,351,884
def _calculate_risk_reduction(module): """ Function to calculate the risk reduction due to testing. The algorithms used are based on the methodology presented in RL-TR-92-52, "SOFTWARE RELIABILITY, MEASUREMENT, AND TESTING Guidebook for Software Reliability Measurement and Testing." Rather than attempting to estimate the software failure rate, RTK provides a risk index for the software based on the same factors used in RL-TR-92-52 for estimating software failure rates. RTK also provides test planning guidance in the same manner as RL-TR-92-52. :param module: the :py:class:`rtk.software.CSCI.Model` or :py:class:`rtk.software.Unit.Model` data model to calculate. :return: _error_code :rtype: int """ # WARNING: Refactor _calculate_risk_reduction; current McCabe Complexity metric = 13. _error_code = 0 # Calculate the risk reduction due to the test effort. try: if module.test_effort == 1: # Labor hours _test_ratio = float(module.labor_hours_test) / \ float(module.labor_hours_dev) elif module.test_effort == 2: # Budget _test_ratio = float(module.budget_test) / \ float(module.budget_dev) elif module.test_effort == 3: # Schedule _test_ratio = float(module.schedule_test) / \ float(module.schedule_dev) else: _test_ratio = 1.0 except ZeroDivisionError: _error_code = 10 _test_ratio = 0.0 module.te = 1.0 if _test_ratio > 0.4: module.te = 0.9 # Calculate the risk reduction due to test methods used. module.tm = 1.0 module.tu = sum([_tu[0] for _tu in module.lst_test_selection]) module.tt = sum([_tt[1] for _tt in module.lst_test_selection]) try: if module.tu / module.tt > 0.75: module.tm = 0.9 elif module.tu / module.tt < 0.5: module.tm = 1.1 except ZeroDivisionError: _error_code = 10 # Calculate the risk reduction due to test coverage. try: if module.level_id == 2: # Module _VS = ((float(module.nm_test) / float(module.nm)) + (float(module.interfaces_test) / float(module.interfaces))) / 2.0 elif module.level_id == 3: # Unit _VS = ((float(module.branches_test) / float(module.branches)) + (float(module.inputs_test) / float(module.inputs))) / 2.0 else: _VS = 1.0 except ZeroDivisionError: _error_code = 10 _VS = 1.0 module.tc = 1.0 / _VS module.t_risk = module.te * module.tm * module.tc return _error_code
5,351,885
def run_metarl(env, test_env, seed, log_dir): """Create metarl model and training.""" deterministic.set_seed(seed) snapshot_config = SnapshotConfig(snapshot_dir=log_dir, snapshot_mode='gap', snapshot_gap=10) runner = LocalRunner(snapshot_config) obs_dim = int(np.prod(env[0]().observation_space.shape)) action_dim = int(np.prod(env[0]().action_space.shape)) reward_dim = 1 # instantiate networks encoder_in_dim = obs_dim + action_dim + reward_dim encoder_out_dim = params['latent_size'] * 2 net_size = params['net_size'] context_encoder = MLPEncoder(input_dim=encoder_in_dim, output_dim=encoder_out_dim, hidden_sizes=[200, 200, 200]) space_a = akro.Box(low=-1, high=1, shape=(obs_dim + params['latent_size'], ), dtype=np.float32) space_b = akro.Box(low=-1, high=1, shape=(action_dim, ), dtype=np.float32) augmented_env = EnvSpec(space_a, space_b) qf1 = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size]) qf2 = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size]) obs_space = akro.Box(low=-1, high=1, shape=(obs_dim, ), dtype=np.float32) action_space = akro.Box(low=-1, high=1, shape=(params['latent_size'], ), dtype=np.float32) vf_env = EnvSpec(obs_space, action_space) vf = ContinuousMLPQFunction(env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size]) policy = TanhGaussianMLPPolicy2( env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size]) context_conditioned_policy = ContextConditionedPolicy( latent_dim=params['latent_size'], context_encoder=context_encoder, policy=policy, use_ib=params['use_information_bottleneck'], use_next_obs=params['use_next_obs_in_context'], ) train_task_names = ML10.get_train_tasks()._task_names test_task_names = ML10.get_test_tasks()._task_names pearlsac = PEARLSAC( env=env, test_env=test_env, policy=context_conditioned_policy, qf1=qf1, qf2=qf2, vf=vf, num_train_tasks=params['num_train_tasks'], num_test_tasks=params['num_test_tasks'], latent_dim=params['latent_size'], meta_batch_size=params['meta_batch_size'], num_steps_per_epoch=params['num_steps_per_epoch'], num_initial_steps=params['num_initial_steps'], num_tasks_sample=params['num_tasks_sample'], num_steps_prior=params['num_steps_prior'], num_extra_rl_steps_posterior=params['num_extra_rl_steps_posterior'], num_evals=params['num_evals'], num_steps_per_eval=params['num_steps_per_eval'], batch_size=params['batch_size'], embedding_batch_size=params['embedding_batch_size'], embedding_mini_batch_size=params['embedding_mini_batch_size'], max_path_length=params['max_path_length'], reward_scale=params['reward_scale'], train_task_names=train_task_names, test_task_names=test_task_names, ) tu.set_gpu_mode(params['use_gpu'], gpu_id=0) if params['use_gpu']: pearlsac.to() tabular_log_file = osp.join(log_dir, 'progress.csv') tensorboard_log_dir = osp.join(log_dir) dowel_logger.add_output(dowel.StdOutput()) dowel_logger.add_output(dowel.CsvOutput(tabular_log_file)) dowel_logger.add_output(dowel.TensorBoardOutput(tensorboard_log_dir)) runner.setup(algo=pearlsac, env=env, sampler_cls=PEARLSampler, sampler_args=dict(max_path_length=params['max_path_length'])) runner.train(n_epochs=params['num_epochs'], batch_size=params['batch_size']) dowel_logger.remove_all() return tabular_log_file
5,351,886
def get_default_mutation_op(dom): """ Returns the default mutation operator for the domain. """ if dom.get_type() == 'euclidean': return lambda x: euclidean_gauss_mutation(x, dom.bounds) elif dom.get_type() == 'integral': return lambda x: integral_gauss_mutation(x, dom.bounds) elif dom.get_type() == 'discrete': return lambda x: discrete_random_mutation(x, dom.list_of_items) elif dom.get_type() == 'prod_discrete': return lambda x: prod_discrete_random_mutation(x, dom.list_of_list_of_items) elif dom.get_type() == 'discrete_numeric': return lambda x: discrete_numeric_exp_mutation(x, dom.list_of_items) elif dom.get_type() == 'prod_discrete_numeric': return lambda x: prod_discrete_numeric_exp_mutation(x, dom.list_of_list_of_items) elif dom.get_type() == 'discrete_euclidean': return lambda x: discrete_euclidean_mutation(x, dom.list_of_items) elif dom.get_type() == 'neural_network': from ..nn.nn_modifiers import get_single_nn_mutation_op return get_single_nn_mutation_op(dom, [0.5, 0.25, 0.125, 0.075, 0.05]) else: raise ValueError('No default mutation implemented for domain type %s.'%( dom.get_type()))
5,351,887
def chunked(src, size, count=None, **kw): """Returns a list of *count* chunks, each with *size* elements, generated from iterable *src*. If *src* is not evenly divisible by *size*, the final chunk will have fewer than *size* elements. Provide the *fill* keyword argument to provide a pad value and enable padding, otherwise no padding will take place. >>> chunked(range(10), 3) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> chunked(range(10), 3, fill=None) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] >>> chunked(range(10), 3, count=2) [[0, 1, 2], [3, 4, 5]] See :func:`chunked_iter` for more info. """ chunk_iter = chunked_iter(src, size, **kw) if count is None: return list(chunk_iter) else: return list(itertools.islice(chunk_iter, count))
5,351,888
def doc_to_schema_fields(doc, schema_file_name='_schema.yaml'): """Parse a doc to retrieve the schema file.""" return doc_to_schema(doc, schema_file_name=schema_file_name)[ 'schema_fields']
5,351,889
def cluster(df: pd.DataFrame, k: int, knn: int = 10, m: int = 30, alpha: float = 2.0, verbose0: bool = False, verbose1: bool = False, verbose2: bool = True, plot: bool = True) -> Tuple[pd.DataFrame, OrderedDict]: """ Chameleon clustering: build the K-NN graph, partition it into m clusters :param df: input dataframe. :param k: desired number of clusters. :param knn: parameter k of K-nearest_neighbors. :param m: number of clusters to reach in the initial clustering phase. :param alpha: exponent of relative closeness; the larger, the more important relative closeness is than relative interconnectivity. :param verbose0: if True, print general infos. :param verbose1: if True, print infos about the prepartitioning phase. :param verbose2: if True, print labels of merging clusters and their scores in the merging phase. :param plot: if True, show plots. :return: dataframe with cluster labels and dictionary of merging scores (similarities). """ if k is None: k = 1 if verbose0: print(f"Building kNN graph (k = {knn})...") graph = knn_graph(df=df, k=knn, symmetrical=False, verbose=verbose1) if plot is True: plot2d_graph(graph, print_clust=False) graph = pre_part_graph(graph, m, df, verbose1, plotting=plot) # to account for cases where initial_clust is too big or k is already reached before the merging phase cl_dict = OrderedDict({ list(graph.nodes)[i]: graph.nodes[i]["cluster"] for i in range(len(graph)) }) m = len(Counter(cl_dict.values())) if verbose0: print(f"actual init_clust: {m}") merging_similarities = OrderedDict({}) iterm = (tqdm(enumerate(range(m - k)), total=m - k) if verbose1 else enumerate(range(m - k))) for i, _ in iterm: df, ms, ci = merge_best(graph, df, alpha, k, False, verbose2) if ms == 0: break merging_similarities[m - (i + 1)] = ms if plot: plot2d_data(df, ci) res = rebuild_labels(df) return res, merging_similarities
5,351,890
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    # raw string: a plain "C:\Users\..." would raise a unicode-escape error on Python 3
    cifar10_dir = r"C:\Users\Pomodori\workspace\cifar-10-batches-py"
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Reshape data to rows
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)

    return X_train, y_train, X_val, y_val, X_test, y_test
5,351,891
def test_operation_populated(petstore_expanded_spec): """ Tests that operations are populated as expected """ op = petstore_expanded_spec.paths["/pets"].get # check description and metadata populated correctly assert op.operationId == "findPets" assert op.description.startswith("Returns all pets from the system") assert op.summary is None # check parameters populated correctly assert len(op.parameters) == 2 param1 = op.parameters[0] assert param1.name == "tags" assert param1.in_ == "query" assert param1.description == "tags to filter by" assert param1.required == False assert param1.style == "form" assert param1.schema_ is not None assert param1.schema_.type == "array" assert param1.schema_.items.type == "string" param2 = op.parameters[1] assert param2.name == "limit" assert param2.in_ == "query" assert param2.description == "maximum number of results to return" assert param2.required == False assert param2.schema_ is not None assert param2.schema_.type == "integer" assert param2.schema_.format == "int32" # check that responses populated correctly assert "200" in op.responses assert "default" in op.responses assert len(op.responses) == 2 resp1 = op.responses["200"] assert resp1.description == "pet response" assert len(resp1.content) == 1 assert "application/json" in resp1.content con1 = resp1.content["application/json"] assert con1.schema_ is not None assert con1.schema_.type == "array" # we're not going to test that the ref resolved correctly here - that's a separate test assert type(con1.schema_.items._target) == Schema resp2 = op.responses["default"] assert resp2.description == "unexpected error" assert len(resp2.content) == 1 assert "application/json" in resp2.content con2 = resp2.content["application/json"] assert con2.schema_ is not None # again, test ref resolution elsewhere assert type(con2.schema_._target) == Schema
5,351,892
def timeframe_int_to_str(timeframe: int) -> str: """ Convert timeframe from integer to string :param timeframe: minutes per candle (240) :return: string representation for API (4h) """ if timeframe < 60: return f"{timeframe}m" elif timeframe < 1440: return f"{int(timeframe / 60)}h" else: return f"{int(timeframe / 1440)}d"
5,351,893
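Spot checks across the three branches:

print(timeframe_int_to_str(30))    # "30m"
print(timeframe_int_to_str(240))   # "4h"
print(timeframe_int_to_str(2880))  # "2d"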
def FIT(individual):
    """Sphere test objective function.
        F(x) = sum_{i=1}^d xi^2
        d=1,2,3,...
        Range: [-100,100]
        Minima: 0
    """
    y = sum(x**2 for x in individual)
    return y
5,351,894
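Worked example: the sphere function is just the sum of squared coordinates, so its global minimum of 0 sits at the origin:

print(FIT([3, 4]))     # 9 + 16 = 25
print(FIT([0, 0, 0]))  # 0, the global minimum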
def update_range(value):
    """ For user selections, return the relevant range """
    global df
    # avoid shadowing the built-in min/max
    low, high = df.timestamp.iloc[value[0]], df.timestamp.iloc[value[-1]]
    return 'timestamp slider: {} | {}'.format(low, high)
5,351,895
def project_users_add(ctx, user, **kwargs): """ Adds new user to the project. """ client = api.ProjectUser( project=ctx.obj['project'], config=ctx.obj['config'], user=user, **kwargs ) client.save() click.echo("User '{}' added to the project.".format(user.email))
5,351,896
def wrapper_func_easy(quantity = None, food = None, express = None, is_awesome = None): """ wrapper_func_easy: Sample wrapper function. """ dict_args = {} dict_args['quantity'] = 42 if quantity is None else quantity if food is not None: dict_args['food'] = food if express is not None: dict_args['express'] = express if is_awesome is not None: dict_args['is_awesome'] = is_awesome get_food(**dict_args)
5,351,897
def simplify(tile): """ :param tile: 34 tile format :return: tile: 0-8 presentation """ return tile - 9 * (tile // 9)
5,351,898
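Note that tile - 9 * (tile // 9) is exactly tile % 9; a couple of spot checks:

print(simplify(13))  # 4 -- equivalent to 13 % 9
print(simplify(26))  # 8 -- equivalent to 26 % 9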
def vep(dataset, config, block_size=1000, name='vep', csq=False) -> MatrixTable: """Annotate variants with VEP. .. include:: ../_templates/req_tvariant.rst :func:`.vep` runs `Variant Effect Predictor <http://www.ensembl.org/info/docs/tools/vep/index.html>`__ with the `LOFTEE plugin <https://github.com/konradjk/loftee>`__ on the current dataset and adds the result as a row field. Examples -------- Add VEP annotations to the dataset: >>> result = hl.vep(dataset, "data/vep.properties") # doctest: +SKIP Notes ----- **Configuration** :func:`.vep` needs a configuration file to tell it how to run VEP. The format is a `.properties file <https://en.wikipedia.org/wiki/.properties>`__. Roughly, each line defines a property as a key-value pair of the form `key = value`. :func:`.vep` supports the following properties: - **hail.vep.perl** -- Location of Perl. Optional, default: perl. - **hail.vep.perl5lib** -- Value for the PERL5LIB environment variable when invoking VEP. Optional, by default PERL5LIB is not set. - **hail.vep.path** -- Value of the PATH environment variable when invoking VEP. Optional, by default PATH is not set. - **hail.vep.location** -- Location of the VEP Perl script. Required. - **hail.vep.cache_dir** -- Location of the VEP cache dir, passed to VEP with the ``--dir`` option. Required. - **hail.vep.fasta** -- Location of the FASTA file to use to look up the reference sequence, passed to VEP with the `--fasta` option. Required. - **hail.vep.assembly** -- Genome assembly version to use. Optional, default: GRCh37 - **hail.vep.plugin** -- VEP plugin, passed to VEP with the `--plugin` option. Optional. Overrides `hail.vep.lof.human_ancestor` and `hail.vep.lof.conservation_file`. - **hail.vep.lof.human_ancestor** -- Location of the human ancestor file for the LOFTEE plugin. Ignored if `hail.vep.plugin` is set. Required otherwise. - **hail.vep.lof.conservation_file** -- Location of the conservation file for the LOFTEE plugin. Ignored if `hail.vep.plugin` is set. Required otherwise. Here is an example ``vep.properties`` configuration file .. code-block:: text hail.vep.perl = /usr/bin/perl hail.vep.path = /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin hail.vep.location = /path/to/vep/ensembl-tools-release-81/scripts/variant_effect_predictor/variant_effect_predictor.pl hail.vep.cache_dir = /path/to/vep hail.vep.lof.human_ancestor = /path/to/loftee_data/human_ancestor.fa.gz hail.vep.lof.conservation_file = /path/to/loftee_data/phylocsf.sql **VEP Invocation** .. code-block:: text <hail.vep.perl> <hail.vep.location> --format vcf --json --everything --allele_number --no_stats --cache --offline --dir <hail.vep.cache_dir> --fasta <hail.vep.fasta> --minimal --assembly <hail.vep.assembly> --plugin LoF,\ human_ancestor_fa:$<hail.vep.lof.human_ancestor>,\ filter_position:0.05,\ min_intron_size:15,\ conservation_file:<hail.vep.lof.conservation_file> -o STDOUT **Annotations** A new row field is added in the location specified by `name` with the following schema: .. 
code-block:: text struct { assembly_name: str, allele_string: str, ancestral: str, colocated_variants: array<struct { aa_allele: str, aa_maf: float64, afr_allele: str, afr_maf: float64, allele_string: str, amr_allele: str, amr_maf: float64, clin_sig: array<str>, end: int32, eas_allele: str, eas_maf: float64, ea_allele: str, ea_maf: float64, eur_allele: str, eur_maf: float64, exac_adj_allele: str, exac_adj_maf: float64, exac_allele: str, exac_afr_allele: str, exac_afr_maf: float64, exac_amr_allele: str, exac_amr_maf: float64, exac_eas_allele: str, exac_eas_maf: float64, exac_fin_allele: str, exac_fin_maf: float64, exac_maf: float64, exac_nfe_allele: str, exac_nfe_maf: float64, exac_oth_allele: str, exac_oth_maf: float64, exac_sas_allele: str, exac_sas_maf: float64, id: str, minor_allele: str, minor_allele_freq: float64, phenotype_or_disease: int32, pubmed: array<int32>, sas_allele: str, sas_maf: float64, somatic: int32, start: int32, strand: int32 }>, context: str, end: int32, id: str, input: str, intergenic_consequences: array<struct { allele_num: int32, consequence_terms: array<str>, impact: str, minimised: int32, variant_allele: str }>, most_severe_consequence: str, motif_feature_consequences: array<struct { allele_num: int32, consequence_terms: array<str>, high_inf_pos: str, impact: str, minimised: int32, motif_feature_id: str, motif_name: str, motif_pos: int32, motif_score_change: float64, strand: int32, variant_allele: str }>, regulatory_feature_consequences: array<struct { allele_num: int32, biotype: str, consequence_terms: array<str>, impact: str, minimised: int32, regulatory_feature_id: str, variant_allele: str }>, seq_region_name: str, start: int32, strand: int32, transcript_consequences: array<struct { allele_num: int32, amino_acids: str, biotype: str, canonical: int32, ccds: str, cdna_start: int32, cdna_end: int32, cds_end: int32, cds_start: int32, codons: str, consequence_terms: array<str>, distance: int32, domains: array<struct { db: str, name: str }>, exon: str, gene_id: str, gene_pheno: int32, gene_symbol: str, gene_symbol_source: str, hgnc_id: str, hgvsc: str, hgvsp: str, hgvs_offset: int32, impact: str, intron: str, lof: str, lof_flags: str, lof_filter: str, lof_info: str, minimised: int32, polyphen_prediction: str, polyphen_score: float64, protein_end: int32, protein_start: int32, protein_id: str, sift_prediction: str, sift_score: float64, strand: int32, swissprot: str, transcript_id: str, trembl: str, uniparc: str, variant_allele: str }>, variant_class: str } Parameters ---------- dataset : :class:`.MatrixTable` Dataset. config : :obj:`str` Path to VEP configuration file. block_size : :obj:`int` Number of rows to process per VEP invocation. name : :obj:`str` Name for resulting row field. csq : :obj:`bool` If ``True``, annotates VCF CSQ field as a :py:data:`.tstr`. If ``False``, annotates with the full nested struct schema. Returns ------- :class:`.MatrixTable` Dataset with new row-indexed field `name` containing VEP annotations. """ require_row_key_variant(dataset, 'vep') mt = MatrixTable(Env.hail().methods.VEP.apply(dataset._jvds, config, 'va.`{}`'.format(name), csq, block_size)) return mt.annotate_rows(vep=mt['vep']['vep'])
5,351,899