Dataset columns: content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
def test() -> ScadObject:
    """
    Create something.
    """
    result = IDUObject()
    result += box(10, 10, 5, center=True).translated((0, 0, -1)).named("Translated big box")
    result -= box(4, 4, 4, center=True)
    result += box(10, 10, 5)
    result *= sphere(7).translated((0, 0, 1))
    return (
        result.rotated((-45, 0, 0))
        .rendered(10)
        .commented("Render it now!")
        .colored("green", alpha=0.5)
        .commented(
            """
            This file is autogenerated by r7scad.
            It is not supposed to be edited manually.
            """
        )
    )
2d8c413a6b60969de60746c4fb356da88a95e06a
3,652,777
from collections import namedtuple


def rollout(policy, env_class, step_fn=default_rollout_step, max_steps=None):
    """Perform rollout using provided policy and env.

    :param policy: policy to use when simulating these episodes.
    :param env_class: class to instantiate an env object from.
    :param step_fn: a function to be called at each step of rollout.
        The function can have 2 or 3 parameters, and must return an action:
        * 2 parameter definition: policy, observation.
        * 3 parameter definition: policy, observation, step_num.
        Default value is ``agentos.core.default_rollout_step``.
    :param max_steps: cap on number of steps per episode.
    :return: the trajectory that was followed during this rollout.
        A trajectory is a named tuple that contains the initial observation
        (a scalar) as well as the following arrays: actions, observations,
        rewards, dones, contexts. The ith entry of each array corresponds to
        the action taken at the ith step of the rollout, and the respective
        results returned by the environment after taking that action. To
        learn more about the semantics of these, see the documentation and
        code of gym.Env.
    """
    actions = []
    observations = []
    rewards = []
    dones = []
    contexts = []
    env = env_class()
    obs = env.reset()
    init_obs = obs
    done = False
    step_num = 0
    while True:
        if done or (max_steps and step_num >= max_steps):
            break
        if step_fn.__code__.co_argcount == 2:
            action = step_fn(policy, obs)
        elif step_fn.__code__.co_argcount == 3:
            action = step_fn(policy, obs, step_num)
        else:
            raise TypeError("step_fn must accept 2 or 3 parameters.")
        obs, reward, done, ctx = env.step(action)
        actions.append(action)
        observations.append(obs)
        rewards.append(reward)
        dones.append(done)
        contexts.append(ctx)
        step_num += 1
    Trajectory = namedtuple(
        "Trajectory",
        [
            "init_obs",
            "actions",
            "observations",
            "rewards",
            "dones",
            "contexts",
        ],
    )
    return Trajectory(init_obs, actions, observations, rewards, dones, contexts)
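A minimal usage sketch (RandomPolicy, CartPoleEnv, and the policy's act method are illustrative assumptions, not part of the snippet):

# Hypothetical usage; RandomPolicy/CartPoleEnv/act are stand-ins.
def my_step(policy, obs):
    return policy.act(obs)  # assumed policy API

traj = rollout(RandomPolicy(), CartPoleEnv, step_fn=my_step, max_steps=200)
print(len(traj.actions), traj.rewards[:5])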
d5ac3246338165d3cfdb5e37ae5a6cbbe5df0408
3,652,778
def get_source(location, **kwargs):
    """Factory for StubSource Instance.

    Args:
        location (str): PathLike object or valid URL

    Returns:
        obj: Either Local or Remote StubSource Instance
    """
    try:
        utils.ensure_existing_dir(location)
    except NotADirectoryError:
        return RemoteStubSource(location, **kwargs)
    else:
        return LocalStubSource(location, **kwargs)
6b240d7ad523c2a45ca21c3030a96ec5aebb69c2
3,652,779
def about(request):
    """
    Prepares and displays the about view of the web application.

    Args:
        request: django HttpRequest class

    Returns:
        A django HttpResponse class
    """
    template = loader.get_template('about.html')
    return HttpResponse(template.render())
ecf2a890e49a5fe786024f7d7f524e1396064f48
3,652,780
import math
import logging

import numpy as np


def getAp(ground_truth, predict, fullEval=False):
    """
    Calculate AP at IOU=.50:.05:.95, AP at IOU=.50, AP at IOU=.75

    :param ground_truth: {img_id1:{{'position': 4x2 array, 'is_matched': 0 or 1}, {...}, ...},
                          img_id2:{...}, ...}
    :param predict: [{'position':4x2 array, 'img_id': image Id, 'confident': confident}, {...}, ...]
    :return: AP, AP at IOU=.50, AP at IOU=.75
    """
    is_match = {'is_matched': 0}
    ap_050_095 = 0.
    ap_050 = 0.
    ap_075 = 0.
    prec_050_095 = 0.
    prec_050 = 0.
    prec_075 = 0.
    recall_050_095 = 0.
    recall_050 = 0.
    recall_075 = 0.
    if fullEval:
        for i in np.arange(0.50, 1.0, 0.05):
            for key in ground_truth:
                for win_idx in range(len(ground_truth[key])):
                    # reset 'is_matched' for all windows
                    ground_truth[key][win_idx].update(is_match)
            ap, recall, precision = evaluateAP(ground_truth, predict, threshold=i)
            if math.isclose(round(i, 2), 0.5):
                ap_050 = ap
                prec_050 = precision
                recall_050 = recall
            if math.isclose(round(i, 2), 0.75):
                ap_075 = ap
                prec_075 = precision
                recall_075 = recall
            ap_050_095 += ap
            prec_050_095 += precision
            recall_050_095 += recall
            logging.info("threshold:%.2f" % i
                         + " precision:%.2f" % (precision * 100)
                         + " recall:%.2f" % (recall * 100))
    else:
        ap_050, recall_050, prec_050 = evaluateAP(ground_truth, predict, threshold=0.5)

    ap_050_095 = ap_050_095 / 10
    prec_050_095 = prec_050_095 / 10
    recall_050_095 = recall_050_095 / 10
    return [ap_050_095, ap_050, ap_075], \
           [prec_050_095, prec_050, prec_075], \
           [recall_050_095, recall_050, recall_075]
ac44c514166f8e70a6625f4e1ad89b36564ffba4
3,652,782
def aumenta_fome(ani):
    """
    aumenta_fome: animal --> animal
    Takes an animal and returns it with its hunger value incremented by 1
    """
    if obter_freq_alimentacao(ani) == 0:
        return ani
    else:
        ani['a'][0] += 1
        return ani
377e3800e12877f1b8cd1cba19fe3a430ade0207
3,652,783
import warnings

import pandas as pd


def match_inputs(
    bp_tree,
    table,
    sample_metadata,
    feature_metadata=None,
    ignore_missing_samples=False,
    filter_missing_features=False
):
    """Matches various input sources.

    Also "splits up" the feature metadata, first by calling
    taxonomy_utils.split_taxonomy() on it and then by splitting the resulting
    DataFrame into two separate DataFrames (one for tips and one for internal
    nodes).

    Parameters
    ----------
    bp_tree: bp.BP
        The tree to be visualized.
    table: pd.DataFrame
        Representation of the feature table. The index should describe
        feature IDs; the columns should describe sample IDs. (It's expected
        that feature IDs in the table only describe tips in the tree, not
        internal nodes.)
    sample_metadata: pd.DataFrame
        Sample metadata. The index should describe sample IDs; the columns
        should describe different sample metadata fields' names.
    feature_metadata: pd.DataFrame or None
        Feature metadata. If this is passed, the index should describe
        feature IDs and the columns should describe different feature
        metadata fields' names. (Feature IDs here can describe tips or
        internal nodes in the tree.)
    ignore_missing_samples: bool
        If True, pads missing samples (i.e. samples in the table but not the
        metadata) with placeholder metadata. If False, raises a
        DataMatchingError if any such samples exist. (Note that in either
        case, samples in the metadata but not in the table are filtered out;
        and if no samples are shared between the table and metadata, a
        DataMatchingError is raised regardless.) This is analogous to the
        ignore_missing_samples flag in Emperor.
    filter_missing_features: bool
        If True, filters features from the table that aren't present as tips
        in the tree. If False, raises a DataMatchingError if any such
        features exist. (Note that in either case, features in the tree but
        not in the table are preserved.)

    Returns
    -------
    (table, sample_metadata, tip_metadata, int_metadata):
        (pd.DataFrame, pd.DataFrame, pd.DataFrame / None, pd.DataFrame / None)
        Versions of the input table, sample metadata, and feature metadata
        filtered such that:
        -The table only contains features also present as tips in the tree.
        -The sample metadata only contains samples also present in the table.
        -Samples present in the table but not in the sample metadata will
         have all of their sample metadata values set to "This sample has no
         metadata". (This will only be done if ignore_missing_samples is
         True; otherwise, this situation will trigger an error. See below.)
        -If feature metadata was not passed, tip_metadata and int_metadata
         will both be None. Otherwise, tip_metadata will contain the entries
         of the feature metadata where the feature name was present as a tip
         in the tree, and int_metadata will contain the entries of the
         feature metadata where the feature name was present as internal
         node(s) in the tree.
        -Also, for sanity's sake, this will call
         taxonomy_utils.split_taxonomy() on the feature metadata before
         splitting it up into tip and internal node metadata.

    Raises
    ------
    DataMatchingError
        If any of the following conditions are met:
        1. No features are shared between the tree's tips and table.
        2. There are features present in the table but not as tips in the
           tree, AND filter_missing_features is False.
        3. No samples are shared between the sample metadata and table.
        4. There are samples present in the table but not in the sample
           metadata, AND ignore_missing_samples is False.
        5. The feature metadata was passed, but no features present in it
           are also present as tips or internal nodes in the tree.

    References
    ----------
    This function was based on match_table_and_data() in Qurro's code:
    https://github.com/biocore/qurro/blob/b9613534b2125c2e7ee22e79fdff311812f4fefe/qurro/_df_utils.py#L255
    """
    # Match table and tree.
    # (Ignore None-named tips in the tree, which will be replaced later on
    # with "default" names like "EmpressNode0".)
    tip_names = set(bp_tree.bp_tree_tips())
    tree_and_table_features = table.index.intersection(tip_names)

    if len(tree_and_table_features) == 0:
        # Error condition 1
        raise DataMatchingError(
            "No features in the feature table are present as tips in the "
            "tree."
        )

    ff_table = table.copy()
    if len(tree_and_table_features) < len(table.index):
        if filter_missing_features:
            # Filter table to just features that are also present in the
            # tree.
            #
            # Note that we *don't* filter the tree analogously, because it's
            # ok for the tree's nodes to be a superset of the table's
            # features (and this is going to be the case in most datasets
            # where the features correspond to tips, since internal nodes
            # aren't explicitly described in the feature table).
            ff_table = table.loc[tree_and_table_features]

            # Report to user about any dropped features from table.
            dropped_feature_ct = table.shape[0] - ff_table.shape[0]
            warnings.warn(
                (
                    "{} feature(s) in the table were not present as tips in "
                    "the tree. These feature(s) have been removed from the "
                    "visualization."
                ).format(dropped_feature_ct),
                DataMatchingWarning
            )
        else:
            # Error condition 2
            raise DataMatchingError(
                "The feature table contains features that aren't present "
                "as tips in the tree. You can override this error by using "
                "the --p-filter-missing-features flag."
            )

    # Match table (post-feature-filtering, if done) and sample metadata.
    table_samples = set(ff_table.columns)
    sm_samples = set(sample_metadata.index)
    sm_and_table_samples = sm_samples & table_samples

    if len(sm_and_table_samples) == 0:
        # Error condition 3
        raise DataMatchingError(
            "No samples in the feature table are present in the sample "
            "metadata."
        )

    padded_metadata = sample_metadata.copy()
    if len(sm_and_table_samples) < len(ff_table.columns):
        if ignore_missing_samples:
            # Works similarly to how Emperor does this: see
            # https://github.com/biocore/emperor/blob/659b62a9f02a6423b6258c814d0e83dbfd05220e/emperor/core.py#L350
            samples_without_metadata = table_samples - sm_samples
            padded_metadata = pd.DataFrame(
                index=samples_without_metadata,
                columns=sample_metadata.columns,
                dtype=str
            )
            padded_metadata.fillna("This sample has no metadata",
                                   inplace=True)
            sample_metadata = pd.concat([sample_metadata, padded_metadata])
            # Report to user about samples we needed to "pad."
            warnings.warn(
                (
                    "{} sample(s) in the table were not present in the "
                    "sample metadata. These sample(s) have been assigned "
                    "placeholder metadata."
                ).format(len(samples_without_metadata)),
                DataMatchingWarning
            )
        else:
            # Error condition 4
            raise DataMatchingError(
                "The feature table contains samples that aren't present in "
                "the sample metadata. You can override this error by using "
                "the --p-ignore-missing-samples flag."
            )

    # If we've made it this far, then there must be at least *one* sample
    # present in both the sample metadata and the table: and by this point
    # the metadata's samples should be a superset of the table's samples
    # (since we padded the metadata above if there were any samples that
    # *weren't* in the table).
    #
    # All that's left to do is to filter the sample metadata to just the
    # samples that are also present in the table.
    sf_sample_metadata = sample_metadata.loc[ff_table.columns]

    # If desired, we could report here to the user about any dropped samples
    # from the metadata by looking at the difference between
    # sample_metadata.shape[0] and sf_sample_metadata.shape[0]. However, the
    # presence of such "dropped samples" is a common occurrence in 16S
    # studies, so we currently don't do that for the sake of avoiding alarm
    # fatigue.

    # If the feature metadata was passed, filter it so that it only contains
    # features present as tips / internal nodes in the tree
    tip_metadata = None
    int_metadata = None
    if feature_metadata is not None:
        # Split up taxonomy column, if present in the feature metadata
        ts_feature_metadata = taxonomy_utils.split_taxonomy(feature_metadata)
        fm_ids = ts_feature_metadata.index

        # Subset tip metadata
        fm_and_tip_features = fm_ids.intersection(tip_names)
        tip_metadata = ts_feature_metadata.loc[fm_and_tip_features]

        # Subset internal node metadata
        internal_node_names = set(bp_tree.bp_tree_non_tips())
        fm_and_int_features = fm_ids.intersection(internal_node_names)
        int_metadata = ts_feature_metadata.loc[fm_and_int_features]

        if len(tip_metadata.index) == 0 and len(int_metadata.index) == 0:
            # Error condition 5
            raise DataMatchingError(
                "No features in the feature metadata are present in the "
                "tree, either as tips or as internal nodes."
            )

    return ff_table, sf_sample_metadata, tip_metadata, int_metadata
92a97fc39c233a0969c24774d74fdd6b304f5442
3,652,784
import numpy as np


def im_adjust(img, tol=1, bit=8):
    """
    Adjust contrast of the image
    """
    limit = np.percentile(img, [tol, 100 - tol])
    im_adjusted = im_bit_convert(img, bit=bit, norm=True, limit=limit.tolist())
    return im_adjusted
2bbccc08d4dd6aeed50c6fb505ff801e3201c73a
3,652,785
import math


def FibanocciSphere(samples=1):
    """
    Return a Fibonacci sphere with N number of points on the surface.
    This will act as the template for the nanoparticle core.

    Args:
        Placeholder

    Returns:
        Placeholder

    Raises:
        Placeholder
    """
    points = []
    phi = math.pi * (3. - math.sqrt(5.))  # golden angle in radians
    for i in range(samples):
        y = 1 - (i / float(samples - 1)) * 2  # y goes from 1 to -1
        radius = math.sqrt(1 - y * y)  # radius at y
        theta = phi * i  # golden angle increment
        x = math.cos(theta) * radius
        z = math.sin(theta) * radius
        points.append((x, y, z))
    return points
ea47b7c2eed34bd826ddff1619adac887439f5e0
3,652,786
import inspect


def get_code():
    """
    returns the code for the activity_selection function
    """
    return inspect.getsource(activity_selection)
3bae49b5feea34813c518a3ec3a62a4cde35445f
3,652,787
import numpy as np


def calc_luminosity(flux, fluxerr, mu):
    """Normalise flux light curves with distance modulus.

    Parameters
    ----------
    flux : array
        List of floating point flux values.
    fluxerr : array
        List of floating point flux errors.
    mu : float
        Distance modulus from luminosity distance.

    Returns
    -------
    fluxout : array
        Same shape as input flux.
    fluxerrout : array
        Same shape as input fluxerr.
    """
    d = 10 ** (mu / 5 + 1)
    dsquared = d ** 2
    norm = 1e18
    fluxout = flux * (4 * np.pi * dsquared / norm)
    fluxerrout = fluxerr * (4 * np.pi * dsquared / norm)
    return fluxout, fluxerrout
8cfebee024ae73355daf64b96260d45e57115c8f
3,652,788
def inference(images):
    """Build the CIFAR-10 model.

    Args:
        images: Images returned from distorted_inputs() or inputs().

    Returns:
        Logits.
    """
    ###
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU training
    # runs. If we only ran this model on a single GPU, we could simplify this
    # function by replacing all instances of tf.get_variable() with
    # tf.Variable().
    #
    # conv1
    # xavier = tf.contrib.layers.xavier_initializer_conv2d()
    with tf.variable_scope('conv1') as scope:
        kernel1 = _variable_with_weight_decay('weights', shape=[3, 3, 3, 128],
                                              stddev=5e-2, wd=None)
        conv = tf.nn.conv2d(images, kernel1, [1, 2, 2, 1], padding='SAME')
        # conv = tf.nn.dropout(conv, 0.9)
        biases1 = cifar10._variable_on_cpu('biases', [128],
                                           tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases1)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        cifar10._activation_summary(conv1)

    norm1 = tf.contrib.layers.batch_norm(conv1, scale=True, is_training=True,
                                         updates_collections=None)

    # conv2
    with tf.variable_scope('conv2') as scope:
        kernel2 = _variable_with_weight_decay('weights',
                                              shape=[5, 5, 128, 128],
                                              stddev=5e-2, wd=None)
        conv = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
        biases2 = cifar10._variable_on_cpu('biases', [128],
                                           tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases2)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        # conv2 = tf.nn.dropout(conv2, 0.9)
        cifar10._activation_summary(conv2)

    # concat conv2 with norm1 to increase the number of features; this step
    # does not affect the Differential_Privacy preserving guarantee
    current = tf.concat((conv2, norm1), axis=3)

    # norm2
    norm2 = tf.contrib.layers.batch_norm(current, scale=True,
                                         is_training=True,
                                         updates_collections=None)

    # conv3
    with tf.variable_scope('conv3') as scope:
        kernel3 = _variable_with_weight_decay('weights',
                                              shape=[5, 5, 256, 256],
                                              stddev=5e-2, wd=None)
        conv = tf.nn.conv2d(norm2, kernel3, [1, 1, 1, 1], padding='SAME')
        biases3 = cifar10._variable_on_cpu('biases', [256],
                                           tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases3)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)
        # conv3 = tf.nn.dropout(conv3, 0.9)
        cifar10._activation_summary(conv3)

    # norm3
    norm3 = tf.contrib.layers.batch_norm(conv3, scale=True, is_training=True,
                                         updates_collections=None)
    # pool3, row_pooling_sequence, col_pooling_sequence = tf.nn.fractional_max_pool(norm3, pooling_ratio=[1.0, 2.0, 2.0, 1.0])
    pool3 = avg_pool(norm3, 2)

    # local4
    with tf.variable_scope('local4') as scope:
        weights1 = cifar10._variable_with_weight_decay('weights',
                                                       shape=[5 * 5 * 256, hk],
                                                       stddev=0.04, wd=None)
        biases4 = cifar10._variable_on_cpu('biases', [hk],
                                           tf.constant_initializer(0.1))
        h_pool2_flat = tf.reshape(pool3, [-1, 5 * 5 * 256])
        z2 = tf.add(tf.matmul(h_pool2_flat, weights1), biases4,
                    name=scope.name)
        # Applying normalization for the flat connected layer h_fc1
        batch_mean2, batch_var2 = tf.nn.moments(z2, [0])
        scale2 = tf.Variable(tf.ones([hk]))
        beta2 = tf.Variable(tf.zeros([hk]))
        BN_norm = tf.nn.batch_normalization(z2, batch_mean2, batch_var2,
                                            beta2, scale2, 1e-3)
        ###
        local4 = max_out(BN_norm, hk)
        cifar10._activation_summary(local4)

    # print(images.get_shape())
    # print(norm1.get_shape())
    # print(norm2.get_shape())
    # print(pool3.get_shape())
    # print(local4.get_shape())

    # linear layer(WX + b),
    # We don't apply softmax here because
    # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled
    # logits and performs the softmax internally for efficiency.
    weights2 = cifar10._variable_with_weight_decay('weights', [hk, 10],
                                                   stddev=1 / (hk * 1.0),
                                                   wd=0.0)
    biases5 = cifar10._variable_on_cpu('biases', [10],
                                       tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local4, weights2), biases5,
                            name=scope.name)
    cifar10._activation_summary(softmax_linear)
    return softmax_linear
224c6792b4f2b066d8627d222e6f89b469921de3
3,652,790
import pandas as pd
from rdkit.ML.Cluster import Butina


def cluster_molecules(mols, cutoff=0.6):
    """
    Cluster molecules by fingerprint distance using the Butina algorithm.

    Parameters
    ----------
    mols : list of rdkit.Chem.rdchem.Mol
        List of molecules.
    cutoff : float
        Distance cutoff for Butina clustering.

    Returns
    -------
    pandas.DataFrame
        Table with cluster ID - molecule ID pairs.
    """
    # Generate fingerprints
    fingerprints = _generate_fingerprints(mols)
    # Calculate Tanimoto distance matrix
    distance_matrix = _get_tanimoto_distance_matrix(fingerprints)
    # Now cluster the data with the implemented Butina algorithm
    clusters = Butina.ClusterData(distance_matrix, len(fingerprints), cutoff,
                                  isDistData=True)
    # Sort clusters by size
    clusters = sorted(clusters, key=len, reverse=True)
    # Get cluster ID - molecule ID pairs
    clustered_molecules = []
    for cluster_id, molecule_ids in enumerate(clusters, start=1):
        for cluster_member_id, molecule_id in enumerate(molecule_ids, start=1):
            clustered_molecules.append([cluster_id, cluster_member_id, molecule_id])
    clustered_molecules = pd.DataFrame(
        clustered_molecules,
        columns=["cluster_id", "cluster_member_id", "molecule_id"]
    )
    # Print details on clustering
    print("Number of molecules:", len(fingerprints))
    print("Threshold: ", cutoff)
    print("Number of clusters: ", len(clusters))
    print(
        "# Clusters with only 1 molecule: ",
        len([cluster for cluster in clusters if len(cluster) == 1]),
    )
    print(
        "# Clusters with more than 5 molecules: ",
        len([cluster for cluster in clusters if len(cluster) > 5]),
    )
    print(
        "# Clusters with more than 25 molecules: ",
        len([cluster for cluster in clusters if len(cluster) > 25]),
    )
    print(
        "# Clusters with more than 100 molecules: ",
        len([cluster for cluster in clusters if len(cluster) > 100]),
    )
    return clustered_molecules
ba98342d10512b4ee08e756644a26bc8585f5abc
3,652,791
import timeit


def exec_benchmarks_empty_inspection(code_to_benchmark, repeats):
    """
    Benchmark some code without mlinspect and with mlinspect with varying
    numbers of inspections
    """
    benchmark_results = {
        "no mlinspect": timeit.repeat(
            stmt=code_to_benchmark.benchmark_exec,
            setup=code_to_benchmark.benchmark_setup,
            repeat=repeats, number=1),
        "no inspection": benchmark_code_str_with_inspections(
            code_to_benchmark.benchmark_exec_func_str,
            code_to_benchmark.benchmark_setup_func_str,
            "[]", repeats),
        "one inspection": benchmark_code_str_with_inspections(
            code_to_benchmark.benchmark_exec_func_str,
            code_to_benchmark.benchmark_setup_func_str,
            "[EmptyInspection(0)]", repeats),
        "two inspections": benchmark_code_str_with_inspections(
            code_to_benchmark.benchmark_exec_func_str,
            code_to_benchmark.benchmark_setup_func_str,
            "[EmptyInspection(0), EmptyInspection(1)]", repeats),
        "three inspections": benchmark_code_str_with_inspections(
            code_to_benchmark.benchmark_exec_func_str,
            code_to_benchmark.benchmark_setup_func_str,
            "[EmptyInspection(0), EmptyInspection(1), EmptyInspection(2)]",
            repeats)}
    return benchmark_results
c4038b98968c9c44b5cbd0bfc9e92654dae8aca2
3,652,792
def detect_version():
    """
    Try to detect the main package/module version by looking at:
        module.__version__
    otherwise, return 'dev'
    """
    try:
        m = __import__(package_name, fromlist=['__version__'])
        return getattr(m, '__version__', 'dev')
    except ImportError:
        pass
    return 'dev'
c9cb3a30d84c7e9118df46dcc73ce37278788db5
3,652,793
def model(p, x):
    """
    Evaluate the model given an X array
    """
    return p[0] + p[1]*x + p[2]*x**2. + p[3]*x**3.
fe923f6f6aea907d3dc07756813ed848fbcc2ac6
3,652,794
def normalize(x: "tensor|np.ndarray") -> "tensor|np.ndarray":
    """Min-max normalization (0-1).

    :param x: tensor|np.ndarray
    :returns: Union[Tensor, np.ndarray] - same type as the input, scaled
        between 0 and 1
    """
    return (x - x.min()) / (x.max() - x.min())
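A quick sanity check on a NumPy array (the same call works on a torch tensor, since only .min() and .max() are used):

import numpy as np

a = np.array([2.0, 4.0, 6.0])
print(normalize(a))  # [0.  0.5 1. ]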
6230077008c084bdcbebfc32d25251564c4266f0
3,652,795
import warnings

import Bio
from Bio import SeqIO


def apply_on_multi_fasta(file, function, *args):
    """Apply a function on each sequence in a multiple FASTA file (DEPRECATED).

    file - filename of a FASTA format file
    function - the function you wish to invoke on each record
    *args - any extra arguments you want passed to the function

    This function will iterate over each record in a FASTA file as SeqRecord
    objects, calling your function with the record (and supplied args) as
    arguments.

    This function returns a list. For those records where your function
    returns a value, this is taken as a sequence and used to construct a
    FASTA format string. If your function never has a return value, this
    means apply_on_multi_fasta will return an empty list.
    """
    warnings.warn("apply_on_multi_fasta is deprecated",
                  Bio.BiopythonDeprecationWarning)
    try:
        f = globals()[function]
    except KeyError:
        raise NotImplementedError("%s not implemented" % function)
    handle = open(file, 'r')
    records = SeqIO.parse(handle, "fasta")
    results = []
    for record in records:
        arguments = [record.sequence]
        for arg in args:
            arguments.append(arg)
        result = f(*arguments)
        if result:
            results.append('>%s\n%s' % (record.name, result))
    handle.close()
    return results
e204322e512a0f1eb875d7a6434ab6e3356cff10
3,652,796
import numpy as np


def resize_bbox(box, image_size, resize_size):
    """
    Args:
        box: iterable (ints) of length 4 (x0, y0, x1, y1)
        image_size: iterable (ints) of length 2 (width, height)
        resize_size: iterable (ints) of length 2 (width, height)

    Returns:
        new_box: iterable (ints) of length 4 (x0, y0, x1, y1)
    """
    check_box_convention(np.array(box), 'x0y0x1y1')
    box_x0, box_y0, box_x1, box_y1 = map(float, box)
    image_w, image_h = map(float, image_size)
    new_image_w, new_image_h = map(float, resize_size)

    newbox_x0 = box_x0 * new_image_w / image_w
    newbox_y0 = box_y0 * new_image_h / image_h
    newbox_x1 = box_x1 * new_image_w / image_w
    newbox_y1 = box_y1 * new_image_h / image_h
    return int(newbox_x0), int(newbox_y0), int(newbox_x1), int(newbox_y1)
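A worked example of the scaling arithmetic (assuming check_box_convention is available from the surrounding module):

# A 100x100 image resized to 200x50 scales x by 2 and y by 0.5:
print(resize_bbox((10, 20, 30, 40), (100, 100), (200, 50)))  # (20, 10, 60, 20)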
3b6a309e6ccf0e244bb5a51a922bcf96303116ea
3,652,797
import time


def perf_counter_ms():
    """Returns a millisecond performance counter"""
    return time.perf_counter() * 1_000
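A typical timing pattern with this helper:

start = perf_counter_ms()
sum(range(1_000_000))  # the work being timed
print(f"took {perf_counter_ms() - start:.2f} ms")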
55f1bbbd8d58593d85f2c6bb4ca4f79ad22f233a
3,652,798
import struct


def make_shutdown_packet():
    """Create a shutdown packet."""
    packet = struct.pack("<B", OP_SHUTDOWN)
    return packet
6d696d76c9aa783e477f65e5c89106b2fff6db6d
3,652,799
def unique():
    """Return unique identification number."""
    global uniqueLock
    global counter

    with uniqueLock:
        counter = counter + 1
        return counter
12ac0e8f9ec5d4f8d6a41066f2325ef57d593d26
3,652,800
def pointCoordsDP2LP(dpX, dpY, dptZero, lPix=1.0):
    """Convert device coordinates into logical coordinates

    dpX - x device coordinate
    dpY - y device coordinate
    dptZero - device coordinates of logical 0,0 point
    lPix - zoom value, number of logical points inside one device point (aka pixel)

    return point in logical coordinates
    """
    return point.fromXY(xDP2LP(dpX, dptZero, lPix), yDP2LP(dpY, dptZero, lPix))
2494b5d95756aab33434969fe2b02917a4529ef9
3,652,801
def geocode_input(api_key, input, geolocator):
    """
    Use parallel processing to process inputted addresses as geocode

    Parameters:
        api_key (string): Google API key
        input (string): user inputted addresses
        geolocator: object from Google Maps API that generates geocode of address

    Returns:
        string[]: List of incorrect addresses
        string[]: formatted addresses of the inputted addresses
        float[]: coordinates of each address
        string: original inputted addresses
    """
    faultyAddress = None
    coords = None
    address = None

    # for every line of input, generate location object
    placeid = database.fetch_placeid(input)
    if len(placeid) == 0:
        try:
            # IMPORTANT: "NC" must be changed for usage in different states.
            location = geolocator.geocode(input + " NC")
            coords = (location[0]['geometry']['location']['lat'],
                      location[0]['geometry']['location']['lng'])
            address = location[0]["formatted_address"]
            database.insert_data(input, location[0]['place_id'], coords[0],
                                 coords[1], address)
        except Exception:
            faultyAddress = str(input)
    else:
        out_data = database.fetch_output_data(placeid[0][0])
        address = out_data[0][2]
        coords = [float(out_data[0][0]), float(out_data[0][1])]

    # output data
    return (faultyAddress, address, coords, input)
b7c31ccc1364364a704602438e263b107de9046c
3,652,802
import numpy as np


def satContact(sat_R, gs_R):
    """
    Determines if satellite is within sight of a Ground Station

    Parameters
    ----------
    sat_R : numpy matrix [3, 1] - Input radius vector in Inertial System ([[X], [Y], [Z]])
    gs_R : numpy matrix [3, 1] - Input radius vector in Inertial System ([[X], [Y], [Z]])

    Returns
    -------
    InContact : int - 1 or 0 if sat is in sight or out of sight, respectively

    See Also
    --------
    Sun_Contact_Times : Determine if a orbit vector is illuminated
    Geo_Contact_Times : Determine if a orbit vector is in within a geometric boundary

    References
    ----------
    [1] D. Vallado, `Fundamentals of Astrodynamics and Applications`.
        4th ed., Microcosm Press, 2013.
        - Modified Alg. 35, pg. 308
    """
    # Simplifying equations
    mag_sat = np.linalg.norm(sat_R)
    mag_gs = np.linalg.norm(gs_R)
    dot_ss = np.dot(np.transpose(sat_R), gs_R)

    # Find minimum parametric value
    Tmin = (((mag_sat ** 2) - dot_ss)
            / ((mag_sat ** 2) + (mag_gs ** 2) - 2 * (dot_ss)))

    if Tmin < 0 or Tmin > 1:
        InContact = 1  # Satellite can see GS
    if Tmin > 0 and Tmin < 1:
        cTmin = (((1 - Tmin) * (mag_sat ** 2) + (dot_ss * Tmin))
                 / (6378.137 ** 2))
        if cTmin > 1:
            InContact = 1  # Satellite can see GS
        if cTmin < 1:
            InContact = 0  # Satellite can't see GS
    return InContact
6fb6d5fc9121ddb0627f276a13446891f1da7542
3,652,803
def determine_visible_field_names(hard_coded_keys, filter_string, ref_genome):
    """Determine which fields to show, combining hard-coded keys and the
    keys in the filter string.
    """
    fields_from_filter_string = extract_filter_keys(filter_string, ref_genome)
    return list(set(hard_coded_keys) | set(fields_from_filter_string))
2d885e7caa183916691def8abf685a6560f55309
3,652,804
import numpy as np
import pandas as pd


def get_data_day(data: pd.DataFrame):
    """Get weekday/weekend designation value from data.

    :param pandas.DataFrame data: the data to get day of week from.
    :return: (*numpy.array*) -- indicates weekend or weekday for every day.
    """
    return np.array(data["If Weekend"])
3e4654cf3ad3c2f0e213563e0dac3b21c7fb847c
3,652,805
import numpy as np


def make_pretty(image, white_level=50):
    """Rescale and clip an astronomical image to make features more obvious.

    This rescaling massively improves the sensitivity of alignment by
    removing background and decreases the impact of hot pixels and cosmic
    rays by introducing a white clipping level that should be set so that
    most of a star's psf is clipped.

    Arguments:
    white_level -- the clipping level as a multiple of the median-subtracted
                   image's mean. For most images, 50 is good enough.
    """
    pretty = (image - np.median(image)).clip(0)
    pretty /= np.mean(pretty)
    pretty = pretty.clip(0, white_level)
    return pretty
c6d95a76db8aee7a8e2ca2bbc881094577e547ca
3,652,807
def hash(data: bytes) -> bytes:
    """
    Compute the hash of the input data using the default algorithm

    Args:
        data(bytes): the data to hash

    Returns:
        the hash of the input data
    """
    return _blake2b_digest(data)
62dec8f0e05b668dd486deb87bd3cc64a0cd5d08
3,652,809
import torch


def compute_cd_small_batch(gt, output, batch_size=50):
    """
    Compute the Chamfer distance in batches, in case n_pcd is large
    """
    n_pcd = gt.shape[0]
    dist = []
    for i in range(0, n_pcd, batch_size):
        last_idx = min(i + batch_size, n_pcd)
        dist1, dist2, _, _ = distChamfer(gt[i:last_idx], output[i:last_idx])
        cd_loss = dist1.mean(1) + dist2.mean(1)
        dist.append(cd_loss)
    dist_tensor = torch.cat(dist)
    cd_ls = (dist_tensor * 10000).cpu().numpy().tolist()
    return cd_ls
b7e1b22ab63624afd154a3228314a954304a3941
3,652,810
import numpy as np
from scipy.integrate import ode
from timeit import default_timer as timer


def find_sub_supra(axon, stimulus, eqdiff, sub_value=0, sup_value=0.1e-3):
    """
    'find_sub_supra' computes boundary values for the bisection method
    (used to identify the threshold)

    Parameters
    ----------
    axon (AxonModel): axon model
    stimulus (StimulusModel): stimulus model
    eqdiff (function): function that defines the ODE system
    sub_value (float): initial guess of sub-threshold value (default is 0)
    sup_value (float): initial guess of supra-threshold value (default is 0.1e-3)

    Returns
    -------
    sub_value (float): sub-threshold value
    sup_value (float): supra-threshold value
    """
    # Identification of bound values
    flag = 1
    print('\n------------------------------------------------------')
    print('Identifying sub and supra threshold values...')
    print('------------------------------------------------------')
    ts = timer()
    while flag:
        # update stimulus
        stimulus.magnitude = -sup_value
        stimulus.update_stimulus(axon)

        # callback to save solution at each iteration of the integration
        def solout(t, y):
            time.append(t)
            sol.append(y.copy())

        # initialize solution variable
        time = []
        sol = []

        # define integrator
        r = ode(eqdiff).set_integrator('dopri5')

        # set initial conditions
        r.set_initial_value(axon.icond, 0).set_f_params(
            axon.Ga, axon.Gm, axon.Cm, stimulus.voltage_ext,
            axon.d, axon.l, axon.Vr)

        # store solution at each iteration step
        r.set_solout(solout)

        # integrate
        r.integrate(stimulus.tend)

        # get complete solution
        x = np.array(sol)

        # get number of nodes with voltage > 80 mV
        N80 = (np.max(x[:, 0:axon.node_num], axis=0) > 80e-3).sum()
        if N80 > 3:
            flag = 0
        else:
            sub_value = 1 * sup_value
            sup_value = 2 * sup_value
    te = timer()
    print('...done. (sub, sup) = ({},{})'.format(sub_value, sup_value))
    print('\n elapsed time: {:.3f} s'.format(te - ts))
    return sub_value, sup_value
6efe62ac2d00d946422b1e0f915714cb9bd4dc50
3,652,811
from functools import wraps


def constantly(x):
    """constantly: returns the function const(x)"""
    # `const` is assumed to be defined elsewhere in the original module;
    # wraps(const) copies its metadata onto the wrapper.
    @wraps(const)
    def wrapper(*args, **kwargs):
        return x
    return wrapper
7fdc78248f6279b96a2d45edaa2f76abe7d60d54
3,652,812
import numpy as np


def ToBaseBand(xc, f_offset, fs):
    """
    Parameters:
        xc: Signal to bring to baseband
        f_offset: Frequency offset of the signal
        fs: Sampling frequency
    """
    if PLOT:
        PlotSpectrum(xc, "xc", "xc_offset_spectrum.pdf", fs)

    # Bring it back to baseband by multiplying by a complex exponential
    # with phase f_offset / fs
    x_baseband = xc * np.exp((-1.0j * 2.0 * np.pi * f_offset / fs)
                             * np.arange(len(xc)))

    if PLOT:
        PlotSpectrum(x_baseband, "x baseband", "x_baseband_spectrum.pdf", fs)
    return x_baseband
0389c3a25b3268b04be8c47cebaf1bbb6b863235
3,652,813
import jax
import jax.numpy as jnp


def hvp(
    f: DynamicJaxFunction,
    x: TracerOrArray,
    v: TracerOrArray,
) -> TracerOrArray:
    """Hessian-vector product function"""
    return jax.grad(lambda y: jnp.vdot(jax.grad(f)(y), v))(x)
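A quick check of the grad-of-grad identity: for f(y) = 0.5 * y.y the Hessian is the identity matrix, so the product should return v unchanged:

import jax.numpy as jnp

f = lambda y: 0.5 * jnp.vdot(y, y)  # Hessian of f is the identity
x = jnp.array([1.0, 2.0, 3.0])
v = jnp.array([0.5, -1.0, 2.0])
print(hvp(f, x, v))  # [ 0.5 -1.   2. ]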
585ca7a5c749b6d393ae04e1e89f21f87c6f0269
3,652,814
import horovod.torch as hvd


def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    return hvd.allgather(tensor.contiguous())
97b2a3e43cf36adda6c517264f3307deb4d98ed6
3,652,815
import cv2
import numpy as np


def get_min_area_rect(points):
    """
    Get the minimum-area bounding rectangle of a point set.

    :param points: contour point set, an n*1*2 ndarray
    :return: the four corner points of the minimum-area bounding rectangle,
             a 4*1*2 ndarray
    """
    rect = cv2.minAreaRect(points)  # minimum-area bounding rectangle
    box = cv2.boxPoints(rect)  # get the four corner points of the rectangle
    box = np.int0(box)
    box = box[:, np.newaxis, :]  # reshape from 4*2 to 4*1*2
    return box
59b801e77d03d3f81227c645a55b2c56f2ce5959
3,652,817
def vector_to_cyclic_matrix(vec):
    """vec is the first column of the cyclic matrix"""
    n = len(vec)
    if vec.is_sparse():
        matrix_dict = dict((((x + y) % n, y), True)
                           for x in vec.dict() for y in xrange(n))
        return matrix(GF(2), n, n, matrix_dict)
    vec_list = vec.list()
    matrix_lists = [vec_list[-i:] + vec_list[:-i] for i in xrange(n)]
    return matrix(GF(2), n, n, matrix_lists)
79fdb28f1b254de4700e1e163b95b4bdbf579294
3,652,818
def cfn_resource_helper():
    """
    A helper method for the custom cloudformation resource
    """
    # Custom logic goes here. This might include side effects or
    # producing a return value used elsewhere in your code.
    logger.info("cfn_resource_helper logic")
    return True
865216f77f09681e36e8b8409a8673c8dbcdffa0
3,652,819
import pandas as pd


def get_ts_code_and_list_date(engine):
    """Query ts_code and list_date from the stock_basic table."""
    return pd.read_sql('select ts_code,list_date from stock_basic', engine)
4bd31cbadfdb92a70983d53c74426b0727ad4d0b
3,652,820
import numpy as np
from sklearn.model_selection import StratifiedKFold


def nested_cv_ridge(
    X, y, test_index, n_bins=4, n_folds=3,
    alphas=10**np.linspace(-20, 20, 81),
    npcs=[10, 20, 40, 80, 160, 320, None],
    train_index=None,
):
    """
    Predict the scores of the testing subjects based on data from the
    training subjects using ridge regression. Hyperparameters are chosen
    based on a nested cross-validation. The inner loop of the nested
    cross-validation is a stratified k-fold cross-validation.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples, )
    test_index : ndarray of shape (n_test_samples, )
        Indices for the samples that are used for testing.
    n_bins : int
        Training data are divided into `n_bins` bins for stratified k-fold
        cross-validation.
    n_folds : int
        Number of folds for stratified k-fold cross-validation.
    alphas : {list, ndarray of shape (n_alphas, )}
        Choices of the regularization parameter for ridge regression.
    npcs : list
        Choices of the number of PCs used in the prediction model in
        increasing order. Each element in the list should be an integer or
        `None`. `None` means all PCs are used.
    train_index : {None, ndarray of shape (n_training_samples, )}
        Indices for the samples that are used for training. If it is `None`,
        then all the samples except for the test samples are used.

    Returns
    -------
    yhat : ndarray of shape (n_test_samples, )
        Predicted scores for the test samples.
    alpha : float
        The chosen element of `alphas` based on nested cross-validation.
    npc : {int, None}
        The chosen element of `npcs` based on nested cross-validation.
    cost : float
        The cost based on the chosen hyperparameters, which is the minimum
        cost for training data among all hyperparameter choices.
    """
    if train_index is None:
        train_index = np.setdiff1d(np.arange(X.shape[0], dtype=int), test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]

    bin_limits = np.histogram(y_train, n_bins)[1]
    bins = np.digitize(y_train, bin_limits[:-1])

    cv = StratifiedKFold(n_splits=n_folds)
    costs = []
    for train, test in cv.split(X_train, bins):
        yhat = grid_ridge(X_train[train], X_train[test], y_train[train],
                          alphas, npcs)
        cost = ((y_train[test][:, np.newaxis, np.newaxis] - yhat)**2).sum(axis=0)
        costs.append(cost)
    costs = np.sum(costs, axis=0)
    a, b = np.unravel_index(costs.argmin(), costs.shape)
    alpha = alphas[a]
    npc = npcs[b]

    yhat = ridge(X_train, X_test, y_train, alpha, npc)

    return yhat, alpha, npc, costs[a, b]
47d5d8821b796031298a194aaf1781dc4df68a2f
3,652,821
def absolute_time(time_delta, meta):
    """Convert a MET into human readable date and time.

    Parameters
    ----------
    time_delta : `~astropy.time.TimeDelta`
        time in seconds after the MET reference
    meta : dict
        dictionary with the keywords ``MJDREFI`` and ``MJDREFF``

    Returns
    -------
    time : `~astropy.time.Time`
        absolute time with ``format='ISOT'`` and ``scale='UTC'``
    """
    time = time_ref_from_dict(meta) + time_delta
    return Time(time.utc.isot)
dd6c02be87840022e88769d3d70e67ce50f24d64
3,652,822
from flask import Flask

from controllers.main import main
from controllers.user import user


def create_app(object_name, env="prod"):
    """
    Arguments:
        object_name: the python path of the config object,
                     e.g. webapp.settings.ProdConfig
        env: The name of the current environment, e.g. prod or dev
    """
    app = Flask(__name__)
    app.config.from_object(object_name)
    app.config['ENV'] = env

    # init the cache
    cache.init_app(app)

    # init SQLAlchemy
    db.init_app(app)

    login_manager.init_app(app)

    # register our blueprints
    app.register_blueprint(main)
    app.register_blueprint(user)

    return app
a2760a759f3afebf8e09c498398712fb26d44de8
3,652,823
import datetime


def yyyydoy_to_date(yyyydoy):
    """
    Convert a string in the form of either 'yyyydoy' or 'yyyy.doy' to a
    datetime.date object, where yyyy is the 4 character year number and doy
    is the 3 character day of year

    :param yyyydoy: string with date in the form 'yyyy.doy' or 'yyyydoy'
    :return: datetime.date object
    :rtype: datetime.date
    """
    try:
        if '.' in yyyydoy:
            if len(yyyydoy) != 8:
                raise ValueError('Invalid string: must be yyyydoy or yyyy.doy')
            yyyy, doy = yyyydoy.split('.')
        else:
            if len(yyyydoy) != 7:
                raise ValueError('Invalid string: must be yyyydoy or yyyy.doy')
            yyyy = yyyydoy[0:4]
            doy = yyyydoy[4:7]
        return datetime.date(int(yyyy), 1, 1) + datetime.timedelta(int(doy) - 1)
    except ValueError:
        raise ValueError('Invalid string: must be yyyydoy or yyyy.doy')
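Both accepted forms map day-of-year onto the calendar; for example, day 32 of 2020 is February 1:

print(yyyydoy_to_date('2020032'))   # 2020-02-01
print(yyyydoy_to_date('2020.032'))  # 2020-02-01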
b289419c14321afc37ea05501307e36203191fec
3,652,824
from pyparsing import (Forward, Group, Keyword, OneOrMore, Optional,
                       Suppress, delimitedList, oneOf)


def create_selection():
    """Create a selection expression"""
    operation = Forward()
    nested = Group(Suppress("(") + operation + Suppress(")")).setResultsName("nested")
    select_expr = Forward()
    functions = select_functions(select_expr)
    maybe_nested = functions | nested | Group(var_val)
    operation <<= maybe_nested + OneOrMore(oneOf("+ - * /") + maybe_nested)
    select_expr <<= operation | maybe_nested
    alias = Group(Suppress(upkey("as")) + var).setResultsName("alias")
    full_select = Group(
        Group(select_expr).setResultsName("selection") + Optional(alias)
    )
    return Group(
        Keyword("*") | upkey("count(*)") | delimitedList(full_select)
    ).setResultsName("attrs")
38a3eaef51d0559e796ce7b6bef6127a771a395d
3,652,825
import fbx


def move_nodes(source_scene, dest_scene):
    """
    Moves scene nodes from the source scene to the destination scene.

    :type source_scene: fbx.FbxScene
    :type dest_scene: fbx.FbxScene
    """
    source_scene_root = source_scene.GetRootNode()  # type: fbx.FbxNode
    dest_scene_root = dest_scene.GetRootNode()  # type: fbx.FbxNode

    for node in get_children(source_scene_root):
        dest_scene_root.AddChild(node)

    # Although the original nodes are attached to the destination scene root
    # node, they are still connected to the old one, so the connections must
    # be removed. Since there could be lots of children, it's better to
    # disconnect the root node from the children.
    source_scene_root.DisconnectAllSrcObject()

    # Because the Scene object also has connections to other types of FBX
    # objects, they need to be moved too. (I'm guessing) also since there
    # could be only a single mesh in the FBX, the scene has connections to
    # that too.
    for index in range(source_scene.GetSrcObjectCount()):
        fbx_obj = source_scene.GetSrcObject(index)  # type: fbx.FbxObject
        # Don't want to move the root node, the global settings or the
        # Animation Evaluator (at this point).
        # The equality check is split as the root node is an instance of the
        # fbx.FbxNode type but other objects such as fbx.FbxGlobalSettings
        # are subclasses of the fbx.FbxNode type but NOT instances. A little
        # weird, but this works! The == equality check could be used as a
        # fallback for isinstance() if necessary.
        if isinstance(fbx_obj, type(source_scene_root)):
            continue
        elif issubclass(type(fbx_obj), (fbx.FbxGlobalSettings,
                                        fbx.FbxAnimEvaluator,
                                        fbx.FbxAnimStack,
                                        fbx.FbxAnimLayer)):
            continue
        else:
            fbx_obj.ConnectDstObject(dest_scene)

    # Now the scene can be disconnected as everything has been moved!
    # (DO NOT FORGET THIS STEP)
    return source_scene.DisconnectAllSrcObject()
26a413736ab5fee46182f05247fe989d66358f19
3,652,826
def extract_values(*args):
    """
    Wrapper around `extract_value`; iteratively applies that method to all
    items in a list. If only one item was passed in, then we return that one
    item's value; if multiple items were passed in, we return a list of the
    corresponding item values.
    """
    processed = [extract_value(arg) for arg in args]
    if len(processed) == 1:
        return processed[0]
    return processed
2906ca3aa42bfb47b231fd23b2a69a816399c255
3,652,827
from functools import partial


def predefined_split(dataset):
    """Uses ``dataset`` for validation in :class:`.NeuralNet`.

    Examples
    --------
    >>> valid_ds = skorch.dataset.Dataset(X, y)
    >>> net = NeuralNet(..., train_split=predefined_split(valid_ds))

    Parameters
    ----------
    dataset: torch Dataset
        Validation dataset
    """
    return partial(_make_split, valid_ds=dataset)
4f4f775e41b07efba3425bc2243d9766b41f5bc1
3,652,828
from typing import Union

from torch import Tensor


def bgr_to_rgba(image: Tensor, alpha_val: Union[float, Tensor]) -> Tensor:
    """Convert an image from BGR to RGBA.

    Args:
        image (Tensor[B, 3, H, W]): BGR Image to be converted to RGBA.
        alpha_val (float, Tensor[B, 1, H, W]): A float number or tensor for
            the alpha value.

    Returns:
        rgba (Tensor[B, 4, H, W]): RGBA version of the image.

    Notes:
        Current functionality is NOT supported by Torchscript.
    """
    if not isinstance(alpha_val, (float, Tensor)):
        raise TypeError(f"`alpha_val` must be a `float` or `Tensor`. "
                        f"But got: {type(alpha_val)}.")
    # Convert first to RGB, then add alpha channel
    rgb = bgr_to_rgb(image)
    rgba = rgb_to_rgba(rgb, alpha_val)
    return rgba
654cb3df7432d799b2a391bf5cfa19a15a26b1fa
3,652,830
import numpy as np


def d_matrix_1d(n, r, v):
    """Initializes the differentiation matrices on the interval.

    Args:
        n: The order of the polynomial.
        r: The nodal points.
        v: The Vandermonde matrix.

    Returns:
        The gradient matrix D.
    """
    vr = grad_vandermonde_1d(n, r)
    return np.linalg.lstsq(v.T, vr.T, rcond=None)[0].T
a8d1df34726ea1ac6ef7b49209c45374cb2bed04
3,652,831
import functools


def compile_replace(pattern, repl, flags=0):
    """Construct a method that can be used as a replace method for sub, subn, etc."""
    call = None
    if pattern is not None and isinstance(pattern, RE_TYPE):
        if isinstance(repl, (compat.string_type, compat.binary_type)):
            repl = ReplaceTemplate(pattern, repl, bool(flags & FORMAT))
            call = Replace(
                functools.partial(_apply_replace_backrefs, repl=repl),
                repl.use_format, repl.pattern_hash
            )
        elif isinstance(repl, Replace):
            if flags:
                raise ValueError("Cannot process flags argument with a compiled pattern!")
            if repl.pattern_hash != hash(pattern):
                raise ValueError("Pattern hash doesn't match hash in compiled replace!")
            call = repl
        elif isinstance(repl, ReplaceTemplate):
            if flags:
                raise ValueError("Cannot process flags argument with a ReplaceTemplate!")
            call = Replace(
                functools.partial(_apply_replace_backrefs, repl=repl),
                repl.use_format, repl.pattern_hash
            )
        else:
            raise TypeError("Not a valid type!")
    else:
        raise TypeError("Pattern must be a compiled regular expression!")
    return call
eb753edeb9c212a28968eaf9c070aeeec8678d49
3,652,832
import six


def python_2_unicode_compatible(klass):
    """
    From Django

    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__
    method returning text and apply this decorator to the class.
    """
    if six.PY2:  # pragma: no cover
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
18c290d649e0299c72f85209c4db6a7a4b716300
3,652,833
import re
import logging


def ParseNewPingMsg(msg):
    """Attempt to parse the message for a ping (in the new format).

    Return the request and response strings (json-ified dict) if parsing
    succeeded. Return None otherwise.
    """
    parsed = re.match(kNewPingMsgRe, msg)
    if not parsed:
        return None
    try:
        return (parsed.group(1), parsed.group(2))
    except IndexError as e:
        logging.warning('RE matched "%s", but extracted wrong numbers of '
                        'items: %r' % (msg, e))
        return None
6bca164892ea13b598af75d468580a7d4bd04d4c
3,652,834
from faker import Faker


def parse_main_dict():
    """Parses dict to get the lists of countries, cities, and fakers.

    Fakers allow generation of region specific fake data. Also generates
    the total number of agents.
    """
    Faker.seed(seed)  # required to generate reproducible data
    countries = main_dict.keys()
    cities = [v['city'] for v in main_dict.values()]
    fakers = [Faker(v['faker_abbrev']) for v in main_dict.values()]
    total_agents = sum([v['number_of_agents'] for v in main_dict.values()])
    return fakers, countries, cities, total_agents
7cf9870c86c40bb2d1565479d6789d9cd7114024
3,652,835
import json


def format_payload(svalue):
    """formats mqtt payload"""
    data = {"idx": IDX, "nvalue": 0, "svalue": svalue}
    return json.dumps(data)
1cbee0d5169acde802be176cc47a25c2db1c2f62
3,652,836
import globus_sdk


def load_auth_client():
    """Create an AuthClient for the portal

    No credentials are used if the server is not production

    Returns
    -------
    globus_sdk.ConfidentialAppAuthClient
        Client used to perform GlobusAuth actions
    """
    _prod = True

    if _prod:
        app = globus_sdk.ConfidentialAppAuthClient(GLOBUS_CLIENT, GLOBUS_KEY)
    else:
        app = globus_sdk.ConfidentialAppAuthClient('', '')
    return app
8e16303fa80e775d94e669d96db24a9f7a63e0b6
3,652,837
def DCGAN_discriminator(img_dim, nb_patch, bn_mode,
                        model_name="DCGAN_discriminator", use_mbd=True):
    """
    Discriminator model of the DCGAN

    args : img_dim (tuple of int) num_chan, height, width
           pretr_weights_file (str) file holding pre trained weights

    returns : model (keras NN) the Neural Net model
    """
    list_input = [Input(shape=img_dim, name="disc_input_%s" % i)
                  for i in range(nb_patch)]

    if K.image_dim_ordering() == "th":
        bn_axis = 1
    else:
        bn_axis = -1

    nb_filters = 64
    nb_conv = int(np.floor(np.log(img_dim[1]) / np.log(2)))
    list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    # First conv
    x_input = Input(shape=img_dim, name="discriminator_input")
    # x = Convolution2D(list_filters[0], 3, 3, subsample=(2, 2),
    #                   name="disc_conv2d_1", border_mode="same")(x_input)
    # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
    # x = LeakyReLU(0.2)(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x_input)
    x = Convolution2D(list_filters[0] / 8, 1, 1, activation='relu',
                      init='glorot_uniform', border_mode='same',
                      name='disc_conv2d_1')(x)
    x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
    e1 = Convolution2D(list_filters[0] / 2, 1, 1, activation='relu',
                       init='glorot_uniform', border_mode='same')(x)
    e2 = Convolution2D(list_filters[0] / 2, 3, 3, activation='relu',
                       init='glorot_uniform', border_mode='same')(x)
    x = merge([e1, e2], mode='concat', concat_axis=bn_axis)

    # Next convs
    for i, f in enumerate(list_filters[1:]):
        name = "disc_conv2d_fire_%s" % (i + 2)
        # x = Convolution2D(f, 3, 3, subsample=(2, 2), name=name,
        #                   border_mode="same")(x)
        # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
        # x = LeakyReLU(0.2)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        x = Convolution2D(f / 8, 1, 1, activation='relu',
                          init='glorot_uniform', border_mode='same',
                          name=name)(x)
        x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x)
        e1 = Convolution2D(f / 2, 1, 1, activation='relu',
                           init='glorot_uniform', border_mode='same')(x)
        e2 = Convolution2D(f / 2, 3, 3, activation='relu',
                           init='glorot_uniform', border_mode='same')(x)
        x = merge([e1, e2], mode='concat', concat_axis=bn_axis)

    x_flat = Flatten()(x)
    x = Dense(2, activation='softmax', name="disc_dense")(x_flat)

    PatchGAN = Model(input=[x_input], output=[x, x_flat], name="PatchGAN")
    print("PatchGAN summary")
    PatchGAN.summary()

    x = [PatchGAN(patch)[0] for patch in list_input]
    x_mbd = [PatchGAN(patch)[1] for patch in list_input]

    if len(x) > 1:
        x = merge(x, mode="concat", name="merge_feat")
    else:
        x = x[0]

    if use_mbd:
        if len(x_mbd) > 1:
            x_mbd = merge(x_mbd, mode="concat", name="merge_feat_mbd")
        else:
            x_mbd = x_mbd[0]

        num_kernels = 100
        dim_per_kernel = 5

        M = Dense(num_kernels * dim_per_kernel, bias=False, activation=None)
        MBD = Lambda(minb_disc, output_shape=lambda_output)

        x_mbd = M(x_mbd)
        x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd)
        x_mbd = MBD(x_mbd)
        x = merge([x, x_mbd], mode='concat')

    x_out = Dense(2, activation="softmax", name="disc_output")(x)

    discriminator_model = Model(input=list_input, output=[x_out],
                                name=model_name)

    return discriminator_model
7aeabfffcc15a10c2eb2c81c795cbc4ff70a890b
3,652,838
def common_stat_style():
    """
    The common style for info statistics.
    Should be used in a dash component className.

    Returns:
        (str): The style to be used in className.
    """
    return "has-margin-right-10 has-margin-left-10 has-text-centered has-text-weight-bold"
899381fc56e28ecd042e19507f6bc51ceeca3ef0
3,652,839
def TourType_LB_rule(M, t):
    """
    Lower bound on tour type

    :param M: Model
    :param t: tour type

    :return: Constraint rule
    """
    return sum(M.TourType[i, t] for (i, s) in M.okTourType if s == t) >= M.tt_lb[t]
0495e2d01c7d5d02e8bc85374ec1d05a8fdcbd91
3,652,840
import json


def build_auto_dicts(jsonfile):
    """Build auto dictionaries from json"""
    dicts = {}
    with open(jsonfile, "r") as jsondata:
        data = json.load(jsondata)
    for dicti in data:
        partialstr = data[dicti]["partial"]
        partial = bool(partialstr == "True")
        dictlist = data[dicti]["list"]
        autodict = AuDict(partial)
        tag = get_tag(dicti)
        autodict.set_base_tag(tag)
        for dictdata in dictlist:
            value = dictdata["value"]
            applicants = dictdata["applicants"]
            autodict.add_auto_value(value, applicants)
        dicts[tag.tag] = autodict
    return dicts
50978acc9696647746e2065144fda8537d0c6dba
3,652,841
import numpy as np
import scipy.special as sp


def log_gammainv_pdf(x, a, b):
    """
    log density of the inverse gamma distribution with shape a and scale b,
    at point x, using Stirling's approximation for a > 100
    """
    return a * np.log(b) - sp.gammaln(a) - (a + 1) * np.log(x) - b / x
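The closed form can be cross-checked against scipy.stats, whose invgamma distribution uses the same shape/scale parameterization via the scale argument:

from scipy import stats

x, a, b = 2.0, 3.0, 1.5
print(log_gammainv_pdf(x, a, b))             # -2.9993...
print(stats.invgamma.logpdf(x, a, scale=b))  # same value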
27bc239770e94cb68a27291abd01050f9780c4fb
3,652,842
from pathlib import Path

import geopandas as gpd


def read_basin() -> gpd.GeoDataFrame:
    """Read the basin shapefile."""
    basin = gpd.read_file(Path(ROOT, "HCDN_nhru_final_671.shp"))
    basin = basin.to_crs("epsg:4326")
    basin["hru_id"] = basin.hru_id.astype(str).str.zfill(8)
    return basin.set_index("hru_id").geometry
9d590d478b71bdd2a857ab8f0864144ac598cc58
3,652,843
from typing import Callable, Tuple

import numpy as np
from sklearn.base import BaseEstimator


def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray,
                   scoring: Callable[..., float],
                   cv: int = 5) -> Tuple[float, float]:
    """
    Evaluate metric by cross-validation for given estimator

    Parameters
    ----------
    estimator: BaseEstimator
        Initialized estimator to use for fitting the data
    X: ndarray of shape (n_samples, n_features)
        Input data to fit
    y: ndarray of shape (n_samples, )
        Responses of input data to fit to
    scoring: Callable[..., float]
        Callable to use for evaluating the performance of the cross-validated
        model. When called, the scoring function receives the true- and
        predicted values for each sample and potentially additional
        arguments. The function returns the score for given input.
    cv: int
        Specify the number of folds.

    Returns
    -------
    train_score: float
        Average train score over folds
    validation_score: float
        Average validation score over folds
    """
    X = X.flatten()
    y = y.flatten()
    kf_X = np.array_split(X, cv, axis=0)
    kf_y = np.array_split(y, cv, axis=0)

    # Default training set: everything except the first fold.
    X_wo_fold = np.concatenate(kf_X[1:])
    y_wo_fold = np.concatenate(kf_y[1:])
    train_scores = []
    validation_score = []
    for fold in range(cv):
        cur_fold = kf_X[fold]
        cur_fold_y = kf_y[fold]
        if len(kf_y[fold + 1:]) == 0:
            # Last fold: train on everything before it.
            X_wo_fold = np.concatenate(kf_X[:-1])
            y_wo_fold = np.concatenate(kf_y[:-1])
        elif len(kf_X[:fold]) != 0:
            # Middle folds: train on everything except the current fold.
            X_wo_fold1, X_wo_fold2 = np.concatenate(kf_X[:fold]), np.concatenate(kf_X[fold + 1:])
            X_wo_fold = np.concatenate((X_wo_fold1, X_wo_fold2))
            y_wo_fold1, y_wo_fold2 = np.concatenate(kf_y[:fold]), np.concatenate(kf_y[fold + 1:])
            y_wo_fold = np.concatenate((y_wo_fold1, y_wo_fold2))
        h_i = estimator.fit(X_wo_fold, y_wo_fold)
        y_pred_test = h_i.predict(cur_fold)
        y_pred_train = h_i.predict(X_wo_fold)
        cur_train_score = scoring(y_wo_fold, y_pred_train)
        train_scores.append(cur_train_score)
        cur_validation_score = scoring(cur_fold_y, y_pred_test)
        validation_score.append(cur_validation_score)

    return np.mean(train_scores), np.mean(validation_score)
c127b1cf68d011e76fdbf813673bf1d84a7520bb
3,652,844
def GetMembership(name, release_track=None):
    """Gets a Membership resource from the GKE Hub API.

    Args:
        name: the full resource name of the membership to get, e.g.,
            projects/foo/locations/global/memberships/name.
        release_track: the release_track used in the gcloud command, or None
            if it is not available.

    Returns:
        a Membership resource

    Raises:
        apitools.base.py.HttpError: if the request returns an HTTP error
    """
    client = gkehub_api_util.GetApiClientForTrack(release_track)
    return client.projects_locations_memberships.Get(
        client.MESSAGES_MODULE.GkehubProjectsLocationsMembershipsGetRequest(
            name=name))
b2232faec0a2302ec554a8658cdf0a44f9374861
3,652,846
def receive_messages(queue, max_number, wait_time):
    """
    Receive a batch of messages in a single request from an SQS queue.

    Usage is shown in usage_demo at the end of this module.

    :param queue: The queue from which to receive messages.
    :param max_number: The maximum number of messages to receive. The actual
                       number of messages received might be less.
    :param wait_time: The maximum time to wait (in seconds) before returning.
                      When this number is greater than zero, long polling is
                      used. This can result in reduced costs and fewer false
                      empty responses.
    :return: The list of Message objects received. These each contain the
             body of the message and metadata and custom attributes.
    """
    try:
        messages = queue.receive_messages(
            MessageAttributeNames=['All'],
            MaxNumberOfMessages=max_number,
            WaitTimeSeconds=wait_time
        )
        for msg in messages:
            logger.info("Received message: %s: %s", msg.message_id, msg.body)
            request = extract_request(msg.message_attributes)
            recommendations = get_recommendations(request)
            send_to_sns(request, recommendations)
    except ClientError as error:
        logger.exception("Couldn't receive messages from queue: %s", queue)
        raise error
    else:
        return messages
dd422eb96ddb41513bcf248cf2dc3761a9b56191
3,652,847
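A hedged sketch of how the polling loop above might be driven; the queue name is hypothetical, and extract_request/get_recommendations/send_to_sns are assumed to be defined elsewhere in the module:

import boto3

sqs = boto3.resource("sqs")
queue = sqs.get_queue_by_name(QueueName="recommendation-requests")  # hypothetical name

# Long-poll in batches of up to 10 messages, waiting at most 20 seconds per call
while True:
    messages = receive_messages(queue, max_number=10, wait_time=20)
    for msg in messages:
        msg.delete()  # acknowledge each message once processing has succeeded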
def get_snmp_community(device, find_filter=None):
    """Retrieves snmp community settings for a given device

    Args:
        device (Device): This is the device object of an NX-API enabled device
            using the Device class
        find_filter (str): optional arg to filter out this specific community

    Returns:
        dictionary
    """
    command = 'show snmp community'
    data = device.show(command)
    data_dict = xmltodict.parse(data[1])
    c_dict = {}

    try:
        comm_table = data_dict['ins_api']['outputs']['output']['body'].get(
            'TABLE_snmp_community')['ROW_snmp_community']

        for each in comm_table:
            community = {}
            key = str(each['community_name'])
            community['group'] = str(each['grouporaccess'])
            community['acl'] = str(each['aclfilter'])
            c_dict[key] = community

    except TypeError:
        # A single community is returned as a dict rather than a list of rows
        community = {}
        key = str(comm_table['community_name'])
        community['group'] = str(comm_table['grouporaccess'])
        community['acl'] = str(comm_table['aclfilter'])
        c_dict[key] = community

    except (KeyError, AttributeError):
        return c_dict

    if find_filter is None:
        return c_dict
    return c_dict.get(find_filter, {})
ae36269133fcc482c30bd29f58e44d3d1e10dcd1
3,652,848
def get_header_size(tif): """ Gets the header size of a GeoTIFF file in bytes. The code used in this function and its helper function `_get_block_offset` were extracted from the following source: https://github.com/OSGeo/gdal/blob/master/swig/python/gdal-utils/osgeo_utils/samples/validate_cloud_optimized_geotiff.py Copyright (c) 2017, Even Rouault Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. Parameters ---------- tif: str A path to a GeoTIFF file of the currently processed NRB product. Returns ------- header_size: int The size of all IFD headers of the GeoTIFF file in bytes. """ def _get_block_offset(band): blockxsize, blockysize = band.GetBlockSize() for y in range(int((band.YSize + blockysize - 1) / blockysize)): for x in range(int((band.XSize + blockxsize - 1) / blockxsize)): block_offset = band.GetMetadataItem('BLOCK_OFFSET_%d_%d' % (x, y), 'TIFF') if block_offset: return int(block_offset) return 0 details = {} ds = gdal.Open(tif) main_band = ds.GetRasterBand(1) ovr_count = main_band.GetOverviewCount() block_offset = _get_block_offset(band=main_band) details['data_offsets'] = {} details['data_offsets']['main'] = block_offset for i in range(ovr_count): ovr_band = ds.GetRasterBand(1).GetOverview(i) block_offset = _get_block_offset(band=ovr_band) details['data_offsets']['overview_%d' % i] = block_offset headers_size = min(details['data_offsets'][k] for k in details['data_offsets']) if headers_size == 0: headers_size = gdal.VSIStatL(tif).size return headers_size
f7d41b9f6140e2d555c8de7e857612c692ebea16
3,652,849
def format_x_ticks_as_dates(plot): """Formats x ticks YYYY-MM-DD and removes the default 'Date' label. Args: plot: matplotlib.AxesSubplot object. """ plot.xaxis.set_major_formatter(mpl.dates.DateFormatter('%Y-%m-%d')) plot.get_xaxis().get_label().set_visible(False) return plot
00838b40582c9205e3ba6f87192852af37a88e7a
3,652,850
def operations(): """Gets the base class for the operations class. We have to use the configured base back-end's operations class for this. """ return base_backend_instance().ops.__class__
845d50884e58491539fb9ebfcf0da62e5cad66d4
3,652,851
import mimetypes


def office_convert_get_page(request, repo_id, commit_id, path, filename):
    """Valid static file paths include:
    - index.html for spreadsheets and index_html_xxx.png for images embedded in spreadsheets
    - 77e168722458356507a1f373714aa9b575491f09.pdf
    """
    if not HAS_OFFICE_CONVERTER:
        raise Http404

    if not _OFFICE_PAGE_PATTERN.match(filename):
        return HttpResponseForbidden()

    path = '/' + path
    file_id = _office_convert_get_file_id(request, repo_id, commit_id, path)

    if filename.endswith('.pdf'):
        filename = "{0}.pdf".format(file_id)

    if CLUSTER_MODE:
        resp = cluster_get_office_converted_page(path, filename, file_id)
    else:
        resp = get_office_converted_page(request, filename, file_id)

    if filename.endswith('.page'):
        content_type = 'text/html'
    else:
        content_type = mimetypes.guess_type(filename)[0] or 'text/html'
    resp['Content-Type'] = content_type
    return resp
48a3c5716b833e639a10c0366829185a1ce623aa
3,652,852
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32

    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
""" ants_map_inv = {ants_map[i]: i for i in ants_map} dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs) data_r = np.zeros(dshape, dtype=dtype) data_i = np.zeros_like(data_r) wgts = np.zeros_like(data_r) wgtsum = 0.0 for chunk in corr_inds: for fitgrp in chunk: for (i, j) in fitgrp: ap = ants_map_inv[i], ants_map_inv[j] bl = ap + (polarization,) dinds1, dinds2, pol_ind = uvdata._key2inds(bl) if len(dinds1) > 0: dinds = dinds1 conjugate = False pol_ind = pol_ind[0] else: dinds = dinds2 conjugate = True pol_ind = pol_ind[1] dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]] data = uvdata.data_array[dind, 0, :, pol_ind].squeeze() iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze() nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze() data /= data_scale_factor if conjugate: data = np.conj(data) data_r[i, j] = data.real.astype(dtype) data_i[i, j] = data.imag.astype(dtype) if weights is None: wgts[i, j] = iflags if nsamples_in_weights: wgts[i, j] *= nsamples else: if ap in weights.get_antpairs(): dinds = weights.antpair2ind(*ap) else: dinds = weights.antpair2ind(*ap[::-1]) dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]] polnum = np.where( weights.polarization_array == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation) )[0][0] wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags if nsamples_in_weights: wgts[i, j] *= nsamples wgtsum += np.sum(wgts[i, j]) data_r = tf.convert_to_tensor(data_r, dtype=dtype) data_i = tf.convert_to_tensor(data_i, dtype=dtype) wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype) nchunks = len(corr_inds) data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)] data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)] wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)] return data_r, data_i, wgts
0a780bb022854c83341ed13c0a7ad0346bb43016
3,652,853
import torch
import torch.nn as nn

# Small constant to avoid division by zero; this value is an assumption --
# the original module defines EPSILON elsewhere.
EPSILON = 1e-12


def _normalize_rows(t, softmax=False):
    """
    Normalizes the rows of a tensor either using a softmax or
    just plain division by row sums

    Args:
        t (:obj:`batch_like`)

    Returns:
        Normalized version of t where rows sum to 1
    """
    if not softmax:
        # EPSILON hack avoids occasional NaNs
        row_sums = torch.sum(t, len(t.size())-1, keepdim=True) + EPSILON
        return torch.div(t, row_sums.expand_as(t))
    else:
        # The view is 2D (rows x last dim), so softmax over dim=1 normalizes rows
        s = nn.Softmax(dim=1)
        return s(t.view(-1, t.size(len(t.size())-1))).view(t.size())
3ffcedbaf279ead72414256290d2b88078aff468
3,652,854
def calculate_baselines(baselines: pd.DataFrame) -> dict: """ Read a file that contains multiple runs of the same pair. The format of the file must be: workload id, workload argument, run number, tFC, tVM This function calculates the average over all runs of each unique pair of workload id and workload argument. """ if type(baselines) is not pd.DataFrame: raise TypeError("calculate_baselines: invalid object type passed.") processed_baselines = {} distinct_workloads = baselines[COLUMN_WORKLOAD].unique() for workload in distinct_workloads: # Filter for current workload workload_baseline = baselines.loc[baselines[COLUMN_WORKLOAD] == workload] # Get all the arguments workload_arguments = workload_baseline[COLUMN_ARGUMENT].unique() if workload not in processed_baselines: processed_baselines[workload] = {} for argument in workload_arguments: workload_argument_baseline = workload_baseline.loc[ workload_baseline[COLUMN_ARGUMENT] == argument] # Calculate the means of the timings for the workload-argument pair tVM = round(workload_argument_baseline[COLUMN_TIMEVM].mean()) tFC = round(workload_argument_baseline[COLUMN_TIMEFC].mean()) processed_baselines[workload][argument] = [tFC, tVM] return processed_baselines
69cd0473fc21366e57d20ee39fceb704001aba1b
3,652,855
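For illustration, a small input frame in the layout calculate_baselines expects; the COLUMN_* constants live at module level in the original source, so the names below are assumptions:

import pandas as pd

# Assumed column-constant values; the real names are defined at module level.
COLUMN_WORKLOAD, COLUMN_ARGUMENT = "workload", "argument"
COLUMN_TIMEFC, COLUMN_TIMEVM = "tFC", "tVM"

runs = pd.DataFrame({
    COLUMN_WORKLOAD: ["io", "io", "cpu"],
    COLUMN_ARGUMENT: ["4k", "4k", "1"],
    "run": [0, 1, 0],
    COLUMN_TIMEFC: [105, 95, 50],
    COLUMN_TIMEVM: [201, 199, 80],
})

baselines = calculate_baselines(runs)
# {'io': {'4k': [100, 200]}, 'cpu': {'1': [50, 80]}}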
def pick_ind(x, minmax): """ Return indices between minmax[0] and minmax[1]. Args: x : Input vector minmax : Minimum and maximum values Returns: indices """ return (x >= minmax[0]) & (x <= minmax[1])
915a1003589b880d4edf5771a23518d2d4224094
3,652,856
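A quick example of the boolean mask pick_ind returns:

import numpy as np

x = np.array([1.0, 2.5, 4.0, 7.5])
mask = pick_ind(x, (2.0, 5.0))   # array([False,  True,  True, False])
selected = x[mask]               # array([2.5, 4. ])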
def read_files(file_prefix,start=0,end=100,nfmt=3,pixel_map=None): """ read files that have a numerical suffix """ images = [] format = '%' + str(nfmt) + '.' + str(nfmt) + 'd' for j in range(start,end+1): ext = format % j file = file_prefix + '_' + ext + '.tif' arr = read(file,pixel_map=pixel_map) images.append(arr) return images
95d283f04b8ef6652da290396bb4649deedff665
3,652,857
def describing_function(
        F, A, num_points=100, zero_check=True, try_method=True):
    """Numerically compute the describing function of a nonlinear function

    The describing function of a nonlinearity is given by magnitude and phase
    of the first harmonic of the function when evaluated along a sinusoidal
    input :math:`A \\sin \\omega t`.  This function returns the magnitude and
    phase of the describing function at amplitude :math:`A`.

    Parameters
    ----------
    F : callable
        The function F() should accept a scalar number as an argument and
        return a scalar number.  For compatibility with (static) nonlinear
        input/output systems, the output can also return a 1D array with a
        single element.

        If the function is an object with a method `describing_function`
        then this method will be used to compute the describing function
        instead of a nonlinear computation.  Some common nonlinearities
        use the :class:`~control.DescribingFunctionNonlinearity` class,
        which provides this functionality.

    A : array_like
        The amplitude(s) at which the describing function should be calculated.

    zero_check : bool, optional
        If `True` (default), then when `A` is zero the function will be
        evaluated and checked to make sure it is zero.  If not, a
        `ValueError` exception is raised.  If zero_check is `False`, no
        check is made on the value of the function at zero.

    try_method : bool, optional
        If `True` (default), check the `F` argument to see if it is an
        object with a `describing_function` method and use this to compute
        the describing function.  More information is available in the
        `describing_function` method for the
        :class:`~control.DescribingFunctionNonlinearity` class.

    Returns
    -------
    df : array of complex
        The (complex) value of the describing function at the given amplitudes.

    Raises
    ------
    ValueError
        If A[i] < 0 or if A[i] = 0 and the function F(0) is non-zero.

    """
    # If there is an analytical solution, try using that first
    if try_method and hasattr(F, 'describing_function'):
        try:
            return np.vectorize(F.describing_function, otypes=[complex])(A)
        except NotImplementedError:
            # Drop through and do the numerical computation
            pass

    #
    # The describing function of a nonlinear function F() can be computed by
    # evaluating the nonlinearity over a sinusoid.  The Fourier series for a
    # static nonlinear function evaluated on a sinusoid can be written as
    #
    # F(A\sin\omega t) = \sum_{k=1}^\infty M_k(A) \sin(k\omega t + \phi_k(A))
    #
    # The describing function is given by the complex number
    #
    #    N(A) = M_1(A) e^{j \phi_1(A)} / A
    #
    # To compute this, we compute F(A \sin\theta) for \theta between 0 and 2
    # \pi, use the identities
    #
    #   \sin(\theta + \phi) = \sin\theta \cos\phi + \cos\theta \sin\phi
    #   \int_0^{2\pi} \sin^2 \theta d\theta = \pi
    #   \int_0^{2\pi} \cos^2 \theta d\theta = \pi
    #
    # and then integrate the product against \sin\theta and \cos\theta to obtain
    #
    #   \int_0^{2\pi} F(A\sin\theta) \sin\theta d\theta = M_1 \pi \cos\phi
    #   \int_0^{2\pi} F(A\sin\theta) \cos\theta d\theta = M_1 \pi \sin\phi
    #
    # From these we can compute M1 and \phi.
    #

    # Evaluate over a full range of angles (leave off endpoint a la DFT)
    theta, dtheta = np.linspace(
        0, 2*np.pi, num_points, endpoint=False, retstep=True)
    sin_theta = np.sin(theta)
    cos_theta = np.cos(theta)

    # See if this is a static nonlinearity (assume not, just in case)
    if not hasattr(F, '_isstatic') or not F._isstatic():
        # Initialize any internal state by going through an initial cycle
        for x in np.atleast_1d(A).min() * sin_theta:
            F(x)                # ignore the result

    # Go through all of the amplitudes we were given
    retdf = np.empty(np.shape(A), dtype=complex)
    df = retdf                  # Access to the return array
    df.shape = (-1, )           # as a 1D array
    for i, a in enumerate(np.atleast_1d(A)):
        # Make sure we got a valid argument
        if a == 0:
            # Check to make sure the function has zero output with zero input
            if zero_check and np.squeeze(F(0.)) != 0:
                raise ValueError("function must evaluate to zero at zero")
            df[i] = 1.
            continue
        elif a < 0:
            raise ValueError("cannot evaluate describing function for A < 0")

        # Save the scaling factor to make the formulas simpler
        scale = dtheta / np.pi / a

        # Evaluate the function along a sinusoid
        F_eval = np.array([F(x) for x in a*sin_theta]).squeeze()

        # Compute the projections onto sine and cosine
        df_real = (F_eval @ sin_theta) * scale      # = M_1 \cos\phi / a
        df_imag = (F_eval @ cos_theta) * scale      # = M_1 \sin\phi / a

        df[i] = df_real + 1j * df_imag

    # Return the values in the same shape as they were requested
    return retdf
4e9b779ba30f2588262e2ecff7a993d210533b59
3,652,858
from typing import List def _read_point(asset: str, *args, **kwargs) -> List: """Read pixel value at a point from an asset""" with COGReader(asset) as cog: return cog.point(*args, **kwargs)
246c98d55fd27465bc2c6f737cac342ccf9d52d8
3,652,859
import torch
# to_tensor lives in torchvision's functional transforms API
import torchvision.transforms.functional as F


def image2tensor(image: np.ndarray, range_norm: bool, half: bool) -> torch.Tensor:
    """Convert ``PIL.Image`` to Tensor.

    Args:
        image (np.ndarray): The image data read by ``PIL.Image``
        range_norm (bool): Scale [0, 1] data to between [-1, 1]
        half (bool): Whether to convert the tensor from torch.float32 to torch.half

    Returns:
        Normalized image data

    Examples:
        >>> image = cv2.imread("image.bmp", cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
        >>> tensor_image = image2tensor(image, range_norm=False, half=False)

    """
    tensor = F.to_tensor(image)

    if range_norm:
        tensor = tensor.mul_(2.0).sub_(1.0)
    if half:
        tensor = tensor.half()

    return tensor
86ab04d599ac9b1bfe2e90d0b719ea47dc8f7671
3,652,861
def panda_four_load_branch():
    """
    This function creates a simple six bus system with four radial low voltage nodes connected to \
    a medium voltage slack bus. At every low voltage node the same load is connected.

    RETURN:

         **net** - Returns the required four load system

    EXAMPLE:

         import pandapower.networks as pn

         net_four_load = pn.panda_four_load_branch()
    """
    pd_net = pp.create_empty_network()

    busnr1 = pp.create_bus(pd_net, name="bus1", vn_kv=10.)
    busnr2 = pp.create_bus(pd_net, name="bus2", vn_kv=.4)
    busnr3 = pp.create_bus(pd_net, name="bus3", vn_kv=.4)
    busnr4 = pp.create_bus(pd_net, name="bus4", vn_kv=.4)
    busnr5 = pp.create_bus(pd_net, name="bus5", vn_kv=.4)
    busnr6 = pp.create_bus(pd_net, name="bus6", vn_kv=.4)

    pp.create_ext_grid(pd_net, busnr1)

    pp.create_transformer(pd_net, busnr1, busnr2, std_type="0.25 MVA 10/0.4 kV")

    pp.create_line(pd_net, busnr2, busnr3, name="line1", length_km=0.05,
                   std_type="NAYY 4x120 SE")
    pp.create_line(pd_net, busnr3, busnr4, name="line2", length_km=0.05,
                   std_type="NAYY 4x120 SE")
    pp.create_line(pd_net, busnr4, busnr5, name="line3", length_km=0.05,
                   std_type="NAYY 4x120 SE")
    pp.create_line(pd_net, busnr5, busnr6, name="line4", length_km=0.05,
                   std_type="NAYY 4x120 SE")

    pp.create_load(pd_net, busnr3, 30, 10)
    pp.create_load(pd_net, busnr4, 30, 10)
    pp.create_load(pd_net, busnr5, 30, 10)
    pp.create_load(pd_net, busnr6, 30, 10)

    return pd_net
dd5bc45a75943f0c078ab3bde9aa94b4bafc804f
3,652,862
def word_flipper(our_string): """ Flip the individual words in a sentence Args: our_string(string): Strings to have individual words flip Returns: string: String with words flipped """ word_list = our_string.split(" ") for idx in range(len(word_list)): word_list[idx] = word_list[idx][::-1] # [index1:index2:step] return " ".join(word_list)
fd484079407342925fc13583fb1fbee9ee472b14
3,652,863
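Worked examples of the word-level reversal:

word_flipper("Hello World")   # 'olleH dlroW'
word_flipper("udacity")       # 'yticadu'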
import json import base64 def load_json(ctx, param, value): """Decode and load json for click option.""" value = value[1:] return json.loads(base64.standard_b64decode(value).decode())
99236d6fcde6c69a4bdadad4c6f3487d88fb7ce0
3,652,864
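A round-trip sketch for load_json; the leading character stripped by value[1:] appears to be a one-byte marker added by the calling code, which is an inference rather than a documented contract (ctx and param are unused):

import base64
import json

payload = base64.standard_b64encode(json.dumps({"key": "value"}).encode()).decode()
load_json(None, None, "b" + payload)   # -> {'key': 'value'}; "b" is the assumed marker byte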
def hyperparam_search(model_config, train, test):
    """Perform hyperparameter search using Bayesian optimization on a given model and dataset.

    Args:
        model_config (dict): the model and the parameter ranges to search in. Format:
            {
                "name": str,
                "class": type,
                "model": sklearn.base.BaseEstimator,
                "params": dict
            }
        train (pandas.DataFrame): training data
        test (pandas.DataFrame): test data
    """
    X_train = train.drop("label", axis=1)
    y_train = train.label
    X_test = test.drop("label", axis=1)
    y_test = test.label

    opt = BayesSearchCV(
        model_config["model"],
        model_config["params"],
        n_jobs=4,
        cv=5,
        random_state=RANDOM_SEED,
    )
    opt.fit(X_train, y_train)

    acc = opt.score(X_test, y_test)
    print(f"{model_config['name']} results:")
    print(f"Best validation accuracy: {opt.best_score_}")
    print(f"Test set accuracy: {acc}")
    print("Best parameters:")
    for param, value in opt.best_params_.items():
        print(f"- {param}: {value}")

    return {
        "name": model_config["name"],
        "class": model_config["class"],
        "model": opt.best_estimator_,
        "params": opt.best_params_,
        "score": acc,
    }
8f496a2c4494545ffdba2a5f63512ff45da4bb03
3,652,865
def _sawtooth_wave_samples(freq, rate, amp, num):
    """
    Generates a set of audio samples taken at the given sampling rate representing a sawtooth
    wave oscillating at the given frequency with the given amplitude, containing the given
    number of samples.

    :param float freq The frequency of oscillation of the sawtooth wave
    :param int rate The sampling rate
    :param float amp The amplitude of the sawtooth wave
    :param int num The number of samples to generate.
    :return List[float] The audio samples representing the signal as described above.
    """
    return [utils._sawtooth_sample(amp, freq, rate, i) for i in range(num)]
4691fb94e1709c5dc1a1dcb8ed02795d0b3cfe40
3,652,867
from keras.models import Model
from keras.layers import Conv2D, SpatialDropout2D
from keras.layers import UpSampling2D, Reshape, concatenate
from keras.applications.resnet50 import ResNet50


def ResNet_UNet_Dropout(dim=512, num_classes=6, dropout=0.5, final_activation=True):
    """
    Returns a ResNet50 Network with a U-Net like upsampling stage.
    Includes skip connections from previous ResNet50 layers.
    Uses a SpatialDropout2D on the final layer as introduced in
    https://arxiv.org/pdf/1411.4280.pdf, 2015.

    Input:
        dim - the size of the input image. Note that it should be a power of 2
            so that downsampling and upsampling always match.
            ie. 128 -> 64 -> 32 -> 64 -> 128
            This is only needed for training.
        num_classes - the number of classes in the whole problem. Used to
            determine the dimension of output map. i.e. model.predict()
            returns array that can be reshaped to (dim, dim, num_classes).

    Output:
        model - an uncompiled keras model. Check output shape before use.
    """
    # Import a headless ResNet50
    resnet = ResNet50(input_shape = (None, None, 3), include_top=False)

    # Attached U-net from second last layer - activation_49
    res_out = resnet.layers[-2].output

    # Standard U-Net upsampling 512 -> 256 -> 128 -> 64
    # Upsampling 1 - 512
    fs = 32
    up1 = UpSampling2D(size=(2,2))(res_out)
    up1_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up1)
    prev_layer = resnet.get_layer("activation_40").output
    merge1 = concatenate([prev_layer,up1_conv], axis = 3)
    merge1_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1)
    merge1_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1_conv1)

    # Upsampling 2 - 256
    fs = 32
    up2 = UpSampling2D(size = (2,2))(merge1_conv2)
    up2_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up2)
    prev_layer = resnet.get_layer("activation_22").output
    merge2 = concatenate([prev_layer,up2_conv], axis = 3)
    merge2_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge2)
    merge2_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge2_conv1)

    # Upsampling 3 & 4 - 128
    fs = 32
    up3 = UpSampling2D(size = (2,2))(merge2_conv2)
    up3_conv1 = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up3)
    up3_conv2 = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up3_conv1)
    up4 = UpSampling2D(size = (2,2))(up3_conv2)
    up4_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up4)
    prev_layer = resnet.get_layer("activation_1").output
    merge3 = concatenate([prev_layer,up4_conv], axis = 3)
    merge3_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge3)
    merge3_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge3_conv1)

    # Upsample 5 - 64
    fs = 32
    up5 = UpSampling2D(size=(2,2))(merge3_conv2)
    up5_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up5)
    merge5_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up5_conv)
    merge5_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge5_conv1)

    # Drop Out
    do = SpatialDropout2D(dropout)(merge5_conv2)

    # Activation and reshape for training
    if final_activation:
        activation = Conv2D(num_classes, 1,
activation="softmax")(do) else: activation = Conv2D(num_classes, 1, activation=None)(do) output = Reshape((dim*dim, num_classes))(activation) # Build model model = Model(inputs=[resnet.input], outputs=[output]) return model
6d99cbb9f5986a87e79653b03cc91ca652ca2d2d
3,652,868
import sqlite3


def _parse_accounts_ce(database, uid, result_path):
    """Parse accounts_ce.db.

    Args:
        database (SQLite3): target SQLite3 database.
        uid (str): user id.
        result_path (str): result path.
    """
    cursor = database.cursor()
    try:
        # `query` is defined at module level in the original source.
        cursor.execute(query)
    except sqlite3.Error as exception:
        logger.error('Accounts not found! {0!s}'.format(exception))
        # Without a successful execute there are no rows to fetch.
        return {}

    results = cursor.fetchall()
    num_of_results = len(results)

    data = {}
    header = ('name', 'type', 'password')
    data['title'] = f'accounts_ce_{uid}'
    data['number_of_data_headers'] = len(header)
    data['number_of_data'] = num_of_results
    data['data_header'] = header
    data_list = []

    if num_of_results > 0:
        for row in results:
            data_list.append((row[0], row[1], row[2]))
        data['data'] = data_list
    else:
        logger.warning('NO Accounts found!')

    return data
05538c21342f854d8465a415c32f5e2ea4f3f14d
3,652,869
from flask import current_app def resolve_grant_endpoint(doi_grant_code): """Resolve the OpenAIRE grant.""" # jsonresolver will evaluate current_app on import if outside of function. pid_value = '10.13039/{0}'.format(doi_grant_code) try: _, record = Resolver(pid_type='grant', object_type='rec', getter=Record.get_record).resolve(pid_value) return record except Exception: current_app.logger.error( 'Grant {0} does not exists.'.format(pid_value), exc_info=True) raise
e3217aeda5e6dec935c3ccb96e1164be66083e4f
3,652,870
from typing import Union from pathlib import Path def from_tiff(path: Union[Path, str]) -> OME: """Generate OME metadata object from OME-TIFF path. This will use the first ImageDescription tag found in the TIFF header. Parameters ---------- path : Union[Path, str] Path to OME TIFF. Returns ------- ome: ome_types.model.ome.OME ome_types.OME metadata object Raises ------ ValueError If the TIFF file has no OME metadata. """ with Path(path).open(mode="rb") as fh: try: offsetsize, offsetformat, tagnosize, tagnoformat, tagsize, codeformat = { b"II*\0": (4, "<I", 2, "<H", 12, "<H"), b"MM\0*": (4, ">I", 2, ">H", 12, ">H"), b"II+\0": (8, "<Q", 8, "<Q", 20, "<H"), b"MM\0+": (8, ">Q", 8, ">Q", 20, ">H"), }[fh.read(4)] except KeyError: raise ValueError(f"{path!r} does not have a recognized TIFF header") fh.read(4 if offsetsize == 8 else 0) fh.seek(unpack(offsetformat, fh.read(offsetsize))[0]) for _ in range(unpack(tagnoformat, fh.read(tagnosize))[0]): tagstruct = fh.read(tagsize) if unpack(codeformat, tagstruct[:2])[0] == 270: size = unpack(offsetformat, tagstruct[4 : 4 + offsetsize])[0] if size <= offsetsize: desc = tagstruct[4 + offsetsize : 4 + offsetsize + size] break fh.seek(unpack(offsetformat, tagstruct[-offsetsize:])[0]) desc = fh.read(size) break else: raise ValueError(f"No OME metadata found in file: {path}") if desc[-1] == 0: desc = desc[:-1] return from_xml(desc.decode("utf-8"))
98ed750bba4b6aeaa791cc9041cf394e43fc50f9
3,652,871
def create_table_string(data, highlight=(True, False, False, False), table_class='wikitable', style=''): """ Takes a list and returns a wikitable. @param data: The list that is converted to a wikitable. @type data: List (Nested) @param highlight: Tuple of rows and columns that should be highlighted. (first row, last row, left column, right column) @type highlight: Tuple @param table_class: A string containing the class description. See wikitable help. @type table_class: String @param style: A string containing the style description. See wikitable help. @type style: String """ last_row = len(data) - 1 last_cell = len(data[0]) - 1 table = '{{| class="{}" style="{}"\n'.format(table_class, style) for key, row in enumerate(data): if key == 0 and highlight[0] or key == last_row and highlight[1]: row_string = '|-\n! ' + '\n! '.join(cell for cell in row) else: row_string = '|-' cells = '' for ckey, cell in enumerate(row): if ckey == 0 and highlight[2]: cells += '\n! ' + cell elif ckey == last_cell and highlight[3]: cells += '\n! ' + cell else: cells += '\n| ' + cell row_string += cells table += row_string + '\n' table += '|}' return table
f586fac681e1b4f06ad5e2a1cc451d9250fae929
3,652,873
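Example producing a two-column wikitable with a highlighted header row:

data = [
    ["Year", "Population"],
    ["2000", "1000"],
    ["2010", "1250"],
]
print(create_table_string(data, highlight=(True, False, False, False)))
# {| class="wikitable" style=""
# |-
# ! Year
# ! Population
# |-
# | 2000
# | 1000
# |-
# | 2010
# | 1250
# |}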
def registry_dispatcher_document(self, code, collection):
    """
    This task receives a document code that should be queued for DOI registry
    """
    return _registry_dispatcher_document(code, collection, skip_deposited=False)
530b2d183e6e50dc475ac9ec258fc13bea76aa8d
3,652,875
from typing import Collection import requests def get_reddit_oauth_scopes( scopes: Collection[str] | None = None, ) -> dict[str, dict[str, str]]: """Get metadata on the OAUTH scopes offered by the Reddit API.""" # Set up the request for scopes scopes_endpoint = "/api/v1/scopes" scopes_endpoint_url = REDDIT_BASE_URL + scopes_endpoint headers = {"User-Agent": USER_AGENT} query_params = {} if scopes: query_params["scopes"] = scopes # Make and process the request response = requests.get( scopes_endpoint_url, params=query_params, headers=headers, timeout=REQUEST_TIMEOUT_S, ) response.raise_for_status() response_json: dict[str, dict[str, str]] = response.json() return response_json
0a55facfd07af259c1229aa30417b516b268602b
3,652,876
def beta_reader(direc): """ Function to read in beta values for each tag """ path = direc H_beta = np.loadtxt('%s/Beta Values/h_beta_final2.txt' % path) Si_beta = np.loadtxt('%s/Beta Values/si_beta_final2.txt' % path) He_emi_beta = np.loadtxt('%s/Beta Values/he_emi_beta_final2.txt' % path) He_cyg_beta = np.loadtxt('%s/Beta Values/he_cyg_beta_final2.txt' % path) He_abs_beta = np.loadtxt('%s/Beta Values/he_abs_beta_final2.txt' % path) H_alp_beta = np.loadtxt('%s/Beta Values/h_alp_beta_final2.txt' % path) Ca_beta = np.loadtxt('%s/Beta Values/ca_beta_final2.txt' % path) iib_dp_beta = np.loadtxt('%s/Beta Values/iibdp_beta_final2.txt' % path) Fe_beta = np.loadtxt('%s/Beta Values/fe_beta_final2.txt' % path) S_beta = np.loadtxt('%s/Beta Values/s_beta_final2.txt' % path) return H_beta,Si_beta,He_emi_beta,He_cyg_beta,He_abs_beta,H_alp_beta,Ca_beta,iib_dp_beta,Fe_beta,S_beta
ab8aef0acd6a9cd86301d5cc99e45511cf193a10
3,652,877
def get_logging_format():
    """return the format string for the logger"""
    fmt = "[%(asctime)s] %(levelname)s:%(message)s"
    return fmt
3380cdd34f1a44cf15b9c55d2c05d3ecb81116cb
3,652,879
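Typical wiring with the standard logging module:

import logging

logging.basicConfig(format=get_logging_format(), level=logging.INFO)
logging.info("pipeline started")
# e.g. [2024-05-01 12:00:00,000] INFO:pipeline started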
def plot_hydrogen_balance(results): """ Plot the hydrogen balance over time """ n_axes = results["times"].shape[0] fig = plt.figure(figsize=(6.0, 5.5)) fig.suptitle('Hydrogen production and utilization over the year', fontsize=fontsize+1, fontweight='normal', color='k') axes = fig.subplots(n_axes) for index, ax in enumerate(axes): x1, y1 = results["times"][index, :] / 24, +results["H2_produced"][index, :] x2, y2 = results["times"][index, :] / 24, -results["H2_utilized"][index, :] for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize) for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize) ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1)) ax.plot(x1, y1, linewidth=0.75, linestyle='-', color='k', label="Produced") ax.plot(x2, y2, linewidth=0.75, linestyle='-', color='r', label="Utilized") ax.set_ylabel('Mass flow (kg/s)', fontsize=fontsize, color='k', labelpad=fontsize) if index + 1 == n_axes: ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize) ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0) dy = max(np.max(y1)-np.min(y2), 0.02) ax.set_ylim([np.min(y2)-dy/5, np.max(y1)+dy/5]) fig.tight_layout() return fig, axes
e352b1885b53ec9f5fc41f32f67afc5f86cae647
3,652,880
def ref_dw(fc, fmod): """Give the reference value for roughness by linear interpolation from the data given in "Psychoacoustical roughness:implementation of an optimized model" by Daniel and Weber in 1997 Parameters ---------- fc: integer carrier frequency fmod: integer modulation frequency Output ------ roughness reference values from the article by Daniel and Weber """ if fc == 125: fm = np.array( [ 1.0355988, 10.355987, 11.132686, 13.851132, 18.511328, 20.064724, 24.724918, 31.32686, 41.423946, 49.967636, 57.34628, 64.33657, 72.10356, 90.74434, 79.4822, 86.084145, 91.909386, 100.45307, ] ) R = np.array( [ 0.0, 0.04359673, 0.09468665, 0.16416894, 0.19482289, 0.27656674, 0.3113079, 0.34196186, 0.32356948, 0.26226157, 0.20299728, 0.15803815, 0.11512262, 0.0619891, 0.09264305, 0.07016349, 0.05177112, 0.03950954, ] ) if fc == 250: fm = np.array( [ 0.7373272, 3.9324117, 9.585254, 14.2549925, 16.71275, 19.907835, 22.611366, 23.594471, 29.493088, 30.47619, 37.112137, 41.29032, 47.926266, 50.13825, 51.121353, 53.08756, 54.07066, 56.774193, 58.248848, 62.427036, 61.68971, 69.308754, 68.57143, 71.27496, 73.73272, 73.97849, 75.207375, 79.139786, 79.139786, 84.792625, 90.19969, 97.81874, 104.70046, 112.31951, 120.92166, 129.76959, ] ) R = np.array( [ 0.00432277, 0.00576369, 0.06340057, 0.16138329, 0.17435159, 0.26945245, 0.32132566, 0.3443804, 0.42651296, 0.44668588, 0.47694525, 0.4668588, 0.42651296, 0.46253604, 0.41210374, 0.4020173, 0.43948126, 0.37463978, 0.39193085, 0.3631124, 0.3429395, 0.3040346, 0.28242075, 0.27521613, 0.259366, 0.24207492, 0.24351585, 0.2204611, 0.20461094, 0.17146975, 0.14697406, 0.11815562, 0.09942363, 0.07636888, 0.05619597, 0.04322766, ] ) if fc == 500: fm = np.array( [ 7.6375403, 15.79288, 20.841423, 26.666666, 30.93851, 34.43366, 40.2589, 44.919094, 49.190937, 51.521034, 57.34628, 64.33657, 69.77346, 74.04531, 81.42395, 87.63754, 94.23948, 102.78317, 116.763756, 129.57928, 140.84143, 149.77347, 160.2589, ] ) R = np.array( [ 0.04972752, 0.1253406, 0.23569483, 0.35013625, 0.46457765, 0.5258856, 0.619891, 0.67302454, 0.69346046, 0.69550407, 0.6873297, 0.67098093, 0.6321526, 0.57901907, 0.5074932, 0.4400545, 0.38487738, 0.3153951, 0.22752044, 0.16621253, 0.11920981, 0.08651226, 0.06811989, ] ) if fc == 1000: fm = np.array( [ 0.0, 3.884415, 9.7237625, 17.147604, 29.302307, 37.933605, 48.504757, 55.145306, 55.948395, 57.480103, 60.618927, 63.314735, 65.28852, 67.201035, 69.55657, 76.14433, 77.2943, 82.847725, 83.352325, 88.26008, 89.019806, 93.92756, 94.4309, 97.78904, 99.06719, 104.23258, 103.963005, 106.03293, 109.89504, 111.18953, 115.05101, 117.38172, 119.95311, 125.630646, 132.60141, 137.24963, 144.47617, 151.19432, 159.97737, ] ) R = np.array( [ 0.0, 0.00211198, 0.03450088, 0.1382977, 0.40437, 0.60555416, 0.80238307, 0.89103884, 0.9516347, 0.90182984, 0.9753813, 0.92339617, 0.9969634, 0.92983717, 0.9882475, 0.9556905, 0.92104256, 0.89138556, 0.86107534, 0.83503467, 0.7960629, 0.7700222, 0.736826, 0.71946436, 0.6819286, 0.6529984, 0.6284707, 0.62555665, 0.5764418, 0.5764243, 0.52586645, 0.52727795, 0.48683867, 0.44491437, 0.40008652, 0.3726063, 0.3205599, 0.29016566, 0.24531329, ] ) if fc == 2000: fm = np.array( [ 0.0, 4.4051557, 7.5956764, 10.048887, 12.017292, 15.69636, 17.911657, 20.366364, 20.619616, 25.28251, 27.987852, 30.20053, 31.18548, 34.37525, 34.38161, 39.782192, 39.298134, 42.23989, 42.981316, 45.18539, 44.95683, 46.663754, 48.13538, 50.358532, 53.04068, 55.264206, 56.971127, 58.68778, 60.890354, 62.367218, 62.84529, 65.06246, 67.00842, 68.48715, 71.90736, 
73.62214, 76.79096, 79.24305, 81.67831, 85.10337, 91.45038, 93.655945, 96.586105, 96.33435, 98.04801, 106.5901, 107.57281, 115.62524, 118.07209, 120.26419, 121.97673, 129.54285, 131.255, 134.91576, 135.15628, 136.87106, 144.92911, 159.83092, ] ) R = np.array( [ 0.00271003, 0.00538277, 0.04194128, 0.06631085, 0.10694477, 0.1407891, 0.18955104, 0.21934068, 0.250504, 0.30331025, 0.35477808, 0.39405492, 0.41708192, 0.4509304, 0.47396567, 0.54031587, 0.55929023, 0.5809457, 0.60803974, 0.6161512, 0.674419, 0.65407926, 0.66761696, 0.74483424, 0.71229106, 0.7908634, 0.7705236, 0.7854143, 0.78810567, 0.8206137, 0.779959, 0.83549607, 0.79482895, 0.83411205, 0.8164678, 0.8245834, 0.78255093, 0.8028555, 0.76218426, 0.76215523, 0.7119658, 0.7254973, 0.7051472, 0.67940396, 0.6834545, 0.6088561, 0.62375295, 0.5478037, 0.549138, 0.5138889, 0.5138744, 0.4487694, 0.44739988, 0.41484842, 0.39994115, 0.40805677, 0.3524327, 0.27371538, ] ) if fc == 4000: fm = np.array( [ 3.1950846, 16.221199, 23.840246, 29.984638, 30.230415, 37.112137, 37.603687, 45.714287, 51.85868, 57.265743, 63.90169, 68.57143, 74.47005, 78.156685, 82.33487, 88.97082, 98.064514, 108.14132, 115.02304, 123.870964, 128.78648, 133.21045, 143.04147, 151.39784, 155.08449, 157.29646, 160.24577, ] ) R = np.array( [ 0.00432277, 0.11383285, 0.23054755, 0.29538906, 0.31123918, 0.39337176, 0.41066283, 0.50864553, 0.5907781, 0.62680113, 0.6426513, 0.65273774, 0.64841497, 0.6440922, 0.6152738, 0.5720461, 0.5158501, 0.45677233, 0.41210374, 0.3631124, 0.34149855, 0.3184438, 0.2795389, 0.24495678, 0.24783862, 0.23919308, 0.24063401, ] ) if fc == 8000: fm = np.array( [ 4.6498036, 7.1022663, 8.569778, 16.16957, 23.037289, 24.018497, 25.735521, 27.451048, 30.885843, 33.578465, 34.319515, 38.48526, 40.206398, 42.654747, 45.355972, 50.995964, 52.953144, 55.896774, 56.631092, 60.54957, 61.772808, 63.238823, 66.18058, 68.86871, 70.58611, 72.78196, 74.744, 78.409225, 80.61181, 82.31723, 86.23272, 87.20532, 90.384995, 91.11295, 96.73499, 100.39909, 106.50631, 117.26071, 127.28154, 137.0596, 145.37276, 154.66376, 159.55597, ] ) R = np.array( [ 0.0053807, 0.02704024, 0.0256728, 0.08251926, 0.14614701, 0.15562384, 0.17186953, 0.18269515, 0.21789658, 0.22329386, 0.24903294, 0.27338803, 0.30453888, 0.31129324, 0.3478559, 0.3952338, 0.39521724, 0.42364773, 0.42499653, 0.43986857, 0.4398582, 0.4330707, 0.4547261, 0.44386315, 0.46146387, 0.43976498, 0.4573636, 0.44107231, 0.4437637, 0.4180039, 0.42203578, 0.40034726, 0.39761028, 0.3759238, 0.35826093, 0.3379046, 0.30533242, 0.2686558, 0.23334044, 0.20480223, 0.18711658, 0.1667126, 0.16396113, ] ) return np.interp(fmod, fm, R)
adf7a67c7b9d4448074f6ccd5fbf8e62c52b113d
3,652,881
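The lookup simply interpolates along the modulation-frequency axis for one of the tabulated carriers, e.g.:

# Reference roughness at a 1 kHz carrier with a 70 Hz modulation frequency
r = ref_dw(fc=1000, fmod=70)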
from typing import Optional


def points_2d_inside_image(
    width: int,
    height: int,
    camera_model: str,
    points_2d: np.ndarray,
    points_3d: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Returns the indices for an array of 2D image points that are inside the image canvas.

    Args:
        width: Pixel width of the image canvas.
        height: Pixel height of the image canvas.
        camera_model: One of `opencv_pinhole`, `opencv_fisheye`, `pd_fisheye`.
            More details in :obj:`~.model.sensor.CameraModel`.
        points_2d: A matrix with dimensions (nx2) containing the points that should be tested
            if inside the image canvas. Points must be in image coordinate system (x,y).
        points_3d: Optional array of size (nx3) which provides the 3D camera coordinates for each point.
            Required for camera models `opencv_pinhole` and `opencv_fisheye`.

    Returns:
        An array with dimensions (n,).
    """
    if camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE) and points_3d is None:
        raise ValueError(f"`points_3d` must be provided for camera model {camera_model}")

    # Only compare lengths when 3D points were actually supplied;
    # `len(None)` would raise a TypeError for the pd_fisheye case.
    if points_3d is not None and len(points_2d) != len(points_3d):
        raise ValueError(
            f"Mismatch in length between `points_2d` and `points_3d` with {len(points_2d)} vs. {len(points_3d)}"
        )

    return np.where(
        (points_2d[:, 0] >= 0)
        & (points_2d[:, 0] < width)
        & (points_2d[:, 1] >= 0)
        & (points_2d[:, 1] < height)
        & (points_3d[:, 2] > 0 if camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE) else True)
    )
95d235e475555c184e95b1e30c3cac686fe3e65f
3,652,882
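A small sketch, assuming the module-level CAMERA_MODEL_* constants equal the strings listed in the docstring:

import numpy as np

points_2d = np.array([[10.0, 20.0], [-5.0, 3.0], [100.0, 50.0]])
points_3d = np.array([[0.1, 0.2, 5.0], [0.0, 0.0, 2.0], [1.0, 1.0, -1.0]])

inside = points_2d_inside_image(
    width=64, height=64, camera_model="opencv_pinhole",  # assumed constant value
    points_2d=points_2d, points_3d=points_3d,
)
# inside[0] -> array([0]): only the first point is on-canvas with z > 0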
import torch


def list2tensors(some_list):
    """Convert each element of a list into a ``torch.Tensor``.

    Args:
        some_list (list): List of tensor-compatible values
            (numbers or nested sequences of numbers).

    Shape:
        - Input: list
        - Output: list of tensors
    """
    t_list = []
    for i in some_list:
        t_list.append(torch.tensor(i))
    return t_list
35efe7c13c8c4f75266eceb912e8afccd25408cf
3,652,883
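Each element is converted independently, so mixed shapes are fine:

list2tensors([1, [2, 3], [[4.0, 5.0]]])
# [tensor(1), tensor([2, 3]), tensor([[4., 5.]])]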
def interpret_input(inputs): """ convert input entries to usable dictionaries """ for key, value in inputs.items(): # interpret each line's worth of entries if key in ['v0', 'y0', 'angle']: # for variables, intepret distributions converted = interpret_distribution(key, value) # use a separate method to keep things clean elif key == 'metric': # metrics are easy, they're just a list converted = list(x.strip().lower() for x in value.split(',')) for c in converted: # check the metrics are valid entries if c not in ['mean', 'std', 'percentile']: raise IOError('Unrecognized metric:', c) else: raise IOError('Unrecognized keyword entry: {} = {}'.format(key, value)) inputs[key] = converted # replace the key with the converted values return inputs
5a68f8e551ae3e31e107ab5a6a9aacc2db358263
3,652,884
def time(prompt=None, output_hour_clock=24, milli_seconds=False, fill_0s=True, allow_na=False):
    """
    Repeatedly ask the user to input hours, minutes and seconds until they input valid values
    and return this in a defined format

    :param prompt: Message to display to the user before asking them for inputs. Default: None
    :param output_hour_clock: Whether to output in 24 hour clock or in 12 hour clock with AM/PM. Default: 24
    :param milli_seconds: Whether or not to allow more accuracy in seconds. Default: False
    :param fill_0s: Whether or not to fill numerical times with leading 0s. Default: True
    :param allow_na: Whether or not to allow empty inputs too. Default: False
    """
    # The empty string is only a permitted extra value when empty inputs are allowed
    extras = [""] if allow_na else None
    output_hour_clock = assert_valid(output_hour_clock, SpecNumList([12, 24], None, True), "param output_hour_clock")
    if prompt is not None:
        print(prompt, "\n")
    input_hour_clock = validate_input(SpecNumList([12, 24], None, True), "Input hour clock (12/24): ")
    if input_hour_clock == 12:
        hours = validate_input(SpecNumRange(1, 12, None, True, extras), "Hours (12 hour clock): ")
        period = validate_input(SpecStr(["am", "pm"], extra_values=extras), "AM or PM? ")
        if hours == 12:
            hours = 0
        if period == "pm":
            hours += 12
    else:
        hours = validate_input(SpecNumRange(0, 23, None, True, extras), "Hours (24 hour clock): ")
    minutes = validate_input(SpecNumRange(0, 59, None, True, extras), "Minutes: ")
    if milli_seconds:
        seconds = validate_input(SpecNumRange(0, 59.999999, 6, False, extras), "Seconds including decimal: ")
    else:
        seconds = validate_input(SpecNumRange(0, 59, 0, True, extras), "Seconds: ")
    if hours is not None and output_hour_clock == 12:
        if hours < 12:
            period = "AM"
        else:
            period = "PM"
        hours %= 12
        if hours == 0:
            hours = 12
    if fill_0s:
        if hours is not None and hours < 10:
            hours = "0" + str(hours)
        if minutes is not None and minutes < 10:
            minutes = "0" + str(minutes)
        if seconds is not None and seconds < 10:
            seconds = "0" + str(seconds)
    to_return = "{}:{}:{}".format(hours, minutes, seconds)
    if output_hour_clock == 12:
        to_return += " {}".format(period)
    return to_return
82c0d8fae1f82e3f19b6af220ada5fadcea63bb3
3,652,885
def byol_a_url(ckpt, refresh=False, *args, **kwargs): """ The model from URL ckpt (str): URL """ return byol_a_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
c9a8ce31ae5b6b59832d8ae9bb4e05d697f96cc9
3,652,886
def bellman_ford(g, start): """ Given an directed graph with possibly negative edge weights and with n vertices and m edges as well as its vertex s, compute the length of shortest paths from s to all other vertices of the graph. Returns dictionary with vertex as key. - If vertex not present in the dictionary, then it is not reachable from s - If distance to vertex is None, then this vertex is reachable from a negative cycle - Otherwise, value of a dictionary is the length of a path from s to a vertex """ dist = {} prev = {} dist[start] = 0 def __construct_path(t): path = [] path.append(t) u = prev[t] while u in prev and u != t: path.append(u) u = prev[u] path.reverse() return path c = Graph() for _ in g.get_vertices(): relaxed = False for e in g.get_edges(): u = e.start v = e.end w = e.weight if u not in dist: continue if v not in dist or dist[u] + w < dist[v]: dist[v] = dist[u] + w prev[v] = u relaxed = True c.add_edge(u, v, w) if not relaxed: return dist ncv = set() for e in g.get_edges(): u = e.start v = e.end w = e.weight if u not in dist: continue if v in dist and dist[u] + w < dist[v]: for x in __construct_path(u): ncv.add(x) dist[v] = dist[u] + w prev[v] = u for v in ncv: if v not in dist: continue if dist[v] is None: continue visited = set() q = deque() q.append(v) while q: x = q.popleft() dist[x] = None visited.add(x) for e in c.get_edges(x): if e.end in visited: continue q.append(e.end) return dist
dd09de61d26a6ee988e549c5a0f8aafdf54b78ab
3,652,887
import locale from datetime import datetime def _read_date(settings_file): """Get the data from the settings.xml file Parameters ---------- settings_file : Path path to settings.xml inside open-ephys folder Returns ------- datetime start time of the recordings Notes ----- The start time is present in the header of each file. This might be useful if 'settings.xml' is not present. """ locale.setlocale(locale.LC_TIME, 'en_US.utf-8') root = ElementTree.parse(settings_file).getroot() for e0 in root: if e0.tag == 'INFO': for e1 in e0: if e1.tag == 'DATE': break return datetime.strptime(e1.text, '%d %b %Y %H:%M:%S')
2f762bd7e190323acc44e5408c5f0977069d8828
3,652,888