content : string (lengths 35 to 762k)
sha1    : string (lengths 40 to 40)
id      : int64 (0 to 3.66M)
def _min(group_idx, a, size, fill_value, dtype=None):
    """Same as aggregate_numpy.py"""
    dtype = minimum_dtype(fill_value, dtype or a.dtype)
    dmax = np.iinfo(a.dtype).max if issubclass(a.dtype.type, np.integer) \
        else np.finfo(a.dtype).max
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != dmax:
        ret[group_idx] = dmax  # min starts from maximum
    np.minimum.at(ret, group_idx, a)
    return ret
a11d1e5bcf0c3aca81cdc6081fc0dfd186fa499e
3,649,396
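The `minimum_dtype` helper above is assumed to come from the surrounding module; the scatter-min itself needs only NumPy. A minimal sketch of that pattern with made-up data:

# Illustrative only: per-group minimum via np.minimum.at (unbuffered scatter).
import numpy as np

group_idx = np.array([0, 0, 1, 2, 2])
a = np.array([4.0, 1.0, 7.0, 3.0, 5.0])

ret = np.full(3, np.inf)          # start every group at +inf
np.minimum.at(ret, group_idx, a)  # in-place per-group minimum
print(ret)                        # [1. 7. 3.]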
def find_word(ch, row, boggle_lst, used_positions_lst, current, ans):
    """
    :param ch: int, index for each character in a row
    :param row: int, index for each row in the boggle list
    :param boggle_lst: list, list for all rows
    :param used_positions_lst: tuple, index of ch and row that indicates the position of a used character
    :param current: str, current character composition that might become a vocabulary
    :param ans: list, answer list for all found vocabularies
    :return: answer list (ans)
    """
    if has_prefix(current):  # process only suitable prefixes to save time
        # Base Case
        if len(current) >= 4:  # consider those with at least 4 characters
            if current in dictionary_lst:
                if current not in ans:  # avoid repeated found words
                    print('Found: ' + current)
                    ans.append(current)
        # Recursive
        # Loop over surrounding characters
        for i in range(-1, 2):
            for j in range(-1, 2):
                # Make sure it won't loop outside the bound
                if 0 <= ch+i < len(boggle_lst[row]):
                    if 0 <= row+j < len(boggle_lst):
                        # Make sure current and used positions are not considered
                        if i != 0 or j != 0:
                            if not (ch+i, row+j) in used_positions_lst:
                                # Choose
                                current += boggle_lst[row+j][ch+i]
                                # Explore
                                if len(current) > 1:
                                    used_positions_lst.append((ch, row))
                                find_word(ch + i, row + j, boggle_lst, used_positions_lst, current, ans)
                                # Un-choose
                                used_positions_lst.pop()
                                current = current[:len(current) - 1]
    return ans
c896c4b4ac7b6816d0d4a35135ea2c87891f214e
3,649,397
def lazy_tt_ranks(tt):
    """Returns static TT-ranks of a TensorTrain if defined, and dynamic otherwise.

    This operation returns a 1-D integer numpy array of TT-ranks if they are
    available on the graph compilation stage and 1-D integer tensor of dynamic
    TT-ranks otherwise.

    Args:
      tt: `TensorTrain` object.

    Returns:
      A 1-D numpy array or `tf.Tensor`
    """
    static_tt_ranks = tt.get_tt_ranks()
    if static_tt_ranks.is_fully_defined():
        return np.array(static_tt_ranks.as_list())
    else:
        return tt_ranks(tt)
6792b269c9c27dc7ad83202b7920f44ae8ee3ff8
3,649,398
def word_errors(reference, hypothesis, ignore_case=False, delimiter=' '):
    """Compute the levenshtein distance between reference sequence and
    hypothesis sequence in word-level.

    :param reference: The reference sentence.
    :type reference: str
    :param hypothesis: The hypothesis sentence.
    :type hypothesis: str
    :param ignore_case: Whether case-sensitive or not.
    :type ignore_case: bool
    :param delimiter: Delimiter of input sentences.
    :type delimiter: char
    :return: Levenshtein distance and word number of reference sentence.
    :rtype: list
    """
    if ignore_case:
        reference = reference.lower()
        hypothesis = hypothesis.lower()

    ref_words = list(filter(None, reference.split(delimiter)))
    hyp_words = list(filter(None, hypothesis.split(delimiter)))

    edit_distance = _levenshtein_distance(ref_words, hyp_words)
    # `editdistance.eval` precision is less than `_levenshtein_distance`
    # edit_distance = editdistance.eval(ref_words, hyp_words)
    return float(edit_distance), len(ref_words)
11473b01bd222c4550403afb07303a14cd123720
3,649,399
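`_levenshtein_distance` is assumed to be defined elsewhere in the same module; a minimal sketch of the standard dynamic-programming edit distance it is expected to compute over word lists:

# Sketch only: word-level Levenshtein distance with a rolling DP row, O(n*m).
def _levenshtein_distance(ref, hyp):
    n, m = len(ref), len(hyp)
    prev = list(range(m + 1))
    for i in range(1, n + 1):
        cur = [i] + [0] * m
        for j in range(1, m + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            cur[j] = min(prev[j] + 1,         # deletion
                         cur[j - 1] + 1,      # insertion
                         prev[j - 1] + cost)  # substitution
        prev = cur
    return prev[m]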
def augment_note_matrix(nmat, length, shift):
    """Pitch shift a note matrix in R_base format."""
    aug_nmat = nmat.copy()
    aug_nmat[0: length, 1] += shift
    return aug_nmat
a1ff855266e44012e05347a95abfa5324fd6e4e6
3,649,400
def breed_list(request):
    """Fixture that returns the list of all dog breeds."""
    return request.param
29394d8a97444680acc3a0b7ff0f1b2949a5609d
3,649,401
def norm_layer(norm_type, nc):
    """Return a 1-D normalization layer of the given type for `nc` channels."""
    # normalization layer 1d
    norm = norm_type.lower()
    if norm == 'batch':
        layer = batch_norm_1d(nc)
    elif norm == 'layer':
        layer = nn.LayerNorm(nc)
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm)
    return layer
05207a65ca4afd551230b3b3f2e87bce58905b6d
3,649,402
def tail(file, n=1, bs=1024):
    """Read Last n Lines of file
    credit: https://www.roytuts.com/read-last-n-lines-from-file-using-python/
            https://github.com/roytuts/python/blob/master/read-lines-from-last/last_lines_file.py
    """
    f = open(file)
    f.seek(0, 2)
    l = 1 - f.read(1).count('\n')
    B = f.tell()
    while n >= l and B > 0:
        block = min(bs, B)
        B -= block
        f.seek(B, 0)
        l += f.read(block).count('\n')
    f.seek(B, 0)
    l = min(l, n)
    lines = f.readlines()[-l:]
    f.close()
    return lines
db7443e4af1028565491cb06944717488506b2b7
3,649,404
import random def create_hparams(state, FLAGS): # pylint: disable=invalid-name """Creates hyperparameters to pass into Ray config. Different options depending on search or eval mode. Args: state: a string, 'train' or 'search'. FLAGS: parsed command line flags. Returns: tf.hparams object. """ epochs = 0 tf.logging.info('data path: {}'.format(FLAGS.data_path)) hparams = tf.contrib.training.HParams( train_size=FLAGS.train_size, validation_size=FLAGS.val_size, dataset=FLAGS.dataset, data_path=FLAGS.data_path, expsize=FLAGS.expsize, batch_size=FLAGS.bs, max_seq_length = FLAGS.max_seq_length, gradient_clipping_by_global_norm=0.1, explore=FLAGS.explore, aug_policy=FLAGS.aug_policy, recompute_dset_stats=FLAGS.recompute_dset_stats, lr=FLAGS.lr, weight_decay_rate=FLAGS.wd, test_batch_size=FLAGS.test_bs) if state == 'train': hparams.add_hparam('no_aug', FLAGS.no_aug) hparams.add_hparam('development', FLAGS.development) hparams.add_hparam('use_hp_policy', FLAGS.use_hp_policy) hparams.add_hparam('limit_test_data', False) if FLAGS.use_hp_policy: if FLAGS.hp_policy == 'random': tf.logging.info('RANDOM SEARCH') parsed_policy = [] for i in range(NUM_HP_TRANSFORM * 2): if i % 2 == 0: parsed_policy.append(random.random()) # --- probability else: parsed_policy.append(random.random()) # --- magnitude elif FLAGS.hp_policy.endswith('.txt') or FLAGS.hp_policy.endswith( '.p'): # --- will be loaded in in data_utils parsed_policy = FLAGS.hp_policy else: # --- parse input into a fixed augmentation policy print(FLAGS.hp_policy) print(type(FLAGS.hp_policy)) parsed_policy = FLAGS.hp_policy.split(',') parsed_policy = [float(p) for p in parsed_policy] hparams.add_hparam('hp_policy', parsed_policy) hparams.add_hparam('hp_policy_epochs', FLAGS.hp_policy_epochs) hparams.add_hparam('flatten', FLAGS.flatten) elif state == 'search': hparams.add_hparam('no_aug', False) hparams.add_hparam('development', FLAGS.development) hparams.add_hparam('use_hp_policy', True) hparams.add_hparam('limit_test_data', True) hparams.add_hparam('hp_policy', [0 for _ in range(2 * NUM_HP_TRANSFORM)]) # --- default start values of 0 else: raise ValueError('unknown state') # -- Add new model here if FLAGS.model_name == 'bert': hparams.add_hparam('model_name', 'bert') else: raise ValueError('Not Valid Model Name: %s' % FLAGS.model_name) if FLAGS.epochs > 0: tf.logging.info('overwriting with custom epochs') epochs = FLAGS.epochs hparams.add_hparam('num_epochs', epochs) tf.logging.info('epochs: {}, lr: {}, wd: {}'.format( hparams.num_epochs, hparams.lr, hparams.weight_decay_rate)) return hparams
b61c4d21dbc232700d4eb50aadd0e4699ed43b96
3,649,405
def resources_match(resource_one, resource_two):
    """
    Checks if resource_one and resource_two match.
    If two folders, recursively compares contents.
    If two files, compares versions.
    """
    if resource_one['type'] == FOLDER:
        match = recursively_compare_folders(resource_one, resource_two)
    else:
        match = compare_versions(resource_one, resource_two)
    return match
824200e5a107b612981dab9c77e34386f191d8ab
3,649,406
def read():
    """Read content of predefined numpy archive file."""
    return _read(tml.value('numpy', section='data', subkey='fname'))
decee54289f532e6f5c385336b1a98536595139a
3,649,407
def elexon_b1630(args):
    """ Actual or forecast Wind & Solar Generation """
    if not check_api_key(args):
        return None
    api = B1630(args.apikey)
    if args.settlement_period is None:
        print("A settlement period should be supplied using the --settlement-period flag (range 1 to 50). "
              "Defaulting to 1")
    if args.date is None:
        print("A date should be supplied using the --date flag. Format is YYYY-MM-DD. Defaulting to today")
    if not api.get_data(**{'SettlementDate': args.date or date.today().strftime("%Y-%m-%d"),
                           'Period': args.settlement_period or 1}):
        print("No data returned.")
        return None

    fmt = StdoutFormatter("10s", "6s", "6s", "10.1f", "20s", "30s")
    print("\n" + fmt.titles('Date', 'Period', 'Active', 'Output', 'Type', 'Reference'))
    for item in sorted(api.items, key=lambda xxx: xxx['documentid']):
        print(fmt.row(item['settlementdate'],
                      str(item['settlementperiod']),
                      str(item['activeflag']),
                      float(item['quantity']),
                      item.get('powersystemresourcetype', 'n/a'),
                      item['documentid'] + " - " + item['documentrevnum']))
    return api
151d2cd7aa44d90fd46e647d69131f6ac4b37270
3,649,408
def null_count(df):
    """
    df is a dataframe
    Check a dataframe for nulls and return the number of missing values.
    """
    return df.isnull().sum().sum()
6e3eb91a3eaec456bb828b44be0780b64470e823
3,649,409
def rpy2r(roll, pitch=None, yaw=None, *, unit="rad", order="zyx"): """ Create an SO(3) rotation matrix from roll-pitch-yaw angles :param roll: roll angle :type roll: float :param pitch: pitch angle :type pitch: float :param yaw: yaw angle :type yaw: float :param unit: angular units: 'rad' [default], or 'deg' :type unit: str :param order: rotation order: 'zyx' [default], 'xyz', or 'yxz' :type order: str :return: SO(3) rotation matrix :rtype: ndarray(3,3) :raises ValueError: bad argument - ``rpy2r(⍺, β, γ)`` is an SO(3) orthonormal rotation matrix (3x3) equivalent to the specified roll (⍺), pitch (β), yaw (γ) angles angles. These correspond to successive rotations about the axes specified by ``order``: - 'zyx' [default], rotate by γ about the z-axis, then by β about the new y-axis, then by ⍺ about the new x-axis. Convention for a mobile robot with x-axis forward and y-axis sideways. - 'xyz', rotate by γ about the x-axis, then by β about the new y-axis, then by ⍺ about the new z-axis. Convention for a robot gripper with z-axis forward and y-axis between the gripper fingers. - 'yxz', rotate by γ about the y-axis, then by β about the new x-axis, then by ⍺ about the new z-axis. Convention for a camera with z-axis parallel to the optic axis and x-axis parallel to the pixel rows. - ``rpy2r(RPY)`` as above but the roll, pitch, yaw angles are taken from ``RPY`` which is a 3-vector with values (⍺, β, γ). .. runblock:: pycon >>> from spatialmath.base import * >>> rpy2r(0.1, 0.2, 0.3) >>> rpy2r([0.1, 0.2, 0.3]) >>> rpy2r([10, 20, 30], unit='deg') :seealso: :func:`~eul2r`, :func:`~rpy2tr`, :func:`~tr2rpy` """ if base.isscalar(roll): angles = [roll, pitch, yaw] else: angles = base.getvector(roll, 3) angles = base.getunit(angles, unit) if order == "xyz" or order == "arm": R = rotx(angles[2]) @ roty(angles[1]) @ rotz(angles[0]) elif order == "zyx" or order == "vehicle": R = rotz(angles[2]) @ roty(angles[1]) @ rotx(angles[0]) elif order == "yxz" or order == "camera": R = roty(angles[2]) @ rotx(angles[1]) @ rotz(angles[0]) else: raise ValueError("Invalid angle order") return R
2e9217396408452f54a663697d317a7fd7807c81
3,649,410
def plot3d_embeddings(dataset, embeddings, figure=None):
    """Plot sensor embedding in 3D space using mayavi.

    Given the dataset and a sensor embedding matrix, each sensor is shown as
    a sphere in the 3D space. Note that the shape of embedding matrix is
    (num_sensors, 3) where num_sensors corresponds to the length of
    ``dataset.sensor_list``. All embedding vectors range between 0 and 1.

    Args:
        dataset (:obj:`~pymrt.casas.CASASDataset`): CASAS smart home dataset.
        embeddings (:obj:`numpy.ndarray`): 3D sensor vector embedding.
    """
    show_figure = False
    if figure is None:
        show_figure = True
        figure = mlab.figure('Sensor Embedding (3D)')
    # Plot sensors, texts and outlines
    figure.scene.disable_render = True
    points = mlab.points3d(embeddings[:, 0], embeddings[:, 1], embeddings[:, 2],
                           scale_factor=0.015)
    for i, x in enumerate(embeddings):
        mlab.text3d(x[0], x[1], x[2], dataset.sensor_list[i]['name'],
                    scale=(0.01, 0.01, 0.01))
    mlab.outline(None, color=(.7, .7, .7), extent=[0, 1, 0, 1, 0, 1])
    ax = mlab.axes(None, color=(.7, .7, .7), extent=[0, 1, 0, 1, 0, 1],
                   ranges=[0, 1, 0, 1, 0, 1], nb_labels=6)
    ax.label_text_property.font_size = 3
    ax.axes.font_factor = 0.3
    figure.scene.disable_render = False
    if show_figure:
        mlab.show()
    return figure, points
49194f7ea6dee85dc84dd1c9047d21140a5e7a38
3,649,411
def geometry(cnf_save_fs, mod_thy_info, conf='sphere', hbond_cutoffs=None):
    """ get the geometry """
    assert conf in ('minimum', 'sphere')
    # Read the file system
    if conf == 'minimum':
        geom = _min_energy_conformer(
            cnf_save_fs, mod_thy_info, hbond_cutoffs=hbond_cutoffs)
    elif conf == 'sphere':
        geom = _spherical_conformer(cnf_save_fs)
    return geom
9805baa4479ebcafa158b26ef4e19ea31109e8eb
3,649,412
def exportToVtk(gridFunction, dataType, dataLabel, fileNamesBase,
                filesPath=None, type='ascii'):
    """
    Export a grid function to a VTK file.

    *Parameters:*
       - gridFunction (GridFunction)
            The grid function to be exported.
       - dataType ('cell_data' or 'vertex_data')
            Determines whether data are attached to vertices or cells.
       - dataLabel (string)
            Label used to identify the function in the VTK file.
       - fileNamesBase (string)
            Base name of the output files. It should not contain any directory
            part or filename extensions.
       - filesPath (string)
            Output directory. Can be set to None (default), in which case the
            files are output in the current directory.
       - type ('ascii', 'base64', 'appendedraw' or 'appendedbase64')
            Output type. See the Dune reference manual for more details.
    """
    return _constructObjectTemplatedOnBasisAndResult(
        core, "exportToVtk",
        gridFunction.basisFunctionType(), gridFunction.resultType(),
        gridFunction, dataType, dataLabel, fileNamesBase, filesPath, type)
04efb88c7870ec46d793bb6bbdd40b7ef70ae8ce
3,649,413
def get_fullname(user):
    """ Get the full name for the given user from the database """
    data = frappe.db.sql("""
        SELECT full_name
        FROM `tabUser`
        WHERE name=%s and docstatus<2""", user, True)
    return data
51d8c0115964cc3159340e2fdc1356d922bc5ae0
3,649,414
def greedy_inference(original, protein_column='Protein Accession', peptide_column='Base Sequence'):
    """
    Greedy protein inference algorithm for matching peptides to corresponding proteins

    Notation:
        G  : original graph
        Gi : inferred graph
        Gr : remaining graph
        Gd : dropped graph
        p  : greedily selected protein
        s  : peptides connected to p

    Select peptides in G that only match to a single protein
    Add proteins corresponding to peptides and all attached peptides to Gi
    Remove said proteins from Gr
    While Gr has edges connecting proteins and peptides
        Greedily select best protein p
        Add p and connected peptides to Gi
        Add peptide-protein edges where protein is not p and peptide is in s to Gd
        Remove edges where peptide is in s from Gr
    Remake Gi and make Gd
        Gi remade to contain all protein-peptide edges that connect to an inferred protein
        Gd made to contain all protein-peptide edges that do not connect to an inferred protein

    Parameters
    ---------
    original : pandas DataFrame
        original peptide-protein graph
    protein_column : str
        column associated with protein accession
    peptide_column : str
        column associated with peptide

    Returns
    --------
    inferred : pandas DataFrame
        Gi, subgraph of G of proteins and their associated peptides
    dropped : pandas DataFrame
        Gd, subgraph of G of proteins and their associated peptides
    rescued : pandas DataFrame
        proteins recovered by rescue_matched_proteins
    """
    # Find peptides that attach to only one protein
    # Add those proteins to inferred proteins bag
    # Remove any peptides that connect to inferred proteins
    peptide_sizes = original.groupby(peptide_column).size().reset_index().rename(columns={0: 'size'})
    single_peptides = list(peptide_sizes[peptide_sizes['size'] == 1][peptide_column])
    inferred_proteins = list(original[original[peptide_column].isin(single_peptides)][protein_column])
    attached_peptides = set(original[original[protein_column].isin(inferred_proteins)][peptide_column])
    remaining = original[~original[peptide_column].isin(attached_peptides)]

    while len(remaining) > 0:
        # Greedily select best protein
        best_protein = find_best_protein(remaining, original, protein_column)
        inferred_proteins.append(best_protein)
        # Remove peptides that connect to protein from remaining
        attached_peptides = set(remaining[remaining[protein_column] == best_protein][peptide_column])
        is_matched_peptide = remaining[peptide_column].isin(attached_peptides)
        remaining = remaining[~is_matched_peptide]

    inferred = original[original[protein_column].isin(inferred_proteins)]
    dropped = original[~original[protein_column].isin(inferred_proteins)]

    # Rescue proteins
    inferred, dropped, rescued = rescue_matched_proteins(inferred, dropped)
    return inferred, dropped, rescued
aa43950c859bcac0371ce4e845c26bdb13182897
3,649,415
import requests


def deploy_droplet(token):
    """
    deploy a new droplet. return the droplet infos so that it can be used to
    further provision.
    """
    droplet_info = {
        'name': 'marian',
        'region': 'sfo2',
        'size': '4gb',
        'image': 'ubuntu-18-04-x64',
        'ssh_keys[]': get_key_fingerprints(token),
        'tags[]': ['marian'],
    }
    print('deploying new droplet...')
    url = 'https://api.digitalocean.com/v2/droplets'
    request = requests.post(url, headers=headers(token), params=droplet_info)
    # see https://github.com/requests/requests/blob/master/requests/status_codes.py
    # pylint: disable=E1101
    if request.status_code != requests.codes.accepted:
        print('Something went wrong. ' + request.json()['message'])
        request.raise_for_status()
    droplet_infos = request.json()['droplet']
    droplet_id = droplet_infos['id']
    print(f'Deployed Marian 👸 (id: {droplet_id})!')
    return droplet_infos
34d9fa31f686936a1c0abb4b5eafcb8eaaac1b11
3,649,416
from typing import Dict
from typing import Any


def _convert_run_describer_v1_like_dict_to_v0_like_dict(
        new_desc_dict: Dict[str, Any]) -> Dict[str, Any]:
    """
    This function takes the given dict which is expected to be representation
    of `RunDescriber` with `InterDependencies_` (underscore!) object and
    without "version" field, and converts it to a dict that is a
    representation of the `RunDescriber` object with `InterDependencies`
    (no underscore!) object and without "version" field.
    """
    new_desc_dict = new_desc_dict.copy()
    # We intend to use conversion methods from `serialization` module,
    # but those work only with RunDescriber representations that have
    # "version" field. So first, the "version" field with correct value is
    # added.
    new_desc_dict['version'] = 1
    # Out of that dict we create RunDescriber object of the current version
    # (regardless of what the current version is).
    new_desc = serial.from_dict_to_current(new_desc_dict)
    # The RunDescriber of the current version gets converted to a dictionary
    # that represents a RunDescriber object of version 0 - this is the one
    # that has InterDependencies object in it (not the InterDependencies_ one).
    old_desc_dict = serial.to_dict_as_version(new_desc, 0)
    # Lastly, the "version" field is removed.
    old_desc_dict.pop('version')
    return old_desc_dict
b5d4126f0b480a90323df24dda1d7ecb0c84d712
3,649,417
from io import StringIO import textwrap def download_sequences(request): """Download the selected and/or user uploaded protein sequences.""" selected_values = request.session.get("list_names", []) list_nterminal = request.session.get("list_nterminal", []) list_middle = request.session.get("list_middle", []) list_cterminal = request.session.get("list_cterminal", []) values = [] if list_nterminal: values += list_nterminal if list_middle: values += list_middle if list_cterminal: values += list_cterminal if selected_values: values += selected_values values = list(set(values)) data = PesticidalProteinDatabase.objects.filter(name__in=values) userdata = UserUploadData.objects.filter( session_key=request.session.session_key) combined_selection = [] if list_nterminal: combined_selection += list_nterminal if list_middle: combined_selection += list_middle if list_cterminal: combined_selection += list_cterminal if selected_values: combined_selection += selected_values accession = {} data = PesticidalProteinDatabase.objects.filter( name__in=combined_selection) if data: for item in data: accession[item.accession] = item protein_detail = ProteinDetail.objects.filter( accession__in=list(accession.keys())) file = StringIO() # buffer = BytesIO() for item in data: output = "" item_name = item.name # print("item_name", item_name) if item.name in list_nterminal: nterminal = [ protein for protein in protein_detail if protein.accession == item.accession] item_name += "_d1" for item1 in nterminal: output += item1.get_endotoxin_n() if item.name in list_middle: middle = [ protein for protein in protein_detail if protein.accession == item.accession] item_name += "_d2" for item1 in middle: output += item1.get_endotoxin_m() if item.name in list_cterminal: cterminal = [ protein for protein in protein_detail if protein.accession == item.accession] # print(cterminal) item_name += "_d3" for item1 in cterminal: output += item1.get_endotoxin_c() # print("download output", output) if item.name in selected_values: fasta = textwrap.fill(item.sequence, 80) output += fasta # print(str_to_write) # file.write(str_to_write) if output: str_to_write = f">{item_name}\n{output}\n" file.write(str_to_write) for item in userdata: fasta = textwrap.fill(item.sequence, 80) if len(item.name) > 10: item.name = item.name[:10] str_to_write = f">{item.name}\n{fasta}\n" file.write(str_to_write) response = HttpResponse(file.getvalue(), content_type="text/plain") download_file = "cart_fasta_sequences.txt" response["Content-Disposition"] = "attachment;filename=" + download_file response["Content-Length"] = file.tell() return response
a5f063d4323290b939ccb635f0db175f6fe48ce0
3,649,419
def resize_to_fill(image, size):
    """
    Resize down and crop image to fill the given dimensions.
    Most suitable for thumbnails.
    (The final image will match the requested size, unless one or the other
    dimension is already smaller than the target size)
    """
    resized_image = resize_to_min(image, size)
    return crop_to_centre(resized_image, size)
977b9a1e84a0a2125aa60cea09e7d2bea520cccf
3,649,420
def extract_dual_coef(num_classes, sv_ind_by_clf, sv_coef_by_clf, labels):
    """ Construct dual coefficients array in SKLearn peculiar layout,
    as well corresponding support vector indexes """
    sv_ind_by_class = group_indices_by_class(num_classes, sv_ind_by_clf, labels)
    sv_ind_mapping = map_sv_to_columns_in_dual_coef_matrix(sv_ind_by_class)

    num_unique_sv = len(sv_ind_mapping)
    dc_dt = sv_coef_by_clf[0].dtype
    dual_coef = np.zeros((num_classes - 1, num_unique_sv), dtype=dc_dt)
    support_ = np.empty((num_unique_sv,), dtype=np.int32)

    p = 0
    for i in range(0, num_classes):
        for j in range(i + 1, num_classes):
            sv_ind_i_vs_j = sv_ind_by_clf[p]
            sv_coef_i_vs_j = sv_coef_by_clf[p]
            p += 1

            for k, sv_index in enumerate(sv_ind_i_vs_j):
                label = labels[sv_index]
                col_index = sv_ind_mapping[sv_index]
                if j == label:
                    row_index = i
                else:
                    row_index = j - 1
                dual_coef[row_index, col_index] = sv_coef_i_vs_j[k]
                support_[col_index] = sv_index

    return dual_coef, support_
cd64c5df3b6e633a482271e82b53b5d1f431bf7a
3,649,421
def namespaces_of(name):
    """
    utility to determine namespaces of a name
    @raises ValueError
    @raises TypeError
    """
    if name is None:
        raise ValueError('name')
    try:
        if not isinstance(name, basestring):
            raise TypeError('name')
    except NameError:
        if not isinstance(name, str):
            raise TypeError('name')

    if not name:
        return ['/']

    splits = [x for x in name.split('/') if x]
    return ['/'] + ['/' + '/'.join(splits[:i]) for i in range(1, len(splits))]
7226d0540963f021b5a0bcf34763ab60942094d0
3,649,423
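Illustrative calls showing the expansion the function performs (outputs derived by reading the code above):

# Example usage of namespaces_of:
print(namespaces_of('/foo/bar/baz'))  # ['/', '/foo', '/foo/bar']
print(namespaces_of('rel/name'))      # ['/', '/rel']
print(namespaces_of(''))              # ['/']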
def create_c3d_sentiment_model(): """ C3D sentiment Keras model definition :return: """ model = Sequential() input_shape = (16, 112, 112, 3) model.add(Conv3D(64, (3, 3, 3), activation='relu', padding='same', name='conv1', input_shape=input_shape)) model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')) # 2nd layer group model.add(Conv3D(128, (3, 3, 3), activation='relu', padding='same', name='conv2')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')) # 3rd layer group model.add(Conv3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3a')) model.add(Conv3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3b')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')) # 4th layer group model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4a')) model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4b')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')) # 5th layer group model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv5a')) model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv5b')) model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad5')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')) model.add(Flatten()) # FC layers group model.add(Dense(4096, activation='relu', name='fc6')) model.add(Dropout(.5)) model.add(Dense(4096, activation='relu', name='fc7')) model.add(Dropout(.5)) model.add(Dense(2, activation='softmax', name='nfc8')) return model
77de7bc69c848b6b1efdd222161e2e471186cd41
3,649,424
def notch_filter(data: FLOATS_TYPE,
                 sampling_freq_hz: float,
                 notch_freq_hz: float,
                 quality_factor: float) -> FLOATS_TYPE:
    """
    Design and use a notch (band reject) filter to filter the data.

    Args:
        data: time series of the data
        sampling_freq_hz: sampling frequency :math:`f_s`, in Hz
            (or other consistent units)
        notch_freq_hz: notch frequency, in Hz (or other consistent units)
        quality_factor: notch filter quality factor, :math:`Q`

    Returns:
        filtered data
    """
    b, a = iirnotch(
        w0=normalized_frequency(notch_freq_hz, sampling_freq_hz),
        Q=quality_factor
    )
    filtered_data = lfilter(b=b, a=a, x=data)
    return filtered_data
4a7fc2c41343258e9951503fb5579b3283f14e31
3,649,426
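`FLOATS_TYPE` and `normalized_frequency` are helpers from the surrounding module; assuming the usual normalization `f0 / (fs / 2)`, a hypothetical standalone use of the same SciPy calls looks like this:

# Illustrative sketch: remove 50 Hz interference from a 1 kHz-sampled signal.
import numpy as np
from scipy.signal import iirnotch, lfilter

fs, f0, q = 1000.0, 50.0, 30.0
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * f0 * t)

b, a = iirnotch(w0=f0 / (fs / 2), Q=q)  # same design call the snippet wraps
x_filtered = lfilter(b, a, x)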
def get_trail_max(self, rz_array=None):
    """
    Return the position of the blob maximum. Either in pixel or in (R,Z)
    coordinates if rz_array is passed.
    """
    if rz_array is None:
        return self.xymax

    # Remember xycom[:,1] is the radial (X) index which corresponds to R
    return rz_array[self.xymax[:, 0].astype('int'),
                    self.xymax[:, 1].astype('int'), :]
5456c95ba4cb02352aa69398f9fa5307f3dc8e06
3,649,427
def create_action_type(request):
    """
    Create a new action type
    """
    # check name uniqueness
    if ActionType.objects.filter(name=request.data['name']).exists():
        raise SuspiciousOperation(_('An action with a similar name already exists'))

    description = request.data.get("description")
    label = request.data.get("label")
    format_data = request.data.get("format", {'type': 'undefined'})

    lang = translation.get_language()

    # create the action type
    action_type = ActionType()
    action_type.name = request.data['name']
    action_type.set_label(lang, label)
    action_type.description = description
    action_type.format = format_data

    action_controller = ActionController(action_type, request.user)

    if format_data['type'] != 'undefined':
        # format validation
        ActionStepFormatManager.check(action_controller, format_data)

    action_type.save()

    result = {
        'id': action_type.id,
        'name': action_type.name,
        'label': action_type.get_label(),
        'format': action_type.format,
        'description': action_type.description
    }

    return HttpResponseRest(request, result)
c8101eb8721a63771484ae16a39783338d7ad7a5
3,649,428
def ec_chi_sq(params, w, y, weights, model, normalize='deg'):
    """
    Chi squared for equivalent circuit model.

    Parameters:
    -----------
    params: dict of model parameters
    w: frequencies
    y: measured impedance data: nx2 matrix of Zreal, Zimag
    weights: weights for squared residuals (n-vector)
    model: equivalent circuit model
    normalize: normalization method. Options:
        'deg': normalize by degrees of freedom, i.e. len(y) - len(params)
        'n': normalize by number of observations, i.e. len(y)
        False: don't normalize
    """
    Zfit = model(w, **params)
    y_fit = np.array([Zfit.real, Zfit.imag]).T
    x2 = chi_sq(y, y_fit, weights)  # + np.sum((x < 0).astype(int)*1000)
    if normalize == 'deg':
        x2 /= (len(y) - len(params))
    elif normalize == 'n':
        x2 /= len(y)
    elif normalize is not False:
        raise ValueError(f"Invalid normalize option {normalize}. Options are 'deg', 'n', False")
    return x2
3d108f79aec530cc375443001902cd8f3797cd95
3,649,429
def synthesize_genre_favs(xn_train_df):
    """ Making synthetic user-genre favorite interactions
    We're going to just count the genres watched by each user.
    Subsample from a random top percentile of genres and consider those
    the user's favorites. We will then subsample again -- simulating the
    voluntary aspect of favoriting a genre.
    """

    def sample_fav(df, q_thresh=None, frac=None):
        q_thresh = q_thresh or np.random.rand()
        frac = frac or np.random.rand()
        return df.reset_index().genre_id \
            .loc[(df.item_id >= df.item_id.quantile(q_thresh)).values] \
            .sample(frac=frac, replace=False)

    n_users = xn_train_df['user_id'].nunique()
    genre_counts = xn_train_df.groupby(('user_id', 'genre_id')).count()
    xn_genre_favs = genre_counts.groupby(level=0) \
        .apply(sample_fav) \
        .reset_index().drop('level_1', axis=1)

    # say 0.7 of users know of the genre favoriting feature
    aware_users = set(np.random.permutation(n_users)[:int(0.7 * n_users)])
    xn_genre_favs_samp = xn_genre_favs.loc[
        xn_genre_favs.user_id.isin(aware_users)]
    return xn_genre_favs_samp
f0be8bf6441efda1c27f0f03df644c5fab408ca5
3,649,430
def mfa_to_challenge(mfa):
    """ Convert MFA from bastion to internal Challenge

    param mfa: MFA from bastion
    :rtype: Challenge
    :return: a converted Challenge
    """
    if not mfa.fields:
        return None
    message_list = []
    echos = [False for x in mfa.fields]
    fields = mfa.fields
    if hasattr(mfa, "auth_type"):
        message_list.append("Authentication type: %s" % mfa.auth_type)
    if mfa.fields[0] == "username":
        fields = fields[1:]
        echos = echos[1:]
        message_list.append("Username: %s" % mfa.username)
    message = "\n".join(message_list)
    recall = (len(fields) == 0)
    return Challenge(
        challenge_type="MFA",
        title="= MultiFactor Authentication =",
        message=message,
        fields=fields,
        echos=echos,
        username=mfa.username,
        token=mfa.token,
        recall=recall
    )
903bbc7f82e624d2dac0fbc1c0711742de57e876
3,649,431
def put_vns3_controller_api_password( api_client, vns3_controller_id, api_password=None, **kwargs ): # noqa: E501 """Update VNS3 Controller API password # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> response = await api.put_vns3_controller_api_password(id, async_req=True) :param VNS3Client api_client: (required) :param vns3_controller_id int: Controller ID (required) :param api_password str: New api password (required) :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: APIResponse or awaitable if async """ local_var_params = locals() request_params = ["api_password"] collection_formats = {} path_params = {"vns3_controller_id": vns3_controller_id} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = {} for param in [p for p in request_params if local_var_params.get(p) is not None]: body_params[param] = local_var_params[param] # HTTP header `Accept` header_params["Accept"] = api_client.select_header_accept( ["application/json"] ) # noqa: E501 # HTTP header `Content-Type` header_params["Content-Type"] = api_client.select_header_content_type( # noqa: E501 ["application/json"] ) # noqa: E501 # Authentication setting auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501 return api_client.call_api( "/vns3_controllers/{vns3_controller_id}/update_api_password", "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="object", # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get("async_req"), _return_http_data_only=local_var_params.get( "_return_http_data_only" ), # noqa: E501 _preload_content=local_var_params.get("_preload_content", True), _request_timeout=local_var_params.get("_request_timeout"), collection_formats=collection_formats, )
b24f43b047c0b96ff41b628de7d2efbd3bd71f12
3,649,432
def create_pod(interface_type=None, pvc_name=None, desired_status=constants.STATUS_RUNNING, wait=True): """ Create a pod Args: interface_type (str): The interface type (CephFS, RBD, etc.) pvc (str): The PVC that should be attached to the newly created pod desired_status (str): The status of the pod to wait for wait (bool): True for waiting for the pod to reach the desired status, False otherwise Returns: Pod: A Pod instance Raises: AssertionError: In case of any failure """ if interface_type == constants.CEPHBLOCKPOOL: pod_dict = constants.CSI_RBD_POD_YAML interface = constants.RBD_INTERFACE else: pod_dict = constants.CSI_CEPHFS_POD_YAML interface = constants.CEPHFS_INTERFACE pod_data = templating.load_yaml_to_dict(pod_dict) pod_data['metadata']['name'] = create_unique_resource_name( f'test-{interface}', 'pod' ) pod_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE if pvc_name: pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name pod_obj = pod.Pod(**pod_data) pod_name = pod_data.get('metadata').get('name') created_resource = pod_obj.create(do_reload=wait) assert created_resource, ( f"Failed to create resource {pod_name}" ) if wait: assert wait_for_resource_state(pod_obj, desired_status) return pod_obj
6d8142a71e187efa194ac8c492f004bbc9e126b8
3,649,433
from typing import Tuple
from typing import List


def create_annotation(annotation_id: int, image_id: int, category_id: int, is_crowd: int,
                      area: int, bounding_box: Tuple[int, int, int, int],
                      segmentation: List[Tuple[int, int]]) -> dict:
    """
    Converts input data to COCO annotation information storing format.

    :param int annotation_id: unique identifier of the annotation
    :param int image_id: identifier of related image
    :param int category_id: identifier of related category (annotation class)
    :param int is_crowd: "iscrowd": 0 if your segmentation is based on a polygon (object instance)
                         "iscrowd": 1 if your segmentation is based on uncompressed RLE (crowd)
    :param float area: area occupied by segmentation in pixels
    :param Tuple[float, float, float, float] bounding_box: coordinates of bbox in format (x,y,w,h)
    :param list segmentation: polygon coordinates
    :return: dict of the annotation information in COCO format
    """
    return {
        "id": annotation_id,
        "image_id": image_id,
        "category_id": category_id,
        "iscrowd": is_crowd,
        "area": area,  # float
        "bbox": bounding_box,  # [x,y,width,height]
        "segmentation": segmentation  # [polygon]
    }
715a6204ed5dd9b081ac6e87541df3cd46d329a1
3,649,435
def check_sequence_is_valid(seq, alignment=False):
    """
    Parameters
    --------------
    seq : str
        Amino acid sequence

    alignment : bool
        Flag that defines if this alignment sequence rules should be
        applied or not.

    Returns
    ------------
    Tuple
        Returns a tuple of size 2 where element 0 is a boolean (True or False)
        that flags if the sequence was valid (True) or not (False).

        element 1 is a value that will return as the invalid amino acid (if
        sequence is invalid) OR if it's a valid sequence will be 0
    """
    if alignment == True:
        valid_AA_list = STANDARD_AAS_WITH_GAP
    else:
        valid_AA_list = STANDARD_AAS

    s = list(set(seq))
    for i in s:
        if i not in valid_AA_list:
            return (False, i)
    return (True, 0)
eeae8e65e068a8c94b63e70eec2cc84e0fb5c85c
3,649,436
import torch


def get_device():
    """Pick GPU if available, else CPU"""
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')
6b1a9baa0c7a98c31bdfebba513565fedc9335af
3,649,437
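A typical (illustrative) way to use the helper above, with a made-up model and batch:

# Move a model and a batch to the selected device before running a forward pass.
device = get_device()
model = torch.nn.Linear(10, 2).to(device)
batch = torch.randn(4, 10, device=device)
logits = model(batch)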
def extension_chisquare(x, y=None, lower=True):
    """Calculates a one-way chi square test for file extensions.

    :param x: Paths to compare with y.
    :type x: list, tuple, array of WindowsFilePath or PosixFilePath objects
    :param y: Paths to compare with x.
    :type y: list, tuple, array of WindowsFilePath or PosixFilePath objects
    :param lower: Convert the extensions to lower before counting.
    :type lower: boolean
    :return: The test result.
    :rtype: scipy.stats.Power_divergenceResult
    """
    try:
        from scipy.stats import chisquare
        from sklearn.feature_extraction import DictVectorizer
    except ModuleNotFoundError:
        raise MissingDependencyError(
            "Install the module 'scipy' and 'sklearn' to compute chisquares.")

    counts_x = extension_counts(x, lower=lower)
    counts_y = extension_counts(y, lower=lower)
    dv = DictVectorizer(sparse=False)
    arr = dv.fit_transform([counts_x, counts_y])
    return chisquare(arr[0], arr[1])
e181c119c85ce2914f66c2064863d8eaa43754d4
3,649,438
def kuster_toksoz_moduli(
    k1, mu1, k2, mu2, frac2, inclusion_shape="spheres", alpha=None
):
    """Kuster-Toksoz Moduli for an inclusion to a material.

    Best used for low-porosity materials. To add multiple inclusions to a model
    use this function recursively substituting the output for k1 and mu1 after
    the first pass. Inclusions are added randomly (iso-tropic).

    Assumes the material is linear and elastic, is limited to dilute
    concentrations of inclusions and idealised ellipsoidal inclusion shapes.

    Args:
        k1 (array-like): Material bulk moduli
        mu1 (array-like): Material shear moduli
        k2 (array-like): Inclusion bulk moduli
        mu2 (array-like): Inclusion shear moduli
        frac2 (array-like): The volume fraction of the inclusion to be added.
        inclusion_shape (str, Optional): The shape of the inclusion. Defaults to
            'spheres'. One of ['spheres', 'needles', 'disks', 'cracks'].
        alpha (float, Optional): Required if inclusion_shape='cracks'. The aspect
            ratio of the cracks.
    """
    if inclusion_shape == "spheres":
        pmi, qmi = _kuster_toksoz_spheres(k1, mu1, k2, mu2)
    elif inclusion_shape == "needles":
        pmi, qmi = _kuster_toksoz_needles(k1, mu1, k2, mu2)
    elif inclusion_shape == "disks":
        pmi, qmi = _kuster_toksoz_disks(k1, mu1, k2, mu2)
    elif inclusion_shape == "cracks" and isinstance(alpha, float):
        pmi, qmi = _kuster_toksoz_cracks(k1, mu1, k2, mu2, alpha)
    else:
        raise ValueError(
            "Unknown inclusions_shape or alpha must be specified as float for cracks."
        )
    eta1 = _kuster_toksoz_eta(k1, mu1)
    k_a = frac2 * (k2 - k1) * pmi
    mu_a = frac2 * (mu2 - mu1) * qmi
    return (
        (4 / 3 * mu1 * (k_a + k1) + power(k1, 2)) / (k1 + 4 * mu1 / 3 - k_a),
        (eta1 * (mu_a + mu1) + power(mu1, 2)) / (mu1 + eta1 - mu_a),
    )
42f50effa180abfefe83c0c9c4a67c102d5a7d88
3,649,439
def fetchResearchRadius(chatId: str, reachableByFoot: bool) -> tuple:
    """Given a chat id and a distance type, returns the user distance preference.

    Args:
        chatId (str) - the chat_id of which the language is required
        reachableByFoot (bool) - true if the preferred_distance_on_foot param has to be
            fetched, otherwise false if the user wants to fetch preferred_distance_by_car

    Returns:
        int - the user preference in terms of distance from the restaurant
    """
    connection = dbConnect()
    if reachableByFoot:
        result = (
            connection.cursor()
            .execute(
                """SELECT preferred_distance_on_foot FROM chat WHERE chat_id = ?""",
                (chatId,),
            )
            .fetchone()
        )
    else:
        result = (
            connection.cursor()
            .execute(
                """SELECT preferred_distance_by_car FROM chat WHERE chat_id = ?""",
                (chatId,),
            )
            .fetchone()
        )
    connection.close()
    return result
9a5c83b25b50ba31e0e9376bef5a3cc8dc38d08d
3,649,440
def build_train_valid_test_datasets(tokenizer, data_class, data_prefix, data_impl, splits_string, train_valid_test_num_samples, enc_seq_length, dec_seq_length, seed, skip_warmup, prompt_config): """Build train, valid, and test datasets.""" context_data_prefix = data_prefix + "_context" target_data_prefix = data_prefix + "_target" # Indexed dataset. context_indexed_dataset = get_indexed_dataset_(context_data_prefix, data_impl, skip_warmup) target_indexed_dataset = get_indexed_dataset_(target_data_prefix, data_impl, skip_warmup) total_num_of_documents = context_indexed_dataset.sizes.shape[0] splits = get_train_valid_test_split_(splits_string, total_num_of_documents) # Print stats about the splits. print_rank_0(' > dataset split:') def print_split_stats(name, index): print_rank_0(' {}:'.format(name)) print_rank_0(' document indices in [{}, {}) total of {} ' 'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])) print_split_stats('train', 0) print_split_stats('validation', 1) print_split_stats('test', 2) def build_dataset(index, name): dataset = None if splits[index + 1] > splits[index]: document_ids_in_splits = np.arange(start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32) dataset = data_class(tokenizer, name, data_prefix, document_ids_in_splits, context_indexed_dataset, target_indexed_dataset, train_valid_test_num_samples[index], enc_seq_length, dec_seq_length, prompt_config, seed) return dataset train_dataset = build_dataset(0, 'train') valid_dataset = build_dataset(1, 'valid') test_dataset = build_dataset(2, 'test') return (train_dataset, valid_dataset, test_dataset)
48e2528007e34f668dc34052db2e150646bb42ec
3,649,441
def _scal_sub_fp(x, scal):
    """Subtract a scalar scal from a vector or matrix x."""
    if _type_of(x) == 'vec':
        return [a - scal for a in x]
    else:
        return [[a - scal for a in x_row] for x_row in x]
ef431b7dcceb9339381b623c957a860ee789e2bd
3,649,442
def asset_name(aoi_model, model, fnf=False):
    """return the standard name of your asset/file"""

    prefix = "kc_fnf" if fnf else "alos_mosaic"

    filename = f"{prefix}_{aoi_model.name}_{model.year}"

    if model.filter != "NONE":
        filename += f"_{model.filter.lower()}"

    if model.rfdi:
        filename += "_rfdi"

    if model.ls_mask:
        filename += "_masked"

    if model.dB:
        filename += "_dB"

    if model.texture:
        filename += "_texture"

    if model.aux:
        filename += "_aux"

    return filename
e7211bec70739e53280ce424e1cb3c4c4304ac54
3,649,443
from nipype.interfaces import ants from collections import ( OrderedDict, ) # Need OrderedDict internally to ensure consistent ordering from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto from nipype.interfaces.semtools.segmentation.specialized import ( BRAINSROIAuto, ) from .FixLabelMapsTools import fix_label_map_from_neuromorphemetrics_2012 from .FixLabelMapsTools import recode_label_map from collections import ( OrderedDict, ) # Need OrderedDict internally to ensure consistent ordering def create_joint_fusion_workflow( WFname, onlyT1, master_config, runFixFusionLabelMap=True ): """ This function... :param WFname: :param onlyT1: :param master_config: :param runFixFusionLabelMap: :return: """ if onlyT1: n_modality = 1 else: n_modality = 2 CLUSTER_QUEUE = master_config["queue"] CLUSTER_QUEUE_LONG = master_config["long_q"] JointFusionWF = pe.Workflow(name=WFname) inputsSpec = pe.Node( interface=IdentityInterface( fields=[ "subj_t1_image", # Desired image to create label map for "subj_t2_image", # Desired image to create label map for "subj_lmks", # The landmarks corresponding to t1_image "subj_fixed_head_labels", # The fixed head labels from BABC "subj_posteriors", # The BABC posteriors "subj_left_hemisphere", # The warped left hemisphere mask "atlasWeightFilename", # The static weights file name "labelBaseFilename" # Atlas label base name ex) neuro_lbls.nii.gz ] ), run_without_submitting=True, name="inputspec", ) outputsSpec = pe.Node( interface=IdentityInterface( fields=[ "JointFusion_HDAtlas20_2015_label", "JointFusion_HDAtlas20_2015_CSFVBInjected_label", "JointFusion_HDAtlas20_2015_fs_standard_label", "JointFusion_HDAtlas20_2015_lobe_label", "JointFusion_extended_snapshot", "JointFusion_HDAtlas20_2015_dustCleaned_label", "JointFusion_volumes_csv", "JointFusion_volumes_json", "JointFusion_lobe_volumes_csv", "JointFusion_lobe_volumes_json", ] ), run_without_submitting=True, name="outputspec", ) BLICreator = OrderedDict() A2SantsRegistrationPreJointFusion_SyN = OrderedDict() movingROIAuto = OrderedDict() labelMapResample = OrderedDict() NewlabelMapResample = OrderedDict() jointFusion_atlas_mergeindex = 0 merge_input_offset = 1 # Merge nodes are indexed from 1, not zero! 
""" multimodal ants registration if t2 exists """ sessionMakeMultimodalInput = pe.Node( Function( function=make_vector, input_names=["inFN1", "inFN2", "jointFusion"], output_names=["outFNs"], ), run_without_submitting=True, name="sessionMakeMultimodalInput", ) sessionMakeMultimodalInput.inputs.jointFusion = False JointFusionWF.connect( inputsSpec, "subj_t1_image", sessionMakeMultimodalInput, "inFN1" ) """ T2 resample to T1 average image :: BRAINSABC changed its behavior to retain image's original spacing & origin :: Since antsJointFusion only works for the identical origin images for targets, :: Resampling is placed at this stage """ subjectT2Resample = pe.Node( interface=BRAINSResample(), name="BRAINSResample_T2_forAntsJointFusion" ) if not onlyT1: subjectT2Resample.plugin_args = { "qsub_args": modify_qsub_args(master_config["queue"], 1, 1, 1), "overwrite": True, } subjectT2Resample.inputs.pixelType = "short" subjectT2Resample.inputs.interpolationMode = "Linear" subjectT2Resample.inputs.outputVolume = "t2_resampled_in_t1.nii.gz" # subjectT2Resample.inputs.warpTransform= "Identity" # Default is "Identity" JointFusionWF.connect( inputsSpec, "subj_t1_image", subjectT2Resample, "referenceVolume" ) JointFusionWF.connect( inputsSpec, "subj_t2_image", subjectT2Resample, "inputVolume" ) JointFusionWF.connect( subjectT2Resample, "outputVolume", sessionMakeMultimodalInput, "inFN2" ) else: pass # print('jointFusion_atlas_db_base') print("master_config") print(master_config) print("master_config['jointfusion_atlas_db_base']") print((master_config["jointfusion_atlas_db_base"])) jointFusionAtlasDict = read_malf_atlas_db_base( master_config["jointfusion_atlas_db_base"] ) number_of_atlas_sources = len(jointFusionAtlasDict) jointFusionAtlases = OrderedDict() atlasMakeMultimodalInput = OrderedDict() t2Resample = OrderedDict() warpedAtlasLblMergeNode = pe.Node( interface=Merge(number_of_atlas_sources), name="LblMergeAtlas" ) NewwarpedAtlasLblMergeNode = pe.Node( interface=Merge(number_of_atlas_sources), name="fswmLblMergeAtlas" ) # "HACK NOT to use T2 for JointFusion only" # warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources*n_modality),name="MergeAtlases") warpedAtlasesMergeNode = pe.Node( interface=Merge(number_of_atlas_sources * 1), name="MergeAtlases" ) ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons UseRegistrationMasking = True if UseRegistrationMasking == True: fixedROIAuto = pe.Node(interface=BRAINSROIAuto(), name="fixedROIAUTOMask") fixedROIAuto.inputs.ROIAutoDilateSize = 10 fixedROIAuto.inputs.outputROIMaskVolume = "fixedImageROIAutoMask.nii.gz" JointFusionWF.connect(inputsSpec, "subj_t1_image", fixedROIAuto, "inputVolume") for jointFusion_atlas_subject in list(jointFusionAtlasDict.keys()): ## Need DataGrabber Here For the Atlas jointFusionAtlases[jointFusion_atlas_subject] = pe.Node( interface=IdentityInterface( fields=["t1", "t2", "label", "lmks", "registration_mask"] ), name="jointFusionAtlasInput" + jointFusion_atlas_subject, ) jointFusionAtlases[jointFusion_atlas_subject].inputs.t1 = jointFusionAtlasDict[ jointFusion_atlas_subject ]["t1"] jointFusionAtlases[jointFusion_atlas_subject].inputs.t2 = jointFusionAtlasDict[ jointFusion_atlas_subject ]["t2"] jointFusionAtlases[ jointFusion_atlas_subject ].inputs.label = jointFusionAtlasDict[jointFusion_atlas_subject]["label"] jointFusionAtlases[ jointFusion_atlas_subject ].inputs.lmks = jointFusionAtlasDict[jointFusion_atlas_subject]["lmks"] jointFusionAtlases[ 
jointFusion_atlas_subject ].inputs.registration_mask = jointFusionAtlasDict[jointFusion_atlas_subject][ "registration_mask" ] ## Create BLI first ######################################################## # Run BLI atlas_to_subject ######################################################## BLICreator[jointFusion_atlas_subject] = pe.Node( interface=BRAINSLandmarkInitializer(), name="BLI_" + jointFusion_atlas_subject, ) BLICreator[ jointFusion_atlas_subject ].inputs.outputTransformFilename = "landmarkInitializer_{0}_to_subject_transform.h5".format( jointFusion_atlas_subject ) JointFusionWF.connect( inputsSpec, "atlasWeightFilename", BLICreator[jointFusion_atlas_subject], "inputWeightFilename", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "lmks", BLICreator[jointFusion_atlas_subject], "inputMovingLandmarkFilename", ) JointFusionWF.connect( inputsSpec, "subj_lmks", BLICreator[jointFusion_atlas_subject], "inputFixedLandmarkFilename", ) ##### Initialize with ANTS Transform For SyN currentAtlasToSubjectantsRegistration = ( "SyN_AtlasToSubjectANTsPreJointFusion_" + jointFusion_atlas_subject ) A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject] = pe.Node( interface=ants.Registration(), name=currentAtlasToSubjectantsRegistration ) many_cpu_ANTsSyN_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE_LONG, 4, 2, 16), "overwrite": True, } A2SantsRegistrationPreJointFusion_SyN[ jointFusion_atlas_subject ].plugin_args = many_cpu_ANTsSyN_options_dictionary if onlyT1: JFregistrationTypeDescription = "FiveStageAntsRegistrationT1Only" else: JFregistrationTypeDescription = "FiveStageAntsRegistrationMultiModal" common_ants_registration_settings( antsRegistrationNode=A2SantsRegistrationPreJointFusion_SyN[ jointFusion_atlas_subject ], registrationTypeDescription=JFregistrationTypeDescription, output_transform_prefix=jointFusion_atlas_subject + "_ToSubjectPreJointFusion_SyN", output_warped_image=jointFusion_atlas_subject + "_2subject.nii.gz", output_inverse_warped_image=None, # NO NEED FOR THIS save_state=None, # NO NEED FOR THIS invert_initial_moving_transform=False, initial_moving_transform=None, ) ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons if UseRegistrationMasking == True: JointFusionWF.connect( fixedROIAuto, "outputROIMaskVolume", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "fixed_image_masks", ) # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels', # A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'fixed_image_masks') # NOTE: Moving image mask can be taken from Atlas directly so that it does not need to be read in # movingROIAuto[jointFusion_atlas_subject] = pe.Node(interface=BRAINSROIAuto(), name="movingROIAUTOMask_"+jointFusion_atlas_subject) # movingROIAuto.inputs.ROIAutoDilateSize=10 # movingROIAuto[jointFusion_atlas_subject].inputs.outputROIMaskVolume = "movingImageROIAutoMask.nii.gz" # JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject], 't1', movingROIAuto[jointFusion_atlas_subject],'inputVolume') # JointFusionWF.connect(movingROIAuto[jointFusion_atlas_subject], 'outputROIMaskVolume',A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'moving_image_masks') JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "registration_mask", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "moving_image_masks", ) JointFusionWF.connect( BLICreator[jointFusion_atlas_subject], 
"outputTransformFilename", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "initial_moving_transform", ) """ make multimodal input for atlases """ atlasMakeMultimodalInput[jointFusion_atlas_subject] = pe.Node( Function( function=make_vector, input_names=["inFN1", "inFN2", "jointFusion"], output_names=["outFNs"], ), run_without_submitting=True, name="atlasMakeMultimodalInput" + jointFusion_atlas_subject, ) atlasMakeMultimodalInput[jointFusion_atlas_subject].inputs.jointFusion = False JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "t1", atlasMakeMultimodalInput[jointFusion_atlas_subject], "inFN1", ) if not onlyT1: JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "t2", atlasMakeMultimodalInput[jointFusion_atlas_subject], "inFN2", ) else: pass JointFusionWF.connect( sessionMakeMultimodalInput, "outFNs", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "fixed_image", ) JointFusionWF.connect( atlasMakeMultimodalInput[jointFusion_atlas_subject], "outFNs", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "moving_image", ) "HACK NOT to use T2 for JointFusion" # JointFusionWF.connect(A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'warped_image', # warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality) ) JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "warped_image", warpedAtlasesMergeNode, "in" + str(merge_input_offset + jointFusion_atlas_mergeindex * 1), ) """ Original t2 resampling """ for modality_index in range(1, n_modality): t2Resample[jointFusion_atlas_subject] = pe.Node( interface=ants.ApplyTransforms(), name="resampledT2" + jointFusion_atlas_subject, ) many_cpu_t2Resample_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1), "overwrite": True, } t2Resample[ jointFusion_atlas_subject ].plugin_args = many_cpu_t2Resample_options_dictionary t2Resample[jointFusion_atlas_subject].inputs.num_threads = -1 t2Resample[jointFusion_atlas_subject].inputs.dimension = 3 t2Resample[jointFusion_atlas_subject].inputs.output_image = ( jointFusion_atlas_subject + "_t2.nii.gz" ) t2Resample[jointFusion_atlas_subject].inputs.interpolation = "BSpline" t2Resample[jointFusion_atlas_subject].inputs.default_value = 0 t2Resample[jointFusion_atlas_subject].inputs.invert_transform_flags = [ False ] JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "composite_transform", t2Resample[jointFusion_atlas_subject], "transforms", ) JointFusionWF.connect( inputsSpec, "subj_t1_image", t2Resample[jointFusion_atlas_subject], "reference_image", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "t2", t2Resample[jointFusion_atlas_subject], "input_image", ) "HACK NOT to use T2 for JointFusion only" # JointFusionWF.connect(t2Resample[jointFusion_atlas_subject],'output_image', # warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality+modality_index) ) """ Original labelmap resampling """ labelMapResample[jointFusion_atlas_subject] = pe.Node( interface=ants.ApplyTransforms(), name="resampledLabel" + jointFusion_atlas_subject, ) many_cpu_labelMapResample_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1), "overwrite": True, } labelMapResample[ jointFusion_atlas_subject ].plugin_args = many_cpu_labelMapResample_options_dictionary labelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1 
labelMapResample[jointFusion_atlas_subject].inputs.dimension = 3 labelMapResample[jointFusion_atlas_subject].inputs.output_image = ( jointFusion_atlas_subject + "_2_subj_lbl.nii.gz" ) labelMapResample[jointFusion_atlas_subject].inputs.interpolation = "MultiLabel" labelMapResample[jointFusion_atlas_subject].inputs.default_value = 0 labelMapResample[jointFusion_atlas_subject].inputs.invert_transform_flags = [ False ] JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "composite_transform", labelMapResample[jointFusion_atlas_subject], "transforms", ) JointFusionWF.connect( inputsSpec, "subj_t1_image", labelMapResample[jointFusion_atlas_subject], "reference_image", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "label", labelMapResample[jointFusion_atlas_subject], "input_image", ) JointFusionWF.connect( labelMapResample[jointFusion_atlas_subject], "output_image", warpedAtlasLblMergeNode, "in" + str(merge_input_offset + jointFusion_atlas_mergeindex), ) ### New labelmap resampling NewlabelMapResample[jointFusion_atlas_subject] = pe.Node( interface=ants.ApplyTransforms(), name="FSWM_WLABEL_" + jointFusion_atlas_subject, ) many_cpu_NewlabelMapResample_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1), "overwrite": True, } NewlabelMapResample[ jointFusion_atlas_subject ].plugin_args = many_cpu_NewlabelMapResample_options_dictionary NewlabelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1 NewlabelMapResample[jointFusion_atlas_subject].inputs.dimension = 3 NewlabelMapResample[jointFusion_atlas_subject].inputs.output_image = ( jointFusion_atlas_subject + "fswm_2_subj_lbl.nii.gz" ) NewlabelMapResample[ jointFusion_atlas_subject ].inputs.interpolation = "MultiLabel" NewlabelMapResample[jointFusion_atlas_subject].inputs.default_value = 0 NewlabelMapResample[jointFusion_atlas_subject].inputs.invert_transform_flags = [ False ] JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "composite_transform", NewlabelMapResample[jointFusion_atlas_subject], "transforms", ) JointFusionWF.connect( inputsSpec, "subj_t1_image", NewlabelMapResample[jointFusion_atlas_subject], "reference_image", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "label", NewlabelMapResample[jointFusion_atlas_subject], "input_image", ) JointFusionWF.connect( NewlabelMapResample[jointFusion_atlas_subject], "output_image", NewwarpedAtlasLblMergeNode, "in" + str(merge_input_offset + jointFusion_atlas_mergeindex), ) jointFusion_atlas_mergeindex += 1 ## Now work on cleaning up the label maps ### Original NeuroMorphometrica merged fusion jointFusion = pe.Node(interface=ants.AntsJointFusion(), name="AntsJointFusion") many_cpu_JointFusion_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 10, 8, 16), "overwrite": True, } jointFusion.plugin_args = many_cpu_JointFusion_options_dictionary jointFusion.inputs.num_threads = -1 jointFusion.inputs.dimension = 3 jointFusion.inputs.search_radius = [3] # jointFusion.inputs.method='Joint[0.1,2]' jointFusion.inputs.out_label_fusion = "JointFusion_HDAtlas20_2015_label.nii.gz" # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels', jointFusion, 'mask_image') JointFusionWF.connect( fixedROIAuto, "outputROIMaskVolume", jointFusion, "mask_image" ) JointFusionWF.connect( warpedAtlasLblMergeNode, "out", jointFusion, "atlas_segmentation_image" ) AdjustMergeListNode = pe.Node( Function( function=adjust_merge_list, input_names=["allList", 
"n_modality"], output_names=["out"], ), name="AdjustMergeListNode", ) "*** HACK JointFusion only uses T1" # AdjustMergeListNode.inputs.n_modality = n_modality AdjustMergeListNode.inputs.n_modality = 1 JointFusionWF.connect(warpedAtlasesMergeNode, "out", AdjustMergeListNode, "allList") JointFusionWF.connect(AdjustMergeListNode, "out", jointFusion, "atlas_image") AdjustTargetImageListNode = pe.Node( Function( function=adjust_merge_list, input_names=["allList", "n_modality"], output_names=["out"], ), name="AdjustTargetImageListNode", ) AdjustTargetImageListNode.inputs.n_modality = n_modality "*** HACK JointFusion only uses T1" """ Once JointFusion works with T2 properly, delete sessionMakeListSingleModalInput and use sessionMakeMultimodalInput instead """ sessionMakeListSingleModalInput = pe.Node( Function( function=make_vector, input_names=["inFN1", "inFN2", "jointFusion"], output_names=["outFNs"], ), run_without_submitting=True, name="sessionMakeListSingleModalInput", ) sessionMakeListSingleModalInput.inputs.jointFusion = False JointFusionWF.connect( inputsSpec, "subj_t1_image", sessionMakeListSingleModalInput, "inFN1" ) JointFusionWF.connect( sessionMakeListSingleModalInput, "outFNs", jointFusion, "target_image" ) JointFusionWF.connect( jointFusion, "out_label_fusion", outputsSpec, "JointFusion_HDAtlas20_2015_label" ) ## We need to recode values to ensure that the labels match FreeSurer as close as possible by merging ## some labels together to standard FreeSurfer confenventions (i.e. for WMQL) RECODE_LABELS_2_Standard_FSWM = [ (15071, 47), (15072, 47), (15073, 47), (15145, 1011), (15157, 1011), (15161, 1011), (15179, 1012), (15141, 1014), (15151, 1017), (15163, 1018), (15165, 1019), (15143, 1027), (15191, 1028), (15193, 1028), (15185, 1030), (15201, 1030), (15175, 1031), (15195, 1031), (15173, 1035), (15144, 2011), (15156, 2011), (15160, 2011), (15178, 2012), (15140, 2014), (15150, 2017), (15162, 2018), (15164, 2019), (15142, 2027), (15190, 2028), (15192, 2028), (15184, 2030), (15174, 2031), (15194, 2031), (15172, 2035), (15200, 2030), ] ## def recode_label_map(InputFileName,OutputFileName,RECODE_TABLE): RecodeToStandardFSWM = pe.Node( Function( function=recode_label_map, input_names=["InputFileName", "OutputFileName", "RECODE_TABLE"], output_names=["OutputFileName"], ), name="RecodeToStandardFSWM", ) RecodeToStandardFSWM.inputs.RECODE_TABLE = RECODE_LABELS_2_Standard_FSWM RecodeToStandardFSWM.inputs.OutputFileName = ( "JointFusion_HDAtlas20_2015_fs_standard_label.nii.gz" ) JointFusionWF.connect( RecodeToStandardFSWM, "OutputFileName", outputsSpec, "JointFusion_HDAtlas20_2015_fs_standard_label", ) ## JointFusion_SNAPSHOT_WRITER for Segmented result checking: # JointFusion_SNAPSHOT_WRITERNodeName = "JointFusion_ExtendedJointFusion_SNAPSHOT_WRITER" # JointFusion_SNAPSHOT_WRITER = pe.Node(interface=BRAINSSnapShotWriter(), name=JointFusion_SNAPSHOT_WRITERNodeName) # JointFusion_SNAPSHOT_WRITER.inputs.outputFilename = 'JointFusion_HDAtlas20_2015_CSFVBInjected_label.png' # output specification # JointFusion_SNAPSHOT_WRITER.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0] # JointFusion_SNAPSHOT_WRITER.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22] # JointFusionWF.connect(JointFusion_SNAPSHOT_WRITER,'outputFilename',outputsSpec,'JointFusion_extended_snapshot') myLocalDustCleanup = create_dust_cleanup_workflow( "DUST_CLEANUP", onlyT1, master_config ) JointFusionWF.connect( inputsSpec, "subj_t1_image", myLocalDustCleanup, "inputspec.subj_t1_image" ) if not onlyT1: 
JointFusionWF.connect( subjectT2Resample, "outputVolume", myLocalDustCleanup, "inputspec.subj_t2_image", ) if runFixFusionLabelMap: ## post processing of jointfusion injectSurfaceCSFandVBIntoLabelMap = pe.Node( Function( function=fix_label_map_from_neuromorphemetrics_2012, input_names=[ "fusionFN", "FixedHeadFN", "posteriorListOfTuples", "LeftHemisphereFN", "outFN", "OUT_DICT", ], output_names=["fixedFusionLabelFN"], ), name="injectSurfaceCSFandVBIntoLabelMap", ) injectSurfaceCSFandVBIntoLabelMap.inputs.outFN = ( "JointFusion_HDAtlas20_2015_CSFVBInjected_label.nii.gz" ) FREESURFER_DICT = OrderedDict( { "BRAINSTEM": 16, "RH_CSF": 24, "LH_CSF": 24, "BLOOD": 15000, "UNKNOWN": 999, "CONNECTED": [11, 12, 13, 9, 17, 26, 50, 51, 52, 48, 53, 58], } ) injectSurfaceCSFandVBIntoLabelMap.inputs.OUT_DICT = FREESURFER_DICT JointFusionWF.connect( jointFusion, "out_label_fusion", injectSurfaceCSFandVBIntoLabelMap, "fusionFN", ) JointFusionWF.connect( inputsSpec, "subj_fixed_head_labels", injectSurfaceCSFandVBIntoLabelMap, "FixedHeadFN", ) JointFusionWF.connect( inputsSpec, "subj_posteriors", injectSurfaceCSFandVBIntoLabelMap, "posteriorListOfTuples", ) JointFusionWF.connect( inputsSpec, "subj_left_hemisphere", injectSurfaceCSFandVBIntoLabelMap, "LeftHemisphereFN", ) JointFusionWF.connect( injectSurfaceCSFandVBIntoLabelMap, "fixedFusionLabelFN", myLocalDustCleanup, "inputspec.subj_label_atlas", ) JointFusionWF.connect( injectSurfaceCSFandVBIntoLabelMap, "fixedFusionLabelFN", outputsSpec, "JointFusion_HDAtlas20_2015_CSFVBInjected_label", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", RecodeToStandardFSWM, "InputFileName", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", outputsSpec, "JointFusion_HDAtlas20_2015_dustCleaned_label", ) # JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]), # (injectSurfaceCSFandVBIntoLabelMap, JointFusion_SNAPSHOT_WRITER, # [('fixedFusionLabelFN', 'inputBinaryVolumes')]) # ]) else: JointFusionWF.connect( jointFusion, "output_label_image", myLocalDustCleanup, "inputspec.subj_label_atlas", ) JointFusionWF.connect( jointFusion, "output_label_image", outputsSpec, "JointFusion_HDAtlas20_2015_CSFVBInjected_label", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", RecodeToStandardFSWM, "InputFileName", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", outputsSpec, "JointFusion_HDAtlas20_2015_dustCleaned_label", ) # JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]), # (jointFusion, JointFusion_SNAPSHOT_WRITER, # [('output_label_image', 'inputBinaryVolumes')]) # ]) """ Compute label volumes """ computeLabelVolumes = create_volume_measure_workflow("LabelVolume", master_config) JointFusionWF.connect( inputsSpec, "subj_t1_image", computeLabelVolumes, "inputspec.subj_t1_image" ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", computeLabelVolumes, "inputspec.subj_label_image", ) JointFusionWF.connect( computeLabelVolumes, "outputspec.csvFilename", outputsSpec, "JointFusion_volumes_csv", ) JointFusionWF.connect( computeLabelVolumes, "outputspec.jsonFilename", outputsSpec, "JointFusion_volumes_json", ) ## Lobe Pacellation by recoding if master_config["relabel2lobes_filename"] != None: # print("Generate relabeled version 
based on {0}".format(master_config['relabel2lobes_filename'])) RECODE_LABELS_2_LobePacellation = read_recoding_list( master_config["relabel2lobes_filename"] ) RecordToFSLobes = pe.Node( Function( function=recode_label_map, input_names=["InputFileName", "OutputFileName", "RECODE_TABLE"], output_names=["OutputFileName"], ), name="RecordToFSLobes", ) RecordToFSLobes.inputs.RECODE_TABLE = RECODE_LABELS_2_LobePacellation RecordToFSLobes.inputs.OutputFileName = ( "JointFusion_HDAtlas20_2015_lobe_label.nii.gz" ) JointFusionWF.connect( RecodeToStandardFSWM, "OutputFileName", RecordToFSLobes, "InputFileName" ) JointFusionWF.connect( RecordToFSLobes, "OutputFileName", outputsSpec, "JointFusion_HDAtlas20_2015_lobe_label", ) """ Compute lobe volumes """ computeLobeVolumes = create_volume_measure_workflow("LobeVolume", master_config) JointFusionWF.connect( inputsSpec, "subj_t1_image", computeLobeVolumes, "inputspec.subj_t1_image" ) JointFusionWF.connect( RecordToFSLobes, "OutputFileName", computeLobeVolumes, "inputspec.subj_label_image", ) JointFusionWF.connect( computeLobeVolumes, "outputspec.csvFilename", outputsSpec, "JointFusion_lobe_volumes_csv", ) JointFusionWF.connect( computeLobeVolumes, "outputspec.jsonFilename", outputsSpec, "JointFusion_lobe_volumes_json", ) return JointFusionWF
fe742ccf65da1935614867b3ea5823054fbfbcc9
3,649,444
def get_partition_info_logic(cluster_name):
    """
    GET request for cluster partition-zone information
    :return: resp, status
        resp: JSON-formatted response data
        status: response status code
    """
    data = ''
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    partition_info = (
        SfoPartitionsInfo.query.filter_by(cluster_name=cluster_name)
        .order_by(SfoPartitionsInfo.update_time.desc())
        .first()
    )
    if partition_info:
        status = 200
        message = 'OK'
        data = partition_info
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
ee0b08fddc8dcb19bd226aca51a918e48af036e3
3,649,445
def choice_group_name(identifier: Identifier) -> Identifier:
    """
    Generate the XML group name for the interface of the given class ``identifier``.

    >>> choice_group_name(Identifier("something"))
    'something_choice'

    >>> choice_group_name(Identifier("URL_to_something"))
    'urlToSomething_choice'
    """
    parts = identifier.split("_")

    assert (
        len(parts) >= 1
    ), f"Expected at least one part for the valid identifier: {identifier}"

    if len(parts) == 1:
        return Identifier(f"{parts[0].lower()}_choice")

    return Identifier(
        "{}{}_choice".format(
            parts[0].lower(), "".join(part.capitalize() for part in parts[1:])
        )
    )
6eb9b40154dd4c1f38e6a22dda2ebfccf6180506
3,649,446
def init_all_sources_wavelets(observation, centers, min_snr=50, bulge_grow=5, disk_grow=5, use_psf=True, bulge_slice=slice(None,2), disk_slice=slice(2, -1), scales=5, wavelets=None): """Initialize all sources using wavelet detection images. This does not initialize the SED and morpholgy parameters, so `parameterize_source` must still be run to select a parameterization (optimizer) that `LiteBlend` requires for fitting. Parameters ---------- observation: `scarlet.lite.LiteObservation` The multiband observation of the blend. centers: `list` of `tuple` Peak locations for all of the sources to attempt to initialize. wavelets: `numpy.ndarray` The array of wavelet coefficients `(scale, y, x)` used for detection. bulge_slice, disk_slice: `slice` The slice used to select the wavelet scales used for the bulge/disk. bulge_grow, disk_grow: `int` The number of pixels to grow the bounding box of the bulge/disk to leave extra room for growth in the first few iterations. use_psf: `bool` Whether or not to use the PSF for single component sources. If `use_psf` is `False` then only sources with low signal at all scales are initialized with the PSF morphology. min_snr: `float` Minimum signal to noise for each component. So if `min_snr=50`, a source must have SNR > 50 to be initialized with one component and SNR > 100 for 2 components. Returns ------- sources: `list` of `scarlet.lite.LiteSource` The sources that have been initialized. """ init = WaveletInitParameters( observation, bulge_slice, disk_slice, bulge_grow, disk_grow, use_psf, scales, wavelets) sources = [] for center in centers: snr = np.floor(calculate_snr(observation.images, observation.variance, observation.psfs, center)) component_snr = snr / min_snr source = init_wavelet_source(center, component_snr, init) sources.append(source) return sources
777d0be59a9cab19c91bc8ec1eb956beecdd5066
3,649,447
def init_coreg_conversion_wf(name: str = "coreg_conversion_wf") -> pe.Workflow:
    """
    Initiate a workflow to convert input files to NIfTI format for ease of use

    Parameters
    ----------
    name : str, optional
        Workflow's name, by default "nii_conversion_wf"

    Returns
    -------
    pe.Workflow
        A NIfTI conversion workflow
    """
    wf = pe.Workflow(name=name)
    wf.connect(NII_CONVERSION)
    return wf
411e19083a243e1f38b8243e4bae1384aaba190e
3,649,448
def rgb2hex(rgb_color):
    """ 'rgb(180, 251, 184)' => '#B4FBB8' """
    rgb = [int(i) for i in rgb_color.strip('rgb()').split(',')]
    # Uppercase hex digits so the output matches the documented example.
    return '#{:02X}{:02X}{:02X}'.format(rgb[0], rgb[1], rgb[2])
40a01ccc5695266aebaf63a169c1039a6f42a724
3,649,449
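# Usage sketch for rgb2hex above (added illustration, not part of the original record).
# With the uppercase format specifier the docstring example round-trips as documented:
print(rgb2hex('rgb(180, 251, 184)'))  # -> '#B4FBB8'
print(rgb2hex('rgb(0, 0, 0)'))        # -> '#000000'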
def validate_tax_request(tax_dict):
    """Return the sales tax that should be collected for a given order."""
    client = get_client()

    if not client:
        return

    try:
        tax_data = client.tax_for_order(tax_dict)
    except taxjar.exceptions.TaxJarResponseError as err:
        frappe.throw(_(sanitize_error_response(err)))
    else:
        return tax_data
5f6dc961595d766548c348149cc988346dcfdbbe
3,649,450
def union_with(array, *others, **kargs): """This method is like :func:`union` except that it accepts comparator which is invoked to compare elements of arrays. Result values are chosen from the first array in which the value occurs. Args: array (list): List to unionize with. others (list): Lists to unionize with `array`. Keyword Args: comparator (callable, optional): Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: list: Unionized list. Example: >>> comparator = lambda a, b: (a % 2) == (b % 2) >>> union_with([1, 2, 3], [2, 3, 4], comparator=comparator) [1, 2] >>> union_with([1, 2, 3], [2, 3, 4]) [1, 2, 3, 4] .. versionadded:: 4.0.0 """ if not others: return array[:] comparator, others = parse_iteratee('comparator', *others, **kargs) return uniq_with(flatten([array] + list(others)), comparator=comparator)
dd1ee10763f826e9cc94c4ab4a11df20b1d2da3f
3,649,451
def CalculateMoranAutoVolume(mol): """ ################################################################# Calculation of Moran autocorrelation descriptors based on carbon-scaled atomic van der Waals volume. Usage: res=CalculateMoranAutoVolume(mol) Input: mol is a molecule object. Output: res is a dict form containing eight moran autocorrealtion descriptors. ################################################################# """ res = {} for i in range(8): res["MATSv" + str(i + 1)] = _CalculateMoranAutocorrelation( mol, lag=i + 1, propertylabel="V" ) return res
28aa0db26041ccddbf12c7d371b06183b9f14fac
3,649,452
def execute(cursor, query):
    """Secure execute for slow nodes"""
    while True:
        try:
            cursor.execute(query)
            break
        except Exception as e:
            print("Database query: {} {}".format(cursor, query))
            print("Database retry reason: {}".format(e))
    return cursor
b46338ab7304737d3b12cb1bd4d4dff9665d0f60
3,649,453
import zipfile


def zip_to_gdal_path(filepath):
    """
    Takes in a zip filepath and, if the zip contains ascii files,
    prepends '/vsizip' to the path so that they can be opened using GDAL
    without extraction.
    """
    zip_file_list = []
    if zipfile.is_zipfile(filepath):
        try:
            zip_file = zipfile.ZipFile(filepath)
            zip_file_contents = ['/vsizip/{0}/{1}'.format(filepath, zip_info_object.filename)
                                 for zip_info_object in zip_file.filelist
                                 if zip_info_object.filename.endswith('.asc')]
            zip_file_list.extend(zip_file_contents)
            zip_file.close()
        except zipfile.BadZipfile:
            pass
    return zip_file_list
9e9e44d6eb3022ebe982cc44284da76f56a4ddeb
3,649,454
def calculateMACD(prices_data):
    """Calculate the MACD of EMA15 and EMA30 of an asset

    Args:
        prices_data (dataframe): prices data

    Returns:
        macd (pandas series object): macd of the asset
        macd_signal (pandas series object): macd signal of the asset
    """
    ema15 = pd.Series(prices_data['prices'].ewm(span=15, min_periods=15).mean())
    ema30 = pd.Series(prices_data['prices'].ewm(span=30, min_periods=30).mean())
    macd = pd.Series(ema15 - ema30)
    macd_signal = pd.Series(macd.ewm(span=9, min_periods=9).mean())
    return macd, macd_signal
4a35619a1abf1f9e984cd334641d3f1f765ec352
3,649,455
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Logitech Squeezebox component."""
    return True
ca111ab23656110623567eea043ee2bfe4db83e5
3,649,456
def generateDwcaExportFiles(request): """ Generates DarwinCore-Archive files for the 'Export formats' page. """ error_message = None # if request.method == "GET": form = forms.GenerateDwcaExportFilesForm() contextinstance = {'form' : form, 'error_message' : error_message} contextinstance.update(csrf(request)) return render(request, "generate_dwca_exportfiles.html", contextinstance) elif request.method == "POST": # form = forms.GenerateDwcaExportFilesForm(request.POST) if form.is_valid(): # datatype_list = [] year_from = request.POST['year_from'] year_to = request.POST['year_to'] monitoring_type = request.POST['monitoring_type'] user = request.POST['user'] password = request.POST['password'] # if ('phytobenthos' in request.POST) and (request.POST['phytobenthos'] == 'on'): datatype_list.append('Epibenthos') # datatype_list.append('Phytobenthos') if ('phytoplankton' in request.POST) and (request.POST['phytoplankton'] == 'on'): datatype_list.append('Phytoplankton') if ('zoobenthos' in request.POST) and (request.POST['zoobenthos'] == 'on'): datatype_list.append('Zoobenthos') if ('zooplankton' in request.POST) and (request.POST['zooplankton'] == 'on'): datatype_list.append('Zooplankton') # if password != settings.APPS_VALID_USERS_AND_PASSWORDS.get(user, None): error_message = 'Not a valid user or password. Please try again...' # if error_message == None: sharkdata_core.SharkdataAdminUtils().generateDwcaExportFilesInThread( datatype_list, year_from, year_to, monitoring_type, user) # OK. if error_message == None: return HttpResponseRedirect("/sharkdataadmin") # contextinstance = {'form' : form, 'error_message' : error_message} contextinstance.update(csrf(request)) return render(request, "generate_dwca_exportfiles.html", contextinstance) # Not a valid request method. return HttpResponseRedirect("/sharkdataadmin")
cd8cda559bc13d8203a73655fd1c2b2f919c4b9a
3,649,457
def treeFromList(l):
    """
    Builds tree of SNode from provided list

    Arguments:
        l: the list with tree representation
    Return:
        the tuple with root node of the tree and the sentence index of last leaf node
    """
    root = SNode("S")
    s_index = 0
    for child in l:
        node = SNode(child["name"])
        _, s_index = treeFromDict(child, s_index, node)
        root.children.append(node)
    return (root, s_index)
4c08f87f9b0d3872574ef28b2eb15dbc51181ea6
3,649,458
import asyncio def with_event_loop(func): """ This method decorates functions run on dask workers with an async function call Namely, this allows us to manage the execution of a function a bit better, and especially, to exit job execution if things take too long (1hr) Here, the function func is run in a background thread, and has access to the dask schedular through the 'runner'. Critically, sumbission to this runner/client looks the same regardless of if it occurs in a sub-process/thread Mostly, this is a workaround to impliment some form of timeout when running very long-tasks on dask. While one cannot (or should not) kill the running thread, Dask will cleanup the child tasks eventually once all jobs finish. Usage: @with_dask_event_loop my_job(args, kwargs, runner=None): runner.submit(sleep, 10) """ async def wrapped(*args, **kwargs): loop = asyncio.get_event_loop() # Get our current dask worker, functions wrapped with this method can only be run on dask workers logger.info ("Initializing job... getting parent worker") try: worker = get_worker() except ValueError as exc: logger.error("Could not get dask worker!") raise RuntimeError("Data-processing job called without parent dask worker") except Exception as exc: logger.exception(f"Unknown exception when getting dask worker") logger.info (f"Successfully found worker {worker}") logger.info (f"Running job {func} with args: {args}, kwargs: {kwargs}") # Get our worker client, and pass as a dask client exector with worker_client() as runner: # We'll run our function in a background thread # executor = ProcessPoolExecutor(max_workers=1) # Add our runner to kwargs kwargs['runner'] = runner # Kick off the job job = loop.run_in_executor(worker.executor, partial(func, *args, **kwargs)) # Move on from job if things take more than hour done, pending = await asyncio.wait([job], timeout=3600) # Do some cleanup if len(pending) != 0: logger.warning ("Killing pending tasks!") for task in pending: task.cancel() # executor.shutdown(wait=False) # Get the return value if len(done) == 1: return_value = done.pop().result() else: return_value = None # Logg that we're done! logger.info (f"Done running job, returning {return_value}") return return_value def run_loop(*args, **kwargs): """ Uses async and threading capabilities Use of background thread causes this error on shutdown: ERROR - asyncio - task: <Task pending coro=<HTTP1ServerConnection._server_request_loop() running at /gpfs/mskmindhdp_emc/sw/env/lib64/python3.6/site-packages/tornado/http1connection.py:817> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f52e8259318>()]> cb=[IOLoop.add_future.<locals>.<lambda>() at /gpfs/mskmindhdp_emc/sw/env/lib64/python3.6/site-packages/tornado/ioloop.py:690]> Seems like some async task gets hung up in the child thread... """ loop = asyncio.new_event_loop() result = loop.run_until_complete(wrapped(*args, **kwargs)) loop.close() return result
7a977a47e6e20742767c71ab5c78d00d11896b90
3,649,459
import datetime

import pytz


def get_current_time():
    """Retrieve a Django compliant pre-formatted datetimestamp."""
    datetime_tz_naive = datetime.datetime.now()
    django_timezone = settings.TIME_ZONE
    datetime_tz = pytz.timezone(django_timezone).localize(datetime_tz_naive)
    return datetime_tz
60ee3acf1b7e805cf6f44cc066e5b452099a6306
3,649,460
def is_eval_epoch(cfg, cur_epoch):
    """
    Determine if the model should be evaluated at the current epoch.
    Args:
        cfg (CfgNode): configs. Details can be found in sgs/config/defaults.py
        cur_epoch (int): current epoch.
    """
    return (
        cur_epoch + 1
    ) % cfg.TRAIN.EVAL_PERIOD == 0 or cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH
d8abb04409879b88bdfd32cf323bcbea037ae630
3,649,461
def get_app_run_sleep():
    """Returns the entrypoint command that starts the app."""
    return get(cs.ODIN_CONF, cs.APP_SECTION, cs.RUN_SLEEP)
bf7fd5ce98823e3d2ccb1a80addb1d5eb4b85241
3,649,462
def plot_sphere(Radius, Point, part="Part::Feature", name="Sphere", grp="WorkObjects"): """ makeSphere(radius,[pnt, dir, angle1,angle2,angle3]) -- Make a sphere with a given radius By default pnt=Vector(0,0,0), dir=Vector(0,0,1), angle1=0, angle2=90 and angle3=360 """ if not(App.ActiveDocument.getObject(grp)): App.ActiveDocument.addObject("App::DocumentObjectGroup", grp) sphere = App.ActiveDocument.addObject(part, name) Sphere = Part.makeSphere(Radius, Point) sphere.Shape = Sphere App.ActiveDocument.getObject(grp).addObject(sphere) sphere_User_Name = sphere.Label Gui.ActiveDocument.getObject(sphere_User_Name).PointColor = (1.00, 0.67, 0.00) Gui.ActiveDocument.getObject(sphere_User_Name).LineColor = (1.00, 0.67, 0.00) Gui.ActiveDocument.getObject(sphere_User_Name).ShapeColor = (0.00, 0.33, 1.00) Gui.ActiveDocument.getObject(sphere_User_Name).Transparency = 75 return sphere_User_Name, sphere
be6a2d82d8a2a7268bfc203c0b15fa6d7b711ed2
3,649,463
async def my_job_async_gen(my_job_manager): """Fixture provides the job definition (async generator). Returns: The object yielded by the fixture `my_job_manager` with one extra attribute: `job` - job function decorated with `@job` and wrapped into `sync_to_async` for convenience (tests are async). """ @my_job_manager.job_manager_class.job() async def my_job_async_gen(yieldsteps, *, mustfail): """Job function which yields the progress.""" for i in range(yieldsteps): progress = { 'message': 'step %s or %s' % (i + 1, yieldsteps), 'payload': dict({'step': i + 1, 'total': yieldsteps}), 'readiness': (i + 1) / yieldsteps, } yield progress if mustfail: raise RuntimeError('Job failed, as requested!') my_job_manager.job_orig = my_job_async_gen my_job_manager.job = channels.db.database_sync_to_async(my_job_async_gen) return my_job_manager
cb3bb924e798ddd976a44ecddd8bcec700de7a99
3,649,464
def cluster(self, net_cvg, net_boxes): """ Read output of inference and turn into Bounding Boxes """ batch_size = net_cvg.shape[0] boxes = np.zeros([batch_size, MAX_BOXES, 5]) for i in range(batch_size): cur_cvg = net_cvg[i] cur_boxes = net_boxes[i] if (self.is_groundtruth): # Gather proposals that pass a threshold - propose_boxes, propose_cvgs, mask = gridbox_to_boxes( cur_cvg, cur_boxes, self) # Remove duplicates from ground truth new_array = list({tuple(row) for row in propose_boxes}) boxes_cur_image = np.asarray(new_array, dtype=np.float16) else: # Gather proposals that pass a threshold - propose_boxes, propose_cvgs, mask = gridbox_to_boxes(cur_cvg, cur_boxes, self) # Vote across the proposals to get bboxes boxes_cur_image = vote_boxes(propose_boxes, propose_cvgs, mask, self) boxes_cur_image = np.asarray(boxes_cur_image, dtype=np.float16) if (boxes_cur_image.shape[0] != 0): [r, c] = boxes_cur_image.shape boxes[i, 0:r, 0:c] = boxes_cur_image return boxes
7f30a79911db3bcc1a09e4197f4f1d1adb73aaa2
3,649,466
def _getMissingResidues(lines):
    """Returns the missing residues, if applicable."""
    try:
        missing_residues = []
        for i, line in lines['REMARK 465']:
            if len(line.split()) == 5 and int(line.split()[4]) > 0:
                missing_residues.append("{0:<3s} {1}{2:>4d}".format(line.split()[2],
                                                                    line.split()[3],
                                                                    int(line.split()[4])))
        return missing_residues
    except:
        return "no missing residue information"
071c6d792bc703d0379774eb19c09d9599f17c66
3,649,467
def register_single_sampler(name):
    """
    A decorator with a parameter.
    This decorator returns a function which the class is passed.
    """
    name = name.lower()

    def _register(sampler):
        if name in _registered_single_sampler:
            raise ValueError("Name {} already chosen, choose a different name.".format(name))
        _registered_single_sampler[name] = sampler
        return sampler

    return _register
7511e4da51a4078df17f6733855f879b1afb2ca8
3,649,468
def export_txt(obj, file_name, two_dimensional=False, **kwargs): """ Exports control points as a text file. For curves the output is always a list of control points. For surfaces, it is possible to generate a 2-D control point output file using ``two_dimensional`` flag. Please see the supported file formats for more details on the text file format. Please see :py:func:`.exchange.import_txt()` for detailed description of the keyword arguments. :param obj: a curve or a surface object :type obj: abstract.Curve, abstract.Surface :param file_name: file name of the text file to be saved :type file_name: str :param two_dimensional: type of the text file (only works for Surface objects) :type two_dimensional: bool :raises IOError: an error occurred writing the file """ # Check if the user has set any control points if obj.ctrlpts is None or len(obj.ctrlpts) == 0: raise ValueError("There are no control points to save!") # Check the usage of two_dimensional flag if isinstance(obj, abstract.Curve) and two_dimensional: # Silently ignore two_dimensional flag two_dimensional = False # File delimiters col_sep = kwargs.get('col_separator', ";") sep = kwargs.get('separator', ",") content = exch.export_text_data(obj, sep, col_sep, two_dimensional) return exch.write_file(file_name, content)
e3d2cfa787502190ae3897e67b3daed1a0becacb
3,649,469
from .client import PostgresDialect
from ibis.sql.alchemy import to_sqlalchemy


def compile(expr):
    """
    Force compilation of expression for the Postgres target
    """
    return to_sqlalchemy(expr, dialect=PostgresDialect)
32d6c8e6c7fe9cfd56824e7591a88617833479c7
3,649,470
def build_bar_chart(x_axis_name, request, **kwargs): """This abstract function is used to call submethods/specific model""" base_query = request.GET.get("base_query", None) bar_chart_input = [] if base_query == 'group_users': bar_chart_input = group_users_per_column(x_axis_name) elif base_query == 'group_job_user': user_id = request.GET.get("user_id", None) bar_chart_input = user_jobs_groups(x_axis_name, user_id) elif base_query == 'popular_skills_market': limit_skills = int(request.GET.get("limit_skills", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_skills(asc, limit_skills) elif base_query == 'popular_courses_market': limit_skills = int(request.GET.get("limit_courses", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_courses(asc, limit_skills) elif base_query == 'popular_skills_users': limit_skills = int(request.GET.get("limit_skills", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_user_skills(asc, limit_skills) elif base_query == 'popular_courses_users': limit_skills = int(request.GET.get("limit_courses", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_user_courses(asc, limit_skills) elif base_query == 'group_course_professor': limit_professors = int(request.GET.get("limit_professors", 10)) asc_ordering = request.GET.get("asc", "False") bar_chart_input = group_courses_users(limit_professors, asc_ordering) elif base_query == 'salary_info': y_column = request.GET.get('y_column', None) y_var_names = request.GET.getlist("y_var_names[]", []) agg = request.GET.get('agg', 'mean') if y_column and y_var_names: bar_chart_input = salary_information(data=y_var_names, y_column=y_column, aggregation=agg) print(bar_chart_input) else: bar_chart_input = salary_information(aggregation=agg) elif base_query == 'skill_demand_per_column': limit_results = int(request.GET.get("limit_results", 10)) asc_ordering = request.GET.get("asc", "False") y_var_names = request.GET.getlist("y_var_names[]", []) column = request.GET.get("x_axis_name", "specialization") bar_chart_input = skill_demand_per_column(asc_ordering, y_var_names, limit_results, column) elif base_query == 'user_grades': user_id = request.GET.get('user_id', None) bar_chart_input = user_grades(user_id) elif base_query == 'courses_avg_grades': courses = request.GET.getlist('courses[]', []) print(courses) if courses: bar_chart_input = get_avg_course_names(courses) return bar_chart_input
b23c0ce360c5022dfcb5edf36f3aef90b5ea8ed9
3,649,471
def diff_hours(t1, t2):
    """ Number of hours between two dates """
    return (t2 - t1).days * hours_per_day + (t2 - t1).seconds / seconds_per_hour
de674b6fca138da49291af4e85a043259d32e525
3,649,472
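# Usage sketch for diff_hours above (added illustration, not part of the original
# record). The module-level constants it relies on are assumed to be:
from datetime import datetime

hours_per_day = 24
seconds_per_hour = 3600

print(diff_hours(datetime(2021, 1, 1, 0, 0), datetime(2021, 1, 2, 6, 0)))  # -> 30.0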
def fatorial(num=1, show=False):
    """
    Computes the factorial of a number.
    :param num: the number whose factorial is computed.
    :param show: (optional) whether or not to display the calculation.
    :return: the factorial value.
    """
    f = 1
    c = num
    if show:
        while c > 0:
            print(c, end='')
            print(' x ' if c > 1 else ' = ' f'{f}', end='')
            f *= c
            c -= 1
        return f
    else:
        while c > 0:
            f *= c
            c -= 1
        return f
0755528d43731a47a43950d5be62f265d9941488
3,649,473
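# Usage sketch for fatorial above (added illustration, not part of the original record):
print(fatorial(5))      # -> 120
fatorial(5, show=True)  # prints "5 x 4 x 3 x 2 x 1 = 120" (no trailing newline) and returns 120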
def get_season(msg, info_fields):
    """find season in message"""
    seasonDICT = {'2016': ['二零一六球季', '二零一六賽季', '2016球季', '2016賽季', '2016年', '2016'],
                  '2017': ['二零一七球季', '二零一七賽季', '2017球季', '2017賽季', '2017年', '2017'],
                  '2018': ['二零一八球季', '二零一八賽季', '2018球季', '2018賽季', '2018年', '2018'],
                  '2019': ['二零一九球季', '二零一九賽季', '2019球季', '2019賽季', '2019年', '2019'],
                  '2020': ['二零二零球季', '二零二零賽季', '2020球季', '2020賽季', '2020年', '2020']}

    for season_key in seasonDICT.keys():
        for year in seasonDICT[season_key]:
            if year in msg:
                info_fields['season'] = season_key
                msg = msg.replace(year, '').strip()
                return msg, info_fields
    return msg, info_fields
8b5dfceafe45d9ba325c519b24dde03a20d37655
3,649,474
import numpy

import cv2


def gray_img(img: 'numpy.ndarray'):
    """
    Convert the loaded image to grayscale.
    :param img: image array object read via cv2.imread(imgPath)
    :return: the grayscale image
    """
    grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return grayImage
e0497d3ec5fa4aed4def293de5981bb0e73ef3e7
3,649,475
def class_acc(label_threshold_less): """ Wrapper function to return keras accuracy logger Args: label_threshold_less (int): all label IDs strictly less than this number will be ignored in class accuracy calculations Returns: argument_candidate_acc (function) """ def argument_candidate_acc(y_true, y_pred): """ Function which returns argument candidate accuracy using the Keras backend Args: y_true (np.ndarray): true labels y_pred (np.ndarray): predicted labels Returns: class_accuracy (int): simple accuracy of argument candidates """ class_id_true = K.cast(y_true, 'int64') class_id_preds = K.argmax(y_pred, axis=-1) accuracy_mask = K.cast(K.less(class_id_preds, label_threshold_less), 'float32') accuracy_mask = 1 - accuracy_mask class_acc_tensor = ( K.cast(K.equal(class_id_true, class_id_preds), 'float32') * accuracy_mask) class_accuracy = (K.sum(class_acc_tensor) / K.maximum(K.sum(accuracy_mask), 1)) return class_accuracy return argument_candidate_acc
6a5dc2806fb223c4f625aa033db808d367676a45
3,649,476
def reduce_30Hz(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, scan_index=1, template_reference=None): """ Perform 30Hz reduction @param meas_run_30Hz: run number of the data we want to reduce @param ref_run_30Hz: run number of the reference data, take with the same config @param ref_data_60Hz: file path of the reduce data file at 60Hz @param template_30Hz: file path of the template file for 30Hz @param scan_index: scan index to use within the template. """ # Load the template template_data = read_template(template_30Hz, scan_index) # Reduce the quartz at 30Hz ref_ws_30Hz = api.LoadEventNexus("REF_L_%s" % ref_run_30Hz) # Reduce the sample data at 30Hz meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) # Load the 60Hz reference data data_60Hz = np.loadtxt(ref_data_60Hz).T return reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, scan_index=scan_index, template_reference=template_reference)
b58dc7a344ca84800935993f4d747cdee9245fbe
3,649,477
def sRGB_to_sd_Mallett2019(RGB): """ Recovers the spectral distribution of given *sRGB* colourspace array using *Mallett and Yuksel (2019)* method. Parameters: ----------- RGB : array_like, (3,) *sRGB* colourspace array. Do not apply a transfer function to the *RGB* values. Returns ------- SpectralDistribution Recovered reflectance. """ basis = MultiSpectralDistributions(BASIS_sRGB_MALLETT2019, SPECTRAL_SHAPE_sRGB_MALLETT2019.range()) return RGB_to_sd_Mallett2019(RGB, basis)
8251c864db23d50ad54c35ffc306cf0c5d279fc7
3,649,478
def stationary_traffic_matrix(topology, mean, stddev, gamma, log_psi, n, max_u=0.9, origin_nodes=None, destination_nodes=None): """ Return a stationary sequence of traffic matrices. The sequence is generated by first generating a single matrix assigning traffic volumes drawn from a lognormal distribution and assigned to specific origin-destination pairs using the Ranking Metrics Heuristic method proposed by Nucci et al. [2]_. Then, all matrices of the sequence are generated by adding zero-mean normal fluctuation in the traffic volumes. This process was originally proposed by [2]_ Stationary sequences of traffic matrices are generally suitable for modeling network traffic over short periods (up to 1.5 hours). Over longer periods, real traffic exhibits diurnal patterns and they are better modelled by cyclostationary sequences Parameters ---------- topology : topology The topology for which the traffic matrix is calculated. This topology can either be directed or undirected. If it is undirected, this function assumes that all links are full-duplex. mean : float The mean volume of traffic among all origin-destination pairs stddev : float The standard deviation of volumes among all origin-destination pairs. gamma : float Parameter expressing relation between mean and standard deviation of traffic volumes of a specific flow over the time log_psi : float Parameter expressing relation between mean and standard deviation of traffic volumes of a specific flow over the time n : int Number of matrices in the sequence max_u : float, optional Represent the max link utilization. If specified, traffic volumes are scaled so that the most utilized link of the network has an utilization equal to max_u. If None, volumes are not scaled, but in this case links may end up with an utilization factor greater than 1.0 origin_nodes : list, optional A list of all nodes which can be traffic sources. If not specified all nodes of the topology are traffic sources destination_nodes : list, optional A list of all nodes which can be traffic destinations. If not specified all nodes of the topology are traffic destinations Returns ------- tms : TrafficMatrixSequence References ---------- .. [2] Nucci et al., The problem of synthetically generating IP traffic matrices: initial recommendations, ACM SIGCOMM Computer Communication Review, 35(3), 2005 """ tm_sequence = TrafficMatrixSequence() static_tm = static_traffic_matrix(topology, mean, stddev, max_u=None, origin_nodes=origin_nodes, destination_nodes=destination_nodes) volume_unit = static_tm.attrib['volume_unit'] mean_dict = static_tm.flows() psi = exp(log_psi) if psi == 0.0: raise ValueError("The value of log_psi provided is too small and " "causes psi=0.0, which makes the standard deviation " "of random fluctuation to become infinite. Try with " "a greater value of log_psi") std_dict = {(o, d): (m / psi) ** (1.0 / gamma) for (o, d), m in mean_dict.items()} if any(isinf(std) for std in std_dict.values()): raise ValueError("The value of log_psi or gamma provided are too " "small and causes the standard deviation of random " "fluctuations to become infinite. 
Try with a greater " "value of log_psi and/or gamma") flows = {} for o, d in mean_dict: # Implementation without Numpy: # flows[(o, d)] = [max([0, normalvariate(mean_dict[(o, d)], # std_dict[(o, d)])]) for _ in range(n)] flows[(o, d)] = [max((0, normal(mean_dict[(o, d)], std_dict[(o, d)])))\ for _ in range(n)] for i in range(n): traffic_marix = TrafficMatrix(volume_unit=volume_unit) for o, d in mean_dict: traffic_marix.add_flow(o, d, flows[(o, d)][i]) tm_sequence.append(traffic_marix) if max_u is not None: if origin_nodes is not None: shortest_path = dict( (node, nx.single_source_dijkstra_path(topology, node, weight='weight')) for node in origin_nodes) else: shortest_path = dict(nx.all_pairs_dijkstra_path(topology, weight='weight')) current_max_u = max((max(link_loads(topology, tm_sequence.get(i), shortest_path ).values()) for i in range(n))) norm_factor = max_u / current_max_u for i in range(n): for o, d in mean_dict: tm_sequence.matrix[i].flow[o][d] *= norm_factor return tm_sequence
ec1267820804dfaec09a9cd0a4fc5c30f5cac329
3,649,479
def lsst_exposure_time(bands=''):
    """ Sample from the LSST exposure time distribution """
    dist = {'u': 15.0, 'g': 15.0, 'r': 15.0, 'i': 15.0, 'z': 15.0, 'Y': 15.0}
    return [dist[b] for b in bands.split(',')]
1374512a73b9a0eaf3b1757b09cfdd519fba520c
3,649,480
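# Usage sketch for lsst_exposure_time above (added illustration, not part of the
# original record); every listed band maps to a fixed 15 s exposure:
print(lsst_exposure_time('u,g,r'))  # -> [15.0, 15.0, 15.0]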
def bin2hexstring(bin_str):
    """
    Convert a binary string to a hexadecimal string (4 binary digits per hex digit).

    :param bin_str: binary string
    :return: hexadecimal string
    """
    bin_len = len(bin_str)
    left = 0
    right = 4
    re_str = hex(int(bin_str[left:right], 2))[2:]

    for i in range(right, bin_len, 4):
        left = right
        right += 4
        re_str += hex(int(bin_str[left:right], 2))[2:]
    return re_str
823ba4ef86ebcf7e30a29c3718768c6a654acad5
3,649,481
def check_dict_word(word, target):
    """
    Check dict word. If one character is not in the searching word,
    then do not add the word to python_dict.
    :param word: str, word in dictionary.txt.
    :param target: str, the searching word
    :return: True if every character of word is in the searching word.
    """
    # Level one: check len
    if len(word) != len(target):
        return False
    # Check all the word: contains -> contains, contais
    for ch in word:
        if ch not in target:
            return False
    # Only report a match once every character has been checked.
    return True
91751f580aa74b7340946f0642c24e11dc19ff32
3,649,482
def get_memory_in_GB(memory_str):
    """Returns the memory value in GB from a given string in kB"""
    try:
        return '{0} GB'.format(int(memory_str[:-2]) / 1000000)
    except (ValueError, TypeError):
        return ''
4c94c00a5e800ed807f4c3a31fe89e90f28260fe
3,649,483
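# Usage sketch for get_memory_in_GB above (added illustration, not part of the original
# record). The last two characters ("kB") are stripped before converting, and the
# result assumes Python 3's true division:
print(get_memory_in_GB('8000000kB'))  # -> '8.0 GB'
print(get_memory_in_GB('n/a'))        # -> ''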
def get_slot_dict(token_present=False):
    """Compiles a dictionary of the available slots

    :returns: A python dictionary of the available slots
    """
    ret, slot_list = c_get_slot_list(token_present)
    if (ret != 0):
        return ret
    slot_dict = {}
    ret = CKR_OK
    for slot in slot_list:
        ret, data = c_get_slot_info(slot)
        if ret != CKR_OK:
            LOG.error("C_GetSlotInfo failed at slot %s", slot)
            break
        slot_dict[slot] = data

    return ret, slot_dict
11fd85b408dbbd50e003c9458f865c24ca6f4677
3,649,484
def load_segment_by_patient(patient):
    """
    Load the pixels for a patient and segment all of them
    """
    pixels = load_pixels_by_patient(patient)
    segments = []
    for pixel in pixels:
        segments.append(segment(pixel))
    return np.array(segments)
f032eb68109707197a05ad1a457d52b36a2f99ed
3,649,485
def filehash(thisfile, filesha):
    """
    First parameter, filename
    Returns SHA1 sum as a string of hex digits
    """
    try:
        filehandle = open(thisfile, "rb")
    except:
        return ""
    data = filehandle.read()
    while data != b"":
        filesha.update(data)
        data = filehandle.read()
    filehandle.close()
    return filesha.hexdigest()
bb6c965d5a0c5f332320d2426b066b4fa85f77e3
3,649,486
def show_date( enode, _shell='vtysh', _shell_args={ 'matches': None, 'newline': True, 'timeout': None, 'connection': None } ): """ Display system date information This function runs the following vtysh command: :: # show date :param dict kwargs: arguments to pass to the send_command of the vtysh shell. :param str _shell: shell to be selected :param dict _shell_args: low-level shell API arguments :return: A dictionary as returned by :func:`topology_lib_vtysh.parser.parse_show_date` """ cmd = [ 'show date' ] shell = enode.get_shell(_shell) shell.send_command( (' '.join(cmd)).format(**locals()), **_shell_args ) result = shell.get_response( connection=_shell_args.get('connection', None) ) return parse_show_date(result)
f7b650deca043834caa90fecba1d4b25d5e8b1cc
3,649,488
def show_score(connection, amt):
    """
    show_score

    :param connection: :class:`sqlite3`
    :param amt: int
    :return: int
    """
    sand = read_sum(connection, "sand", amt)
    putt = read_sum(connection, "putt", amt)
    return sand + putt
26ff2fe98cd24d8480c7b4172cd2fcfc2b1d85fd
3,649,489
import time


def current_time():
    """
    current_time() -> str

    >>> current_time()
    14:28:04

    Returns the current local time in the 24-hour clock system.
    """
    return time.strftime('%X', (time.localtime()))
9ab4ed21d1480e1923c8a55b8f213ff47cb8adcc
3,649,490
def kernel_s_xz2(y, x, z, zc, yp, xp, zp):
    """
    Kernel for xz-component of stress in the semi-infinite space domain (2nd system)
    """
    # Y = y - yp
    # X = x - xp
    # Z = z - zp - 2 * zc
    Y = yp - y
    X = xp - x
    Z = zp - z + 2 * zc
    rho = np.sqrt(Y ** 2 + X ** 2 + Z ** 2)
    kernel = (
        safe_log(Y + rho)
    )
    return kernel
c4b4e89584acf5a6af2c91686cbfe542d5942a33
3,649,491
def prepare_hex_string(number, base=10):
    """
    Gets an int number, and returns the hex representation with even length
    padded to the left with zeroes
    """
    int_number = int(number, base)
    hex_number = format(int_number, 'X')

    # Takes the string and pads to the left to make sure the number of characters is even
    justify_hex_number = hex_number.rjust((len(hex_number) % 2) + len(hex_number), '0')

    return justify_hex_number
e6efeca87d5f0a603c8fdb65fd7e2d07cc491766
3,649,492
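# Usage sketch for prepare_hex_string above (added illustration, not part of the
# original record); odd-length hex values gain a single leading zero:
print(prepare_hex_string('255'))      # -> 'FF'
print(prepare_hex_string('256'))      # -> '0100'
print(prepare_hex_string('ff', 16))   # -> 'FF'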
def parse_function(image_size, raw_image_key_name): """Generate parse function for parsing the TFRecord training dataset. Read the image example and resize it to desired size. Args: image_size: int, target size to resize the image to raw_image_key_name: str, name of the JPEG image in each TFRecord entry Returns: A map function to use with tf.data.Dataset.map() . """ def func(example_proto): """A generator to be used as representative_dataset for TFLiteConverter.""" image_raw = tf.io.parse_single_example( example_proto, features={raw_image_key_name: tf.FixedLenFeature([], tf.string)}, ) image = tf.image.decode_jpeg(image_raw[raw_image_key_name]) image = tf.expand_dims(image, axis=0) image = tf.image.resize_bilinear(image, (image_size, image_size)) image = tf.squeeze(image, axis=0) image = image / 255.0 return image return func
549651d152836b8703eaac70b635fbb12158f429
3,649,493
def clean_coverage(x):
    """
    Cleans the coverage polygons by removing small multipolygon shapes.

    Parameters
    ---------
    x : polygon
        Feature to simplify.

    Returns
    -------
    MultiPolygon : MultiPolygon
        Shapely MultiPolygon geometry without tiny shapes.
    """
    # if it's a single polygon, just return the polygon geometry
    if x.geometry.geom_type == 'Polygon':
        if x.geometry.area > 1e7:
            return x.geometry

    # if it's a multipolygon, we start trying to simplify and
    # remove shapes if it's too big.
    elif x.geometry.geom_type == 'MultiPolygon':
        threshold = 1e7

        # save remaining polygons as new multipolygon for
        # the specific country
        new_geom = []
        for y in x.geometry:
            if y.area > threshold:
                new_geom.append(y)

        return MultiPolygon(new_geom)
a6a82975aabc3ceb90f4b1eab5a0978df048f647
3,649,494
def send():
    """For testing: Example of activating a background task."""
    log.info("executing a background task")
    bgtasks.send_email.spool(email="[email protected]",
                             subject="Hello world!",
                             template="welcome.html")
    return jsonify({"reply": "background task will start"}), 200
5d5fad2025b55e1751c99ef3260a0883795bf469
3,649,495
import datetime


def get_today_month_and_day() -> str:
    """Returns today's month and day in the format: %m-%d"""
    return datetime.date.today().strftime("%m-%d")
8358a443c7fec2a7a832c55281f297d8b3573579
3,649,496
def climate_zone_to_tmy3_stations(climate_zone):
    """Return TMY3 weather stations falling within the given climate zone.

    Parameters
    ----------
    climate_zone : str
        String representing a climate zone.

    Returns
    -------
    stations : list of str
        Strings representing TMY3 station ids.
    """
    return _load_climate_zone_to_tmy3_stations_index().get(climate_zone, None)
8ad2d6a378d7350221655cf323ec42ca66271fb9
3,649,497
from pathlib import Path
import re


def artist_html_file_path(artist) -> Path:  # Used
    """Return absolute artists HTML file path.

    Parameters
    ----------
    artist
        Artist name.

    Returns
    -------
    :class:`Path`
        Absolute artists HTML file path.
    """
    artist_file_name = re.sub(r"[\s/]", "_", artist)
    return artists_dir_path().joinpath(f"{artist_file_name}.html")
9f6ae1849905f820febd8d45bd7583d2911fcaf2
3,649,498
def _deepfoolx_batch(model, epochs, eta, clip_min, clip_max): """DeepFool for multi-class classifiers in batch mode. """ original_model_X = model.X y0 = tf.stop_gradient(model.prob) B, ydim = tf.shape(y0)[0], y0.get_shape().as_list()[1] k0 = tf.argmax(y0, axis=1, output_type=tf.int32) k0 = tf.stack((tf.range(B), k0), axis=1) xshape = original_model_X.get_shape().as_list()[1:] xdim = _prod(xshape) perm = list(range(len(xshape) + 2)) perm[0], perm[1] = perm[1], perm[0] def _cond(i, z): return tf.less(i, epochs) def _body(i, z): xadv = tf.clip_by_value(original_model_X + z*(1+eta), clip_min, clip_max) model.X = xadv model.build_arch() model.normalize_scores() y = model.prob gs = [tf.gradients(y[:, j], xadv)[0] for j in range(ydim)] g = tf.stack(gs, axis=0) g = tf.transpose(g, perm) yk = tf.expand_dims(tf.gather_nd(y, k0), axis=1) gk = tf.expand_dims(tf.gather_nd(g, k0), axis=1) a = tf.abs(y - yk) b = g - gk c = tf.norm(tf.reshape(b, [-1, ydim, xdim]), axis=-1) # Assume 1) 0/0=tf.nan 2) tf.argmin ignores nan score = a / c ind = tf.argmin(score, axis=1, output_type=tf.int32) ind = tf.stack((tf.range(B), ind), axis=1) si, bi = tf.gather_nd(score, ind), tf.gather_nd(b, ind) si = tf.reshape(si, [-1] + [1]*len(xshape)) dx = si * bi return i+1, z+dx _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(original_model_X)], name='_deepfoolx_batch', back_prop=False) return noise
346e7fc18c634bfe240c85301b6c54ce0f4201a7
3,649,499
import re


def tokenize(text):
    """
    The function to tokenize and lemmatize the text.

    Inputs:
        text: the text which needs to be tokenized

    Outputs:
        tokens: tokens which can be used in machine learning
    """
    stop_words = stopwords.words("english")
    lemmatizer = WordNetLemmatizer()

    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    tokens = word_tokenize(text)
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]

    return tokens
b41e66c4a065d898b2c3cf05fa261f6100d0f413
3,649,500
def remove_task(name: str):
    """
    Delete a task based on information "name":

    - **name**: each task must have a name
    """
    name_idx = _db_has_name(name)
    if name_idx is None:
        raise HTTPException(status_code=400, detail={"message": "name doesn't exists"})
    else:
        del db["tasks"][name_idx]
        _write_json()
        return name
4190e3e6a0ac55defe5ba6dcac3036f7c7df290b
3,649,501
def set_dj_definition(cls, type_map: dict = None) -> None: """Set the definition property of a class by inspecting its attributes. Params: cls: The class whose definition attribute should be set type_map: Optional additional type mappings """ # A mapping between python types and DataJoint types _type_map = { "int": "int", "str": "varchar(256)", "float": "float", "Quantity": "float", "datetime": "datetime", "datetime.datetime": "datetime", "bool": "tinyint", "list": "longblob", } # A list of python types which have no DataJoint # equivalent and so are unsupported unsupported = [dict] if type_map: _type_map.update(type_map) dj_def = "%s_id: int auto_increment\n---\n" % cls.__name__.lower() cls_items = cls.__annotations__.items() for attr, type_hint in cls_items: if type_hint in unsupported: continue name = getattr(type_hint, "__name__", type_hint) default = getattr(cls, attr) if isinstance(default, str): default = '"%s"' % default elif isinstance(default, bool): default = int(default) else: default = "NULL" if getattr(type_hint, '_name', "") == 'Dict': cls = handle_dict(cls, _type_map, attr, type_hint) continue elif name in _type_map: dj_def += "%s = %s : %s\n" % (attr, default, _type_map[name]) else: dj_def += "-> %s\n" % name cls.definition = dj_def return cls
9335e1b4413ce03f98ca885bcf4a888af9d014a1
3,649,502