Dataset columns: content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def test_choice_with_distribution(): """ Make sure that choice_with_distribution basically doesn't crash, and has the correct type of return value. However, do not test the distribution of return values unless we are certain of the value. The goal with this test is to NOT allow randomness to influence the result. """ assert isinstance(choice_with_distribution([('a', 1), ('b', 2), ('c', 3)]), str) assert choice_with_distribution([('a', 1), ('b', 2), ('c', 3)]) in ['a', 'b', 'c'] assert choice_with_distribution([('a', 0), ('b', 0), ('c', 1)]) == 'c' assert choice_with_distribution([('a', 0), ('b', 0), ('c', 0.5)]) == 'c' assert choice_with_distribution([('a', 1)]) == 'a' assert choice_with_distribution([('a', 0.5)]) == 'a' with pytest.raises(ValueError): choice_with_distribution([])
5,354,500
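The helper under test is not included in this row; below is a minimal sketch of an implementation that would satisfy the assertions above, using the standard-library random.choices. This is an assumption for illustration, not the project's actual function.
import random

def choice_with_distribution(pairs):
    """Pick a value from (value, weight) pairs; hypothetical stand-in."""
    if not pairs:
        raise ValueError("no choices given")
    values, weights = zip(*pairs)
    return random.choices(values, weights=weights, k=1)[0]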
async def sleepybot(time): """For .sleep command, let the userbot snooze for a few seconds.""" counter = int(time.pattern_match.group(1)) await time.edit("**I'm in a bad mood and napping...**") if BOTLOG: str_counter = time_formatter(counter) await time.client.send_message( BOTLOG_CHATID, f"You put the bot to sleep for {str_counter}.", ) sleep(counter) await time.edit("**OK, I'm awake now.**")
5,354,501
def root_mean_square_ffinalise(out, sub_samples=None): """Divide the weighted sum by the sum of weights and take the square root. Also mask out any values derived from a too-small sample size. :Parameters: out: 3-`tuple` of `numpy.ndarray` An output from `root_mean_square_fpartial`. sub_samples: optional :Returns: 2-`tuple` of `numpy.ndarray` The sample size and the RMS. """ N, avg = mean_ffinalise(out, sub_samples=sub_samples) avg **= 0.5 return asanyarray(N, avg)
5,354,502
def md5sum_fileobj(f, start = 0, end = None): """Accepts a file object and returns the md5sum.""" m = hashlib.md5() for block in file_reader(f, start, end): assert block != "", "Got an empty read" m.update(block) return m.hexdigest()
5,354,503
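md5sum_fileobj relies on a file_reader helper that is not part of this row. A small usage sketch with a hypothetical chunked reader, checked against hashlib directly:
import hashlib
import io

def file_reader(f, start=0, end=None, chunk_size=65536):
    # hypothetical stand-in: yield chunks from start up to end (or EOF)
    f.seek(start)
    remaining = None if end is None else end - start
    while remaining is None or remaining > 0:
        size = chunk_size if remaining is None else min(chunk_size, remaining)
        block = f.read(size)
        if not block:
            break
        yield block
        if remaining is not None:
            remaining -= len(block)

payload = b"hello world"
assert md5sum_fileobj(io.BytesIO(payload)) == hashlib.md5(payload).hexdigest()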
def get_localization_scores(predicted_start: int, predicted_end: int, true_start: int, true_end: int): """ exp(-abs(t_pred_start-t_start)/(t_end-t_start)) exp(-abs(t_pred_end-t_end)/(t_end-t_start)) :param predicted_start: :param predicted_end: :param true_start: :param true_end: """ if true_end - true_start <= 0: return 0, 0 base = math.exp(1 / (true_start - true_end)) return base ** abs(predicted_start - true_start), base ** abs(predicted_end - true_end)
5,354,504
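A short worked check of the formula above with hypothetical numbers: for a true span of [10, 20] and predictions (12, 19), the scores are exp(-2/10) and exp(-1/10).
import math

start_score, end_score = get_localization_scores(12, 19, 10, 20)
assert math.isclose(start_score, math.exp(-abs(12 - 10) / (20 - 10)))
assert math.isclose(end_score, math.exp(-abs(19 - 20) / (20 - 10)))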
def urlopen(url, data=None, proxies=None): """urlopen(url [, data]) -> open file-like object""" global _urlopener if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: with support.check_warnings( ('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning)): opener = urllib.request.FancyURLopener() _urlopener = opener else: opener = _urlopener if data is None: return opener.open(url) else: return opener.open(url, data)
5,354,505
def json_compatible_key(key: str) -> str: """As defined in :pep:`566#json-compatible-metadata`""" return key.lower().replace("-", "_")
5,354,506
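A quick usage check for json_compatible_key above; the expected output follows directly from PEP 566's lowercasing and dash-to-underscore rule.
assert json_compatible_key("Description-Content-Type") == "description_content_type"
assert json_compatible_key("Requires-Dist") == "requires_dist"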
def get_transitions(jira_host, username, password, issue_id): """ Returns transitions of the issue. jira_host -- JIRA host to contact username -- JIRA username with administrative permissions. password -- password of the username. issue_id -- id of the issue which transitions should be returned. """ headers = get_auth_header(username, password) response = https_helper.get(jira_host, issue_transitions_path % issue_id, None, headers) if response.status != 200: logging.debug('Did not find any transitions for issue: %s', issue_id) return [] return json.loads(response.read())['transitions']
5,354,507
def _scale_func(k): """ Return a lambda function that scales its input by k Parameters ---------- k : float The scaling factor of the returned lambda function Returns ------- Lambda function """ return lambda y_values_input: k * y_values_input
5,354,508
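A usage sketch for _scale_func above; since the returned lambda just multiplies by k, it also works element-wise on NumPy arrays (shown here as an illustration, not a documented guarantee).
import numpy as np

double = _scale_func(2.0)
assert double(3.0) == 6.0
assert np.allclose(double(np.array([1.0, 2.5])), [2.0, 5.0])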
def focal_agents(dest, weight, source, fail=False): """ dest: point property set (determines property return type) weight: field property (weight/mask) source: point property (values to gather from) """ # hack rename... source_point = dest source_field = weight dest_prop = source if not isinstance(source_point.space_domain, Points): msg = _color_message(f'Property "{source_point.name}" must be of domain type Point') raise TypeError(msg) if not isinstance(source_field.space_domain, Areas): msg = _color_message(f'Property "{source_field.name}" must be of domain type Area') raise TypeError(msg) if not isinstance(dest_prop.space_domain, Points): msg = _color_message(f'Property "{dest_prop.name}" must be of domain type Point') raise TypeError(msg) dst_crs = source_point.space_domain.epsg field_crs = source_field.space_domain.epsg point_crs = dest_prop.space_domain.epsg cnt = 1 for arg in [dst_crs, field_crs, point_crs]: if not arg: msg = _color_message(f'Operation requires a CRS, set the EPSG code of the phenomenon (argument {cnt})') raise ValueError(msg) cnt += 1 if field_crs != point_crs: msg = _color_message(f'Incompatible CRS {field_crs} != {point_crs}') raise ValueError(msg) assert dst_crs == field_crs tmp_prop = Property('emptyfocal_agents', dest.uuid, dest.space_domain, dest.shapes, numpy.nan) #spatial_ref = osr.SpatialReference() #spatial_ref.ImportFromEPSG(point_crs) #ds = ogr.GetDriverByName('MEMORY').CreateDataSource('mem') ## Second we make a point feature from which we will obtain the locations ## Holding all objects #lyr_dst = ds.CreateLayer('locations', geom_type=ogr.wkbPoint, srs=spatial_ref) #field = ogr.FieldDefn('value', ogr.OFTReal) #lyr_dst.CreateField(field) #for idx, p in enumerate(dest_prop.space_domain): #point = ogr.Geometry(ogr.wkbPoint) #point.AddPoint(p[0], p[1]) #feat = ogr.Feature(lyr_dst.GetLayerDefn()) #feat.SetGeometry(point) #try: #val = dest_prop.values()[idx][0] #except: #val = dest_prop.values()[idx] #feat.SetField('value', float(val)) #lyr_dst.CreateFeature(feat) #lyr_dst = None #lyr_dst = ds.GetLayer('locations') nr_locs = dest_prop.nr_objects todos = [] for idx, p in enumerate(source_point.space_domain): values_weight = source_field.values()[idx] extent = source_field.space_domain._extent(idx) d_domain = dest_prop.space_domain d_values = dest_prop.values() item = (idx, 'tmp_prop', nr_locs, values_weight, extent, 'spatial_ref', 'lyr_dst', 'operation', fail, 'dprop', point_crs, d_domain, d_values) todos.append(item) cpus = multiprocessing.cpu_count() tasks = len(todos) chunks = tasks // cpus with futures.ProcessPoolExecutor(max_workers=cpus) as ex: results = ex.map(_focal_agents, todos, chunksize=chunks) for result in results: tmp_prop.values().values[result[0]] = result[1] return tmp_prop # sequential # nr_locs = dest_prop.nr_objects point_values = numpy.empty(nr_locs) point_values.fill(numpy.nan) for idx, p in enumerate(source_point.space_domain): values_weight = source_field.values()[idx] extent = source_field.space_domain._extent(idx) # Raster for points to query nr_rows = extent[4] nr_cols = extent[5] cellsize = math.fabs(extent[2] - extent[0]) / nr_cols minX = extent[0] maxY = extent[3] #if ds.GetLayerByName('extent'): # ds.DeleteLayer('extent') #ds.DeleteLayer('extent') ds_extent = ogr.GetDriverByName('MEMORY').CreateDataSource('ds_extent') extent_lyr = ds_extent.CreateLayer('extent', geom_type=ogr.wkbPolygon, srs=spatial_ref) feat = ogr.Feature(extent_lyr.GetLayerDefn()) ring = ogr.Geometry(ogr.wkbLinearRing) ring.AddPoint(minX, maxY) 
ring.AddPoint(minX + nr_cols * cellsize, maxY) ring.AddPoint(minX + nr_cols * cellsize, maxY - nr_rows * cellsize) ring.AddPoint(minX, maxY - nr_rows * cellsize) ring.AddPoint(minX, maxY) poly = ogr.Geometry(ogr.wkbPolygon) poly.AddGeometry(ring) feat.SetGeometry(poly) extent_lyr.CreateFeature(feat) #if ds.GetLayerByName('intersect'): # ds.DeleteLayer('intersect') intersect_layer = ds_extent.CreateLayer('locations', geom_type=ogr.wkbPoint, srs=spatial_ref) lyr_dst.Intersection(extent_lyr, intersect_layer) pcraster.setclone(nr_rows, nr_cols, cellsize, minX, maxY) raster = pcraster.numpy2pcr(pcraster.Scalar, values_weight, numpy.nan) point_values.fill(numpy.nan) for idx, feature in enumerate(intersect_layer): x = feature.GetGeometryRef().GetX() y = feature.GetGeometryRef().GetY() mask_value, valid = pcraster.cellvalue_by_coordinates(raster, x, y) agent_value = feature.GetField('value') point_values[idx] = mask_value * agent_value indices = ~numpy.isnan(point_values) masked = point_values[indices] res = 0 if operation == 'average': res = numpy.average(masked) elif operation == 'sum': res = numpy.sum(masked) else: raise NotImplementedError if fail == True: assert res != 0 tmp_prop.values()[idx] = res return tmp_prop
5,354,509
def erosion_dependent(input_tensor: torch.Tensor, structuring_element: torch.Tensor, origin: Optional[Union[tuple, List[int]]] = None, border_value: Union[int, float, str] = 'geodesic'): """ This type of erosion is needed when you want a structuring element to vary along one axis. Parameters ---------- :param input_tensor: torch.Tensor The input tensor that you want to erode. It should be a PyTorch tensor of 2 dimensions. :param structuring_element: torch.Tensor The structuring element to erode. The structuring element should be a PyTorch tensor of 3 dimensions; first dimension should coincide with first dimension of input_tensor and two other dimensions are the shape of the structuring element. :param origin: None, tuple, List[int] The origin of the structuring element. Default to center of the structuring element. Negative indexes are allowed. The origin will be the same for all the structuring elements. :param border_value: int, float, str The value used to pad the image in the border. Two options are allowed when a string is passed in parameter: - 'geodesic': only points within the input are considered when taking the minimum. - 'euclidean': extends naturally the image setting minus infinite value to the border. Default value is 'geodesic'. Outputs ------- :return: torch.Tensor The erosion dependent of the first axis as a PyTorch tensor of the same shape than the original input. """ # Check parameters check_parameters_dependent(input_tensor, structuring_element, origin, border_value) # Adapt origin if not origin: origin = (structuring_element.shape[1] // 2, structuring_element.shape[2] // 2) # Fill border value if needed border_value = fill_border(border_value, 'erosion') # Convert tensor to float if needed input_tensor = convert_float(input_tensor) # Pad input pad_list = [origin[1], structuring_element.shape[2] - origin[1] - 1, origin[0], structuring_element.shape[1] - origin[0] - 1] input_pad = f.pad(input_tensor, pad_list, mode='constant', value=border_value) # Compute erosion if str(input_tensor.device) == 'cpu': raise ValueError('Operation currently only implemented for GPU.') else: result = morphology_cuda.erosion_dependent(input_pad, structuring_element, BLOCK_SHAPE) return result
5,354,510
def bin_barcodes(barcodes, binsize=1000): """Binning barcodes into chunks Parameters ---------- barcodes : iterable Iterable of barcodes binsize : int Size of bin for grouping barcodes Returns ------- yields list of barcode (1 bin) """ binsize = int(float(binsize)) bins = np.digitize(np.arange(0,barcodes.shape[0]), np.arange(0,barcodes.shape[0],binsize)) return [barcodes[bins == x] for x in np.unique(bins)]
5,354,511
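A usage sketch for bin_barcodes above with a hypothetical barcode array; because np.digitize assigns index 0 to the first bin, the result is simply consecutive slices of size binsize (the last bin may be smaller).
import numpy as np

barcodes = np.array([f"BC{i:04d}" for i in range(2500)])
groups = bin_barcodes(barcodes, binsize=1000)
print([len(g) for g in groups])   # [1000, 1000, 500]
assert groups[0][0] == "BC0000" and groups[-1][-1] == "BC2499"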
def test_apply_cli_subset_none(): """Ensure subset none works for apply CLI""" test_config = ApplicationConfiguration( application_name="test_application", internals=Internals(), post_processor=None, subcommands=[ SubCommand(name="list", description="list"), SubCommand(name="run", description="run"), ], entries=[ SettingsEntry( name="subcommand", short_description="Subcommands", subcommand_value=True, value=SettingsEntryValue(default="run"), ), SettingsEntry( name="z", apply_to_subsequent_cli=C.NONE, cli_parameters=CliParameters(short="-z"), short_description="the z parameter", value=SettingsEntryValue(), ), ], ) configurator = Configurator( params=["list", "-z", "zebra"], application_configuration=test_config, initial=True, ) _messages, exit_messages = configurator.configure() assert not exit_messages assert isinstance(test_config.initial, ApplicationConfiguration) expected = [ ("subcommand", "list"), ("z", "zebra"), ] for expect in expected: assert test_config.entry(expect[0]).value.current == expect[1] assert test_config.entry(expect[0]).value.source is C.USER_CLI configurator = Configurator( params=["run"], application_configuration=test_config, apply_previous_cli_entries=C.ALL, ) _messages, exit_messages = configurator.configure() assert not exit_messages expected = [ ("subcommand", "run", C.USER_CLI), ("z", C.NOT_SET, C.NOT_SET), ] for expect in expected: assert test_config.entry(expect[0]).value.current == expect[1] assert test_config.entry(expect[0]).value.source is expect[2]
5,354,512
def flash_regions(device, region_map): """divide the named memory into sized memory regions""" regions = [] for x in region_map: if len(x) == 2: # no meta information: set it all to None (name, region_sizes) = x meta = (None,) * len(region_sizes) elif len(x) == 3: # provided meta information - make sure it's per region (name, region_sizes, meta) = x assert len(region_sizes) == len(meta), 'need meta information for each flash region' else: assert False, 'bad flash region specification' # the regions are based on the peripheral memory space base_adr = device.peripherals[name].address total_size = device.peripherals[name].size adr = base_adr for (s, m) in zip(region_sizes, meta): regions.append(region(name, adr, s, m)) adr += s # make sure the regions cover the entire memory space of the peripheral assert base_adr + total_size == adr, "regions don't encompass all memory" return regions
5,354,513
def post_to_connection(Data=None, ConnectionId=None): """ Sends the provided data to the specified connection. See also: AWS API Documentation Exceptions :example: response = client.post_to_connection( Data=b'bytes'|file, ConnectionId='string' ) :type Data: bytes or seekable file-like object :param Data: [REQUIRED]\nThe data to be sent to the client specified by its connection id.\n :type ConnectionId: string :param ConnectionId: [REQUIRED]\nThe identifier of the connection that a specific client is using.\n :returns: ApiGatewayManagementApi.Client.exceptions.GoneException ApiGatewayManagementApi.Client.exceptions.LimitExceededException ApiGatewayManagementApi.Client.exceptions.PayloadTooLargeException ApiGatewayManagementApi.Client.exceptions.ForbiddenException """ pass
5,354,514
def _load_explorer_data(multiprocess=False): """ Load in all available corpora and make their initial tables This is run when the app starts up """ corpora = dict() tables = dict() for corpus in Corpus.objects.all(): if corpus.disabled: print(f"Skipping corpus because it is disabled: {corpus.name}") continue buzz_collection = Collection(corpus.path) # a corpus must have a feather or conll to be explorable. prefer feather. buzz_corpus = buzz_collection.feather or buzz_collection.conllu if buzz_corpus is None: print(f"No parsed data found for {corpus.path}") continue corpora[corpus.slug] = buzz_corpus if corpus.load: print(f"Loading corpus into memory: {corpus.name} ...") opts = dict(add_governor=corpus.add_governor, multiprocess=multiprocess) buzz_corpus = buzz_corpus.load(**opts) buzz_corpus = _postprocess_corpus(buzz_corpus, corpus) corpora[corpus.slug] = buzz_corpus else: print(f"NOT loading corpus into memory: {corpus.name} ...") # what should be shown in the frequencies space to begin with? if getattr(corpus, "initial_table", False): display = json.loads(corpus.initial_table) else: display = dict(show="p", subcorpora="file") print(f"Generating an initial table for {corpus.name} using {display}") initial_table = buzz_corpus.table(**display) tables[corpus.slug] = initial_table return corpora, tables
5,354,515
async def __write_html(path, file_content): """ Convert a base64 encoded string containing the md-formatted post content and write its html-conversion to disk. """ with open(path, "w") as _f: _f.write(convert_text(b64decode(file_content), "html5", format="md"))
5,354,516
def test_engine_default_base_content_path_can_be_overridden(): """If content_path is presented when the engine is initialized it can overwrite the default content_path.""" env = Engine(content_path='override_the_content_path') assert env.base_content_path == 'override_the_content_path'
5,354,517
def compute_CD_projected_psth(units, time_period=None): """ Routine for Coding Direction computation on all the units in the specified unit_keys Coding Direction is calculated in the specified time_period :param: unit_keys - list of unit_keys :return: coding direction unit-vector, contra-trials CD projected trial-psth, ipsi-trials CD projected trial-psth psth time-stamps """ unit_hemi = (ephys.ProbeInsertion.InsertionLocation * experiment.BrainLocation & units).fetch('hemisphere') if len(set(unit_hemi)) != 1: raise Exception('Units from both hemispheres found') else: unit_hemi = unit_hemi[0] session_key = experiment.Session & units if len(session_key) != 1: raise Exception('Units from multiple sessions found') # -- the computation part # get units and trials - ensuring they have trial-spikes contra_trials = (TrialCondition().get_trials( 'good_noearlylick_right_hit' if unit_hemi == 'left' else 'good_noearlylick_left_hit') & session_key & ephys.Unit.TrialSpikes).fetch('KEY') ipsi_trials = (TrialCondition().get_trials( 'good_noearlylick_left_hit' if unit_hemi == 'left' else 'good_noearlylick_right_hit') & session_key & ephys.Unit.TrialSpikes).fetch('KEY') # get per-trial unit psth for all units - unit# x (trial# x time) contra_trial_psths, contra_edges = zip(*(compute_unit_psth(unit, contra_trials, per_trial=True) for unit in units)) ipsi_trial_psths, ipsi_edges = zip(*(compute_unit_psth(unit, ipsi_trials, per_trial=True) for unit in units)) # compute trial-ave unit psth contra_psths = zip((p.mean(axis=0) for p in contra_trial_psths), contra_edges) ipsi_psths = zip((p.mean(axis=0) for p in ipsi_trial_psths), ipsi_edges) # compute coding direction cd_vec = compute_coding_direction(contra_psths, ipsi_psths, time_period=time_period) # get time vector, relying on all units PSTH shares the same time vector time_stamps = contra_edges[0] # get coding projection per trial - trial# x unit# x time contra_psth_per_trial = np.dstack(contra_trial_psths) ipsi_psth_per_trial = np.dstack(ipsi_trial_psths) proj_contra_trial = np.vstack(np.dot(tr_u, cd_vec) for tr_u in contra_psth_per_trial) # trial# x time proj_ipsi_trial = np.vstack(np.dot(tr_u, cd_vec) for tr_u in ipsi_psth_per_trial) # trial# x time return cd_vec, proj_contra_trial, proj_ipsi_trial, time_stamps, unit_hemi
5,354,518
def transform_count(in_gen, title=None): """ counts number of datamaps and prints the count out """ count = 0 for in_datamap in in_gen: count += 1 yield in_datamap if title is not None: print("%s count: %d" % (title, count)) else: print("count: %d" % count)
5,354,519
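transform_count above is a pass-through generator, so it only prints its count once the stream is exhausted. A small usage sketch:
datamaps = [{"id": 1}, {"id": 2}, {"id": 3}]
seen = list(transform_count(iter(datamaps), title="demo"))   # prints "demo count: 3"
assert seen == datamaps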
def _tvos_extension_impl(ctx): """Implementation of the `tvos_extension` Skylark rule.""" binary_artifact = binary_support.get_binary_provider( ctx.attr.deps, apple_common.AppleExecutableBinary).binary deps_objc_provider = binary_support.get_binary_provider( ctx.attr.deps, apple_common.AppleExecutableBinary).objc additional_providers, legacy_providers, additional_outputs = bundler.run( ctx, "TvosExtensionArchive", "tvOS extension", ctx.attr.bundle_id, binary_artifact=binary_artifact, deps_objc_providers=[deps_objc_provider], ) return struct( files=additional_outputs, providers=[ TvosExtensionBundleInfo(), ] + additional_providers, **legacy_providers )
5,354,520
def simplify_graph(G): """remove the scores, so the cycle_exists() function can work""" graph = copy.deepcopy(G) simplified = dict((k, graph[k][0]) for k in graph) # add dummy edges, so the cycle_exists() function works for source in list(simplified.keys()): for target in simplified[source]: if target not in simplified: simplified[target] = [] return simplified
5,354,521
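A usage sketch for simplify_graph above, assuming the graph values are (neighbour-list, score) pairs as the indexing graph[k][0] suggests:
import copy

G = {"a": (["b"], 0.9), "b": (["c"], 0.5)}
print(simplify_graph(G))
# {'a': ['b'], 'b': ['c'], 'c': []}  -- 'c' gets a dummy empty edge list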
def Run_INCR(num_vertices, edge_density, algorithm_name, k, init_tree=None): """ Initialize and run the MVA algorithm """ edges_bound = int(edge_density * (num_vertices * (num_vertices - 1) / 2)) k = max(1, k * edges_bound) runner = runner_factory(num_vertices, algorithm_name, None, edges_bound=edges_bound, edge_density=edge_density, k=k) randomizer = Randomizer(2 * num_vertices, runner["Parameters"]["seed"]) with Timer("t_expand_cliques", runner["Times"]): if init_tree == "ktree": ktree_k = 1 / 2 * (2 * num_vertices - 1 - sqrt(((2 * num_vertices - 1) * (2 * num_vertices - 1)) - (8 * edges_bound))) ktree_k = int(floor(ktree_k)) k_edges = (num_vertices - ktree_k - 1) * ktree_k + (ktree_k * (ktree_k + 1) / 2) p_mva = init_k_tree_incr(runner["Parameters"]["n"], ktree_k, randomizer) print("- Init with " + str(ktree_k) + "-tree:") elif init_tree == "tree": p_mva = expand_tree(runner["Parameters"]["n"], randomizer) print("- Expand tree:") else: p_mva = expand_cliques(runner["Parameters"]["n"], randomizer) print("- Expand cliques:") print(p_mva) with Timer("t_split_edges", runner["Times"]): loops = split_edges_k(p_mva, runner["Parameters"]["edges_bound"], randomizer, k) print("- Split edges:") runner["Stats"]["total"] = runner["Times"]["t_split_edges"] + runner["Times"]["t_expand_cliques"] runner["Stats"]["loops%"] = loops / edges_bound print(" loops:", runner["Stats"]["loops%"]) print(p_mva) return calculate_mva_statistics(p_mva, runner, randomizer, num_vertices)
5,354,522
def create_nrrd_from_dicoms(image, patient_id): """ Reads a folder that contains multiple DICOM files and converts the input into a single nrrd file using a command line app from MITK or MITK Phenotyping. Input: * path to one DICOM file (the others are found automatically) * Patient ID Output: Creates a single nrrd file with the path: $target_path / patient_id + '_ct_scan.nrrd' """ target_path = os.path.join(path_to_nrrds, patient_id) target_name = os.path.join(target_path, patient_id+"_ct_scan.nrrd") os.makedirs(target_path, exist_ok=True) cmd_string=r"MitkCLDicom2Nrrd "+\ "-i \"" + image + "\"" \ " -o \"" + target_name + "\"" print(cmd_string) a=subprocess.Popen(cmd_string,shell=True,cwd=path_to_executables) a.wait() return target_name
5,354,523
def _two_point_interp(times, altitudes, horizon=0*u.deg): """ Do linear interpolation between two ``altitudes`` at two ``times`` to determine the time where the altitude goes through zero. Parameters ---------- times : `~astropy.time.Time` Two times for linear interpolation between altitudes : array of `~astropy.units.Quantity` Two altitudes for linear interpolation between horizon : `~astropy.units.Quantity` Solve for the time when the altitude is equal to reference_alt. Returns ------- t : `~astropy.time.Time` Time when target crosses the horizon """ if not isinstance(times, Time): return MAGIC_TIME else: slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd) return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value, format='jd')
5,354,524
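A usage sketch for _two_point_interp above, assuming MAGIC_TIME is defined in the surrounding module; an altitude going from -2 deg to +3 deg over one hour crosses the horizon at roughly 24 minutes past the first time.
from astropy.time import Time
import astropy.units as u

times = Time(["2021-01-01 00:00:00", "2021-01-01 01:00:00"])
altitudes = [-2, 3] * u.deg
crossing = _two_point_interp(times, altitudes)
print(crossing.iso)   # approximately 2021-01-01 00:24:00 from the linear interpolation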
def setup_sample_data(no_of_records): """Generate the given number of sample data with 'id', 'name', and 'dt'""" rows_in_database = [{'id': counter, 'name': get_random_string(string.ascii_lowercase, 20), 'dt': '2017-05-03'} for counter in range(0, no_of_records)] return rows_in_database
5,354,525
def generate_csv_string(csv_data): """ Turn 2d string array into a string representing a csv file """ output_buffer = StringIO() writer = csv.writer(output_buffer) csv_data = equalize_array(csv_data) csv_data = utf_8_encode_array(csv_data) for row in csv_data: writer.writerow(row) body = output_buffer.getvalue() output_buffer.close() return body
5,354,526
def finish_current_molecule(molecule_name, path_save_mol2, temp_file_name_full): """ Last procedures for current molecule Example: >>> finish_current_molecule(molecule_name, path_save_mol2, temp_file_name_full) @param molecule_name: main name of molecule @type molecule_name: string @param path_save_mol2: path of mol2 files will be saved @type path_save_mol2: string @param temp_file_name_full: full path of temp file @type temp_file_name_full: string """ #preparing name of mol2 file # Checking filenames of molecules based on molecule_name # Because of isomers, it is necessary to check how many files of # molecule_name there is in path_save_mol2 mol_name_aux = '' number_files = number_files_of_molecule(molecule_name, path_save_mol2) if number_files > 0: if number_files == 1: #means that there is only one molecule. #So it must be renamed with prefix _1 #number_files will be assigned to 2, because # the current molecule will be second molecule before_molecule = molecule_name+'.mol2' before_molecule_mol2 = os.path.join(path_save_mol2, before_molecule) new_molecule = molecule_name+'_1'+'.mol2' new_molecule_mol2 = os.path.join(path_save_mol2, new_molecule) shutil.move(before_molecule_mol2, new_molecule_mol2) number_files = number_files + 1 mol_name_aux = molecule_name+'_'+str(number_files) else: mol_name_aux = molecule_name mol2_file_name = mol_name_aux+'.mol2' mol2_file_name_full = os.path.join(path_save_mol2, mol2_file_name) #creating mol2 file - moving temp file to mol2_file_name_full shutil.move(temp_file_name_full, mol2_file_name_full)
5,354,527
def user_stats_birth(df): """Displays statistics of analysis based on the birth years of bikeshare users.""" # Display earliest, most recent, and most common year of birth birth_year = df['Birth Year'] # the most common birth year most_common_year = birth_year.value_counts().idxmax() print("The most common birth year:", most_common_year) # the most recent birth year most_recent = birth_year.max() print("The most recent birth year:", most_recent) # the earliest birth year earliest_year = birth_year.min() print("The earliest birth year:", earliest_year)
5,354,528
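A usage sketch for user_stats_birth above with a hypothetical bikeshare frame; the function only reads the 'Birth Year' column.
import pandas as pd

df = pd.DataFrame({"Birth Year": [1985, 1990, 1990, 2001]})
user_stats_birth(df)
# The most common birth year: 1990
# The most recent birth year: 2001
# The earliest birth year: 1985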
def create_parser() -> ArgumentParser: """ Constructs the MFA argument parser Returns ------- ArgumentParser MFA argument parser """ GLOBAL_CONFIG = load_global_config() def add_global_options(subparser: argparse.ArgumentParser, textgrid_output: bool = False): """ Add a set of global options to a subparser Parameters ---------- subparser: argparse.ArgumentParser Subparser to augment textgrid_output: bool Flag for whether the subparser is used for a command that generates TextGrids """ subparser.add_argument( "-t", "--temp_directory", type=str, default=GLOBAL_CONFIG["temp_directory"], help=f"Temporary directory root to store MFA created files, default is {GLOBAL_CONFIG['temp_directory']}", ) subparser.add_argument( "--disable_mp", help=f"Disable any multiprocessing during alignment (not recommended), default is {not GLOBAL_CONFIG['use_mp']}", action="store_true", default=not GLOBAL_CONFIG["use_mp"], ) subparser.add_argument( "-j", "--num_jobs", type=int, default=GLOBAL_CONFIG["num_jobs"], help=f"Number of data splits (and cores to use if multiprocessing is enabled), defaults " f"is {GLOBAL_CONFIG['num_jobs']}", ) subparser.add_argument( "-v", "--verbose", help=f"Output debug messages, default is {GLOBAL_CONFIG['verbose']}", action="store_true", default=GLOBAL_CONFIG["verbose"], ) subparser.add_argument( "--clean", help=f"Remove files from previous runs, default is {GLOBAL_CONFIG['clean']}", action="store_true", default=GLOBAL_CONFIG["clean"], ) subparser.add_argument( "--overwrite", help=f"Overwrite output files when they exist, default is {GLOBAL_CONFIG['overwrite']}", action="store_true", default=GLOBAL_CONFIG["overwrite"], ) subparser.add_argument( "--debug", help=f"Run extra steps for debugging issues, default is {GLOBAL_CONFIG['debug']}", action="store_true", default=GLOBAL_CONFIG["debug"], ) if textgrid_output: subparser.add_argument( "--disable_textgrid_cleanup", help=f"Disable extra clean up steps on TextGrid output, default is {not GLOBAL_CONFIG['cleanup_textgrids']}", action="store_true", default=not GLOBAL_CONFIG["cleanup_textgrids"], ) parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest="subcommand") subparsers.required = True _ = subparsers.add_parser("version") align_parser = subparsers.add_parser("align") align_parser.add_argument("corpus_directory", help="Full path to the directory to align") align_parser.add_argument( "dictionary_path", help="Full path to the pronunciation dictionary to use" ) align_parser.add_argument( "acoustic_model_path", help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})", ) align_parser.add_argument( "output_directory", help="Full path to output directory, will be created if it doesn't exist", ) align_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) align_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) align_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) add_global_options(align_parser, textgrid_output=True) adapt_parser = subparsers.add_parser("adapt") adapt_parser.add_argument("corpus_directory", help="Full path to the directory to align") adapt_parser.add_argument( "dictionary_path", help="Full path to the pronunciation dictionary to use" ) adapt_parser.add_argument( "acoustic_model_path", 
help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})", ) adapt_parser.add_argument( "output_paths", nargs="+", help="Path to directory for aligned TextGrids, zip path to export acoustic model, or both", ) adapt_parser.add_argument( "-o", "--output_model_path", type=str, default="", help="Full path to save adapted acoustic model", ) adapt_parser.add_argument( "--full_train", action="store_true", help="Specify whether to do a round of speaker-adapted training rather than the default " "remapping approach to adaptation", ) adapt_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) adapt_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) adapt_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) add_global_options(adapt_parser, textgrid_output=True) train_parser = subparsers.add_parser("train") train_parser.add_argument( "corpus_directory", help="Full path to the source directory to align" ) train_parser.add_argument( "dictionary_path", help="Full path to the pronunciation dictionary to use", default="" ) train_parser.add_argument( "output_paths", nargs="+", help="Path to directory for aligned TextGrids, zip path to export acoustic model, or both", ) train_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) train_parser.add_argument( "-o", "--output_model_path", type=str, default="", help="Full path to save resulting acoustic model", ) train_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of filenames to use for determining speaker, " "default is to use directory names", ) train_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) add_global_options(train_parser, textgrid_output=True) validate_parser = subparsers.add_parser("validate") validate_parser.add_argument( "corpus_directory", help="Full path to the source directory to align" ) validate_parser.add_argument( "dictionary_path", help="Full path to the pronunciation dictionary to use", default="" ) validate_parser.add_argument( "acoustic_model_path", nargs="?", default="", help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})", ) validate_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) validate_parser.add_argument( "--test_transcriptions", help="Test accuracy of transcriptions", action="store_true" ) validate_parser.add_argument( "--ignore_acoustics", help="Skip acoustic feature generation and associated validation", action="store_true", ) add_global_options(validate_parser) g2p_model_help_message = f"""Full path to the archive containing pre-trained model or language ({', '.join(g2p_models)}) If not specified, then orthographic transcription is split into pronunciations.""" g2p_parser = subparsers.add_parser("g2p") g2p_parser.add_argument("g2p_model_path", help=g2p_model_help_message, nargs="?") g2p_parser.add_argument( "input_path", help="Corpus to base word list on or a text file of words to generate pronunciations", ) 
g2p_parser.add_argument("output_path", help="Path to save output dictionary") g2p_parser.add_argument( "--include_bracketed", help="Included words enclosed by brackets, job_name.e. [...], (...), <...>", action="store_true", ) g2p_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for G2P" ) add_global_options(g2p_parser) train_g2p_parser = subparsers.add_parser("train_g2p") train_g2p_parser.add_argument("dictionary_path", help="Location of existing dictionary") train_g2p_parser.add_argument("output_model_path", help="Desired location of generated model") train_g2p_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for G2P" ) train_g2p_parser.add_argument( "--validate", action="store_true", help="Perform an analysis of accuracy training on " "most of the data and validating on an unseen subset", ) add_global_options(train_g2p_parser) model_parser = subparsers.add_parser("model") model_subparsers = model_parser.add_subparsers(dest="action") model_subparsers.required = True model_download_parser = model_subparsers.add_parser("download") model_download_parser.add_argument( "model_type", help=f"Type of model to download, options: {', '.join(MODEL_TYPES)}" ) model_download_parser.add_argument( "name", help="Name of language code to download, if not specified, " "will list all available languages", nargs="?", ) model_list_parser = model_subparsers.add_parser("list") model_list_parser.add_argument( "model_type", nargs="?", help=f"Type of model to list, options: {', '.join(MODEL_TYPES)}" ) model_inspect_parser = model_subparsers.add_parser("inspect") model_inspect_parser.add_argument( "model_type", nargs="?", help=f"Type of model to download, options: {', '.join(MODEL_TYPES)}", ) model_inspect_parser.add_argument( "name", help="Name of pretrained model or path to MFA model to inspect" ) model_save_parser = model_subparsers.add_parser("save") model_save_parser.add_argument("model_type", help="Type of MFA model") model_save_parser.add_argument( "path", help="Path to MFA model to save for invoking with just its name" ) model_save_parser.add_argument( "--name", help="Name to use as reference (defaults to the name of the zip file", type=str, default="", ) model_save_parser.add_argument( "--overwrite", help="Flag to overwrite existing pretrained models with the same name (and model type)", action="store_true", ) train_lm_parser = subparsers.add_parser("train_lm") train_lm_parser.add_argument( "source_path", help="Full path to the source directory to train from, alternatively " "an ARPA format language model to convert for MFA use", ) train_lm_parser.add_argument( "output_model_path", type=str, help="Full path to save resulting language model" ) train_lm_parser.add_argument( "-m", "--model_path", type=str, help="Full path to existing language model to merge probabilities", ) train_lm_parser.add_argument( "-w", "--model_weight", type=float, default=1.0, help="Weight factor for supplemental language model, defaults to 1.0", ) train_lm_parser.add_argument( "--dictionary_path", help="Full path to the pronunciation dictionary to use", default="" ) train_lm_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training and alignment", ) add_global_options(train_lm_parser) train_dictionary_parser = subparsers.add_parser("train_dictionary") train_dictionary_parser.add_argument( "corpus_directory", help="Full path to the directory to align" ) train_dictionary_parser.add_argument( 
"dictionary_path", help="Full path to the pronunciation dictionary to use" ) train_dictionary_parser.add_argument( "acoustic_model_path", help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})", ) train_dictionary_parser.add_argument( "output_directory", help="Full path to output directory, will be created if it doesn't exist", ) train_dictionary_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for alignment" ) train_dictionary_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) add_global_options(train_dictionary_parser) train_ivector_parser = subparsers.add_parser("train_ivector") train_ivector_parser.add_argument( "corpus_directory", help="Full path to the source directory to " "train the ivector extractor", ) train_ivector_parser.add_argument( "dictionary_path", help="Full path to the pronunciation dictionary to use" ) train_ivector_parser.add_argument( "acoustic_model_path", type=str, default="", help="Full path to acoustic model for alignment", ) train_ivector_parser.add_argument( "output_model_path", type=str, default="", help="Full path to save resulting ivector extractor", ) train_ivector_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of filenames to use for determining speaker, " "default is to use directory names", ) train_ivector_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for training" ) add_global_options(train_ivector_parser) classify_speakers_parser = subparsers.add_parser("classify_speakers") classify_speakers_parser.add_argument( "corpus_directory", help="Full path to the source directory to " "run speaker classification", ) classify_speakers_parser.add_argument( "ivector_extractor_path", type=str, default="", help="Full path to ivector extractor model" ) classify_speakers_parser.add_argument( "output_directory", help="Full path to output directory, will be created if it doesn't exist", ) classify_speakers_parser.add_argument( "-s", "--num_speakers", type=int, default=0, help="Number of speakers if known" ) classify_speakers_parser.add_argument( "--cluster", help="Using clustering instead of classification", action="store_true" ) classify_speakers_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for ivector extraction", ) add_global_options(classify_speakers_parser) create_segments_parser = subparsers.add_parser("create_segments") create_segments_parser.add_argument( "corpus_directory", help="Full path to the source directory to " "run VAD segmentation" ) create_segments_parser.add_argument( "output_directory", help="Full path to output directory, will be created if it doesn't exist", ) create_segments_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for segmentation" ) add_global_options(create_segments_parser) transcribe_parser = subparsers.add_parser("transcribe") transcribe_parser.add_argument( "corpus_directory", help="Full path to the directory to transcribe" ) transcribe_parser.add_argument( "dictionary_path", help="Full path to the pronunciation dictionary to use" ) transcribe_parser.add_argument( "acoustic_model_path", help=f"Full path to the archive containing pre-trained model or language ({', '.join(acoustic_models)})", ) 
transcribe_parser.add_argument( "language_model_path", help=f"Full path to the archive containing pre-trained model or language ({', '.join(language_models)})", ) transcribe_parser.add_argument( "output_directory", help="Full path to output directory, will be created if it doesn't exist", ) transcribe_parser.add_argument( "--config_path", type=str, default="", help="Path to config file to use for transcription" ) transcribe_parser.add_argument( "-s", "--speaker_characters", type=str, default="0", help="Number of characters of file names to use for determining speaker, " "default is to use directory names", ) transcribe_parser.add_argument( "-a", "--audio_directory", type=str, default="", help="Audio directory root to use for finding audio files", ) transcribe_parser.add_argument( "-e", "--evaluate", help="Evaluate the transcription " "against golden texts", action="store_true", ) add_global_options(transcribe_parser) config_parser = subparsers.add_parser( "configure", help="The configure command is used to set global defaults for MFA so " "you don't have to set them every time you call an MFA command.", ) config_parser.add_argument( "-t", "--temp_directory", type=str, default="", help=f"Set the default temporary directory, default is {GLOBAL_CONFIG['temp_directory']}", ) config_parser.add_argument( "-j", "--num_jobs", type=int, help=f"Set the number of processes to use by default, defaults to {GLOBAL_CONFIG['num_jobs']}", ) config_parser.add_argument( "--always_clean", help="Always remove files from previous runs by default", action="store_true", ) config_parser.add_argument( "--never_clean", help="Don't remove files from previous runs by default", action="store_true", ) config_parser.add_argument( "--always_verbose", help="Default to verbose output", action="store_true" ) config_parser.add_argument( "--never_verbose", help="Default to non-verbose output", action="store_true" ) config_parser.add_argument( "--always_debug", help="Default to running debugging steps", action="store_true" ) config_parser.add_argument( "--never_debug", help="Default to not running debugging steps", action="store_true" ) config_parser.add_argument( "--always_overwrite", help="Always overwrite output files", action="store_true" ) config_parser.add_argument( "--never_overwrite", help="Never overwrite output files (if file already exists, " "the output will be saved in the temp directory)", action="store_true", ) config_parser.add_argument( "--disable_mp", help="Disable all multiprocessing (not recommended as it will usually " "increase processing times)", action="store_true", ) config_parser.add_argument( "--enable_mp", help="Enable multiprocessing (recommended and enabled by default)", action="store_true", ) config_parser.add_argument( "--disable_textgrid_cleanup", help="Disable postprocessing of TextGrids that cleans up " "silences and recombines compound words and clitics", action="store_true", ) config_parser.add_argument( "--enable_textgrid_cleanup", help="Enable postprocessing of TextGrids that cleans up " "silences and recombines compound words and clitics", action="store_true", ) config_parser.add_argument( "--disable_terminal_colors", help="Turn off colored text in output", action="store_true" ) config_parser.add_argument( "--enable_terminal_colors", help="Turn on colored text in output", action="store_true" ) config_parser.add_argument( "--terminal_width", help=f"Set width of terminal output, " f"currently set to {GLOBAL_CONFIG['terminal_width']}", default=GLOBAL_CONFIG["terminal_width"], type=int, ) 
config_parser.add_argument( "--blas_num_threads", help=f"Number of threads to use for BLAS libraries, 1 is recommended " f"due to how much MFA relies on multiprocessing. " f"Currently set to {GLOBAL_CONFIG['blas_num_threads']}", default=GLOBAL_CONFIG["blas_num_threads"], type=int, ) history_parser = subparsers.add_parser("history") history_parser.add_argument("depth", help="Number of commands to list", nargs="?", default=10) history_parser.add_argument( "--verbose", help="Flag for whether to output additional information", action="store_true" ) _ = subparsers.add_parser("annotator") _ = subparsers.add_parser("anchor") return parser
5,354,529
def build_params_comments(python_code, keyword, info): """Builds comments for parameters""" for arg, arg_info in zip(info.get('expected_url_params').keys(), info.get('expected_url_params').values()): python_code += '\n' + 2*TAB_BASE*SPACE + ':param ' + score_to_underscore(arg) + ': ' python_code += str(arg_info.get('description')) + ' ' + str(arg_info.get('possible_values')) return python_code
5,354,530
async def async_setup(hass: HomeAssistant, config: dict): """Set up the Netatmo component.""" hass.data[DOMAIN] = {} hass.data[DOMAIN][DATA_PERSONS] = {} if DOMAIN not in config: return True config_flow.NetatmoFlowHandler.async_register_implementation( hass, config_entry_oauth2_flow.LocalOAuth2Implementation( hass, DOMAIN, config[DOMAIN][CONF_CLIENT_ID], config[DOMAIN][CONF_CLIENT_SECRET], OAUTH2_AUTHORIZE, OAUTH2_TOKEN, ), ) return True
5,354,531
async def upload_artifact(req): """ Upload artifact created during sample creation using the Jobs API. """ db = req.app["db"] pg = req.app["pg"] sample_id = req.match_info["sample_id"] artifact_type = req.query.get("type") if not await db.samples.find_one(sample_id): raise NotFound() errors = virtool.uploads.utils.naive_validator(req) if errors: raise InvalidQuery(errors) name = req.query.get("name") artifact_file_path = ( virtool.samples.utils.join_sample_path(req.app["config"], sample_id) / name ) if artifact_type and artifact_type not in ArtifactType.to_list(): raise HTTPBadRequest(text="Unsupported sample artifact type") try: artifact = await create_artifact_file(pg, name, name, sample_id, artifact_type) except exc.IntegrityError: raise HTTPConflict( text="Artifact file has already been uploaded for this sample" ) upload_id = artifact["id"] try: size = await virtool.uploads.utils.naive_writer(req, artifact_file_path) except asyncio.CancelledError: logger.debug(f"Artifact file upload aborted for sample: {sample_id}") await delete_row(pg, upload_id, SampleArtifact) await req.app["run_in_thread"](os.remove, artifact_file_path) return aiohttp.web.Response(status=499) artifact = await virtool.uploads.db.finalize(pg, size, upload_id, SampleArtifact) headers = {"Location": f"/samples/{sample_id}/artifact/{name}"} return json_response(artifact, status=201, headers=headers)
5,354,532
def hotkey(x: int, y: int) -> bool: """Try to copy by dragging over the string, and then use hotkey.""" gui.moveTo(x + 15, y, 0) gui.mouseDown() gui.move(70, 0) gui.hotkey("ctrl", "c") gui.mouseUp() return check_copied()
5,354,533
def db_handler(args): """db_handler.""" if args.type == 'create': create_db() if args.type == 'status': current_rev = db_revision.current_db_revision() print('current_rev', current_rev) if args.type == 'upgrade': upgrade_db() if args.type == 'revision': db_revision.new_revision() if args.type == 'drop': if os.path.exists(DB_FILE_PATH): os.remove(DB_FILE_PATH)
5,354,534
def test_format_checks_warning(): """Test that unregistered checks raise a warning when formatting checks.""" with pytest.warns(UserWarning): io._format_checks({"my_check": None})
5,354,535
def get_tv_imdbid_by_id( tv_id, verify = True ): """ Returns the IMDb_ ID for a TV show. :param int tv_id: the TMDB_ series ID for the TV show. :param bool verify: optional argument, whether to verify SSL connections. Default is ``True``. :returns: the IMDB_ ID for that TV show. Otherwise returns ``None`` if cannot be found. :rtype: str .. _IMDb: https://www.imdb.com """ response = requests.get( 'https://api.themoviedb.org/3/tv/%d/external_ids' % tv_id, params = { 'api_key' : tmdb_apiKey }, verify = verify ) if response.status_code != 200: print( 'problem here, %s.' % response.content ) return None data = response.json( ) if 'imdb_id' not in data: return None return data['imdb_id']
5,354,536
def validate(test_case, **__) -> TestCaseResult: """ Default function to validate test cases. Note that the first argument should be a positional argument. """ raise NotImplementedError( f"Missing test case validation implementation for {type(test_case)}." )
5,354,537
def test_reading_cosmos_catalog(): """Returns the cosmos catalog""" cosmos_catalog = CosmosCatalog.from_file(COSMOS_CATALOG_PATHS) return cosmos_catalog
5,354,538
def write_positions_as_pdbs(i, j, phase, state, annealing_steps, parent_dir, topology_pkl, direction='forward', output_pdb_filename=None, selection_string='resname MOL'): """ extract the positions files for an array of annealing steps and write the ligand positions to a pdb; this is primarily used to extract and view post-annealing snapshots for sanity checks (i.e. to make sure molecules aren't exploding) arguments i : int start ligand j : int end ligand phase : str phase state : str old/new direction : str, default 'forward' direction annealing_steps : int number of annealing steps parent_dir : str parent dir where 'positions.npz's live topology_pkl : str name of pickled openmm topology file output_pdb_filename : str, default None output pdb will output a pdb of the form: will output a pdb of the form: example: >>> import os >>> from qmlify.analysis import write_positions_to_pdbs >>> #let's query lig0to4, old, solvent (the more difficult transform), forward, at 500, 1000, 5000, 10000 annealing steps from cwd >>> annealing_list=[500, 1000, 5000, 10000] >>> for step in annealing_list: write_positions_to_pdbs(0,4,'solvent', 'old', step, os.getcwd(), 'lig0to4/solvent.old_topology.pkl') """ from qmlify.analysis import work_file_extractor import pickle import mdtraj import glob import os import numpy as np import tqdm from openeye import oechem with open(topology_pkl, 'rb') as f: topology = pickle.load(f) md_topology = mdtraj.Topology.from_openmm(topology) subset_indices = md_topology.select(selection_string) if direction != 'mm_endstate': query_template = os.path.join(parent_dir, '.'.join(DEFAULT_POSITION_TEMPLATE.split('.')[:4]) + '.*.' + '.'.join(DEFAULT_POSITION_TEMPLATE.split('.')[5:])) query_filename = query_template.format(i=i, j=j, phase=phase, state=state, direction=direction, annealing_steps=annealing_steps) filenames_list = glob.glob(query_filename) index_extractions = {int(filename.split('.')[4][4:]): os.path.join(parent_dir, filename) for filename in filenames_list} work_files = work_file_extractor(i, j, phase, state, direction, annealing_steps, parent_dir) positions = [] snapshots = [] counter=0 for snapshot_index, filename in tqdm.tqdm(sorted(index_extractions.items())): try: frame = np.load(filename)['positions'][0] work_value = np.load(work_files[snapshot_index])['works'][-1] snapshots.append([counter, work_value]) positions.append(frame[subset_indices,:]) counter+=1 except Exception as e: print(e) positions = np.array(positions) elif direction=='mm_endstate': #we are just going to pull the _before_ annealing trajectories letter, _lambda = ('A', 0) if state=='old' else ('B', 1) query_template = os.path.join(parent_dir, DEFAULT_MM_POSITION_TEMPLATE.format(i=i, j=j, _lambda=_lambda, letter=letter, phase=phase)) positions = np.load(query_template)['positions'] else: raise Exception(f"{direction} is not a supported direction") traj = mdtraj.Trajectory(xyz=np.array(positions), topology = md_topology.subset(subset_indices)) if output_pdb_filename is None: output_pdb_filename = f"lig{i}to{j}.{state}.{direction}.{annealing_steps}_steps.aggregate.pdb" else: assert output_pdb_filename[-3:] == 'pdb' output_array_filename = output_pdb_filename[:-3] + 'npz' traj.save(os.path.join(parent_dir, output_pdb_filename)) np.savez(os.path.join(parent_dir, output_array_filename), np.array(snapshots))
5,354,539
def is_terminal(p): """ Check if a given packet is a terminal element. :param p: element to check :type p: object :return: If ``p`` is a terminal element :rtype: bool """ return isinstance(p, _TerminalPacket)
5,354,540
def vgg11_bn(pretrained=False, **kwargs): """VGG 11-layer model (configuration "A") with batch normalization Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ if pretrained: kwargs['init_weights'] = False model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn'])) return model
5,354,541
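A usage sketch for vgg11_bn above, assuming the surrounding module provides VGG, make_layers, cfg and model_urls as in the torchvision-style source; pretrained=False keeps the weights random so nothing is downloaded.
import torch

model = vgg11_bn(pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)   # torch.Size([1, 1000])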
def trf_input_method(config, patient_id="", key_namespace="", **_): """Streamlit GUI method to facilitate TRF data provision. Notes ----- TRF files themselves have no innate patient alignment. An option for TRF collection is to use the CLI tool ``pymedphys trf orchestrate``. This connects to the SAMBA server hosted on the Elekta NSS and downloads the diagnostic backup zips. It then takes these TRF files and queries the Mosaiq database using time of delivery to identify these with a patient id (Ident.Pat_ID1) and name. As such, all references to patient ID and name within this ``trf_input_method`` are actually a reference to their Mosaiq database counterparts. """ FILE_UPLOAD = "File upload" INDEXED_TRF_SEARCH = "Search indexed TRF directory" import_method = st.radio( "TRF import method", [FILE_UPLOAD, INDEXED_TRF_SEARCH], key=f"{key_namespace}_trf_file_import_method", ) if import_method == FILE_UPLOAD: selected_files = st.file_uploader( "Upload TRF files", key=f"{key_namespace}_trf_file_uploader", accept_multiple_files=True, ) if not selected_files: return {} data_paths = [] individual_identifiers = ["Uploaded TRF file(s)"] if import_method == INDEXED_TRF_SEARCH: try: indexed_trf_directory = _config.get_indexed_trf_directory(config) except KeyError: st.write( _exceptions.ConfigMissing( "No indexed TRF directory is configured. Please use " f"'{FILE_UPLOAD}' instead." ) ) return {} patient_id = st.text_input( "Patient ID", patient_id, key=f"{key_namespace}_patient_id" ) st.write(patient_id) filepaths = list(indexed_trf_directory.glob(f"*/{patient_id}_*/*/*/*/*.trf")) raw_timestamps = [ "_".join(path.parent.name.split("_")[0:2]) for path in filepaths ] timestamps = list( pd.to_datetime(raw_timestamps, format="%Y-%m-%d_%H%M%S").astype(str) ) timestamp_filepath_map = dict(zip(timestamps, filepaths)) timestamps = sorted(timestamps, reverse=True) if len(timestamps) == 0: if patient_id != "": st.write( _exceptions.NoRecordsFound( f"No TRF log file found for patient ID {patient_id}" ) ) return {"patient_id": patient_id} if len(timestamps) == 1: default_timestamp = timestamps[0] else: default_timestamp = [] selected_trf_deliveries = st.multiselect( "Select TRF delivery timestamp(s)", timestamps, default=default_timestamp, key=f"{key_namespace}_trf_deliveries", ) if not selected_trf_deliveries: return {} st.write( """ #### TRF filepath(s) """ ) selected_files = [ timestamp_filepath_map[timestamp] for timestamp in selected_trf_deliveries ] st.write([str(path.resolve()) for path in selected_files]) individual_identifiers = [ f"{path.parent.parent.parent.parent.name} {path.parent.name}" for path in selected_files ] data_paths = selected_files st.write( """ #### Log file header(s) """ ) headers = [] tables = [] for path_or_binary in selected_files: try: path_or_binary.seek(0) except AttributeError: pass header, table = read_trf(path_or_binary) headers.append(header) tables.append(table) headers = pd.concat(headers) headers.reset_index(inplace=True) headers.drop("index", axis=1, inplace=True) st.write(headers) deliveries = _deliveries.cached_deliveries_loading( tables, _deliveries.delivery_from_trf ) identifier = f"TRF ({individual_identifiers[0]})" patient_name = _attempt_patient_name_from_mosaiq(config, headers) return { "site": None, "patient_id": patient_id, "patient_name": patient_name, "data_paths": data_paths, "identifier": identifier, "deliveries": deliveries, }
5,354,542
def plotter(fdict): """ Go """ ctx = get_autoplot_context(fdict, get_description()) station = ctx['station'] network = ctx['network'] year = ctx['year'] season = ctx['season'] nt = NetworkTable(network) table = "alldata_%s" % (station[:2],) pgconn = get_dbconn('coop') # Have to do a redundant query to get the running values obs = read_sql(""" WITH trail as ( SELECT day, year, avg((high+low)/2.) OVER (ORDER by day ASC ROWS 91 PRECEDING) as avgt from """ + table + """ WHERE station = %s) SELECT day, avgt from trail WHERE year between %s and %s ORDER by day ASC """, pgconn, params=(station, year, year + 2), index_col='day') df = read_sql(""" WITH trail as ( SELECT day, year, avg((high+low)/2.) OVER (ORDER by day ASC ROWS 91 PRECEDING) as avgt from """ + table + """ WHERE station = %s), extremes as ( SELECT day, year, avgt, rank() OVER (PARTITION by year ORDER by avgt ASC) as minrank, rank() OVER (PARTITION by year ORDER by avgt DESC) as maxrank from trail), yearmax as ( SELECT year, min(day) as summer_end, min(avgt) as summer from extremes where maxrank = 1 GROUP by year), yearmin as ( SELECT year, min(day) as winter_end, min(avgt) as winter from extremes where minrank = 1 GROUP by year) SELECT x.year, winter_end, winter, summer_end, summer, extract(doy from winter_end)::int as winter_end_doy, extract(doy from summer_end)::int as summer_end_doy from yearmax x JOIN yearmin n on (x.year = n.year) ORDER by x.year ASC """, pgconn, params=(station, ), index_col='year') # Throw out spring of the first year for col in ['winter', 'winter_end_doy', 'winter_end']: df.at[df.index.min(), col] = None # Need to cull current year if datetime.date.today().month < 8: for col in ['summer', 'summer_end_doy', 'summer_end']: df.at[datetime.date.today().year, col] = None if datetime.date.today().month < 2: for col in ['winter', 'winter_end_doy', 'winter_end']: df.at[datetime.date.today().year, col] = None df['spring_length'] = df['summer_end_doy'] - 91 - df['winter_end_doy'] # fall is a bit tricker df['fall_length'] = None df['fall_length'].values[:-1] = ((df['winter_end_doy'].values[1:] + 365) - 91 - df['summer_end_doy'].values[:-1]) df['fall_length'] = pd.to_numeric(df['fall_length']) (fig, ax) = plt.subplots(3, 1, figsize=(8, 9)) ax[0].plot(obs.index.values, obs['avgt'].values) ax[0].set_ylim(obs['avgt'].min() - 8, obs['avgt'].max() + 8) ax[0].set_title(("%s-%s [%s] %s\n91 Day Average Temperatures" ) % (nt.sts[station]['archive_begin'].year, year + 3, station, nt.sts[station]['name'])) ax[0].set_ylabel(r"Trailing 91 Day Avg T $^{\circ}$F") ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%b\n%Y')) ax[0].grid(True) # Label the maxes and mins for yr in range(year, year+3): if yr not in df.index: continue date = df.at[yr, 'winter_end'] val = df.at[yr, 'winter'] if date is not None: ax[0].text( date, val - 1, r"%s %.1f$^\circ$F" % (date.strftime("%-d %b"), val), ha='center', va='top', bbox=dict(color='white', boxstyle='square,pad=0') ) date = df.at[yr, 'summer_end'] val = df.at[yr, 'summer'] if date is not None: ax[0].text( date, val + 1, r"%s %.1f$^\circ$F" % (date.strftime("%-d %b"), val), ha='center', va='bottom', bbox=dict(color='white', boxstyle='square,pad=0') ) df2 = df.dropna() p2col = 'winter_end_doy' if season == 'spring' else 'summer_end_doy' slp, intercept, r, _, _ = stats.linregress(df2.index.values, df2[p2col].values) ax[1].scatter(df.index.values, df[p2col].values) ax[1].grid(True) # Do labelling yticks = [] yticklabels = [] for doy in range(int(df[p2col].min()), int(df[p2col].max())): date = 
datetime.date(2000, 1, 1) + datetime.timedelta(days=(doy - 1)) if date.day in [1, 15]: yticks.append(doy) yticklabels.append(date.strftime("%-d %b")) ax[1].set_yticks(yticks) ax[1].set_yticklabels(yticklabels) lbl = ("Date of Minimum (Spring Start)" if season == 'spring' else "Date of Maximum (Fall Start)") ax[1].set_ylabel(lbl) ax[1].set_xlim(df.index.min() - 1, df.index.max() + 1) avgv = df[p2col].mean() ax[1].axhline(avgv, color='r') ax[1].plot(df.index.values, intercept + (df.index.values * slp)) d = (datetime.date(2000, 1, 1) + datetime.timedelta(days=int(avgv))).strftime("%-d %b") ax[1].text(0.02, 0.02, r"$\frac{\Delta days}{decade} = %.2f,R^2=%.2f, avg = %s$" % ( slp * 10.0, r ** 2, d), va='bottom', transform=ax[1].transAxes) ax[1].set_ylim(bottom=(ax[1].get_ylim()[0] - 10)) p3col = 'spring_length' if season == 'spring' else 'fall_length' slp, intercept, r, _, _ = stats.linregress(df2.index.values, df2[p3col]) ax[2].scatter(df.index.values, df[p3col]) ax[2].set_xlim(df.index.min() - 1, df.index.max() + 1) ax[2].set_ylabel("Length of '%s' [days]" % (season.capitalize(),)) ax[2].grid(True) avgv = df[p3col].mean() ax[2].axhline(avgv, color='r') ax[2].plot(df.index.values, intercept + (df.index.values * slp)) ax[2].text(0.02, 0.02, r"$\frac{\Delta days}{decade} = %.2f,R^2=%.2f, avg = %.1fd$" % ( slp * 10.0, r ** 2, avgv), va='bottom', transform=ax[2].transAxes) ax[2].set_ylim(bottom=(ax[2].get_ylim()[0] - 15)) return fig, df
5,354,543
def rlist(sub_command, params, query): """ Reading list for your daily life yoda rlist [OPTIONS] SUBCOMMAND [QUERY] ACTION: view [--params="tags"] [query]: view your reading list params: reading list parameter to be filtered (defaults to tags) query: keyword to be searched add: add something to your reading list """ sub_command = str(sub_command) params = str(params) query = str(query) opts = (params, query) if params and query else () # print opts sub_commands = {"view": view_reading_list, "add": add_to_reading_list} try: sub_commands[sub_command](opts) except KeyError: click.echo(chalk.red("Command " + sub_command + " does not exist!")) click.echo("Try 'yoda rlist --help' for more info'")
5,354,544
def coverage(c, report="term", opts="", codecov=False): """ Run pytest in coverage mode. See `invocations.pytest.coverage` for details. """ # Use our own test() instead of theirs. # Also add integration test so this always hits both. # (Not regression, since that's "weird" / doesn't really hit any new # coverage points) coverage_( c, report=report, opts=opts, tester=test, additional_testers=[integration], codecov=codecov, )
5,354,545
def test_cataloging_admin_can_register_permission_from_collection_view(user, collection, superuser, testapp): """Register new permission from collection view as cataloging admin.""" PermissionFactory(user=user, collection=collection, cataloging_admin=True).save_as(superuser) old_permission_count = len(Permission.query.all()) # Goes to homepage res = testapp.get('/') # Fills out login form login_form = res.forms['loginForm'] login_form['username'] = user.email login_form['password'] = 'myPrecious' # Submits res = login_form.submit().follow() # Clicks to View Collection from profile res = res.click(href=url_for('collection.view', collection_code=collection.code)) # Clicks Register New Permission res = res.click(_('New Permission')) # Finds that the intended user doesn't exist res = res.click(_('New User')) # Fills out the user registration form register_user_form = res.forms['registerUserForm'] register_user_form['username'] = '[email protected]' register_user_form['full_name'] = 'Registrant' register_user_form['send_password_reset_email'].checked = False res = register_user_form.submit() assert res.status_code == 302 assert url_for('permission.register', collection_id=collection.id) in res.location other_user = User.get_by_email('[email protected]') assert other_user is not None # Saves the form to grant 'other_user' permissions on 'collection' res = res.follow(headers={'Referer': res.request.referrer}) # FIXME: Webtest dropping referer. assert res.status_code == 200 register_permission_form = res.forms['registerPermissionForm'] # New user is preset, ``register_permission_form['user_id'] = other_user.id`` is redundant # Defaults are kept, ``register_permission_form['collection_id'] = collection.id`` is redundant register_permission_form['registrant'].checked = True register_permission_form['cataloger'].checked = True # Submits res = register_permission_form.submit() assert res.status_code == 302 assert url_for('collection.view', collection_code=collection.code) in res.location res = res.follow() assert res.status_code == 200 # The permission was created, and number of permissions are 1 more than initially assert _('Added permissions for "%(username)s" on collection "%(code)s".', username=other_user.email, code=collection.code) in res assert len(Permission.query.all()) == old_permission_count + 1 # The new permission is listed on the collection view. assert len(res.lxml.xpath("//td[contains(., '{0}')]".format(user.email))) == 1
5,354,546
def connectCells(self): """ Function for/to <short description of `netpyne.network.conn.connectCells`> Parameters ---------- self : <type> <Short description of self> **Default:** *required* """ from .. import sim # Instantiate network connections based on the connectivity rules defined in params sim.timing('start', 'connectTime') if sim.rank==0: print('Making connections...') if sim.nhosts > 1: # Gather tags from all cells allCellTags = sim._gatherAllCellTags() else: allCellTags = {cell.gid: cell.tags for cell in self.cells} allPopTags = {-i: pop.tags for i,pop in enumerate(self.pops.values())} # gather tags from pops so can connect NetStim pops if self.params.subConnParams: # do not create NEURON objs until synapses are distributed based on subConnParams origCreateNEURONObj = bool(sim.cfg.createNEURONObj) origAddSynMechs = bool(sim.cfg.addSynMechs) sim.cfg.createNEURONObj = False sim.cfg.addSynMechs = False gapJunctions = False # assume no gap junctions by default for connParamLabel,connParamTemp in self.params.connParams.items(): # for each conn rule or parameter set connParam = connParamTemp.copy() connParam['label'] = connParamLabel # find pre and post cells that match conditions preCellsTags, postCellsTags = self._findPrePostCellsCondition(allCellTags, connParam['preConds'], connParam['postConds']) # if conn function not specified, select based on params if 'connFunc' not in connParam: if 'probability' in connParam: connParam['connFunc'] = 'probConn' # probability based func elif 'convergence' in connParam: connParam['connFunc'] = 'convConn' # convergence function elif 'divergence' in connParam: connParam['connFunc'] = 'divConn' # divergence function elif 'connList' in connParam: connParam['connFunc'] = 'fromListConn' # from list function else: connParam['connFunc'] = 'fullConn' # convergence function connFunc = getattr(self, connParam['connFunc']) # get function name from params # process string-based funcs and call conn function if preCellsTags and postCellsTags: # initialize randomizer in case used in string-based function (see issue #89 for more details) self.rand.Random123(sim.hashStr('conn_'+connParam['connFunc']), sim.hashList(sorted(preCellsTags)+sorted(postCellsTags)), sim.cfg.seeds['conn']) self._connStrToFunc(preCellsTags, postCellsTags, connParam) # convert strings to functions (for the delay, and probability params) connFunc(preCellsTags, postCellsTags, connParam) # call specific conn function # check if gap junctions in any of the conn rules if not gapJunctions and 'gapJunction' in connParam: gapJunctions = True if sim.cfg.printSynsAfterRule: nodeSynapses = sum([len(cell.conns) for cell in sim.net.cells]) print((' Number of synaptic contacts on node %i after conn rule %s: %i ' % (sim.rank, connParamLabel, nodeSynapses))) # add presynaptoc gap junctions if gapJunctions: # distribute info on presyn gap junctions across nodes if not getattr(sim.net, 'preGapJunctions', False): sim.net.preGapJunctions = [] # if doesn't exist, create list to store presynaptic cell gap junctions data = [sim.net.preGapJunctions]*sim.nhosts # send cells data to other nodes data[sim.rank] = None gather = sim.pc.py_alltoall(data) # collect cells data from other nodes (required to generate connections) sim.pc.barrier() for dataNode in gather: if dataNode: sim.net.preGapJunctions.extend(dataNode) # add gap junctions of presynaptic cells (need to do separately because could be in different ranks) for preGapParams in getattr(sim.net, 'preGapJunctions', []): if preGapParams['gid'] in self.gid2lid: # 
only cells in this rank cell = self.cells[self.gid2lid[preGapParams['gid']]] cell.addConn(preGapParams) # apply subcellular connectivity params (distribution of synaspes) if self.params.subConnParams: self.subcellularConn(allCellTags, allPopTags) sim.cfg.createNEURONObj = origCreateNEURONObj # set to original value sim.cfg.addSynMechs = origAddSynMechs # set to original value cellsUpdate = [c for c in sim.net.cells if c.tags['cellModel'] not in ['NetStim', 'VecStim']] if sim.cfg.createNEURONObj: for cell in cellsUpdate: # Add synMechs, stim and conn NEURON objects cell.addStimsNEURONObj() #cell.addSynMechsNEURONObj() cell.addConnsNEURONObj() nodeSynapses = sum([len(cell.conns) for cell in sim.net.cells]) if sim.cfg.createPyStruct: nodeConnections = sum([len(set([conn['preGid'] for conn in cell.conns])) for cell in sim.net.cells]) else: nodeConnections = nodeSynapses print((' Number of connections on node %i: %i ' % (sim.rank, nodeConnections))) if nodeSynapses != nodeConnections: print((' Number of synaptic contacts on node %i: %i ' % (sim.rank, nodeSynapses))) sim.pc.barrier() sim.timing('stop', 'connectTime') if sim.rank == 0 and sim.cfg.timing: print((' Done; cell connection time = %0.2f s.' % sim.timingData['connectTime'])) return [cell.conns for cell in self.cells]
5,354,547
def convert_numpy_str_to_uint16(data): """ Converts a numpy.unicode\_ to UTF-16 in numpy.uint16 form. Convert a ``numpy.unicode_`` or an array of them (they are UTF-32 strings) to UTF-16 in the equivalent array of ``numpy.uint16``. The conversion will throw an exception if any characters cannot be converted to UTF-16. Strings are expanded along rows (across columns) so a 2x3x4 array of 10 element strings will get turned into a 2x30x4 array of uint16's if every UTF-32 character converts easily to a UTF-16 singlet, as opposed to a UTF-16 doublet. Parameters ---------- data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_ The string or array of them to convert. Returns ------- array : numpy.ndarray of numpy.uint16 The result of the conversion. Raises ------ UnicodeEncodeError If a UTF-32 character has no UTF-16 representation. See Also -------- convert_numpy_str_to_uint32 convert_to_numpy_str """ # An empty string should be an empty uint16 if data.nbytes == 0: return np.uint16([]) # We need to use the UTF-16 codec for our endianness. Using the # right one means we don't have to worry about removing the BOM. if sys.byteorder == 'little': codec = 'UTF-16LE' else: codec = 'UTF-16BE' # numpy.char.encode can do the conversion element wise. Then, we # just have convert to uin16 with the appropriate dimensions. The # dimensions are gotten from the shape of the converted data with # the number of column increased by the number of words (pair of # bytes) in the strings. cdata = np.char.encode(np.atleast_1d(data), codec) shape = list(cdata.shape) shape[-1] *= (cdata.dtype.itemsize // 2) return np.ndarray(shape=shape, dtype='uint16', buffer=cdata.tostring())
5,354,548
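# Hedged illustration of the UTF-16 round-trip used by convert_numpy_str_to_uint16
# above: a small NumPy-only sketch of the same idea, not the library's own test.
import sys
import numpy as np

codec = 'UTF-16LE' if sys.byteorder == 'little' else 'UTF-16BE'
text = np.str_('abc')                        # NumPy unicode scalar (UTF-32 internally)
encoded = np.char.encode(np.atleast_1d(text), codec)
as_uint16 = np.frombuffer(encoded.tobytes(), dtype=np.uint16)
print(as_uint16)                             # [97 98 99] -> UTF-16 code units for 'a', 'b', 'c'
print(as_uint16.tobytes().decode(codec))     # decoding recovers 'abc'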
def _get_process_num_examples(builder, split, process_batch_size, process_index, process_count, drop_remainder): """Returns the number of examples in a given process's split.""" process_split = _get_process_split( split, process_index=process_index, process_count=process_count, drop_remainder=drop_remainder) num_examples = builder.info.splits[process_split].num_examples if drop_remainder: device_batch_size = process_batch_size // jax.local_device_count() num_examples = ( math.floor(num_examples / device_batch_size) * device_batch_size) return num_examples
5,354,549
def export_python_function(earth_model): """ Exports model as a pure python function, with no numpy/scipy/sklearn dependencies. :param earth_model: Trained pyearth model :return: A function that accepts an iterator over examples, and returns an iterator over transformed examples """ i = 0 accessors = [] for bf in earth_model.basis_: if not bf.is_pruned(): accessors.append(bf.func_factory(earth_model.coef_[0, i])) i += 1 def func(example_iterator): return [sum(accessor(row) for accessor in accessors) for row in example_iterator] return func
5,354,550
def thermostat_get_zone_information(
    address: Address, zone: int, info: int, topic=pub.AUTO_TOPIC
):
    """Create a THERMOSTAT_GET_ZONE_INFORMATION command.

    zone: (int) 0 to 31

    info: (int)
        0 = Temperature
        1 = Setpoint
        2 = Deadband
        3 = Humidity

    """
    zone = zone & 0x0F
    # Mask the info bits first, then shift them into bits 5-6 of cmd2.
    # Without the parentheses the shift binds tighter than the mask and the
    # info value is silently dropped.
    info = (info & 0x03) << 5
    cmd2 = info + zone
    _create_direct_message(topic=topic, address=address, cmd2=cmd2)
5,354,551
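# A minimal, standalone sketch of the cmd2 bit-packing performed by
# thermostat_get_zone_information above: the zone occupies the low nibble and
# the two info bits are shifted into bits 5-6. The concrete values are
# illustrative only.
zone, info = 7, 2                          # zone 7, info 2 (= Deadband per the docstring)
cmd2 = ((info & 0x03) << 5) + (zone & 0x0F)
print(bin(cmd2))                           # 0b1000111 -> info bits '10', zone bits '0111'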
def ehi(data, thr_95, axis=0, keepdims=False):
    """
    Calculate Excessive Heat Index (EHI).

    Parameters
    ----------
    data: list/array
        1D/2D array of daily temperature time series
    thr_95: float
        95th percentile daily mean value from climatology
    axis: int
        The axis along which the calculation is applied (default 0).
    keepdims: boolean
        If data is 2d (time in third dimension) and keepdims is set to True,
        calculation is applied to the zeroth axis (time) and returns a 2d
        array of freq-int dists. If set to False (default) all values are
        collectively assembled before calculation.

    Returns
    -------
    EHI: float
        Excessive heat index
    """
    def ehi_calc(pdata, thr_95):
        if all(np.isnan(pdata)):
            print("All data missing/masked!")
            ehi = np.nan
        else:
            # run_mean = moving_average(pdata, 3)
            rmean = run_mean(pdata, 3)
            ehi = ((rmean > thr_95)).sum()
        return ehi

    if keepdims:
        EHI = np.apply_along_axis(ehi_calc, axis, data, thr_95)
    else:
        EHI = ehi_calc(data, thr_95)

    return EHI
5,354,552
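# Standalone sketch of the EHI computation in ehi() above. It assumes run_mean()
# is a 3-day trailing/valid moving average; here a simple valid-mode convolution
# stands in for it, so the numbers are illustrative rather than authoritative.
import numpy as np

temps = np.array([20., 21., 25., 28., 30., 29., 22.])    # daily mean temperatures
thr_95 = 26.0                                             # climatological 95th percentile
rmean = np.convolve(temps, np.ones(3) / 3, mode='valid')  # 3-day running mean
ehi_value = (rmean > thr_95).sum()                        # count of windows above the threshold
print(rmean.round(2), ehi_value)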
def make_transpose_tests(options): """Make a set of tests to do transpose.""" # TODO(nupurgarg): Add test for uint8. test_parameters = [{ "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[2, 2, 3]], "perm": [[0, 1, 2], [0, 2, 1]], "constant_perm": [True, False], }, { "dtype": [tf.float32], "input_shape": [[1, 2, 3, 4]], "perm": [[0, 1, 2, 3], [3, 0, 1, 2]], "constant_perm": [True, False], }, { "dtype": [tf.float32], "input_shape": [[1, 2, 3, 4, 5]], "perm": [[4, 3, 2, 1, 0]], "constant_perm": [True, False], }] def build_graph(parameters): """Build a transpose graph given `parameters`.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) if parameters["constant_perm"]: perm = parameters["perm"] input_tensors = [input_tensor] else: shape = [len(parameters["perm"]), 2] perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape) input_tensors = [input_tensor, perm] out = tf.transpose(input_tensor, perm=perm) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_perm"]: values.append(np.array(parameters["perm"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests( options, test_parameters, build_graph, build_inputs, expected_tf_failures=9)
5,354,553
def get_disable_migration_module():
    """Return a mapping-like object that disables migrations."""
    class DisableMigration:

        def __contains__(self, item):
            return True

        def __getitem__(self, item):
            return None

    return DisableMigration()
5,354,554
async def test_view_empty_namespace(client, sensor_entities): """Test prometheus metrics view.""" body = await generate_latest_metrics(client) assert "# HELP python_info Python platform information" in body assert ( "# HELP python_gc_objects_collected_total " "Objects collected during gc" in body ) assert ( 'entity_available{domain="sensor",' 'entity="sensor.radio_energy",' 'friendly_name="Radio Energy"} 1.0' in body ) assert ( 'last_updated_time_seconds{domain="sensor",' 'entity="sensor.radio_energy",' 'friendly_name="Radio Energy"} 86400.0' in body )
5,354,555
def weather(): """The weather route of My Weather API.""" # Load URL and KEY args of Current Weather API of OpenWeatherMap api_url = app.config.get("API_URL") api_key = app.config.get("API_KEY") validators.check_emptiness('API_URL', api_url) validators.check_emptiness('API_KEY', api_key) # Obtain and verify city and country args entered to route city = request.args.get('city') country = request.args.get('country') validators.check_emptiness('city', city) validators.check_emptiness('country', country) validators.check_regex('city', city, "[A-Za-z ]+") validators.check_regex('country', country, "[a-z]{2}") # Construct URL request of Current Weather API of OpenWeatherMap url = "{0}{1},{2}&units=metric&appid={3}".format(api_url, city, country, api_key) # Obtain response from Current Weather API of OpenWeatherMap input_json = requests.get(url).json() # Debugging: print the 'input_json' data in good style # webfunctions.beautiful_json(input_json) # If 'input_json' hasn't HTTP:200 status, # then the response will be same that it was obtained from OpenWeatherMap webfunctions.reply_bad_response(input_json) # Create and return the final API response from My Weather API output_json = webfunctions.create_response_body(input_json) return jsonify(output_json)
5,354,556
def __sbox_bytes(data, sbox):
    """S-Box substitution of a list of bytes"""
    return [__sbox_single_byte(byte, sbox) for byte in data]
5,354,557
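# Toy illustration of the byte-wise S-box substitution that __sbox_bytes above
# delegates to __sbox_single_byte; the per-byte lookup is assumed to be a plain
# table index, and the table below is a made-up example, not a real cipher S-box.
toy_sbox = list(range(255, -1, -1))            # substitute b -> 255 - b
data = [0x00, 0x10, 0xFF]
substituted = [toy_sbox[b] for b in data]
print(substituted)                             # [255, 239, 0]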
def load_config_with_kwargs(cls, kwargs):
    """Takes a marshmallow class and dict of parameter values and appropriately instantiates the schema."""
    assert_is_a_marshmallow_class(cls)
    schema = cls.Schema()
    fields = schema.fields.keys()
    return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), {
        k: v for k, v in kwargs.items() if k not in fields
    }
5,354,558
def test_pop_the_cap_reform(): """ Test eliminating the maximum taxable earnings (MTE) used in the calculation of the OASDI payroll tax. """ # create Policy parameters object ppo = Policy() assert ppo.current_year == Policy.JSON_START_YEAR # confirm that MTE has current-law values in 2015 and 2016 mte = ppo._SS_Earnings_c syr = Policy.JSON_START_YEAR assert mte[2015 - syr] == 118500 assert mte[2016 - syr] == 118500 # specify a "pop the cap" reform that eliminates MTE cap in 2016 reform = {'SS_Earnings_c': {2016: 9e99}} ppo.implement_reform(reform) assert mte[2015 - syr] == 118500 assert mte[2016 - syr] == 9e99 assert mte[ppo.end_year - syr] == 9e99
5,354,559
def ConfigureInstanceTemplate(args, kube_client, project_id, network_resource, workload_namespace, workload_name, workload_manifest, membership_manifest, asm_revision, mesh_config): """Configure the provided instance template args with ASM metadata.""" is_mcp = _IsMCP(kube_client, asm_revision) service_proxy_metadata_args = _RetrieveServiceProxyMetadata( args, is_mcp, kube_client, project_id, network_resource, workload_namespace, workload_name, workload_manifest, membership_manifest, asm_revision, mesh_config) _ModifyInstanceTemplate(args, is_mcp, service_proxy_metadata_args)
5,354,560
def SetRandomSeed(seed):
    """Set the global random seed.

    Parameters
    ----------
    seed : int
        The seed to use.

    Returns
    -------
    None

    """
    global option
    option['random_seed'] = seed
5,354,561
def p_skip_base(p):
    """
    skip_base : skip_operator
              | skip_keyword
              | skip_constant
              | ID
    """
    p[0] = p[1]
5,354,562
def main(argv=None): """Main program which parses args and runs Args: argv: List of command line arguments, if None uses sys.argv. """ if argv is None: argv = sys.argv[1:] opts = parse_args(argv) Main(opts.project_configs, opts.program_config, opts.output)
5,354,563
def cli(**cli_kwargs):
    """Rasterize a slide into smaller tiles

    Tiles are saved in the whole-slide tiles binary format (tiles.pil), and the
    corresponding manifest/header file (tiles.csv) is also generated

    Necessary data for the manifest file are: address, x_coord, y_coord,
    full_resolution_tile_size, tile_image_binary, tile_image_length,
    tile_image_size_xy, and tile_image_mode

    \b
    Inputs:
        input_slide_image: slide image (virtual slide formats compatible with openslide, .svs, .tif, .scn, ...)
    Outputs:
        slide_tiles
    \b
    Example:
        generate_tiles 10001.svs
            -nc 8 -rts 244 -rmg 10 -bx 200
            -o 10001/tiles
    """
    cli_runner(cli_kwargs, _params_, generate_tiles)
5,354,564
def test_display_failed(): """Verify failed devices are showing""" cmd_list = [NETMIKO_GREP] + ['interface', 'all'] (output, std_err) = subprocess_handler(cmd_list) assert "Failed devices" in output failed_devices = output.split("Failed devices:")[1] failed_devices = failed_devices.strip().split("\n") failed_devices = [x.strip() for x in failed_devices] assert len(failed_devices) == 2 assert "bad_device" in failed_devices assert "bad_port" in failed_devices
5,354,565
def split_rows(sentences, column_names):
    """
    Creates a list of sentences where each sentence is a list of lines.
    Each line is a dictionary of columns.
    :param sentences:
    :param column_names:
    :return:
    """
    new_sentences = []
    root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']
    start = [dict(zip(column_names, root_values))]
    for sentence in sentences:
        rows = sentence.split('\n')
        sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']
        sentence = start + sentence
        new_sentences.append(sentence)
    return new_sentences
5,354,566
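# Usage sketch for split_rows above with a single CoNLL-style sentence; it
# assumes split_rows is importable from the surrounding module, and the column
# names below are the usual 10-column CoNLL-X layout, used here for illustration.
column_names = ['id', 'form', 'lemma', 'cpostag', 'postag',
                'feats', 'head', 'deprel', 'phead', 'pdeprel']
sentence = ("# sent_id = 1\n"
            "1 Hello hello INTJ UH _ 2 discourse _ _\n"
            "2 world world NOUN NN _ 0 root _ _")
parsed = split_rows([sentence], column_names)
print(parsed[0][1]['form'], parsed[0][1]['head'])   # 'Hello' '2' (index 0 is the ROOT row)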
def compute_pw_sparse_out_of_memory2(tr, row_size = 500, pm_processes = 2, pm_pbar = True, max_distance = 50, reassemble = True, cleanup = True, assign = True): """ Instead of calling TCRrep.compute_distances(), this function permits a parallelizable approach that does not require holding a large matrix in memory. Default behavior is to reassemble a scipy sparse matrix from a set of sub matrices written to disk fragment. With <reassemble = True> function returns a scipy sparse matrix. Space savings are achieved because any value above <max_distance> is set to zero. True zero distances are set to -1. Can be used to form a network of TCRs with tcrdistances < max_distance, Parameters ---------- tr : TCRrep TCRrep instance with clone_df row_size : int How many rows to process in memory at once pm_processes : int Numbe of concurrent parallel processes to run at once pm_bar : bool If True, show progress bar. max_distance : int Max distance matrix_name : str Name of matrix to return (i.e, 'rw_beta' or 'rw_alpha') reassemble: True If true, makes one matrix from all the sparse sub matrices. cleanup: bool, if True, deletes temporary files. assign : bool if True, assigns pw sparse matrices to TCRrep object. That is TCRrep.pw_beta, TCRrep.pw_alpha will be assigned the reassembled spare matrces. Returns ------- csr_full : sparse scipy matrix dest : str name of the folder that holds fragments Examples -------- import numpy as np import pandas as pd from tcrdist.repertoire import TCRrep from tcrdist.rep_funcs import compute_pw_sparse_out_of_memory df = pd.read_csv("dash.csv") #(1) tr = TCRrep(cell_df = df, #(2) organism = 'mouse', chains = ['beta'], db_file = 'alphabeta_gammadelta_db.tsv', compute_distances = True, store_all_cdr = False) S = compute_pw_sparse_out_of_memory(tr, matrix_name = "rw_beta", max_distance = 1000) # S is a <1920x1920 sparse matrix of type '<class 'numpy.int16'>' M = S.todense() M[M==1] = 0 np.all(M == tr.pw_beta) S, chunks = compute_pw_sparse_out_of_memory(tr, matrix_name = "rw_beta", max_distance = 50) print(S) # S is a <1920x1920 sparse matrix of type '<class 'numpy.int16'>' """ # Early warning to save heartache if assign is True and reassemble is False: raise ValueError("If you want to assign results to a TCRrep instance, you must set reassemble to True") dest = secrets.token_hex(6) os.mkdir(dest) print(f"CREATED /{dest}/ FOR HOLDING DISTANCE OUT OF MEMORY") row_chunks = memory._partition(range(tr.clone_df.shape[0]), row_size) smatrix_chunks = [(tr, ind, f"{dest}/{i}") for i,ind in enumerate(row_chunks)] csrfragments = parmap.starmap(memory.gen_sparse_rw_on_fragment2, smatrix_chunks, max_distance=max_distance, pm_pbar=pm_pbar, pm_processes = pm_processes) if reassemble: csr_full_dict = dict() for chain in tr.chains: chain_str = f"rw_{chain}" csr_full = memory.collapse_csrs([f"{x[2]}.{chain_str}.npz" for x in smatrix_chunks]) print(f"RETURNING scipy.sparse csr_matrix w/dims {csr_full.shape}") csr_full_dict[chain] = csr_full else: csr_full_dict= None if assign: for chain in tr.chains: setattr(tr, f"pw_{chain}", csr_full_dict[chain]) if cleanup: assert os.path.isdir(dest) print(f"CLEANING UP {dest}") shutil.rmtree(dest) return csr_full_dict, smatrix_chunks
5,354,567
def score_models(X_train = None, y_train = None, X_val = None, y_val = None, y_base = None, includeBase = False, model = None): """Score Models and return results as a dataframe Parameters ---------- X_train : Numpy Array X_train data y_train : Numpy Array Train target X_val : Numpy Array X_val data y_val : Numpy Array Val target includeBase: Boolean Calculate and display baseline model: model Model passed into function Returns ------- """ import pandas as pd import numpy as np df_model_scores = pd.DataFrame() if includeBase == True: df_model_scores_base = score_null_model(y_train = y_train, y_base = y_base, set_name='Base') df_model_scores = pd.concat([df_model_scores,df_model_scores_base],ignore_index = True, axis=0) if X_train.size > 0: df_model_scores_train = score_model(X_train, y_train, set_name='Train', model=model) df_model_scores = pd.concat([df_model_scores,df_model_scores_train],ignore_index = True, axis=0) if X_val.size > 0: df_model_scores_val = score_model(X_val, y_val, set_name='Validate', model=model) df_model_scores = pd.concat([df_model_scores,df_model_scores_val],ignore_index = True, axis=0) display(df_model_scores) return
5,354,568
def _rollup_date(dts, interval=None):
    """Format the date/time string based on the interval specified for summation.

    For Daily, it returns just the date. No time or timezone.

    For Hourly, it returns an ISO-8601 datetime range. This provides previously
    missing clarity around whether the rainfall amount shown was for the period
    starting at the returned datetime or the period preceding it (the latter
    being the correct approach for datetimes but not dates.)
    """
    if interval == INTERVAL_DAILY:
        # strip the time entirely from the datetime string. Timezone is lost.
        return parse(dts).strftime("%Y-%m-%d")
    elif interval == INTERVAL_HOURLY:
        # set the minutes, seconds, and microsecond to zeros. Timezone is preserved.
        # This method returns the total for the hour, e.g. a
        # rainfall total of 1 inch with a timestamp of "2020-04-07T10:00:00-04:00"
        # is actually 1 inch for intervals within the 10 o'clock hour.
        # return parse(dts).replace(minute=0, second=0, microsecond=0).isoformat()
        # NOTE: It may be more appropriate to use a timedelta+1 hour here,
        # if the rainfall is to be interpreted as the total *up to* a point in time.

        # Because we're looking at accumulation, we want timestamps that
        # represent rainfall accumulated during the previous fifteen minutes
        # within the hour represented. So in a list of [1:00, 1:15, 1:30, 1:45,
        # 2:00], we scratch the 1:00 since it represents accumulation from
        # 12:45 to 1:00, outside our hour of interest. Everything else rep's
        # rain recorded between >1 and <=2 o'clock. We can get that by
        # bumping everything back 15 minutes, then generating the hourly.

        # start_dt = parse(dts).replace(minute=0, second=0, microsecond=0)
        start_dt = parse(dts)
        start_dt = start_dt - timedelta(minutes=MIN_INTERVAL)
        start_dt = start_dt.replace(minute=0, second=0, microsecond=0)
        end_dt = start_dt + timedelta(hours=1)
        end_dt.replace(minute=0, second=0, microsecond=0)
        return "{0}/{1}".format(start_dt.isoformat(), end_dt.isoformat())
    else:
        # return it as-is
        return dts
5,354,569
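# Standalone sketch of the hourly roll-up logic in _rollup_date above, assuming
# MIN_INTERVAL is 15 (minutes): the timestamp is shifted back one interval,
# truncated to the hour, and paired with hour+1 as an ISO-8601 range.
from datetime import timedelta
from dateutil.parser import parse

MIN_INTERVAL = 15
dts = "2020-04-07T10:00:00-04:00"
start_dt = (parse(dts) - timedelta(minutes=MIN_INTERVAL)).replace(minute=0, second=0, microsecond=0)
end_dt = start_dt + timedelta(hours=1)
print("{0}/{1}".format(start_dt.isoformat(), end_dt.isoformat()))
# 2020-04-07T09:00:00-04:00/2020-04-07T10:00:00-04:00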
def offset_compensation(time_signal):
    """ Offset compensation filter. """
    return lfilter([1., -1], [1., -0.999], time_signal)
5,354,570
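# Quick check of the offset-compensation (DC-removal) filter above: for a
# constant input the first output sample keeps the step and the rest slowly
# decay toward zero (pole at 0.999). Assumes scipy is available.
import numpy as np
from scipy.signal import lfilter

signal_with_offset = np.ones(5) * 3.0
print(lfilter([1., -1], [1., -0.999], signal_with_offset))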
def process_dir(thisdir): """Process /thisdir/ recursively""" res = [] shellparams = {'stdin':subprocess.PIPE,'stdout':sys.stdout,'shell':True} command = [utils.assimp_bin_path,"testbatchload"] for f in os.listdir(thisdir): if os.path.splitext(f)[-1] in settings.exclude_extensions: continue fullpath = os.path.join(thisdir, f) if os.path.isdir(fullpath): if f != ".svn": res += process_dir(fullpath) continue # import twice, importing the same file again introduces extra risk # to crash due to garbage data lying around in the importer. command.append(fullpath) command.append(fullpath) if len(command)>2: # testbatchload returns always 0 if more than one file in the list worked. # however, if it should segfault, the OS will return something not 0. command += reversed(command[2:]) if subprocess.call(command, **shellparams): res.append(thisdir) return res
5,354,571
def _alias(default: Callable) -> Callable[[T], T]: """ Decorator which re-assigns a function `_f` to point to `default` instead. Since global function calls in Python are somewhat expensive, this is mainly done to reduce a bit of overhead involved in the functions calls. For example, consider the below example:: def f2(o): return o def f1(o): return f2(o) Calling function `f1` will incur some additional overhead, as opposed to simply calling `f2`. Now assume we wrap `f1` with the `_alias` decorator:: def f2(o): return o @_alias(f2) def f1(o): ... This will essentially perform the assignment of `f1 = f2`, so calling `f1()` in this case has no additional function overhead, as opposed to just calling `f2()`. """ def new_func(_f: T) -> T: return cast(T, default) return new_func
5,354,572
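# Minimal demonstration of the _alias decorator pattern described above: the
# decorated function is simply re-bound to the default implementation, so no
# extra call frame is involved. This repeats the docstring's own example in
# runnable form.
from typing import Callable, TypeVar, cast

T = TypeVar('T')

def _alias(default: Callable) -> Callable[[T], T]:
    def new_func(_f: T) -> T:
        return cast(T, default)
    return new_func

def f2(o):
    return o

@_alias(f2)
def f1(o):
    ...

print(f1 is f2)   # True -- calling f1 adds no extra function-call overhead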
def test_image(filename):
    """
    Return the absolute path to image file having *filename* in test_files
    directory.
    """
    return absjoin(thisdir, 'test_files', filename)
5,354,573
def menu( ticker: str, start: str, interval: str, stock: pd.DataFrame, ): """Sector and Industry Analysis Menu""" sia_controller = SectorIndustryAnalysisController(ticker, start, interval, stock) sia_controller.call_help(None) while True: # Get input command from user if session and gtff.USE_PROMPT_TOOLKIT: completer = NestedCompleter.from_nested_dict( {c: None for c in sia_controller.CHOICES} ) an_input = session.prompt( f"{get_flair()} (stocks)>(sia)> ", completer=completer, ) else: an_input = input(f"{get_flair()} (stocks)>(sia)> ") try: process_input = sia_controller.switch(an_input) if process_input is not None: return process_input except SystemExit: print("The command selected doesn't exist\n") similar_cmd = difflib.get_close_matches( an_input, sia_controller.CHOICES, n=1, cutoff=0.7 ) if similar_cmd: print(f"Did you mean '{similar_cmd[0]}'?\n") continue
5,354,574
def fname_template(orun, detname, ofname, nevts, tsec=None, tnsec=None): """Replaces parts of the file name specified as #src, #exp, #run, #evts, #type, #date, #time, #fid, #sec, #nsec with actual values """ template = replace(ofname, '#src', detname) template = replace(template, '#exp', orun.expt) template = replace(template, '#run', 'r%04d'%orun.runnum) template = replace(template, '#type', '%s') t_sec = tsec if tsec is not None else int(orun.timestamp>>32 & 0xFFFFFFFF) t_nsec = tnsec if tnsec is not None else int(orun.timestamp & 0xFFFFFFFF) template = replace(template, '#date', str_tstamp('%Y-%m-%d', t_sec)) template = replace(template, '#time', str_tstamp('%H%M%S', t_sec)) template = replace(template, '#sec', '%d' % t_sec) template = replace(template, '#nsec', '%09d' % t_nsec) template = replace(template, '#evts', 'e%06d' % nevts) if not '%s' in template: template += '-%s' return template
5,354,575
def opt_checked(method):
    """Like `@checked`, but it is legal to not specify the value. In this
    case, the special `Unset` value is passed to the validation function.
    Storing `Unset` causes the key to not be emitted during serialization."""
    return Checked(method.__name__, method.__doc__, method, True)
5,354,576
def _metadata(case_study):
    """Collect metadata in a dictionary."""
    return {
        'creation_date': datetime.strftime(datetime.now(), '%c'),
        'imagery': case_study.imagery,
        'latitude': case_study.lat,
        'longitude': case_study.lon,
        'area_of_interest': case_study.aoi_latlon.wkt,
        'crs': str(case_study.crs),
        'country': case_study.country
    }
5,354,577
def CoP_constraints_ds( m, foot_angles, next_support_foot_pos, stateX, stateY, N=16, dt=0.1, h=1.0, g=9.81, tPf=8, ): """ INPUTS m (int): remaining time steps in current foot step; foot_angles ([N, 1] vector): containing the orientations in radians of the foot steps at each time step; next_support_foot_pos ([2, 1] vec): next support foot position; stateX ([3, 1] matrix): position, velocity, acceleration of CoM along x-axis; stateY ([3, 1] matrix): position, velocity, acceleration of CoM along y-axis; N (int): is the length of the preview horizon; dt (float): time step size; h (float): CoM height; g (float): gravitational acceleration; tPf (int): time steps per foot step; Also calls a function that load the data for the foot edge normal vectors and edge to center distances; OUTPUTS leftHandSide: size [ef*N, 2N+2l] Matrix, where l is the number of remaining foots steps contained in the preview horizon and ef is the number of edges in the robot foot, e being the number of the edges of the foot, using a rectangular foot, ef=4; rightHandSide: size [ef*N, 1] Matrix; """ Uz = get_Uz(N=N) FutureStepsMat = stepsInFutureStepsMat(m, N=N) middleMat_diag = np.hstack((Uz, -FutureStepsMat[:, 1:])) middleMat = block_diag(middleMat_diag, middleMat_diag) Sz = get_Sz(N=N) rightVecX = FutureStepsMat[:, :1] * next_support_foot_pos[0] - Sz @ stateX rightVecY = FutureStepsMat[:, :1] * next_support_foot_pos[1] - Sz @ stateY rightVex = np.vstack((rightVecX, rightVecY)) # set_trace() for i in range(N): RotMat = angle2RotMat(foot_angles[i]) if i < m: d, b = init_double_support_CoP() else: d, b = rectangular_foot_CoP() # (Rd^T)^T = dR^T dRot = d @ RotMat.T if i == 0: DMatX = block_diag(dRot[:, :1]) DMatY = block_diag(dRot[:, 1:]) bVec = b else: DMatX = block_diag(DMatX, dRot[:, :1]) DMatY = block_diag(DMatY, dRot[:, 1:]) bVec = np.vstack((bVec, b)) DMat = np.hstack((DMatX, DMatY)) leftHandSide = DMat @ middleMat rightHandSide = bVec + DMat @ rightVex return leftHandSide, rightHandSide
5,354,578
def HARRIS(img_path):
    """
    Extract Harris corner features.
    :param img_path:
    :return:
    :Version: 1.0
    """
    img = io.imread(img_path)
    img = skimage.color.rgb2gray(img)
    img = (img - np.mean(img)) / np.std(img)
    feature = corner_harris(img, method='k', k=0.05, eps=1e-06, sigma=1)
    return feature.reshape(feature.shape[0] * feature.shape[1])
5,354,579
def upgrade(): """ Change upload_area primary key to be integer sequence, and update any foreign keys that reference it. """ # Upload Area op.execute("ALTER TABLE file DROP CONSTRAINT file_upload_area;") op.execute("ALTER TABLE upload_area DROP CONSTRAINT upload_area_pkey;") op.execute("ALTER TABLE upload_area RENAME COLUMN id TO uuid;") # reindex uuid op.execute("CREATE UNIQUE INDEX upload_area_uuid ON upload_area (uuid);") op.execute("ALTER TABLE upload_area ADD CONSTRAINT unique_uuid UNIQUE USING INDEX upload_area_uuid;") # add new primary key op.execute("ALTER TABLE upload_area ADD COLUMN id SERIAL PRIMARY KEY;") # update foreign keys pointing at upload_area_id op.execute("UPDATE file " "SET upload_area_id = upload_area.id " "FROM upload_area " "WHERE file.upload_area_id = upload_area.uuid;") op.execute("ALTER TABLE file " "ALTER COLUMN upload_area_id TYPE integer USING (upload_area_id::integer);") op.execute("ALTER TABLE file " "ADD CONSTRAINT file_upload_area FOREIGN KEY (upload_area_id) " "REFERENCES upload_area (id) ON DELETE CASCADE;")
5,354,580
def test_admin_noauth_fail(fn, args):
    """
    Verify that an admin-only call fails when invoked without authentication.
    """
    with pytest.raises(AuthorizationError):
        fn(*args)
5,354,581
def load_object(f_name, directory=None): """Load a custom object, from a pickle file. Parameters ---------- f_name : str File name of the object to be loaded. directory : str or SCDB, optional Folder or database object specifying the save location. Returns ------- object Custom object loaded from pickle file. """ load_path = None if isinstance(directory, SCDB): if check_ext(f_name, '.p') in directory.get_files('counts'): load_path = os.path.join(directory.get_folder_path('counts'), f_name) elif check_ext(f_name, '.p') in directory.get_files('words'): load_path = os.path.join(directory.get_folder_path('words'), f_name) elif isinstance(directory, str) or directory is None: if f_name in os.listdir(directory): load_path = os.path.join(directory, f_name) if not load_path: raise ValueError('Can not find requested file name.') return pickle.load(open(check_ext(load_path, '.p'), 'rb'))
5,354,582
def http_req(blink, url='http://example.com', data=None, headers=None, reqtype='get', stream=False, json_resp=True, is_retry=False): """ Perform server requests and check if reauthorization neccessary. :param blink: Blink instance :param url: URL to perform request :param data: Data to send (default: None) :param headers: Headers to send (default: None) :param reqtype: Can be 'get' or 'post' (default: 'get') :param stream: Stream response? True/FALSE :param json_resp: Return JSON response? TRUE/False :param is_retry: Is this a retry attempt? True/FALSE """ if reqtype == 'post': req = Request('POST', url, headers=headers, data=data) elif reqtype == 'get': req = Request('GET', url, headers=headers) else: raise BlinkException(ERROR.REQUEST) prepped = req.prepare() response = blink.session.send(prepped, stream=stream) if json_resp and 'code' in response.json(): if is_retry: raise BlinkAuthenticationException( (response.json()['code'], response.json()['message'])) else: headers = attempt_reauthorization(blink) return http_req(blink, url=url, data=data, headers=headers, reqtype=reqtype, stream=stream, json_resp=json_resp, is_retry=True) if json_resp: return response.json() return response
5,354,583
def register_encryptor(method: Union[FactorEncryptMethod, str], encryptor: Encryptor) -> None:
    """
    register an encryptor on startup
    """
    encryptor_registry.register(method, encryptor)
5,354,584
def make_dataset(path, seq_length, mem_length, local_rank, lazy=False, xl_style=False, shuffle=True, split=None, tokenizer=None, tokenizer_type='CharacterLevelTokenizer', tokenizer_model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_converage=1.0, non_binary_cols=None, sample_one_document=False, pre_tokenize=False, **kwargs): """function to create datasets+tokenizers for common options""" if split is None: split = [1.] if non_binary_cols is not None: # multilabel dataset support (only for csvs) label_key = non_binary_cols # make tokenizer for dataset if tokenizer is None: tokenizer = make_tokenizer(tokenizer_type, None, tokenizer_model_path, vocab_size, model_type, pad_token, character_converage, **kwargs) # get one or multiple datasets and concatenate if isinstance(path, str): ds = get_dataset(path, tokenizer=tokenizer, pre_tokenize=pre_tokenize, local_rank=local_rank) else: ds = [get_dataset(p, tokenizer=tokenizer, pre_tokenize=pre_tokenize, local_rank=local_rank) for p in path] ds = ConcatDataset(ds) ds_type = '' if 'ds_type' in kwargs: ds_type = kwargs['ds_type'] # Split dataset into train/val/test (and wrap bert dataset) if should_split(split): ds = split_ds(ds, split, shuffle=shuffle) if ds_type.lower() == 'bert': presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False ds = [bert_sentencepair_dataset(d, max_seq_len=seq_length, presplit_sentences=presplit_sentences) if d is not None else None for d in ds] elif ds_type.lower() == 'gpt2': if xl_style: ds = [XLDataset(d, tokenizer, max_seq_len=seq_length, mem_len=mem_length, sample_across_doc=not sample_one_document) if d is not None else None for d in ds] else: ds = [GPT2Dataset(d, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document) if d is not None else None for d in ds] else: if ds_type.lower() == 'bert': presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False ds = bert_sentencepair_dataset(ds, max_seq_len=seq_length, presplit_sentences=presplit_sentences) elif ds_type.lower() == 'gpt2': if xl_style: ds = XLDataset(ds, tokenizer, max_seq_len=seq_length, mem_len=mem_length, sample_across_doc=not sample_one_document) else: ds = GPT2Dataset(ds, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document) return ds, tokenizer
5,354,585
def fix_reference_name(name, blacklist=None): """Return a syntax-valid Python reference name from an arbitrary name""" name = "".join(re.split(r'[^0-9a-zA-Z_]', name)) while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name): if not re.match(r'[a-zA-Z]', name[0]): name = name[1:] continue name = str(name) if not name: name = "data" if blacklist is not None and name in blacklist: get_new_name = lambda index: name+('_%03d' % index) index = 0 while get_new_name(index) in blacklist: index += 1 name = get_new_name(index) return name
5,354,586
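# Usage sketch for fix_reference_name above; assumes the function is importable
# from the surrounding module. The inputs are illustrative.
print(fix_reference_name("my data (2021)"))             # -> 'mydata2021' (non-alphanumerics stripped)
print(fix_reference_name("data", blacklist=["data"]))   # -> 'data_000' (suffixed to avoid the clash)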
def handle_args(): """Handles arguments both in the command line and in IDLE. Output: Tuple, consisting of: - string (input filename or stdin) - string (output filename or stdout) - integer (number of CPUs) """ version_num = "0.0.2" # Tries to execute the script with command line arguments. try: # Creates an instance of argparse. argparser = ThrowingArgumentParser(prog=sys.argv[0], description='samConcat2Tag, processes bwa mem sam format where \ the read comment has been appended to the mapping line following process_10\ xReads.py', epilog='For questions or comments, please contact Matt Settles \ <[email protected]>\n%(prog)s version: ' + version_num, add_help=True) except ArgumentParserError: print("Please run this script on the command line, with the \ correct arguments. Type -h for help.\n") sys.exit() else: # Adds the positional arguments. argparser.add_argument('inputfile', metavar='inputsam', type=str, nargs='?', help='Sam file to process [default: %(default)s]', default="stdin") # Adds the optional arguments. argparser.add_argument('--version', action='version', version="%(prog)s version: " + version_num) # TODO: ADD parameter for sample ID argparser.add_argument('-o', '--output_base', help="Directory + prefix to output, [default: %(default)s]", action="store", type=str, dest="output_base", default="stdout") argparser.add_argument("-@", "--cpus", help="The number of CPUs to use.", type=int, default=1) # Parses the arguments given in the shell. args = argparser.parse_args() inp = args.inputfile outb = args.output_base cpus = args.cpus return inp, outb, cpus
5,354,587
def load_scripts(reload_scripts=False, refresh_scripts=False): """ Load scripts and run each modules register function. :arg reload_scripts: Causes all scripts to have their unregister method called before loading. :type reload_scripts: bool :arg refresh_scripts: only load scripts which are not already loaded as modules. :type refresh_scripts: bool """ use_time = _bpy.app.debug prefs = _bpy.context.user_preferences if use_time: import time t_main = time.time() loaded_modules = set() if refresh_scripts: original_modules = _sys.modules.values() if reload_scripts: _bpy_types.TypeMap.clear() # just unload, don't change user defaults, this means we can sync # to reload. note that they will only actually reload of the # modification time changes. This `won't` work for packages so... # its not perfect. for module_name in [ext.module for ext in prefs.addons]: _addon_utils.disable(module_name, default_set=False) def register_module_call(mod): register = getattr(mod, "register", None) if register: try: register() except: import traceback traceback.print_exc() else: print("\nWarning! '%s' has no register function, " "this is now a requirement for registerable scripts" % mod.__file__) def unregister_module_call(mod): unregister = getattr(mod, "unregister", None) if unregister: try: unregister() except: import traceback traceback.print_exc() def test_reload(mod): import imp # reloading this causes internal errors # because the classes from this module are stored internally # possibly to refresh internal references too but for now, best not to. if mod == _bpy_types: return mod try: return imp.reload(mod) except: import traceback traceback.print_exc() def test_register(mod): if refresh_scripts and mod in original_modules: return if reload_scripts and mod: print("Reloading:", mod) mod = test_reload(mod) if mod: register_module_call(mod) _global_loaded_modules.append(mod.__name__) if reload_scripts: # module names -> modules _global_loaded_modules[:] = [_sys.modules[mod_name] for mod_name in _global_loaded_modules] # loop over and unload all scripts _global_loaded_modules.reverse() for mod in _global_loaded_modules: unregister_module_call(mod) for mod in _global_loaded_modules: test_reload(mod) _global_loaded_modules[:] = [] for base_path in script_paths(): for path_subdir in _script_module_dirs: path = _os.path.join(base_path, path_subdir) if _os.path.isdir(path): _sys_path_ensure(path) # only add this to sys.modules, don't run if path_subdir == "modules": continue for mod in modules_from_path(path, loaded_modules): test_register(mod) # deal with addons separately _addon_utils.reset_all(reload_scripts) # run the active integration preset filepath = preset_find(prefs.inputs.active_keyconfig, "keyconfig") if filepath: keyconfig_set(filepath) if reload_scripts: import gc print("gc.collect() -> %d" % gc.collect()) if use_time: print("Python Script Load Time %.4f" % (time.time() - t_main))
5,354,588
def plot_repeat_transaction_over_time(data, median, output_folder, time_label): """Creates and saves an image containing the plot the transactions over time. Args: data: Pandas DataFrame containing the data to plot. median: Median line that shows split between calibration and holdout period. output_folder: Folder where the image file containing the plot will be saved. time_label: String describing the time granularity. Returns: Nothing. Just save the image file to the output folder. """ if not output_folder: return import matplotlib matplotlib.use('Agg') from matplotlib import pyplot as plt if time_label == TimeGranularityParams.GRANULARITY_DAILY: time_label_short = 'Day' elif time_label == TimeGranularityParams.GRANULARITY_MONTHLY: time_label_short = 'Month' else: time_label_short = 'Week' txs = data[ ['time_unit_number', 'repeat_transactions', 'predicted_transactions'] ] txs.columns = [time_label_short, 'Actual', 'Model'] ax = txs.plot(kind='line', x=time_label_short, style=['-', '--']) # Median line that shows split between calibration and holdout period plt.axvline(median, color='k', linestyle='--') plt.legend() plt.title('Tracking %s Transactions' % time_label.capitalize()) plt.ylabel('Transactions') plt.xlabel(time_label_short) # Save to file save_to_file(output_folder + 'repeat_transactions_over_time.png', lambda f: plt.savefig(f, bbox_inches='tight'))
5,354,589
def get_available_gates() -> tuple[str, ...]:
    """
    Return available gates.
    """
    from hybridq.gate.gate import _available_gates
    return tuple(_available_gates)
5,354,590
def main():
    """
    Entry point

    Collect all reviews from the file system (FS) and
    dump them as a JSON representation back to the FS

    Returns:
        int: The status code
    """
    collector = Collector()
    return collector.collect()
5,354,591
def __parse_tokens(sentence: spacy.tokens.Doc) -> ParsedUniversalDependencies: """Parses parts of speech from the provided tokens.""" #tokenize # remove the stopwards, convert to lowercase #bi/n-grams adj = __get_word_by_ud_pos(sentence, "ADJ") adp = __get_word_by_ud_pos(sentence, "ADP") adv = __get_word_by_ud_pos(sentence, "ADV") aux = __get_word_by_ud_pos(sentence, "AUX") verb = __get_word_by_ud_pos(sentence, "VERB") cconj = __get_word_by_ud_pos(sentence, "CCONJ") det = __get_word_by_ud_pos(sentence, "DET") intj = __get_word_by_ud_pos(sentence, "INTJ") noun = __get_word_by_ud_pos(sentence, "NOUN") num = __get_word_by_ud_pos(sentence, "NUM") part = __get_word_by_ud_pos(sentence, "PART") pron = __get_word_by_ud_pos(sentence, "PRON") propn = __get_word_by_ud_pos(sentence, "PROPN") punct = __get_word_by_ud_pos(sentence, "PUNCT") sconj = __get_word_by_ud_pos(sentence, "SCONJ") sym = __get_word_by_ud_pos(sentence, "SYM") verb = __get_word_by_ud_pos(sentence, "VERB") x = __get_word_by_ud_pos(sentence, "X") return ParsedUniversalDependencies( adj = adj, adp = adp, adv = adv, aux = aux, cconj = cconj, det = det, intj = intj, noun = noun, num = num, part = part, pron = pron, propn = propn, punct = punct, sconj = sconj, sym = sym, verb = verb, x = x)
5,354,592
def main(): """ In this main function, we connect to the database, and we create position table and intern table and after that we create new position and new interns and insert the data into the position/intern table """ database = r"interns.db" sql_drop_positions_table=""" DROP TABLE positions """ sql_drop_interns_table=""" DROP TABLE interns """ sql_create_positions_table = """ CREATE TABLE IF NOT EXISTS positions ( name text PRIMARY KEY, description text ); """ sql_create_interns_table = """CREATE TABLE IF NOT EXISTS interns ( id integer PRIMARY KEY, last_name text NOT NULL, first_name text NOT NULL, position_applied text NOT NULL, school text NOT NULL, program text NOT NULL, date_of_entry text NOT NULL, FOREIGN KEY (position_applied) REFERENCES positions (name) ON UPDATE NO ACTION );""" # create a database connection conn = create_connection(database) # create tables if conn is not None: #drop interns table before everything else drop_table(conn, sql_drop_interns_table) #drop positions table before everything else drop_table(conn, sql_drop_positions_table) # create projects table create_table(conn, sql_create_positions_table) # create tasks table create_table(conn, sql_create_interns_table) else: print("Error! cannot create the database connection.") with conn: #create position-later on change the check condition position=("Software Development Intern", "This position is for software development intern"); create_position(conn, position) #create interns: intern_1=("A","B","Software Development Intern","GWU","Data Analytics",datetime.datetime.now()) intern_2=("C","D","Software Development Intern","GWU","Data Analytics",datetime.datetime.now()) create_intern(conn,intern_1) create_intern(conn,intern_2) conn.commit() conn.close() return database
5,354,593
def parse_date(month: int, day: int) -> Tuple[int, int, int]: """Parse a date given month and day only and convert to a tuple. Args: month (int): 1-index month value (e.g. 1 for January) day (int): a day of the month Returns: Tuple[int, int, int]: (year, month, day) """ if month < config.TODAY.month: # Note that if you have not yet recorded/cached the current # records, you should comment out the +1. The +1 is only # meant to increment for future events that happen in # the new year. year = config.TODAY.year + 1 elif month - config.TODAY.month > 1: # I realized that on June 10th, 2020, the schedule for UQs was # posted June 10th but included June 9th (which had passed). # There is a distinct possibility that this will happen again, # when the schedule is posted on New Year's Day (around there) # and includes a day for December. Because events are only # at most a month away in the future, we should check whether # the difference in months is greater than 1. # e.g. 12 - 1 > 1 to represent December of previous year and # January of the current year year = config.TODAY.year - 1 else: year = config.TODAY.year return year, month, day
5,354,594
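# Standalone sketch of the year roll-over rule in parse_date above, with the
# "today" reference fixed so the example is reproducible; resolve_year is a
# hypothetical helper that mirrors the branches in parse_date.
import datetime

TODAY = datetime.date(2020, 6, 10)

def resolve_year(month: int) -> int:
    if month < TODAY.month:
        return TODAY.year + 1      # month already passed -> assume next year
    if month - TODAY.month > 1:
        return TODAY.year - 1      # e.g. December listed in early January
    return TODAY.year

print(resolve_year(7), resolve_year(1), resolve_year(12))   # 2020 2021 2019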
def usgs_perlite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Mine production2"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['perlite'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Mine production2": prod = "production" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:3": prod = "import" elif df.iloc[index]["Production"].strip() == "Exports:3": prod = "export" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe
5,354,595
def downgrade():
    """Remove the unique key constraint from the UUID column."""
    op.drop_constraint('db_dblog_uuid_key', 'db_dblog')
5,354,596
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): # token from https://github.com/bioinf-jku/TTUR/blob/master/fid.py """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1 : Numpy array containing the activations of the pool_3 layer of the inception net ( like returned by the function 'get_predictions') for generated samples. -- mu2 : The sample mean over activations of the pool_3 layer, precalcualted on an representive data set. -- sigma1: The covariance matrix over activations of the pool_3 layer for generated samples. -- sigma2: The covariance matrix over activations of the pool_3 layer, precalcualted on an representive data set. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths" assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions" diff = mu1 - mu2 # product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps warnings.warn(msg) offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) #raise ValueError("Imaginary component {}".format(m)) print('FID is fucked up') covmean = covmean.real tr_covmean = np.trace(covmean) return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
5,354,597
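# Sanity check for calculate_frechet_distance above: identical Gaussians give a
# distance of (numerically) zero, and shifting one mean adds ||mu1 - mu2||^2.
# Assumes numpy/scipy are available and the function is importable.
import numpy as np

mu = np.zeros(3)
sigma = np.eye(3)
print(round(calculate_frechet_distance(mu, sigma, mu, sigma), 6))         # ~0.0
print(round(calculate_frechet_distance(mu, sigma, mu + 2.0, sigma), 6))   # ~12.0 (= 3 * 2^2)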
def _svdvals_eig(x):  # pragma: no cover
    """SVD-decomposition via eigen, but return singular values only.
    """
    if x.shape[0] > x.shape[1]:
        s2 = np.linalg.eigvalsh(dag(x) @ x)
    else:
        s2 = np.linalg.eigvalsh(x @ dag(x))
    return s2**0.5
5,354,598
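# Cross-check of the eigenvalue trick used by _svdvals_eig above against
# NumPy's reference SVD: the singular values of x are the square roots of the
# eigenvalues of x^H x (or x x^H, whichever side is smaller).
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((5, 3)) + 1j * rng.standard_normal((5, 3))
s2 = np.linalg.eigvalsh(x.conj().T @ x)          # 3x3 Gram matrix (smaller side)
s_eig = np.sort(s2**0.5)[::-1]
s_ref = np.linalg.svd(x, compute_uv=False)
print(np.allclose(s_eig, s_ref))                 # True (up to floating-point error)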
def get_3d_object_section(target_object):
    """Return the 3D section (bounding box) that encloses the given object, e.g. an STL mesh.
    """
    target_object = target_object.flatten()

    x_min = min(target_object[0::3])
    x_max = max(target_object[0::3])
    y_min = min(target_object[1::3])
    y_max = max(target_object[1::3])
    z_min = min(target_object[2::3])
    z_max = max(target_object[2::3])

    return [x_min, x_max, y_min, y_max, z_min, z_max]
5,354,599
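# Usage sketch for get_3d_object_section above: the flattened vertex buffer is
# read as interleaved x/y/z coordinates and reduced to an axis-aligned bounding
# box. Assumes the function is importable and numpy is available.
import numpy as np

vertices = np.array([[0.0, 1.0, 2.0],
                     [3.0, -1.0, 0.5],
                     [1.5, 0.0, 4.0]])
print(get_3d_object_section(vertices))   # x_min, x_max, y_min, y_max, z_min, z_max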