Dataset columns:
    content : string (length 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
def _count_partial_errors(client: GoogleAdsClient, conversion_upload_response):
    """Counts the partial errors in the GAds response.

    Args:
        client: A GoogleAdsClient instance.
        conversion_upload_response: Google Upload Conversion service response.

    Returns:
        An integer representing the total number of partial errors in the
        response failure error, and a list containing the code, message and
        number of times that each unique error code was returned by the API
        for one of the conversions uploaded.
    """
    error_count = 0
    error_stats = {}
    error_array = []
    if _is_partial_failure_error_present(conversion_upload_response):
        partial_failure = getattr(conversion_upload_response, 'partial_failure_error', None)
        error_details = getattr(partial_failure, 'details', [])
        for error_detail in error_details:
            failure_message = client.get_type('GoogleAdsFailure')
            google_ads_failure = type(failure_message)
            failure_object_des = google_ads_failure.deserialize(error_detail.value)
            error_count += len(failure_object_des.errors)
            for error in failure_object_des.errors:
                str_code = str(error.error_code).strip()
                if str_code in error_stats:
                    error_stats[str_code]['count'] += 1
                else:
                    error_stats[str_code] = {}
                    error_stats[str_code]['count'] = 1
                    error_stats[str_code]['message'] = str(error.message).strip()
                print('A partial failure at index '
                      f'{error.location.field_path_elements[0].index} occurred '
                      f'\nError message: {error.message}\nError code: '
                      f'{error.error_code}')
    for code_key in error_stats:
        error_array.append({
            'code': code_key,
            'message': error_stats[code_key]['message'],
            'count': error_stats[code_key]['count']
        })
    return error_count, error_array
aaa0ab8c3668765539a05374afb06bc3e661af23
3,656,400
import numpy as np


def cumulative_similarity(atoms, representations, threshold=0.98):
    """Greedily keep only representations that are not similar to any
    previously kept representation, returning the kept indices."""
    u_representations = [representations[0]]
    s_idxs = [0]

    for i, representation in enumerate(representations[1:], start=1):
        similar = merge_asymmetric_similarity(atoms,
            [representation],
            u_representations,
            threshold=threshold)

        # We are only looking at one representation
        similar = similar[0]

        if len(similar) > 0:
            continue

        u_representations += [representation]
        s_idxs += [i]

    return np.asarray(s_idxs)
6f2c065233a6b7b7931bfdfcfe08a032287d6ffc
3,656,401
def get_project_by_id(project_id):
    """
    Retrieve a project by its Id.

    Returns None if no project is found.
    """
    try:
        return Project.objects.get(pk=project_id)
    except Project.DoesNotExist:
        return None
e7ae842d7b9daa5bde08a00f6dd9ac84246d4e13
3,656,402
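A minimal usage sketch for the lookup above, assuming a Django `Project` model is importable in the calling code (the model itself is not shown in this row):

project = get_project_by_id(42)
if project is None:
    print("No project with id 42")
else:
    print(project.pk)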
from typing import Dict from typing import Tuple import os def create_colorbar( labels: pd.DataFrame, tree: CassiopeiaTree, colormap: Dict[str, Tuple[int, int, int]], dataset_name: str, output_directory: str = ".tmp/", create_legend: bool = False, ) -> str: """Creates a colorbar file for the iTOL batch uploader Creates a colorbar file for iTOL from categorical data. This will write out the file to the specified location, which can then be uploaded to iTOL. Args: labels: A pandas series with categorical data (can be represented as strings or categories) tree: CassiopeiaTree colormap: A mapping from category to RGB colors dataset_name: Name for the dataset output_directory: Where to write the output file create_legend: Include legend for this colorbar. Returns: The filepath to new colorbar file. """ _leaves = tree.leaves labelcolors_iTOL = [] for i in labels.loc[_leaves].values: colors_i = colormap[i] color_i = ( "rgb(" + str(colors_i[0]) + "," + str(colors_i[1]) + "," + str(colors_i[2]) + ")" ) labelcolors_iTOL.append(color_i) dfCellColor = pd.DataFrame() dfCellColor["cellBC"] = _leaves dfCellColor["color"] = labelcolors_iTOL # save file with header header = [ "DATASET_COLORSTRIP", "SEPARATOR TAB", "COLOR\t#FF0000", "MARGIN\t100", f"DATASET_LABEL\t{dataset_name}", "STRIP_WIDTH\t100", "SHOW_INTERNAL\t0", "", ] outfp = os.path.join(output_directory, f"{dataset_name}.txt") with open(outfp, "w") as SIDout: for line in header: SIDout.write(line + "\n") if create_legend: number_of_items = len(colormap) SIDout.write(f"LEGEND_TITLE\t{dataset_name} legend\n") SIDout.write("LEGEND_SHAPES") for _ in range(number_of_items): SIDout.write("\t1") SIDout.write("\nLEGEND_COLORS") for col in colormap.values(): SIDout.write(f"\t{rgb_to_hex(col)}") SIDout.write("\nLEGEND_LABELS") for key in colormap.keys(): SIDout.write(f"\t{key}") SIDout.write("\n") SIDout.write("\nDATA\n") df_writeout = dfCellColor.to_csv( None, sep="\t", header=False, index=False ) SIDout.write(df_writeout) return outfp
a0907476c4f5b027106b347a4500808805179112
3,656,403
def one_c(rand_gen): """ KS Test :param rand_gen: :return: """ # Now need to do the ks test # This calculates the value for KS at given points def ks_test(z): if z == 0: return 1 elif z < 1.18: # Numerically optimal cutoff block = ((np.exp((-1. * np.pi ** 2) / (8 * z ** 2)))) p = (np.sqrt(2 * np.pi) / z) * \ (block + block ** 9 + block ** 25) else: block = np.exp(-2 * z ** 2) p = 1 - 2 * (block - block ** 4 + block ** 9) return 1 - p def ks_test_part(points, values, bins): summed_bins = sum(values) distribution = [] for i in range(len(values)): distribution.append(abs(sum(values[:i]) / summed_bins - norm.cdf(bins[i]))) distribution = np.asarray(distribution) D = max(distribution) z = D * (np.sqrt(len(points)) + 0.12 + 0.11 / np.sqrt(len(points))) return D, ks_test(z) sigma = 1 u = 0 num_samples = np.logspace(np.log10(10), np.log10(10 ** 5), num=50) reference_ks = np.zeros(50) reference_p_value = np.zeros(50) ks = np.zeros(50) p_value = np.zeros(50) for index, sample in enumerate(num_samples): sample = int(sample) gauss = box_muller(rand_gen, sample) gauss = map_to_guass(gauss, u=u, sigma=sigma) ks[index], p_value[index] = common_test(gauss, ks_test_part) reference_ks[index], reference_p_value[index] = kstest(gauss, "norm") plt.plot(num_samples, ks, c='b', label='My KS Test') plt.plot(num_samples, reference_ks, c='r', label='Scipy KS Test') plt.xscale('log') plt.yscale('log') plt.xlabel("Number of Points") plt.ylabel("KS Statistic (D)") plt.legend(loc='best') plt.savefig("plots/KStest.png", dpi=300) plt.cla() plt.plot(num_samples, p_value, c='b', label='My KS Test Probability') plt.plot(num_samples, reference_p_value, c='r', label='Scipy KS Test Probability') plt.xscale('log') plt.yscale('log') plt.xlabel("Number of Points") plt.ylabel("Probability") plt.legend(loc='best') plt.savefig("plots/KStest_pvalue.png", dpi=300) plt.cla()
9a2b420f3620bc198bc880c01d90cc743ac5c2ec
3,656,404
from typing import List

import nltk


def divide_into_sentences(
    text: str, num_of_senteces: int, is_reversed: bool = False, offset: int = 0
) -> str:
    """
    This function divides the text into sentences and returns either
    the first X sentences or the last X sentences.
    """
    tokens_sent = nltk.sent_tokenize(text)
    # fix incorrect dialog sentences
    tokens_sent = fix_direct_speech_sentences(tokens_sent)

    output_text: List[str] = []
    if not is_reversed:
        for i, sentence in enumerate(tokens_sent):
            if i < offset:
                continue
            if i < num_of_senteces + offset:
                output_text.append(sentence)
            else:
                break
    else:
        for i, sentence in enumerate(reversed(tokens_sent)):
            if i < offset:
                continue
            if i < num_of_senteces + offset:
                output_text.append(sentence)
            else:
                break
        output_text.reverse()

    return " ".join(output_text)
e57e772942953c890b12e91c888688295dcf89ae
3,656,405
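A rough usage sketch, assuming NLTK's punkt tokenizer data is installed and that `fix_direct_speech_sentences` (not shown in this row) is available:

text = "First sentence. Second sentence. Third sentence."
print(divide_into_sentences(text, 2))                    # first two sentences
print(divide_into_sentences(text, 1, is_reversed=True))  # last sentence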
def intersection(bbox1: BoundingBox, bbox2: BoundingBox) -> BoundingBox:
    """
    Calculate the intersection of two bounding boxes.
    """
    assert bbox1.x_min <= bbox1.x_max
    assert bbox1.y_min <= bbox1.y_max
    assert bbox2.x_min <= bbox2.x_max
    assert bbox2.y_min <= bbox2.y_max

    # determine the coordinates of the intersection rectangle
    x_left = max(bbox1.x_min, bbox2.x_min)
    y_top = max(bbox1.y_min, bbox2.y_min)
    x_right = min(bbox1.x_max, bbox2.x_max)
    y_bottom = min(bbox1.y_max, bbox2.y_max)

    if x_right < x_left or y_bottom < y_top:
        return EMPTY_BBOX

    return BoundingBox(x_left, x_right, y_top, y_bottom)
71ce5b562f5f6fbfe6dba51db43240a98b0d7d49
3,656,406
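An illustrative call, assuming BoundingBox is a simple container taking (x_min, x_max, y_min, y_max) in the same order as the return statement above, and EMPTY_BBOX is a sentinel defined elsewhere:

a = BoundingBox(0, 10, 0, 10)
b = BoundingBox(5, 15, 5, 15)
print(intersection(a, b))  # overlapping region: x 5..10, y 5..10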
from typing import Optional
from typing import Iterable
import logging


def maybe_load_checkpoint(train_loop_rngs: jnp.ndarray,
                          save_checkpoint_path: str,
                          init_optimizer: flax.optim.Optimizer,
                          init_params: Params,
                          init_fixed_model_states: Optional[Params],
                          default_reinit_params: Iterable[str],
                          config: ml_collections.ConfigDict) -> CheckpointData:
  """Loads a model from an existing checkpoint if so indicated by the config.

  Whether to resume training, initialize from a previous checkpoint, or do
  nothing is set by the `config` ConfigDict, based on the existence of fields
  `resume` (resume training) or `model_init` (initialize from pretrained
  checkpoint).

  When resuming training, both the model weights and optimizer state (including
  the training step) are restored. When initializing, only the model parameters
  are updated.

  Initialization is prioritized in the following way:
    1. Always resume from an existing checkpoint, e.g. resume a finetune job.
    2. Resume from a previous checkpoint, e.g. start a cooldown training job.
    3. Initialize model from something, e.g. start a fine-tuning job.
    4. Do nothing (training from scratch).

  Args:
    train_loop_rngs: unreplicated jax.PRNGKey.
    save_checkpoint_path: File pointing to pretrained checkpoint stored in
      NumPy `.npz` file.
    init_optimizer: flax.Optimizer to be updated.
    init_params: Tree of (possibly randomly) initialized parameters for the
      model.
    init_fixed_model_states: Optional pytree of non-trainable parameters.
      Currently only passed when using SNGP models.
    default_reinit_params: List of parameter names to reinitialize if not
      provided by the config file.
    config: ConfigDict which contains fields indicating if, and how, to load
      an available checkpoint into the optimizer. If resuming from a previous
      checkpoint *to start a cooldown job*, the flag `resume` must be set. If
      initializing a (subset of) model parameters to start a fine-tuning job,
      fields `model_init`, `representation_size` and `classifier` must be set.

  Returns:
    A CheckpointData instance containing a new rng key, the new optimizer
    state, the new untrainable parameters (if resuming from a checkpoint), and
    a dictionary of information about the reloaded state.
  """
  optimizer = init_optimizer
  fixed_model_states = init_fixed_model_states

  accum_train_time = 0.0
  # TODO(dusenberrymw, zmariet): Directly return an unreplicated rng and the
  # cumulative training time instead of storing them in `checkpoint_extra`.
  checkpoint_extra = dict(
      accum_train_time=accum_train_time,
      rngs_loop=flax_utils.replicate(train_loop_rngs))

  # Parse config file to figure out which setting we are in.
  resume_from_checkpoint = (
      (save_checkpoint_path is not None and gfile.exists(save_checkpoint_path))
      or config.get("resume") is not None)
  reinitialize_model = config.get(
      "model_init") is not None and not resume_from_checkpoint

  if resume_from_checkpoint:
    logging.info("Resume training from checkpoint...")
    # Always prioritize loading from a checkpoint from the current training
    # job.
    if save_checkpoint_path and gfile.exists(save_checkpoint_path):
      resume_checkpoint_path = save_checkpoint_path
    # Otherwise, we reload from a previous checkpoint provided by the config.
    else:
      resume_checkpoint_path = config.resume

    checkpoint_tree = {"opt": init_optimizer, "extra": checkpoint_extra}
    if init_fixed_model_states is not None:
      checkpoint_tree["states"] = init_fixed_model_states
    checkpoint = load_checkpoint(checkpoint_tree, resume_checkpoint_path)
    optimizer, checkpoint_extra = checkpoint["opt"], checkpoint["extra"]
    fixed_model_states = checkpoint.get("states", None)

  elif reinitialize_model:
    logging.info("Initialize model...")
    reinit_params = config.get("model_reinit_params", default_reinit_params)
    logging.info("Reinitializing these parameters: %s", reinit_params)
    loaded = load_from_pretrained_checkpoint(
        init_params=init_params,
        pretrained_path=config.model_init,
        model_representation_size=config.model.representation_size,
        model_classifier=config.model.classifier,
        reinit_params=reinit_params)
    optimizer = init_optimizer.replace(target=loaded)
    if jax.process_index() == 0:
      logging.info("Restored parameter overview:")
      parameter_overview.log_parameter_overview(loaded)

  else:
    logging.info("No checkpoint to recover from; using default initialization.")

  return CheckpointData(
      optimizer=optimizer,
      fixed_model_states=fixed_model_states,
      train_loop_rngs=checkpoint_extra["rngs_loop"],
      accumulated_train_time=checkpoint_extra["accum_train_time"])
8b21fe6ac806a3d153d8dde11045a26432f52be0
3,656,407
def test_if_tech_defined(enduse_fueltypes_techs):
    """Test if a technology has been configured, i.e. whether a fuel share
    has been assigned to one of the fueltypes in `fuel_shares`.

    Arguments
    ---------
    enduse_fueltypes_techs : dict
        Configured technologies and fuel shares of an enduse

    Returns
    -------
    c_tech_defined : bool
        Criteria whether technologies have been configured
        for an enduse or not
    """
    c_tech_defined = False

    for fueltype in enduse_fueltypes_techs:
        if enduse_fueltypes_techs[fueltype] == {}:
            pass
        else:
            c_tech_defined = True
            break

    return c_tech_defined
a727b375dc1bc7e76fe63090d8e278013fa2c6bb
3,656,408
from mindboggle.guts.segment import segment_regions def segment_rings(region, seeds, neighbor_lists, step=1, background_value=-1, verbose=False): """ Iteratively segment a region of surface mesh as concentric segments. Parameters ---------- region : list of integers indices of region vertices to segment (such as a fold) seeds : list of integers indices of seed vertices neighbor_lists : list of lists of integers indices to neighboring vertices for each vertex step : integer number of segmentation steps before assessing segments background_value : integer background value verbose : bool print statements? Returns ------- segments : list of lists of integers indices to vertices for each concentric segment Examples -------- >>> import numpy as np >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.guts.mesh import find_neighbors_from_file >>> from mindboggle.guts.segment import extract_borders >>> from mindboggle.guts.segment import segment_rings >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> vtk_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> values, name = read_scalars(vtk_file, True, True) >>> neighbor_lists = find_neighbors_from_file(vtk_file) >>> background_value = -1 >>> fold, name = read_scalars(folds_file) >>> indices = [i for i,x in enumerate(fold) if x != background_value] >>> # Initialize seeds with the boundary of thresholded indices: >>> use_threshold = True >>> if use_threshold: ... # Threshold at the median depth or within maximum values in boundary: ... threshold = np.median(values[indices]) #+ np.std(values[indices]) ... indices_high = [x for x in indices if values[x] >= threshold] ... # Make sure threshold is within the maximum values of the boundary: ... B = np.ones(len(values)) ... B[indices] = 2 ... borders, foo1, foo2 = extract_borders(list(range(len(B))), B, neighbor_lists) ... borders = [x for x in borders if values[x] != background_value] ... if list(frozenset(indices_high).intersection(borders)): ... threshold = np.max(values[borders]) + np.std(values[borders]) ... indices_high = [x for x in indices if values[x] >= threshold] ... # Extract threshold boundary vertices as seeds: ... B = background_value * np.ones(len(values)) ... B[indices_high] = 2 ... seeds, foo1, foo2 = extract_borders(list(range(len(values))), B, neighbor_lists) ... # Or initialize P with the maximum value point: ... else: ... seeds = [indices[np.argmax(values[indices])]] ... indices_high = [] >>> indices = list(frozenset(indices).difference(indices_high)) >>> indices = list(frozenset(indices).difference(seeds)) >>> step = 1 >>> verbose = False >>> segments = segment_rings(indices, seeds, neighbor_lists, step, ... background_value, verbose) >>> len(segments) 56 >>> [len(x) for x in segments][0:10] [5540, 5849, 6138, 5997, 4883, 3021, 1809, 1165, 842, 661] >>> segments[0][0:10] [65539, 65540, 98308, 98316, 131112, 131121, 131122, 131171, 131175, 131185] Write results to vtk file and view (skip test): >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars # doctest: +SKIP >>> S = background_value * np.ones(len(values)) # doctest: +SKIP >>> for i, segment in enumerate(segments): S[segment] = i # doctest: +SKIP >>> rewrite_scalars(vtk_file, 'segment_rings.vtk', S, 'segment_rings', ... 
[], -1) # doctest: +SKIP >>> plot_surfaces('segment_rings.vtk') # doctest: +SKIP """ segments = [] while seeds: # Segment step-wise starting from seeds and through the region: seeds_plus_new = segment_regions(region, neighbor_lists, 1, [seeds], False, False, [], [], [], step, background_value, verbose) seeds_plus_new = [i for i,x in enumerate(seeds_plus_new) if x != background_value] # Store the new segment after removing the previous segment: region = list(frozenset(region).difference(seeds)) seeds = list(frozenset(seeds_plus_new).difference(seeds)) if seeds: # Add the new segment and remove it from the region: segments.append(seeds) region = list(frozenset(region).difference(seeds)) return segments
3b2c5c1a68ecef7f036a332b966a3aa8610157af
3,656,409
def classification_result(y, y_pred):
    """Split sample indices into correctly and incorrectly classified ones.

    :param y: ground-truth labels
    :param y_pred: predicted labels
    :return: (correct, wrong) lists of indices
    """
    assert len(y) == len(y_pred)
    correct = []
    wrong = []
    for i in range(len(y)):
        if y[i] == y_pred[i]:
            correct.append(i)
        else:
            wrong.append(i)
    return correct, wrong
bdab32eeded40691a721fe8e1463819605c5639c
3,656,410
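A quick illustration of the helper above:

correct, wrong = classification_result([1, 0, 1, 1], [1, 1, 1, 0])
print(correct)  # [0, 2]
print(wrong)    # [1, 3]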
import tensorflow as tf


def flatgrad(loss, var_list, clip_norm=None):
    """Calculate the gradient and flatten it.

    Parameters
    ----------
    loss : float
        the loss value
    var_list : list of tf.Tensor
        the variables
    clip_norm : float
        clip the gradients (disabled if None)

    Returns
    -------
    list of tf.Tensor
        flattened gradient
    """
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
    return tf.concat(axis=0, values=[
        tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
        for (v, grad) in zip(var_list, grads)
    ])
cd359f78c882bbd57876e4011818422799727ce7
3,656,411
async def get_image_from_message( ctx, url=None, *, return_type="image_RGBA", search_last_messages=True, accept_emojis=True, accept_templates=True, ): """Get an image from a discord Context or check on images among the 100 last messages sent in the channel. Return bytes or PIL.Image image and the image url""" assert return_type and return_type in ["image_RGBA", "image", "bytes"] message_limit = 100 initial_message = None if isinstance(ctx, commands.Context): initial_message = ctx.message try: # try to get the image from the initial message return await get_image( initial_message, url, return_type, accept_emojis, accept_templates, accept_embeds=False, ) except ImageNotFoundError as e: # if the message is a reply, we try to find an image in the replied message ref = initial_message.reference if initial_message else None if ref and isinstance(ref.resolved, disnake.Message): reply_message = ref.resolved try: return await get_image( reply_message, url=None, return_type=return_type, accept_emojis=False, accept_templates=False, accept_embeds=True, ) except Exception: pass # if no image was found in the message we check for images in the last # 100 messages sent in the channel if search_last_messages: async for message in ctx.channel.history(limit=message_limit): if message != initial_message: try: return await get_image( message, url=None, return_type=return_type, accept_emojis=False, accept_templates=False, accept_embeds=True, ) except Exception: pass # no image was found in the last 100 images raise ValueError(e) except ValueError as e: # if an image was found but an error occurred, we raise it raise ValueError(e)
fd135153dd6db0fb7e6e1990560da0aa69af6ac7
3,656,412
import pandas as pd
from tqdm import tqdm


def test_write(size, iterations, exclude_formats, test_compress):
    """Test writing for one file

    Args:
        size:               size of the file to test (0: small, 1: medium, 2: big)
        iterations:         number of times to run the test
        exclude_formats:    formats to exclude in this test
        test_compress:      if True it will try all compressions

    Returns:
        dictionary with the output
    """
    out = {}
    df = pd.read_csv(f"{PATH_DATA}{FILES[size]}.csv")

    for extension, func in tqdm(FUNCS["write"].items(), desc=f"{'write':10}", leave=True):

        # Skip this extension
        if extension in exclude_formats:
            continue

        if not test_compress or extension not in COMPRESSIONS:
            args = [df, f"{PATH_DATA}data.{extension}"]
            out[extension] = iterate_one_test(iterations, extension, func, args, {})

        # Try all compressions
        else:
            if extension not in COMPRESSIONS:
                continue

            # Get name of compression parameter and list of extensions
            comp_list = COMPRESSIONS[extension]["list"]
            comp_param_name = COMPRESSIONS[extension]["param_name"]

            for comp in tqdm(comp_list, desc=f"{extension:10}", leave=True):
                name = f"{extension}_{str(comp)}"
                out[name] = iterate_one_test(
                    iterations,
                    extension=name,
                    func=func,
                    args=[df, f"{PATH_DATA}data.{extension}_{comp}"],
                    kwargs={comp_param_name: comp},
                )

    return out
847de26005a291d9505a6c66221eec19e7924e54
3,656,413
from typing import Optional def get_game_server_group(game_server_group_arn: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGameServerGroupResult: """ The AWS::GameLift::GameServerGroup resource creates an Amazon GameLift (GameLift) GameServerGroup. :param str game_server_group_arn: A generated unique ID for the game server group. """ __args__ = dict() __args__['gameServerGroupArn'] = game_server_group_arn if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:gamelift:getGameServerGroup', __args__, opts=opts, typ=GetGameServerGroupResult).value return AwaitableGetGameServerGroupResult( auto_scaling_group_arn=__ret__.auto_scaling_group_arn, auto_scaling_policy=__ret__.auto_scaling_policy, balancing_strategy=__ret__.balancing_strategy, game_server_group_arn=__ret__.game_server_group_arn, game_server_group_name=__ret__.game_server_group_name, game_server_protection_policy=__ret__.game_server_protection_policy, instance_definitions=__ret__.instance_definitions, launch_template=__ret__.launch_template, max_size=__ret__.max_size, min_size=__ret__.min_size, role_arn=__ret__.role_arn, tags=__ret__.tags, vpc_subnets=__ret__.vpc_subnets)
ad5d517b5c05f0b2bbe300f6fd430d9780ebbc34
3,656,414
def create_wcscorr(descrip=False, numrows=1, padding=0): """ Return the basic definitions for a WCSCORR table. The dtype definitions for the string columns are set to the maximum allowed so that all new elements will have the same max size which will be automatically truncated to this limit upon updating (if needed). The table is initialized with rows corresponding to the OPUS solution for all the 'SCI' extensions. """ trows = numrows + padding c1 = pyfits.Column(name='WCS_ID',format='24A',array=np.array(['OPUS']*numrows+['']*padding,dtype="S24")) c2 = pyfits.Column(name='EXTVER',format='I',array=np.array(list(range(1,numrows+1)),dtype=np.int16)) c3 = pyfits.Column(name='CRVAL1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c4 = pyfits.Column(name='CRVAL2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c5 = pyfits.Column(name='CD1_1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c6 = pyfits.Column(name='CD1_2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c7 = pyfits.Column(name='CD2_1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c8 = pyfits.Column(name='CD2_2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c9 = pyfits.Column(name='ORIENTAT',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c10 = pyfits.Column(name='PA_V3',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c11 = pyfits.Column(name='Delta_RA',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c12 = pyfits.Column(name='Delta_Dec',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c13 = pyfits.Column(name='RMS_RA',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c14 = pyfits.Column(name='RMS_Dec',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c15 = pyfits.Column(name='Delta_Orientat',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c16 = pyfits.Column(name='Delta_Scale',format='D',array=np.array([1.0]*trows,dtype=np.float64)) c17 = pyfits.Column(name='NMatch',format='J',array=np.array([0]*trows,dtype=np.int32)) c18 = pyfits.Column(name='Catalog',format='40A',array=np.array([''],dtype="S40")) if descrip: c19 = pyfits.Column(name='Descrip',format='128A',array=np.array(['Original WCS computed by OPUS']*numrows,dtype="S128")) cdefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19]) else: cdefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18]) return pyfits.new_table(cdefs,nrows=trows)
3764f412bdbae771fe75fec7b8623906fddf01b1
3,656,415
import requests


def get_token():
    """
    Acquire an OAuth token for Koha

    returns: OAuth token (string)
    """
    data = {
        "client_id": config['client_id'],
        "client_secret": config['client_secret'],
        "grant_type": "client_credentials",
    }
    response = requests.post(config['api_root'] + '/oauth/token', data=data, verify=False)
    token = str(response.json()['access_token'])
    return token
02b362a27d8c9101ca24f6a853513f99d167f4c9
3,656,416
from typing import List
import time


def build_step(incr: bool, cfg: RunConfig, repo: Repo, time_dict: dict) -> List[str]:
    """
    Build jar for multiple versions of a repo

    :param incr: incremental build or not
    :param cfg: all configurations read from *.toml
    :param repo: a GitPython git.Repo object
    :param time_dict: recording running time
    :return: the list of commits (sha1 strings)
    """
    outdirs: DirsTuple = cfg.out_dirs
    check_dir(str(outdirs.jar), make_if_not=True)
    commit_list: List[str] = interp_rev_range(repo, cfg.rev_range)
    commit_list_repr = '\n'.join(commit_list)
    logger.debug(f"Commits to be processed are:\n{commit_list_repr}")
    start_time: float = time.time()
    if incr:
        logger.debug("Build mode: Incremental build")
        step_compile(outdirs.jar, repo, commit_list, cfg.njobs, True)
    else:  # default
        logger.debug("Build mode: Non-incremental build")
        step_compile(outdirs.jar, repo, commit_list, cfg.njobs, False)
    time_dict["compile"] = time.time() - start_time
    return commit_list
348f32e8ab5881c3563cd442390e4af203a2b15d
3,656,417
def isfile(value):
    """Validate that the value is an existing file."""
    return vol.IsFile('not a file')(value)
2513516dbe0bdb765cbff78780f6386c5809a8d7
3,656,418
def is_in_form(dg: "streamlit.delta_generator.DeltaGenerator") -> bool:
    """True if the DeltaGenerator is inside an st.form block."""
    return current_form_id(dg) != ""
e0f60c4b320d325db2cc27ece9395cae20b92fda
3,656,419
import sys


def fetch(opts):
    """
    support fetching from scp sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        ``True`` if the fetch stage is completed; ``False`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    site = opts.site
    work_dir = opts.work_dir

    if not SCP.exists():
        err('unable to fetch package; scp is not installed')
        return None

    note('fetching {}...', name)
    sys.stdout.flush()

    if not SCP.execute(['-o', 'BatchMode yes', site, cache_file], cwd=work_dir):
        err('unable to secure-copy file from target')
        return None

    log('successfully secure-copied file from target')
    return cache_file
e94e684d50965a8617f4a03899503784f5840b5f
3,656,420
def install_from_deb(deb_path, additional_options):
    """
    Installs package with the dpkg command using the -i option and some extra options, if needed
    Raises an exception on non-zero exit code
    Input: deb file path, additional options
    Output: Combined stdout and stderr
    """
    return run_shell_command("dpkg -i " + additional_options + " " + deb_path)
8d452b96e1a3cb8c6b134747fe351158625fed27
3,656,421
import pytz
from datetime import datetime
import json


def sign_award(award: Award) -> FlexSendMessage:
    """Sign Award Result

    Args:
        award (Award): Award Object

    Returns:
        FlexSendMessage: Flex Message
    """
    tz = pytz.timezone("Asia/Taipei")
    now = datetime.now(tz=tz)
    now_text = now.strftime("%Y/%m/%d %H:%M:%S")

    with open("line/flex_message_template/sign_award.json") as json_file:
        contents = json.load(json_file)

    contents["hero"]["url"] = award.icon
    contents["body"]["contents"][1]["contents"][1][
        "text"
    ] = f"{award.name} * {award.count}"
    contents["body"]["contents"][3]["contents"][1]["text"] = now_text

    message = FlexSendMessage(alt_text="簽到成功!", contents=contents)
    return message
e44f42d8563d641ef9136f4121841c430e95288b
3,656,422
import math
import time


def create_l5_block(block_id: str) -> l5_block_model.L5BlockModel:
    """Creates unfinalized L5 block that needs confirmation"""
    l5_block = l5_block_model.L5BlockModel(
        dc_id=keys.get_public_id(),
        current_ddss=party.get_address_ddss(ADDRESS),  # Get DDSS from party, cached hourly
        block_id=str(block_id),
        timestamp=str(math.floor(time.time())),
        prev_proof="",
        scheme=PROOF_SCHEME,
        l4_blocks=get_pending_l4_blocks(block_id),
    )
    return l5_block
c4508e4aff53315a0fa84924f8a3fc66d99e0c8f
3,656,423
from typing import List from typing import Tuple def gridgen(xbry: List, ybry: List, beta: List, shape: Tuple, ul_idx=0, focus=None, proj=None, nnodes=14, precision=1.0e-12, nppe=3, newton=True, thin=True, checksimplepoly=True, verbose=False): """ External wrapping function to call Gridgen grid builder. xbry, ybry - nodes coordinates of grid boundary beta - vertex type shape - tuple of grid shape (eta, xi) """ # Prepare the Gridgen object. gn = Gridgen(xbry, ybry, beta, shape, ul_idx=ul_idx, focus=focus, proj=None, nnodes=nnodes, precision=precision, nppe=nppe, newton=newton, thin=thin, checksimplepoly=checksimplepoly, verbose=verbose) # Generate the C-Grid. if proj is not None: lon_vert, lat_vert = proj(gn.x, gn.y, inverse=True) grd = CGridGeo(lon_vert, lat_vert, proj) else: grd = CGrid(gn.x, gn.y) # Attach the Gridgen object to grid. grd.Gridgen = gn print('Grid construction complete.') return grd
e1e3eea43aff3301f317b1103e820bfb79169fbd
3,656,424
def get_output():
    """Gets the current global output stream"""
    global OUTPUT
    return OUTPUT
63480fb1dc071f3f3df878204fd2af6994cc9ea0
3,656,425
import os.path as osp
from xml.dom import minidom

import numpy as np
import scipy.sparse


def load_pascal_annotation(index, pascal_root):
    """
    This code is borrowed from Ross Girshick's FAST-RCNN code
    (https://github.com/rbgirshick/fast-rcnn).
    It parses the PASCAL .xml metadata files.
    See publication for further details: (http://arxiv.org/abs/1504.08083).

    Thanks Ross!
    """
    classes = ('__background__',  # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor')
    class_to_ind = dict(zip(classes, range(21)))

    filename = osp.join(pascal_root, 'Annotations', index + '.xml')
    # print 'Loading: {}'.format(filename)

    def get_data_from_tag(node, tag):
        return node.getElementsByTagName(tag)[0].childNodes[0].data

    with open(filename) as f:
        data = minidom.parseString(f.read())

    objs = data.getElementsByTagName('object')
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, 21), dtype=np.float32)

    # Load object bounding boxes into a data frame.
    for ix, obj in enumerate(objs):
        # Make pixel indexes 0-based
        x1 = float(get_data_from_tag(obj, 'xmin')) - 1
        y1 = float(get_data_from_tag(obj, 'ymin')) - 1
        x2 = float(get_data_from_tag(obj, 'xmax')) - 1
        y2 = float(get_data_from_tag(obj, 'ymax')) - 1
        cls = class_to_ind[
            str(get_data_from_tag(obj, "name")).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0

    overlaps = scipy.sparse.csr_matrix(overlaps)

    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'index': index}
62495a50995f9fb0cec30d63b627f1b66022561b
3,656,426
import numpy as np
import pandas as pd
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler


def run_pca(
    X_train,
    y_train,
    mean_widget,
    std_widget,
    x_widget,
    labels_map=labels_map,
    labels_inv_map=labels_inv_map,
):
    """Runs PCA on the passed data based on the defined parameters and returns
    a pandas DataFrame.

    Note that the PCA is always fitted on the whole dataset X_train, while the
    returned DataFrame depends on the values from the x_widget object.

    Parameters
    ==========
    X_train : numpy.ndarray
        Data matrix to run PCA on
    y_train : numpy.ndarray
        Ground truth vector with integer class labels
    mean_widget : ipywidgets.widgets.widget_bool.Checkbox
        Widget that indicates to center the data before scaling
    std_widget : ipywidgets.widgets.widget_bool.Checkbox
        Widget that indicates to scale the data to unit variance
    x_widget : ipywidgets.widgets.widget_selection.SelectMultiple
        Widget that defines which data observations are returned, based on the
        labels contained in the widget object
    labels_map : dict
        Dictionary that maps from plant species representation to integer
        class representation.
    labels_inv_map : dict
        Dictionary that maps from integer class representation to plant
        species representation.

    Returns
    =======
    pc_df : pandas.DataFrame
        Data matrix with 4 PCA components and the corresponding label entry
        as 'Species' in plant species representation.
    """
    ss = StandardScaler(with_mean=mean_widget.value, with_std=std_widget.value)
    train_data = ss.fit_transform(X_train)

    pca = decomposition.PCA(n_components=4)
    _ = pca.fit_transform(train_data)

    chosen_labels = np.array([labels_map.get(name) for name in x_widget.value])
    ix_true = np.argwhere(np.in1d(y_train, chosen_labels)).flatten()

    pc = pca.transform(X_train[ix_true, ...])

    pc_df = pd.DataFrame(data=pc, columns=["PC1", "PC2", "PC3", "PC4"])
    pc_df["Species"] = np.array(
        [labels_inv_map.get(label_nr) for label_nr in y_train[ix_true]]
    )

    return pc_df
de84adbc9a7779c05557941d1c4714e7a3eaf8c7
3,656,427
import os
import shutil
from functools import wraps


def cleanup_dir(dir_path=WORKING_DIR):
    """
    A function decorator that cleans up the file directory before executing.
    """
    def rm_content(dir_path):
        rm_count = 0
        for filename in os.listdir(dir_path):
            filepath = os.path.join(dir_path, filename)
            if os.path.isfile(filepath) or os.path.islink(filepath):
                os.remove(filepath)
            else:
                shutil.rmtree(filepath)
            rm_count += 1
        logger.info(f'removed {rm_count} file/directories from {dir_path}')

    def inner(f):
        @wraps(f)
        def dir_cleanup_wrapper(*args, **kwargs):
            rm_content(dir_path)
            result = f(*args, **kwargs)
            return result
        return dir_cleanup_wrapper
    return inner
34a03406e1c47aa27597d98cce3d3e9c8506d68e
3,656,428
def validate_url(url):
    """
    Validates the URL

    :param url:
    :return:
    """
    if validators.url(url):
        return url
    elif validators.domain(url):
        return "http://{}".format(url)
    return ""
cd1ea3a834e1e67c4f438a28dcfa08e1dbd041c6
3,656,429
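A short usage sketch, assuming the `validators` package is installed:

print(validate_url("https://example.com"))  # returned unchanged
print(validate_url("example.com"))          # "http://example.com"
print(validate_url("not a url"))            # ""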
def map_class_to_id(classes):
    """
    Get a 1-indexed id for each class given as an argument

    Note that for MASATI, len(classes) == 1 when only considering boats

    Args:
        classes (list): A list of classes present in the dataset

    Returns:
        dict[str, int]
    """
    class_ids = list(range(1, len(classes) + 1))
    return dict(zip(classes, class_ids))
7c2b47249f61f446327c0a798c1a129c62fde6b3
3,656,430
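For illustration:

print(map_class_to_id(["boat", "ship", "buoy"]))
# {'boat': 1, 'ship': 2, 'buoy': 3}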
import numpy as np


def vec2text(vector):
    """
    vector to captcha text
    :param vector: np array
    :return: text
    """
    if not isinstance(vector, np.ndarray):
        vector = np.asarray(vector)
    vector = np.reshape(vector, [CAPTCHA_LENGTH, -1])
    text = ''
    for item in vector:
        text += CAPTCHA_LIST[np.argmax(item)]
    return text
c819407caca85e4ced798b6c4058918708af0095
3,656,431
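A toy call, assuming CAPTCHA_LIST is the character alphabet and CAPTCHA_LENGTH the number of characters per captcha (both defined elsewhere in the project); a 2-character, 3-symbol alphabet is assumed purely for illustration:

# With CAPTCHA_LIST = ['a', 'b', 'c'] and CAPTCHA_LENGTH = 2:
one_hot = [[0, 1, 0],   # 'b'
           [0, 0, 1]]   # 'c'
print(vec2text(one_hot))  # "bc"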
def get_data_nasdaq_fall(specified_value):
    """
    :param specified_value: the number of datapoints to fetch from the backend
    :return: list of dictionaries
    """
    data_points = NasdaqAsc.objects.order_by('difference_close')
    data_points = data_points[:specified_value]
    return data_points
20d811f7276c410cb8aefb71cfd8ef23bea66977
3,656,432
import os import subprocess def check_qe_completed(folder,prefix,output_file,calc_type='pw'): """ Check if qe calculation has correctly completed. - folder: where the calculation has been run. - prefix: qe prefix - output_file: name of output file - calc_type: either 'pw' or 'ph' or 'gkkp' """ status = True # If save folder does not exist, return False (= NOT completed) immediately if calc_type=='pw' and not os.path.isdir('%s/%s.save'%(folder,prefix)): status = False return status elif calc_type=='ph' and not os.path.isdir('%s/_ph0'%folder): status = False return status elif calc_type=='gkkp' and not os.path.isdir('%s/elph_dir'%folder): status = False return status if calc_type != 'pw' and calc_type != 'ph' and calc_type != 'gkkp': raise ValueError("calc_type not recognised: it has to be either 'pw' or 'ph' or 'gkkp'.") # Next, check if output is correctly completed try: check = subprocess.check_output("grep JOB %s/%s*"%(folder,output_file), shell=True, stderr=subprocess.STDOUT) check = check.decode('utf-8') check = check.strip().split()[-1] except subprocess.CalledProcessError as e: check = "" if check != "DONE.": status = False return status
778375406379996d86d0e033da8531566f8fa7dd
3,656,433
import re
import subprocess

import pytest


def require_openssl(required_version):
    """
    This function checks that the required version of OpenSSL is present, and skips the
    test if not. Use it as a test function decorator:

        @require_openssl("2.3.4")
        def test_something():
            ...

    :param required_version: minimal required version as a string: "1.2.3"
    """
    def versiontuple(v):
        clean_v = re.sub(r"[^\d\.]", "", v)
        return tuple(map(int, (clean_v.split("."))))

    try:
        command_output = subprocess.check_output(["openssl", "version"])
    except OSError:
        return pytest.mark.skip("openssl command is not available in test environment")
    else:
        if not command_output:
            raise Exception("Could not get openssl version")
        openssl_version = str(command_output.split()[1])
        return pytest.mark.skipif(
            versiontuple(openssl_version) < versiontuple(required_version),
            reason=f"openssl v{openssl_version} is less than required version {required_version}")
7750c2f4de2eda82e8d6765405648677e48e8ac7
3,656,434
from functools import wraps

from flask import g, redirect, request, url_for


def login_required(f):
    """Decorator that requires login for a page."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not g.signin:
            nu = get_redirect_url()
            if nu and (
                nu.startswith("/") or
                nu.startswith(request.url_root)
            ):
                return redirect(url_for('front.login', next=nu))
            else:
                return redirect(url_for('front.login'))
        return f(*args, **kwargs)
    return decorated_function
b9e7db40ebb50a71d4d56064fce8ec8e30fb6fbe
3,656,435
def get_agent_type(opt): """ Returns the type of model agent, specified by --model and --model_file. """ model_file = opt['model_file'] optfile = model_file + '.opt' if isfile(optfile): new_opt = _load_opt_file(optfile) if 'batchindex' in new_opt: del new_opt['batchindex'] if opt.get('override'): for k, v in opt['override'].items(): if str(v) != str(new_opt.get(k, None)): print( "[ warning: overriding opt['{}'] to {} (" "previously: {} )]".format( k, v, new_opt.get(k, None))) new_opt[k] = v for k, v in opt.items(): if k not in new_opt: new_opt[k] = v new_opt['model_file'] = model_file if (new_opt.get('dict_file') and not isfile(new_opt['dict_file'])): raise RuntimeError( 'WARNING: Dict file does not exist, check ' 'to make sure it is correct: {}'.format( new_opt['dict_file'])) model_class = get_agent_module(new_opt['model']) return model_class else: return None
59e53f961c29c9cf993bb176d4d993b80848bbcd
3,656,436
from typing import Any
import types


def is_sparse_or_ragged_tensor_value(tensor: Any) -> bool:
  """Returns true if sparse or ragged tensor."""
  return (isinstance(tensor, types.SparseTensorValue) or
          isinstance(tensor, types.RaggedTensorValue) or
          isinstance(tensor, tf.compat.v1.SparseTensorValue))
8c82ffd04dfae89f19f34770b67724ffb9c66fc1
3,656,437
import os


def _is_file_not_empty(file_path):
    """Return True when buildinfo file is not empty"""
    # NOTE: we can assume that when the file exists, all
    # content has been downloaded to the directory.
    return os.path.getsize(file_path) > 0
08ef68719eaf57adbb946412dc259ea3d42117d1
3,656,438
def arcsin(tensor):
    """Returns the element-wise inverse sine of the tensor"""
    return TensorBox(tensor).arcsin(wrap_output=False)
c49f520610e0a59c8a29f25ff0f02c81b2226b14
3,656,439
def get_one_pokemon(id: hug.types.number):
    """Return a single Pokemon from the database"""
    cursor.execute("""SELECT * FROM pokemon WHERE id=%s """, [id])
    row = cursor.fetchone()
    conn.commit()
    conn.close()
    return row
b36b2a5b50c1f0edfc39f3a74341bed583c36e13
3,656,440
import os


def check_file_exists(filename):
    """Return True if the file `filename` exists"""
    return os.path.exists(filename)
93edc12c8d87863b560637f0bff73f0545f38270
3,656,441
import numpy
import scipy.ndimage


def shift_fft(input_img, shift_val, method="fft"):
    """Do shift using FFTs

    Shift an array like scipy.ndimage.interpolation.shift(input, shift, mode="wrap", order="infinity") but faster
    :param input_img: 2d numpy array
    :param shift_val: 2-tuple of float
    :return: shifted image
    """
    if method == "fft":
        d0, d1 = input_img.shape
        v0, v1 = shift_val
        f0 = numpy.fft.ifftshift(numpy.arange(-d0 // 2, d0 // 2))
        f1 = numpy.fft.ifftshift(numpy.arange(-d1 // 2, d1 // 2))
        m1, m0 = numpy.meshgrid(f1, f0)
        e0 = numpy.exp(-2j * numpy.pi * v0 * m0 / float(d0))
        e1 = numpy.exp(-2j * numpy.pi * v1 * m1 / float(d1))
        e = e0 * e1
        out = abs(numpy.fft.ifft2(numpy.fft.fft2(input_img) * e))
    else:
        out = scipy.ndimage.interpolation.shift(input_img, shift_val, mode="wrap", order="infinity")
    return out
2729d187d222ef83635abea5bc29a633abce9e61
3,656,442
def get_output_detections_image_file_path(input_file_path, suffix="--detections"):
    """Get the appropriate output image path for a given image input.

    Effectively appends "--detections" to the original image file and
    places it within the same directory.

    Parameters
    -----------
    input_file_path: str
        Path to input image.
    suffix: str
        Suffix appended to the file.
        Default: "--detections"

    Returns
    -------
    str
        Full path for detections output image.
    """
    input_file_path = input_file_path.replace('--original.', '.')
    input_file_paths = input_file_path.split('.')
    input_file_paths[-2] = input_file_paths[-2] + suffix

    return '.'.join(input_file_paths)
b8d060dff6800750c418c70c61bd4d8e0b7bb416
3,656,443
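For example:

print(get_output_detections_image_file_path("photos/cat--original.jpg"))
# photos/cat--detections.jpg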
import numpy as np
from scipy import stats


def split_errorSC(tr, t1, t2, q, Emat, maxdt, ddt, dphi):
    """
    Calculate error bars based on a F-test and a given confidence interval q

    Parameters
    ----------
    tr : :class:`~obspy.core.Trace`
        Seismogram
    t1 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        Start time of picking window
    t2 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        End time of picking window
    q : float
        Confidence level
    Emat : :class:`~numpy.ndarray`
        Energy minimization matrix

    Returns
    -------
    err_dtt : float
        Error in dt estimate (sec)
    err_phi : float
        Error in phi estimate (degrees)
    err_contour : :class:`~numpy.ndarray`
        Error contour for plotting
    """
    # Bounds on search
    phi = np.arange(-90.0, 90.0, dphi) * np.pi / 180.
    dtt = np.arange(0., maxdt, ddt)

    # Copy trace to avoid overriding
    tr_tmp = tr.copy()
    tr_tmp.trim(t1, t2)

    # Get degrees of freedom
    dof = split_dof(tr_tmp)
    if dof < 3:
        dof = 3
        print(
            "Degrees of freedom < 3. Fixing to DOF = 3, which may " +
            "result in inaccurate errors")
    n_par = 2

    # Error contour
    vmin = Emat.min()
    vmax = Emat.max()
    err_contour = vmin * (1. + n_par / (dof - n_par) *
                          stats.f.ppf(1. - q, n_par, dof - n_par))

    # Estimate uncertainty (q confidence interval)
    err = np.where(Emat < err_contour)
    if len(err) == 0:
        return False, False, False
    err_phi = max(
        0.25 * (phi[max(err[0])] - phi[min(err[0])]) * 180. / np.pi,
        0.25 * dphi)
    err_dtt = max(0.25 * (dtt[max(err[1])] - dtt[min(err[1])]), 0.25 * ddt)

    return err_dtt, err_phi, err_contour
41c56204884bafc32effe5f96557b703da589e05
3,656,444
import time

from picamera import PiCamera
from picamera.array import PiRGBArray

DEBUG = True  # module-level debug flag


def get_image():
    """
    Returns an image taken using raspberry pi camera.
    This image can be directly used with OpenCV library.
    """
    if DEBUG:
        print("\tTakes image using camera")

    camera = PiCamera()
    camera.resolution = (512, 512)
    raw_img = PiRGBArray(camera)
    time.sleep(0.1)  # Let camera warm up
    camera.capture(raw_img, format="bgr")
    camera.close()

    image = raw_img.array
    return image
25f0e90378e70d443107ccfa519b0c36fd224e47
3,656,445
def add(x, y):
    """Creates an SMTLIB addition statement formatted string

    Parameters
    ----------
    x, y: str
        First and second numerical arguments (as strings) to include in the expression
    """
    return "(+ " + x + " " + y + ")"
5145573a4616cc92be72301eae0a5dfffecf9234
3,656,446
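For example:

print(add("x", "2.5"))  # "(+ x 2.5)"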
def build_put_cat_request( **kwargs # type: Any ): # type: (...) -> HttpRequest """Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow. :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in our example to find the input shape. Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. :paramtype json: any :keyword content: Pass in binary content you want in the body of the request (typically bytes, a byte iterator, or stream input). Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. :paramtype content: any :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow. :rtype: ~azure.core.rest.HttpRequest Example: .. code-block:: python # JSON input template you can fill out and use as your body input. json = { "hisses": bool, # Optional. "likesMilk": bool, # Optional. "meows": bool, # Optional. "name": "str" # Required. } """ content_type = kwargs.pop('content_type', None) # type: Optional[str] accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/multipleInheritance/cat') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, headers=header_parameters, **kwargs )
5191eac67e6cdeedfa32ca32fad1cdd96e7a2870
3,656,447
from typing import Callable


def partial(fn: Callable, *args, **kwargs) -> Callable:
    """Takes a function and fewer than normal arguments, and returns a function
    that will consume the remaining arguments and call the function"""
    def partial_fn(*rem_args, **rem_kwargs):
        return fn(*args, *rem_args, **kwargs, **rem_kwargs)
    return partial_fn
80f0df16915593fa0c5212e7560626db78147da6
3,656,448
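A quick demonstration of the helper above:

def power(base, exponent):
    return base ** exponent

square = partial(power, exponent=2)
print(square(5))  # 25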
import re


def parse_lipid(name):
    """
    parse_lipid
    description:
        parses a lipid name into lipid class and fatty acid composition, returning a
        dictionary with the information. Handles total fatty acid composition, as well
        as individual composition, examples:
            PC(38:3)        --> class: PC, n_carbon: 38, n_unsat: 3
            PC(18:1/20:2)   --> class: PC, n_carbon: 38, n_unsat: 3,
                                fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 20, n_unsat: 2))
        Also, handles special fatty acid notations (modifiers) used for ceramides and
        plasmalogen lipids, examples:
            Cer(d36:2)      --> class: Cer, n_carbon: 36, n_unsat: 2, fa_mod: d
            Cer(d18:1/18:1) --> class: PC, n_carbon: 38, n_unsat: 3, fa_mod: d,
                                fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 18, n_unsat: 1))
            PE(p40:4)       --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p
            PE(p20:2/20:2)  --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p,
                                fa_comp: ((n_carbon: 20, n_unsat: 2), (n_carbon: 20, n_unsat: 2))
        lipid name must conform to the general format:
            <lipid_class>([modifier]<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>]])
    parameters:
        name (str) -- lipid name to parse
    returns:
        (dict or None) -- parsed lipid information (always contains 'class', 'n_carbon', and
                          'n_unsat' attributes) or None if it cannot be parsed as a lipid
    """
    parsed = {}

    # compile regex pattern
    l_pat = re.compile(
        r"^(?P<cls>[A-Za-z123]+)\((?P<mod>[pdoe]*)(?P<fc1>[0-9]+):(?P<fu1>[0-9]+)/*((?P<fc2>[0-9]+):(?P<fu2>[0-9]+))*/*((?P<fc3>[0-9]+):(?P<fu3>[0-9]+))*\)")

    # parse the name using regex
    l_res = l_pat.match(name)
    if l_res:
        # lipid class (required)
        if l_res.group('cls'):
            parsed["lipid_class"] = l_res.group('cls')
        else:
            # msg = "parse_lipid: failed to parse lipid class for: {}".format(name)
            # raise ValueError(msg)
            return None

        # value error due to failure to parse fatty acid composition
        # def raise_fatty_acid_value_error():
        #     msg = "parse_lipid: failed to parse fatty acid composition for: {}".format(name)
        #     raise ValueError(msg)

        # fc1 and fu1 are always required
        if not l_res.group('fc1') or not l_res.group('fu1'):
            # raise_fatty_acid_value_error()
            return None

        # check if a second fatty acid composition is supplied, e.g. (18:1/16:0)
        # if so, need to compute total fatty acid composition and add individual
        # fatty acids to a list
        if l_res.group('fc2'):
            if not l_res.group('fu2'):
                # raise_fatty_acid_value_error()
                return None

            # add info from the first two fatty acid compositions
            fc1, fu1 = int(l_res.group('fc1')), int(l_res.group('fu1'))
            fc2, fu2 = int(l_res.group('fc2')), int(l_res.group('fu2'))
            parsed["fa_comp"] = [
                {"n_carbon": fc1, "n_unsat": fu1},
                {"n_carbon": fc2, "n_unsat": fu2}
            ]

            # check for 3rd FA composition
            fc3, fu3 = 0, 0
            if l_res.group('fc3'):
                if not l_res.group('fu3'):
                    # raise_fatty_acid_value_error()
                    return None
                fc3, fu3 = int(l_res.group('fc3')), int(l_res.group('fu3'))
                parsed["fa_comp"].append({"n_carbon": fc3, "n_unsat": fu3})

            # compute total fatty acid composition
            parsed["n_carbon"] = fc1 + fc2 + fc3
            parsed["n_unsat"] = fu1 + fu2 + fu3
        else:
            # fc1 and fu1 are the total fatty acid composition
            parsed["n_carbon"] = int(l_res.group('fc1'))
            parsed["n_unsat"] = int(l_res.group('fu1'))

        # add fatty acid modifier if present
        if l_res.group('mod'):
            parsed["fa_mod"] = l_res.group('mod')
    else:
        # could not parse name as a lipid
        parsed = None

    return parsed
31a26cf57edfd08c6025c07982b7d6805704088e
3,656,449
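A small usage sketch based on the examples given in the docstring above:

print(parse_lipid("PC(18:1/20:2)"))
# {'lipid_class': 'PC', 'fa_comp': [{'n_carbon': 18, 'n_unsat': 1},
#  {'n_carbon': 20, 'n_unsat': 2}], 'n_carbon': 38, 'n_unsat': 3}
print(parse_lipid("not a lipid"))  # None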
import chunk import requests import json def query(lon, lat, coordsys='gal', mode='full', limit=500000): """ Send a line-of-sight reddening query to the Argonaut web server. lon, lat: longitude and latitude, in degrees. coordsys: 'gal' for Galactic, 'equ' for Equatorial (J2000). mode: 'full', 'lite' or 'sfd' In 'full' mode, outputs a dictionary containing, among other things: - 'distmod': The distance moduli that define the distance bins. - 'best': The best-fit (maximum proability density) line-of-sight reddening, in units of SFD-equivalent E(B-V), to each distance modulus in 'distmod.' See Schlafly & Finkbeiner (2011) for a definition of the reddening vector (use R_V = 3.1). - 'samples': Samples of the line-of-sight reddening, drawn from the probability density on reddening profiles. - 'success': 1 if the query succeeded, and 0 otherwise. - 'converged': 1 if the line-of-sight reddening fit converged, and 0 otherwise. - 'n_stars': # of stars used to fit the line-of-sight reddening. - 'DM_reliable_min': Minimum reliable distance modulus in pixel. - 'DM_reliable_max': Maximum reliable distance modulus in pixel. Less information is returned in 'lite' mode, while in 'sfd' mode, the Schlegel, Finkbeiner & Davis (1998) E(B-V) is returned. """ # make sure we have list if type(lon) == float: lon, lat = [lon], [lat] # Make sure to have less than 500000 objects (the limit). # Cut the list in smaller pieces if that is the case. if len(lon) >= limit: dicts = [query(loni, lati, coordsys=coordsys, mode=mode) for loni, lati in zip(chunk(lon, limit - 1), chunk(lat, limit - 1))] for dic in dicts[1:]: for k in dic: dicts[0][k].extend(dic[k]) return dicts[0] if coordsys.lower() in ['gal', 'g']: payload = {'mode': mode, 'l': lon, 'b': lat} elif coordsys.lower() in ['equ', 'e']: payload = {'mode': mode, 'ra': lon, 'dec': lat} else: raise ValueError("coordsys '{0}' not understood.".format(coordsys)) req = requests.post('http://argonaut.skymaps.info/gal-lb-query-light', data=json.dumps(payload), headers={'content-type': 'application/json'}) try: req.raise_for_status() except requests.exceptions.HTTPError as excep: print('Response received from Argonaut:') print(req.text) raise excep return json.loads(req.text)
1975d69b1c01d0cbb824b813d994941a63728750
3,656,450
def f(x0, x1, l, mig_spont, mig_ind, eps):
    """ function defining the model dx/dt=f(x)"""
    return [f0(x0, x1, l, mig_spont, mig_ind, eps), f1(x0, x1, l, mig_spont, mig_ind, eps)]
f6c6a9bdfd9eac7db40306388035bb2127301753
3,656,451
from amara.lib import inputsource from amara.xpath.util import parameterize from amara.xslt.result import streamresult, stringresult from amara.xslt.processor import processor def transform(source, transforms, params=None, output=None): """ Convenience function for applying an XSLT transform. Returns a result object. source - XML source document in the form of a string (not Unicode object), file-like object (stream), file path, URI or amara.lib.inputsource instance. If string or stream it must be self-contained XML (i.e. not requiring access to any other resource such as external entities or includes) transforms - XSLT document (or list thereof) in the form of a string, stream, URL, file path or amara.lib.inputsource instance params - optional dictionary of stylesheet parameters, the keys of which may be given as unicode objects if they have no namespace, or as (uri, localname) tuples if they do. output - optional file-like object to which output is written (incrementally, as processed) """ #do the imports within the function: a tad bit less efficient, but #avoid circular crap params = parameterize(params) if params else {} proc = processor() if isinstance(transforms, (list, tuple)): for transform in transforms: proc.append_transform(inputsource(transform)) else: proc.append_transform(inputsource(transforms)) if output is not None: result = streamresult(output) else: result = stringresult() return proc.run(inputsource(source), params, result)
4a9bbb7a27a9daff977ccefc151a0c480b27f71b
3,656,452
from typing import Optional import requests def lookup_user_github_username(user_github_id: int) -> Optional[str]: """ Given a user github ID, looks up the user's github login/username. :param user_github_id: the github id :return: the user's github login/username """ try: headers = { 'Authorization': 'Bearer {}'.format(cla.conf['GITHUB_OAUTH_TOKEN']), 'Accept': 'application/json', } r = requests.get(f'https://api.github.com/user/{user_github_id}', headers=headers) r.raise_for_status() except requests.exceptions.HTTPError as err: msg = f'Could not get user github user from id: {user_github_id}: error: {err}' cla.log.warning(msg) return None github_user = r.json() if 'message' in github_user: cla.log.warning(f'Unable to lookup user from id: {user_github_id} ' f'- message: {github_user["message"]}') return None else: if 'login' in github_user: return github_user['login'] else: cla.log.warning('Malformed HTTP response from GitHub - expecting "login" attribute ' f'- response: {github_user}') return None
2943ff8760ff02006efcd33ecb59508fc2262520
3,656,453
def get_plot_values(radar):
    """ Return the values specific to a radar for plotting the radar fields. """
    return _DEFAULT_PLOT_VALUES[radar].copy()
579cf303a7be1201e71831a13f156b72766bad7f
3,656,454
import time
from operator import itemgetter


def time_series_dict_to_list(dictionary, key=lambda x: time.mktime(x.timetuple()), value=identity):
    """
    Convert the incoming dictionary of keys to a list of sorted tuples.

    :param dictionary: dictionary to retrieve data from
    :param key: expression used to retrieve the time_series key from the key
    :param value: expression used to retrieve the time_series value from the value
    :return: list of tuples where index 0 is seconds since epoch, and index 1 is value
    """
    if key is None:
        key = identity
    if value is None:
        value = identity
    time_series = [[key(k), value(v)] for k, v in dictionary.items()]
    return sorted(time_series, key=itemgetter(0))
f5234ec0d5296c17f6758b2815b65ac33009944d
3,656,455
from datetime import datetime def get_data_from_csv(csv_reader): """Creates a list of StatEntry objects based on data in CSV data. Input CSV data must be in the format: Description,timestamp,num_batches,time mean value,time sd Args: csv_reader: csv.reader instance. Returns: A tuple of datetime timestamp and list of benchmark_util.StatEntry objects. Raises: ValueError: if CSV is invalid. """ timestamp = None stat_entries = [] for row in csv_reader: if len(row) != 5: raise ValueError('Expected 5 entries per line in the input CSV file, ' 'but found %d entries.' % len(row)) if '' in row: raise ValueError('Found empty entries in row: %s' % row) # Set timestamp based on the first line in CSV file. if timestamp is None: # Example of time formatting: 2017-06-26 02:59:29.325579 timestamp = datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S.%f") stat_entries.append( benchmark_util.StatEntry(row[0], float(row[3]), 1)) return timestamp, stat_entries
31473648b91b605d8537da720f316a22f8584f2a
3,656,456
import re
import os


def rootUrlJoin(base, extend):
    """
    Adds a path element to the path within a ROOT url
    """
    if base:
        match = re.match("^root://([^/]+)/(.+)", base)
        if match:
            host = match.group(1)
            path = match.group(2)
            newpath = os.path.join(path, extend)
            newurl = "root://%s/%s" % (host, newpath)
            return newurl
    return None
613db9d8ae230bfba064e16cf4fe1712b061be91
3,656,457
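A quick check of `rootUrlJoin` with made-up example values, showing how the host is preserved and the path extended.

# Hypothetical host/path; any non-matching string falls through to None.
url = rootUrlJoin("root://host.example.com/store/data", "file.root")
# -> "root://host.example.com/store/data/file.root"
assert rootUrlJoin("not-a-root-url", "file.root") is None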
import re


def getChildren(SOUP, ADDRESS_SET, FOLDER_LIST, FOLDER_PTR, DEBUG, LEVEL=0):
    """ Loop iterative call to move into soup

    Args:
        SOUP            ie bs.BeautifulSoup( doc ) or a sub-portion there-of
        ADDRESS_SET     list of address information
        FOLDER_LIST     list of folders
        FOLDER_PTR      integer pointer into FOLDER_LIST
        DEBUG           boolean, if true print messages
        LEVEL           integer counter that tracks recursive getChildren call
                        only prints when DEBUG = True

    Returns:
        address set
        FOLDER_LIST
        FOLDER_PTR

    dev note: using SOUP.get_text() vs str(SOUP) solves some recursion issues
        except get_text() does not include html formatting, which breaks code that
        tries to match the formatting; therefore use SOUP.name for that
        note: str(SOUP) returns UTF-8
              SOUP.decode() also returns str but in unicode,
              SOUP.decode_contents() returns str but without leading element
              SOUP.get_text() is only the human readable text
        per ref: https://stackoverflow.com/questions/31528600/beautifulsoup-runtimeerror-maximum-recursion-depth-exceeded
    """
    if DEBUG:
        print(f'getChildren call level = {LEVEL}')
    LEVEL = LEVEL + 1

    # - first handle if new folder or not
    soup_text = (SOUP.decode()).replace('\r', ' ').replace('\n', ' ')
    # was using SOUP.get_text() but it doesn't include html formatting
    # need html formatting for the next line to work
    # SOUP.name gives the current element so don't have to use a string
    if SOUP.name == 'dl':
        # if (re.search('^<dl>', stringNChar(soup_text, 10)) is not None):
        newFolder = True
        if (DEBUG):
            print('SOUPI' + str(len(SOUP)) + ':enter:' + stringNChar(soup_text, 100))
    else:
        newFolder = False
        if (DEBUG):
            print('SOUPI' + str(len(SOUP)) + '::' + stringNChar(soup_text, 100))

    # - now handle the sub elements of the passed SOUP
    tagNowI = -1
    while (tagNowI < (len(SOUP) - 1)):
        tagNowI = tagNowI + 1

        # only process Tags
        if (re.search('Tag', str(type(SOUP.contents[tagNowI]))) is None):
            continue

        soupLength = len(SOUP.contents[tagNowI])
        if (DEBUG):
            print('getChildren: ' + str(tagNowI) + '::' + str(soupLength))
        if (soupLength == 0):
            continue

        if (soupLength == 1):
            if (DEBUG):
                if type(SOUP.contents[tagNowI]) is bs.element.NavigableString:
                    print('found:: ' + (SOUP.contents[tagNowI].title()))
                else:
                    print('found:: ' + (SOUP.contents[tagNowI].get_text()))
            (addr, FOLDER_LIST, elemType) = tagElement(
                SOUP.contents[tagNowI], FOLDER_LIST, DEBUG)
            if (DEBUG):
                print('element type: ' + str(elemType))
            if (elemType == 0 and addr is not None):
                # append the dd information string to the last address
                ADDRESS_SET[len(ADDRESS_SET) - 1].append(addr)
            elif (elemType == 1 and addr is not None):
                # append the latest address information to the ADDRESS_SET
                ADDRESS_SET.append(addr)
            elif (elemType == 2):
                # 2: increment the folder pointer; QQQ okay but how to leave folder?
                if (tagNowI < len(SOUP) - 2):
                    x = 1
                    if (len(SOUP.contents[tagNowI + 1]) == 1):
                        # empty folder must leave (fixes Raspberry pi issue but not
                        # Entertainment and Lifestyle not-leaving folder issue)
                        x = FOLDER_LIST.pop()
                        if (DEBUG):
                            print('Drop Bad folder:' + x)
            elif (elemType == 3 or elemType == 4):
                # 3: folder name new; QQQ: already appended at a lower level
                # 4: folder name new; QQQ: already appended at a lower level; parent folder
                # this doesn't do anything anymore except prevent no match message
                # script not optimized so don't remove; leave for documentation
                x = 1
            else:
                # nothing happened; why?
                # <p> gets here; needs to be folder type or is it dl that marks folders? technically both
                # title gets here also
                # \n gets here
                if (DEBUG):
                    print('no match by type:: ' + (SOUP.contents[tagNowI].get_text()))
        else:
            # pseudo-code if len > 1 then need to call getChildren
            # when exit after a call to getChildren then reduce FOLDER_PTR???
            # problem decrementing FOLDER_PTR here is too overzealous
            if (re.search('empty_folder_auto_can_bus',
                          stringNChar(SOUP.contents[tagNowI].get_text(), 100)) is not None):
                x = 1
            if (DEBUG):
                print('Calling getChildren:' + str(tagNowI) + ': ' +
                      stringNChar(SOUP.contents[tagNowI].get_text(), 100))
            (ADDRESS_SET, FOLDER_LIST, FOLDER_PTR) = getChildren(
                SOUP.contents[tagNowI], ADDRESS_SET, FOLDER_LIST, FOLDER_PTR, DEBUG, LEVEL)

    if newFolder:
        pre_folder = FOLDER_LIST
        FOLDER_LIST.pop()
        if (DEBUG):
            print('Exit folder (' + str(FOLDER_PTR) + ') from' + ':'.join(pre_folder) +
                  '\n\tnow' + ':'.join(FOLDER_LIST))
        FOLDER_PTR = 0  # should it -1 instead if odd/even

    return(ADDRESS_SET, FOLDER_LIST, FOLDER_PTR)
34cef9312b9ee3f44bd40f4067cbbff170da875b
3,656,458
def either(a, b):
    """
    :param a: Uncertain value (might be None).
    :param b: Default value.
    :return: Either the uncertain value if it is not None or the default value.
    """
    return b if a is None else a
3fd2f99fa0851dae6d1b5f11b09182dbd29bb8c1
3,656,459
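A tiny check of `either`: the default is used only when the first value is None, so falsy values such as 0 or "" are kept.

assert either(None, 5) == 5
assert either(0, 5) == 0          # 0 is not None, so it is kept
assert either("", "default") == ""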
def get_app_label_and_model_name(path):
    """Gets app_label and model_name from the path given.

    :param str path: Dotted path to the model (without ".model", as stored
        in the Django `ContentType` model).
    :return tuple: app_label, model_name
    """
    parts = path.split('.')
    return (''.join(parts[:-1]), parts[-1])
998e8d81f59491a51f3ae463c76c8627ed63b435
3,656,460
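A small check of the split behaviour, using a hypothetical ContentType-style path.

# 'auth.user' is a typical app_label.model_name pair in Django's ContentType table.
assert get_app_label_and_model_name('auth.user') == ('auth', 'user')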
def get_item_editor(val):
    """ (val: Any) -> Editor

    Returns customized View editor type for given attribute value.
    """
    if isinstance(val, list):  # later might need tuple with label case
        if isinstance(val[0], str):
            return CheckListEditor(values=val)
        else:
            return CheckListEditor(values=[str(item) for item in val])
    if isinstance(val, bool):
        return BooleanEditor()
    else:
        return TextEditor(auto_set=False, enter_set=True)
46dd3011bcd34ee7ecf5a311d45e8fa7a6d8603e
3,656,461
from typing import List

import json


def index_js_to_enriched_function_blocks(index_js: str) -> List[EnrichedFunctionBlock]:
    """
    Main function of the file.
    Converts raw index.js file into the output dataclass.
    """
    trimmed_index_js = trim_index_js(index_js)
    index_json = json.loads(trimmed_index_js)
    rtn_blocks = []
    for package_name, list_of_scala_types in index_json.items():
        for scala_type in list_of_scala_types:
            enriched_blocks = extract_enriched_function_blocks(package_name, scala_type)
            rtn_blocks.extend(enriched_blocks)
    return rtn_blocks
46776f7a277da4f111fb8b2d797c91777d84c2a7
3,656,462
from typing import Union


def compute_single_results(base_path: str, file_name: str, selection_metric: str,
                           selection_scheme: Union[None, str], selection_mode: str,
                           selection_domain: str, result_scheme: str, result_mode: str,
                           result_metric: str):
    """
    Parameters
    ----------
    base_path
    file_name
    selection_metric
    selection_mode
    selection_scheme
    selection_domain
    result_scheme
    result_mode
    result_metric
    """
    path = base_path + file_name
    csv_path = base_path + 'results.csv'

    # read the data from the tensorboard summary writer file
    iterator = summary_iterator(path)
    tag_dict = create_tag_dict(iterator)

    # create a csv file for storing the results
    create_csv(csv_path, tag_dict)

    # read the results
    data_frame = read_csv_file(csv_path)

    # get the desired results in columns
    column_indices, target_groups = get_metric_columns(data_frame, selection_metric,
                                                       selection_scheme, mode=selection_mode)

    # determine the time step of the best results of the desired result
    selection_col_index = target_groups.index(selection_domain)
    _, time_step = get_max_val(column_indices[selection_col_index], data_frame)

    # get the targets and columns of the metrics, which should be reported
    column_indices, target_groups = get_metric_columns(data_frame, result_metric,
                                                       result_scheme, mode=result_mode)
    results = select_results_by_time_step(column_indices, data_frame, time_step)

    result_dict = {}
    for key, value in zip(target_groups, results):
        result_dict[key] = value
    return result_dict
b176f7d2a3ae3c19c67794c47969a3bf4d5d203b
3,656,463
def run():
    """ Step through each row and every 3rd column to find collisions """
    trees = 0
    x = 0
    width = len(rows[0])
    for line in rows[1:]:
        x += 3
        if x >= width:
            x -= width
        if line[x] == "#":
            trees += 1
    return trees
f8c3f05ad411990bf16c6161f8ebcb544d5930df
3,656,464
def div_col(*items, size=None, style=None, id=None, classes=None) -> HTML:
    """Generate a new div with a col class

    Parameters
    ----------
    items: argument list
        DOM children of this div
    """
    children = ''.join(items)

    attr = []
    if style is not None:
        attr.append(f'style="{style}"')
    if id is not None:
        attr.append(f'id="{id}"')
    if classes is not None:
        attr.append(f'class="{classes}"')
    elif size is not None:
        attr.append(f'class="col-{size}"')
    else:
        attr.append(f'class="col"')
    attr = ' '.join(attr)

    return f'<div {attr}>{children}</div>'
9200c23481756fa8d82813d1e95edc7328e63497
3,656,465
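A hypothetical call to `div_col`; HTML is assumed to be a plain string alias in the module defining this helper.

html = div_col('<p>hello</p>', '<p>world</p>', size=6, id='greeting')
# -> '<div id="greeting" class="col-6"><p>hello</p><p>world</p></div>'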
import cv2
import face_recognition
from numpy import ndarray


def predict(image: bytes) -> ndarray:
    """
    Call the model returning the image with the faces blurred

    :param image: the image to blur the faces from
    :return: the image with the faces blurred
    """
    sigma = 50
    image = face_recognition.load_image_file(image)
    locations = face_recognition.face_locations(image)
    for location in locations:
        (startY, endY) = location[0:2]
        (startX, endX) = location[2:4]
        image = blur_image(image, startX, endX, startY, endY, sigma=sigma)
    is_successful, im_png = cv2.imencode(".png", image)
    if is_successful:
        return im_png
    raise Exception("Error encoding image")
87cfffa2a63c3e90baf023f2d37effa753c9ab89
3,656,466
from frappe.defaults import get_user_default_as_list
from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate
import json


def get_basic_details(args, item):
    """
    :param args: {
            "item_code": "",
            "warehouse": None,
            "customer": "",
            "conversion_rate": 1.0,
            "selling_price_list": None,
            "price_list_currency": None,
            "price_list_uom_dependant": None,
            "plc_conversion_rate": 1.0,
            "doctype": "",
            "name": "",
            "supplier": None,
            "transaction_date": None,
            "conversion_rate": 1.0,
            "buying_price_list": None,
            "is_subcontracted": "Yes" / "No",
            "ignore_pricing_rule": 0/1
            "project": "",
            barcode: "",
            serial_no: "",
            warehouse: "",
            currency: "",
            update_stock: "",
            price_list: "",
            company: "",
            order_type: "",
            is_pos: "",
            ignore_pricing_rule: "",
            project: "",
            qty: "",
            stock_qty: "",
            conversion_factor: ""
        }
    :param item: `item_code` of Item object
    :return: frappe._dict
    """

    if not item:
        item = frappe.get_doc("Item", args.get("item_code"))

    if item.variant_of:
        item.update_template_tables()

    user_default_warehouse_list = get_user_default_as_list('Warehouse')
    user_default_warehouse = user_default_warehouse_list[0] \
        if len(user_default_warehouse_list) == 1 else ""

    item_defaults = get_item_defaults(item.name, args.company)
    warehouse = user_default_warehouse or item_defaults.get("default_warehouse") or args.warehouse

    material_request_type = ''
    if args.get('doctype') == "Material Request" and not args.get('material_request_type'):
        args['material_request_type'] = frappe.db.get_value('Material Request',
                                                            args.get('name'), 'material_request_type')

    # Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master
    if not args.uom:
        if args.get('doctype') in ['Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice']:
            args.uom = item.sales_uom if item.sales_uom else item.stock_uom
        elif (args.get('doctype') in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']) or \
                (args.get('doctype') == 'Material Request' and args.get('material_request_type') == 'Purchase'):
            args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom
        else:
            args.uom = item.stock_uom

    out = frappe._dict({
        "item_code": item.name,
        "item_name": item.item_name,
        "description": cstr(item.description).strip(),
        "image": cstr(item.image).strip(),
        "warehouse": warehouse,
        "income_account": get_default_income_account(args, item_defaults),
        "expense_account": get_default_expense_account(args, item_defaults),
        "cost_center": get_default_cost_center(args, item_defaults),
        'has_serial_no': item.has_serial_no,
        'has_batch_no': item.has_batch_no,
        "batch_no": None,
        "item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in item.get("taxes")))),
        "uom": args.uom,
        "min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
        "qty": args.qty or 1.0,
        "stock_qty": args.qty or 1.0,
        "price_list_rate": 0.0,
        "base_price_list_rate": 0.0,
        "rate": 0.0,
        "base_rate": 0.0,
        "amount": 0.0,
        "base_amount": 0.0,
        "net_rate": 0.0,
        "net_amount": 0.0,
        "discount_percentage": 0.0,
        "supplier": item_defaults.get("default_supplier"),
        "update_stock": args.get("update_stock") if args.get('doctype') in ['Sales Invoice', 'Purchase Invoice'] else 0,
        "delivered_by_supplier": item.delivered_by_supplier if args.get("doctype") in ["Sales Order", "Sales Invoice"] else 0,
        "is_fixed_asset": item.is_fixed_asset,
        "weight_per_unit": item.weight_per_unit,
        "weight_uom": item.weight_uom,
        "last_purchase_rate": item.last_purchase_rate if args.get("doctype") in ["Purchase Order"] else 0
    })

    if item.enable_deferred_revenue:
        service_end_date = add_months(args.transaction_date, item.no_of_months)
        out.update({
            "enable_deferred_revenue": item.enable_deferred_revenue,
            "deferred_revenue_account": get_default_deferred_revenue_account(args, item),
            "service_start_date": args.transaction_date,
            "service_end_date": service_end_date
        })

    # calculate conversion factor
    if item.stock_uom == args.uom:
        out.conversion_factor = 1.0
    else:
        out.conversion_factor = args.conversion_factor or \
            get_conversion_factor(item.item_code, args.uom).get("conversion_factor") or 1.0

    args.conversion_factor = out.conversion_factor
    out.stock_qty = out.qty * out.conversion_factor

    # calculate last purchase rate
    out.last_purchase_rate = item_last_purchase_rate(args.name, args.conversion_rate,
                                                     item.item_code, out.conversion_factor)

    # if default specified in item is for another company, fetch from company
    for d in [
            ["Account", "income_account", "default_income_account"],
            ["Account", "expense_account", "default_expense_account"],
            ["Cost Center", "cost_center", "cost_center"],
            ["Warehouse", "warehouse", ""]]:
        if not out[d[1]]:
            out[d[1]] = frappe.db.get_value("Company", args.company, d[2]) if d[2] else None

    for fieldname in ("item_name", "item_group", "barcodes", "brand", "stock_uom"):
        out[fieldname] = item.get(fieldname)

    return out
f79438ebdcb7de48f48ca92e80f160a396d6ea6a
3,656,467
import math


def vec_len(x):
    """ Length of the 2D vector"""
    length = math.sqrt(x[0]**2 + x[1]**2)
    return length
a357d31df808720eb2c4dfc12f4d6194ef904f67
3,656,468
def part1_count_increases(measurements):
    """Count increases of a measure with the next."""
    windows = zip(measurements[1:], measurements[:-1])
    increases = filter(lambda w: w[0] > w[1], windows)
    return len(list(increases))
59311b940ff7fe72cd6fe9cd4d0705918e796e69
3,656,469
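A quick sanity check of `part1_count_increases` on an illustrative depth series (the values are made up, in the style of the classic sonar-sweep puzzle).

assert part1_count_increases([199, 200, 208, 210, 200, 207, 240, 269, 260, 263]) == 7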
import numpy as np


def remove_empties(seq):
    """ Remove items of length 0

    >>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
    [1, 2, 4, 5]

    >>> remove_empties([('empty', np.nan)])
    [nan]

    >>> remove_empties([])
    []
    """
    if not seq:
        return seq

    seq2 = [x for x in seq
            if not (isinstance(x, tuple) and x and x[0] == 'empty')]
    if seq2:
        return seq2
    else:
        return [seq[0][1]]
500cbbd942682bfde1b9c1babe9a2190413b07fd
3,656,470
from collections import deque


def breadth_first_graph_search(problem):
    """Breadth-first graph search algorithm."""
    global frontier, node, explored, counter

    if counter == -1:
        node = Node(problem.initial)
        display_current(node)
        if problem.goal_test(node.state):
            return node

        frontier = deque([node])  # FIFO queue

        display_frontier(frontier)
        explored = set()
    if counter % 3 == 0 and counter >= 0:
        node = frontier.popleft()
        display_current(node)
        explored.add(node.state)
    if counter % 3 == 1 and counter >= 0:
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                if problem.goal_test(child.state):
                    return child
                frontier.append(child)
        display_frontier(frontier)
    if counter % 3 == 2 and counter >= 0:
        display_explored(node)
    return None
332d5d300615f30f1125c5fafed882f1941b8f39
3,656,471
def to_smiles(rdm):
    """ SMILES string from an rdkit molecule object
    """
    smi = _rd_chem.MolToSmiles(rdm)
    return smi
bd0c79b8b0066bd0cf2f49d99c0ec80543f50c9b
3,656,472
import collections
import logging

import anndata
import numpy as np
import scipy


def merge_bins(adata, bin_size):
    """Merge bins."""
    orig_bins = collections.defaultdict(list)
    for coor in adata.var_names:
        chrom, start, end = coor.split(':')[0], int(
            coor.split(':')[1].split('-')[0]), int(
                coor.split(':')[1].split('-')[1])
        orig_bins[chrom].append((start, end))
    logging.info('Done with counting the bins')

    resized_bins_index = []
    resized_chrs = []
    resized_bins_counts = []
    for chrom, ranges in orig_bins.items():
        curr_bin = 0
        curr_acc = []
        for (start, end) in sorted(ranges):
            if start // bin_size == curr_bin:
                curr_acc.append(f'{chrom}:{start}-{end}')
            else:
                if curr_acc:
                    # For the empty initialisation at the beginning of the chr.
                    resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1))
                    resized_bins_index.append(
                        f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}')
                curr_acc = [f'{chrom}:{start}-{end}']
                curr_bin = start // bin_size
        resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1))
        resized_bins_index.append(
            f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}')
        resized_chrs.append(scipy.sparse.csr_matrix(np.hstack(resized_bins_counts)))
        resized_bins_counts = []
        logging.info('Done with %s', chrom)

    new_adata = anndata.AnnData(
        scipy.sparse.csr_matrix(
            np.hstack([chrom.toarray() for chrom in resized_chrs])))
    new_adata.var_names = resized_bins_index
    new_adata.obs = adata.obs
    return new_adata
dc1f939e5bcd1604b525d616ee94868e6baae8c6
3,656,473
def show_all_fruits():
    """Show all fruits in the database."""
    fruits = fruits_collection.find({})
    for fruit in fruits:
        print(fruit)
    context = {
        'list_of_fruits': fruits_collection.find({})
    }
    return render_template('show_fruits.html', **context)
6329c6f6f1a7a30f6e35ea83aecd7fd71e81fe24
3,656,474
import json


def load_fields(path: str = f'{DEFAULT_FIELD_PATH}{FIELD_FILENAME}') -> dict:
    """Load Fields.

    Parameters
    ----------
    :param: path: string path to the fields file.

    Returns
    -------
    A dictionary of fields, with the following format:
    {
        "field_name": {
            "help_text": "",
            "type": ""
        }
    }
    """
    with open(path, 'r') as json_file:
        return json.load(json_file)
b6bc8916fa3a9d8a53f7cda5e13acf32a9b57860
3,656,475
import typing as t


def set_max_concurrency(
    uses: int, bucket: t.Type[buckets.Bucket]
) -> t.Callable[[commands.base.CommandLike], commands.base.CommandLike]:
    """
    Second order decorator that defines the max concurrency limit for a command.

    Args:
        uses (:obj:`int`): The maximum number of uses of the command that can be executing concurrently
            before a :obj:`~.errors.MaxConcurrencyLimitReached` will be raised upon invocation.
        bucket (Type[:obj:`~.buckets.Bucket`]): Bucket that command max concurrency will be processed under.
    """
    if uses < 1 or not isinstance(uses, int):
        raise ValueError("'uses' must be a positive integer")

    def decorate(c_like: commands.base.CommandLike) -> commands.base.CommandLike:
        if not isinstance(c_like, commands.base.CommandLike):
            raise SyntaxError("'set_max_concurrency' decorator must be above the 'command' decorator")

        c_like.max_concurrency = (uses, bucket)
        return c_like

    return decorate
b0677bc71f68d9ae674b424795bb29e93915726b
3,656,476
def three_to_one_protocol_bob(q1, q2, q3, bob, socket):
    """
    Implements Bob's side of the 3->1 distillation protocol.
    This function should perform the gates and measurements for 3->1 using qubits q1 and q2,
    then send the measurement outcome to Alice and determine if the distillation was successful.

    :param q1: Bob's qubit from the first entangled pair
    :param q2: Bob's qubit from the second entangled pair
    :param q3: Bob's qubit from the third entangled pair
    :param bob: Bob's NetQASMConnection
    :param socket: Alice's classical communication socket to Bob
    :return: True/False indicating if protocol was successful
    """
    b1, b2 = three_to_one_gates_and_measurement_bob(q1, q2, q3)
    bob.flush()

    # Send measurement result to Alice, receive measurement result from Alice
    # and check if protocol was successful
    b1 = int(b1)
    b2 = int(b2)
    socket.send_structured(StructuredMessage("The outcome is: ", (b1, b2)))
    a1, a2 = socket.recv_structured().payload
    if (a1, a2) == (b1, b2):
        return True
    else:
        return False
0eab81d5d860c4314be4411b0dca429fc58cdb28
3,656,477
def read_code_blocks_from_md(md_path):
    """
    Read ```python annotated code blocks from a markdown file.

    Args:
        md_path (str): Path to the markdown fle

    Returns:
        py_blocks ([str]): The blocks of python code.
    """
    with open(md_path, "r") as f:
        full_md = f.read()
        md_py_splits = full_md.split("```python")[1:]
        py_blocks = [split.split("```")[0] for split in md_py_splits]
    return py_blocks
ca920f74e9326cf5f3635fbb6ebe125b6d97a349
3,656,478
import math

import numpy as np
import theano
import theano.tensor as T


def CBOW(vocab_size, emb_size):
    """
    CBOW: Function to define the CBOW model

    parameters:
        vocab_size: the vocabulary size
        emb_size: dimension of the embedding vector

    return:
        List of theano variables [context, target], represents the model input,
        Theano function represents the loss (i.e. the cost or the objective) function,
        List of theano (shared) variable params, represents the parameters of the model.
    """
    context = T.imatrix(name='context')
    target = T.ivector('target')

    W_in_values = np.asarray(np.random.uniform(-1.0, 1.0, (vocab_size, emb_size)),
                             dtype=theano.config.floatX)
    W_out_values = np.asarray(np.random.normal(scale=1.0 / math.sqrt(emb_size),
                                               size=(emb_size, vocab_size)),
                              dtype=theano.config.floatX)

    W_in = theano.shared(
        value=W_in_values,
        name='W_in',
        borrow=True)
    W_out = theano.shared(
        value=W_out_values,
        name='W_out',
        borrow=True)

    h = T.mean(W_in[context], axis=1)           # compute the hidden (projection) layer output: input -> hidden (eq. 1)
    uj = T.dot(h, W_out)                        # hidden -> output (eq. 2)
    p_target_given_contex = T.nnet.softmax(uj)  # softmax activation (eq. 3)

    loss = -T.mean(T.log(p_target_given_contex)[T.arange(target.shape[0]), target])  # loss function (eq. 4)

    params = [W_in, W_out]
    return [context, target], loss, params
82275f52528715fc783247b649cc5b56e51e1ce2
3,656,479
def subject(request, clas_slug, subject_slug, page=1):
    """ List of GDZ solution books for a subject """
    gdz_clas = get_object_or_404(GdzClas, slug=clas_slug)
    gdz_subject = get_object_or_404(GdzSubject, slug=subject_slug, gdz_clas=gdz_clas)

    book_list = GdzBook.published.filter(gdz_clas=gdz_clas,
                                         gdz_subject=gdz_subject).order_by('-public_time')
    paginator = Paginator(book_list, PAGE_ITEM)
    try:
        books = paginator.page(page)
    except EmptyPage:
        raise Http404

    h1 = "Гдз {subject_title} {clas_slug} клас".format(subject_title=gdz_subject.title,
                                                       clas_slug=gdz_clas.slug)
    page_title = "Гдз {subject_title} {clas_slug} клас".format(subject_title=gdz_subject.title,
                                                               clas_slug=gdz_clas.slug)

    return render(request, 'gdz/subject.html', {'books': books,
                                                'h1': h1,
                                                'page_title': page_title,
                                                'gdz_clas': gdz_clas,
                                                'gdz_subject': gdz_subject,
                                                'paginate_link': 'gdz:subject_paginate',
                                                'link': 'gdz:subject'})
1b7a3bd6314de87ec059313cd020a8249586619f
3,656,480
def root_mean_square_ffinalise(out, sub_samples=None):
    """Divide the weighted sum by the sum of weights and take the square root.

    Also mask out any values derived from a too-small sample size.

    :Parameters:

        out: 3-`tuple` of `numpy.ndarray`
            An output from `root_mean_square_fpartial`.

        sub_samples: optional

    :Returns:

        2-`tuple` of `numpy.ndarray`
            The sample size and the RMS.
    """
    N, avg = mean_ffinalise(out, sub_samples=sub_samples)

    avg **= 0.5

    return asanyarray(N, avg)
82562ef562b2e7dfaeefee9de42224568900f5a1
3,656,481
import hashlib


def md5sum_fileobj(f, start=0, end=None):
    """Accepts a file object and returns the md5sum."""
    m = hashlib.md5()
    for block in file_reader(f, start, end):
        assert block != "", "Got an empty read"
        m.update(block)
    return m.hexdigest()
db1046c2466d408b0de9e402af31930b72ce9d76
3,656,482
import math


def get_localization_scores(predicted_start: int, predicted_end: int,
                            true_start: int, true_end: int):
    """
    exp(-abs(t_pred_start - t_start) / (t_end - t_start))
    exp(-abs(t_pred_end - t_end) / (t_end - t_start))

    :param predicted_start:
    :param predicted_end:
    :param true_start:
    :param true_end:
    """
    if true_end - true_start <= 0:
        return 0, 0

    base = math.exp(1 / (true_start - true_end))
    return base ** abs(predicted_start - true_start), base ** abs(predicted_end - true_end)
dfcef55e0594507b48aa83027c5b55a2a6530717
3,656,483
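A small numeric check of `get_localization_scores` with illustrative time stamps: a perfect prediction scores 1.0 on both boundaries, and each unit of error multiplies the score by exp(-1/duration).

assert get_localization_scores(10, 20, 10, 20) == (1.0, 1.0)
start_score, end_score = get_localization_scores(11, 19, 10, 20)
# Both are exp(-1/10) ~= 0.905, since each boundary is off by one time unit.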
def json_compatible_key(key: str) -> str:
    """As defined in :pep:`566#json-compatible-metadata`"""
    return key.lower().replace("-", "_")
b914ba17b3da5df84d72497048565a118fc4fb05
3,656,484
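A one-line example of the PEP 566 key normalisation performed by `json_compatible_key`.

assert json_compatible_key("Description-Content-Type") == "description_content_type"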
import logging
import json


def get_transitions(jira_host, username, password, issue_id):
    """
    Returns transitions of the issue.

    jira_host -- JIRA host to contact
    username -- JIRA username with administrative permissions.
    password -- password of the username.
    issue_id -- id of the issue which transitions should be returned.
    """
    headers = get_auth_header(username, password)
    response = https_helper.get(jira_host, issue_transitions_path % issue_id, None, headers)
    if response.status != 200:
        logging.debug('Did not find any transitions for issue: %s', issue_id)
        return []
    return json.loads(response.read())['transitions']
ec1546b6595e4482f245507e98dfd830673c9920
3,656,485
def _scale_func(k):
    """
    Return a lambda function that scales its input by k

    Parameters
    ----------
    k : float
        The scaling factor of the returned lambda function

    Returns
    -------
    Lambda function
    """
    return lambda y_values_input: k * y_values_input
65fd06bfb1a278b106eecc4974bc9317b1dea67f
3,656,486
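A brief usage example for `_scale_func`; since the returned lambda just multiplies by k, it should also broadcast over numpy arrays.

double = _scale_func(2)
assert double(3.5) == 7.0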
import math
import multiprocessing

import numpy


def focal_agents(dest, weight, source, fail=False):
    """
    dest: point property set (determines property return type)
    weight: field property (weight/mask)
    source: point property (values to gather from)
    """
    # hack rename...
    source_point = dest
    source_field = weight
    dest_prop = source

    if not isinstance(source_point.space_domain, Points):
        msg = _color_message(f'Property "{source_point.name}" must be of domain type Point')
        raise TypeError(msg)

    if not isinstance(source_field.space_domain, Areas):
        msg = _color_message(f'Property "{source_field.name}" must be of domain type Area')
        raise TypeError(msg)

    if not isinstance(dest_prop.space_domain, Points):
        msg = _color_message(f'Property "{dest_prop.name}" must be of domain type Point')
        raise TypeError(msg)

    dst_crs = source_point.space_domain.epsg
    field_crs = source_field.space_domain.epsg
    point_crs = dest_prop.space_domain.epsg

    cnt = 1
    for arg in [dst_crs, field_crs, point_crs]:
        if not arg:
            msg = _color_message(f'Operation requires a CRS, set the EPSG code of the phenomenon (argument {cnt})')
            raise ValueError(msg)
        cnt += 1

    if field_crs != point_crs:
        msg = _color_message(f'Incompatible CRS {field_crs} != {point_crs}')
        raise ValueError(msg)

    assert dst_crs == field_crs

    tmp_prop = Property('emptyfocal_agents', dest.uuid, dest.space_domain, dest.shapes, numpy.nan)

    #spatial_ref = osr.SpatialReference()
    #spatial_ref.ImportFromEPSG(point_crs)

    #ds = ogr.GetDriverByName('MEMORY').CreateDataSource('mem')

    ## Second we make a point feature from which we will obtain the locations
    ## Holding all objects
    #lyr_dst = ds.CreateLayer('locations', geom_type=ogr.wkbPoint, srs=spatial_ref)

    #field = ogr.FieldDefn('value', ogr.OFTReal)
    #lyr_dst.CreateField(field)

    #for idx, p in enumerate(dest_prop.space_domain):
        #point = ogr.Geometry(ogr.wkbPoint)
        #point.AddPoint(p[0], p[1])

        #feat = ogr.Feature(lyr_dst.GetLayerDefn())
        #feat.SetGeometry(point)

        #try:
            #val = dest_prop.values()[idx][0]
        #except:
            #val = dest_prop.values()[idx]

        #feat.SetField('value', float(val))
        #lyr_dst.CreateFeature(feat)

    #lyr_dst = None
    #lyr_dst = ds.GetLayer('locations')

    nr_locs = dest_prop.nr_objects

    todos = []
    for idx, p in enumerate(source_point.space_domain):
        values_weight = source_field.values()[idx]
        extent = source_field.space_domain._extent(idx)

        d_domain = dest_prop.space_domain
        d_values = dest_prop.values()

        item = (idx, 'tmp_prop', nr_locs, values_weight, extent, 'spatial_ref',
                'lyr_dst', 'operation', fail, 'dprop', point_crs, d_domain, d_values)
        todos.append(item)

    cpus = multiprocessing.cpu_count()
    tasks = len(todos)
    chunks = tasks // cpus

    with futures.ProcessPoolExecutor(max_workers=cpus) as ex:
        results = ex.map(_focal_agents, todos, chunksize=chunks)

    for result in results:
        tmp_prop.values().values[result[0]] = result[1]

    return tmp_prop

    # sequential
    #
    nr_locs = dest_prop.nr_objects
    point_values = numpy.empty(nr_locs)
    point_values.fill(numpy.nan)

    for idx, p in enumerate(source_point.space_domain):
        values_weight = source_field.values()[idx]
        extent = source_field.space_domain._extent(idx)

        # Raster for points to query
        nr_rows = extent[4]
        nr_cols = extent[5]
        cellsize = math.fabs(extent[2] - extent[0]) / nr_cols

        minX = extent[0]
        maxY = extent[3]

        #if ds.GetLayerByName('extent'):
        #    ds.DeleteLayer('extent')
        #ds.DeleteLayer('extent')
        ds_extent = ogr.GetDriverByName('MEMORY').CreateDataSource('ds_extent')
        extent_lyr = ds_extent.CreateLayer('extent', geom_type=ogr.wkbPolygon, srs=spatial_ref)

        feat = ogr.Feature(extent_lyr.GetLayerDefn())

        ring = ogr.Geometry(ogr.wkbLinearRing)
        ring.AddPoint(minX, maxY)
        ring.AddPoint(minX + nr_cols * cellsize, maxY)
        ring.AddPoint(minX + nr_cols * cellsize, maxY - nr_rows * cellsize)
        ring.AddPoint(minX, maxY - nr_rows * cellsize)
        ring.AddPoint(minX, maxY)

        poly = ogr.Geometry(ogr.wkbPolygon)
        poly.AddGeometry(ring)
        feat.SetGeometry(poly)
        extent_lyr.CreateFeature(feat)

        #if ds.GetLayerByName('intersect'):
        #    ds.DeleteLayer('intersect')
        intersect_layer = ds_extent.CreateLayer('locations', geom_type=ogr.wkbPoint, srs=spatial_ref)

        lyr_dst.Intersection(extent_lyr, intersect_layer)

        pcraster.setclone(nr_rows, nr_cols, cellsize, minX, maxY)
        raster = pcraster.numpy2pcr(pcraster.Scalar, values_weight, numpy.nan)

        point_values.fill(numpy.nan)

        for idx, feature in enumerate(intersect_layer):
            x = feature.GetGeometryRef().GetX()
            y = feature.GetGeometryRef().GetY()

            mask_value, valid = pcraster.cellvalue_by_coordinates(raster, x, y)
            agent_value = feature.GetField('value')
            point_values[idx] = mask_value * agent_value

        indices = ~numpy.isnan(point_values)
        masked = point_values[indices]

        res = 0
        if operation == 'average':
            res = numpy.average(masked)
        elif operation == 'sum':
            res = numpy.sum(masked)
        else:
            raise NotImplementedError

        if fail == True:
            assert res != 0

        tmp_prop.values()[idx] = res

    return tmp_prop
20acc643f725b0935e67041061fe8d7ebe9b44d3
3,656,487
from typing import List, Optional, Union

import torch
import torch.nn.functional as f


def erosion_dependent(input_tensor: torch.Tensor,
                      structuring_element: torch.Tensor,
                      origin: Optional[Union[tuple, List[int]]] = None,
                      border_value: Union[int, float, str] = 'geodesic'):
    """ This type of erosion is needed when you want a structuring element to vary along one axis.

    Parameters
    ----------
    :param input_tensor: torch.Tensor
        The input tensor that you want to erode. It should be a PyTorch tensor of 2 dimensions.
    :param structuring_element: torch.Tensor
        The structuring element to erode. The structuring element should be a PyTorch tensor of 3 dimensions;
        first dimension should coincide with first dimension of input_tensor and two other dimensions are the
        shape of the structuring element.
    :param origin: None, tuple, List[int]
        The origin of the structuring element. Default to center of the structuring element.
        Negative indexes are allowed. The origin will be the same for all the structuring elements.
    :param border_value: int, float, str
        The value used to pad the image in the border. Two options are allowed when a string is passed in parameter:
        - 'geodesic': only points within the input are considered when taking the minimum.
        - 'euclidean': extends naturally the image setting minus infinite value to the border.
        Default value is 'geodesic'.

    Outputs
    -------
    :return: torch.Tensor
        The erosion dependent of the first axis as a PyTorch tensor of the same shape as the original input.
    """
    # Check parameters
    check_parameters_dependent(input_tensor, structuring_element, origin, border_value)

    # Adapt origin
    if not origin:
        origin = (structuring_element.shape[1] // 2, structuring_element.shape[2] // 2)

    # Fill border value if needed
    border_value = fill_border(border_value, 'erosion')

    # Convert tensor to float if needed
    input_tensor = convert_float(input_tensor)

    # Pad input
    pad_list = [origin[1], structuring_element.shape[2] - origin[1] - 1,
                origin[0], structuring_element.shape[1] - origin[0] - 1]
    input_pad = f.pad(input_tensor, pad_list, mode='constant', value=border_value)

    # Compute erosion
    if str(input_tensor.device) == 'cpu':
        raise ValueError('Operation currently only implemented for GPU.')
    else:
        result = morphology_cuda.erosion_dependent(input_pad, structuring_element, BLOCK_SHAPE)

    return result
7646d56ceab9a7ec27182c485954f748cf4afd75
3,656,488
import numpy as np


def bin_barcodes(barcodes, binsize=1000):
    """Binning barcodes into chunks

    Parameters
    ----------
    barcodes : iterable
        Iterable of barcodes
    binsize : int
        Size of bin for grouping barcodes

    Returns
    -------
    yields list of barcode (1 bin)
    """
    binsize = int(float(binsize))
    bins = np.digitize(np.arange(0, barcodes.shape[0]),
                       np.arange(0, barcodes.shape[0], binsize))
    return [barcodes[bins == x] for x in np.unique(bins)]
3cc063f68a89a325a53def31bfe779d3aa8e62c6
3,656,489
def flash_regions(device, region_map):
    """divide the named memory into sized memory regions"""
    regions = []
    for x in region_map:
        if len(x) == 2:
            # no meta information: set it all to None
            (name, region_sizes) = x
            meta = (None,) * len(region_sizes)
        elif len(x) == 3:
            # provided meta information - make sure it's per region
            (name, region_sizes, meta) = x
            assert len(region_sizes) == len(meta), 'need meta information for each flash region'
        else:
            assert False, 'bad flash region specification'
        # the regions are based on the peripheral memory space
        base_adr = device.peripherals[name].address
        total_size = device.peripherals[name].size
        adr = base_adr
        for (s, m) in zip(region_sizes, meta):
            regions.append(region(name, adr, s, m))
            adr += s
        # make sure the regions cover the entire memory space of the peripheral
        assert base_adr + total_size == adr, "regions don't encompass all memory"
    return regions
43f444c1bdfee8441a8e6bf6c72dbc06dccb56df
3,656,490
from typing import Collection

import json


def _load_explorer_data(multiprocess=False):
    """
    Load in all available corpora and make their initial tables

    This is run when the app starts up
    """
    corpora = dict()
    tables = dict()
    for corpus in Corpus.objects.all():
        if corpus.disabled:
            print(f"Skipping corpus because it is disabled: {corpus.name}")
            continue

        buzz_collection = Collection(corpus.path)
        # a corpus must have a feather or conll to be explorable. prefer feather.
        buzz_corpus = buzz_collection.feather or buzz_collection.conllu

        if buzz_corpus is None:
            print(f"No parsed data found for {corpus.path}")
            continue

        corpora[corpus.slug] = buzz_corpus

        if corpus.load:
            print(f"Loading corpus into memory: {corpus.name} ...")
            opts = dict(add_governor=corpus.add_governor, multiprocess=multiprocess)
            buzz_corpus = buzz_corpus.load(**opts)
            buzz_corpus = _postprocess_corpus(buzz_corpus, corpus)
            corpora[corpus.slug] = buzz_corpus
        else:
            print(f"NOT loading corpus into memory: {corpus.name} ...")

        # what should be shown in the frequencies space to begin with?
        if getattr(corpus, "initial_table", False):
            display = json.loads(corpus.initial_table)
        else:
            display = dict(show="p", subcorpora="file")
        print(f"Generating an initial table for {corpus.name} using {display}")
        initial_table = buzz_corpus.table(**display)
        tables[corpus.slug] = initial_table
    return corpora, tables
f302b2529402b4ed75fa554ef915d7c117bca149
3,656,491
import numpy as np


def compute_CD_projected_psth(units, time_period=None):
    """
    Routine for Coding Direction computation on all the units in the specified unit_keys
    Coding Direction is calculated in the specified time_period
    :param: unit_keys - list of unit_keys
    :return: coding direction unit-vector,
             contra-trials CD projected trial-psth,
             ipsi-trials CD projected trial-psth
             psth time-stamps
    """
    unit_hemi = (ephys.ProbeInsertion.InsertionLocation * experiment.BrainLocation
                 & units).fetch('hemisphere')
    if len(set(unit_hemi)) != 1:
        raise Exception('Units from both hemispheres found')
    else:
        unit_hemi = unit_hemi[0]

    session_key = experiment.Session & units
    if len(session_key) != 1:
        raise Exception('Units from multiple sessions found')

    # -- the computation part
    # get units and trials - ensuring they have trial-spikes
    contra_trials = (TrialCondition().get_trials(
        'good_noearlylick_right_hit' if unit_hemi == 'left' else 'good_noearlylick_left_hit')
        & session_key & ephys.Unit.TrialSpikes).fetch('KEY')
    ipsi_trials = (TrialCondition().get_trials(
        'good_noearlylick_left_hit' if unit_hemi == 'left' else 'good_noearlylick_right_hit')
        & session_key & ephys.Unit.TrialSpikes).fetch('KEY')

    # get per-trial unit psth for all units - unit# x (trial# x time)
    contra_trial_psths, contra_edges = zip(*(compute_unit_psth(unit, contra_trials, per_trial=True)
                                             for unit in units))
    ipsi_trial_psths, ipsi_edges = zip(*(compute_unit_psth(unit, ipsi_trials, per_trial=True)
                                         for unit in units))

    # compute trial-ave unit psth
    contra_psths = zip((p.mean(axis=0) for p in contra_trial_psths), contra_edges)
    ipsi_psths = zip((p.mean(axis=0) for p in ipsi_trial_psths), ipsi_edges)

    # compute coding direction
    cd_vec = compute_coding_direction(contra_psths, ipsi_psths, time_period=time_period)

    # get time vector, relying on all units PSTH shares the same time vector
    time_stamps = contra_edges[0]

    # get coding projection per trial - trial# x unit# x time
    contra_psth_per_trial = np.dstack(contra_trial_psths)
    ipsi_psth_per_trial = np.dstack(ipsi_trial_psths)

    proj_contra_trial = np.vstack(np.dot(tr_u, cd_vec) for tr_u in contra_psth_per_trial)  # trial# x time
    proj_ipsi_trial = np.vstack(np.dot(tr_u, cd_vec) for tr_u in ipsi_psth_per_trial)      # trial# x time

    return cd_vec, proj_contra_trial, proj_ipsi_trial, time_stamps, unit_hemi
44025b200855cb685efa052e106b4b5a1ed47b6e
3,656,492
import struct


def _tvos_extension_impl(ctx):
    """Implementation of the `tvos_extension` Skylark rule."""
    binary_artifact = binary_support.get_binary_provider(
        ctx.attr.deps, apple_common.AppleExecutableBinary).binary
    deps_objc_provider = binary_support.get_binary_provider(
        ctx.attr.deps, apple_common.AppleExecutableBinary).objc
    additional_providers, legacy_providers, additional_outputs = bundler.run(
        ctx,
        "TvosExtensionArchive", "tvOS extension",
        ctx.attr.bundle_id,
        binary_artifact=binary_artifact,
        deps_objc_providers=[deps_objc_provider],
    )

    return struct(
        files=additional_outputs,
        providers=[
            TvosExtensionBundleInfo(),
        ] + additional_providers,
        **legacy_providers
    )
e1bbd3711e7b449fdb23ebb6bbb755c4dbbe14c9
3,656,493
import copy


def simplify_graph(G):
    """remove the scores, so the cycle_exists() function can work"""
    graph = copy.deepcopy(G)
    simplified = dict((k, graph[k][0]) for k in graph)

    # add dummy edges, so the cycle_exists() function works
    for source in simplified.keys():
        for target in simplified[source]:
            if target not in simplified:
                simplified[target] = []
    return simplified
fc9b052c83ce500d20842367b3b6f011268a5a7d
3,656,494
from math import floor, sqrt


def Run_INCR(num_vertices, edge_density, algorithm_name, k, init_tree=None):
    """
    Initialize and run the MVA algorithm
    """
    edges_bound = int(edge_density * (num_vertices * (num_vertices - 1) / 2))
    k = max(1, k * edges_bound)

    runner = runner_factory(num_vertices, algorithm_name, None,
                            edges_bound=edges_bound, edge_density=edge_density, k=k)

    randomizer = Randomizer(2 * num_vertices, runner["Parameters"]["seed"])
    with Timer("t_expand_cliques", runner["Times"]):
        if init_tree == "ktree":
            ktree_k = 1 / 2 * (2 * num_vertices - 1
                               - sqrt(((2 * num_vertices - 1) * (2 * num_vertices - 1)) - (8 * edges_bound)))
            ktree_k = int(floor(ktree_k))
            k_edges = (num_vertices - ktree_k - 1) * ktree_k + (ktree_k * (ktree_k + 1) / 2)
            p_mva = init_k_tree_incr(runner["Parameters"]["n"], ktree_k, randomizer)
            print("- Init with " + str(ktree_k) + "-tree:")
        elif init_tree == "tree":
            p_mva = expand_tree(runner["Parameters"]["n"], randomizer)
            print("- Expand tree:")
        else:
            p_mva = expand_cliques(runner["Parameters"]["n"], randomizer)
            print("- Expand cliques:")
        print(p_mva)

    with Timer("t_split_edges", runner["Times"]):
        loops = split_edges_k(p_mva, runner["Parameters"]["edges_bound"], randomizer, k)
        print("- Split edges:")

    runner["Stats"]["total"] = runner["Times"]["t_split_edges"] + runner["Times"]["t_expand_cliques"]
    runner["Stats"]["loops%"] = loops / edges_bound
    print("  loops:", runner["Stats"]["loops%"])
    print(p_mva)

    return calculate_mva_statistics(p_mva, runner, randomizer, num_vertices)
4b64891d773f8e5f43833984727d514e089937cb
3,656,495
import os
import subprocess


def create_nrrd_from_dicoms(image, patient_id):
    """
    Reads a folder that contains multiple DICOM files and converts the input
    into a single nrrd file using a command line app from MITK or MITK Phenotyping.

    Input:
     * path to one dicom (others are automatically found.)
     * Patient ID

    Output:
     Creates a single nrrd file with the path:
     $target_path / patient_id + '_ct_scan.nrrd'
    """
    target_path = os.path.join(path_to_nrrds, patient_id)
    target_name = os.path.join(target_path, patient_id + "_ct_scan.nrrd")
    os.makedirs(target_path, exist_ok=True)

    cmd_string = r"MitkCLDicom2Nrrd " + \
                 "-i \"" + image + "\"" \
                 " -o \"" + target_name + "\""
    print(cmd_string)

    a = subprocess.Popen(cmd_string, shell=True, cwd=path_to_executables)
    a.wait()
    return target_name
f930d9ccfab7c20d8afb80fb0b9a422df8bc77e8
3,656,496
def _two_point_interp(times, altitudes, horizon=0*u.deg):
    """
    Do linear interpolation between two ``altitudes`` at
    two ``times`` to determine the time where the altitude
    goes through zero.

    Parameters
    ----------
    times : `~astropy.time.Time`
        Two times for linear interpolation between

    altitudes : array of `~astropy.units.Quantity`
        Two altitudes for linear interpolation between

    horizon : `~astropy.units.Quantity`
        Solve for the time when the altitude is equal to
        reference_alt.

    Returns
    -------
    t : `~astropy.time.Time`
        Time when target crosses the horizon
    """
    if not isinstance(times, Time):
        return MAGIC_TIME
    else:
        slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd)
        return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value, format='jd')
b7b9bd53464d17c9e8fc51006a938b4c6b9cfac1
3,656,497
import string


def setup_sample_data(no_of_records):
    """Generate the given number of sample data with 'id', 'name', and 'dt'"""
    rows_in_database = [{'id': counter,
                         'name': get_random_string(string.ascii_lowercase, 20),
                         'dt': '2017-05-03'}
                        for counter in range(0, no_of_records)]
    return rows_in_database
65659f931a103ea80dce19eabe277bba88653279
3,656,498
from io import StringIO
import csv


def generate_csv_string(csv_data):
    """ Turn 2d string array into a string representing a csv file """
    output_buffer = StringIO()
    writer = csv.writer(output_buffer)

    csv_data = equalize_array(csv_data)
    csv_data = utf_8_encode_array(csv_data)

    for row in csv_data:
        writer.writerow(row)

    body = output_buffer.getvalue()
    output_buffer.close()
    return body
70861f363ed3d8445b38f448ffdea9ea1d479239
3,656,499