content: string, lengths 35 to 762k
sha1: string, lengths 40 to 40
id: int64, 0 to 3.66M
import json


def solve_with_log(board, out_fname):
    """Wrapper for solve: write log to out_fname"""
    log = []
    ret = solve(board, log)
    with open(out_fname, 'w') as f:
        f.write(json.dumps({'model': log}, indent=4))
    return ret
c550980f252df724d68f9eb22159463e361997bc
3,650,800
def discrepancy(sample, bounds=None):
    """Discrepancy.

    Compute the centered discrepancy on a given sample.
    It is a measure of the uniformity of the points in the parameter space.
    The lower the value is, the better the coverage of the parameter space is.

    Parameters
    ----------
    sample : array_like (n_samples, k_vars)
        The sample to compute the discrepancy from.
    bounds : tuple or array_like ([min, k_vars], [max, k_vars])
        Desired range of transformed data. The transformation applies the
        bounds on the sample and not the theoretical space, unit cube. Thus
        min and max values of the sample will coincide with the bounds.

    Returns
    -------
    discrepancy : float
        Centered discrepancy.

    References
    ----------
    [1] Fang et al. "Design and modeling for computer experiments",
        Computer Science and Data Analysis Series, 2006.
    """
    sample = np.asarray(sample)
    n_sample, dim = sample.shape

    # Sample scaling from bounds to unit hypercube
    if bounds is not None:
        min_ = bounds.min(axis=0)
        max_ = bounds.max(axis=0)
        sample = (sample - min_) / (max_ - min_)

    abs_ = abs(sample - 0.5)
    disc1 = np.sum(np.prod(1 + 0.5 * abs_ - 0.5 * abs_ ** 2, axis=1))

    prod_arr = 1
    for i in range(dim):
        s0 = sample[:, i]
        prod_arr *= (1 + 0.5 * abs(s0[:, None] - 0.5) + 0.5 * abs(s0 - 0.5)
                     - 0.5 * abs(s0[:, None] - s0))
    disc2 = prod_arr.sum()

    c2 = ((13.0 / 12.0) ** dim - 2.0 / n_sample * disc1
          + 1.0 / (n_sample ** 2) * disc2)

    return c2
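A quick usage sketch of the function above (illustrative only; it assumes NumPy is available as np, as the function body already does):

import numpy as np

np.random.seed(0)
sample = np.random.random_sample((50, 2))  # 50 points in the unit square
print(discrepancy(sample))  # lower values indicate a more uniform sample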
f54cf5efa3cf12410d5522971983d41ea767767f
3,650,801
def rz(psi, r):
    """
    Wrapper for ERFA function ``eraRz``.

    Parameters
    ----------
    psi : double array
    r : double array

    Returns
    -------
    r : double array

    Notes
    -----
    The ERFA documentation is below.

    - - - - - -
     e r a R z
    - - - - - -

    Rotate an r-matrix about the z-axis.

    Given:
       psi    double          angle (radians)

    Given and returned:
       r      double[3][3]    r-matrix, rotated

    Notes:

    1) Calling this function with positive psi incorporates in the
       supplied r-matrix r an additional rotation, about the z-axis,
       anticlockwise as seen looking towards the origin from positive z.

    2) The additional rotation can be represented by this matrix:

           (  + cos(psi)   + sin(psi)     0  )
           (                                 )
           (  - sin(psi)   + cos(psi)     0  )
           (                                 )
           (       0            0         1  )

    Copyright (C) 2013-2017, NumFOCUS Foundation.
    Derived, with permission, from the SOFA library.  See notes at end of file.
    """
    r = ufunc.rz(psi, r, r)
    return r
1ea4e9322ba187e91d3b976d74d416ae99a74ee6
3,650,802
import locale


def _get_ticklabels(band_type, kHz, separator):
    """
    Return a list with all tick labels for octave or third octave bands cases.
    """
    if separator is None:
        separator = locale.localeconv()['decimal_point']
    if band_type == 'octave':
        if kHz is True:
            ticklabels = TICKS_OCTAVE_KHZ
        else:
            ticklabels = TICKS_OCTAVE
    else:
        if kHz is True:
            ticklabels = TICKS_THIRD_OCTAVE_KHZ
        else:
            ticklabels = TICKS_THIRD_OCTAVE
    ticklabels = _set_separator(ticklabels, separator)
    return ticklabels
95ebdc670a23fdb8561a431e863901df6734fdb9
3,650,803
def SpearmanP(predicted, observed):
    """abstracts out p from stats.spearmanr"""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    coef, p = stats.spearmanr(np.squeeze(predicted).astype(float),
                              np.squeeze(observed).astype(float))
    return p
41986483ea3d466d94af5c86cedee62165d81d98
3,650,804
def get_zebra_route_type_by_name(route_type='BGP'):
    """
    Returns the constant value for Zebra route type named "ZEBRA_ROUTE_*"
    from its name.

    See "ZEBRA_ROUTE_*" constants in "ryu.lib.packet.zebra" module.

    :param route_type: Route type name (e.g., Kernel, BGP).
    :return: Constant value for Zebra route type.
    """
    return getattr(zebra, "ZEBRA_ROUTE_%s" % route_type.upper())
8cdc3a8384f71c4c04172a8c37f51e3789929e42
3,650,805
def preprocess(arr):
    """Preprocess image array with simple normalization.

    Arguments:
    ----------
        arr (np.array): image array

    Returns:
    --------
        arr (np.array): preprocessed image array
    """
    arr = arr / 255.0
    arr = arr * 2.0 - 1.0
    return arr
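A tiny check of the [0, 255] to [-1, 1] mapping performed above (illustrative only; assumes NumPy as np):

import numpy as np

print(preprocess(np.array([0.0, 127.5, 255.0])))  # -> [-1.  0.  1.]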
3bccf2f4433c4da62954db4f25f5e9bfabc03c3a
3,650,806
def remove_const(type):
    """removes const from the type definition

    If type is not const type, it will be returned as is
    """
    nake_type = remove_alias(type)
    if not is_const(nake_type):
        return type
    else:
        return nake_type.base
b00d7cca79222d5ac2b6a12019b73a8169df96b7
3,650,807
def populate_institute_form(form, institute_obj):
    """Populate institute settings form

    Args:
        form(scout.server.blueprints.institutes.models.InstituteForm)
        institute_obj(dict) An institute object
    """
    # get all other institutes to populate the select of the possible collaborators
    institutes_tuples = []
    for inst in store.institutes():
        if not inst["_id"] == institute_obj["_id"]:
            institutes_tuples.append((inst["_id"], inst["display_name"]))

    form.display_name.default = institute_obj.get("display_name")
    form.institutes.choices = institutes_tuples
    form.coverage_cutoff.default = institute_obj.get("coverage_cutoff")
    form.frequency_cutoff.default = institute_obj.get("frequency_cutoff")

    # collect all available default HPO terms and populate the pheno_groups form select with these values
    default_phenotypes = [choice[0].split(" ")[0] for choice in form.pheno_groups.choices]
    if institute_obj.get("phenotype_groups"):
        for key, value in institute_obj["phenotype_groups"].items():
            if key not in default_phenotypes:
                custom_group = " ".join(
                    [key, ",", value.get("name"), "( {} )".format(value.get("abbr"))]
                )
                form.pheno_groups.choices.append((custom_group, custom_group))

    # populate gene panels multiselect with panels from institute
    available_panels = list(store.latest_panels(institute_obj["_id"]))
    # And from institute's collaborators
    for collaborator in institute_obj.get("collaborators", []):
        available_panels += list(store.latest_panels(collaborator))
    panel_set = set()
    for panel in available_panels:
        panel_set.add((panel["panel_name"], panel["display_name"]))
    form.gene_panels.choices = list(panel_set)
    return default_phenotypes
836850a55a02b199b2c7607a236f77e6b95051e0
3,650,808
def closestMedioidI(active_site, medioids, distD):
    """
    returns the index of the closest medioid in medioids to active_site

    input: active_site, an ActiveSite instance
           medioids, a list of ActiveSite instances
           distD, a dictionary of distances
    output: the index of the ActiveSite closest to active_site in medioids
    """
    closest = (float('Inf'), None)
    for i, medioid in enumerate(medioids):
        thisDist = distD[frozenset([active_site, medioid])]
        if thisDist < closest[0]:
            closest = (thisDist, i)
    return closest[1]
379f98a84751c0a392f8f9b1703b89b299979676
3,650,809
from typing import Dict
from typing import Any
import traceback
import sys


def watchPoint(filename, lineno, event="call"):
    """whenever we hit this line, print a stack trace.

    event='call' for lines that are function definitions, like what a
    profiler gives you. Switch to 'line' to match lines inside functions.
    Execution speed will be much slower."""
    seenTraces: Dict[Any, int] = {}  # trace contents : count

    def trace(frame, ev, arg):
        if ev == event:
            if (frame.f_code.co_filename, frame.f_lineno) == (filename, lineno):
                stack = ''.join(traceback.format_stack(frame))
                if stack not in seenTraces:
                    print("watchPoint hit")
                    print(stack)
                    seenTraces[stack] = 1
                else:
                    seenTraces[stack] += 1
        return trace

    sys.settrace(trace)
    # atexit, print the frequencies?
5c7017a180e254f5651c6cf737ca798d570d669c
3,650,810
def no_op_job():
    """
    A no-op parsl.python_app to return a future for a job that already
    has its outputs.
    """
    return 0
ad8d6379ba35dae14ce056d9900fb6e62c769d85
3,650,811
def identity(dim, shape=None):
    """Return identity operator with appropriate shape.

    Parameters
    ----------
    dim : int
        Dimension of real space.
    shape : int (optional)
        Size of the unitary part of the operator.
        If not provided, U is set to None.

    Returns
    -------
    id : PointGroupElement
    """
    R = ta.identity(dim, int)
    if shape is not None:
        U = np.eye(shape)
    else:
        U = None
    return PointGroupElement(R, False, False, U)
0cd40246f4ccf2805a852dcea09d451e7f8c63a5
3,650,812
def configure_checkout_session(request):
    """
    Configure the payment session for Stripe. Return the Session ID.

    Key attributes are:

    - mode: payment (for one-time charge) or subscription
    - line_items: including price_data because users configure the donation price.

    TODOs

    - Standard amounts could use active Prices, rather than ad-hoc price_data.
    - Tie Stripe customers to site User accounts.
        - If a user is logged in, we can create the session for the correct customer.
        - Stripe's documented flows are VERY keen that we create the customer first,
          although the session will do that if we don't.
    - Allow selecting currency. (Smaller task.)
      Users receive an additional charge making payments in foreign currencies.
      Stripe will convert all payments without further charge.
    """
    # Form data:
    # - The interval: which determines the Product and the mode.
    # - The amount: which goes to the Price data.
    form = PaymentForm(request.POST)
    if not form.is_valid():
        data = {
            'success': False,
            'error': form.errors
        }
        return JsonResponse(data)

    amount = form.cleaned_data["amount"]
    interval = form.cleaned_data["interval"]

    product_details = settings.PRODUCTS[interval]
    is_subscription = product_details.get('recurring', True)

    price_data = {
        'currency': 'usd',
        'unit_amount': amount * 100,
        'product': product_details['product_id']
    }
    if is_subscription:
        price_data['recurring'] = {
            'interval': product_details['interval'],
            "interval_count": product_details["interval_count"],
        }

    try:
        session = stripe.checkout.Session.create(
            payment_method_types=['card'],
            line_items=[{'price_data': price_data, 'quantity': 1}],
            mode='subscription' if is_subscription else 'payment',
            success_url=request.build_absolute_uri(
                reverse('fundraising:thank-you')
            ),
            cancel_url=request.build_absolute_uri(
                reverse('fundraising:index')
            ),
            # TODO: Drop this when updating API.
            stripe_version="2020-08-27",
        )
        return JsonResponse({'success': True, "sessionId": session["id"]})
    except Exception as e:
        logger.exception('Error configuring Stripe session.')
        return JsonResponse({'success': False, "error": str(e)})
f53bd6ecd488d214d3ebc43a2c049bf4315c1494
3,650,813
import os
import json


def load_schemas():
    """Return all of the schemas in this directory in a dictionary where
    the keys are the filename (without the .json extension) and the values
    are the JSON schemas (in dictionary format)

    :raises jsonschema.exceptions.SchemaError if any of the JSON files in
        this directory are not valid (Draft 7) JSON schemas"""
    schemas = {}
    for filename in os.listdir(THIS_DIR):
        if (
            os.path.isfile(os.path.join(THIS_DIR, filename))
            and os.path.splitext(filename)[1].lower() == ".json"
        ):
            key = os.path.splitext(filename)[0]
            with open(os.path.join(THIS_DIR, filename)) as file_obj:
                value = json.load(file_obj)
            Draft7Validator.check_schema(value)
            schemas[key] = value
    return schemas
cbd14a4cdcc37f7fc861e00de84928abd3f8a557
3,650,814
from typing import Optional
from typing import Union
import torch
from pathlib import Path
import json


def load_separator(
    model_str_or_path: str = "umxhq",
    targets: Optional[list] = None,
    niter: int = 1,
    residual: bool = False,
    wiener_win_len: Optional[int] = 300,
    device: Union[str, torch.device] = "cpu",
    pretrained: bool = True,
    filterbank: str = "torch",
):
    """Separator loader

    Args:
        model_str_or_path (str): Model name or path to model _parent_ directory
            E.g. the following files are assumed to be present when loading
            `model_str_or_path='mymodel', targets=['vocals']`:
            'mymodel/separator.json', 'mymodel/vocals.pth', 'mymodel/vocals.json'.
            Defaults to `umxhq`.
        targets (list of str or None): list of target names. When loading a
            pre-trained model, `targets` can be None as all targets will be loaded.
        niter (int): Number of EM steps for refining initial estimates
            in a post-processing stage. `--niter 0` skips this step altogether
            (and thus makes separation significantly faster).
            More iterations can get better interference reduction at the price
            of artifacts. Defaults to `1`.
        residual (bool): Computes a residual target, for custom separation
            scenarios when not all targets are available (at the expense of
            slightly less performance). E.g. vocal/accompaniment.
            Defaults to `False`.
        wiener_win_len (int): The size of the excerpts (number of frames) on
            which to apply filtering independently. This means assuming
            time varying stereo models and localization of sources.
            None means not batching but using the whole signal. It comes at the
            price of a much larger memory usage. Defaults to `300`.
        device (str): torch device, defaults to `cpu`
        pretrained (bool): determines if loading pre-trained weights
        filterbank (str): filterbank implementation method.
            Supported are `['torch', 'asteroid']`. `torch` is about 30% faster
            compared to `asteroid` on large FFT sizes such as 4096. However,
            asteroid's stft can be exported to onnx, which makes it practical
            for deployment.
    """
    model_path = Path(model_str_or_path).expanduser()

    # when path exists, we assume its a custom model saved locally
    if model_path.exists():
        if targets is None:
            raise UserWarning("For custom models, please specify the targets")

        target_models = load_target_models(
            targets=targets, model_str_or_path=model_path, pretrained=pretrained
        )

        with open(Path(model_path, "separator.json"), "r") as stream:
            enc_conf = json.load(stream)

        separator = model.Separator(
            target_models=target_models,
            niter=niter,
            residual=residual,
            wiener_win_len=wiener_win_len,
            sample_rate=enc_conf["sample_rate"],
            n_fft=enc_conf["nfft"],
            n_hop=enc_conf["nhop"],
            nb_channels=enc_conf["nb_channels"],
            filterbank=filterbank,
        ).to(device)
    # otherwise we load the separator from torchhub
    else:
        hub_loader = getattr(openunmix, model_str_or_path)
        separator = hub_loader(
            targets=targets,
            device=device,
            pretrained=True,
            niter=niter,
            residual=residual,
            filterbank=filterbank,
        )

    return separator
bb9d0ecf47174ebac9181710a1bc4689ca122ecf
3,650,815
from datetime import datetime


def transform_datetime(date_str, site):
    """
    Convert the raw date string to a normalized date format according to the site.

    :param date_str: raw date string
    :param site: site identifier
    :return: converted date
    """
    result = None
    if site in SITE_MAP:
        if SITE_MAP[site] in (SiteType.SINA, SiteType.HACKERNEWS):
            try:
                time_int = int(date_str)
                result = datetime.fromtimestamp(time_int).strftime(DATE_FMT)
            except Exception as e:
                result = parse(date_str).strftime(DATE_FMT)
        elif SITE_MAP[site] == SiteType.TENCENT:
            result = date_str
        elif SITE_MAP[site] == SiteType.TUICOOL:
            result = date_str
        elif SITE_MAP[site] == SiteType.HACKER:
            result = date_str
        elif SITE_MAP[site] == SiteType.DMZJ:
            result = parse(date_str).strftime(DATE_FMT)
        elif SITE_MAP[site] == SiteType.ACGMH:
            result = parse(date_str).strftime(DATE_FMT)
        elif SITE_MAP[site] == SiteType.CTOLIB:
            result = parse(date_str).strftime(DATE_FMT)
        elif date_str.strip() == '':
            result = datetime.now().strftime(DATE_FMT)
        else:
            result = parse(date_str).strftime(DATE_FMT)
    return result
647ab633b0d5ce0887042ef42a762f1bc3196242
3,650,816
import re
import numpy


def ParseEventsForTTLs(eventsFileName, TR=2.0, onset=False, threshold=5.0):
    """
    Parses the events file from Avotec for TTLs. Use if history file is not available.
    The events file does not contain save movie start/stops, so use the history file if possible.

    @param eventsFileName: name of events file from avotec
    @param TR: TR duration in seconds
    @param onset: use the TTL pulse onset instead of the offset for timestamps?
    @param threshold: multiple of the TR interval to use as a threshold as a break between runs
    @type eventsFileName: str
    @type TR: float
    @type onset: bool
    @type threshold: float
    @return: timestamps of TTLs in each run, each run is a list of TTL timestamps and the number of TTLs
    @rtype: list<tuple<list<float>, int>>
    """
    eventsFile = open(eventsFileName, 'r')
    TTLtoken = 'S' if onset else 's'
    TTLs = []
    lastTime = (0, 0, 0, 0)
    duplicates = 0
    runs = []
    thisRun = []

    line = eventsFile.readline()
    while line != '':
        tokens = line.split()
        if len(tokens) > 0 and tokens[-1] == TTLtoken:
            time = []
            for token in re.split('[:\.]', re.match('[0-9\. ]+:[0-9\. ]+:[0-9 ]+\.[0-9]+', line).group()):
                if (len(token) > 2):  # the milliseconds have rather high precision
                    time.append(int(numpy.round(float(token) * 0.001)))
                else:
                    time.append(int(token))
            time = tuple(time)
            if (TimeToSeconds(time) - TimeToSeconds(lastTime) > 0.1):
                # long enough of an interval since last one such that it's not a duplicate
                TTLs.append(time)
                lastTime = time
            else:
                duplicates += 1
        line = eventsFile.readline()

    nTRs = 1
    thisRun.append(TTLs[0])
    for i in range(1, len(TTLs) - 1):
        this = TTLs[i]
        last = TTLs[i - 1]
        dt = TimeToSeconds(this) - TimeToSeconds(last)
        if dt > threshold * TR:
            runs.append((thisRun, nTRs))
            thisRun = [this]
            nTRs = 1
        else:
            thisRun.append(this)
            nTRs += 1
    runs.append((thisRun, nTRs + 1))  # account for last run without a faraway TTL

    eventsFile.close()

    print('{} duplicated TTLs'.format(duplicates))
    for i in range(len(runs)):
        duration = TimeToSeconds(runs[i][0][-1]) - TimeToSeconds(runs[i][0][0])
        expectedTRs = int(numpy.round(duration / TR))
        if (i == len(runs) - 1):
            expectedTRs += 1  # account for last run without a faraway TTL
        print('Run {} expected {} TTLs from duration, actual recorded {} TTLs'.format(i + 1, expectedTRs, len(runs[i][0])))

    return runs
59fa31df066424df3625e55496f0ccefa39f2d64
3,650,817
def _to_native_string(string, encoding='ascii'):
    """Given a string object, regardless of type, returns a representation
    of that string in the native string type, encoding and decoding where
    necessary. This assumes ASCII unless told otherwise.
    """
    if isinstance(string, str):
        out = string
    else:
        out = string.decode(encoding)
    return out
b50fd0fc62b2cfc024c847b98e1f85b4b67d07e3
3,650,818
def load(path: str) -> model_lib.Model:
    """Deserializes a TensorFlow SavedModel at `path` to a `tff.learning.Model`.

    Args:
      path: The `str` path pointing to a SavedModel.

    Returns:
      A `tff.learning.Model`.
    """
    py_typecheck.check_type(path, str)
    if not path:
        raise ValueError('`path` must be a non-empty string, cannot deserialize '
                         'models without an output path.')
    return _LoadedSavedModel(tf.saved_model.load(path))
1bd16ed7b4a7955f2a78fc638e896bbd6d1ee5ac
3,650,819
import os


def plot_feature_importance(obj, top_n=None, save_path=None):
    """
    Compute the feature importance of a LightGBM model and plot it as bar charts.

    Parameters
    ----------
    obj: lgbm object or DataFrame
        A trained LightGBM model, or a pre-computed feature importance DataFrame
    top_n: int, default None
        Show only the top N features; if omitted, all features are shown.
        For readability, limiting is recommended when there are more than 30 features.
    save_path: str, default None
        Path where the figure is saved

    Returns
    -------
    df_fi: DataFrame
        Feature importance of the model, with the following columns:
        'split': number of times the feature is selected as a split feature
        'total_gain': total gain on the loss function when used as a split feature
        'cover': average number of samples covered per split on this feature
        'avg_gain': average gain on the loss function per split on this feature
        'split_weight': share of this feature's splits in the total number of splits
        'gain_weight': share of this feature's gain in the total split gain
    """
    if obj.__class__.__name__ == 'LGBMClassifier' or obj.__class__.__name__ == 'Booster':
        if obj.__class__.__name__ == 'LGBMClassifier':
            booster = obj.booster_
        else:
            booster = obj
        df_fi = get_feature_importance(booster)
        df_fi['avg_gain'] = df_fi['total_gain'] / df_fi['split']
        df_fi['split_weight'] = df_fi['split'] / df_fi['split'].sum()
        df_fi['gain_weight'] = df_fi['total_gain'] / df_fi['total_gain'].sum()
        df_fi['split_rank'] = df_fi['split'].rank(method='first', ascending=False).values.reshape((-1,))
        df_fi['gain_rank'] = df_fi['total_gain'].rank(method='first', ascending=False).values.reshape((-1,))
        df_fi['avg_gain_rank'] = df_fi['avg_gain'].rank(method='first', ascending=False).values.reshape((-1,))
        df_fi['cover_rank'] = df_fi['cover'].rank(method='first', ascending=False).values.reshape((-1,))
    elif isinstance(obj, pd.DataFrame):
        df_fi = obj
    else:
        raise ValueError('Unknown object type')

    if top_n is not None:
        df_gain_fi = df_fi.loc[df_fi['gain_rank'] <= top_n, :].copy().sort_values(by='gain_rank', ascending=False)
        df_split_fi = df_fi.loc[df_fi['split_rank'] <= top_n, :].copy().sort_values(by='split_rank', ascending=False)
        df_cover_fi = df_fi.loc[df_fi['cover_rank'] <= top_n, :].copy().sort_values(by='cover_rank', ascending=False)
        title1 = 'Weight of Split Gain (Top {0})'.format(top_n)
        title2 = 'Weight of Split Count (Top {0})'.format(top_n)
        title3 = 'Sample Coverage across all splits (Top {0})'.format(top_n)
    else:
        df_gain_fi = df_fi.copy().sort_values(by='gain_rank', ascending=False)
        df_split_fi = df_fi.copy().sort_values(by='split_rank', ascending=False)
        df_cover_fi = df_fi.copy().sort_values(by='cover_rank', ascending=False)
        title1 = 'Weight of Split Gain'
        title2 = 'Weight of Split Count'
        title3 = 'Sample coverage across all splits'

    plt.figure(figsize=(4, 9), dpi=200)

    plt.subplot(3, 1, 1)
    plt.barh(np.arange(df_gain_fi.shape[0]), df_gain_fi['gain_weight'], height=0.6, color='lightskyblue')
    for i, var in enumerate(df_gain_fi['var_name']):
        plt.annotate(var, xy=(0.001, i), va='center', ha='left', fontsize=4, color='black', fontweight='normal')
    ax = plt.gca()
    for at in ['left', 'right', 'bottom', 'top']:
        ax.spines[at].set_linewidth(0.7)
    plt.xticks(fontsize=5)
    plt.yticks([])
    plt.xlabel('gain weight', fontsize=5)
    plt.title(title1, fontsize=6)

    plt.subplot(3, 1, 2)
    plt.barh(np.arange(df_gain_fi.shape[0]), df_split_fi['split_weight'], height=0.6, color='lightgreen')
    for i, var in enumerate(df_split_fi['var_name']):
        plt.annotate(var, xy=(0.001, i), va='center', ha='left', fontsize=4, color='black', fontweight='normal')
    ax = plt.gca()
    for at in ['left', 'right', 'bottom', 'top']:
        ax.spines[at].set_linewidth(0.7)
    plt.xticks(fontsize=5)
    plt.yticks([])
    plt.xlabel('split weight', fontsize=5)
    plt.title(title2, fontsize=6)
    plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=2.0)

    plt.subplot(3, 1, 3)
    plt.barh(np.arange(df_gain_fi.shape[0]), df_cover_fi['cover'], height=0.6, color='Salmon')
    for i, var in enumerate(df_cover_fi['var_name']):
        plt.annotate(var, xy=(0.001, i), va='center', ha='left', fontsize=4, color='black', fontweight='normal')
    ax = plt.gca()
    for at in ['left', 'right', 'bottom', 'top']:
        ax.spines[at].set_linewidth(0.7)
    plt.xticks(fontsize=5)
    plt.yticks([])
    plt.xlabel('sample coverage', fontsize=5)
    plt.title(title3, fontsize=6)
    plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=2.0)

    if save_path is not None:
        if save_path.endswith('.png') or save_path.endswith('.jpg'):
            plt.savefig(save_path, bbox_inches='tight')
        elif os.path.isdir(save_path):
            plt.savefig(os.path.join(save_path, 'lgbm_feature_importance.png'), bbox_inches='tight')
        else:
            raise ValueError('No such file or directory: {0}'.format(save_path))

    plt.show()
    plt.close()

    return df_fi
e1d8fc6d05693ad4381281d21e0e5672b54a863e
3,650,820
def parameters_from_object_schema(schema, in_='formData'):
    """Convert object schema to parameters."""
    # We can only extract parameters from schema
    if schema['type'] != 'object':
        return []

    properties = schema.get('properties', {})
    required = schema.get('required', [])

    parameters = []
    for name, property in properties.items():
        parameter = {
            'name': name,
            'in_': in_,
            'required': (name in required),
        }
        parameter.update(property)
        parameter = Parameter(**parameter)
        parameters.append(parameter)
    parameters = sorted(parameters, key=lambda x: x['name'])
    return parameters
7508fb066d6924fc0af4a10338636b70ef64b9b2
3,650,821
import os


def env_vars(request):
    """Sets environment variables to use .env and config.json files."""
    os.environ["ENV"] = "TEST"
    os.environ["DOTENV_FILE"] = str(DOTENV_FILE)
    os.environ["CONFIG_FILE"] = str(CONFIG_FILE)
    os.environ["DATABASE_URL"] = get_db_url()
    return True
22f4953d9f2defd4f2af159b821e408bb60e7db7
3,650,822
def any_toggle_enabled(*toggles):
    """
    Return a view decorator for allowing access if any of the given toggles
    are enabled. Example usage:

    @toggles.any_toggle_enabled(REPORT_BUILDER, USER_CONFIGURABLE_REPORTS)
    def delete_custom_report():
        pass

    """
    def decorator(view_func):
        @wraps(view_func)
        def wrapped_view(request, *args, **kwargs):
            for t in toggles:
                if (
                    (hasattr(request, 'user') and t.enabled(request.user.username))
                    or (hasattr(request, 'domain') and t.enabled(request.domain))
                ):
                    return view_func(request, *args, **kwargs)
            raise Http404()
        return wrapped_view
    return decorator
25f48e9227f5c6ff74ae9874ac0b3b7ad010861b
3,650,823
def moguls(material, height, randomize, coverage, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=True, bf=True, optimize=True, xtraParams=defaultXtraParams):
    """moguls(material, radius, randomize, det, [e0=20.0], [withPoisson=True], [nTraj=defaultNumTraj], [dose = 120.0], [sf=True], [bf=True], [optimize=True], [xtraParams={}])

    Monte Carlo simulate a spectrum from a rough surface made up of close packed spheres.
    + material - Composition of material
    + height - mogul height = 0.5 * mogul radius
    + randomize - randomize the beam start position?
    + coverage - fractional likelihood of each bump existing (0.0 to 1.0)"""
    tmp = u"MC simulation of a %0.2lg um %d%% %smogul bed of %s at %0.1f keV%s%s" % (
        1.0e6 * height, int(100.0 * coverage), (" rand " if randomize else " "),
        material, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
    return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildMoguls,
                {"Coverage": coverage, "Optimize": optimize, "Height": height,
                 "Material": material, "Randomize": randomize},
                xtraParams)
182aa248962877636e18860b46d20335eb535074
3,650,824
def link_datasets(yelp_results, dj_df, df_type="wages"):
    """
    (Assisted by Record Linkage Toolkit library and documentation)
    This function compares the Yelp query results to database results
    and produces the best matches based on computing the qgram score.
    Depending on the specific database table characteristics, the qgram
    calculation will be between the zip_code, business name, address strings,
    latitude, longitude, or a combination of those characteristics.

    Inputs:
        - yelp_results: a pandas dataframe of yelp business results based on
          a user's input
        - dj_df: a pandas dataframe of django results. Ex. labour statistics,
          healthcode violations, Divvy, etc.
        - df_type: a string of which specific dataframe is being passed to be
          compared to the Yelp results

    Outputs:
        - link: a tuple containing the indices of Yelp query results dataframe
          and the database dataframe AND the best matches qgram scores
    """
    # set thresholds for comparing strings using qgram method
    name_thresh = 0.55
    addr_thresh = 0.55
    strong_addr_thresh = 0.90

    # initialize a Record Linkage comparison object
    compare = rl.Compare()

    # Labour & Food data comparisons to Yelp are made on zip, business name,
    # and address
    if df_type == "wages" or df_type == "food":
        indexer = rl.BlockIndex(on='zip_code')  # block on zip code
        compare.numeric('zip_code', 'zip_code', method='linear', scale=30.0, label='zip_score')
        compare.string('name', 'name', method='qgram', threshold=name_thresh, label='name_score')
        compare.string('addr', 'addr', method='qgram', threshold=addr_thresh, label='addr_score')
    # Environmental data comparisons to Yelp are made on address
    elif df_type == "enviro":
        indexer = rl.FullIndex()  # no blocking available
        compare.string('addr', 'addr', method='qgram', threshold=strong_addr_thresh, label='addr_score')
    # all other data comparisons to Yelp
    else:
        indexer = rl.FullIndex()

    pairs = indexer.index(yelp_results, dj_df)

    # In addition to above comparisons, ALL data sets are also compared to
    # Yelp based on latitude and longitude
    compare.geo('latitude', 'longitude', 'latitude', 'longitude', method='linear', scale=30.0, label='coord_score')

    # compute record linkage scores
    features = compare.compute(pairs, yelp_results, dj_df)

    # set classification thresholds
    zip_classif_thresh = 1.0
    addr_classif_thresh = 1.0
    coord_classif_thresh = 0.99
    name_classif_thresh = 1.0

    # Classification and final filtering
    if df_type == "wages" or df_type == "food":
        best_matches = features[(features['zip_score'] == zip_classif_thresh) &
                                (features['name_score'] == name_classif_thresh) &
                                (features['addr_score'] == addr_classif_thresh) &
                                (features['coord_score'] >= coord_classif_thresh)]
    elif df_type == "enviro":
        best_matches = features[(features['addr_score'] == addr_classif_thresh) &
                                (features['coord_score'] >= coord_classif_thresh)]
    else:
        best_matches = features[(features['coord_score'] >= coord_classif_thresh)]

    # obtain the index values from best_matches
    index_array = best_matches.index.values
    # create tuple of indices and best matches df
    link = (index_array, best_matches)

    return link
326857d5060ac5cedcac3de90ce284048b2d2fa7
3,650,825
def hello():
    """Say Hello, so that we can check shared code."""
    return b"hello"
7197ed31c5fde419d4607ca1b5dbec7f8cb20608
3,650,826
def loglog_mean_lines(x, ys, axis=0, label=None, alpha=0.1):
    """
    Log-log plot of lines and their mean.
    """
    return _plot_mean_lines(partial(plt.loglog, x), ys, axis, label, alpha)
2f4461ca21c2f8db9ddfd763f474ebc73f3bf636
3,650,827
import osgeo.ogr


def read_lines_from_shapefile(fpath):
    """
    Read coordinates of cutting line segments from an ESRI Shapefile
    containing line features.

    Parameters
    ----------
    fpath
        Name of a file containing coordinates of cutting lines

    Returns
    -------
    list
        List of coordinates of cutting lines
    """
    driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')
    datasource = driver.Open(fpath, 0)
    if datasource is None:
        print('Could not open ' + fpath)
        raise RuntimeError()

    layer = datasource.GetLayer()
    feature = layer.GetNextFeature()
    lines = list()
    while feature:
        geometry = feature.GetGeometryRef()
        line = geometry.GetPoints()
        lines.append(line)
        feature = layer.GetNextFeature()

    return lines
9eca9204a577dc0f7d675703c75ba5d407a0338b
3,650,828
def generate_identifier(endpoint_description: str) -> str:
    """Generate ID for model."""
    return (
        Config.fdk_publishers_base_uri()
        + "/fdk-model-publisher/catalog/"
        + sha1(bytes(endpoint_description, encoding="utf-8")).hexdigest()  # noqa
    )
30bfc15c12b47f637627391a45bb9b5f9355c4f7
3,650,829
def depthFirstSearch(problem):
    """Search the deepest nodes in the search tree first."""
    stack = util.Stack()  # Stack used as fringe list
    stack.push((problem.getStartState(), [], 0))
    return genericSearch(problem, stack)
67452934a29e9857f90b88f3fead67d101468471
3,650,830
import argparse


def parse_cli_args() -> argparse.Namespace:
    """
    Parse arguments passed via Command Line Interface (CLI).

    :return: namespace with arguments
    """
    parser = argparse.ArgumentParser(description='Algorithmic composition of dodecaphonic music.')
    parser.add_argument(
        '-c', '--config_path', type=str, default=None, help='path to configuration file'
    )
    cli_args = parser.parse_args()
    return cli_args
9014ee342b810ec1b63f7ed80811f55b7ed4d00f
3,650,831
def create_app():
    """ Method to init and set up the Flask application """
    flask_app = MyFlask(import_name="dipp_app")
    _init_config(flask_app)
    _setup_context(flask_app)
    _register_blueprint(flask_app)
    _register_api_error(flask_app)
    return flask_app
bfb64ac71fcd076fe26c3b342c33af30370be8db
3,650,832
def find_consumes(method_type):
    """
    Determine mediaType for input parameters in request body.
    """
    if method_type in ('get', 'delete'):
        return None
    return ['application/json']
785e70e41629b0386d8b86f247afaf5bff3b7ba9
3,650,833
def preprocess(text): """ Simple Arabic tokenizer and sentencizer. It is a space-based tokenizer. I use some rules to handle tokenition exception like words containing the preposition 'و'. For example 'ووالدته' is tokenized to 'و والدته' :param text: Arabic text to handle :return: list of tokenized sentences """ try: text = text.decode('utf-8') except(UnicodeDecodeError, AttributeError): pass text = text.strip() tokenizer_exceptions = ["وظف", "وضعها", "وضعه", "وقفنا", "وصفوها", "وجهوا", "والدته", "والده", "وادي", "وضعية", "واجهات", "وفرتها", "وقاية", "وفا", "وزيرنا", "وزارتي", "وجهاها", "واردة", "وضعته", "وضعتها", "وجاهة", "وهمية", "واجهة", "واضعاً", "واقعي", "ودائع", "واعدا", "واع", "واسعا", "ورائها", "وحدها", "وزارتي", "وزارتي", "والدة", "وزرائها", "وسطاء", "وليامز", "وافق", "والدها", "وسم", "وافق", "وجهها", "واسعة", "واسع", "وزنها", "وزنه", "وصلوا", "والدها", "وصولاً", "وضوحاً", "وجّهته", "وضعته", "ويكيليكس", "وحدها", "وزيراً", "وقفات", "وعر", "واقيًا", "وقوف", "وصولهم", "وارسو", "واجهت", "وقائية", "وضعهم", "وسطاء", "وظيفته", "ورائه", "واسع", "ورط", "وظفت", "وقوف", "وافقت", "وفدًا", "وصلتها", "وثائقي", "ويليان", "وساط", "وُقّع", "وَقّع", "وخيمة", "ويست", "والتر", "وهران", "ولاعة", "ولايت", "والي", "واجب", "وظيفتها", "ولايات", "واشنطن", "واصف", "وقح", "وعد", "وقود", "وزن", "وقوع", "ورشة", "وقائع", "وتيرة", "وساطة", "وفود", "وفات", "وصاية", "وشيك", "وثائق", "وطنية", "وجهات", "وجهت", "وعود", "وضعهم", "وون", "وسعها", "وسعه", "ولاية", "واصفاً", "واصلت", "وليان", "وجدتها", "وجدته", "وديتي", "وطأت", "وطأ", "وعودها", "وجوه", "وضوح", "وجيز", "ورثنا", "ورث", "واقع", "وهم", "واسعاً", "وراثية", "وراثي", "والاس", "واجهنا", "وابل", "ويكيميديا", "واضحا", "واضح", "وصفته", "واتساب", "وحدات", "ون", "وورلد", "والد", "وكلاء", "وتر", "وثيق", "وكالة", "وكالات", "و احدة", "واحد", "وصيته", "وصيه", "ويلمينغتون", "ولد", "وزر", "وعي", "وفد", "وصول", "وقف", "وفاة", "ووتش", "وسط", "وزراء", "وزارة", "ودي", "وصيف", "ويمبلدون", "وست", "وهج", "والد", "وليد", "وثار", "وجد", "وجه", "وقت", "ويلز", "وجود", "وجيه", "وحد", "وحيد", "ودا", "وداد", "ودرو", "ودى", "وديع", "وراء", "ورانس", "ورث", "ورَّث", "ورد", "وردة", "ورق", "ورم", "وزير", "وسام", "وسائل", "وستون", "وسط", "وسن", "وسيط", "وسيلة", "وسيم", "وصاف", "وصف", "وصْفَ", "وصل", "وضع", "وطن", "وعاء", "وفاء", "وفق", "وفيق", "وقت", "وقع", "وكال", "وكيل", "ولاء", "ولف", "وهب", "وباء", "ونستون", "وضح", "وجب", "وقّع", "ولنغتون", "وحش", "وفر", "ولادة", "ولي", "وفيات", "وزار", "وجّه", "وهماً", "وجَّه", "وظيفة", "وظائف", "وقائي"] sentence_splitter_exceptions = ["د.", "كي.", "في.", "آر.", "بى.", "جى.", "دى.", "جيه.", "ان.", "ال.", "سى.", "اس.", "اتش.", "اف."] sentence_splitters = ['.', '!', '؟', '\n'] text = text.replace('،', ' ، ') text = text.replace('*', ' * ') text = text.replace('’', ' ’ ') text = text.replace('‘', ' ‘ ') text = text.replace(',', ' , ') text = text.replace('(', ' ( ') text = text.replace(')', ' ) ') text = text.replace('/', ' / ') text = text.replace('[', ' [ ') text = text.replace(']', ' ] ') text = text.replace('|', ' | ') text = text.replace('؛', ' ؛ ') text = text.replace('«', ' « ') text = text.replace('»', ' » ') text = text.replace('!', ' ! ') text = text.replace('-', ' - ') text = text.replace('“', ' “ ') text = text.replace('”', ' ” ') text = text.replace('"', ' " ') text = text.replace('؟', ' ؟ ') text = text.replace(':', ' : ') text = text.replace('…', ' … ') text = text.replace('..', ' .. ') text = text.replace('...', ' ... 
') text = text.replace('\'', ' \' ') text = text.replace('\n', ' \n ') text = text.replace(' ', ' ') tokens = text.split() for i, token in enumerate(tokens): if token[-1] in sentence_splitters: is_exceptions = token in sentence_splitter_exceptions if not is_exceptions: tokens[i] = token[:-1] + ' ' + token[-1] + 'SENT_SPLITTER' tokens = ' '.join(tokens).split() for i, token in enumerate(tokens): if token.startswith('و'): is_exceptions = [token.startswith(exception) and len(token) <= len(exception) + 1 for exception in tokenizer_exceptions] if True not in is_exceptions: tokens[i] = token[0] + ' ' + token[1:] text = (' '.join(tokens)) text = text.replace(' وال', ' و ال') text = text.replace(' لل', ' ل ل') text = text.replace(' لإ', ' ل إ') text = text.replace(' بالأ', ' ب الأ') text = text.replace('وفقا ل', 'وفقا ل ') text = text.replace('نسبة ل', 'نسبة ل ') sentences = text.split('SENT_SPLITTER') return sentences
48a44391413045a49d6d9f2dff20dcd89734b4f2
3,650,834
def login(client, password="pass"):
    """Helper function to log into our app.

    Parameters
    ----------
    client : test client object
        Passed here is the flask test client used to send the request.
    password : str
        Dummy password for logging into the app.

    Return
    -------
    post request object
        The test client is instructed to send a post request to the /login
        route. The request contains the fields values to be posted by the form.
    """
    return client.post('/login',
                       data=dict(pass_field=password, remember_me=True),
                       follow_redirects=True)
5adca2e7d54dabe47ae92f0bcebb93e0984617b1
3,650,835
def define_dagstermill_solid(
    name,
    notebook_path,
    input_defs=None,
    output_defs=None,
    config_schema=None,
    required_resource_keys=None,
    output_notebook=None,
    output_notebook_name=None,
    asset_key_prefix=None,
    description=None,
    tags=None,
):
    """Wrap a Jupyter notebook in a solid.

    Arguments:
        name (str): The name of the solid.
        notebook_path (str): Path to the backing notebook.
        input_defs (Optional[List[InputDefinition]]): The solid's inputs.
        output_defs (Optional[List[OutputDefinition]]): The solid's outputs. Your notebook should
            call :py:func:`~dagstermill.yield_result` to yield each of these outputs.
        required_resource_keys (Optional[Set[str]]): The string names of any required resources.
        output_notebook (Optional[str]): If set, will be used as the name of an injected output of
            type :py:class:`~dagster.FileHandle` that will point to the executed notebook (in
            addition to the :py:class:`~dagster.AssetMaterialization` that is always created). This
            respects the :py:class:`~dagster.core.storage.file_manager.FileManager` configured on
            the pipeline resources via the "file_manager" resource key, so, e.g., if
            :py:class:`~dagster_aws.s3.s3_file_manager` is configured, the output will be a
            :py:class:`~dagster_aws.s3.S3FileHandle`.
        output_notebook_name: (Optional[str]): If set, will be used as the name of an injected
            output of type of :py:class:`~dagster.BufferedIOBase` that is the file object of the
            executed notebook (in addition to the :py:class:`~dagster.AssetMaterialization` that is
            always created). It allows the downstream solids to access the executed notebook via a
            file object.
        asset_key_prefix (Optional[Union[List[str], str]]): If set, will be used to prefix the
            asset keys for materialized notebooks.
        description (Optional[str]): If set, description used for solid.
        tags (Optional[Dict[str, str]]): If set, additional tags used to annotate solid.
            Dagster uses the tag keys `notebook_path` and `kind`, which cannot be overwritten by
            the user.

    Returns:
        :py:class:`~dagster.SolidDefinition`
    """
    check.str_param(name, "name")
    check.str_param(notebook_path, "notebook_path")

    input_defs = check.opt_list_param(input_defs, "input_defs", of_type=InputDefinition)
    output_defs = check.opt_list_param(output_defs, "output_defs", of_type=OutputDefinition)
    required_resource_keys = check.opt_set_param(
        required_resource_keys, "required_resource_keys", of_type=str
    )

    extra_output_defs = []
    if output_notebook_name is not None:
        required_resource_keys.add("output_notebook_io_manager")
        extra_output_defs.append(
            OutputDefinition(name=output_notebook_name, io_manager_key="output_notebook_io_manager")
        )
    # backcompat
    if output_notebook is not None:
        rename_warning(
            new_name="output_notebook_name", old_name="output_notebook", breaking_version="0.14.0"
        )
        required_resource_keys.add("file_manager")
        extra_output_defs.append(OutputDefinition(dagster_type=FileHandle, name=output_notebook))

    if isinstance(asset_key_prefix, str):
        asset_key_prefix = [asset_key_prefix]

    asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)

    default_description = f"This solid is backed by the notebook at {notebook_path}"
    description = check.opt_str_param(description, "description", default=default_description)

    user_tags = validate_tags(tags)
    if tags is not None:
        check.invariant(
            "notebook_path" not in tags,
            "user-defined solid tags contains the `notebook_path` key, but the `notebook_path` key is reserved for use by Dagster",
        )
        check.invariant(
            "kind" not in tags,
            "user-defined solid tags contains the `kind` key, but the `kind` key is reserved for use by Dagster",
        )
    default_tags = {"notebook_path": notebook_path, "kind": "ipynb"}

    return SolidDefinition(
        name=name,
        input_defs=input_defs,
        compute_fn=_dm_solid_compute(
            name,
            notebook_path,
            output_notebook_name,
            asset_key_prefix=asset_key_prefix,
            output_notebook=output_notebook,  # backcompat
        ),
        output_defs=output_defs + extra_output_defs,
        config_schema=config_schema,
        required_resource_keys=required_resource_keys,
        description=description,
        tags={**user_tags, **default_tags},
    )
48097a7bed7ef84ad8d9df4eeef835f3723cb391
3,650,836
import torch


def denormalize_laf(LAF: torch.Tensor, images: torch.Tensor) -> torch.Tensor:
    """De-normalizes LAFs from scale to image scale.

    B,N,H,W = images.size()
    MIN_SIZE = min(H,W)

        [a11 a21 x]
        [a21 a22 y]

    becomes

        [a11*MIN_SIZE a21*MIN_SIZE x*W]
        [a21*MIN_SIZE a22*MIN_SIZE y*H]

    Args:
        LAF:
        images: images, LAFs are detected in.

    Returns:
        the denormalized lafs.

    Shape:
        - Input: :math:`(B, N, 2, 3)`
        - Output: :math:`(B, N, 2, 3)`
    """
    raise_error_if_laf_is_not_valid(LAF)
    n, ch, h, w = images.size()
    wf = float(w)
    hf = float(h)
    min_size = min(hf, wf)
    coef = torch.ones(1, 1, 2, 3).to(LAF.dtype).to(LAF.device) * min_size
    coef[0, 0, 0, 2] = wf
    coef[0, 0, 1, 2] = hf
    return coef.expand_as(LAF) * LAF
51b8c81359237a9e102c1cd33bb7d1ab16c39893
3,650,837
import os
import shutil


def project_main(GIS_files_path, topath):
    """
    This main function reads the GIS layers in GIS_files_path and separates them by
    raster and vector data, then:
    - projects the data to WGS84 UMT37S,
    - moves all files to ../Projected_files,
    - merges the files named 'kV' into two merged shape files of Transmission and
      Medium Voltage lines,
    - merges the files named 'MiniGrid' into one merged shape file.

    :param GIS_files_path:
    :return:
    """
    print(os.getcwd())
    basedir = os.getcwd()
    os.chdir(GIS_files_path)
    current = os.getcwd()
    print(os.getcwd())

    # All shp-files in all folders in dir current
    adm = project_vector(os.path.join(current, 'gadm36_KEN_shp\gadm36_KEN_0.shp'))
    adm.to_file(os.path.join(current, 'gadm36_KEN_shp\gadm36_KEN_0_UMT37S.shp'))
    shpFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(current) for f in filenames
                if os.path.splitext(f)[1] == '.shp']
    for s in shpFiles:
        path, filename = os.path.split(s)
        projected = project_vector(s)
        clip_vector(os.path.join(current, 'gadm36_KEN_shp\gadm36_KEN_0_UMT37S.shp'), projected, s,
                    os.path.join(path, "UMT37S_%s" % (filename)))

    # All tif-files in all folders in dir current
    tifFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(current) for f in filenames
                if os.path.splitext(f)[1] == '.tif']
    for t in tifFiles:
        path, filename = os.path.split(t)
        masking(os.path.join(current, 'gadm36_KEN_shp\gadm36_KEN_0.shp'), t)
        project_raster(os.path.join(path, '%s' % (filename)), os.path.join(path, "masked_UMT37S_%s" % (filename)))

    # All files containing "UMT37S" are copied to the ../Projected_files dir
    def create_dir(dir):
        if not os.path.exists(dir):
            os.makedirs(dir)

    create_dir((topath))
    allFiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(current) for f in filenames]
    keyword = 'UMT37S'
    for fname in allFiles:
        if keyword in fname:
            # The \\\\?\\ prefix tricks Windows into accepting the otherwise too-long path
            shutil.copy("\\\\?\\" + fname, os.path.join(current, topath))

    os.chdir(basedir)
    merge_transmission(topath)
    merge_minigrid(topath)
    merge_mv(topath)
    return ()
a686608a7c82aed75e7a8606e2ca1ca5b3bc7f02
3,650,838
import re


def parse_regex_flags(raw_flags: str = 'gim'):
    """
    parse flags user input and convert them to re flags.

    Args:
        raw_flags: string chars representing re flags

    Returns:
        (re flags, whether to return multiple matches)
    """
    raw_flags = raw_flags.lstrip('-')  # compatibility with original MatchRegex script.
    multiple_matches = 'g' in raw_flags
    raw_flags = raw_flags.replace('g', '')
    flags = re.RegexFlag(0)
    for c in raw_flags:
        if c in LETTER_TO_REGEX_FLAGS:
            flags |= LETTER_TO_REGEX_FLAGS[c]
        else:
            raise ValueError(f'Invalid regex flag "{c}".\n'
                             f'Supported flags are {", ".join(LETTER_TO_REGEX_FLAGS.keys())}')

    return flags, multiple_matches
71816c57f4e4f6dac82b4746b534a680745bc730
3,650,839
import argparse


def create_parser():
    """ Create argparse object for this CLI """
    parser = argparse.ArgumentParser(
        description="Remove doubled extensions from files")
    parser.add_argument("filename", metavar="file",
                        help="File to process")
    return parser
c5acd1d51161d7001d7a6842fa87ff0cf61a03ef
3,650,840
def has_answer(answers, retrieved_text, match='string', tokenized: bool = False):
    """Check if retrieved_text contains an answer string.

    If `match` is string, token matching is done between the text and answer.
    If `match` is regex, we search the whole text with the regex.
    """
    if not isinstance(answers, list):
        answers = [answers]
    if match == 'string':
        if tokenized:
            text = md.detokenize(retrieved_text)
            t_text = retrieved_text
        else:
            text = retrieved_text
            t_text = spacy_tokenize(retrieved_text, uncase=True)
        for single_answer in answers:
            single_answer = spacy_tokenize(single_answer, uncase=True)
            for i in range(0, len(t_text) - len(single_answer) + 1):
                if single_answer == t_text[i: i + len(single_answer)]:
                    return True
        for single_answer in answers:  # If raw covered.
            if single_answer in text:
                return True
    elif match == 'regex':
        if tokenized:
            text = md.detokenize(retrieved_text)
        else:
            text = retrieved_text
        # Answer is a regex
        single_answer = normalize(answers[0])
        if regex_match(text, single_answer):
            return True
    return False
f0107006d2796e620cd1a47ef9e79c1c5cc1fd7a
3,650,841
def get_utm_zone(srs):
    """
    extracts the utm_zone from an osr.SpatialReference object (srs)

    returns the utm_zone as an int, returns None if utm_zone not found
    """
    if not isinstance(srs, osr.SpatialReference):
        raise TypeError('srs is not a osr.SpatialReference instance')

    if srs.IsProjected() != 1:
        return None

    projcs = srs.GetAttrValue('projcs')
    assert 'UTM' in projcs

    datum = None
    if 'NAD83' in projcs:
        datum = 'NAD83'
    elif 'WGS84' in projcs:
        datum = 'WGS84'
    elif 'NAD27' in projcs:
        datum = 'NAD27'

    # should be something like NAD83 / UTM zone 11N...
    if '/' in projcs:
        utm_token = projcs.split('/')[1]
    else:
        utm_token = projcs

    if 'UTM' not in utm_token:
        return None

    # noinspection PyBroadException
    try:
        utm_zone = int(''.join([k for k in utm_token if k in '0123456789']))
    except Exception:
        return None

    if utm_zone < 0 or utm_zone > 60:
        return None

    hemisphere = projcs[-1]

    return datum, utm_zone, hemisphere
3ee1f9780ce0fbfd843ea6b72627e90e16fd1549
3,650,842
def get_documents_meta_url(project_id: int, limit: int = 10, host: str = KONFUZIO_HOST) -> str:
    """
    Generate URL to load meta information about the Documents in the Project.

    :param project_id: ID of the Project
    :param limit: Number of Documents per page
    :param host: Konfuzio host
    :return: URL to get all the Documents details.
    """
    return f"{host}/api/projects/{project_id}/docs/?limit={limit}"
b538d028844a2f769e8700995d1052b440592046
3,650,843
def parse_params_from_string(paramStr: str) -> dict:
    """ Create a dictionary representation of parameters in PBC format """
    params = dict()
    lines = paramStr.split('\n')
    for line in lines:
        if line:
            name, value = parse_param_line(line)
            add_param(params, name, value)
    return params
fbf8c8cfffd0c411cc4a83760f373dd4e02eec1e
3,650,844
def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False):
    """ Horizontally concatenate gctoos.

    Args:
        gctoos (list of gctoo objects)
        remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos
        error_report_file (string): path to write file containing error report indicating
            problems that occurred during hstack, mainly for inconsistencies in common metadata
        fields_to_remove (list of strings): fields to be removed from the
            common metadata because they don't agree across files
        reset_ids (bool): set to True if sample ids are not unique

    Return:
        concated (gctoo object)
    """
    # Separate each gctoo into its component dfs
    row_meta_dfs = []
    col_meta_dfs = []
    data_dfs = []
    srcs = []
    for g in gctoos:
        row_meta_dfs.append(g.row_metadata_df)
        col_meta_dfs.append(g.col_metadata_df)
        data_dfs.append(g.data_df)
        srcs.append(g.src)

    logger.debug("shapes of row_meta_dfs: {}".format([x.shape for x in row_meta_dfs]))

    # Concatenate row metadata
    all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file)

    # Concatenate col metadata
    all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields)

    # Concatenate the data_dfs
    all_data_df = assemble_data(data_dfs, "horiz")

    # Make sure df shapes are correct
    assert all_data_df.shape[0] == all_row_metadata_df.shape[0], \
        "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}".format(all_data_df.shape[0], all_row_metadata_df.shape[0])
    assert all_data_df.shape[1] == all_col_metadata_df.shape[0], \
        "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}".format(all_data_df.shape[1], all_col_metadata_df.shape[0])

    # If requested, reset sample ids to be unique integers and move old sample
    # ids into column metadata
    if reset_ids:
        do_reset_ids(all_col_metadata_df, all_data_df, "horiz")

    logger.info("Build GCToo of all...")
    concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df,
                           col_metadata_df=all_col_metadata_df,
                           data_df=all_data_df)

    return concated
5da84b3db052dd54c8f3a41ecf0cc20dd3d2f187
3,650,845
def number_fixed_unused_variables(block):
    """
    Method to return the number of fixed Var components which do not appear
    within any activated Constraint in a model.

    Args:
        block : model to be studied

    Returns:
        Number of fixed Var components which do not appear within any
        activated Constraints in block
    """
    return len(fixed_unused_variables_set(block))
a6432160bc52ac3e5682b255c951388242bbc2b0
3,650,846
def tunnelX11(node, display=None):
    """Create an X11 tunnel from node:6000 to the root host
       display: display on root host (optional)
       returns: node $DISPLAY, Popen object for tunnel"""
    if display is None and 'DISPLAY' in environ:
        display = environ['DISPLAY']
    if display is None:
        error("Error: Cannot connect to display\n")
        return None, None
    host, screen = display.split(':')
    # Unix sockets should work
    if not host or host == 'unix':
        # GDM3 doesn't put credentials in .Xauthority,
        # so allow root to just connect
        quietRun('xhost +si:localuser:root')
        return display, None
    else:
        # Create a tunnel for the TCP connection
        port = 6000 + int(float(screen))
        connection = r'TCP\:%s\:%s' % (host, port)
        cmd = ["socat", "TCP-LISTEN:%d,fork,reuseaddr" % port,
               "EXEC:'mnexec -a 1 socat STDIO %s'" % connection]
    return 'localhost:' + screen, node.popen(cmd)
a0e824bef4d23dd3a8a5c25653bf778731de180e
3,650,847
import os


def static_docs(file_path):
    """Serve the 'docs' folder static files and redirect folders to index.html.

    :param file_path: File path inside the 'docs' folder.
    :return: Full HTTPResponse for the static file.
    """
    if os.path.isdir(os.path.join(document_root, 'docs', file_path)):
        return redirect('/docs/%s/index.html' % file_path)
    return static_file(file_path, root=os.path.join(document_root, 'docs'))
14af4c310d09756e3dcd63335bc3d03d2be28dca
3,650,848
import collections


def get_aws_account_id_file_section_dict() -> collections.OrderedDict:
    """Get the section information from ~/.aws_accounts_for_set_aws_mfa"""
    # Check whether ~/.aws_accounts_for_set_aws_mfa exists, and create it if it does not
    prepare_aws_account_id_file()
    # Get the section dictionary of the corresponding ini file
    return Config._sections
51eb94857d62b91c5fcfe978b3cd2a32cbefb6ae
3,650,849
import datetime


def profile(request, session_key):
    """download_audio.html renderer.

    :param request: rest API request object.
    :type request: Request
    :param session_key: string representing the session key for the user
    :type session_key: str
    :return: Just another django mambo.
    :rtype: HttpResponse
    """
    # This may be different from the one provided in the URL.
    my_session_key = request.session.session_key
    last_week = datetime.date.today() - datetime.timedelta(days=7)

    # Get the weekly counts.
    last_weeks = [datetime.date.today() - datetime.timedelta(days=days) for days in [6, 13, 20, 27, 34]]
    dates = []
    weekly_counts = []
    for week in last_weeks:
        dates.append(week.strftime('%m/%d/%Y'))
        count = AnnotatedRecording.objects.filter(
            file__gt='', file__isnull=False, session_id=session_key,
            timestamp__gt=week, timestamp__lt=week + datetime.timedelta(days=7)).count()
        weekly_counts.append(count)

    recording_count = AnnotatedRecording.objects.filter(
        file__gt='', file__isnull=False).count()

    # Construct dictionaries of the user's recordings.
    user_recording_count = AnnotatedRecording.objects.filter(
        file__gt='', file__isnull=False, session_id=session_key).count()
    recent_recordings = AnnotatedRecording.objects.filter(
        file__gt='', file__isnull=False, session_id=session_key, timestamp__gt=last_week)
    recent_dict = defaultdict(list)
    [recent_dict[rec.surah_num].append((rec.ayah_num, rec.file.url)) for rec in recent_recordings]
    old_recordings = AnnotatedRecording.objects.filter(
        file__gt='', file__isnull=False, session_id=session_key, timestamp__lt=last_week)
    old_dict = defaultdict(list)
    [old_dict[rec.surah_num].append((rec.ayah_num, rec.file.url)) for rec in old_recordings]

    recent_lists = _sort_recitations_dict_into_lists(recent_dict)
    old_lists = _sort_recitations_dict_into_lists(old_dict)

    return render(request, 'audio/profile.html',
                  {'session_key': my_session_key,
                   'recent_dict': dict(recent_dict),
                   'recent_lists': recent_lists,
                   'old_lists': old_lists,
                   'dates': dates[::-1],
                   'weekly_counts': weekly_counts[::-1],
                   'old_dict': dict(old_dict),
                   'recording_count': recording_count,
                   'user_recording_count': user_recording_count})
ba39b5a69c062ab62f83f46f7044f403120016ca
3,650,850
import requests


def pipFetchLatestVersion(pkg_name: str) -> str:
    """
    Fetches the latest version of a python package from pypi.org

    :param pkg_name: package to search for
    :return: latest version of the package or 'not found' if error was returned
    """
    base_url = "https://pypi.org/pypi"
    request = f"{base_url}/{pkg_name}/json"
    response = requests.get(request)
    if response.status_code == requests.codes.ok:
        json = response.json()
        newest_version = json["info"]["version"]
    else:
        newest_version = NOT_FOUND
    return newest_version
f1a49d31f4765a1a2ddc5942792a74be211fef49
3,650,851
import subprocess


def _GetLastAuthor():
    """Returns a string with the author of the last commit."""
    author = subprocess.check_output(['git', 'log', '-1', '--pretty=format:"%an"']).splitlines()
    return author
82159cf4d882d6cace29802892dacda1bfe6b6b2
3,650,852
def mock_datasource_http_oauth2(mock_datasource):
    """Mock DataSource object with http oauth2 credentials"""
    mock_datasource.credentials = b"client_id: FOO\nclient_secret: oldisfjowe84uwosdijf"
    mock_datasource.location = "http://foo.com"
    return mock_datasource
8496f6b9ac60af193571f762eb2ea925915a1223
3,650,853
def find_certificate_name(file_name):
    """Search the CRT for the actual aggregator name."""
    # This loop looks for the collaborator name in the key
    with open(file_name, 'r') as f:
        for line in f:
            if 'Subject: CN=' in line:
                col_name = line.split('=')[-1].strip()
                break
    return col_name
853ec62b69feebd86c7a56e1d47b2c12e7f56d63
3,650,854
import sys
import os
import imp


def _find_module(module):
    """Find module using imp.find_module.

    While imp is deprecated, it provides a Python 2/3 compatible interface
    for finding a module. We use the result later to load the module with
    imp.load_module with the '__main__' name, causing it to execute.

    The non-deprecated method of using importlib.util.find_spec and
    loader.execute_module is not supported in Python 2.

    The _find_module implementation uses a novel approach to bypass
    imp.find_module's requirement that package directories contain
    __init__.py/__init__.pyc markers. This lets users specify namespace
    packages in main modules, which are not otherwise supported by
    imp.find_module.
    """
    parts = module.split(".")
    module_path = parts[0:-1]
    module_name_part = parts[-1]
    # See function docstring for the rationale of this algorithm.
    for sys_path_item in sys.path:
        cur_path = os.path.join(sys_path_item, *module_path)
        try:
            return imp.find_module(module_name_part, [cur_path])
        except ImportError:
            pass
    raise ImportError("No module named %s" % module)
b76b72cfc666e78b5b880c95bdc196b469722822
3,650,855
from typing import List


def float2bin(p: float, min_bits: int = 10, max_bits: int = 20, relative_error_tol=1e-02) -> List[bool]:
    """ Converts probability `p` into binary list `b`.

    Args:
        p: probability such that 0 < p < 1
        min_bits: minimum number of bits before testing relative error.
        max_bits: maximum number of bits for truncation.
        relative_error_tol: relative error tolerance

    Returns:
        b: List[bool]

    Examples:
        Probability 0.5 becomes:

        >>> float2bin(0.5)  # Is 0.1
        [1]

        Moreover 0.125 is:

        >>> float2bin(0.125)  # Is 0.001
        [0, 0, 1]

        Some numbers get truncated. For example, probability 1/3 becomes:

        >>> float2bin(1/3)  # Is 0.0101010101...
        [0, 1, 0, 1, 0, 1, 0, 1, 0]

        You can increase the maximum number of bits to reach float precision,
        for example:

        >>> 1/3
        0.3333333333333333
        >>> q = float2bin(1/3, 64)
        >>> bin2float(q)
        0.3333333333333333
        >>> 1/3 == bin2float(q)
        True
    """
    assert 1 > p > 0
    b = []
    i = 1
    original_p = 1 - p
    while p != 0 or i > max_bits:
        if i > min_bits:
            if isclose(1 - bin2float(b), original_p, rtol=relative_error_tol, atol=0):
                break
        if p >= 2 ** -i:
            b.append(True)
            p -= 2 ** -i
        else:
            b.append(False)
        i += 1
    return b
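The helper bin2float is referenced in the docstring and body above but is not part of this row; a minimal counterpart consistent with the doctest examples could look like the following sketch (an assumption, not necessarily the original implementation):

from typing import List


def bin2float(b: List[bool]) -> float:
    """Interpret b as fractional binary digits 0.b1b2b3... (inverse of float2bin)."""
    return sum(bit * 2 ** -(i + 1) for i, bit in enumerate(b))


print(bin2float([1]))        # 0.5
print(bin2float([0, 0, 1]))  # 0.125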
1b25f84255ace0503f06ae2ab9f8dc650206176c
3,650,856
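A hand-checkable sketch for float2bin above: 0.625 is 0.101 in binary, so the expansion terminates after three bits and never reaches the isclose/bin2float check (bin2float and isclose are helpers the record itself does not define).

bits = float2bin(0.625)
print(bits)   # [True, False, True], i.e. 1/2 + 1/8 = 0.625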
from numbers import Number  # assumed origin of the Number type hint

import numpy as np


def bin_thresh(img: np.ndarray, thresh: Number) -> np.ndarray:
    """
    Performs binary thresholding of an image

    Parameters
    ----------
    img : np.ndarray
        Image to filter.
    thresh : int
        Pixel values >= thresh are set to 1, else 0.

    Returns
    -------
    np.ndarray : Binarized image, same shape as input
    """
    res = img >= thresh
    return res
9064fb5f50c22aabc73bf63d3a818b6898a19a58
3,650,857
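A small numpy sketch for bin_thresh above; the array values are arbitrary.

import numpy as np
print(bin_thresh(np.array([[1, 5], [9, 3]]), 5))
# [[False  True]
#  [ True False]]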
from mathutils import Matrix, Vector, Euler def add_object_align_init(context, operator): """ Return a matrix using the operator settings and view context. :arg context: The context to use. :type context: :class:`bpy.types.Context` :arg operator: The operator, checked for location and rotation properties. :type operator: :class:`bpy.types.Operator` :return: the matrix from the context and settings. :rtype: :class:`mathutils.Matrix` """ properties = operator.properties if operator is not None else None space_data = context.space_data if space_data and space_data.type != 'VIEW_3D': space_data = None # location if operator and properties.is_property_set("location"): location = Matrix.Translation(Vector(properties.location)) else: if space_data: # local view cursor is detected below location = Matrix.Translation(space_data.cursor_location) else: location = Matrix.Translation(context.scene.cursor_location) if operator: properties.location = location.to_translation() # rotation view_align = (context.user_preferences.edit.object_align == 'VIEW') view_align_force = False if operator: if properties.is_property_set("view_align"): view_align = view_align_force = operator.view_align else: if properties.is_property_set("rotation"): # ugh, 'view_align' callback resets value = properties.rotation[:] properties.view_align = view_align properties.rotation = value del value else: properties.view_align = view_align if operator and (properties.is_property_set("rotation") and not view_align_force): rotation = Euler(properties.rotation).to_matrix().to_4x4() else: if view_align and space_data: rotation = space_data.region_3d.view_matrix.to_3x3().inverted() rotation.resize_4x4() else: rotation = Matrix() # set the operator properties if operator: properties.rotation = rotation.to_euler() return location * rotation
6bd32226c7024245b1252c3a51f5ae713f43a1b2
3,650,858
import pickle


def load_dataset():
    """
    load dataset
    :return: dataset in numpy style
    """
    data_location = 'data.pk'
    data = pickle.load(open(data_location, 'rb'))
    return data
9467826bebfc9ca3ad1594904e9f3195e345c065
3,650,859
def video_feed(): """Return camera live feed.""" return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame')
87c9ae8aa84fe17a16b040d56fbdaac6351e0706
3,650,860
def area_in_squaremeters(geodataframe): """Calculates the area sizes of a geo dataframe in square meters. Following https://gis.stackexchange.com/a/20056/77760 I am choosing equal-area projections to receive a most accurate determination of the size of polygons in the geo dataframe. Instead of Gall-Peters, as suggested in the answer, I am using EPSG_3035 which is particularly usefull for Europe. Returns a pandas series of area sizes in square meters. """ return geodataframe.to_crs(EPSG_3035_PROJ4).area
47a2ae042c8cda7fa6b66ccd011d0293afb36504
3,650,861
import numpy as np
import scipy.ndimage
from skimage.color import rgb2gray  # assumed source of rgb2gray


def add_eges_grayscale(image):
    """ Edge detect. Keep original image grayscale value where no edge. """
    greyscale = rgb2gray(image)
    laplacian = np.array([[0, -1, 0],
                          [-1, 4, -1],
                          [0, -1, 0]])
    edges = scipy.ndimage.filters.correlate(greyscale, laplacian)
    for index, value in np.ndenumerate(edges):
        edges[index] = 255 - greyscale[index] if value == 0 else 0
    return edges
0cba5152578722693d0d796252a99973e980b365
3,650,862
def generateFromSitePaymentObject(signature: str, account_data: dict, data: dict)->dict: """[summary] Creates object for from site chargment request Args: signature (str): signature hash string account_data (dict): merchant_account: str merchant_domain: str data (dict): order + personal data to create charge orderReference (str): timestamp amount (float): order total amount currency (str): 'USD', 'UAH', 'RUB' card (str): user card number expMonth (str): card expires month expYear (str): card expires year cardCvv (str): card cvv cardHolder (str): full name of card holder "Test test" productName (list[str]): product names list productPrice (list[float]): product price list productCount (list[int]): product count list clientFirstName (str): client first name clientLastName (str): client last name clientCountry (str): client country clientEmail (str): client email clientPhone (str): client phone Returns: dict: [description] """ return { "transactionType":"CHARGE", 'merchantAccount': account_data['merchant_account'], "merchantAuthType":"SimpleSignature", 'merchantDomainName': account_data['merchant_domain'], "merchantTransactionType":"AUTH", "merchantTransactionSecureType": "NON3DS", 'merchantSignature': signature, "apiVersion":1, 'orderReference': str(data['orderReference']), 'orderDate': str(data['orderReference']), "amount":data["amount"], 'currency': data['currency'], "card":data['card'], "expMonth":data['expMonth'], "expYear":data['expYear'], "cardCvv":data['cardCvv'], "cardHolder":data['cardHolder'], 'productName': list(map(str, data['productName'])), 'productPrice': list(map(float, data['productPrice'])), 'productCount': list(map(int, data['productCount'])), "clientFirstName":data['clientFirstName'], "clientLastName":data['clientLastName'], "clientCountry":data['clientCountry'], "clientEmail":data['clientEmail'], "clientPhone":data['clientPhone'], }
149434694e985956dede9bf8b6b0da1215ac9963
3,650,863
def deal_weights(node, data=None): """ deal the weights of the custom layer """ layer_type = node.layer_type weights_func = custom_layers[layer_type]['weights'] name = node.layer_name return weights_func(name, data)
a2a271ea0aeb94a1267dbc06da8997985b81633e
3,650,864
def label_brand_generic(df): """ Correct the formatting of the brand and generic drug names """ df = df.reset_index(drop=True) df = df.drop(['drug_brand_name', 'drug_generic_name'], axis=1) df['generic_compare'] = df['generic_name'].str.replace('-', ' ') df['generic_compare'] = df['generic_compare'].str.replace('with ', '') df['generic_compare'] = df['generic_compare'].str.replace('/', ' ') df['brand_compare'] = df['brand_name'].str.replace('-', ' ') df['brand_compare'] = df['brand_compare'].str.replace('with ', '') df['brand_compare'] = df['brand_compare'].str.replace('/', ' ') df_na = df.fillna(0) #df.dropna().sort_values(by='generic_name') risk_class_list = [] # Find contingency table for each generic # format [[brand_ad_ev, brand_bene], [generic_ad_ev, generic_bene]] for i, val in enumerate(df_na['generic_compare']): if ((df_na.iloc[i]['brand_compare'] == val) | (df_na.iloc[i]['brand_compare'] in val) | (val in df_na.iloc[i]['brand_compare'])): # GENERIC NEG = -1 risk_class_list.append(-1) else: # BRAND POS = 1 risk_class_list.append(1) risk_series = pd.Series(risk_class_list).replace(np.inf, np.nan) risk_series = risk_series.replace(-np.inf, np.nan) df_na['risk_class'] = risk_series df['risk_class'] = risk_series # Drop columns that are redunant from name matching df_na = df_na.drop(['generic_compare', 'brand_compare'], axis = 1) df = df.drop(['generic_compare', 'brand_compare'], axis = 1) df_class_generic_count = pd.pivot_table(df, index = ['generic_name'], values = ['risk_class'], aggfunc = 'count') df_class_generic_count.rename(columns={'risk_class' : 'risk_count'}, inplace=True) df = df.merge(df_class_generic_count, right_index=True, left_on = 'generic_name', how='inner') return df
a421eece6e595159847821abcaf2cf7dd8dc88c5
3,650,865
def RMSRE( image_true: np.ndarray, image_test: np.ndarray, mask: np.ndarray = None, epsilon: float = 1e-9, ) -> float: """Root mean squared relative error (RMSRE) between two images within the specified mask. If not mask is specified the entire image is used. Parameters ---------- image_true : np.ndarray ground truth image. image_test : np.ndarray predicted image. mask : np.ndarray, optional mask to compute the RMSRE in, by default None epsilon : float, optional epsilon used to stabilize the calculation of the relative error, by default 1e-9 Returns ------- float RMSRE value between the images within the specified mask. """ if mask is None: mask = np.ones_like(image_true) mask_flat = mask.reshape(-1).astype(bool) # flatten relativeErrorImageFlat = ( image_test.reshape(-1)[mask_flat] - image_true.reshape(-1)[mask_flat] ) / (image_true.reshape(-1)[mask_flat] + epsilon) return np.sqrt( np.mean(relativeErrorImageFlat) ** 2 + np.std(relativeErrorImageFlat) ** 2 )
6b377b2588ef0c02f059248d3214e0d7960ca25b
3,650,866
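A worked sketch for RMSRE above, assuming numpy is imported as np (the record relies on that alias): the relative errors are +0.1 and -0.1, so the mean is ~0, the standard deviation is ~0.1, and the result is sqrt(0**2 + 0.1**2), about 0.1.

import numpy as np
print(RMSRE(np.array([1.0, 2.0]), np.array([1.1, 1.8])))   # ~0.1 (epsilon shifts it very slightly)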
import logging

import PIL.Image


def getImage(imageData, flag):
    """ Returns the PIL image object from imageData based on the flag. """
    # ENHANCED, UNENHANCED and DISPLAY are module-level flag constants in the original source.
    image = None
    try:
        if flag == ENHANCED:
            image = PIL.Image.open(imageData.enhancedImage.file)
        elif flag == UNENHANCED:
            image = PIL.Image.open(imageData.unenhancedImage.file)
        elif flag == DISPLAY:
            image = PIL.Image.open(imageData.image.file)
    except Exception:
        logging.error("image cannot be read from the image data")
        return None
    return image
a3aaa80bc396fcdf099d5963706d21d63a6dcf0d
3,650,867
def save_record(record_type, record_source, info, indicator, date=None): """ A convenience function that calls 'create_record' and also saves the resulting record. :param record_type: The record type, which should be a value from the RecordType enumeration :param record_source: The source for the record, which should be a value from the RecordSource enumeration :param info: The actual data to be stored in the record :param date: The date to use with this record, or None to use the current date :return: The new IndicatorRecord instance """ record = create_record(record_type, record_source, info, indicator, date) record.save() logger.info("%s (%s) record from %s saved successfully", record_type.name, record_type.title, record_source.title) return record
903eb7333cfd2cc534812c5417e5e32a7769ffe4
3,650,868
def update_product_price(pid: str, new_price: int): """ Update product's price Args: pid (str): product id new_price (int): new price Returns: dict: status(success, error) """ playload = {'status': ''} try: connection = create_connection() with connection: with connection.cursor() as cursor: sql = "UPDATE `product` SET `PRICE` = %s WHERE `PID` = %s" cursor.execute(sql, (new_price, pid)) connection.commit() playload['status'] = 'success' return playload except: playload['status'] = 'error' return playload
fff3723a9138724f1957cd9a669cdcf79e4ed4e5
3,650,869
def select_n_products(lst, n):
    """Select the top N products (by number of reviews)

    args:
        lst: a list of lists that are (key,value) pairs for (ASIN, N-reviews)
             sorted on the number of reviews in reverse order
        n: a list of three numbers
    returns:
        a list of lists with N products
    """
    top_products = []
    first_third = lst[100:100 + n[0] + 1]
    second_third = lst[1000:1000 + n[1] + 1]
    third_third = lst[50000:50000 + n[2] + 1]
    top_products.extend(first_third)
    top_products.extend(second_third)
    top_products.extend(third_third)
    n_reviews = sum([x[1] for x in top_products])
    print("The number of products is: {} and the number of reviews is: {}".format(
        sum(n), n_reviews))
    return top_products
ed052708010512758845186ae9e4fb33b41bc511
3,650,870
def load_vanHateren(params): """ Load van Hateren data and format as a Dataset object Inputs: params [obj] containing attributes: data_dir [str] directory to van Hateren data rand_state (optional) [obj] numpy random state object num_images (optional) [int] how many images to extract. Default (None) is all images. image_edge_size (optional) [int] how many pixels on an edge. Default (None) is full-size. """ # Parse params assert hasattr(params, "data_dir"), ("function input must have 'data_dir' kwarg") data_dir = params.data_dir if hasattr(params, "rand_state"): rand_state = params.rand_state else: #assert hasattr(params, "rand_seed"), ("Params must specify a random state or seed") if hasattr(params, "rand_seed"): rand_state = np.random.RandomState(params.rand_seed) else: rand_state = np.random.RandomState(None) print("WARNING: Params did not specify a random state or seed") num_images = int(params.num_images) if hasattr(params, "num_images") else None image_edge_size = int(params.image_edge_size) if hasattr(params, "image_edge_size") else None # Get data img_filename = data_dir+"/img/images_curated.h5" # pre-curated dataset vh_data = vanHateren(img_filename, num_images, rand_state) image_dataset = Dataset(vh_data.images, lbls=None, ignore_lbls=None, rand_state=rand_state) # Resize data if image_edge_size is not None: edge_scale = image_edge_size/image_dataset.shape[1] #vh has square images assert edge_scale <= 1.0, ( "image_edge_size (%g) must be less than or equal to the original size (%g)."%(image_edge_size, image_dataset.shape[1])) scale_factor = [1.0, edge_scale, edge_scale, 1.0] # batch & channel don't get downsampled image_dataset.downsample(scale_factor, order=3) return {"train":image_dataset}
ca32f182f5534da89df0bd5454e74a586c6ca4d6
3,650,871
import argparse def build_parser() -> argparse.ArgumentParser: """Builds and returns the CLI parser.""" # Help parser help_parser = argparse.ArgumentParser(add_help=False) group = help_parser.add_argument_group('Help and debug') group.add_argument('--debug', help='Enable debug output.', action='store_true') group.add_argument('-h', '--help', help='Show this help message and exit.', action='help') # IO parser io_parser = argparse.ArgumentParser(add_help=False) group = io_parser.add_argument_group('Input/Output') group.add_argument('-i', '--input', help='Input document.', required=True) group.add_argument('-o', '--output', help='Output file path.') # Main parser main_parser = argparse.ArgumentParser(prog=EXE_NAME, description='Detects design patterns in class diagrams.', parents=[help_parser], add_help=False) subparsers = main_parser.add_subparsers(title='Subcommands') # 'patterns' subcommand description = 'List detected design patterns.' patterns_parser = subparsers.add_parser('patterns', description=description, help=description, parents=[help_parser, io_parser], add_help=False) patterns_parser.add_argument('-p', '--pattern', choices=ALL_PATTERNS, nargs='*', help='Patterns to match.') patterns_parser.set_defaults(func=patterns_sub) # 'cycles' subcommand description = 'List detected dependency cycles.' cycles_parser = subparsers.add_parser('cycles', description=description, help=description, parents=[help_parser, io_parser], add_help=False) cycles_parser.set_defaults(func=cycles_sub) # 'metrics' subcommand description = 'Print metrics computed from the class diagram.' metrics_parser = subparsers.add_parser('metrics', description=description, help=description, parents=[help_parser, io_parser], add_help=False) metrics_parser.add_argument('-c', '--config', help='Configuration file.') metrics_parser.set_defaults(func=metrics_sub) return main_parser
0be83ee2e497f2c5ccfd21c4a4414c587304e6ee
3,650,872
import argparse
import sys


def parse_args():
    """Parse command-line args.
    """
    parser = argparse.ArgumentParser(description='Upload (JSON-encoded) conformance resources from FHIR IGPack tar archive.', add_help=False)
    parser.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
    parser.add_argument('-i', '--igpack', help='IGPack filename (e.g. us-core-v3.1.1-package.tgz)')
    parser.add_argument('-t', '--target', help='FHIR API base URL for target server (e.g. http://localhost:8080/r4)')
    args = parser.parse_args()
    usage = False
    error = False
    if getattr(args, 'help'):
        usage = True
    else:
        for arg in vars(args):
            if getattr(args, arg) == None:
                print('Error - missing required argument: --{}'.format(arg), file=sys.stderr, flush=True)
                error = True
    if usage or error:
        parser.print_help()
        print()
        print('Additionally, if the ACCESS_TOKEN environment variable is defined,')
        print('its value will be used as an OAuth bearer token for the FHIR API.', flush=True)
    if error:
        raise RuntimeError('Command-line argument error.')
    return args
7c0ae02e07706ef212417ee7d0c4dd11a1de945c
3,650,873
from math import pi  # `pi` is used below but not imported in the original snippet

import torch


def wrap_to_pi(inp, mask=None):
    """Wraps to [-pi, pi)"""
    if mask is None:
        mask = torch.ones(1, inp.size(1))
    if mask.dim() == 1:
        mask = mask.unsqueeze(0)
    mask = mask.to(dtype=inp.dtype)
    val = torch.fmod((inp + pi) * mask, 2 * pi)
    neg_mask = (val * mask) < 0
    val = val + 2 * pi * neg_mask.to(val.dtype)
    val = (val - pi)
    inp = (1 - mask) * inp + mask * val
    return inp
7aca43bb2146c1cad07f9a070a7099e6fb8ad857
3,650,874
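A short sketch for wrap_to_pi above; the input angles are arbitrary and the expected values follow from wrapping into [-pi, pi).

import torch
angles = torch.tensor([[4.7124, 0.5000]])   # ~3*pi/2 and 0.5 rad
print(wrap_to_pi(angles))                   # approximately tensor([[-1.5708,  0.5000]])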
from functools import wraps  # `wraps` is used below but not imported in the original snippet

import pytest  # `pytest.skip` is used below


def if_pandas(func):
    """Test decorator that skips test if pandas not installed."""
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import pandas  # noqa: F401 -- only probing for availability
        except ImportError:
            pytest.skip('Pandas not available.')
        else:
            return func(*args, **kwargs)
    return run_test
b39f88543559c4f4f1b9bb5bb30768916d3708d6
3,650,875
def handle_front_pots(pots, next_pots): """Handle front, additional pots in pots.""" if next_pots[2] == PLANT: first_pot = pots[0][1] pots = [ [next_pots[2], first_pot - 1]] + pots return pots, next_pots[2:] return pots, next_pots[3:]
53ec905a449c0402946cb8c28852e81da80a92ef
3,650,876
import types


def environment(envdata):
    """
    Class decorator that allows to run tests in sandbox against different Qubell environments.
    Each test method in suite is converted to <test_name>_on_environemnt_<environment_name>

    :param params: dict
    """
    #assert isinstance(params, dict), "@environment decorator should take 'dict' with environments"

    def copy(func, name=None):
        return types.FunctionType(func.func_code, func.func_globals, name=name,
                                  argdefs=func.func_defaults, closure=func.func_closure)

    def wraps_class(clazz):
        if "environments" in clazz.__dict__:
            log.warn("Class {0} environment attribute is overridden".format(clazz.__name__))
        params = format_as_api(envdata)
        clazz.environments = params
        methods = [method for _, method in clazz.__dict__.items()
                   if isinstance(method, types.FunctionType) and method.func_name.startswith("test")]
        for env in params:
            if env['name'] != DEFAULT_ENV_NAME():
                env['name'] += '_for_%s' % clazz.__name__  # Each test class should have it's own set of envs.

        for method in methods:
            delattr(clazz, method.func_name)
            log.info("Test '{0}' multiplied per environment in {1}".format(method.func_name, clazz.__name__))
            for env in params:
                new_name = method.func_name + "_on_environment_" + env['name']
                setattr(clazz, new_name, copy(method, new_name))
        return clazz
    return wraps_class
9ce82ff8ee3627f8795b7bc9634c298e8ff195bc
3,650,877
from urllib.parse import urlparse  # Python 3 location; the original snippet omits the import


def get_domain_name(url):
    """ Returns the domain name from a URL """
    parsed_uri = urlparse(url)
    return parsed_uri.netloc
00160285a29a4b2d1fe42fb8ec1648ca4c31fa8b
3,650,878
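A quick sketch for get_domain_name above; the URL is an arbitrary example.

print(get_domain_name("https://example.com/docs?page=1"))   # example.com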
def get_answer_str(answers: list, scale: str): """ :param ans_type: span, multi-span, arithmetic, count :param ans_list: :param scale: "", thousand, million, billion, percent :param mode: :return: """ sorted_ans = sorted(answers) ans_temp = [] for ans in sorted_ans: ans_str = str(ans) if is_number(ans_str): ans_num = to_number(ans_str) if ans_num is None: if scale: ans_str = ans_str + " " + str(scale) else: if '%' in ans_str: # has been handled the answer itself is a percentage ans_str = '%.4f' % ans_num else: ans_str = '%.4f' % (round(ans_num, 2) * scale_to_num(scale)) else: if scale: ans_str = ans_str + " " + str(scale) ans_temp.append(ans_str) return [" ".join(ans_temp)]
734015503ccec63265a0531aa05e8bd8514c7c15
3,650,879
def user_0post(users): """ Fixture that returns a test user with 0 posts. """ return users['user2']
5401e7f356e769b5ae68873f2374ef74a2d439c6
3,650,880
import os


def initialize():
    """
    Initialize some parameters, such as API key
    """
    api_key = os.environ.get("api_key")  # None when not exist
    if api_key and len(api_key) == 64:  # length of a key should be 64
        return api_key
    print("Please set a valid api_key in the environment variables.")
    exit()
2589aeea4db2d1d1f20de03bc2425e1835eb2f69
3,650,881
def plot_tuning_curve_evo(data, epochs=None, ax=None, cmap='inferno_r', linewidth=0.3, ylim='auto', include_true=True, xlabel='Bandwidths', ylabel='Average Firing Rate'): """ Plot evolution of TC averaged over noise (zs). .. WARNING:: It is not used for a long time. .. TODO:: Make `plot_tuning_curve_evo` accept `.GANRecords`. Parameters ---------- data : `.GANData` """ if ax is None: _, ax = pyplot.subplots() if epochs is None: epochs = len(data.tuning) elif isinstance(epochs, int): epochs = range(10) cmap = matplotlib.cm.get_cmap(cmap) norm = matplotlib.colors.Normalize(min(epochs), max(epochs)) mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap) mappable.set_array([]) fig = ax.get_figure() cb = fig.colorbar(mappable, ax=ax) cb.set_label('epochs') bandwidths = data.bandwidths for i in epochs: ax.plot(bandwidths, data.model_tuning[i], color=cmap(norm(i)), linewidth=linewidth) if include_true: ax.plot(bandwidths, data.true_tuning[0], linewidth=3, linestyle='--') if ylim == 'auto': y = data.model_tuning[epochs] q3 = np.percentile(y, 75) q1 = np.percentile(y, 25) iqr = q3 - q1 yamp = y[y < q3 + 1.5 * iqr].max() ax.set_ylim(- yamp * 0.05, yamp * 1.2) elif ylim: ax.set_ylim(ylim) if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) return ax
f571339b8a306304e1807ef3dd0f4b93e6856dd5
3,650,882
import json


def transportinfo_decoder(obj):
    """Decode programme object from json."""
    transportinfo = json.loads(obj)

    if "__type__" in transportinfo and transportinfo["__type__"] == "__transportinfo__":
        return TransportInfo(**transportinfo["attributes"])
    return transportinfo
8a311cb419e9985ef0a184b82888220c0f3258b2
3,650,883
def group_events_data(events): """ Group events according to the date. """ # e.timestamp is a datetime.datetime in UTC # change from UTC timezone to current seahub timezone def utc_to_local(dt): tz = timezone.get_default_timezone() utc = dt.replace(tzinfo=timezone.utc) local = timezone.make_naive(utc, tz) return local event_groups = [] for e in events: e.time = utc_to_local(e.timestamp) e.date = e.time.strftime("%Y-%m-%d") if e.etype == 'repo-update': e.author = e.commit.creator_name elif e.etype == 'repo-create': e.author = e.creator else: e.author = e.repo_owner if len(event_groups) == 0 or \ len(event_groups) > 0 and e.date != event_groups[-1]['date']: event_group = {} event_group['date'] = e.date event_group['events'] = [e] event_groups.append(event_group) else: event_groups[-1]['events'].append(e) return event_groups
de2f2031bdcaaf2faffdb99c67bbbb1e15828ef8
3,650,884
def create_matrix(PBC=None): """ Used for calculating distances in lattices with periodic boundary conditions. When multiplied with a set of points, generates additional points in cells adjacent to and diagonal to the original cell Args: PBC: an axis which does not have periodic boundary condition. Ex: PBC=1 cancels periodic boundary conditions along the x axis Returns: A numpy array of matrices which can be multiplied by a set of coordinates """ matrix = [] i_list = [-1, 0, 1] j_list = [-1, 0, 1] k_list = [-1, 0, 1] if PBC == 1: i_list = [0] elif PBC == 2: j_list = [0] elif PBC == 3: k_list = [0] for i in i_list: for j in j_list: for k in k_list: matrix.append([i,j,k]) return np.array(matrix, dtype=float)
7470803fe8297ef2db1ce4bd159e9d9c93d34787
3,650,885
def get_additive_seasonality_linear_trend() -> pd.Series: """Get example data for additive seasonality tutorial""" dates = pd.date_range(start="2017-06-01", end="2021-06-01", freq="MS") T = len(dates) base_trend = 2 state = np.random.get_state() np.random.seed(13) observations = base_trend * np.arange(T) + np.random.normal(loc=4, size=T) np.random.set_state(state) seasonality = 12 time = np.arange(0, T / seasonality, 1 / seasonality) amplitude = 10 sin_cos_wave = amplitude * np.cos(2 * np.pi * time) + amplitude * np.sin( 2 * np.pi * time ) observations += sin_cos_wave output = pd.Series(observations, index=dates) return output
034b4ca9e086e95fa1663704fda91ae3986694b4
3,650,886
def is_client_trafic_trace(conf_list, text):
    """Determine if text is client traffic that should be included."""
    for index in range(len(conf_list)):
        if text.find(conf_list[index].ident_text) != -1:
            return True
    return False
0b7fdf58e199444ea52476d5621ea9353475b0a0
3,650,887
def isinf(x): """ For an ``mpf`` *x*, determines whether *x* is infinite:: >>> from sympy.mpmath import * >>> isinf(inf), isinf(-inf), isinf(3) (True, True, False) """ if not isinstance(x, mpf): return False return x._mpf_ in (finf, fninf)
4d5ca6ac2f8ed233a70c706b7fff97bf171c4f21
3,650,888
def formalize_switches(switches): """ Create all entries for the switches in the topology.json """ switches_formal=dict() for s, switch in enumerate(switches): switches_formal["s_"+switch]=formalize_switch(switch, s) return switches_formal
8dbb9987e5bc9c9f81afc0432428a746e2f05fc4
3,650,889
def arp_scores(run): """ This function computes the Average Retrieval Performance (ARP) scores according to the following paper: Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff. How to Measure the Reproducibility of System-oriented IR Experiments. Proceedings of SIGIR, pages 349-358, 2020. The ARP score is defined by the mean across the different topic scores of a run. For all measures outputted by trec_eval, the ARP scores will be determined. @param run: The run to be evaluated. @return: Dictionary containing the ARP scores for every measure outputted by trec_eval. """ return dict(_arp_scores(run))
0e23eb1d6ee3c2502408585b1d0dbb0993ca7628
3,650,890
from typing import Tuple from typing import Optional import scipy def bayesian_proportion_test( x:Tuple[int,int], n:Tuple[int,int], prior:Tuple[float,float]=(0.5,0.5), prior2:Optional[Tuple[float,float]]=None, num_samples:int=1000, seed:int=8675309) -> Tuple[float,float,float]: """ Perform a Bayesian test to identify significantly different proportions. This test is based on a beta-binomial conjugate model. It uses Monte Carlo simulations to estimate the posterior of the difference between the proportions, as well as the likelihood that :math:`\pi_1 > \pi_2` (where :math:`\pi_i` is the likelihood of success in sample :math:`i`). Parameters ---------- x : typing.Tuple[int,int] The number of successes in each sample n : typing.Tuple[int,int] The number of trials in each sample prior : typing.Tuple[float,float] The parameters of the beta distribution used as the prior in the conjugate model for the first sample. prior2 : typing.Optional[typing.Tuple[float,float]] The parameters of the beta distribution used as the prior in the conjugate model for the second sample. If this is not specified, then `prior` is used. num_samples : int The number of simulations seed : int The seed for the random number generator Returns ------- difference_{mean,var} : float The posterior mean and variance of the difference in the likelihood of success in the two samples. A negative mean indicates that the likelihood in sample 2 is higher. p_pi_1_greater : float The probability that :math:`\pi_1 > \pi_2` """ # copy over the prior if not specified for sample 2 if prior2 is None: prior2 = prior # check the bounds if len(x) != 2: msg = "[bayesian_proportion_test]: please ensure x has exactly two elements" raise ValueError(msg) if len(n) != 2: msg = "[bayesian_proportion_test]: please ensure n has exactly two elements" raise ValueError(msg) if len(prior) != 2: msg = "[bayesian_proportion_test]: please ensure prior has exactly two elements" raise ValueError(msg) if len(prior2) != 2: msg = "[bayesian_proportion_test]: please ensure prior2 has exactly two elements" raise ValueError(msg) # set the seed if seed is not None: np.random.seed(seed) # perform the test a = prior[0]+x[0] b = prior[0]+n[0]-x[0] s1_posterior_samples = scipy.stats.beta.rvs(a, b, size=num_samples) a = prior[1]+x[1] b = prior[1]+n[1]-x[1] s2_posterior_samples = scipy.stats.beta.rvs(a, b, size=num_samples) diff_posterior_samples = s1_posterior_samples - s2_posterior_samples diff_posterior_mean = np.mean(diff_posterior_samples) diff_posterior_var = np.var(diff_posterior_samples) p_pi_1_greater = sum(s1_posterior_samples > s2_posterior_samples) / num_samples return diff_posterior_mean, diff_posterior_var, p_pi_1_greater
5f63424b9dcb6e235b13a9e63f0b9a2dc1e95b31
3,650,891
import torch
from torch import Tensor  # used in the annotations below but not imported in the original snippet


def _create_triangular_filterbank(
    all_freqs: Tensor,
    f_pts: Tensor,
) -> Tensor:
    """Create a triangular filter bank.

    Args:
        all_freqs (Tensor): STFT freq points of size (`n_freqs`).
        f_pts (Tensor): Filter mid points of size (`n_filter`).

    Returns:
        fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`).
    """
    # Adopted from Librosa
    # calculate the difference between each filter mid point and each stft freq point in hertz
    f_diff = f_pts[1:] - f_pts[:-1]  # (n_filter + 1)
    slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1)  # (n_freqs, n_filter + 2)
    # create overlapping triangles
    zero = torch.zeros(1)
    down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1]  # (n_freqs, n_filter)
    up_slopes = slopes[:, 2:] / f_diff[1:]  # (n_freqs, n_filter)
    fb = torch.max(zero, torch.min(down_slopes, up_slopes))

    return fb
1ad5bd58d673626a15e27b6d9d68829299fe7636
3,650,892
def convert_millis(track_dur_lst):
    """ Convert milliseconds to 00:00:00 format """
    converted_track_times = []
    for track_dur in track_dur_lst:
        seconds = (int(track_dur) / 1000) % 60
        minutes = int(int(track_dur) / 60000) % 60  # modulo 60 so whole hours roll into the hours field
        hours = int(int(track_dur) / (60000 * 60))
        converted_time = '%02d:%02d:%02d' % (hours, minutes, seconds)
        converted_track_times.append(converted_time)
    return converted_track_times
3d5199da01529f72b7eb6095a26e337277f3c2c9
3,650,893
def sync_xlims(*axes):
    """Synchronize the x-axis data limits for multiple axes.

    Uses the maximum upper limit and minimum lower limit across all given axes.

    Parameters
    ----------
    *axes : axis objects
        List of matplotlib axis objects to format

    Returns
    -------
    out : xmin, xmax
        The computed bounds
    """
    xmins, xmaxs = zip(*[ax.get_xlim() for ax in axes])
    xmin = min(xmins)
    xmax = max(xmaxs)
    for ax in axes:
        ax.set_xlim(xmin, xmax)
    return xmin, xmax
a377877a9647dfc241db482f8a2c630fe3eed146
3,650,894
def algo_config_to_class(algo_config): """ Maps algo config to the IRIS algo class to instantiate, along with additional algo kwargs. Args: algo_config (Config instance): algo config Returns: algo_class: subclass of Algo algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm """ pol_cls, _ = algo_name_to_factory_func("bc")(algo_config.actor) plan_cls, _ = algo_name_to_factory_func("gl")(algo_config.value_planner.planner) value_cls, _ = algo_name_to_factory_func("bcq")(algo_config.value_planner.value) return IRIS, dict(policy_algo_class=pol_cls, planner_algo_class=plan_cls, value_algo_class=value_cls)
884ab7a91d9d8c901d078f9b477d5d21cba3e5ff
3,650,895
def group_by_key(dirnames, key): """Group a set of output directories according to a model parameter. Parameters ---------- dirnames: list[str] Output directories key: various A field of a :class:`Model` instance. Returns ------- groups: dict[various: list[str]] For each value of `key` that is found at least once in the models, a list of the output directories where `key` is that value. """ groups = defaultdict(lambda: []) for dirname in dirnames: m = get_recent_model(dirname) groups[m.__dict__[key]].append(dirname) return dict(groups)
b291cd889c72fb198400b513e52ff9417c8d93b7
3,650,896
def redistrict_grouped(df, kind, group_cols, district_col=None, value_cols=None, **kwargs): """Redistrict dataframe by groups Args: df (pandas.DataFrame): input dataframe kind (string): identifier of redistrict info (e.g. de/kreise) group_cols (list): List of column names to group by district_col (string): Name of district column value_cols (list): List of column names with values to operate on **kwargs: see redistrict function Returns: pandas.Dataframe: Redistricted dataframe """ return pd.concat(redistrict_grouped_dataframe(df, kind, group_cols, district_col=district_col, value_cols=value_cols, **kwargs))
21f6514ca15d5fff57d03dab9d0bb7693c132e95
3,650,897
from typing import Tuple from typing import List import torch def count_wraps_rand( nr_parties: int, shape: Tuple[int] ) -> Tuple[List[ShareTensor], List[ShareTensor]]: """Count wraps random. The Trusted Third Party (TTP) or Crypto provider should generate: - a set of shares for a random number - a set of shares for the number of wraparounds for that number Those shares are used when doing a public division, such that the end result would be the correct one. Args: nr_parties (int): Number of parties shape (Tuple[int]): The shape for the random value Returns: List[List[List[ShareTensor, ShareTensor]]: a list of instaces with the shares for a random integer value and shares for the number of wraparounds that are done when reconstructing the random value """ rand_val = torch.empty(size=shape, dtype=torch.long).random_( generator=ttp_generator ) r_shares = MPCTensor.generate_shares( secret=rand_val, nr_parties=nr_parties, tensor_type=torch.long, encoder_precision=0, ) wraps = count_wraps([share.tensor for share in r_shares]) theta_r_shares = MPCTensor.generate_shares( secret=wraps, nr_parties=nr_parties, tensor_type=torch.long, encoder_precision=0 ) # We are always creating only an instance primitives_sequential = [(r_shares, theta_r_shares)] primitives = list( map(list, zip(*map(lambda x: map(list, zip(*x)), primitives_sequential))) ) return primitives
b16e21be2d421e134866df8929a319a19bdd304a
3,650,898
from typing import Sequence

import pandas as pd  # assumed alias for `pd` used below
from sklearn.feature_extraction.text import TfidfVectorizer  # assumed source
from sklearn.metrics.pairwise import cosine_similarity  # assumed source


def text_sim(
    sc1: Sequence,
    sc2: Sequence,
) -> float:
    """Returns the Text_Sim similarity measure between two pitch class sets.
    """
    # prime_form, text_set_class and allClasses are helpers/constants defined
    # elsewhere in the originating module.
    sc1 = prime_form(sc1)
    sc2 = prime_form(sc2)
    corpus = [text_set_class(x) for x in sorted(allClasses)]
    vectorizer = TfidfVectorizer()
    trsfm = vectorizer.fit_transform(corpus)
    text_similarity = cosine_similarity(trsfm)
    names = [str(x) for x in sorted(allClasses)]
    df = pd.DataFrame(text_similarity.round(3), columns=names, index=names)
    return df[str(sc1)][str(sc2)]
6479ad4916fb78d69935fb9b618c5eb02951f05a
3,650,899