Columns: content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
def get_pip_package_name(provider_package_id: str) -> str:
    """
    Returns PIP package name for the package id.

    :param provider_package_id: id of the package
    :return: the name of pip package
    """
    return "apache-airflow-providers-" + provider_package_id.replace(".", "-")
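# Illustrative usage (provider id chosen for illustration): a dotted provider
# id maps onto a dash-separated PyPI distribution name.
assert get_pip_package_name("apache.hive") == "apache-airflow-providers-apache-hive"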
e7aafbdfb0e296e60fedfcf7e4970d750e4f3ffa
3,657,462
import numpy


def func_asymmetry_f_b(z, flag_z: bool = False):
    """Function F_b(z) for asymmetry factor."""
    f_a, dder_f_a = func_asymmetry_f_a(z, flag_z=flag_z)
    res = 2 * (2 * numpy.square(z) - 3) * f_a
    dder = {}
    if flag_z:
        # d/dz [2 (2 z^2 - 3) F_a(z)] = 8 z F_a(z) + 2 (2 z^2 - 3) F_a'(z)
        dder["z"] = 8 * z * f_a + 2 * (2 * numpy.square(z) - 3) * dder_f_a["z"]
    return res, dder
5fc157856c379267c12137551f0eb5e6c4ddd3aa
3,657,463
from argparse import ArgumentParser


def parse_args():
    """Command-line argument parser for generating scenes."""
    # New parser
    parser = ArgumentParser(description='Monte Carlo rendering generator')

    # Rendering parameters
    parser.add_argument('-t', '--tungsten', help='tungsten renderer full path', default='tungsten', type=str)
    parser.add_argument('-d', '--scene-path', help='scene root path', type=str)
    parser.add_argument('-r', '--resolution', help='image resolution (w, h)', nargs='+', type=int)
    parser.add_argument('-s', '--spp', help='samples per pixel', default=16, type=int)
    parser.add_argument('-n', '--nb-renders', help='number of renders', default=10, type=int)
    parser.add_argument('--hdr-buffers', help='save buffers as hdr images', action='store_true')
    parser.add_argument('--hdr-targets', help='save targets as hdr images', action='store_true')
    parser.add_argument('-o', '--output-dir', help='output directory', default='../../data/renders', type=str)

    return parser.parse_args()
4fad89d60f5446f9dbd66f4624a43b9436ee97a5
3,657,464
def unique_id(token_id):
    """Return a unique ID for a token.

    The returned value is useful as the primary key of a database table,
    memcache store, or other lookup table.

    :returns: Given a PKI token, returns its hashed value. Otherwise,
              returns the passed-in value (such as a UUID token ID or an
              existing hash).
    """
    return cms.cms_hash_token(token_id)
9526e483f617728b4a9307bd10097c78ec361ad0
3,657,465
def encode_aval_types(df_param: pd.DataFrame, df_ret: pd.DataFrame,
                      df_var: pd.DataFrame, df_aval_types: pd.DataFrame):
    """
    Encodes the types of parameters, return values and variables according to
    the visible type hints.
    """
    types = df_aval_types['Types'].tolist()

    def trans_aval_type(x):
        for i, t in enumerate(types):
            if x in t:
                return i
        # If the arg type doesn't exist in the top-n available types, fall
        # back to the last index, which represents the "other" type.
        return len(types) - 1

    df_param['param_aval_enc'] = df_param['arg_type'].progress_apply(trans_aval_type)
    df_ret['ret_aval_enc'] = df_ret['return_type'].progress_apply(trans_aval_type)
    df_var['var_aval_enc'] = df_var['var_type'].progress_apply(trans_aval_type)

    return df_param, df_ret, df_var
a68ff812f69c264534daf16935d88f528ba35464
3,657,466
def first(iterable, default=None):
    """
    Returns the first item or a default value

    >>> first(x for x in [1, 2, 3] if x % 2 == 0)
    2
    >>> first((x for x in [1, 2, 3] if x > 42), -1)
    -1
    """
    return next(iter(iterable), default)
6907e63934967c332eea9cedb5e0ee767a88fe8f
3,657,467
def generate_uuid_from_wf_data(wf_data: np.ndarray, decimals: int = 12) -> str:
    """
    Creates a unique identifier from the waveform data, using a hash.

    Identical arrays yield identical strings within the same process.

    Parameters
    ----------
    wf_data:
        The data to generate the unique id for.
    decimals:
        The number of decimal places to consider.

    Returns
    -------
    :
        A unique identifier.
    """
    waveform_hash = hash(wf_data.round(decimals=decimals).tobytes())
    return str(waveform_hash)
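# Minimal sketch: arrays that agree to 12 decimals hash to the same id.
# Python's bytes hashing is salted per interpreter process (PYTHONHASHSEED),
# which is why the docstring restricts identity to "within the same process".
import numpy as np

a = np.array([0.1234567890123, 1.0])
b = a + 1e-14  # differs only beyond the 12th decimal place
assert generate_uuid_from_wf_data(a) == generate_uuid_from_wf_data(b)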
e12a6a8807d68181f0e04bf7446cf5e381cab3f9
3,657,468
def aggregate(table, key, aggregation=None, value=None, presorted=False,
              buffersize=None, tempdir=None, cache=True):
    """Group rows under the given key then apply aggregation functions.
    E.g.::

        >>> import petl as etl
        >>>
        >>> table1 = [['foo', 'bar', 'baz'],
        ...           ['a', 3, True],
        ...           ['a', 7, False],
        ...           ['b', 2, True],
        ...           ['b', 2, False],
        ...           ['b', 9, False],
        ...           ['c', 4, True]]
        >>> # aggregate whole rows
        ... table2 = etl.aggregate(table1, 'foo', len)
        >>> table2
        +-----+-------+
        | foo | value |
        +=====+=======+
        | 'a' |     2 |
        +-----+-------+
        | 'b' |     3 |
        +-----+-------+
        | 'c' |     1 |
        +-----+-------+

        >>> # aggregate single field
        ... table3 = etl.aggregate(table1, 'foo', sum, 'bar')
        >>> table3
        +-----+-------+
        | foo | value |
        +=====+=======+
        | 'a' |    10 |
        +-----+-------+
        | 'b' |    13 |
        +-----+-------+
        | 'c' |     4 |
        +-----+-------+

        >>> # alternative signature using keyword args
        ... table4 = etl.aggregate(table1, key=('foo', 'bar'),
        ...                        aggregation=list, value=('bar', 'baz'))
        >>> table4
        +-----+-----+-------------------------+
        | foo | bar | value                   |
        +=====+=====+=========================+
        | 'a' |   3 | [(3, True)]             |
        +-----+-----+-------------------------+
        | 'a' |   7 | [(7, False)]            |
        +-----+-----+-------------------------+
        | 'b' |   2 | [(2, True), (2, False)] |
        +-----+-----+-------------------------+
        | 'b' |   9 | [(9, False)]            |
        +-----+-----+-------------------------+
        | 'c' |   4 | [(4, True)]             |
        +-----+-----+-------------------------+

        >>> # aggregate multiple fields
        ... from collections import OrderedDict
        >>> import petl as etl
        >>>
        >>> aggregation = OrderedDict()
        >>> aggregation['count'] = len
        >>> aggregation['minbar'] = 'bar', min
        >>> aggregation['maxbar'] = 'bar', max
        >>> aggregation['sumbar'] = 'bar', sum
        >>> # default aggregation function is list
        ... aggregation['listbar'] = 'bar'
        >>> aggregation['listbarbaz'] = ('bar', 'baz'), list
        >>> aggregation['bars'] = 'bar', etl.strjoin(', ')
        >>> table5 = etl.aggregate(table1, 'foo', aggregation)
        >>> table5
        +-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
        | foo | count | minbar | maxbar | sumbar | listbar   | listbarbaz                          | bars      |
        +=====+=======+========+========+========+===========+=====================================+===========+
        | 'a' |     2 |      3 |      7 |     10 | [3, 7]    | [(3, True), (7, False)]             | '3, 7'    |
        +-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
        | 'b' |     3 |      2 |      9 |     13 | [2, 2, 9] | [(2, True), (2, False), (9, False)] | '2, 2, 9' |
        +-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
        | 'c' |     1 |      4 |      4 |      4 | [4]       | [(4, True)]                         | '4'       |
        +-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+

    If `presorted` is True, it is assumed that the data are already sorted by
    the given key, and the `buffersize`, `tempdir` and `cache` arguments are
    ignored. Otherwise, the data are sorted, see also the discussion of the
    `buffersize`, `tempdir` and `cache` arguments under the
    :func:`petl.transform.sorts.sort` function.
    """
    if callable(aggregation):
        return SimpleAggregateView(table, key, aggregation=aggregation,
                                   value=value, presorted=presorted,
                                   buffersize=buffersize, tempdir=tempdir,
                                   cache=cache)
    elif aggregation is None or isinstance(aggregation, (list, tuple, dict)):
        # ignore value arg
        return MultiAggregateView(table, key, aggregation=aggregation,
                                  presorted=presorted, buffersize=buffersize,
                                  tempdir=tempdir, cache=cache)
    else:
        raise ArgumentError('expected aggregation is callable, list, tuple, dict '
                            'or None')
22d857001d0dcadaed82a197101125e5ca922e07
3,657,469
def most_similar(sen, voting_dict):
    """
    Input: the last name of a senator, and a dictionary mapping senator names
    to lists representing their voting records.

    Output: the last name of the senator whose political mindset is most like
    the input senator (excluding, of course, the input senator him/herself).
    Resolve ties arbitrarily.

    Example:
        >>> vd = {'Klein': [1,1,1], 'Fox-Epstein': [1,-1,0], 'Ravella': [-1,0,0]}
        >>> most_similar('Klein', vd)
        'Fox-Epstein'

    Note that you can (and are encouraged to) re-use your policy_compare
    procedure.
    """
    most_sim = -1000
    most_sim_senator = ""
    for key, val in voting_dict.items():
        if key != sen:
            cmp = policy_compare(sen, key, voting_dict)
            if most_sim < cmp:
                most_sim = cmp
                most_sim_senator = key
    return most_sim_senator
6889d08af21d4007fa01dbe4946748aef0d9e3e6
3,657,471
def fixed_ro_bci_edge(ascentlat, lat_fixed_ro_ann,
                      zero_bounds_guess_range=np.arange(0.1, 90, 5)):
    """Numerically solve fixed-Ro, 2-layer BCI model of HC edge."""
    def _solver(lat_a, lat_h):
        # Reasonable to start guess at the average of the two given latitudes.
        init_guess = 0.5 * (lat_a + lat_h)
        return brentq_solver_sweep_param(
            _fixed_ro_bci_edge,
            lat_a,
            init_guess,
            zero_bounds_guess_range,
            funcargs=(lat_h,),
        )

    return xr.apply_ufunc(_solver, ascentlat, lat_fixed_ro_ann,
                          vectorize=True, dask="parallelized")
544c1747450cac52d161aa267a6332d4902798d1
3,657,472
def fresh_jwt_required(fn):
    """
    A decorator to protect a Flask endpoint.

    If you decorate an endpoint with this, it will ensure that the requester
    has a valid and fresh access token before allowing the endpoint to be
    called.

    See also: :func:`~flask_jwt_extended.jwt_required`
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        jwt_data = _decode_jwt_from_request(request_type='access')
        ctx_stack.top.jwt = jwt_data
        if not jwt_data['fresh']:
            raise FreshTokenRequired('Fresh token required')
        if not verify_token_claims(jwt_data[config.user_claims_key]):
            raise UserClaimsVerificationError('User claims verification failed')
        _load_user(jwt_data[config.identity_claim_key])
        return fn(*args, **kwargs)
    return wrapper
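# Illustrative usage, a sketch assuming a Flask app configured with
# Flask-JWT-Extended (the route name and response below are hypothetical):
@app.route('/change-password', methods=['POST'])
@fresh_jwt_required
def change_password():
    # Only reachable with a "fresh" token, i.e. one obtained from a direct
    # login rather than a refresh-token exchange.
    return jsonify(msg='password updated'), 200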
e5f30192c68018a419bb086522217ce86b27e6f6
3,657,473
import random


def random_small_number():
    """
    Generate a random decimal number.

    :return: a random float in [0.0, 1.0)
    """
    return random.random()
45143c2c78dc72e21cbbe0a9c10babd00100be77
3,657,474
import numpy as np


def get_sample(df, col_name, n=100, seed=42):
    """Get a sample from a column of a dataframe.

    It drops any numpy.nan entries before sampling. The sampling
    is performed without replacement.

    Example of numpydoc for those who haven't seen yet.

    Parameters
    ----------
    df : pandas.DataFrame
        Source dataframe.
    col_name : str
        Name of the column to be sampled.
    n : int
        Sample size. Default is 100.
    seed : int
        Random seed. Default is 42.

    Returns
    -------
    pandas.Series
        Sample of size n from dataframe's column.
    """
    np.random.seed(seed)
    random_idx = np.random.choice(df[col_name].dropna().index, size=n,
                                  replace=False)
    return df.loc[random_idx, col_name]
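# Minimal sketch of a call (toy dataframe, not from the source):
import pandas as pd

toy = pd.DataFrame({'score': [1.0, 2.0, float('nan'), 4.0, 5.0]})
sample = get_sample(toy, 'score', n=2)  # the NaN row is dropped before sampling
assert len(sample) == 2 and not sample.isna().any()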
a4fb8e1bbc7c11026b54b2ec341b85310596de13
3,657,475
def gen_mail_content(content, addr_from):
    """
    Build a new mail with a DKIM signature added, based on the mail body.

    @param content: string, the mail body
    @return str_mail: the new mail with DKIM added
    """
    try:
        domain = addr_from.split('@')[-1]
        dkim_info = get_dkim_info(domain)
        if dkim_info:
            content = repalce_mail(content, addr_from)
            selector, private = dkim_info
            private = private.replace('\r\n', '\n')
            dkim_sig = dkim.sign(content, selector, domain, private,
                                 include_headers=['From', 'To', 'Subject', 'Date'])
            dk_sig = domainkeys(dkim_sig + content, selector, domain, private,
                                include_heads=['From', 'To', 'Subject'])
            return dk_sig + dkim_sig + content
        else:
            return content
    except Exception as e:
        print(e, file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
        return content
90ad569f8f69b7fa39edab799b41522fddc3ce97
3,657,476
import warnings

import numpy as np


def autocov(ary, axis=-1):
    """Compute autocovariance estimates for every lag for the input array.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples

    Returns
    -------
    acov: Numpy array same size as the input array
    """
    axis = axis if axis > 0 else len(ary.shape) + axis
    n = ary.shape[axis]
    m = next_fast_len(2 * n)

    ary = ary - ary.mean(axis, keepdims=True)

    # added to silence tuple warning for a submodule
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        ifft_ary = np.fft.rfft(ary, n=m, axis=axis)
        ifft_ary *= np.conjugate(ifft_ary)

        shape = tuple(
            slice(None) if dim_len != axis else slice(0, n)
            for dim_len, _ in enumerate(ary.shape)
        )
        cov = np.fft.irfft(ifft_ary, n=m, axis=axis)[shape]
        cov /= n

    return cov
e17dcfcbdee37022a5ab98561287f891acfefaf6
3,657,477
def _optimize_rule_mip(
    set_opt_model_func,
    profile,
    committeesize,
    resolute,
    max_num_of_committees,
    solver_id,
    name="None",
    committeescorefct=None,
):
    """Compute rules, which are given in the form of an optimization problem, using Python MIP.

    Parameters
    ----------
    set_opt_model_func : callable
        sets constraints and objective and adds additional variables, see examples below for its
        signature
    profile : abcvoting.preferences.Profile
        approval sets of voters
    committeesize : int
        number of chosen alternatives
    resolute : bool
    max_num_of_committees : int
        maximum number of committees this method returns, value can be None
    solver_id : str
    name : str
        name of the model, used for error messages
    committeescorefct : callable
        a function used to compute the score of a committee

    Returns
    -------
    committees : list of sets
        a list of winning committees,
        each of them represented as set of integers from `0` to `num_cand`
    """
    maxscore = None
    committees = []

    if solver_id not in ["gurobi", "cbc"]:
        raise ValueError(f"Solver {solver_id} not known in Python MIP.")

    while True:
        model = mip.Model(solver_name=solver_id)

        # note: verbose = 1 causes issues with unittests, seems as if output is printed too late
        # and anyway the output does not seem to be very helpful
        model.verbose = 0

        # `in_committee` is a binary variable indicating whether `cand` is in the committee
        in_committee = [
            model.add_var(var_type=mip.BINARY, name=f"cand{cand}_in_committee")
            for cand in profile.candidates
        ]

        set_opt_model_func(
            model,
            profile,
            in_committee,
            committeesize,
        )

        # find a new committee that has not been found yet by excluding previously found committees
        for committee in committees:
            model += mip.xsum(in_committee[cand] for cand in committee) <= committeesize - 1

        # emphasis is optimality:
        # activates procedures that produce improved lower bounds, focusing in pruning the search
        # tree even if the production of the first feasible solutions is delayed.
        model.emphasis = 2
        model.opt_tol = ACCURACY
        model.max_mip_gap = ACCURACY
        model.integer_tol = ACCURACY

        status = model.optimize()

        if status not in [mip.OptimizationStatus.OPTIMAL, mip.OptimizationStatus.INFEASIBLE]:
            raise RuntimeError(
                f"Python MIP returned an unexpected status code: {status} "
                f"Warning: solutions may be incomplete or not optimal (model {name})."
            )
        elif status == mip.OptimizationStatus.INFEASIBLE:
            if len(committees) == 0:
                # we are in the first round of searching for committees
                # and Gurobi didn't find any
                raise RuntimeError(f"Python MIP found no solution (INFEASIBLE) (model {name})")
            break

        committee = set(
            cand
            for cand in profile.candidates
            if in_committee[cand].x >= 0.9
            # this should be >= 1 - ACCURACY, but apparently it is not necessarily the case that
            # integers are only ACCURACY apart from either 0 or 1
        )
        if len(committee) != committeesize:
            raise RuntimeError(
                f"_optimize_rule_mip produced a committee with "
                f"fewer than `committeesize` members (model {name})."
            )

        if committeescorefct is None:
            objective_value = model.objective_value  # numeric value from MIP
        else:
            objective_value = committeescorefct(profile, committee)  # exact value

        if maxscore is None:
            maxscore = objective_value
        elif (committeescorefct is not None and objective_value > maxscore) or (
            committeescorefct is None and objective_value > maxscore + CMP_ACCURACY
        ):
            raise RuntimeError(
                "Python MIP found a solution better than a previous optimum. This "
                f"should not happen (previous optimal score: {maxscore}, "
                f"new optimal score: {objective_value}, model {name})."
            )
        elif (committeescorefct is not None and objective_value < maxscore) or (
            committeescorefct is None and objective_value < maxscore - CMP_ACCURACY
        ):
            # no longer optimal
            break

        committees.append(committee)

        if resolute:
            break
        if max_num_of_committees is not None and len(committees) >= max_num_of_committees:
            return committees

    return committees
41c3ace270be4dcb4321e4eeedb23d125e6766c3
3,657,479
import itertools

import numpy as np


def Zuo_fig_3_18(verbose=True):
    """
    Input for Figure 3.18 in Zuo and Spence "Advanced TEM", 2017

    This input acts as an example as well as a reference

    Returns:
        dictionary: tags is the dictionary of all input and output parameters
        needed to reproduce that figure.
    """
    # INPUT
    # Create Silicon structure (Could be produced with Silicon routine)
    if verbose:
        print('Sample Input for Figure 3.18 in Zuo and Spence "Advanced TEM", 2017')
    tags = {'crystal_name': 'Silicon'}
    if verbose:
        print('tags[\'crystal\'] = ', tags['crystal_name'])
    a = 0.514  # nm
    tags['lattice_parameter_nm'] = a
    if verbose:
        print('tags[\'lattice_parameter_nm\'] =', tags['lattice_parameter_nm'])
    tags['unit_cell'] = [[a, 0, 0], [0, a, 0], [0, 0, a]]
    if verbose:
        print('tags[\'unit_cell\'] =', tags['unit_cell'])
    tags['elements'] = list(itertools.repeat('Si', 8))
    if verbose:
        print('tags[\'elements\'] =', tags['elements'])
    base = [(0., 0., 0.), (0.5, 0.0, 0.5), (0.5, 0.5, 0.), (0., 0.5, 0.5)]
    tags['base'] = np.array(base + (np.array(base) + (.25, .25, .25)).tolist())
    if verbose:
        print('tags[\'base\'] =', tags['base'])

    # Define Experimental Conditions
    tags['convergence_angle_mrad'] = 7
    tags['acceleration_voltage_V'] = 101.6 * 1000.0  # V
    if verbose:
        print('tags[\'acceleration_voltage_V\'] =', tags['acceleration_voltage_V'])
    tags['convergence_angle_mrad'] = 7.1  # mrad; 0 is parallel illumination
    if verbose:
        print('tags[\'convergence_angle_mrad\'] =', tags['convergence_angle_mrad'])
    tags['zone_hkl'] = np.array([-2, 2, 1])  # incident nearest zone axis: defines Laue Zones!!!!
    if verbose:
        print('tags[\'zone_hkl\'] =', tags['zone_hkl'])
    tags['mistilt'] = np.array([0, 0, 0])  # mistilt in degrees
    if verbose:
        print('tags[\'mistilt\'] =', tags['mistilt'])

    # Define Simulation Parameters
    tags['Sg_max'] = .2  # 1/nm maximum allowed excitation error
    if verbose:
        print('tags[\'Sg_max\'] =', tags['Sg_max'])
    tags['hkl_max'] = 9  # Highest evaluated Miller indices
    if verbose:
        print('tags[\'hkl_max\'] =', tags['hkl_max'])

    print('##################')
    print('# Output Options #')
    print('##################')

    # Output options
    tags['background'] = 'black'  # 'white' 'grey'
    if verbose:
        print('tags[\'background\'] =', tags['background'], '# \'white\', \'grey\' ')
    tags['color map'] = 'plasma'
    if verbose:
        print('tags[\'color map\'] =', tags['color map'], '#,\'cubehelix\',\'Greys\',\'jet\' ')
    tags['plot HOLZ'] = 1
    if verbose:
        print('tags[\'plot HOLZ\'] =', tags['plot HOLZ'])
    tags['plot HOLZ excess'] = 1
    if verbose:
        print('tags[\'plot HOLZ excess\'] =', tags['plot HOLZ excess'])
    tags['plot Kikuchi'] = 1
    if verbose:
        print('tags[\'plot Kikuchi\'] =', tags['plot Kikuchi'])
    tags['plot reflections'] = 1
    if verbose:
        print('tags[\'plot reflections\'] =', tags['plot reflections'])
    tags['label HOLZ'] = 0
    if verbose:
        print('tags[\'label HOLZ\'] =', tags['label HOLZ'])
    tags['label Kikuchi'] = 0
    if verbose:
        print('tags[\'label Kikuchi\'] =', tags['label Kikuchi'])
    tags['label reflections'] = 0
    if verbose:
        print('tags[\'label reflections\'] =', tags['label reflections'])
    tags['label color'] = 'black'
    if verbose:
        print('tags[\'label color\'] =', tags['label color'])
    tags['label size'] = 10
    if verbose:
        print('tags[\'label size\'] =', tags['label size'])
    tags['color Laue Zones'] = ['red', 'blue', 'green', 'blue', 'green']  # for OLZ give a sequence
    if verbose:
        print('tags[\'color Laue Zones\'] =', tags['color Laue Zones'], ' #[\'red\', \'blue\', \'lightblue\']')
    tags['color Kikuchi'] = 'green'
    if verbose:
        print('tags[\'color Kikuchi\'] =', tags['color Kikuchi'])
    tags['linewidth HOLZ'] = -1  # -1: linewidth according to intensity (structure factor F^2)
    if verbose:
        print('tags[\'linewidth HOLZ\'] =', tags['linewidth HOLZ'], '# -1: linewidth according to intensity '
                                                                    '(structure factor F^2)')
    tags['linewidth Kikuchi'] = -1  # -1: linewidth according to intensity (structure factor F^2)
    if verbose:
        print('tags[\'linewidth Kikuchi\'] =', tags['linewidth Kikuchi'], '# -1: linewidth according to intensity '
                                                                          '(structure factor F^2)')
    tags['color reflections'] = 'intensity'  # 'Laue Zone'
    if verbose:
        print('tags[\'color reflections\'] =', tags['color reflections'], '#\'Laue Zone\' ')
    tags['color zero'] = 'white'  # 'None', 'white', 'blue'
    if verbose:
        print('tags[\'color zero\'] =', tags['color zero'], '#\'None\', \'white\', \'blue\' ')
    tags['color ring zero'] = 'None'  # 'Red' #'white' #, 'None'
    if verbose:
        print('tags[\'color ring zero\'] =', tags['color ring zero'], '#\'None\', \'white\', \'Red\' ')

    print('########################')
    print('# End of Example Input #')
    print('########################\n\n')

    return tags
560272c4c28c8e0628403573dd76c0573ae9d937
3,657,480
def subscribe_feed(feed_link: str, title: str, parser: str, conn: Conn) -> str:
    """Return the feed_id if nothing wrong."""
    feed_id = new_feed_id(conn)
    conn.execute(
        stmt.Insert_feed,
        dict(
            id=feed_id,
            feed_link=feed_link,
            website="",
            title=title,
            author_name="",
            updated=arrow.now().format(RFC3339),
            notes="",
            parser=parser,
        ),
    )
    return feed_id
88a49ebaa4f766bfb228dc3ba271e8c98d50da99
3,657,481
def process_grid(procstatus, dscfg, radar_list=None):
    """
    Puts the radar data in a regular grid

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : string. Dataset keyword
            The data type where we want to extract the point measurement
        gridConfig : dictionary. Dataset keyword
            Dictionary containing some or all of this keywords:
            xmin, xmax, ymin, ymax, zmin, zmax : floats
                minimum and maximum horizontal distance from grid origin [km]
                and minimum and maximum vertical distance from grid origin [m]
                Defaults -40, 40, -40, 40, 0., 10000.
            hres, vres : floats
                horizontal and vertical grid resolution [m]
                Defaults 1000., 500.
            latorig, lonorig, altorig : floats
                latitude and longitude of grid origin [deg] and altitude of
                grid origin [m MSL]
                Defaults the latitude, longitude and altitude of the radar
        wfunc : str. Dataset keyword
            the weighting function used to combine the radar gates close to a
            grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST
            Default NEAREST
        roi_func : str. Dataset keyword
            the function used to compute the region of interest.
            Possible values: dist_beam, constant
        roi : float. Dataset keyword
            the (minimum) radius of the region of interest in m. Default half
            the largest resolution
        beamwidth : float. Dataset keyword
            the radar antenna beamwidth [deg]. If None that of the key
            radar_beam_width_h in attribute instrument_parameters of the radar
            object will be used. If the key or the attribute are not present
            a default 1 deg value will be used
        beam_spacing : float. Dataset keyword
            the beam spacing, i.e. the ray angle resolution [deg]. If None,
            that of the attribute ray_angle_res of the radar object will be
            used. If the attribute is None a default 1 deg value will be used
    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the gridded data
    ind_rad : int
        radar index
    """
    if procstatus != 1:
        return None, None

    field_names_aux = []
    for datatypedescr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
        field_names_aux.append(get_fieldname_pyart(datatype))

    ind_rad = int(radarnr[5:8]) - 1
    if (radar_list is None) or (radar_list[ind_rad] is None):
        warn('ERROR: No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    # keep only fields present in radar object
    field_names = []
    nfields_available = 0
    for field_name in field_names_aux:
        if field_name not in radar.fields:
            warn('Field name ' + field_name + ' not available in radar object')
            continue
        field_names.append(field_name)
        nfields_available += 1

    if nfields_available == 0:
        warn("Fields not available in radar data")
        return None, None

    # default parameters
    xmin = -40.
    xmax = 40.
    ymin = -40.
    ymax = 40.
    zmin = 0.
    zmax = 10000.
    hres = 1000.
    vres = 500.
    lat = float(radar.latitude['data'])
    lon = float(radar.longitude['data'])
    alt = float(radar.altitude['data'])

    if 'gridConfig' in dscfg:
        if 'xmin' in dscfg['gridConfig']:
            xmin = dscfg['gridConfig']['xmin']
        if 'xmax' in dscfg['gridConfig']:
            xmax = dscfg['gridConfig']['xmax']
        if 'ymin' in dscfg['gridConfig']:
            ymin = dscfg['gridConfig']['ymin']
        if 'ymax' in dscfg['gridConfig']:
            ymax = dscfg['gridConfig']['ymax']
        if 'zmin' in dscfg['gridConfig']:
            zmin = dscfg['gridConfig']['zmin']
        if 'zmax' in dscfg['gridConfig']:
            zmax = dscfg['gridConfig']['zmax']
        if 'hres' in dscfg['gridConfig']:
            hres = dscfg['gridConfig']['hres']
        if 'vres' in dscfg['gridConfig']:
            vres = dscfg['gridConfig']['vres']
        if 'latorig' in dscfg['gridConfig']:
            lat = dscfg['gridConfig']['latorig']
        if 'lonorig' in dscfg['gridConfig']:
            lon = dscfg['gridConfig']['lonorig']
        if 'altorig' in dscfg['gridConfig']:
            alt = dscfg['gridConfig']['altorig']

    wfunc = dscfg.get('wfunc', 'NEAREST')
    roi_func = dscfg.get('roi_func', 'dist_beam')

    # number of grid points in cappi
    nz = int((zmax - zmin) / vres) + 1
    ny = int((ymax - ymin) * 1000. / hres) + 1
    nx = int((xmax - xmin) * 1000. / hres) + 1

    min_radius = dscfg.get('roi', np.max([vres, hres]) / 2.)
    # parameters to determine the gates to use for each grid point
    beamwidth = dscfg.get('beamwidth', None)
    beam_spacing = dscfg.get('beam_spacing', None)

    if beamwidth is None:
        if (radar.instrument_parameters is not None and
                'radar_beam_width_h' in radar.instrument_parameters):
            beamwidth = radar.instrument_parameters[
                'radar_beam_width_h']['data'][0]
        else:
            warn('Unknown radar beamwidth. Default 1 deg will be used')
            beamwidth = 1

    if beam_spacing is None:
        if radar.ray_angle_res is not None:
            beam_spacing = radar.ray_angle_res['data'][0]
        else:
            warn('Unknown beam spacing. Default 1 deg will be used')
            beam_spacing = 1

    # cartesian mapping
    grid = pyart.map.grid_from_radars(
        (radar,), gridding_algo='map_to_grid',
        weighting_function=wfunc,
        roi_func=roi_func, h_factor=1.0, nb=beamwidth, bsp=beam_spacing,
        min_radius=min_radius, constant_roi=min_radius,
        grid_shape=(nz, ny, nx),
        grid_limits=((zmin, zmax), (ymin * 1000., ymax * 1000.),
                     (xmin * 1000., xmax * 1000.)),
        grid_origin=(lat, lon), grid_origin_alt=alt,
        fields=field_names)

    new_dataset = {'radar_out': grid}

    return new_dataset, ind_rad
b414fb327d3658cc6f9ba1296ec1226b5d2a7ff6
3,657,483
def float_to_16(value):
    """
    Convert a float value into a fixed-point number with 8 fractional bits.

    Returns a 16-bit integer, as value * 256.
    """
    value = int(round(value * 0x100, 0))
    return value & 0xffff
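# Worked example (values chosen for illustration): 1.5 * 256 = 384 = 0x0180,
# and negative values wrap into two's-complement form within 16 bits.
assert float_to_16(1.5) == 0x0180
assert float_to_16(-1.0) == 0xff00  # -256 & 0xffff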
0a587e4505c9c19b0cbdd2f94c8a964f2a5a3ccd
3,657,484
def create_keras_one_layer_dense_model(*,
                                       input_size,
                                       output_size,
                                       verbose=False,
                                       **kwargs
                                       ):
    """
    Notes:
        https://www.tensorflow.org/tutorials/keras/save_and_load
    """
    # ...................................................
    # Create model
    model = Sequential()

    # .. add fully connected layer
    model.add(Dense(
        input_dim=input_size,  # IE 784 PIXELS!
        units=output_size,
        activation=kwargs["out_activation"],
        kernel_regularizer=tf.keras.regularizers.l2(0.001),
        kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
    ))

    # Print network summary
    if verbose:
        print(model.summary())

    # ...................................................
    # Define Loss Function and Training Operation
    """
    # [option]: Use only default values,
    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    """
    model.compile(
        optimizer=kwargs["optimizer"],
        loss=losses.sparse_categorical_crossentropy,
        metrics=kwargs["metrics"]  # even one arg must be in the list
    )

    return model
d7d34d9981aac318bca5838c42ed7f844c27cfda
3,657,485
def API_encrypt(key, in_text, formatting: str = "Base64", nonce_type: str = "Hybrid"):
    """
    Returns: Input Text 147 Encrypted with Input Key.
    """
    try:
        # Ensure an Appropriate Encoding Argument is Provided.
        try:
            encoding = FORMATS[formatting]
        except KeyError:
            raise ValueError("Invalid Encoding Argument")

        # Generate Nonce Integer Based on Input Argument.
        nonce = gen_nonce(nonce_type)

        # Encode Text into Specified Encoding and Remove any Padding.
        encoded_text = convert_input(in_text, "encode", encoding)

        # Encode Key into Decimal Number (Base10).
        dec_key = key_convert(key, encoding)

        # Substitute Down Input Text.
        shifted_text = substitution(dec_key, nonce, encoded_text, encoding, "encrypt", "normal")

        # Randomly join Shifted Text and Nonce into one Text.
        full_text = pair_function(shifted_text, dec_key, encoding, nonce)

        # Substitute Up Input Text.
        return substitution(dec_key + 135, 147, full_text, encoding, "encrypt", "reverse")
    except Exception:
        raise ValueError(f"Encryption with Key: {key} Failed for Input: {in_text}")
d7336197ba1d32d89c8dc0c098bfbc20c795168d
3,657,486
def convert_log_dict_to_np(logs):
    """
    Take in logs and return params
    """
    # Init params
    n_samples_after_warmup = len(logs)
    n_grid = logs[0]['u'].shape[-1]
    u = np.zeros((n_samples_after_warmup, n_grid))
    Y = np.zeros((n_samples_after_warmup, n_grid))
    k = np.zeros((n_samples_after_warmup, n_grid))
    kl_trunc_errs = np.empty((n_samples_after_warmup, 1))
    n_stoch_disc = logs[0]['coefs'].shape[-1]  # e.g., n_alpha_indices for PCE, or kl_dim for KL-E
    coefs = np.empty((n_samples_after_warmup, n_grid, n_stoch_disc))
    stoch_dim = logs[0]['rand_insts'].shape[-1]
    rand_insts = np.empty((n_samples_after_warmup, stoch_dim))

    # Copy logs into params
    for n, log in enumerate(logs):
        k[n, :] = log['rand_param']
        Y[n, :] = log['Y']
        u[n, :] = log['u']
        kl_trunc_errs[n, 0] = log['kl_trunc_err']
        coefs[n, :, :] = log['coefs']
        rand_insts[n, :] = log['rand_insts']

    return k, Y, u, kl_trunc_errs, coefs, rand_insts
1962fa563ee5d741f7f1ec6453b7fd5693efeca2
3,657,487
from pathlib import Path
from typing import Callable


def map_links_in_markdownfile(
        filepath: Path,
        func: Callable[[Link], None]
) -> bool:
    """Applies the given function to every link in the file

    Arguments:
        filepath {Path} -- file path object
        func {Callable[[Link], None]} -- function that takes a Link and modifies it

    Returns:
        bool -- True if the file changed
    """
    content = filesystem.read_file(filepath)
    content = map_links_in_string(content, func)
    return filesystem.write_to_file(filepath, content)
4f6aa7ee5ecb7aed1df8551a69161305601d0489
3,657,489
def half_cell_t_2d_triangular_precursor(p, t):
    """Creates a precursor to horizontal transmissibility for prism grids (see notes).

    arguments:
        p (numpy float array of shape (N, 2 or 3)): the xy(&z) locations of cell vertices
        t (numpy int array of shape (M, 3)): the triangulation of p for which the
            transmissibility precursor is required

    returns:
        a pair of numpy float arrays, each of shape (M, 3) being the normal length and
        flow length relevant for flow across the face opposite each vertex as defined by t

    notes:
        this function acts as a precursor to the equivalent of the half cell
        transmissibility functions but for prism grids; for a resqpy VerticalPrismGrid,
        the triangulation can be shared by many layers with this function only needing
        to be called once;
        the first of the returned values (normal length) is the length of the triangle
        edge, in xy, when projected onto the normal of the flow direction; multiplying
        the normal length by a cell height will yield the area needed for
        transmissibility calculations;
        the second of the returned values (flow length) is the distance from the
        triangle centre to the midpoint of the edge and can be used as the distance
        term for a half cell transmissibility;
        this function does not account for dip, it only handles the geometric aspects
        of half cell transmissibility in the xy plane
    """
    assert p.ndim == 2 and p.shape[1] in [2, 3]
    assert t.ndim == 2 and t.shape[1] == 3

    # centre points of triangles, in xy
    centres = np.mean(p[t], axis=1)[:, :2]
    # midpoints of edges of triangles, in xy
    edge_midpoints = np.empty(tuple(list(t.shape) + [2]), dtype=float)
    edge_midpoints[:, 0, :] = 0.5 * (p[t[:, 1]] + p[t[:, 2]])[:, :2]
    edge_midpoints[:, 1, :] = 0.5 * (p[t[:, 2]] + p[t[:, 0]])[:, :2]
    edge_midpoints[:, 2, :] = 0.5 * (p[t[:, 0]] + p[t[:, 1]])[:, :2]
    # triangle edge vectors, projected in xy
    edge_vectors = np.empty(edge_midpoints.shape, dtype=float)
    edge_vectors[:, 0] = (p[t[:, 2]] - p[t[:, 1]])[:, :2]
    edge_vectors[:, 1] = (p[t[:, 0]] - p[t[:, 2]])[:, :2]
    edge_vectors[:, 2] = (p[t[:, 1]] - p[t[:, 0]])[:, :2]
    # vectors from triangle centres to mid points of edges (3 per triangle), in xy plane
    cem_vectors = edge_midpoints - centres.reshape((-1, 1, 2))
    cem_lengths = vec.naive_lengths(cem_vectors)
    # unit length vectors normal to cem_vectors, in the xy plane
    normal_vectors = np.zeros(edge_midpoints.shape)
    normal_vectors[:, :, 0] = cem_vectors[:, :, 1]
    normal_vectors[:, :, 1] = -cem_vectors[:, :, 0]
    normal_vectors = vec.unit_vectors(normal_vectors)
    # edge lengths projected onto normal vectors (length perpendicular to nominal flow direction)
    normal_lengths = np.abs(vec.dot_products(edge_vectors, normal_vectors))
    # return normal (cross-sectional) lengths and nominal flow direction lengths
    assert normal_lengths.shape == t.shape and cem_lengths.shape == t.shape
    return normal_lengths, cem_lengths
555f8b260f7c5b3f2e215378655710533ee344d5
3,657,490
def count_datavolume(sim_dict):
    """
    Extract from the given input the amount of time and the memory you need to
    process each simulation through the JWST pipeline

    :param dict sim_dict: Each key represents a set of simulations (a CAR
                          activity for instance), each value is a list of
                          simulations. Each simulation being a dict with
                          detailed info

    :return: Return (mem, time) where mem and time are dictionaries with the
             same keys as the input dict.
    :rtype: Memory is in GB, Time is in hours
    """
    mem_volume = {}  # Total memory required in GB
    time_volume = {}  # Pipeline estimated run time in s

    for (car, sim_list) in sim_dict.items():
        memory = []
        times = []
        for sim in sim_list:
            if "detector" in sim.keys():
                if sim["detector"] in ["IMAGER", "ALL"]:
                    tmp = {
                        "integrations": sim["ima_integrations"],
                        "frames": sim["ima_frames"],
                        "exposures": sim["exposures"],
                        "subarray": sim["subarray"],
                        "NDither": sim["NDither"],
                    }
                    (ram, time, nb_exps) = get_prediction(tmp)
                    # For each exposure we have one identical file to analyse
                    memory.extend([ram] * nb_exps)
                    times.extend([time] * nb_exps)
                if sim["detector"] in ["ALL", "MRS"]:
                    tmp = {
                        "integrations": sim["LW_integrations"],
                        "frames": sim["LW_frames"],
                        "exposures": sim["exposures"],
                        "subarray": "FULL",
                        "NDither": sim["NDither"],
                    }
                    (ram, time, nb_exps) = get_prediction(tmp)
                    memory.extend([ram] * nb_exps)
                    times.extend([time] * nb_exps)

                    tmp = {
                        "integrations": sim["SW_integrations"],
                        "frames": sim["SW_frames"],
                        "exposures": sim["exposures"],
                        "subarray": "FULL",
                        "NDither": sim["NDither"],
                    }
                    (ram, time, nb_exps) = get_prediction(tmp)
                    memory.extend([ram] * nb_exps)
                    times.extend([time] * nb_exps)
            else:
                (ram, time, nb_exps) = get_prediction(sim)
                memory.extend([ram] * nb_exps)
                times.extend([time] * nb_exps)

        mem_volume[car] = np.array(memory)
        time_volume[car] = np.array(times)

    return mem_volume, time_volume
dabe86d64be0342486d1680ee8e5a1cb72162550
3,657,491
def context():
    """Return an instance of the JIRA tool context."""
    return dict()
e24e859add22eef279b650f28dce4f6732c346b8
3,657,493
def dist_to_group(idx: int, group_type: str, lst):
    """
    A version of group_count that allows for sorting with solo agents.

    Sometimes entities don't have immediately adjacent neighbors. In that
    case, the value represents the distance to any neighbor, e.g. -1 means
    that an entity one to the left or right has a neighbor of that type.

    Args:
        idx (int): index in the list
        group_type (str): group type we care about matching
        lst ([type]): [description]
    """
    my_count = group_count(idx, group_type, lst)
    if my_count > 0:
        return my_count
    adjacent_counts = []
    l_neighbor_count = dist_to_group(idx - 1, group_type, lst) if idx > 0 else None
    r_neighbor_count = dist_to_group(idx + 1, group_type, lst) if idx < len(lst) - 1 else None
    for neighbor_count in (l_neighbor_count, r_neighbor_count):
        if neighbor_count is not None and neighbor_count != 0:
            if neighbor_count < 0:
                # The neighbor doesn't have any next directly to it either
                adjacent_counts.append(neighbor_count - 1)
            else:
                # The neighbor does have one next to it!
                adjacent_counts.append(neighbor_count)
    return max(adjacent_counts)
74ae510de4145f097fbf9daf406a6156933bae20
3,657,494
from typing import AnyStr
from typing import List
from typing import Dict

import sqlalchemy as sa
from sqlalchemy import bindparam


def get_nodes_rating(start: AnyStr,
                     end: AnyStr,
                     tenant_id: AnyStr,
                     namespaces: List[AnyStr]) -> List[Dict]:
    """
    Get the rating by node.

    :start (AnyStr) A timestamp, as a string, to represent the starting time.
    :end (AnyStr) A timestamp, as a string, to represent the ending time.
    :tenant_id (AnyStr) A string representing the tenant, only used by decorators.
    :namespaces (List[AnyStr]) A list of namespaces accessible by the tenant.

    Return the results of the query as a list of dictionary.
    """
    qry = sa.text("""
        SELECT frame_begin,
               sum(frame_price) as frame_price,
               node
        FROM frames
        WHERE frame_begin >= :start
            AND frame_end <= :end
            AND namespace != 'unspecified'
            AND pod != 'unspecified'
            AND namespace IN :namespaces
        GROUP BY frame_begin, node
        ORDER BY frame_begin, node
    """).bindparams(bindparam('namespaces', expanding=True))

    params = {
        'start': start,
        'end': end,
        'tenant_id': tenant_id,
        'namespaces': namespaces
    }
    return process_query(qry, params)
b75d35fc195b8317ed8b84ab42ce07339f2f1bf3
3,657,495
def f(OPL, R):
    """
    Restoration function calculated from the optical path length (OPL) and
    from the rational function parameters (R). The rational function is
    multiplied along the whole optical path.
    """
    x = 1
    for ii in range(len(OPL)):
        x = x * (OPL[ii] + R[ii][2]) / (R[ii][0] * OPL[ii] + R[ii][1])
    return x
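# Worked toy example (numbers are illustrative only): with a single path
# segment, OPL = [2.0] and R = [(1.0, 0.0, 0.0)] give (2 + 0) / (1*2 + 0) = 1.0.
assert f([2.0], [(1.0, 0.0, 0.0)]) == 1.0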
5b64b232646768d2068b114d112a8da749c84706
3,657,496
def _str_conv(number, rounded=False):
    """
    Convenience tool to convert a number, either float or int, into a string.

    If the int or float is None, returns empty string.

    >>> print(_str_conv(12.3))
    12.3
    >>> print(_str_conv(12.34546, rounded=1))
    12.3
    >>> print(_str_conv(None))
    <BLANKLINE>
    >>> print(_str_conv(1123040))
    11.2e5
    """
    if not number:
        return str(' ')
    if not rounded and isinstance(number, (float, int)):
        if number < 100000:
            string = str(number)
        else:
            exponent = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponent
            string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponent)
    elif rounded == 2 and isinstance(number, (float, int)):
        if number < 100000:
            string = '{0:.2f}'.format(number)
        else:
            exponent = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponent
            string = '{0:.2f}'.format(number / divisor) + 'e' + str(exponent)
    elif rounded == 1 and isinstance(number, (float, int)):
        if number < 100000:
            string = '{0:.1f}'.format(number)
        else:
            exponent = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponent
            string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponent)
    else:
        return str(number)
    return string
d352e8f0956b821a25513bf4a4eecfae5a6a7dcd
3,657,497
def build_eval_graph(input_fn, model_fn, hparams):
    """Build the evaluation computation graph."""
    dataset = input_fn(None)
    batch = dataset.make_one_shot_iterator().get_next()

    batch_holder = {
        "transform": tf.placeholder(
            tf.float32,
            [1, 1, hparams.n_parts, hparams.n_dims + 1, hparams.n_dims + 1]),
        "joint": tf.placeholder(tf.float32, [1, 1, hparams.n_parts, hparams.n_dims]),
        "point": tf.placeholder(tf.float32, [1, 1, None, hparams.n_dims]),
        "label": tf.placeholder(tf.float32, [1, 1, None, 1]),
    }
    latent_holder, latent, occ = model_fn(batch_holder, None, None, "gen_mesh")

    # Eval Summary
    iou_holder = tf.placeholder(tf.float32, [])
    best_holder = tf.placeholder(tf.float32, [])
    tf.summary.scalar("IoU", iou_holder)
    tf.summary.scalar("Best_IoU", best_holder)

    return {
        "batch_holder": batch_holder,
        "latent_holder": latent_holder,
        "latent": latent,
        "occ": occ,
        "batch": batch,
        "iou_holder": iou_holder,
        "best_holder": best_holder,
        "merged_summary": tf.summary.merge_all(),
    }
3f3d1425d08e964de68e99ea0c6cb4397975427a
3,657,498
def _encodeLength(length):
    """
    Encode length as a hex string.

    Args:
        length: the non-negative length to encode
    """
    assert length >= 0
    if length < hex160:
        return chr(length)
    s = ("%x" % length).encode()
    if len(s) % 2:
        s = "0" + s
    s = BinaryAscii.binaryFromHex(s)
    lengthLen = len(s)
    return chr(hex160 | lengthLen) + str(s)
fd85d5faf85da6920e4a0704118e41901f327d9c
3,657,499
def stemmer(stemmed_sent):
    """
    Stems each word in a tokenized sentence with the Porter stemmer.
    """
    porter = PorterStemmer()
    stemmed_sentence = []
    for word in literal_eval(stemmed_sent):
        stemmed_word = porter.stem(word)
        stemmed_sentence.append(stemmed_word)
    return stemmed_sentence
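# Illustrative call, assuming nltk's PorterStemmer and ast.literal_eval are the
# imported helpers. Note the input is a *string* holding a Python list, hence
# the literal_eval in the loop above.
from ast import literal_eval
from nltk.stem import PorterStemmer

print(stemmer("['running', 'flies', 'easily']"))  # -> ['run', 'fli', 'easili']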
96337684deb7846f56acf302d1e0d8c8ab9743dd
3,657,500
def _queue_number_priority(v):
    """Returns the task's priority.

    There's an overflow of 1 bit, as part of the timestamp overflows on the
    latter part of the year, so the result is between 0 and 330. See
    _gen_queue_number() for the details.
    """
    return int(_queue_number_order_priority(v) >> 22)
e61d6e1d04551ce55a533bfe7805f3358bb8d0ca
3,657,501
def test_generator_aovs(path):
    """Generate a function testing given `path`.

    :param path: gproject path to test
    :return: function
    """
    def test_func(self):
        """test render pass, render layer and AOV particularities"""
        assert path in g_parsed
        p = g_parsed[path]

        aov = grl_util.aov_node(p, 'RenderPass', 'Layer', 'Beauty')
        self.assertIsInstance(aov, guerilla_parser.GuerillaNode)
        self.assertEqual(aov.path, "|RenderPass|Layer|Input1")

        rp_iter = (n for n in p.nodes if n.type == 'RenderPass')
        for rp in rp_iter:
            rl_iter = (n for n in rp.children if n.type == 'RenderLayer')
            for rl in rl_iter:
                for aov in rl.children:
                    self.assertEqual(aov.type, "LayerOut")
                    aov_2 = grl_util.aov_node(p, rp.name, rl.name,
                                              aov.display_name)
                    self.assertIs(aov, aov_2)

    return test_func
a67b8f741a19f4d3733ab35699ef11a713e283b5
3,657,502
from typing import Union


def delimited_list(
    expr: Union[str, ParserElement],
    delim: Union[str, ParserElement] = ",",
    combine: bool = False,
    min: OptionalType[int] = None,
    max: OptionalType[int] = None,
    *,
    allow_trailing_delim: bool = False,
) -> ParserElement:
    """Helper to define a delimited list of expressions - the delimiter
    defaults to ','. By default, the list elements and delimiters can have
    intervening whitespace, and comments, but this can be overridden by
    passing ``combine=True`` in the constructor. If ``combine`` is set to
    ``True``, the matching tokens are returned as a single token string, with
    the delimiters included; otherwise, the matching tokens are returned as a
    list of tokens, with the delimiters suppressed. If ``allow_trailing_delim``
    is set to True, then the list may end with a delimiter.

    Example::

        delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    if isinstance(expr, str_type):
        expr = ParserElement._literalStringClass(expr)

    dlName = "{expr} [{delim} {expr}]...{end}".format(
        expr=str(expr.copy().streamline()),
        delim=str(delim),
        end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
    )

    if not combine:
        delim = Suppress(delim)

    if min is not None:
        if min < 1:
            raise ValueError("min must be greater than 0")
        min -= 1
    if max is not None:
        if min is not None and max <= min:
            raise ValueError("max must be greater than, or equal to min")
        max -= 1
    delimited_list_expr = expr + (delim + expr)[min, max]

    if allow_trailing_delim:
        delimited_list_expr += Opt(delim)

    if combine:
        return Combine(delimited_list_expr).set_name(dlName)
    else:
        return delimited_list_expr.set_name(dlName)
d1ac80f138a21ee21ecf76f918f1c7878863f80c
3,657,503
def get_minion_node_ips(k8s_conf):
    """
    Returns a list of IP addresses to all configured minion hosts

    :param k8s_conf: the configuration dict
    :return: a list of IPs
    """
    out = list()
    node_tuple_3 = get_minion_nodes_ip_name_type(k8s_conf)
    for hostname, ip, node_type in node_tuple_3:
        out.append(ip)
    return out
9a93ddcd025e605805a9693dd14d58c92f53dc42
3,657,504
def calculate_ri(column):
    """
    Calculates the radiant intensity.
    """
    return float(sc.h * sc.c / 1e-9 * np.sum(column))
eac136f520ebbad0ea11f506c742e75fc524c4bb
3,657,505
def find_kw_in_lines(kw, lines, addon_str=' = '):
    """
    Returns the index of a list of strings that had a kw in it

    Args:
        kw: Keyword to find in a line
        lines: List of strings to search for the keyword
        addon_str: String to append to your key word to help filter

    Return:
        i: Integer of the index of a line containing a kw. -1 otherwise
    """
    str_temp = '{}' + addon_str
    s = str_temp.format(kw)

    for i, line in enumerate(lines):
        uncommented = line.strip('#')
        if s in uncommented and s[0] == uncommented[0]:
            return i

    # No match anywhere in the lines
    return -1
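# Quick illustration (toy lines, not from the source): the keyword must start
# the uncommented portion of the line and be followed by ' = '.
lines = ['# density = 0.5', 'density = 0.7', 'temp = 270']
assert find_kw_in_lines('temp', lines) == 2
assert find_kw_in_lines('pressure', lines) == -1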
4b50c4eaecc55958fca6b134cc748d672c78d014
3,657,506
def delete_group(current_session, groupname):
    """
    Deletes a group
    """
    projects_to_purge = gp.get_group_projects(current_session, groupname)
    remove_projects_from_group(current_session, groupname, projects_to_purge)
    gp.clear_users_in_group(current_session, groupname)
    gp.clear_projects_in_group(current_session, groupname)
    gp.delete_group(current_session, groupname)
    return {"result": "success"}
1a27cec1c3273bb56564587823ad04565867277f
3,657,507
def label_smoothed_nll_loss(lprobs, target, epsilon: float = 1e-8, ignore_index=None):
    """Adapted from fairseq

    Parameters
    ----------
    lprobs
        Log probabilities of amino acids per position
    target
        Target amino acids encoded as integer indices
    epsilon
        Smoothing factor between 0 and 1, by default 1e-8
    ignore_index, optional
        Amino acid (encoded as integer) to ignore, by default None

    Returns
    -------
    Negative log-likelihood loss
    """
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss
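# Minimal PyTorch sketch (shapes are illustrative): lprobs is an (N, V)
# log-softmax output and target is an (N, 1) index tensor, matching the
# gather(dim=-1) call above.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 20)                # 4 positions, 20 amino-acid classes
lprobs = F.log_softmax(logits, dim=-1)
target = torch.randint(0, 20, (4, 1))
loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)
print(loss)  # scalar tensor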
eb09b7dd5c800b01b723f33cd0f7a84ae93b3489
3,657,508
import re


def parse_date(regexen, date_str):
    """
    Parse a messy string into a granular date

    `regexen` is of the form [ (regex, (granularity, groups -> datetime)) ]
    """
    if date_str:
        for reg, (gran, dater) in regexen:
            m = re.match(reg, date_str)
            if m:
                try:
                    return gran, dater(m.groups())
                except ValueError:
                    return 0, None
    return 0, None
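# Hypothetical regexen entry (the granularity value 4 is an illustrative
# convention, not from the source): a year-only pattern whose dater turns the
# captured groups into a datetime.
from datetime import datetime

regexen = [
    (r'(\d{4})$', (4, lambda groups: datetime(int(groups[0]), 1, 1))),
]
print(parse_date(regexen, '2019'))  # -> (4, datetime.datetime(2019, 1, 1, 0, 0))
print(parse_date(regexen, 'junk'))  # -> (0, None)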
a141cad6762556115699ca0327b801537bab1c7e
3,657,511
def PreNotebook(*args, **kwargs):
    """PreNotebook() -> Notebook"""
    val = _controls_.new_PreNotebook(*args, **kwargs)
    return val
1974d3ed08a6811a871f7e069c4b74b97cb32e35
3,657,512
def user_voted(message_id: int, user_id: int) -> bool:
    """Check whether a user voted on a detection report."""
    return bool(
        c.execute(
            """
            SELECT * FROM reports
            WHERE message_id=? AND user_id=?
            """,
            (message_id, user_id),
        ).fetchone()
    )
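# Minimal sketch of the assumed backing store: a module-level sqlite3 cursor
# `c` over a `reports` table. The table and column names are inferred from the
# query above; the schema here is illustrative only.
import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE reports (message_id INTEGER, user_id INTEGER)')
c.execute('INSERT INTO reports VALUES (1, 42)')
assert user_voted(1, 42) is True
assert user_voted(1, 7) is False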
baddfb69470699d611c050b6732d553f4f415212
3,657,513
import io


def get_values(wsdl_url, site_code, variable_code, start=None, end=None,
               suds_cache=("default",), timeout=None, user_cache=False):
    """
    Retrieves site values from a WaterOneFlow service using a GetValues request.

    Parameters
    ----------
    wsdl_url : str
        URL of a service's web service definition language (WSDL) description.
        All WaterOneFlow services publish a WSDL description and this url is
        the entry point to the service.
    site_code : str
        Site code of the site you'd like to get values for. Site codes MUST
        contain the network and be of the form <network>:<site_code>, as is
        required by WaterOneFlow.
    variable_code : str
        Variable code of the variable you'd like to get values for. Variable
        codes MUST contain the network and be of the form
        <vocabulary>:<variable_code>, as is required by WaterOneFlow.
    start : ``None`` or datetime (see :ref:`dates-and-times`)
        Start of the query datetime range. If omitted, data from the start of
        the time series to the ``end`` timestamp will be returned (but see
        caveat, in note below).
    end : ``None`` or datetime (see :ref:`dates-and-times`)
        End of the query datetime range. If omitted, data from the ``start``
        timestamp to end of the time series will be returned (but see caveat,
        in note below).
    suds_cache : ``None`` or tuple
        SOAP local cache duration for WSDL description and client object.
        Pass a cache duration tuple like ('days', 3) to set a custom duration.
        Duration may be in months, weeks, days, hours, or seconds.
        If unspecified, the default duration (1 day) will be used.
        Use ``None`` to turn off caching.
    timeout : int or float
        suds SOAP URL open timeout (seconds).
        If unspecified, the suds default (90 seconds) will be used.
    user_cache : bool
        If False (default), use the system temp location to store cache WSDL
        and other files. Use the default user ulmo directory if True.

    Returns
    -------
    site_values : dict
        a python dict containing values

    Notes
    -----
    If both ``start`` and ``end`` parameters are omitted, the entire time
    series available will typically be returned. However, some service
    providers will return an error if either start or end are omitted; this is
    specially true for services hosted or redirected by CUAHSI via the CUAHSI
    HydroPortal, which have a 'WSDL' url using the domain
    http://hydroportal.cuahsi.org. For HydroPortal, a start datetime of
    '1753-01-01' has been known to return valid results while catching the
    oldest start times, though the response may be broken up into chunks
    ('paged').
    """
    suds_client = _get_client(wsdl_url, suds_cache, timeout, user_cache)

    # Note from Emilio:
    #   Not clear if WOF servers really do handle time zones (time offsets or
    #   "Z" in the iso8601 datetime strings. In the past, I (Emilio) have
    #   passed naive strings to GetValues(). if a datetime object is passed to
    #   this ulmo function, the isodate code above will include it in the
    #   resulting iso8601 string; if not, no. Test effect of dt_isostr having
    #   a timezone code or offset, vs not having it (the latter, naive dt
    #   strings, is what I've been using all along)
    # the interpretation of start and end time zone is server-dependent
    start_dt_isostr = None
    end_dt_isostr = None
    if start is not None:
        start_datetime = util.convert_datetime(start)
        start_dt_isostr = isodate.datetime_isoformat(start_datetime)
    if end is not None:
        end_datetime = util.convert_datetime(end)
        end_dt_isostr = isodate.datetime_isoformat(end_datetime)

    waterml_version = _waterml_version(suds_client)

    response = suds_client.service.GetValues(
        site_code, variable_code, startDate=start_dt_isostr,
        endDate=end_dt_isostr)

    response_buffer = io.BytesIO(util.to_bytes(response))
    if waterml_version == '1.0':
        values = waterml.v1_0.parse_site_values(response_buffer)
    elif waterml_version == '1.1':
        values = waterml.v1_1.parse_site_values(response_buffer)

    if variable_code is not None:
        return list(values.values())[0]
    else:
        return values
57b9cbfbf713f5ac858a8d7a36464aae2a657757
3,657,514
def GetDot1xInterfaces():
    """Retrieves attributes of all dot1x compatible interfaces.

    Returns:
        Array of dict or empty array
    """
    interfaces = []
    for interface in GetNetworkInterfaces():
        if interface['type'] == 'IEEE80211' or interface['type'] == 'Ethernet':
            if (interface['builtin'] and
                    'AppleThunderboltIPPort' not in interface['bus']):
                interfaces.append(interface)
    return interfaces
829cc1badf5917cc6302847311e5c8ef6aeebc11
3,657,515
def get_v_l(mol, at_name, r_ea):
    """
    Returns a list of the l's, and a nconf x nl array of v_l values for each
    l: l = 0, 1, 2, ..., -1
    """
    vl = generate_ecp_functors(mol._ecp[at_name][1])
    v_l = np.zeros([r_ea.shape[0], len(vl)])
    for l, func in vl.items():  # -1, 0, 1, ...
        v_l[:, l] = func(r_ea)
    return vl.keys(), v_l
d987e5ceb28169d73ec23aaac2f7ab30a5e881c7
3,657,516
def search_transitions_in_freq_range(freq_min, freq_max, atomic_number,
                                     atomic_mass, n_min=1, n_max=1000,
                                     dn_min=1, dn_max=10, z=0.0,
                                     screening=False, extendsearch=None):
    """
    ---------------------------------------------------------------------------
    Search for electronic transitions of recombination lines at a specified
    redshift that lie within the specified frequency range

    Inputs:

    freq_min        [scalar] Minimum in the frequency range (Hz)

    freq_max        [scalar] Maximum in the frequency range (Hz)

    atomic_number   [integer] Atomic number of the atom. It is equal to the
                    number of protons in the nucleus. Must be positive and
                    greater than or equal to unity.

    atomic_mass     [integer] Atomic mass of the atom. It is equal to the sum
                    of the number of protons and neutrons in the nucleus. Must
                    be positive and greater than or equal to unity.

    n_min           [scalar] Minimum in the range of principal quantum numbers
                    of lower electron orbit to search for transitions. Must be
                    positive and greater than or equal to unity.

    n_max           [scalar] Maximum in the range of principal quantum numbers
                    of lower electron orbit to search for transitions. Must be
                    positive and greater than or equal to unity.

    dn_min          [scalar] Minimum in the range of difference in principal
                    quantum numbers to search for transitions. Must be
                    positive and greater than or equal to unity.

    dn_max          [scalar] Maximum in the range of difference in principal
                    quantum numbers to search for transitions. Must be
                    positive and greater than or equal to unity.

    z               [scalar or numpy array] The redshift (when positive) or
                    blueshift (when negative) by which the recombination lines
                    are shifted. Default=0

    screening       [boolean] If set to False (default), assume the effective
                    charge is equal to the number of protons. If set to True,
                    assume the charges from the nucleus are screened and the
                    effective nuclear charge is equal to unity.

    extendsearch    [None or dictionary] Specifies if the search should be
                    extended beyond the ranges for n and dn by calling this
                    function recursively. If set to None (default), the search
                    will not be extended. Otherwise, search will extend along
                    n and/or dn if in-range frequencies are found at the
                    specified boundaries of n and dn. This parameter must be
                    specified as a dictionary with the following keys and
                    values:
                    'n'     [None or list] If set to None, do not extend
                            search for more values of n. Otherwise it must be
                            a list containing one or both of the strings 'up'
                            and 'down'. If 'up' is present, extend search for
                            higher values of n from the previous iteration. If
                            'down' is present in the list, extend search for
                            values of n lower than specified in the range in
                            previous iteration.
                    'dn'    [None or list] If set to None, do not extend
                            search for more values of dn. Otherwise it must be
                            a list containing one or both of the strings 'up'
                            and 'down'. If 'up' is present, extend search for
                            higher values of dn from the previous iteration.
                            If 'down' is present in the list, extend search
                            for values of dn lower than specified in the range
                            in previous iteration.

    Output:

    Tuple of (n, dn, freq) where each of the elements in the tuple is an array
    such that the transitions of combinations of n and dn produces
    recombination lines for a given redshift in the specified frequency range.
    freq will be returned as an instance of class astropy.units.Quantity
    ---------------------------------------------------------------------------
    """
    try:
        freq_min, freq_max, atomic_number, atomic_mass
    except NameError:
        raise NameError('Inputs freq_min, freq_max, atomic_number, atomic_mass must be specified')

    if not isinstance(n_min, int):
        raise TypeError('Input n_min must be an integer')
    if n_min < 1:
        raise ValueError('Input n_min must be greater than 1')
    if not isinstance(n_max, int):
        raise TypeError('Input n_max must be an integer')
    if n_max < n_min:
        raise ValueError('Input n_max must be greater than n_min')

    if not isinstance(dn_min, int):
        raise TypeError('Input dn_min must be an integer')
    if dn_min < 1:
        raise ValueError('Input dn_min must be greater than 1')
    if not isinstance(dn_max, int):
        raise TypeError('Input dn_max must be an integer')
    if dn_max < dn_min:
        raise ValueError('Input dn_max must be greater than dn_min')

    if not isinstance(z, (int, float)):
        if isinstance(z, NP.ndarray):
            if z.size != 1:
                raise TypeError('Input z must be a scalar')
        else:
            raise TypeError('Input z must be a scalar')

    if not isinstance(freq_min, (int, float, units.Quantity)):
        raise TypeError('Input freq_min must be a scalar')
    if not isinstance(freq_min, units.Quantity):
        freq_min = freq_min * units.Hertz
    if freq_min <= 0.0 * units.Hertz:
        raise ValueError('Input freq_min must be positive')

    if not isinstance(freq_max, (int, float, units.Quantity)):
        raise TypeError('Input freq_max must be a scalar')
    if not isinstance(freq_max, units.Quantity):
        freq_max = freq_max * units.Hertz
    if freq_max <= freq_min:
        raise ValueError('Input freq_max must be greater than freq_min')

    if extendsearch is not None:
        if not isinstance(extendsearch, dict):
            raise TypeError('Input extendsearch must be a dictionary')
        for key in extendsearch:
            if extendsearch[key] is not None:
                if not isinstance(extendsearch[key], list):
                    raise TypeError('Value under key {0} of input dictionary extendsearch must be a list'.format(key))

    nvect = NP.arange(n_min, n_max + 1)
    dnvect = NP.arange(dn_min, dn_max + 1)
    ngrid, dngrid = NP.meshgrid(nvect, dnvect, indexing='ij')
    nu = redshifted_freq_recomb(atomic_number, atomic_mass, ngrid.reshape(-1),
                                dngrid.reshape(-1), z=z, screening=screening)
    nu = nu.reshape(nvect.size, dnvect.size, -1)
    ind_select = NP.where(NP.logical_and(nu >= freq_min, nu <= freq_max))
    nu_select = nu[ind_select]
    n_select = ngrid[:, :, NP.newaxis][ind_select]
    dn_select = dngrid[:, :, NP.newaxis][ind_select]

    nu_in_range = None
    n_in_range = None
    dn_in_range = None
    if nu_select.size > 0:
        if nu_in_range is not None:
            nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, nu_select.value)), nu_select.unit)
            n_in_range = NP.concatenate((n_in_range, n_select))
            dn_in_range = NP.concatenate((dn_in_range, dn_select))
        else:
            nu_in_range = nu_select.copy()
            n_in_range = NP.copy(n_select)
            dn_in_range = NP.copy(dn_select)

        if extendsearch is not None:
            new_extendsearch = None
            for key in extendsearch:
                if extendsearch[key] is not None:
                    if key == 'n':
                        if n_select.max() == n_max:
                            if 'up' in extendsearch[key]:
                                new_n_min = n_max + 1
                                new_n_max = 2 * n_max + 1 - n_min
                                if new_extendsearch is None:
                                    new_extendsearch = {key: ['up']}
                                elif key not in new_extendsearch:
                                    new_extendsearch[key] = ['up']
                                else:
                                    new_extendsearch[key] += ['up']
                                new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(
                                    freq_min, freq_max, atomic_number, atomic_mass,
                                    n_min=new_n_min, n_max=new_n_max, dn_min=dn_min,
                                    dn_max=dn_max, z=z, screening=screening,
                                    extendsearch=new_extendsearch)
                                if new_nu_select.size > 0:
                                    if nu_in_range is not None:
                                        nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, new_nu_select.value)), new_nu_select.unit)
                                        n_in_range = NP.concatenate((n_in_range, new_n_select))
                                        dn_in_range = NP.concatenate((dn_in_range, new_dn_select))
                                    else:
                                        nu_in_range = new_nu_select.copy()
                                        n_in_range = NP.copy(new_n_select)
                                        dn_in_range = NP.copy(new_dn_select)
                        if n_select.min() == n_min:
                            if 'down' in extendsearch[key]:
                                if n_min > 1:
                                    new_n_min = max([1, 2 * n_min - n_max - 1])
                                    new_n_max = n_max - 1
                                    if new_extendsearch is None:
                                        new_extendsearch = {key: ['down']}
                                    elif key not in new_extendsearch:
                                        new_extendsearch[key] = ['down']
                                    else:
                                        new_extendsearch[key] += ['down']
                                    new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(
                                        freq_min, freq_max, atomic_number, atomic_mass,
                                        n_min=new_n_min, n_max=new_n_max, dn_min=dn_min,
                                        dn_max=dn_max, z=z, screening=screening,
                                        extendsearch=new_extendsearch)
                                    if new_nu_select.size > 0:
                                        if nu_in_range is not None:
                                            nu_in_range = units.Quantity(NP.concatenate((new_nu_select.value, nu_in_range.value)), new_nu_select.unit)
                                            n_in_range = NP.concatenate((new_n_select, n_in_range))
                                            dn_in_range = NP.concatenate((new_dn_select, dn_in_range))
                                        else:
                                            nu_in_range = new_nu_select.copy()
                                            n_in_range = NP.copy(new_n_select)
                                            dn_in_range = NP.copy(new_dn_select)
                    if key == 'dn':
                        if dn_select.max() == dn_max:
                            if 'up' in extendsearch[key]:
                                new_dn_min = dn_max + 1
                                new_dn_max = 2 * dn_max + 1 - dn_min
                                if new_extendsearch is None:
                                    new_extendsearch = {key: ['up']}
                                elif key not in new_extendsearch:
                                    new_extendsearch[key] = ['up']
                                else:
                                    new_extendsearch[key] += ['up']
                                new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(
                                    freq_min, freq_max, atomic_number, atomic_mass,
                                    n_min=n_min, n_max=n_max, dn_min=new_dn_min,
                                    dn_max=new_dn_max, z=z, screening=screening,
                                    extendsearch=new_extendsearch)
                                if new_nu_select.size > 0:
                                    if nu_in_range is not None:
                                        nu_in_range = units.Quantity(NP.concatenate((nu_in_range.value, new_nu_select.value)), new_nu_select.unit)
                                        n_in_range = NP.concatenate((n_in_range, new_n_select))
                                        dn_in_range = NP.concatenate((dn_in_range, new_dn_select))
                                    else:
                                        nu_in_range = new_nu_select.copy()
                                        n_in_range = NP.copy(new_n_select)
                                        dn_in_range = NP.copy(new_dn_select)
                        if dn_select.min() == dn_min:
                            if 'down' in extendsearch[key]:
                                if dn_min > 1:
                                    new_dn_min = max([1, 2 * dn_min - dn_max - 1])
                                    new_dn_max = dn_max - 1
                                    if new_extendsearch is None:
                                        new_extendsearch = {key: ['down']}
                                    elif key not in new_extendsearch:
                                        new_extendsearch[key] = ['down']
                                    else:
                                        new_extendsearch[key] += ['down']
                                    new_n_select, new_dn_select, new_nu_select = search_transitions_in_freq_range(
                                        freq_min, freq_max, atomic_number, atomic_mass,
                                        n_min=n_min, n_max=n_max, dn_min=new_dn_min,
                                        dn_max=new_dn_max, z=z, screening=screening,
                                        extendsearch=new_extendsearch)
                                    if new_nu_select.size > 0:
                                        if nu_in_range is not None:
                                            nu_in_range = units.Quantity(NP.concatenate((new_nu_select.value, nu_in_range.value)), new_nu_select.unit)
                                            n_in_range = NP.concatenate((new_n_select, n_in_range))
                                            dn_in_range = NP.concatenate((new_dn_select, dn_in_range))
                                        else:
                                            nu_in_range = new_nu_select.copy()
                                            n_in_range = NP.copy(new_n_select)
                                            dn_in_range = NP.copy(new_dn_select)

    return (n_in_range, dn_in_range, nu_in_range)
bd5fc3873909ce3937b6e94db9f04edb94dab326
3,657,517
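A sketch of a driver for the search above; the values are hypothetical, and it assumes the module-level NP (numpy) and units (astropy.units) aliases plus the redshifted_freq_recomb helper used inside the function are available:

# Hypothetical usage: hydrogen (Z=1, A=1) alpha transitions (dn=1) whose
# recombination lines fall between 1 and 2 GHz at z=0.
n, dn, freq = search_transitions_in_freq_range(1e9, 2e9, 1, 1,
                                               n_min=50, n_max=300,
                                               dn_min=1, dn_max=1, z=0.0)
if freq is not None:
    print(n, dn, freq.to('GHz'))  # freq is an astropy Quantity, per the docstring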
async def test_async__rollback(): """Should rollback basic async actions""" state = {"counter": 0} async def incr(): state["counter"] += 1 return state["counter"] async def decr(): state["counter"] -= 1 async def fail(): raise ValueError("oops") try: with Saga() as saga: counter = await saga.action(incr, decr) assert counter == 1 counter = await saga.action(incr, decr) assert counter == 2 await saga.action(fail, noop) except SagaFailed as e: assert state["counter"] == 0 assert e.transaction.name == "3" assert e.__cause__.args == ("oops",)
54cc780b01190bfd2ea2aacc70e62e8f0b3dfa64
3,657,518
import requests


def is_referenced(url, id, catalog_info):
    """Given the url of a resource from the catalog, return True if the
    resource is directly downloadable (i.e. referenced by data.gouv.fr);
    otherwise raise an exception pointing to the dataset page.

    :param url: url of a resource in the catalog
    :type url: string
    :param id: identifier used to name the manually downloaded file
    :param catalog_info: catalog metadata containing the data.gouv.fr page url
    """
    dgf_page = catalog_info['url_dgf']
    headers = requests.head(url).headers
    downloadable = 'attachment' in headers.get('Content-Disposition', '')
    if not downloadable:
        raise Exception(f'This id is associated to a dataset not referenced by data.gouv.fr. \n '
                        f'Please download the dataset from here: {dgf_page}\n'
                        f'Then manually upload it in the corresponding folder and name it: {id}.csv')
    return downloadable
15cfa64979f2765d29d7c4bb60a7a017feb27d43
3,657,520
import glob import functools def create_sema3d_datasets(args, test_seed_offset=0): """ Gets training and test datasets. """ train_names = ['bildstein_station1', 'bildstein_station5', 'domfountain_station1', 'domfountain_station3', 'neugasse_station1', 'sg27_station1', 'sg27_station2', 'sg27_station5', 'sg27_station9', 'sg28_station4', 'untermaederbrunnen_station1'] valid_names = ['bildstein_station3', 'domfountain_station2', 'sg27_station4', 'untermaederbrunnen_station3'] #train_names = ['bildstein_station1', 'domfountain_station1', 'untermaederbrunnen_station1'] #valid_names = ['domfountain_station2', 'untermaederbrunnen_station3'] path = '{}/features_supervision/'.format(args.ROOT_PATH) if args.db_train_name == 'train': trainlist = [path + 'train/' + f + '.h5' for f in train_names] elif args.db_train_name == 'trainval': trainlist = [path + 'train/' + f + '.h5' for f in train_names + valid_names] testlist = [] if 'train' in args.db_test_name: testlist += [path + 'train/' + f + '.h5' for f in train_names] if 'val' in args.db_test_name: testlist += [path + 'train/' + f + '.h5' for f in valid_names] if 'testred' in args.db_test_name: testlist += [f for f in glob.glob(path + 'test_reduced/*.h5')] if 'testfull' in args.db_test_name: testlist += [f for f in glob.glob(path + 'test_full/*.h5')] return tnt.dataset.ListDataset(trainlist, functools.partial(graph_loader, train=True, args=args, db_path=args.ROOT_PATH)), \ tnt.dataset.ListDataset(testlist, functools.partial(graph_loader, train=False, args=args, db_path=args.ROOT_PATH, full_cpu = True))
8642c5a10a5256fb9541be86676073c993b2faf8
3,657,521
import numpy as np


def adjust_learning_rate(optimizer, step, args):
    """ Sets the learning rate to the initial LR decayed by gamma at every specified step/epoch
        Adapted from PyTorch Imagenet example:
        https://github.com/pytorch/examples/blob/master/imagenet/main.py
        step could also be epoch
    """
    schedule_list = np.array(args.schedule)
    decay = args.gamma ** (sum(step >= schedule_list))
    lr = args.lr * decay
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
359e2c5e0deb1abd156b7a954ecfae1b23511db2
3,657,522
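A minimal illustration of the schedule arithmetic; the args namespace and the optimizer stand-in are hypothetical (any objects exposing schedule/gamma/lr and param_groups work):

from types import SimpleNamespace

args = SimpleNamespace(schedule=[30, 60, 90], gamma=0.1, lr=0.1)

class _Opt:  # stand-in exposing the only attribute the function touches
    param_groups = [{'lr': 0.1}]

lr = adjust_learning_rate(_Opt(), 75, args)
assert abs(lr - 1e-3) < 1e-12  # two milestones (30, 60) passed -> 0.1 * 0.1**2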
import numpy as np


def sigmoid(z):
    """Sigmoid function."""
    return 1.0/(1.0+np.exp(-z))
80187d3711d18602a33d38edcc48eaad5c51818f
3,657,523
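Because it is written with np.exp, the function is vectorized for free:

z = np.array([-2.0, 0.0, 2.0])
print(sigmoid(z))  # approx. [0.1192 0.5 0.8808], symmetric about 0.5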
def beamformerFreq(steerVecType, boolRemovedDiagOfCSM, normFactor, inputTupleSteer, inputTupleCsm):
    """ Conventional beamformer in frequency domain. Use either a predefined
    steering vector formulation (see Sarradj 2012) or pass your own steering
    vector.

    Parameters
    ----------
    steerVecType : (one of the following strings: 'classic' (I), 'inverse' (II), 'true level' (III), 'true location' (IV), 'custom')
        Either build the steering vector via the predefined formulations
        I - IV (see :ref:`Sarradj, 2012<Sarradj2012>`) or pass it directly.
    boolRemovedDiagOfCSM : bool
        Should the diagonal of the csm be removed?
    normFactor : float
        In here both the signal energy loss factor (due to removal of the csm
        diagonal) as well as beamforming algorithm (music, capon, ...)
        dependent normalization factors are handled.
    inputTupleSteer : contains the information needed to create the steering
        vector. Is dependent of steerVecType. There are 2 cases:
        steerVecType != 'custom' :
            inputTupleSteer = (distGridToArrayCenter, distGridToAllMics, waveNumber) , with
            distGridToArrayCenter : float64[nGridpoints]
                Distance of all gridpoints to the center of sensor array
            distGridToAllMics : float64[nGridpoints, nMics]
                Distance of all gridpoints to all sensors of array
            waveNumber : float64
                The wave number
        steerVecType == 'custom' :
            inputTupleSteer = steeringVector , with
            steeringVector : complex128[nGridPoints, nMics]
                The steering vector of each gridpoint for the same frequency as the CSM
    inputTupleCsm : contains the data of measurement as a tuple. There are 2 cases:
        perform standard CSM-beamformer:
            inputTupleCsm = csm
            csm : complex128[ nMics, nMics]
                The cross spectral matrix for one frequency
        perform beamformer on eigenvalue decomposition of csm:
            inputTupleCsm = (eigValues, eigVectors) , with
            eigValues : float64[nEV]
                nEV is the number of eigenvalues which should be taken into
                account. All passed eigenvalues will be evaluated.
            eigVectors : complex128[nMics, nEV]
                Eigen vectors corresponding to eigValues. All passed
                eigenvector slices will be evaluated.

    Returns
    -------
    *Autopower spectrum beamforming map [nGridPoints]
    *steer normalization factor [nGridPoints]... contains the values the
     autopower needs to be multiplied with, in order to fulfill
     'steer^H * steer = 1' as needed for functional beamforming.

    Some Notes on the optimization of all subroutines
    -------------------------------------------------
    Reducing beamforming equation:
        Let the csm be C and the steering vector be h, then, using Linear
        Algebra, the conventional beamformer can be written as

        .. math:: B = h^H \\cdot C \\cdot h,

        with ^H meaning the complex conjugate transpose.
        When using that C is a hermitian matrix one can reduce the equation to

        .. math:: B = h^H \\cdot C_D \\cdot h + 2 \\cdot Real(h^H \\cdot C_U \\cdot h),

        where C_D and C_U are the diagonal part and upper part of C respectively.
    Steering vector:
        Theoretically the steering vector always includes the term
        "exp(distMicsGrid - distArrayCenterGrid)", but as the steering vector
        gets multiplied with its complex conjugation in all beamformer
        routines, the constant "distArrayCenterGrid" cancels out --> In order
        to save operations, it is not implemented.
    Spectral decomposition of the CSM:
        In Linear Algebra the spectral decomposition of the CSM matrix would be:

        .. math:: CSM = \\sum_{i=1}^{nEigenvalues} \\lambda_i (v_i \\cdot v_i^H) ,

        where lambda_i is the i-th eigenvalue and v_i is the
        eigenvector[nEigVal,1] belonging to lambda_i and ^H denotes the
        complex conjugate transpose. Using this, one does not need to build
        the whole CSM (which would be time consuming), but can drag the
        steering vector into the sum of the spectral decomposition. This
        saves a lot of operations.
    Squares:
        Seemingly "a * a" is slightly faster than "a**2" in numba
    Square of abs():
        Even though "a.real**2 + a.imag**2" would have fewer operations,
        modern processors seem to be optimized for "a * a.conj" and evaluate
        the latter slightly faster. Both versions are much faster than
        "abs(a)**2".
    Using Cascading Sums:
        When using the Spectral-Decomposition-Beamformer one could use numpy's
        cascading sums for the scalar product "eigenVec.conj * steeringVector".
        BUT (at the moment) this only brings benefits in computation time for
        a very small range of nMics (approx. 250) --> Therefore it is not
        implemented here.
    """
    boolIsEigValProb = isinstance(inputTupleCsm, tuple)  # len(inputTupleCsm) > 1

    # get the beamformer type (key-tuple = (isEigValProblem, formulationOfSteeringVector, RemovalOfCSMDiag))
    beamformerDict = {(False, 'classic', False): _freqBeamformer_Formulation1AkaClassic_FullCSM,
                      (False, 'classic', True): _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag,
                      (False, 'inverse', False): _freqBeamformer_Formulation2AkaInverse_FullCSM,
                      (False, 'inverse', True): _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag,
                      (False, 'true level', False): _freqBeamformer_Formulation3AkaTrueLevel_FullCSM,
                      (False, 'true level', True): _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag,
                      (False, 'true location', False): _freqBeamformer_Formulation4AkaTrueLocation_FullCSM,
                      (False, 'true location', True): _freqBeamformer_Formulation4AkaTrueLocation_CsmRemovedDiag,
                      (False, 'custom', False): _freqBeamformer_SpecificSteerVec_FullCSM,
                      (False, 'custom', True): _freqBeamformer_SpecificSteerVec_CsmRemovedDiag,
                      (True, 'classic', False): _freqBeamformer_EigValProb_Formulation1AkaClassic_FullCSM,
                      (True, 'classic', True): _freqBeamformer_EigValProb_Formulation1AkaClassic_CsmRemovedDiag,
                      (True, 'inverse', False): _freqBeamformer_EigValProb_Formulation2AkaInverse_FullCSM,
                      (True, 'inverse', True): _freqBeamformer_EigValProb_Formulation2AkaInverse_CsmRemovedDiag,
                      (True, 'true level', False): _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_FullCSM,
                      (True, 'true level', True): _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_CsmRemovedDiag,
                      (True, 'true location', False): _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_FullCSM,
                      (True, 'true location', True): _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_CsmRemovedDiag,
                      (True, 'custom', False): _freqBeamformer_EigValProb_SpecificSteerVec_FullCSM,
                      (True, 'custom', True): _freqBeamformer_EigValProb_SpecificSteerVec_CsmRemovedDiag}
    coreFunc = beamformerDict[(boolIsEigValProb, steerVecType, boolRemovedDiagOfCSM)]

    # prepare input
    if steerVecType == 'custom':  # beamformer with custom steering vector
        steerVec = inputTupleSteer
        nGridPoints = steerVec.shape[0]
    else:  # predefined beamformers (Formulation I - IV)
        distGridToArrayCenter, distGridToAllMics, waveNumber = inputTupleSteer
        if not isinstance(waveNumber, np.ndarray):
            waveNumber = np.array([waveNumber])
        nGridPoints = distGridToAllMics.shape[0]
    if boolIsEigValProb:
        eigVal, eigVec = inputTupleCsm
    else:
        csm = inputTupleCsm

    # beamformer routine: parallelized over grid points
    result = np.zeros(nGridPoints, np.float64)
    normalHelp = np.zeros_like(result)
    if steerVecType == 'custom':  # beamformer with custom steering vector
        if boolIsEigValProb:
            coreFunc(eigVal, eigVec, steerVec, normFactor, result, normalHelp)
        else:
            coreFunc(csm, steerVec, normFactor, result, normalHelp)
    else:  # predefined beamformers (Formulation I - IV)
        if boolIsEigValProb:
            coreFunc(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp)
        else:
            coreFunc(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp)
    return result, normalHelp
f747122b0dff9a7b966813062b93a1cab8a91f3f
3,657,524
def createNewPY():
    """Translate standard pinyin to TTS pinyin."""
    # IO: project-local helper module expected to provide readList(path);
    # it is not typing.IO, which has no readList method.
    py_trans = {}
    input_pinyin_list = IO.readList(r'docs/transTTSPinyin.txt')
    for line in input_pinyin_list:
        line_array = line.split(',')
        py_trans[line_array[0]] = line_array[1]
    return py_trans
e2bd5007cc217f72e3ffbeafd0ff75e18f8ec213
3,657,525
import re
from difflib import SequenceMatcher


def search_wheelmap(lat, lng, interval, name, n):
    """Searches for a place which matches the given name in the given
    coordinates range. Returns False if nothing is found."""
    # Calculate the bbox for the API call
    from_lat = lat - interval
    to_lat = lat + interval
    from_lng = lng - interval
    to_lng = lng + interval

    # Remove parentheses (better for search, generally)
    name = re.sub(r'\([^)]*\)', '', name)

    wheelmap_client = wheelmap.Wheelmap(env['WHEELMAP_API_KEY'])

    bbox = (from_lng, from_lat, to_lng, to_lat)
    nodes = wheelmap_client.nodes_collection(bbox=bbox, per_page=n)

    # max_node and max_name_match are holding the
    # best match through the SequenceMatcher after the loop
    max_node = None
    max_name_match = 0.0

    for node in nodes:
        if node.name and name:
            name_match = SequenceMatcher(None, node.name, name).ratio()
            if name_match > max_name_match:
                max_node = node
                max_name_match = name_match

    # Is the best match better than 60%?
    # If yes, let's take it. Otherwise nothing was found.
    if max_name_match > 0.6:
        return max_node
    else:
        return False
88dfbf973fbd4891a4d8bf955335177ca3654016
3,657,526
from typing import Dict def get_entity_contents(entity: Dict) -> Dict: """ :param entity: Entity is a dictionary :return: A dict representation of the contents of entity """ return { 'ID': entity.get('id'), 'Name': entity.get('name'), 'EmailAddress': entity.get('email_address'), 'Organization': entity.get('organization'), 'Tags': entity.get('labels'), 'StrictNameMatching': entity.get('strict_name_matching'), 'PolicyID': entity.get('policy_id'), 'Profile': entity.get('profile'), 'EntityGroupID': entity.get('entity_group', {}).get('id') if entity.get('entity_group') else None, 'EntityGroupName': entity.get('entity_group', {}).get('name') if entity.get('entity_group') else None, 'TypeID': entity.get('type', {}).get('id') if entity.get('type') else None, 'TypeName': entity.get('type', {}).get('name') if entity.get('type') else None }
3c9e133bf80bc4d59c6f663503b5083401acc4e0
3,657,527
def t68tot90(t68): """Convert from IPTS-68 to ITS-90 temperature scales, as specified in the CF Standard Name information for sea_water_temperature http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html temperatures are in degrees C""" t90 = 0.99976 * t68 return t90
87ff55a196f01b8f1afd78381e7d012eafa079fa
3,657,528
import numpy as np


def get_sort_accuracy_together(fake_ys, y):
    """
    Args:
        fake_ys (np.ndarray): with shape (n_results, n_sample,).
        y (np.ndarray): with shape (n_sample,).

    Returns:
        score (np.ndarray): with shape (n_results,)
    """
    y_sort = np.sort(y)
    y_sort2 = np.sort(y)[::-1]

    fake_ys = np.nan_to_num(fake_ys, nan=np.nan, posinf=np.nan, neginf=np.nan)
    mark = np.any(np.isnan(fake_ys), axis=1)
    fake_ys = np.nan_to_num(fake_ys, nan=-1, posinf=-1, neginf=-1)

    index = np.argsort(fake_ys, axis=1)
    y_pre_sort = y[index]
    acc1 = 1 - np.mean(np.abs(y_pre_sort - y_sort), axis=1)
    acc2 = 1 - np.mean(np.abs(y_pre_sort - y_sort2), axis=1)
    score = np.max(np.concatenate((acc1.reshape(1, -1), acc2.reshape(1, -1)), axis=0), axis=0)
    score[mark] = 0.0
    return score
4ba4810057bb936fdb5a94669796b0a260eeee49
3,657,529
def random_account_number(): """ Generate random encoded account number for testing """ _, account_number = create_account() return encode_verify_key(verify_key=account_number)
d662dc0acdc78f86baf2de998ab6ab920cc80ca0
3,657,530
def get_recommendation_summary_of_projects(project_ids, state, credentials): """Returns the summary of recommendations on all the given projects. Args: project_ids: List(str) project to which recommendation is needed. state: state of recommendations credentials: client credentials. """ recommender = build("recommender", "v1", credentials=credentials, cache_discovery=False) def get_metric(project_id): recommendation_metric = common.get_recommendations( project_id, recommender=recommender, state=state, credentials=credentials) return accounts_can_made_safe(project_id, state, recommendation_metric) recommendation_stats = common.rate_limit_execution(get_metric, RATE_LIMIT, project_ids) recommendation_stats_sorted = sorted( recommendation_stats, key=lambda metric: -sum(metric["stats"].values())) return recommendation_stats_sorted
68cd42e4465bbdc85d88b82cb345b64a4ec1fec8
3,657,531
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest, f_classif


def selection_filter(file_path):
    """
    Obtain the feature subset selected by a univariate filter method
    (e.g. f_classif, chi2, mutual_info_classif).
    """
    df = pd.read_csv(file_path)
    delete_list = ['id']
    df.drop(delete_list, axis=1, inplace=True)
    feature_attr = [i for i in df.columns if i not in ['label']]
    df.fillna(0, inplace=True)

    # feature preprocessing
    obj_attrs = []
    for attr in feature_attr:
        if df.dtypes[attr] == np.dtype(object):  # collect categorical columns
            obj_attrs.append(attr)
    if len(obj_attrs) > 0:
        df = pd.get_dummies(df, columns=obj_attrs)  # convert to dummy variables

    y = df.label
    X = df.drop('label', axis=1)
    model = SelectKBest(f_classif, k=108)
    X_new = model.fit_transform(X, y)
    df_X_new = pd.DataFrame(X_new)
    selected = []
    for i in X.columns:
        for j in df_X_new.columns:
            if np.sum(np.abs(X[i].values - df_X_new[j].values)) == 0:
                selected.append(i)
                break
    useful_list = sorted(set(X.columns.to_list()) - set(selected), key=X.columns.to_list().index)
    print(useful_list)
    selected.append('label')
    return selected
d6f6848c499f2d4899828e1e1bd0fb0ffe930186
3,657,532
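A note on the design: recovering the chosen columns by value-matching is quadratic in the number of features and can misidentify duplicated columns. scikit-learn's SelectKBest exposes the selected columns directly via get_support(); a minimal sketch of that alternative, assuming X and y prepared as in selection_filter above:

# Sketch: recover selected column names via the selector's boolean mask
model = SelectKBest(f_classif, k=108)
model.fit(X, y)
selected = X.columns[model.get_support()].tolist()
selected.append('label')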
def _process_voucher_data_for_order(cart): """Fetch, process and return voucher/discount data from cart.""" vouchers = Voucher.objects.active(date=date.today()).select_for_update() voucher = get_voucher_for_cart(cart, vouchers) if cart.voucher_code and not voucher: msg = pgettext( 'Voucher not applicable', 'Voucher expired in meantime. Task placement aborted.') raise NotApplicable(msg) if not voucher: return {} increase_voucher_usage(voucher) return { 'voucher': voucher, 'discount_amount': cart.discount_amount, 'discount_name': cart.discount_name, 'translated_discount_name': cart.translated_discount_name}
ec15f13607cee7e4bdd2e16f9a44904638964d36
3,657,533
def is_insertion(ref, alt): """Is alt an insertion w.r.t. ref? Args: ref: A string of the reference allele. alt: A string of the alternative allele. Returns: True if alt is an insertion w.r.t. ref. """ return len(ref) < len(alt)
17d7d6b8dfdf387e6dd491a6f782e8c9bde22aff
3,657,534
from typing import Optional


def identify_fast_board(switches: int, drivers: int) -> Optional[FastIOBoard]:
    """Instantiate and return the smallest FAST board capable of
    accommodating the given number of switches and drivers.

    Board capacities follow the class names: IO-0804 = 8 switches/4 drivers,
    IO-1616 = 16/16, IO-3208 = 32/8.
    """
    if switches <= 8 and drivers <= 4:
        return FastIO0804()
    if switches <= 16 and drivers <= 16:
        return FastIO1616()
    if switches <= 32 and drivers <= 8:
        return FastIO3208()
    return None
27c0dca3e0421c9b74976a947eda5d6437598c01
3,657,535
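Boundary checks for the selection logic above (the FastIO* classes are assumed to come from the surrounding codebase):

assert isinstance(identify_fast_board(8, 4), FastIO0804)    # smallest board fits
assert isinstance(identify_fast_board(16, 16), FastIO1616)  # needs the 16/16 board
assert isinstance(identify_fast_board(32, 8), FastIO3208)   # wide but driver-light
assert identify_fast_board(32, 9) is None                   # no board offers 32/9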
import struct


def encode_hop_data(
    short_channel_id: bytes, amt_to_forward: int, outgoing_cltv_value: int
) -> bytes:
    """Encode a legacy 'hop_data' payload to bytes

    https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md#legacy-hop_data-payload-format

    :param short_channel_id: the short channel id this hop relates to
    :param amt_to_forward: the amount to forward on this hop
    :param outgoing_cltv_value: the outgoing cltv value to use for this hop
    :return: the hop_data payload
    """
    # Bolt #4: The hop_data format is identified by a single 0x00-byte length, for
    # backward compatibility.
    hop_data = struct.pack(config.be_u8, 0x00)
    hop_data += short_channel_id
    hop_data += struct.pack(config.be_u64, amt_to_forward)
    hop_data += struct.pack(config.be_u32, outgoing_cltv_value)
    # [12*byte:padding]
    hop_data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    return hop_data
51fda780036fdcbb8ff1d5cd77b422aaf92eb4fd
3,657,536
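A quick sanity check of the byte layout, under the plausible assumption that config.be_u8/be_u64/be_u32 are the big-endian struct formats '>B', '>Q' and '>I', and that short channel ids are 8 bytes:

# Hypothetical check: realm(1) + scid(8) + amount(8) + cltv(4) + padding(12) = 33 bytes
payload = encode_hop_data(bytes(8), amt_to_forward=100_000, outgoing_cltv_value=640_000)
assert len(payload) == 1 + 8 + 8 + 4 + 12
assert payload[0] == 0x00  # legacy realm byte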
import numpy as np


def extract_all_patterns(game_state, action, mask, span):
    """
    Extracting the local forward model pattern for each cell of the grid's game-state and returning a numpy array
    :param game_state: game-state at time t
    :param action: player's action at time t
    :param mask: square pattern mask (boolean array to mark which tiles should be included).
    :param span: The span of the mask.
    :return: np.ndarray of observed patterns
    """
    data_set = np.zeros((game_state.shape[0]*game_state.shape[1], np.sum(mask)+1))

    # iterate over all positions of the grid's game-state
    positions = [(x, y) for x in range(game_state.shape[0]) for y in range(game_state.shape[1])]

    ext_game_state_grid = np.pad(game_state, span, "constant", constant_values=1)
    for i, (x, y) in enumerate(positions):
        el = ext_game_state_grid[span + x - span: span + x + span + 1,
                                 span + y - span: span + y + span + 1][mask].tolist()
        el.append(action)
        data_set[i, :] = el

    return data_set
06e44c871a14b7685ca5dd165285cfe2c7076b85
3,657,537
def cond(*args, **kwargs): """Conditional computation to run on accelerators.""" return backend()['cond'](*args, **kwargs)
969307c62bd4a2eef6b16dffff953910524cc3c1
3,657,540
def singleton(cls): """Decorator that provides singleton functionality. >>> @singleton ... class Foo(object): ... pass ... >>> a = Foo() >>> b = Foo() >>> a is b True """ _inst = [None] def decorated(*args, **kwargs): if _inst[0] is None: _inst[0] = cls(*args, **kwargs) return _inst[0] return decorated
4ae64aeaaba1b838232e4d7700d692dcc109be6d
3,657,542
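One subtlety worth noting: because the instance is cached, constructor arguments are only honored on the first call; later calls silently ignore them.

@singleton
class Config(object):
    def __init__(self, debug=False):
        self.debug = debug

a = Config(debug=True)
b = Config(debug=False)  # ignored: the cached instance is returned
assert a is b and b.debug is True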
import inspect def _with_factory(make_makers): """Return a decorator for test methods or classes. Args: make_makers (callable): Return an iterable over (name, maker) pairs, where maker (callable): Return a fixture (arbitrary object) given Factory as single argument """ def wrap(test_func): def wrapper(self, *args, **kwargs): factory = make_factory( self.addCleanup, test=self, root=None, makers=make_makers()) return test_func(self, factory, *args, **kwargs) return wrapper def deco(test_func_or_class): if inspect.isclass(test_func_or_class): class_ = test_func_or_class for name, method in inspect.getmembers(class_, is_test_method): wrapped_method = wrap(method) setattr(class_, name, wrapped_method) return class_ else: method = test_func_or_class return wrap(method) return deco
5841e80129b212bba2c6d0b1f89966fa0d5ce152
3,657,543
import time


def timeItDeco(func):
    """ Decorator which times the given function. """
    def timing(*args, **kwargs):
        """ This function will replace the original function. """
        # Start the clock (time.clock was removed in Python 3.8)
        t1 = time.perf_counter()
        # Run the original function and collect results
        result = func(*args, **kwargs)
        # Print out the execution time
        print('Execution time', time.perf_counter() - t1)
        return result
    # Return the function that was modified
    return timing
9c59a512a9cf9eac190af4a88dbf8ccab2069f55
3,657,544
def apply_haste(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn: """ Apply the effects of haste to the target: attack beats attack """ # "attack": {"beats": ["disrupt", "area", "attack"], "loses": ["block", "dodge"]} if left: # Remove attack from the attack: loses dict if "attack" in rules["attack"]["loses"]: rules["attack"]["loses"].remove("attack") # Add attack to the attack: beats dict if "attack" not in rules["attack"]["beats"]: rules["attack"]["beats"].append("attack") # "attack": {"beats": ["disrupt", "area"], "loses": ["block", "dodge", "attack"]} else: # Remove attack from the attack: beats dict if "attack" in rules["attack"]["beats"]: rules["attack"]["beats"].remove("attack") # Add attack to the attack: loses dict if "attack" not in rules["attack"]["loses"]: rules["attack"]["loses"].append("attack") return self, target, rules
0186fe8553cb89c73d9a3cfae35048cd465b9859
3,657,545
def get_mean_cube(datasets): """Get mean cube of a list of datasets. Parameters ---------- datasets : list of dict List of datasets (given as metadata :obj:`dict`). Returns ------- iris.cube.Cube Mean cube. """ cubes = iris.cube.CubeList() for dataset in datasets: path = dataset['filename'] cube = iris.load_cube(path) prepare_cube_for_merging(cube, path) cubes.append(cube) mean_cube = cubes.merge_cube() if len(cubes) > 1: mean_cube = mean_cube.collapsed(['cube_label'], iris.analysis.MEAN) mean_cube.remove_coord('cube_label') return mean_cube
492b5df11252beb691c62c58005ce2c3c1dcb3b8
3,657,546
from hashlib import blake2b


async def gen_unique_chk_sum(phone, message, first_dial):
    """Generates a checksum in order to identify every single call"""
    return blake2b(
        bytes(phone, encoding="utf-8")
        + bytes(message, encoding="utf-8")
        + bytes(str(first_dial), encoding="utf-8"),
        digest_size=4,
    ).hexdigest()
c85076f4fd1e2814116ece59390bebb9f398a4f6
3,657,547
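For reference, a digest_size of 4 yields an 8-character hex checksum; a minimal driver using plain asyncio (nothing project-specific):

import asyncio

checksum = asyncio.run(gen_unique_chk_sum("+41790000000", "hello", 1))
assert len(checksum) == 8  # 4 bytes -> 8 hex characters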
import json
from urllib.request import Request, urlopen
from urllib.error import URLError


def getQtipResults(version, installer):
    """ Get QTIP results """
    period = get_config('qtip.period')
    url_base = get_config('testapi.url')

    url = ("http://" + url_base + "?project=qtip" +
           "&installer=" + installer +
           "&version=" + version + "&period=" + str(period))
    request = Request(url)

    results = None
    try:
        response = urlopen(request)
        k = response.read()
        response.close()
        results = json.loads(k)['results']
    except URLError as err:
        print('Got an error code: {}'.format(err))

    result_dict = {}
    if results:
        for r in results:
            key = '{}/{}'.format(r['pod_name'], r['scenario'])
            if key not in result_dict.keys():
                result_dict[key] = []
            result_dict[key].append(r['details']['score'])

    # return scenario_results
    return result_dict
4ae01b33a2eed23a8d3ad7b7dd1d5a3bcc8d5ab8
3,657,548
def scaled_softplus(x, alpha, name=None): """Returns `alpha * ln(1 + exp(x / alpha))`, for scalar `alpha > 0`. This can be seen as a softplus applied to the scaled input, with the output appropriately scaled. As `alpha` tends to 0, `scaled_softplus(x, alpha)` tends to `relu(x)`. Note: the gradient for this operation is defined to depend on the backprop inputs as well as the outputs of this operation. Args: x: A `Tensor` of inputs. alpha: A scalar `Tensor`, indicating the amount of smoothness. The caller must ensure that `alpha > 0`. name: A name for the scope of the operations (optional). Returns: A tensor of same size and type as `x`. """ with ops.name_scope(name, 'scaled_softplus', [x, alpha]): x = ops.convert_to_tensor(x, name='x') dtype = x.dtype alpha = ops.convert_to_tensor(alpha, dtype=dtype, name='alpha') # Verify that alpha is a scalar. alpha.get_shape().assert_has_rank(0) def _grad(op, g): """Backprop for scaled softplus.""" y = op.outputs[0] alpha = op.inputs[1] # Prevent the expensive computations from happening before g is available. with ops.control_dependencies([g]): y /= alpha emy = math_ops.exp(-y) dy_dx = 1. - emy # The eps below avoids log(0). Note that t*log(t) -> 0 as t->0. eps = 1e-8 dy_dalpha = y * emy - dy_dx * math_ops.log(dy_dx + eps) return g * dy_dx, math_ops.reduce_sum(g * dy_dalpha) @function.Defun(dtype, dtype, func_name='ScaledSoftplus_%s' % dtype.name, shape_func=lambda op: [op.inputs[0].get_shape()], python_grad_func=_grad) def _forward(x, alpha): """Forward computation of scaled softplus.""" return alpha * nn.softplus(x / alpha) return _forward(x, alpha)
526c5169b1ac938e3f645e96dc7e65bb4acf64b5
3,657,549
def get_choice(options):
    """Print the available options and return the selected option as an
    integer; returns 0 if the input is not a valid integer."""
    print(options)
    try:
        return int(input("Por favor, escoja una opción: "))
    except ValueError:
        return 0
32e95e0113650d0b94449e5e31e7d8156ae85981
3,657,550
def _listminus(list1, list2):
    """ Return the elements of list1 that are not in list2, preserving order. """
    return [a for a in list1 if a not in list2]
3f05d8bfd4169d92bb51c4617536b54779b387c9
3,657,551
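The membership test above is O(len(list1) x len(list2)); for larger inputs a set makes it linear while keeping the order of list1. A minimal sketch (assumes the elements are hashable):

def _listminus_fast(list1, list2):
    """Same result as _listminus, but with O(1) membership tests."""
    exclude = set(list2)  # elements must be hashable
    return [a for a in list1 if a not in exclude]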
def pdf_to_hocr(path, lang="fra+deu+ita+eng", config="--psm 4"):
    """Load and transform a pdf into an hOCR file.

    Parameters
    ----------
    path : str, required
        The pdf's path
    lang: str, optional (default="fra+deu+ita+eng")
        Supported language of Pytesseract.
    config: str, optional (default = "--psm 4")
        Custom configuration flag used by Tesseract
    """
    try:
        import pytesseract  # noqa: F401
        from pdf2image import convert_from_bytes
    except ImportError:
        logger.error(
            "pytesseract and pdf2image have to be installed to use this function\n run `pip install -U pytesseract pdf2image`"
        )
        return

    with open(path, "rb") as f:
        images = convert_from_bytes(f.read(), dpi=300)

    return images_to_hocr(images)
9619d45dc418f07634fd161f1dff50b4cf334e21
3,657,552
import httpx async def fetch_cart_response(cart_id: str) -> httpx.Response: """Fetches cart response.""" headers = await get_headers() async with httpx.AsyncClient(base_url=CART_BASE_URL) as client: response = await client.get( url=f'/{cart_id}', headers=headers, ) try: response.raise_for_status() except httpx.HTTPStatusError: raise MoltinError(response.json()) # type: ignore return response
2d2da772b257b43beda78f3b08c42c914c01f00d
3,657,553
def is_namespace_mutable(context, namespace): """Return True if the namespace is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return namespace.owner == context.owner
f5303e75b975a1ba51aa39c608ec5af339917446
3,657,555
def get_schularten_by_veranst_iq_id(veranst_iq_id):
    """
    Returns the list of school types (Schularten) matching the event
    with the given veranst_iq_id.
    """
    query = session.query(Veranstaltung).add_entity(Schulart).join('rel_schulart')
    query = query.reset_joinpoint()
    query = query.filter_by(veranst_iq_id=veranst_iq_id)
    return query.all()
4c18b2fe73b17752ee2838815fa9fde8426a7ccb
3,657,556
def get_station_freqs(df, method='median'): """ apply to df after applying group_by_days and group_by_station """ #df['DATE'] = df.index.get_level_values('DATE') df['DAY'] = [d.dayofweek for d in df.index.get_level_values('DATE')] df['DAYNAME'] = [d.day_name() for d in df.index.get_level_values('DATE')] return df.groupby(['STATION', 'DAY','DAYNAME']).agg({'INS':method, 'OUTS':method})
aebc1a2486c48ff2d829fc70f1f2c4b38bd3017b
3,657,557
def faster_symbol_array(genome, symbol):
    """A faster calculation method for counting a symbol in genome.

    Args:
        genome (str): a DNA string as the search space.
        symbol (str): the single base to query in the search space.

    Returns:
        Dictionary, a dictionary, position-counts pairs of symbol in each genome sliding window.

    Examples:
        The symbol array for genome equal to "AAAAGGGG" and symbol equal to "A".

        >>> genome = 'AAAAGGGG'
        >>> symbol = 'A'
        >>> position_symbolcount_dict = faster_symbol_array(genome, symbol)
        >>> position_symbolcount_dict
        {0: 4, 1: 3, 2: 2, 3: 1, 4: 0, 5: 1, 6: 2, 7: 3}
    """
    array = {}
    n = len(genome)
    extended_genome = genome + genome[0:n//2]

    # look at the first half of Genome to compute first array value
    array[0] = pattern_count(symbol, genome[0:n//2])

    for i in range(1, n):
        # start by setting the current array value equal to the previous array value
        array[i] = array[i-1]

        # the current array value can differ from the previous array value by at most 1
        if extended_genome[i-1] == symbol:
            array[i] = array[i]-1
        if extended_genome[i+(n//2)-1] == symbol:
            array[i] = array[i]+1
    return array
a1bbf70a211adcee14573534b62b4a4af5abdebd
3,657,558
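The record above leans on a pattern_count helper that is not shown; a plausible minimal version with the assumed signature pattern_count(pattern, text), which also makes the doctest self-contained:

def pattern_count(pattern, text):
    """Count (possibly overlapping) occurrences of pattern in text."""
    return sum(1 for i in range(len(text) - len(pattern) + 1)
               if text[i:i + len(pattern)] == pattern)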
def makeArg(segID: int, N, CA, C, O, geo: ArgGeo) -> Residue:
    """Creates an Arginine residue"""
    ##R-Group
    CA_CB_length = geo.CA_CB_length
    C_CA_CB_angle = geo.C_CA_CB_angle
    N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle

    CB_CG_length = geo.CB_CG_length
    CA_CB_CG_angle = geo.CA_CB_CG_angle
    N_CA_CB_CG_diangle = geo.N_CA_CB_CG_diangle

    CG_CD_length = geo.CG_CD_length
    CB_CG_CD_angle = geo.CB_CG_CD_angle
    CA_CB_CG_CD_diangle = geo.CA_CB_CG_CD_diangle

    CD_NE_length = geo.CD_NE_length
    CG_CD_NE_angle = geo.CG_CD_NE_angle
    CB_CG_CD_NE_diangle = geo.CB_CG_CD_NE_diangle

    NE_CZ_length = geo.NE_CZ_length
    CD_NE_CZ_angle = geo.CD_NE_CZ_angle
    CG_CD_NE_CZ_diangle = geo.CG_CD_NE_CZ_diangle

    CZ_NH1_length = geo.CZ_NH1_length
    NE_CZ_NH1_angle = geo.NE_CZ_NH1_angle
    CD_NE_CZ_NH1_diangle = geo.CD_NE_CZ_NH1_diangle

    CZ_NH2_length = geo.CZ_NH2_length
    NE_CZ_NH2_angle = geo.NE_CZ_NH2_angle
    CD_NE_CZ_NH2_diangle = geo.CD_NE_CZ_NH2_diangle

    carbon_b = calculateCoordinates(
        N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle
    )
    CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")
    carbon_g = calculateCoordinates(
        N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle
    )
    CG = Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
    carbon_d = calculateCoordinates(
        CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle
    )
    CD = Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
    nitrogen_e = calculateCoordinates(
        CB, CG, CD, CD_NE_length, CG_CD_NE_angle, CB_CG_CD_NE_diangle
    )
    NE = Atom("NE", nitrogen_e, 0.0, 1.0, " ", " NE", 0, "N")
    carbon_z = calculateCoordinates(
        CG, CD, NE, NE_CZ_length, CD_NE_CZ_angle, CG_CD_NE_CZ_diangle
    )
    CZ = Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C")
    nitrogen_h1 = calculateCoordinates(
        CD, NE, CZ, CZ_NH1_length, NE_CZ_NH1_angle, CD_NE_CZ_NH1_diangle
    )
    NH1 = Atom("NH1", nitrogen_h1, 0.0, 1.0, " ", " NH1", 0, "N")
    nitrogen_h2 = calculateCoordinates(
        CD, NE, CZ, CZ_NH2_length, NE_CZ_NH2_angle, CD_NE_CZ_NH2_diangle
    )
    NH2 = Atom("NH2", nitrogen_h2, 0.0, 1.0, " ", " NH2", 0, "N")

    res = Residue((" ", segID, " "), "ARG", " ")
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(CG)
    res.add(CD)
    res.add(NE)
    res.add(CZ)
    res.add(NH1)
    res.add(NH2)
    return res
4539d48e37e7bacd637300136799b8f7b3dc635d
3,657,560
def shows_monthly_aggregate_score_heatmap(): """Monthly Aggregate Score Heatmap Graph""" database_connection.reconnect() all_scores = show_scores.retrieve_monthly_aggregate_scores(database_connection) if not all_scores: return render_template("shows/monthly-aggregate-score-heatmap/graph.html", years=None, scores=None) scores_list = [] years = list(all_scores.keys()) for year in all_scores: scores_list.append(list(all_scores[year].values())) return render_template("shows/monthly-aggregate-score-heatmap/graph.html", years=years, scores=scores_list)
4bf26e21c7d76be96395fce43228ee0a80930e4e
3,657,562
import requests


def run(string, entities):
    """Call a URL to create an API on GitHub"""

    # db = utils.db()['db']
    # query = utils.db()['query']
    # operations = utils.db()['operations']
    # apikey = utils.config('api_key')
    # playlistid = utils.config('playlist_id')
    # https://developers.google.com/youtube/v3/docs/playlistItems/list
    # url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=' + playlistid + '&key=' + apikey

    nombreapi = ''
    nombredata = ''
    result = ''

    for item in entities:
        if item['entity'] == 'elapi':
            nombreapi = item['sourceText'].lower()

    for item in entities:
        if item['entity'] == 'eldata':
            nombredata = item['sourceText'].lower()

    url = 'https://youtochipizarron.herokuapp.com/' + nombreapi + '_' + nombredata

    utils.output('inter', 'checking', utils.translate('checking', {
        'website_name': url
    }))

    # call the url to create a github api branch/repository
    try:
        r = utils.http('GET', url)

        # In case there is a problem like wrong settings
        # if 'error' in r.json():
        #     error = r.json()['error']['errors'][0]
        #     return utils.output('settings_error', 'settings_error', utils.translate('settings_errors', {
        #         'reason': error['reason'],
        #         'message': error['message']
        #     }))

        # items = r.json()['rooms']
        result += utils.translate('list_element', {
            'repository_url': url,
            'repository_name': nombreapi + '_' + nombredata
        })
    except requests.exceptions.RequestException as e:
        return utils.output('request_error', 'request_error', utils.translate('request_errors'))

    # Will synchronize the content (because "end" type) if synchronization enabled
    return utils.output('end', 'success', utils.translate('success', {
        'nuevoapi': nombreapi,
        'nuevodata': nombredata,
        'result': result
    }))
6a3a9899e8081c655e9a7eabc3e96f103a77a6bd
3,657,563
import numpy as np
import scipy.constants as sc


def gamma(surface_potential, temperature):
    """Calculate term from Gouy-Chapman theory.

    Arguments:
        surface_potential: Electrostatic potential at the metal/solution boundary in Volts, e.g. 0.05 [V]
        temperature: Temperature of the solution in Kelvin, e.g. 300 [K]

    Returns:
        float
    """
    # Gouy-Chapman: gamma = tanh(e * psi_0 / (4 * k_B * T)), with k_B the
    # Boltzmann constant
    product = sc.elementary_charge * surface_potential / (4 * sc.Boltzmann * temperature)
    return np.tanh(product)
b8996f01bb221a5cd2f6c222d166a61f1759845f
3,657,564
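For the docstring's example values (0.05 V at 300 K), e*psi/(4*kB*T) is about 0.48, so the result is tanh(0.48), roughly 0.45:

g = gamma(0.05, 300.0)
assert 0.44 < g < 0.46  # tanh(~0.48)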
def calculate_mask(maskimage, masks):
    """Compute a boolean mask for the image, optionally combining the given
    mask strings, and write it to maskimage."""
    dims = list(maskimage.slices2shape())
    maskdata = np.ones(dims, dtype='bool')
    if masks:
        dataslices = utils.slices2dataslices(maskimage.slices)
        maskdata = utils.string_masks(masks, maskdata, dataslices)
    maskimage.write(data=maskdata, slices=maskimage.slices)
    return maskdata
4935cacb3689b844ab119ec3b24b9e59b7db7ec3
3,657,565
def Range(lo, hi, ctx = None): """Create the range regular expression over two sequences of length 1 >>> range = Range("a","z") >>> print(simplify(InRe("b", range))) True >>> print(simplify(InRe("bb", range))) False """ lo = _coerce_seq(lo, ctx) hi = _coerce_seq(hi, ctx) return ReRef(Z3_mk_re_range(lo.ctx_ref(), lo.ast, hi.ast), lo.ctx)
cb9cf3a334ba8509a54226c86c555257092a0951
3,657,566
import numpy def quantile(data, num_breaks): """ Calculate quantile breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): """ function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """ def _quantiles1D(data,m,p): x = numpy.sort(data.compressed()) n = len(x) if n == 0: return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True) elif n == 1: return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask) aleph = (n*p + m) k = numpy.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] # Initialization & checks --------- data = numpy.ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") # if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = numpy.ma.masked # p = numpy.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) # Computes quantiles along axis (or globally) if (axis is None): return _quantiles1D(data, m, p) return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p) return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
24486e39fcefb9e6cf969067836d1793b9f4a7c8
3,657,567
def extract_conformers_from_rdkit_mol_object(mol_obj, conf_ids): """ Generate xyz lists for all the conformers in conf_ids :param mol_obj: Molecule object :param conf_ids: (list) list of conformer ids to convert to xyz :return: (list(list(cgbind.atoms.Atom))) """ conformers = [] for i in range(len(conf_ids)): mol_block_lines = Chem.MolToMolBlock(mol_obj, confId=conf_ids[i]).split('\n') atoms = [] for line in mol_block_lines: split_line = line.split() if len(split_line) == 16: atom_label, x, y, z = split_line[3], split_line[0], split_line[1], split_line[2] atoms.append(Atom(atom_label, float(x), float(y), float(z))) conformer = BaseStruct() conformer.set_atoms(atoms) conformers.append(conformer) if len(conformers) == 0: raise CgbindCritical('Length of conformer xyz list was 0. RDKit failed') return conformers
821977c0be57441b5146c9d5ef02a19320cf5b91
3,657,568
def create_embedding(name: str, env_spec: EnvSpec, *args, **kwargs) -> Embedding: """ Create an embedding to use with sbi. :param name: identifier of the embedding :param env_spec: environment specification :param args: positional arguments forwarded to the embedding's constructor :param kwargs: keyword arguments forwarded to the embedding's constructor :return: embedding instance """ if name == LastStepEmbedding.name: embedding = LastStepEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == DeltaStepsEmbedding.name: embedding = DeltaStepsEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == BayesSimEmbedding.name: embedding = BayesSimEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == DynamicTimeWarpingEmbedding.name: embedding = DynamicTimeWarpingEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == RNNEmbedding.name: embedding = RNNEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) elif name == AllStepsEmbedding.name: embedding = AllStepsEmbedding(env_spec, RolloutSamplerForSBI.get_dim_data(env_spec), *args, **kwargs) else: raise pyrado.ValueErr( given_name=name, eq_constraint=f"{LastStepEmbedding.name}, {DeltaStepsEmbedding.name}, {BayesSimEmbedding.name}, " f"{DynamicTimeWarpingEmbedding.name}, or {RNNEmbedding.name}", ) return embedding
70f4651f5815f008670de08805249d0b9dfc39e9
3,657,569
def _init_allreduce_operators(length, split_indices): """ initialize allreduce communication operators""" indices = split_indices[0] fusion = split_indices[1] op_list = () j = 0 for i in range(length): if j <= len(indices)-1: temp = indices[j] else: temp = length if i >= temp: j = j + 1 fusion = fusion + 1 op = AllReduce('sum', GlobalComm.WORLD_COMM_GROUP) op.add_prim_attr('fusion', fusion) op_list = op_list + (op,) return op_list
91f752e049394b27340553830dce70074ef7ed81
3,657,570
def get_valid_fields(val: int, cs: dict) -> set: """ A value is valid if there's at least one field's interval which contains it. """ return { field for field, intervals in cs.items() if any(map(lambda i: i[0] <= val <= i[1], intervals)) }
3016e78637374eadf7d0e2029d060538fea86377
3,657,571
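A small illustration with a hypothetical constraints dict mapping field names to lists of closed intervals:

cs = {"row": [(1, 5), (10, 14)], "seat": [(3, 7)]}
assert get_valid_fields(4, cs) == {"row", "seat"}
assert get_valid_fields(8, cs) == set()   # 8 falls outside every interval
assert get_valid_fields(12, cs) == {"row"}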
import glob
import re

import h5py
import numpy as np
from sklearn.model_selection import train_test_split


def load_data_multiview(_path_features, _path_lables, coords, joints, cycles=3, test_size=0.1):
    """Generate multi-view train/test data from gait cycles.

    Args:
        _path_features (str): Path to gait sequence file
        _path_lables (str): Path to labels of corresponding gait sequence
        coords (int): Number of co-ordinates representing each joint in gait cycle
        joints (int): Number of joints in the gait sequence
        cycles (int, optional): Time duration of gait cycle. Defaults to 3.
        test_size (float, optional): Ratio of test data. Defaults to 0.1.

    Returns:
        [list]: train and test data
    """
    feature_files = glob.glob(_path_features)
    label_files = glob.glob(_path_lables)
    print(f'---> Number of files = {len(feature_files)}')

    # sorting files so that features and labels files match
    feature_files.sort()
    label_files.sort()

    angle_regex = re.compile(r'(\d*).h5')
    folder_regex = re.compile(r'(\w*)\/')

    all_data_train = []
    all_data_test = []
    all_labels_train = []
    all_labels_test = []
    all_angles_train = []
    all_angles_test = []
    for feature_file, label_file in zip(feature_files, label_files):
        ff = h5py.File(feature_file, 'r')
        fl = h5py.File(label_file, 'r')
        angle = int(angle_regex.search(feature_file).group(1))
        folder = folder_regex.findall(feature_file)[-1]
        print(f"--->> processing - {folder} - {angle}")

        data_list = []
        num_samples = len(ff.keys())
        time_steps = 0
        labels = np.empty(num_samples)
        for si in range(num_samples):
            ff_group_key = list(ff.keys())[si]
            data_list.append(list(ff[ff_group_key]))  # Get the data
            time_steps_curr = len(ff[ff_group_key])
            if time_steps_curr > time_steps:
                time_steps = time_steps_curr
            labels[si] = fl[list(fl.keys())[si]][()]

        data = np.empty((num_samples, time_steps*cycles, joints*coords))
        for si in range(num_samples):
            data_list_curr = np.tile(
                data_list[si], (int(np.ceil(time_steps / len(data_list[si]))), 1))
            for ci in range(cycles):
                data[si, time_steps * ci:time_steps * (ci + 1), :] = data_list_curr[0:time_steps]

        data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size=test_size)
        all_data_train.extend(data_train)
        all_data_test.extend(data_test)
        all_labels_train.extend(labels_train)
        all_labels_test.extend(labels_test)
        all_angles_train.extend([angle]*len(labels_train))
        all_angles_test.extend([angle]*len(labels_test))

    return data, labels, \
        all_data_train, all_labels_train, \
        all_data_test, all_labels_test, \
        all_angles_train, all_angles_test
574ca69bf6a6637b4ca53de05f8e792844e134bb
3,657,572
def T_ncdm(omega_ncdm, m_ncdm): # RELICS ONLY? """Returns T_ncdm as a function of omega_ncdm, m_ncdm. omega_ncdm : relative relic abundance. Unitless. m_ncdm : relic mass in units [eV]. T_ncdm : relic temperature in units [K] """ T_ncdm = (np.power( cf.NEUTRINO_SCALE_FACTOR * omega_ncdm / m_ncdm, 1./3.) * cf.RELIC_TEMP_SCALE) return T_ncdm
c3db4e4d2ac226f12afca3077bbc3436bd7a0459
3,657,573
import binascii

from Crypto import Random
from Crypto.Cipher import AES


def generate_initialisation_vector():
    """Generates an initialisation vector for encryption."""
    initialisation_vector = Random.new().read(AES.block_size)
    return (initialisation_vector,
            int(binascii.hexlify(initialisation_vector), 16))
4c05067d86cbf32de7f07b5d7483811c46307b64
3,657,575
def assign_score(relevant_set):
    """Assign a score (3, 2, 1) to each relevant element in descending order
    and return the score list."""
    section = len(relevant_set) // 3
    score = []
    s = 3
    for i in range(3):
        if s == 1:
            num = len(relevant_set) - len(score)
            score.extend([s] * num)
        else:
            score.extend([s] * section)
        s -= 1
    return score
76a43780e1d1f37f7e0220ff0a0ca2ec484dd036
3,657,576