Columns: content (string, lengths 35–762k), sha1 (string, length 40), id (int64, 0–3.66M)
import pandas as pd


def read_sj_out_tab(filename):
    """Read an SJ.out.tab file as produced by the RNA-STAR aligner into a
    pandas DataFrame.

    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the SJ.out.tab file you want to read in.

    Returns
    -------
    sj : pandas.DataFrame
        Dataframe of splice junctions.
    """
    def int_to_intron_motif(n):
        if n == 0:
            return 'non-canonical'
        if n == 1:
            return 'GT/AG'
        if n == 2:
            return 'CT/AC'
        if n == 3:
            return 'GC/AG'
        if n == 4:
            return 'CT/GC'
        if n == 5:
            return 'AT/AC'
        if n == 6:
            return 'GT/AT'

    # COLUMN_NAMES is expected to be defined at module level.
    sj = pd.read_table(filename, header=None, names=COLUMN_NAMES, low_memory=False)
    sj.intron_motif = sj.intron_motif.map(int_to_intron_motif)
    sj.annotated = sj.annotated.map(bool)
    sj.strand = sj.strand.astype('object')
    sj.strand = sj.strand.apply(lambda x: ['unk', '+', '-'][x])
    # See https://groups.google.com/d/msg/rna-star/B0Y4oH8ZSOY/NO4OJbbUU4cJ for
    # the definition of strand in SJout files.
    sj = sj.sort_values(by=['chrom', 'start', 'end'])
    return sj
bc96813e1e69c8017f7ad0e5c945d4bf8c17e645
3,651,600
def gc_subseq(seq, k=2000):
    """Return the GC content of non-overlapping sub-sequences of size k.
    The result is a list."""
    res = []
    for i in range(0, len(seq) - k + 1, k):
        subseq = seq[i:i + k]
        gc = calculate_gc(subseq)
        res.append(gc)
    return res  # return the full list, not just the last GC value
9c2208f9dad291689ef97556e8aaa69213be6470
3,651,601
import re
import os
from urllib.parse import quote
from urllib.request import urlopen


def get_links_from_page(text: str = None) -> set:
    """
    extract the links from the HTML
    :param text: the search term
    :return: a set of links
    :rtype: set
    """
    # logger and SUPPORTED_IMAGE_TYPES are expected to be defined at module level.
    links = set()
    link_pattern = re.compile('img.src=.+')  # todo expand this to get href's
    # link_pattern = re.compile(r'href=*')
    if text:
        text = quote(text)
        url = "https://www.flickr.com/search/?text=%s" % text
    else:
        url = "https://www.flickr.com/search/"
    logger.info("url: %s", url)
    try:
        response = urlopen(url)
        data = response.read().decode('utf-8')
    except Exception:
        logger.error('url: %s', url, exc_info=True)
        return links
    # logger.info("data: %s", data)
    for line in data.splitlines():
        # logger.info("line: %s", line)
        img_data = link_pattern.search(line)  # seems best to step through the lines
        # img_data = link_pattern.search(data)
        if img_data:
            # input('found something: %s' % img_data)
            # logger.info("img_data: %s", img_data)
            # logger.info("line: %s", line)
            link = line.split('=')[1].replace("'", '').strip(';').lower()
            ext = os.path.splitext(link)[1]
            # logger.info('ext: %s', ext)
            if ext in SUPPORTED_IMAGE_TYPES:
                links.add(link)
    logger.info("%s %s links: %s", len(links), text, links)
    return links
cc1d982a0dcfd5cab640ceecd9c715807b41c083
3,651,602
def pcursor():
    """Database cursor."""
    dbconn = get_dbconn("portfolio")
    return dbconn.cursor()
50a19e3837a3846f10c44bcbb61933786d5bf84b
3,651,603
import argparse


def get_args_from_command_line():
    """Parse the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--HITId", type=str)
    parser.add_argument("--worker_id", type=str)
    args = parser.parse_args()
    return args
2e018c2cec3fbdd305185fc9f1190416f0d13137
3,651,604
import math


def truncate(f, n):
    """Floors float to n-digits after comma."""
    return math.floor(f * 10 ** n) / 10 ** n
ae7e935a7424a15c02f7cebfb7de6ca9b4c715c0
3,651,605
import json def process_simple_summary_csv(out_f, in_f, rundate): """Scan file and compute sums for 2 columns""" df = panda.read_csv(in_f) FORMATTING_FILE = "ColumnFormatting.json" with open(FORMATTING_FILE) as json_data: column_details = json.load(json_data) # this dictionary will contain information about individual column data type DAYS = 30 """ Depending on the amount of detail in the report from PAI this dataframe may include more than one row for each location. When only one row contains information on an individual location then the file lacks any indication of WHEN the report covers. We can get the date of the report being created by PAI from the filename but the time range covered by the report won't be included. When the report has MULTIPLE lines for each LOCATION then each line contains a datestring. These datestrings can be converted to datetimes and sorted to find the earliest and latest dates in the report. print(dft2) # sample dataframe of timestamps 0 2019-12-01 1 2019-12-02 2 2019-12-03 3 2019-12-04 4 2019-12-05 5 2019-12-06 6 2019-12-07 7 2019-12-08 8 2019-12-09 9 2019-12-10 10 2019-12-11 11 2019-12-12 12 2019-12-13 13 2019-12-14 14 2019-12-15 15 2019-12-16 16 2019-12-17 17 2019-12-18 18 2019-12-19 19 2019-12-20 20 2019-12-21 21 2019-12-22 22 2019-12-23 23 2019-12-24 24 2019-12-25 25 2019-12-26 26 2019-12-27 27 2019-12-28 28 2019-12-29 29 2019-12-30 30 2019-12-31 Name: Settlement Date, dtype: datetime64[ns] dft2.astype(str).max() '2019-12-31' dft2.astype(str).min() '2019-12-01' """ try: # TODO standardize the function that strips extra characters from a numeric string # e.g. df[?] = strip2float(df[?]) # try to recognize as many standard strings as possible. $1 ($1) -$1 $-1,234.876 etc df["Surch"].replace("[\$,)]", "", regex=True, inplace=True) df["Surch"] = df["Surch"].astype(float) except KeyError as e: logger.error(f"KeyError in dataframe: {e}") return False try: df["Settlement"].replace("[\$,)]", "", regex=True, inplace=True) df["Settlement"] = df["Settlement"].astype(float) except KeyError as e: logger.error(f"KeyError in dataframe: {e}") return False try: df["WD Trxs"] = df["WD Trxs"].astype(float) except KeyError as e: logger.error(f"KeyError in dataframe: {e}") return False def calc(row): """Calculate the surcharge earned per withdrawl.""" wd = row["WD Trxs"] if wd > 0: return round(row["Surch"] / wd, 2) else: return 0 try: df["Surcharge amt"] = df.apply(lambda row: calc(row), axis=1) except KeyError as e: logger.error(f"KeyError in dataframe: {e}") return False def avgWD(row): """Calculate the average amount of withdrawls.""" wd = row["WD Trxs"] if wd > 0: return round(row["Settlement"] / wd, 2) else: return 0 try: df["Average WD amount"] = df.apply(lambda row: avgWD(row), axis=1) except KeyError as e: logger.error(f"KeyError in dataframe: {e}") return False def DailyWD(row): """Assuming 30 days in report data calculate daily withdrawl total.""" return round(row["Settlement"] / DAYS, 2) try: df["Daily Vault AVG"] = df.apply(lambda row: DailyWD(row), axis=1) except KeyError as e: logger.error(f"KeyError in dataframe: {e}") return False # work is finished. Drop unneeded columns from output # TODO expand this to drop all columns except those desired in the report df = df.drop(["Settlement Date"], axis=1) # df.columns is zero-based panda.Index # sort the data df = df.sort_values("Surch", ascending=False) indx = 0 return {f"Outputfile{indx}.xlsx": df}
9f95acf84115a5fe6eb87de2e0910097bc4f2f10
3,651,606
import math

import numpy as np


def rotY(theta):
    """Return the rotation matrix R such that R*v -> v', where v' is v rotated
    about the y axis through theta. theta is in radians. rotY = Ry'"""
    st = math.sin(theta)
    ct = math.cos(theta)
    return np.matrix([[ct,  0., st],
                      [0.,  1., 0.],
                      [-st, 0., ct]])
1ed327485f9861eb8cf045a60f0a7352de1b4b25
3,651,607
def get_core_blockdata(core_index, splitcore_index, core_bases):
    """
    Get Core Offset and Length

    :param core_index: Index of the Core
    :param splitcore_index: Index of last core before split
    :param core_bases: Array with base offset and offset after split
    :return: Array with core offset and core length
    """
    core_base = int(core_bases[0])
    core_len = int(core_bases[1])
    core_split = 0
    if len(core_bases) > 4:
        core_split = int(core_bases[4])
    core_offset = core_base + core_index * core_len
    if core_split and core_index + 2 > splitcore_index:
        core_offset = core_split + (core_index - splitcore_index + 1) * core_len
    return [core_offset, core_len]
85efb96fa45ecfa3f526374c677e57c70e3dc617
3,651,608
def make_bench_verify_token(alg): """ Return function which will generate token for particular algorithm """ privk = priv_keys[alg].get('default', priv_key) token = jwt.generate_jwt(payload, privk, alg, timedelta(days=1)) def f(_): """ Verify token """ pubk = pub_keys[alg].get('default', pub_key) jwt.verify_jwt(token, pubk, [alg]) return f
4e7da537ab7027711d338d6d3155c198c371391b
3,651,609
def status():
    """ Status of the API """
    return jsonify({'status': 'OK'})
579c265c88ac8e2c3b5d19000564e90f106be3f5
3,651,610
def calc_median(input_list):
    """Sort the list and return the median."""
    new_list = sorted(input_list)
    len_list = len(new_list)
    if len_list % 2 == 0:
        # integer division so the results can be used as list indices
        return (new_list[len_list // 2 - 1] + new_list[len_list // 2]) / 2
    else:
        return new_list[len_list // 2]
28c0331d1f2dab56d50d63fa59d4dda79a177057
3,651,611
def _load_eigenvalue(h5_result, log): """Loads a RealEigenvalue""" class_name = _cast(h5_result.get('class_name')) table_name = '???' title = '' nmodes = _cast(h5_result.get('nmodes')) if class_name == 'RealEigenvalues': obj = RealEigenvalues(title, table_name, nmodes=nmodes) elif class_name == 'ComplexEigenvalues': obj = ComplexEigenvalues(title, table_name, nmodes) elif class_name == 'BucklingEigenvalues': obj = BucklingEigenvalues(title, table_name, nmodes=nmodes) else: log.warning(' %r is not supported...skipping' % class_name) return None assert obj.class_name == class_name, 'class_name=%r selected; should be %r' % (obj.class_name, class_name) keys_to_skip = ['class_name', 'is_complex', 'is_real', 'table_name_str'] for key in h5_result.keys(): if key in keys_to_skip: continue else: datai = _cast(h5_result.get(key)) if isinstance(datai, bytes): pass elif isinstance(datai, str): datai = datai.encode('latin1') else: assert not isinstance(datai, bytes), key setattr(obj, key, datai) return obj
f27d65d84481e1bb91a0d2282945da0944de1190
3,651,612
def _GenerateBaseResourcesAllowList(base_module_rtxt_path,
                                    base_allowlist_rtxt_path):
  """Generate an allowlist of base master resource ids.

  Args:
    base_module_rtxt_path: Path to base module R.txt file.
    base_allowlist_rtxt_path: Path to base allowlist R.txt file.
  Returns:
    list of resource ids.
  """
  ids_map = resource_utils.GenerateStringResourcesAllowList(
      base_module_rtxt_path, base_allowlist_rtxt_path)
  return ids_map.keys()
b6b3ef988b343115e4e1b2950667f07fd3771b19
3,651,613
def finite_min_max(array_like): """ Obtain finite (non-NaN, non-Inf) minimum and maximum of an array. Parameters ---------- array_like : array_like A numeric array of some kind, possibly containing NaN or Inf values. Returns ------- tuple Two-valued tuple containing the finite minimum and maximum of *array_like*. """ array_like = np.asanyarray(array_like) finite_values = array_like[np.isfinite(array_like)] return finite_values.min(), finite_values.max()
c300b55d2e53685fb0ade9809e13af4cfae4b1a8
3,651,614
import numpy as np


def list_extend1(n):
    """Build the data up in a list, then convert it to a numpy array."""
    l = []
    num_to_extend = 100
    data = range(num_to_extend)
    # range and integer division for Python 3 (was xrange and n/num_to_extend)
    for i in range(n // num_to_extend):
        l.extend(data)
    return np.array(l)
7a2240a397e32fc438f4245b92f97f103752b60c
3,651,615
def ccf(tdm, tsuid_list_or_dataset, lag_max=None, tsuids_out=False, cut_ts=False): """ This function calculates the maximum of the cross correlation function matrix between all ts in tsuid_list_or_dataset in a serial mode. The result is normalized (between -1 and 1) Cross correlation is a correlation between two timeseries whose one is delayed of successive lag values. Result of CCF is a timeseries (correlation function of the lag between timeseries). This function keep the maximum value of the CCF function generated and pull it in the matrix for corresponding timeseries couple. :returns: a string matrix (whose size is equal to the number of tsuids in tsuid_list_or_dataset plus one line and one column for headers) :rtype: np.ndarray :param tdm: Temporal Data Manager client :param tsuid_list_or_dataset: list of identifiers of the time series or dataset name :param lag_max: maximum lag between timeseries (cf. _ccf function for more details) :param tsuids_out: True to fill headers with tsuids False to fill headers with functional ids :param cut_ts: Cut the TS list to the min-length if set to True :type tdm: TemporalDataMgr :type tsuid_list_or_dataset: list of str or str :type lag_max: positive int :type tsuids_out: boolean :type cut_ts: boolean :raises TypeError: if tsuids_out is not a boolean """ if type(tsuids_out) is not bool: raise TypeError("tsuids_out must be a boolean") # retrieve data from temporal data manager ts_data_list, tsuid_list = __retrieve_data( tdm, tsuid_list_or_dataset) if tsuids_out: ts_list = tsuid_list else: ts_list = __retrieve_func_id(tdm, tsuid_list) # number and size of time series ts_nb = len(ts_data_list) ts_size = len(ts_data_list[0]) if cut_ts: for ts in ts_data_list: ts_size = min(len(ts), ts_size) else: # check time series have same length for ts in ts_data_list: if len(ts) != ts_size: raise ValueError('time series do not have same length') # matrix initialization matrix_corr = np.zeros([ts_nb, ts_nb]) for index1, _ in enumerate(ts_data_list): matrix_corr[index1, index1] = 1 # Conversion ts1 data from list (keeping only value column) to an array ts1 = np.asarray(ts_data_list[index1][:ts_size, 1]) for index2 in range(index1 + 1, len(ts_data_list)): # Conversion ts2 data from list (keeping only value column) to an # array ts2 = np.asarray(ts_data_list[index2][:ts_size, 1]) # cross correlation calculation # keeping the maximum absolute value between cross correlation with # positive and with negative lag ccf_fcn = _ccf(ts1, ts2, lag_max) max_ccf = __get_max_abs_value(ccf_fcn) # fill matrix with result (max of ccf is commutative) matrix_corr[index1, index2] = max_ccf matrix_corr[index2, index1] = max_ccf # fill final matrix with headers matrix = __fill_headers_to_final_matrix(matrix_corr, ts_list) return matrix
be3b5ccae3686fdef2e71eca87bc8131519d0398
3,651,616
def filter_zoau_installs(zoau_installs, build_info, minimum_zoau_version): """Sort and filter potential ZOAU installs based on build date and version. Args: zoau_installs (list[dict]): A list of found ZOAU installation paths. build_info (list[str]): A list of build info strings minimum_zoau_version (str): The minimum version of ZOAU to accept. Returns: list[dict]: A sorted and filtered list of ZOAU installation paths. """ for index, zoau_install in enumerate(zoau_installs): zoau_install["build"] = build_info[index] for zoau_install in zoau_installs: zoau_install["build"] = _get_version_from_build_string(zoau_install.get("build", "")) zoau_installs.sort(key=lambda x: _version_to_tuple(x.get("build")), reverse=True) min_version = _version_to_tuple(minimum_zoau_version) valid_installs = [] for zoau_install in zoau_installs: if min_version <= _version_to_tuple( zoau_install.get("build") ): valid_installs.append(zoau_install) # account for the fact 1.1.0 may or may not require pip install depending on PTF if "1.1.0" in zoau_install.get("build", ""): backup_install = zoau_install.copy() # set build to none so we do not treat it like a pip 1.1.0 install when testing backup_install["build"] = "" valid_installs.append(backup_install) return valid_installs
6e6c2de214c75630091b89e55df2e57fd9be12b9
3,651,617
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE): """Build a transaction that spends parent_txid.vout[n] and produces one output with amount = parent_value with a fee deducted. Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created). """ inputs = [{"txid": parent_txid, "vout": n}] my_value = parent_value - fee outputs = {address : my_value} rawtx = node.createrawtransaction(inputs, outputs) prevtxs = [{ "txid": parent_txid, "vout": n, "scriptPubKey": parent_locking_script, "amount": parent_value, }] if parent_locking_script else None signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs) assert signedtx["complete"] tx = tx_from_hex(signedtx["hex"]) return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
07f18e227f13c146c6fba0a9487c73337654a2a3
3,651,618
import argparse


def parse_args():
    """
    Parse input arguments

    Returns
    -------
    args : object
        Parsed args
    """
    h = {
        "program": "Simple Baselines training",
        "train_folder": "Path to training data folder.",
        "batch_size": "Number of images to load per batch. Set according to your PC GPU memory available. "
                      "If you get out-of-memory errors, lower the value. defaults to 64",
        "epochs": "How many epochs to train for. Once every training image has been shown to the CNN once, "
                  "an epoch has passed. Defaults to 15",
        "test_folder": "Path to test data folder",
        "num_workers": "Number of workers to load in batches of data. Change according to GPU usage",
        "test_only": "Set to true if you want to test a loaded model. Make sure to pass in model path",
        "model_path": "Path to your model",
        "learning_rate": "The learning rate of your model. Tune it if it's overfitting or not learning enough"}

    parser = argparse.ArgumentParser(description=h['program'],
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--train_folder', help=h["train_folder"], type=str)
    parser.add_argument('--batch_size', help=h['batch_size'], type=int, default=64)
    parser.add_argument('--epochs', help=h["epochs"], type=int, default=15)
    parser.add_argument('--test_folder', help=h["test_folder"], type=str)
    parser.add_argument('--num_workers', help=h["num_workers"], type=int, default=5)
    parser.add_argument('--test_only', help=h["test_only"], type=bool, default=False)
    parser.add_argument('--model_path', help=h["model_path"], type=str)  # was wired to the num_workers help text
    parser.add_argument('--learning_rate', help=h["learning_rate"], type=float, default=0.003)

    args = parser.parse_args()
    return args
5edfea499b64d35295ffd81403e3253027503d41
3,651,619
from datetime import datetime


def timestamp(date):
    """Get the timestamp of the `date`, python2/3 compatible

    :param datetime.datetime date: the utc date.
    :return: the timestamp of the date.
    :rtype: float
    """
    return (date - datetime(1970, 1, 1)).total_seconds()
a708448fb8cb504c2d25afa5bff6208abe1159a4
3,651,620
def pratt_arrow_risk_aversion(t, c, theta, **params):
    """Assume constant relative risk aversion"""
    return theta / c
ccbe6e74a150a4cbd3837ca3ab24bf1074d694c9
3,651,621
def parse_content_type(content_type): """ Parse a content-type and its parameters into values. RFC 2616 sec 14.17 and 3.7 are pertinent. **Examples**:: 'text/plain; charset=UTF-8' -> ('text/plain', [('charset, 'UTF-8')]) 'text/plain; charset=UTF-8; level=1' -> ('text/plain', [('charset, 'UTF-8'), ('level', '1')]) :param content_type: content_type to parse :returns: a tuple containing (content type, list of k, v parameter tuples) """ parm_list = [] if ';' in content_type: content_type, parms = content_type.split(';', 1) parms = ';' + parms for m in _rfc_extension_pattern.findall(parms): key = m[0].strip() value = m[1].strip() parm_list.append((key, value)) return content_type, parm_list
ba7f93853299dafdd4afc342b5ba2ce7c6fdd3e7
3,651,622
def mapplot(df, var, metric, ref_short, ref_grid_stepsize=None, plot_extent=None, colormap=None, projection=None, add_cbar=True, figsize=globals.map_figsize, dpi=globals.dpi, **style_kwargs): """ Create an overview map from df using df[var] as color. Plots a scatterplot for ISMN and a image plot for other input values. Parameters ---------- df : pandas.DataFrame DataFrame with lat and lon in the multiindex and var as a column var : str variable to be plotted. metric: ref_short: str short name of the reference dataset (read from netCDF file) ref_is_regular: bool (or 0, 1), optional (True by default) information if dataset hase a regular grid (in terms of angular distance) ref_grid_stepsize: float or None, optional (None by default) angular grid stepsize, needed only when ref_is_angular == False, plot_extent: tuple (x_min, x_max, y_min, y_max) in Data coordinates. The default is None. colormap: Colormap, optional colormap to be used. If None, defaults to globals._colormaps. projection: cartopy.crs, optional Projection to be used. If none, defaults to globals.map_projection. The default is None. add_cbar: bool, optional Add a colorbar. The default is True. figsize: tuple, optional Figure size in inches. The default is globals.map_figsize. dpi: int, optional Resolution for raster graphic output. The default is globals.dpi. **style_kwargs : Keyword arguments for plotter.style_map(). Returns ------- fig : TYPE DESCRIPTION. ax : TYPE DESCRIPTION. """ # === value range === v_min, v_max = get_value_range(df[var], metric) # === init plot === fig, ax, cax = init_plot(figsize, dpi, add_cbar, projection) if not colormap: # colormap = globals._colormaps[meta['metric']] cmap = globals._colormaps[metric] else: cmap = colormap # cmap = plt.cm.get_cmap(colormap) # === scatter or mapplot === if ref_short in globals.scattered_datasets: # === scatterplot === # === coordiniate range === if not plot_extent: plot_extent = get_plot_extent(df) # === marker size === markersize = globals.markersize ** 2 # in points**2 # === plot === lat, lon = globals.index_names im = ax.scatter(df.index.get_level_values(lon), df.index.get_level_values(lat), c=df[var], cmap=cmap, s=markersize, vmin=v_min, vmax=v_max, edgecolors='black', linewidths=0.1, zorder=2, transform=globals.data_crs) else: # === mapplot === # === coordiniate range === if not plot_extent: plot_extent = get_plot_extent(df, grid_stepsize=ref_grid_stepsize, grid=True) # === prepare values === zz, zz_extent, origin = geotraj_to_geo2d(df, var, grid_stepsize=ref_grid_stepsize) # === plot === im = ax.imshow(zz, cmap=cmap, vmin=v_min, vmax=v_max, interpolation='nearest', origin=origin, extent=zz_extent, transform=globals.data_crs, zorder=2) # === add colorbar === if add_cbar: _make_cbar(fig, im, cax, ref_short, metric) style_map(ax, plot_extent, **style_kwargs) # === layout === fig.canvas.draw() # very slow. necessary bcs of a bug in cartopy: https://github.com/SciTools/cartopy/issues/1207 # plt.tight_layout() # pad=1) # pad=0.5,h_pad=1,w_pad=1,rect=(0, 0, 1, 1)) return fig, ax
e1bf50a214b169c9b13ddbf86a6bded0df5c5310
3,651,623
def generate_athena(config): """Generate Athena Terraform. Args: config (dict): The loaded config from the 'conf/' directory Returns: dict: Athena dict to be marshalled to JSON """ result = infinitedict() prefix = config['global']['account']['prefix'] athena_config = config['lambda']['athena_partitioner_config'] data_buckets = athena_partition_buckets_tf(config) database = athena_config.get('database_name', '{}_streamalert'.format(prefix)) results_bucket_name = athena_query_results_bucket(config) queue_name = athena_config.get( 'queue_name', '{}_streamalert_athena_s3_notifications'.format(prefix) ).strip() logging_bucket, _ = s3_access_logging_bucket(config) # Set variables for the athena partitioner's IAM permissions result['module']['athena_partitioner_iam'] = { 'source': './modules/tf_athena', 'account_id': config['global']['account']['aws_account_id'], 'prefix': prefix, 's3_logging_bucket': logging_bucket, 'database_name': database, 'queue_name': queue_name, 'athena_data_buckets': data_buckets, 'results_bucket': results_bucket_name, 'lambda_timeout': athena_config['timeout'], 'kms_key_id': '${aws_kms_key.server_side_encryption.key_id}', 'function_role_id': '${module.athena_partitioner_lambda.role_id}', 'function_name': '${module.athena_partitioner_lambda.function_name}', 'function_alias_arn': '${module.athena_partitioner_lambda.function_alias_arn}', } # Set variables for the Lambda module result['module']['athena_partitioner_lambda'] = generate_lambda( '{}_streamalert_{}'.format(prefix, ATHENA_PARTITIONER_NAME), 'streamalert.athena_partitioner.main.handler', athena_config, config, tags={ 'Subcomponent': 'AthenaPartitioner' } ) return result
4fd3a18e5220e82a04451271f1ea8004978b4c65
3,651,624
def _angular_rate_to_rotvec_dot_matrix(rotvecs): """Compute matrices to transform angular rates to rot. vector derivatives. The matrices depend on the current attitude represented as a rotation vector. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. Returns ------- ndarray, shape (n, 3, 3) """ norm = np.linalg.norm(rotvecs, axis=1) k = np.empty_like(norm) mask = norm > 1e-4 nm = norm[mask] k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2 mask = ~mask nm = norm[mask] k[mask] = 1/12 + 1/720 * nm**2 skew = _create_skew_matrix(rotvecs) result = np.empty((len(rotvecs), 3, 3)) result[:] = np.identity(3) result[:] += 0.5 * skew result[:] += k[:, None, None] * np.matmul(skew, skew) return result
c0d468901ec7dc4d6da7f5eff7b95ac3fc176901
3,651,625
from typing import Any def get_all_learners() -> Any: """Get all learner configurations which are prepared.""" return { "learner_types": sorted( [ possible_dir.name for possible_dir in LEARNERS_DIR.iterdir() if possible_dir.is_dir() ] ) }
d05fd8d9da820061cea29d25002513e778c2b367
3,651,626
def getdate(targetconnection, ymdstr, default=None): """Convert a string of the form 'yyyy-MM-dd' to a Date object. The returned Date is in the given targetconnection's format. Arguments: - targetconnection: a ConnectionWrapper whose underlying module's Date format is used - ymdstr: the string to convert - default: The value to return if the conversion fails """ try: (year, month, day) = ymdstr.split('-') modref = targetconnection.getunderlyingmodule() return modref.Date(int(year), int(month), int(day)) except Exception: return default
21d27c3ef4e99b28b16681072494ce573e592255
3,651,627
import numpy as np
from scipy.sparse import lil_matrix


def thermal_dm(n, u):
    """
    Return the thermal density matrix for a boson.

    n: integer
        dimension of the Fock space
    u: float
        reduced temperature, omega/k_B T
    """
    nlist = np.arange(n)
    diags = np.exp(-nlist * u)
    diags /= np.sum(diags)
    rho = lil_matrix((n, n))  # sparse matrices take an explicit (rows, cols) shape
    rho.setdiag(diags)
    return rho.tocsr()
80631a0575176e16e8832cb6c136030bcd589c58
3,651,628
def _get_confidence_bounds(confidence): """ Get the upper and lower confidence bounds given a desired confidence level. Args: confidence (float): [description] # TODO: ^^ Returns: float, float: - upper confidence bound - lower confidence bound """ return [50 + 0.5 * confidence, 50 - 0.5 * confidence]
26542f3a103c7e904634f0b0c6d4e2fc522d358c
3,651,629
from zope.configuration import xmlconfig, config def zcml_strings(dir, domain="zope", site_zcml=None): """Retrieve all ZCML messages from `dir` that are in the `domain`.""" # Load server-independent site config context = config.ConfigurationMachine() xmlconfig.registerCommonDirectives(context) context.provideFeature("devmode") context = xmlconfig.file(site_zcml, context=context, execute=False) return context.i18n_strings.get(domain, {})
23c62c50b313f53b25ad151ebccd5808bf7bad59
3,651,630
def const_p(a: C) -> Projector[C]:
    """
    Make a projector that always returns the same still frame
    """
    return lambda _: a
d73fb818f0606f9a64cb0076c99ff57c0b3bb042
3,651,631
import json def get_s3_bucket(bucket_name, s3): """" Takes the s3 and bucket_name and returns s3 bucket If does not exist, it will create bucket with permissions """ bucket_name = bucket_name.lower().replace('/','-') bucket = s3.Bucket(bucket_name) exists = True try: s3.meta.client.head_bucket(Bucket=bucket_name) except ClientError as e: # If a client error is thrown, then check that it was a 404 error. # If it was a 404 error, then the bucket does not exist. error_code = int(e.response['Error']['Code']) if error_code == 404: exists = False if exists is False: s3.create_bucket(Bucket=bucket_name, ACL='public-read') # We need to set an S3 policy for our bucket to # allow anyone read access to our bucket and files. # If we do not set this policy, people will not be # able to view our S3 static web site. bucket_policy = s3.BucketPolicy(bucket_name) policy_payload = { "Version": "2012-10-17", "Statement": [{ "Sid": "Allow Public Access to All Objects", "Effect": "Allow", "Principal": "*", "Action": "s3:GetObject", "Resource": "arn:aws:s3:::%s/*" % (bucket_name) }] } # Add the policy to the bucket bucket_policy.put(Policy=json.dumps(policy_payload)) # Make our new S3 bucket a static website bucket_website = s3.BucketWebsite(bucket_name) # Create the configuration for the website website_configuration = { 'ErrorDocument': {'Key': 'error.html'}, 'IndexDocument': {'Suffix': 'index.html'}, } bucket_website.put( WebsiteConfiguration=website_configuration ) bucket = s3.Bucket(bucket_name) return bucket
67e9ede766989894aa86d2af1c766a57c4ed7116
3,651,632
def rf_render_ascii(tile_col): """Render ASCII art of tile""" return _apply_column_function('rf_render_ascii', tile_col)
d697014f019b303c3c7de0e874e8d321c5d96f7a
3,651,633
import json def index(): """ Display productpage with normal user and test user buttons""" global productpage table = json2html.convert(json = json.dumps(productpage), table_attributes="class=\"table table-condensed table-bordered table-hover\"") return render_template('index.html', serviceTable=table)
e27de5745c9e20f8942ea1ae3b07a4afa932b0f3
3,651,634
def student_classes(id): """ Show students registrered to class * display list of all students (GET) """ template = "admin/class_students.html" if not valid_integer(id): return ( render_template( "errors/custom.html", title="400", message="Id must be integer" ), 400, ) school_class = dict_sql_query( f"SELECT * FROM school_classes WHERE id={id}", fetchone=True ) if not school_class: return ( render_template( "errors/custom.html", title="400", message="Class does not exist." ), 400, ) # show students with class defined as this one students = [] for student in dict_sql_query( f"SELECT * FROM students WHERE class_id={school_class['id']}" ): students.append( { "student": student, "activity_name": dict_sql_query( f"SELECT name FROM activities WHERE id={student['chosen_activity']}", fetchone=True, )["name"] if student["chosen_activity"] else "Ej valt", } ) return render_template(template, school_class=school_class, students=students)
b431a21e39c97cbcc21d161a411cc9f3a3746cc8
3,651,635
def _get_service_handler(request, service): """Add the service handler to the HttpSession. We use the django session object to store the service handler's representation of the remote service between sequentially logic steps. This is done in order to improve user experience, as we avoid making multiple Capabilities requests (this is a time saver on servers that feature many layers. """ service_handler = get_service_handler( service.base_url, service.proxy_base, service.type) request.session[service.base_url] = service_handler logger.debug("Added handler to the session") return service_handler
388b0a98c24039f629e5794427877d98d702d1e2
3,651,636
def f_score(overlap_count, gold_count, guess_count, f=1): """Compute the f1 score. :param overlap_count: `int` The number of true positives. :param gold_count: `int` The number of gold positives (tp + fn) :param guess_count: `int` The number of predicted positives (tp + fp) :param f: `int` The beta term to weight precision vs recall. :returns: `float` The f score """ beta_sq = f*f if guess_count == 0: return 0.0 p = precision(overlap_count, guess_count) r = recall(overlap_count, gold_count) if p == 0.0 or r == 0.0: return 0.0 f = (1. + beta_sq) * (p * r) / (beta_sq * p + r) return f
6c7c0e3e58aa7fe4ca74936ce9029b6968ed6ee3
3,651,637
import math


def phi(n):
    """Calculate phi using Euler's product formula."""
    assert math.sqrt(n) < primes[-1], "Not enough primes to deal with " + str(n)
    # For details, check:
    # http://en.wikipedia.org/wiki/Euler's_totient_function#Euler.27s_product_formula
    prod = n
    for p in primes:
        if p > n:
            break
        if n % p == 0:
            prod *= 1 - (1 / p)
    return int(prod)
d17f0b5901602a9a530427da2b37d0402ef426ce
3,651,638
import logging def run_bert_pretrain(strategy, custom_callbacks=None): """Runs BERT pre-training.""" bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file) if not strategy: raise ValueError('Distribution strategy is not specified.') # Runs customized training loop. logging.info('Training using customized training loop TF 2.0 with distributed' 'strategy.') performance.set_mixed_precision_policy(common_flags.dtype(), use_experimental_api=False) # Only when explicit_allreduce = True, post_allreduce_callbacks and # allreduce_bytes_per_pack will take effect. optimizer.apply_gradients() no # longer implicitly allreduce gradients, users manually allreduce gradient and # pass the allreduced grads_and_vars to apply_gradients(). # With explicit_allreduce = True, clip_by_global_norm is moved to after # allreduce. return run_customized_training( strategy, bert_config, FLAGS.init_checkpoint, # Used to initialize only the BERT submodel. FLAGS.max_seq_length, FLAGS.max_predictions_per_seq, FLAGS.model_dir, FLAGS.num_steps_per_epoch, FLAGS.steps_per_loop, FLAGS.num_train_epochs, FLAGS.learning_rate, FLAGS.warmup_steps, FLAGS.end_lr, FLAGS.optimizer_type, FLAGS.input_files, FLAGS.train_batch_size, FLAGS.use_next_sentence_label, FLAGS.train_summary_interval, custom_callbacks=custom_callbacks, explicit_allreduce=FLAGS.explicit_allreduce, pre_allreduce_callbacks=[ model_training_utils.clip_by_global_norm_callback ], allreduce_bytes_per_pack=FLAGS.allreduce_bytes_per_pack)
16397fb83bb02e2f01c716f97f6f461e4675c319
3,651,639
import json def add_mutes(guild_id: int, role_id: int, user_id: int, author_id: int, datetime_to_parse: str): """ Add a temporary mute to a user. NOTE: datetime_to_parse should be a string like: "1 hour 30 minutes" """ with open("data/unmutes.json", "r+", newline='\n', encoding='utf-8') as temp_file: mutes = json.load(temp_file) new_mute_data = (user_id, role_id, guild_id) str_dt_obj = parse_times(datetime_to_parse) # if the script made it this far, this is real we have to store mute data if str_dt_obj not in mutes: mutes[str_dt_obj] = [] mutes[str_dt_obj].append(new_mute_data) mute_index = len(mutes[str_dt_obj]) - 1 if str(guild_id) not in mutes: mutes[str(guild_id)] = {} if str(user_id) in mutes[str(guild_id)]: mutes[str(guild_id)].pop(str(user_id)) if not str(user_id) in mutes[str(guild_id)]: mutes[str(guild_id)][str(user_id)] = [] mutes[str(guild_id)][str(user_id)] = [str_dt_obj, author_id, mute_index] json.dump(mutes, open("data/unmutes.json", "w+", newline='\n', encoding='utf-8')) return str_dt_obj # Don't worry I can't read this mess either.
8c762f56217ee940d8803e069f1b3bce47629a2e
3,651,640
def operation_dict(ts_epoch, request_dict): """An operation as a dictionary.""" return { "model": request_dict, "model_type": "Request", "args": [request_dict["id"]], "kwargs": {"extra": "kwargs"}, "target_garden_name": "child", "source_garden_name": "parent", "operation_type": "REQUEST_CREATE", }
e7b63d79c6de73616b39e2713a0ba2da6f9e2a25
3,651,641
def unix_timestamp(s=None, p="yyyy-MM-dd HH:mm:ss"): """ :rtype: Column >>> import os, time >>> os.environ['TZ'] = 'Europe/Paris' >>> if hasattr(time, 'tzset'): time.tzset() >>> from pysparkling import Context, Row >>> from pysparkling.sql.session import SparkSession >>> spark = SparkSession(Context()) >>> spark.range(1).select(unix_timestamp(lit("2033-05-18 05:33:21"))).show() +--------------------------------------------------------+ |unix_timestamp(2033-05-18 05:33:21, yyyy-MM-dd HH:mm:ss)| +--------------------------------------------------------+ | 2000000001| +--------------------------------------------------------+ >>> spark.range(1).select(unix_timestamp(lit("2019-01-01"), "yyyy-MM-dd")).show() +--------------------------------------+ |unix_timestamp(2019-01-01, yyyy-MM-dd)| +--------------------------------------+ | 1546297200| +--------------------------------------+ """ if s is None: s = col(CurrentTimestamp()) return col(UnixTimestamp(ensure_column(s), lit(p)))
82dce864f70ef367f5a38bcc39aec1bf996d66e9
3,651,642
def memory_index(indices, t):
    """Location of an item in the underlying memory."""
    memlen, itemsize, ndim, shape, strides, offset = t
    p = offset
    for i in range(ndim):
        p += strides[i] * indices[i]
    return p
ed97592aa5444cfd6d6894b042b5b103d2de6afc
3,651,643
def createExpData(f, xVals):
    """Assumes f is an exponential function of one argument and
    xVals is an array of suitable arguments for f.
    Returns arrays containing xVals and the results of applying f
    to the elements of xVals."""
    yVals = []
    for i in range(len(xVals)):
        yVals.append(f(xVals[i]))
    return pylab.array(xVals), pylab.array(yVals)
79c6575ec07579e792e77b65960992a48837f2e9
3,651,644
from typing import Tuple import math def discrete_one_samp_ks(distribution1: np.array, distribution2: np.array, num_samples: int) -> Tuple[float, bool]: """Uses the one-sample Kolmogorov-Smirnov test to determine if the empirical results in distribution1 come from the distribution represented in distribution2 :param distribution1: empirical distribution (numpy array) :param distribution2: reference distribution (numpy array) :param num_samples: number of samples used to generate distribution1 :return: a tuple (D, D<D_{alpha}) """ cutoff = 1.36 / math.sqrt(num_samples) ecdf1 = np.array([sum(distribution1[:i + 1]) for i in range(len(distribution1))]) ecdf2 = np.array([sum(distribution2[:i + 1]) for i in range(len(distribution2))]) max_diff = np.absolute(ecdf1 - ecdf2).max() return max_diff, max_diff < cutoff
37e85c695f0e33c70566e5462fb55e7882fbcd02
3,651,645
def _get_product_refs(pkgs): """Returns a list of product references as declared in the specified packages list. Args: pkgs: A `list` of package declarations (`struct`) as created by `packages.create()`, `packages.pkg_json()` or `spm_pkg()`. Returns: A `list` of product reference (`string`) values. """ return [refs.create(ref_types.product, pkg.name, prd) for pkg in pkgs for prd in pkg.products]
f545f261e237dfe447533c89a489abb863b994e8
3,651,646
def merge_intervals(interval_best_predictors): """ Merge intervals with the same best predictor """ predictor2intervals = defaultdict(set) for interval, best_predictor in interval_best_predictors.items(): predictor2intervals[best_predictor].update(interval) merged_intervals = {best_predictor: max(interval_points) - min(interval_points) for best_predictor, interval_points in predictor2intervals.items()} return merged_intervals
6ebd0b5b26193c5d3885e603ab3bae68d395d6b1
3,651,647
def build_pixel_sampler(cfg, **default_args): """Build pixel sampler for segmentation map.""" return build_module_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
f7d687b80c7bb3cfa266b65691574e40291021d2
3,651,648
def solution_to_schedule(solution, events, slots): """Convert a schedule from solution to schedule form Parameters ---------- solution : list or tuple of tuples of event index and slot index for each scheduled item events : list or tuple of :py:class:`resources.Event` instances slots : list or tuple of :py:class:`resources.Slot` instances Returns ------- list A list of instances of :py:class:`resources.ScheduledItem` """ return [ ScheduledItem( event=events[item[0]], slot=slots[item[1]] ) for item in solution ]
7470849f90e445f8146c561a49646d4bd8bbb886
3,651,649
def flip_tiles( tiles ): """ Initially all tiles are white. Every time, a tile is visited based on the directions, it is flipped (to black, or to white again). The directions are represented in (x,y) coordinates starting from reference tile at (0,0). Based on the given directions to each tile starting from the reference tile, the coordinates of the tile is found and added to the set of black tiles. If the tile is already a black tile, it is flipped and thus removed from the set. This function returns the set of black tiles. """ black_tiles = set() for directions_to_tile in tiles: x,y = (0,0) for direction in directions_to_tile: x,y = get_coordinates( x,y, direction ) found_tile = (x,y) if found_tile not in black_tiles: black_tiles.add( found_tile ) else: black_tiles.remove( found_tile ) return black_tiles
91628f0ae4d1713f1fa12dad34d2a3e2f97b663e
3,651,650
def version() -> int:
    """Return the version number of the libpq currently loaded.

    The number is in the same format of `~psycopg.ConnectionInfo.server_version`.

    Certain features might not be available if the libpq library used is too old.
    """
    return impl.PQlibVersion()
cc8360372787d08f3852cb8d908db780fe3c9573
3,651,651
import scipy def feature_predictors_from_ensemble(features, verbose=False): """generates a dictionary of the form {"offset":offset_predictor, "sigma":sigma_predictor, ...} where the predictors are generated from the center and spread statistics of the feature ensemble. features: list the feature objects """ lparams = np.asarray([f.profile.get_parameters() for f in features]) cent_wvs = np.asarray([f.wv for f in features]) rel_norms = np.asarray([f.relative_continuum for f in features]) delta_wvs = np.asarray([np.mean(scipy.gradient(f.data_sample.wv)) for f in features]) dwv_over_wv = delta_wvs/cent_wvs med_inv_r = np.median(dwv_over_wv) sig_over_wv = lparams[:, 1]/cent_wvs sig_med = np.median(sig_over_wv) sig_mad = np.median(np.abs(sig_over_wv-sig_med)) if verbose: print("sigma median", sig_med, "sigma mad", sig_mad) vel_offs = lparams[:, 0]/cent_wvs vel_med = np.median(vel_offs) vel_mad = np.median(np.abs(vel_offs - vel_med)) if verbose: print("velocity median", vel_med, "velocity mad", vel_mad) gam_med = np.median(np.abs(lparams[:, 2])) gam_mad = np.median(np.abs(lparams[:, 2]-gam_med)) if verbose: print("gamma median", gam_med, "gamma mad", gam_mad) rel_med = np.median(rel_norms) rel_mad = np.median(np.abs(rel_norms-rel_med)) if verbose: print("rel_norm median", gam_med, "rel_norm mad", gam_mad) predictors = {} offset_predictor = WavelengthScaledGaussianPredictor(vel_med, 1.4*vel_mad) sigma_predictor = WavelengthScaledGaussianPredictor(sig_med, 1.4*sig_mad + 0.5*med_inv_r) gamma_predictor = GaussianPredictor(gam_med, 1.4*gam_mad+0.1*np.median(delta_wvs)) rel_norm_predictor = GaussianPredictor(1.0, 0.01) predictors["offset"] = offset_predictor predictors["sigma"] = sigma_predictor predictors["gamma"] = gamma_predictor predictors["rel_norm"] = rel_norm_predictor return predictors
ea26d0640fa6dd8b948c8620b266519137805979
3,651,652
import requests def remoteLoggingConfig(host, args, session): """ Called by the logging function. Configures remote logging (rsyslog). @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the logging sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url="https://"+host+"/xyz/openbmc_project/logging/config/remote" try: res = session.put(url + '/attr/Port', headers=jsonHeader, json = {"data": args.port}, verify=False, timeout=baseTimeout) res = session.put(url + '/attr/Address', headers=jsonHeader, json = {"data": args.address}, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) return res.text
04417970f671f79af0157d82ea048b5c4f8f957d
3,651,653
import pathlib
from os import PathLike
from typing import Union

import nibabel as nib
import numpy as np


def _merge_3d_t1w(filename: Union[str, PathLike]) -> pathlib.Path:
    """
    Merges T1w images that have been split into two volumes

    Parameters
    ----------
    filename : str or pathlib.Path
        Path to T1w image that needs to be merged

    Returns
    -------
    filename : pathlib.Path
        Path to merged T1w image
    """
    filename = pathlib.Path(filename).resolve()
    img = nib.load(str(filename))

    if not (len(img.shape) == 4 and img.shape[-1] > 1):
        return

    # split data along the fourth dimension and then concatenate along the third
    imdata = img.get_data()
    cat = [d.squeeze() for d in np.split(imdata, imdata.shape[-1], axis=-1)]
    imdata = np.concatenate(cat, axis=-1)

    new_img = img.__class__(imdata, img.affine, img.header)
    nib.save(new_img, filename)

    return filename
f1eae741c270553ee18c6f4cc2eb2484215617db
3,651,654
def get_partial_results(case_name, list_of_variables): """ Get a dictionary with the variable names and the time series for `list_of_variables` """ reader = get_results(case_name) d = dict() read_time = True for v in list_of_variables: if read_time: d['time'] = reader.values(v)[0] read_time = False d[v] = reader.values(v)[1] return d
43954296a11bea2c8a04f2e65a709c56ea14d00a
3,651,655
def take_with_time(self, duration, scheduler=None): """Takes elements for the specified duration from the start of the observable source sequence, using the specified scheduler to run timers. Example: res = source.take_with_time(5000, [optional scheduler]) Description: This operator accumulates a queue with a length enough to store elements received during the initial duration window. As more elements are received, elements older than the specified duration are taken from the queue and produced on the result sequence. This causes elements to be delayed with duration. Keyword arguments: duration -- {Number} Duration for taking elements from the start of the sequence. scheduler -- {Scheduler} Scheduler to run the timer on. If not specified, defaults to rx.Scheduler.timeout. Returns {Observable} An observable sequence with the elements taken during the specified duration from the start of the source sequence. """ source = self scheduler = scheduler or timeout_scheduler def subscribe(observer): def action(scheduler, state): observer.on_completed() disposable = scheduler.schedule_relative(duration, action) return CompositeDisposable(disposable, source.subscribe(observer)) return AnonymousObservable(subscribe)
d96ce7ae892fe6700b9f14cccbb01e3aa45b9b76
3,651,656
def add_label(hdf5_filename, key, peak, label): """ Function that adds a label to a peak dataset in the hdf5 file.It has to be iterated over every single peak. Parameters: hdf5_filename (string): filename of experimental file key (string): key within `hdf5_filename` of experimental file peak (string): string name of 'Peak_0#" associated with the peak list containing tuples of the x_data (wavenumber) and y_data (counts) values of the peaks. label (string): string name of an individual label from internal function unknown_peak_assignment that is used in lineidplot. Returns: df (DataFrame): DataFrame which contains the peak fitted data and peak descriptors of each classified peak based on the fed-in known spectra. """ #Handling errors in inputs. if not isinstance(hdf5_filename, str): raise TypeError("""Passed value of `hdf5_filename` is not a string! Instead, it is: """ + str(type(hdf5_filename))) if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5': raise TypeError("""`hdf5_filename` is not type = .hdf5! Instead, it is: """ + hdf5_filename.split('/')[-1].split('.')[-1]) if not isinstance(key, str): raise TypeError("""Passed value of `key` is not a str! Instead, it is: """ + str(type(key))) if not isinstance(peak, str): raise TypeError("""Passed value of `peak` is not a string! Instead, it is: """ + str(type(peak))) if not isinstance(label, str): raise TypeError("""Passed value of `label` is not a string! Instead, it is: """ + str(type(label))) # open hdf5 file as read/write hdf5 = h5py.File(hdf5_filename, 'r+') # extract existing data from peak dataset peak_data = list(hdf5['{}/{}'.format(key, peak)][0])[:7] # print(peak_data) # make a new tuple that contains the orginal data as well as the label label_tuple = (label,) data = tuple(peak_data) +label_tuple # delete the old dataset so the new one can be saved del hdf5['{}/{}'.format(key, peak)] # define a custom datatype that allows for a string as the the last tuple element my_datatype = np.dtype([('fraction', np.float), ('center', np.float), ('sigma', np.float), ('amplitude', np.float), ('fwhm', np.float), ('height', np.float), ('area under the curve', np.float), ('label', h5py.special_dtype(vlen=str))]) # recreate the old dataset in the hdf5 file dataset = hdf5.create_dataset('{}/{}'.format(key, peak), (1,), dtype=my_datatype) # apply custom dtype to data tuple # print(dataset) # print(data) # print(my_datatype) data_array = np.array(data, dtype=my_datatype) # write new values to the blank dataset dataset[...] = data_array # print(dataset) hdf5.close() df = pd.DataFrame(data = data) return df
b903d59ae8e18adf2942227fc6b4c0e207dbde78
3,651,657
def _infer_color_variable_kind(color_variable, data): """Determine whether color_variable is array, pandas dataframe, callable, or scikit-learn (fit-)transformer.""" if hasattr(color_variable, "dtype") or hasattr(color_variable, "dtypes"): if len(color_variable) != len(data): raise ValueError( "color_variable and data must have the same length.") color_variable_kind = "scalars" elif hasattr(color_variable, "transform"): color_variable_kind = "transformer" elif hasattr(color_variable, "fit_transform"): color_variable_kind = "fit_transformer" elif callable(color_variable): color_variable_kind = "callable" elif color_variable is None: color_variable_kind = "none" else: # Assume color_variable is a selection of columns color_variable_kind = "else" return color_variable_kind
a1a21c6df4328331754f9fb960e64cf8bfe09be7
3,651,658
import os def ParseChromeosImage(chromeos_image): """Parse the chromeos_image string for the image and version. The chromeos_image string will probably be in one of two formats: 1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \ chromiumos_test_image.bin 2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \ chromiumos_test_image.bin We parse these strings to find the 'chromeos_version' to store in the json archive (without the .datatime bit in the first case); and also the 'chromeos_image', which would be all of the first case, but only the part after '/chroot/tmp' in the second case. Args: chromeos_image: string containing the path to the chromeos_image that crosperf used for the test. Returns: version, image: The results of parsing the input string, as explained above. """ # Find the Chromeos Version, e.g. R45-2345.0.0..... # chromeos_image should have been something like: # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin" if chromeos_image.endswith('/chromiumos_test_image.bin'): full_version = chromeos_image.split('/')[-2] # Strip the date and time off of local builds (which have the format # "R43-2345.0.0.date-and-time"). version, _ = os.path.splitext(full_version) else: version = '' # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then # it's an official image that got downloaded, so chop off the download path # to make the official image name more clear. official_image_path = '/chroot/tmp' if official_image_path in chromeos_image: image = chromeos_image.split(official_image_path, 1)[1] else: image = chromeos_image return version, image
49652dad39bcc1df8b3decae4ec374adaf353185
3,651,659
from jax import random  # random.split below is JAX's PRNG key splitting, not the stdlib random module


def Dense(name, out_dim, W_init=stax.glorot(), b_init=stax.randn()):
    """Layer constructor function for a dense (fully-connected) layer."""

    def init_fun(rng, example_input):
        input_shape = example_input.shape
        k1, k2 = random.split(rng)
        W, b = W_init(k1, (out_dim, input_shape[-1])), b_init(k2, (out_dim,))
        return W, b

    def apply_fun(params, inputs):
        W, b = params
        return np.dot(W, inputs) + b

    return core.Layer(name, init_fun, apply_fun).bind
a2b20961ff3fd23e0cd87f200d1c575d9788e076
3,651,660
from datetime import datetime


def datetime_to_epoch(date_time: datetime) -> int:
    """Convert a datetime object to an epoch integer (seconds)."""
    return int(date_time.timestamp())
73767c663d66464420594e90a438687c9363b884
3,651,661
def parse_arguments(): """ Merge the scar.conf parameters, the cmd parameters and the yaml file parameters in a single dictionary. The precedence of parameters is CMD >> YAML >> SCAR.CONF That is, the CMD parameter will override any other configuration, and the YAML parameters will override the SCAR.CONF settings """ config_args = ConfigFileParser().get_properties() func_call, cmd_args = CommandParser().parse_arguments() if 'conf_file' in cmd_args['scar'] and cmd_args['scar']['conf_file']: yaml_args = FileUtils.load_yaml(cmd_args['scar']['conf_file']) # YAML >> SCAR.CONF merged_args = fdl.merge_conf(config_args, yaml_args) merged_args = fdl.merge_cmd_yaml(cmd_args, merged_args) else: # CMD >> SCAR.CONF merged_args = fdl.merge_conf(config_args, cmd_args) #self.cloud_provider.parse_arguments(merged_args) FileUtils.create_tmp_config_file(merged_args) return func_call
a34525ed55514db2133c5c39d273ea48af8f8c54
3,651,662
from typing import BinaryIO def check_signature(stream: BinaryIO) -> str: """ Check signature of the model file and return characters used by the model. The characters returned are sorted in lexicographical order. """ uzmodel_tag = stream.read(8) if uzmodel_tag != b'UZMODEL ': raise IOError('invalid uzmodel_tag') uzmodel_version = read_int(stream) if uzmodel_version == 1: ssv = 0 elif uzmodel_version == 2: ssv = read_int(stream) else: raise IOError('invalid uzmodel_version') if ssv == 0: chars = ''.join(map(chr, chain(STD, AFT, EXA, EXB, SPC))) elif ssv == 1: chars = ''.join(map(chr, chain(STD, AFT, EXA, EXB))) else: raise ValueError('invalid ssv') bmarkov_tag = stream.read(8) if bmarkov_tag != b'BMARKOV ': raise IOError('invalid bmarkov_tag') bmarkov_version = read_int(stream) if bmarkov_version != 0: raise IOError('invalid bmarkov_version') return chars
3a8d2e646a2ffe08a471f5447d8e790aefd6fc68
3,651,663
def Validate(expected_schema, datum): """Determines if a python datum is an instance of a schema. Args: expected_schema: Schema to validate against. datum: Datum to validate. Returns: True if the datum is an instance of the schema. """ schema_type = expected_schema.type if schema_type == 'null': return datum is None elif schema_type == 'boolean': return isinstance(datum, bool) elif schema_type == 'string': return isinstance(datum, str) elif schema_type == 'bytes': return isinstance(datum, bytes) elif schema_type == 'int': return (isinstance(datum, int) and (INT_MIN_VALUE <= datum <= INT_MAX_VALUE)) elif schema_type == 'long': return (isinstance(datum, int) and (LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE)) elif schema_type in ['float', 'double']: return (isinstance(datum, int) or isinstance(datum, float)) elif schema_type == 'fixed': return isinstance(datum, bytes) and (len(datum) == expected_schema.size) elif schema_type == 'enum': return datum in expected_schema.symbols elif schema_type == 'array': return (isinstance(datum, list) and all(Validate(expected_schema.items, item) for item in datum)) elif schema_type == 'map': return (isinstance(datum, dict) and all(isinstance(key, str) for key in datum.keys()) and all(Validate(expected_schema.values, value) for value in datum.values())) elif schema_type in ['union', 'error_union']: return any(Validate(union_branch, datum) for union_branch in expected_schema.schemas) elif schema_type in ['record', 'error', 'request']: return (isinstance(datum, dict) and all(Validate(field.type, datum.get(field.name)) for field in expected_schema.fields)) else: raise AvroTypeException('Unknown Avro schema type: %r' % schema_type)
22ed46f2d82f9c4ea53fdd707553d54958a20814
3,651,664
def get_main_play_action(action: PlayerAction) -> PlayerAction: """ Gets the main play, e.g., FLYOUT or SINGLE :param action: :return: """ print("Searching for main play") # find out if the string contains any of the allowed actions for i in PlayerActionEnum: if i.value in action.action_text: print(f"\tFound {i.value}!") action.action_type = i action.action_text = action.action_text.replace(i.value, '') break return action
ec85c305509b5f6f88eb157e7a110dbed7ad0ab4
3,651,665
from functools import reduce def inet_aton(s): """Convert a dotted-quad to an int.""" try: addr = list(map(int, s.split('.'))) addr = reduce(lambda a,b: a+b, [addr[i] << (3-i)*8 for i in range(4)]) except (ValueError, IndexError): raise ValueError('illegal IP: {0}'.format(s)) return addr
abc16c14e416f55c9ae469b4b9c1958df265433c
3,651,666
def local_principals(context, principals): """ The idea behind this is to process __ac_local_roles__ (and a boolean __ac_local_roles_block__ to disable) and add local principals. This only works if you're in correct context, though, which does not seem to be the case. """ local_principals = set() block = False for location in lineage(context): if block: break block = getattr(location, '__ac_local_roles_block__', False) local_roles = getattr(location, '__ac_local_roles__', None) if local_roles and callable(local_roles): local_roles = local_roles() if not local_roles: continue for principal in principals: try: roles = local_roles[principal] except KeyError: pass else: if not is_nonstr_iter(roles): roles = [roles] local_principals.update(roles) if not local_principals: return principals local_principals.update(principals) if DEBUG_PERMISSIONS: PRINT("local_principals") PRINT(" context.collection=", context.collection) PRINT(" context.__acl__()=", context.__acl__()) PRINT(" context.collection.__ac_local_roles_()=", context.__ac_local_roles__()) PRINT("local_principals returning", local_principals) return local_principals
ccd2597ca1657fc7805a85c212c87d2d04cacad4
3,651,667
def helper(): """I'm useful helper""" data = { "31 Dec 2019": "Wuhan Municipal Health Commission, China, reported a cluster of cases of pneumonia in Wuhan, Hubei Province. A novel coronavirus was eventually identified.", "1 January 2020": "WHO had set up the IMST (Incident Management Support Team) across the three levels of the organization: headquarters, regional headquarters and country level, putting the organization on an emergency footing for dealing with the outbreak.", "4 January 2020": "WHO reported on social media that there was a cluster of pneumonia cases – with no deaths – in Wuhan, Hubei province." } return data
1f0f58505ce4179d56b2bf6e4cb29e42cdd7cfc9
3,651,668
def canonicalize_specification(expr, syn_ctx, theory):
    """Performs a bunch of operations:
    1. Checks that the expr is "well-bound" to the syn_ctx object.
    2. Checks that the specification has the single-invocation property.
    3. Gathers the set of synth functions (should be only one).
    4. Gathers the variables used in the specification.
    5. Converts the specification to CNF (as part of the single-invocation test).
    6. Given that the spec is single invocation, rewrites the CNF spec (preserving
       satisfiability) by introducing new variables that correspond to a uniform way
       of invoking the (single) synth function.

    Returns a tuple containing:
    1. A list of 'variable_info' objects corresponding to the variables used in the spec
    2. A list of synth functions (should be a singleton list)
    3. A list of clauses corresponding to the CNF specification
    4. A list of NEGATED clauses
    5. A list containing the set of formal parameters that all appearances of the
       synth functions are invoked with.
    """
    check_expr_binding_to_context(expr, syn_ctx)
    clauses, cnf_expr = to_cnf(expr, theory, syn_ctx)

    synth_function_set = gather_synth_functions(expr)
    synth_function_list = list(synth_function_set)
    num_funs = len(synth_function_list)

    orig_variable_set = gather_variables(expr)
    orig_variable_list = [x.variable_info for x in orig_variable_set]
    orig_variable_list.sort(key=lambda x: x.variable_name)

    # check single invocation/separability properties
    if (not check_single_invocation_property(clauses, syn_ctx)):
        raise basetypes.ArgumentError('Spec:\n%s\nis not single-invocation!' %
                                      exprs.expression_to_string(expr))

    (intro_clauses, intro_vars) = _intro_new_universal_vars(clauses, syn_ctx,
                                                            synth_function_list[0])

    # ensure that the intro_vars are at the head of the list
    # Arjun: Why? Most likely not necessary
    variable_list = [x.variable_info for x in intro_vars] + orig_variable_list
    num_vars = len(variable_list)
    for i in range(num_vars):
        variable_list[i].variable_eval_offset = i
    num_funs = len(synth_function_list)
    for i in range(num_funs):
        synth_function_list[i].synth_function_id = i

    if len(intro_clauses) == 1:
        canon_spec = intro_clauses[0]
    else:
        canon_spec = syn_ctx.make_function_expr('and', *intro_clauses)

    canon_clauses = []
    for ic in intro_clauses:
        if exprs.is_application_of(ic, 'or'):
            disjuncts = ic.children
        else:
            disjuncts = [ic]
        canon_clauses.append(disjuncts)

    return (variable_list, synth_function_list, canon_spec, canon_clauses, intro_vars)
99613fb5cc78b53ca094ffb46cc927f05d5f74d4
3,651,669
def human_time(seconds, granularity=2): """Returns a human readable time string like "1 day, 2 hours".""" result = [] for name, count in _INTERVALS: value = seconds // count if value: seconds -= value * count if value == 1: name = name.rstrip("s") result.append("{} {}".format(int(value), name)) else: # Add a blank if we're in the middle of other values if len(result) > 0: result.append(None) if not result: if seconds < 1.0: return "%.2f seconds" % seconds else: if seconds == 1: return "1 second" else: return "%d seconds" % seconds return ", ".join([x for x in result[:granularity] if x is not None])
25d184982e5c0c2939814938f09a72ab2d46d270
3,651,670
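_INTERVALS is not shown in the snippet above; assuming the conventional (name, seconds-per-unit) pairs below, human_time behaves like this:

# Hypothetical definition of the module-level constant the function relies on.
_INTERVALS = (
    ('weeks', 604800),
    ('days', 86400),
    ('hours', 3600),
    ('minutes', 60),
    ('seconds', 1),
)

print(human_time(90061))  # '1 day, 1 hour' -- granularity=2 trims the trailing minute/second
print(human_time(45))     # '45 seconds'
print(human_time(0.25))   # '0.25 seconds'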
def cmorlet_wavelet(x, fs, freq_vct, n=6, normalization=True):
    """Perform the continuous wavelet transform (CWT) using the complex Morlet wavelet.

    Parameters
    ----------
    x : 1D array with shape (n_samples) or
        2D array with shape (n_samples, n_channels)
    fs : Sampling frequency
        in Hz
    freq_vct : 1D array
        with frequencies to compute the CWT (Default = [1 : 1 : fs/2] )
    n : Number of cycles inside the Gaussian curve
        (Default 6)
    normalization : Scale each wavelet to have energy equal to 1
        (Default True)

    Returns
    -------
    wcoef : Complex wavelet coefficients
        2D array with shape [n_samples, n_freqs] if `x` is 1D array
        3D array with shape [n_samples, n_freqs, n_channels] if `x` is 2D array

    wfam : 2D array with shape [n_wavelet_samples, n_freqs] where each column
        corresponds to a member of the wavelet family
    """
    # input 'x' as 2D matrix [samples, columns]
    try:
        x.shape[1]
    except IndexError:
        x = x[:, np.newaxis]

    # number of samples and number of channels
    n_samples, n_channels = x.shape

    # number of wavelets
    n_freqs = len(freq_vct)

    # number of samples for the wavelet family
    # This is equal to the number of samples needed to represent 2*n cycles
    # of a sine with frequency = freq_vct[0] [Hz], sampled at fs [Hz].
    # This is done to ensure that every wavelet in the wavelet family will be
    # close to 0 in the negative and positive edges
    n_samples_wav = np.round( (2*n/freq_vct[0])*fs )

    # The wavelet will be symmetrical around 0
    if np.mod(n_samples_wav,2) == 0: # even samples
        n_samples_wav = n_samples_wav + 1

    # create time vector for Wavelet family
    half = np.floor(n_samples_wav/2)
    time = np.arange(-half, half+1)/fs

    # initialize Wavelet family matrix
    wfam = np.zeros([len(time), n_freqs], dtype=complex)

    # for each frequency defined in FREQ, create its respective Wavelet
    for iwav in range(n_freqs):
        s = n/(2*np.pi*freq_vct[iwav])
        gaussian_win = np.exp((-time**2)/(2*s**2))
        sinwave = np.exp(2*np.pi*1j*freq_vct[iwav]*time)
        if normalization:
            # each wavelet has unit energy sum(abs(wavelet).^2)) = 1
            A = 1. / ((s**2) * np.pi) ** (1./4)
        else:
            A = 1.
        # Complex Morlet wavelet
        wfam[:, iwav] = A * sinwave * gaussian_win

    wcoef = np.zeros((n_samples, n_freqs, n_channels), dtype=complex)

    if n_channels == 1: # one channel
        tmp = conv_m(x, wfam, 'same')
        wcoef[:, :, 0] = tmp
    else: # convolution between signal X and each wavelet in the wavelet family
        for i_channel in range(n_channels):
            x_tmp = x[:, i_channel]
            tmp = conv_m(x_tmp, wfam, 'same')
            wcoef[:, :, i_channel] = tmp

    return wcoef, wfam
13a5e2b16c2641b8fabf997679f4d8f6724d32a9
3,651,671
import os
def update(key=None, value=None, cache_type=None, file_path=None):
    """Set a cache entry that is tied to the file's modification time.

    :param key: the key for the cache
    :param value: the value in the cache
    :param cache_type: when the cache is used by different modules, this parameter
                       protects entries from being overridden across modules
    :param file_path: path to the file
    :return: True if the cache entry was set successfully, False otherwise
    """
    global __cache_store
    __was_set = False
    try:
        with __lock:
            if cache_type not in __cache_store:
                __cache_store[cache_type] = {}
            if key not in __cache_store[cache_type]:
                __cache_store[cache_type][key] = {}
            if file_path not in __cache_store[cache_type][key]:
                __cache_store[cache_type][key][file_path] = {
                    "access_time": None,
                    "value": None
                }
            if os.path.exists(file_path):
                statbuf = os.stat(file_path)
                __cache_store[cache_type][key][file_path]['access_time'] = statbuf.st_mtime
                __cache_store[cache_type][key][file_path]['value'] = value
                __was_set = True
    except TypeError:
        # if key has unhashable type
        pass
    except Exception as error:
        raise RuntimeError(" Can't set key: %s type: %s because %s " % (key, cache_type, error))
    return __was_set
454b928dbacbcfa297aaf9385af1f185187857c9
3,651,672
from typing import Tuple
from typing import Union
from typing import List
def add_fake_planet(
    stack: np.ndarray,
    parang: np.ndarray,
    psf_template: np.ndarray,
    polar_position: Tuple[Quantity, Quantity],
    magnitude: float,
    extra_scaling: float,
    dit_stack: float,
    dit_psf_template: float,
    return_planet_positions: bool = False,
    interpolation: str = 'bilinear',
) -> Union[np.ndarray, Tuple[np.ndarray, List[Tuple[float, float]]]]:
    """
    Add a fake planet to the given ``stack`` which, when derotating and
    merging the stack, will show up at the given ``polar_position``.

    This function can also be used to *remove* planets from a stack by
    setting the ``extra_scaling`` to a negative number.

    If you simply want to use this function to generate a fake signal
    stack, set ``stack`` to all zeros, the ``magnitude`` to zero, both
    the ``dit_stack`` and ``dit_psf_template`` to 1 (or any other
    non-zero number), and use the ``extra_scaling`` factor to linearly
    control the "brightness" of the injected planet.

    This function is essentially a simplified port of the corresponding
    PynPoint function :py:func:`pynpoint.util.analysis.fake_planet()`.

    Args:
        stack: A 3D numpy array of shape `(n_frames, width, height)`
            which contains the stack of images / frames into which we
            want to inject a fake planet.
        parang: A 1D numpy array of shape `(n_frames,)` that contains
            the respective parallactic angle for every frame in `stack`.
        psf_template: A 2D numpy array that contains the (centered) PSF
            template which will be used for the fake planet. This should
            *not* be normalized to `(0, 1]` if we want to work with
            actual astrophysical magnitudes for the contrast.
        polar_position: A tuple `(separation, angle)` which specifies
            the position at which the planet will show up after
            de-rotating with ``parang``. ``separation`` needs to be a
            ``Quantity`` that can be converted to pixel; ``angle`` needs
            to be a ``Quantity`` that can be converted to radian.
            Additionally, ``angle`` should be using *astronomical* polar
            coordinates, that is, 0 degrees will be "up" (= North), not
            "right". This function will internally add 90° to the angles
            to convert them to mathematical polar coordinates.
        magnitude: The magnitude difference used to scale the PSF.
            Note: This is the contrast ratio in *magnitudes*, meaning
            that increasing this value by 5 will result in a planet that
            is 100 times fainter. In case you want to keep things
            linear, set this value to 0 and only use the
            ``extra_scaling`` parameter.
        extra_scaling: An additional scaling factor that is used for
            the PSF template. This number is simply multiplied with the
            PSF template, meaning that it changes the brightness
            linearly, not on a logarithmic scale. For example, you could
            use `-1` to add a *negative* planet to remove an actual
            planet in the data. This can also be used to incorporate an
            additional dimming factor due to a neutral density (ND)
            filter.
        dit_stack: The detector integration time of the frames in the
            ``stack`` (in seconds). Necessary to compute the correct
            scaling factor for the planet that we inject.
        dit_psf_template: The detector integration time of the
            ``psf_template`` (in seconds). Necessary to compute the
            correct scaling factor for the planet that we inject.
        return_planet_positions: Whether to return the (Cartesian)
            positions at which the fake planet was injected, as a 2D
            numpy array of shape `(n_frames, 2)`.
        interpolation: ``interpolation`` argument that is passed to
            :py:func:`scipy.ndimage.shift` that is used internally.
Returns: A 3D numpy array of shape `(n_frames, width, height)` which contains the original ``stack`` into which a fake planet has been injected, as well as a list of tuples `(x, y)` that, for each frame, contain the position at which the fake planet has been added. If desired (i.e., if ``return_planet_positions`` is ``True``), the function also returns a 2D numpy array of shape `(n_frames, 2)` containing the Cartesian positions at which the fake planet has been injected. """ # Make sure that the stack and the parallactic angles are compatible check_consistent_size(stack, parang) # Define shortcut for the number of frames and the frame_size n_frames, frame_size = stack.shape[0], (stack.shape[1], stack.shape[2]) # Split the target planet position into separation and angles, convert # the quantities to pixels / convert to mathematical polar coordinates rho = polar_position[0].to('pixel').value phi = np.radians(polar_position[1].to('degree').value + 90 - parang) # Convert `magnitude` from logarithmic contrast to linear flux ratio flux_ratio = 10.0 ** (-magnitude / 2.5) # Compute scaling factor that is due to the different integration times # for the science images and the PSF template dit_scaling = dit_stack / dit_psf_template # Combine all scaling factors and scale the PSF template scaling_factor = flux_ratio * dit_scaling * extra_scaling psf_scaled = scaling_factor * np.copy(psf_template) # Make sure that the PSF has a compatible shape, that is, either crop or # pad the PSF template to the same spatial shape as the `stack`. psf_scaled = crop_or_pad(psf_scaled, frame_size) # Compute the shift for each frame x_shift = rho * np.cos(phi) y_shift = rho * np.sin(phi) # Initialize the "pure signal" stack (can use empty() here, because all # values will be overwritten and allocation should be slightly faster) signal_stack = np.empty_like(stack) # For each frame, move the scaled PSF template to the correct position # Note: We use mode='constant' instead of 'reflect' here (unlike PynPoint) # because the latter just does not seem to make a lot of sense? for i in range(n_frames): signal_stack[i] = shift_image( image=psf_scaled, offset=(float(x_shift[i]), float(y_shift[i])), interpolation=interpolation, mode='constant', ) # Add the planet stack to the original input stack output_stack = stack + signal_stack # Either return only the output stack, or the output stack and # the planet positions if return_planet_positions: center = get_center(frame_size) planet_positions = np.column_stack( (x_shift + center[0], y_shift + center[1]) ) return output_stack, planet_positions return np.array(output_stack)
f5897585934fe9609a4d6cc0f032285194a59f19
3,651,673
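The magnitude-to-flux conversion inside add_fake_planet is the standard astronomical relation; this three-line check (independent of the stack/PSF machinery) makes the scaling direction explicit:

for magnitude in (0.0, 2.5, 5.0):
    flux_ratio = 10.0 ** (-magnitude / 2.5)
    print(magnitude, flux_ratio)  # 0.0 -> 1.0, 2.5 -> 0.1, 5.0 -> 0.01: larger magnitude, fainter planet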
import os import json import tqdm def make_cache(channel, subdir): """Reads and/or generates the cachefile and returns the cache""" # load cache channel_name = _get_channel_name(channel) cachefile = f"{channel_name}.{subdir}.cache.json" if os.path.exists(cachefile): print(f"Loading cache from {cachefile}") with open(cachefile) as f: cache = json.load(f) else: cache = {} # load repodata pkgs = _get_repodata_packages(channel, subdir) # add packages to cache needed = set(pkgs.keys()) - set(cache.keys()) for i, artifact in enumerate(tqdm.tqdm(needed)): _add_artifact_to_cache(cache, pkgs[artifact], channel, subdir, artifact) if i % 100 == 99: # save the state occasionally _save_cache(cache, cachefile, display=False) _save_cache(cache, cachefile) return cache
35443f07821602ef380a3040ad9f80f937a9674a
3,651,674
def _BD_from_Av_for_dereddening(line_lambdas, line_fluxes, A_v): """ Find the de-reddened Balmer decrement (BD) that would arise from "removing" an extinction of A_v (magnitudes) from the line_fluxes. line_lambdas, line_fluxes: As in the function "deredden". A_v: The extinction (magnitudes), as a scalar or array of extinction values. Returns the Balmer decrement dereddened_BD (F_Halpha / F_Hbeta), as a float or array of floats with the same shape as A_v. """ assert np.all(np.asarray(A_v) >= 0) initial_BD = _find_BD(line_lambdas, line_fluxes) # Calculate the Balmer decrement (BD) that would result from "removing" an # extinction of A_v, using an inverted form of Equation A14 in Vogt13. dereddened_BD = initial_BD / 10**(A_v / 8.55) return dereddened_BD
280255db3669b8ee585afbcb685dc97dfbedc5c0
3,651,675
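A purely arithmetic illustration of the relation above, independent of _find_BD (A_v = 8.55 is deliberately exaggerated so that the correction factor is exactly 10):

initial_BD = 4.0                                 # example observed F_Halpha / F_Hbeta
A_v = 8.55
dereddened_BD = initial_BD / 10 ** (A_v / 8.55)
print(dereddened_BD)                             # 0.4 -- removing extinction lowers the Balmer decrement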
def otherEnd(contours, top, limit):
    """
    top and end are too close together, so find another top point: the one
    farthest from top (subject to the limit predicate).
    """
    tt = (0, 9999)
    for li in contours:
        for pp in li:
            p = pp[0]
            if limit(p[0]) and top[1] - p[1] < 15 and abs(top[0] - p[0]) > 50 and p[1] < tt[1]:
                tt = p
    return tt
4f938d33ba28c1999603cd60381ed6d9aec23815
3,651,676
import os def plivo_webhook(event, context): """ Receives SMS messages and forwards them to telegram """ CHAT_ID = int(os.environ['CHAT_ID']) bot = configure_telegram() logger.info('Plivo Event: {}'.format(event)) try: body = parse_plivo_msg(event) except AssertionError as e: logger.info(e) return ERROR_RESPONSE sender = body['From'] msg = body['Text'] text = "{}: {}".format(sender, msg) bot.send_message(chat_id=CHAT_ID, text=text) logger.info('Message sent') return OK_RESPONSE
580925e10f32c717116487a6c0e3623f724ad16a
3,651,677
from matador.workflows.castep.common import castep_prerelax def castep_phonon_prerelax(computer, calc_doc, seed): """ Run a singleshot geometry optimisation before an SCF-style calculation. This is typically used to ensure phonon calculations start successfully. The phonon calculation will then be restarted from the .check file produced here. Parameters: computer (:obj:`ComputeTask`): the object that will be calling CASTEP. calc_doc (dict): the structure to run on. seed (str): root filename of structure. """ LOG.info('Performing CASTEP phonon pre-relax...') required = ["write_checkpoint"] forbidden = ['phonon_fine_kpoint_list', 'phonon_fine_kpoint_path', 'phonon_fine_kpoint_mp_spacing', 'phonon_fine_kpoint_path_spacing'] return castep_prerelax( computer, calc_doc, seed, required_keys=required, forbidden_keys=forbidden )
4687e6cdf7150c8721329c7ea1b007e47ee3cd7e
3,651,678
def get_external_links(soup):
    """Retrieve the different links from a `Lyric Wiki` page.

    The links returned can be found in the `External Links` page section, and usually
    reference other platforms (like Last.fm, Amazon, iTunes etc.).

    Args:
        soup (bs4.element.Tag): connection to the `Lyric Wiki` page.

    Returns:
        dict

    Examples::
        >>> # Import packages
        >>> import bs4  # for web scraping
        >>> import urllib.request  # to connect

        >>> # Set Up: connect to a lyric wiki page
        >>> USER = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
        >>> HEADERS = {'User-Agent': USER}
        >>> URL = 'https://lyrics.fandom.com/wiki/London_Grammar:Who_Am_I'
        >>> req = urllib.request.Request(URL, headers=HEADERS)
        >>> page = urllib.request.urlopen(req)
        >>> soup = bs4.BeautifulSoup(page, 'lxml')

        >>> # Retrieve links from the page
        >>> get_external_links(soup)
        {'Amazon': ['https://www.amazon.com/exec/obidos/redirect?link_code=ur2&tag=wikia-20&camp=1789&creative=9325&path=https%3A%2F%2Fwww.amazon.com%2Fdp%2FB00J0QJ84E'],
         'Last.fm': ['https://www.last.fm/music/London+Grammar', 'https://www.last.fm/music/London+Grammar/If+You+Wait'],
         'iTunes': ['https://itunes.apple.com/us/album/695805771'],
         'AllMusic': ['https://www.allmusic.com/album/mw0002559862'],
         'Discogs': ['http://www.discogs.com/master/595953'],
         'MusicBrainz': ['https://musicbrainz.org/release-group/dbf36a9a-df02-41c4-8fa9-5afe599960b0'],
         'Spotify': ['https://open.spotify.com/album/0YTj3vyjZmlfp16S2XGo50']}
    """
    # Only add links from this set. Others are not relevant.
    links_keys = ['Amazon', 'Last.fm', 'iTunes', 'AllMusic', 'Discogs', 'MusicBrainz',
                  'Spotify', 'Bandcamp', 'Wikipedia', 'Pandora', 'Hype Machine']
    links = {}
    # Scrape links from a page
    for external_tag in scrape_external_links(soup):
        # Get the respective link / href
        for link_a in external_tag.findAll('a', attrs={'class', 'external text'}):
            # Add it to a dict
            key = external_tag.text.split(':')[0].strip()
            if key in links_keys:
                links.setdefault(key, [])
                links[key].append(link_a.get('href'))

    return links
9d1f654176cfe5ccdc849448b5cf1720dba4e6c5
3,651,679
def gcc(): """Return the current container, that is the widget holding the figure and all the control widgets, buttons etc.""" gcf() # make sure we have something.. return current.container
d32b9c53694ad258976757b15cc0982431b06e8e
3,651,680
def preprocessing(string):
    """Helper function to remove punctuation from a string."""
    string = string.replace(',', ' ').replace('.', ' ')
    string = string.replace('(', '').replace(')', '')
    words = string.split()  # split on any whitespace, dropping the empty tokens left by the replacements
    return words
17f41a566c3661ab6ffb842ac6d610425fc779d1
3,651,681
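A quick check of preprocessing, assuming the whitespace-split variant shown above:

print(preprocessing("Hello, world. (example)"))  # ['Hello', 'world', 'example']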
def add_input_arguments(argument_parser_object): """Adds input args for this script to `argparse.ArgumentParser` object. :param argument_parser_object: `argparse.ArgumentParser` object, which may or may not already contain input args. :return: argument_parser_object: Same as input object, but with new input args added. """ argument_parser_object.add_argument( '--' + TRACKING_DIR_INPUT_ARG, type=str, required=False, default=DEFAULT_TRACKING_DIR_NAME, help=TRACKING_DIR_HELP_STRING) argument_parser_object.add_argument( '--' + TRACKING_SCALE_INPUT_ARG, type=int, required=False, default=echo_top_tracking.DUMMY_TRACKING_SCALE_METRES2, help=TRACKING_SCALE_HELP_STRING) argument_parser_object.add_argument( '--' + GRIDRAD_DIR_INPUT_ARG, type=str, required=False, default=DEFAULT_GRIDRAD_DIR_NAME, help=GRIDRAD_DIR_HELP_STRING) argument_parser_object.add_argument( '--' + OUTPUT_DIR_INPUT_ARG, type=str, required=False, default=DEFAULT_OUTPUT_DIR_NAME, help=OUTPUT_DIR_HELP_STRING) return argument_parser_object
7e4b407aff10148c9843ba33410c233a32acc36d
3,651,682
def fullUnitSphere(res): """Generates a unit sphere in the same way as :func:`unitSphere`, but returns all vertices, instead of the unique vertices and an index array. :arg res: Resolution - the number of angles to sample. :returns: A ``numpy.float32`` array of size ``(4 * (res - 1)**2, 3)`` containing the ``(x, y, z)`` vertices which can be used to draw a unit sphere (using the ``GL_QUADS`` primitive type). """ u = np.linspace(-np.pi / 2, np.pi / 2, res, dtype=np.float32) v = np.linspace(-np.pi, np.pi, res, dtype=np.float32) cosu = np.cos(u) cosv = np.cos(v) sinu = np.sin(u) sinv = np.sin(v) vertices = np.zeros(((res - 1) * (res - 1) * 4, 3), dtype=np.float32) cucv = np.outer(cosu[:-1], cosv[:-1]).flatten() cusv = np.outer(cosu[:-1], sinv[:-1]).flatten() cu1cv = np.outer(cosu[1:], cosv[:-1]).flatten() cu1sv = np.outer(cosu[1:], sinv[:-1]).flatten() cu1cv1 = np.outer(cosu[1:], cosv[1:]) .flatten() cu1sv1 = np.outer(cosu[1:], sinv[1:]) .flatten() cucv1 = np.outer(cosu[:-1], cosv[1:]) .flatten() cusv1 = np.outer(cosu[:-1], sinv[1:]) .flatten() su = np.repeat(sinu[:-1], res - 1) s1u = np.repeat(sinu[1:], res - 1) vertices.T[:, ::4] = [cucv, cusv, su] vertices.T[:, 1::4] = [cu1cv, cu1sv, s1u] vertices.T[:, 2::4] = [cu1cv1, cu1sv1, s1u] vertices.T[:, 3::4] = [cucv1, cusv1, su] return vertices
65d83a83b17087934847ab7db8200a67c79294d4
3,651,683
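A small sanity check of fullUnitSphere; only numpy is needed, the vertex count follows the 4 * (res - 1)**2 formula from the docstring, and every vertex should sit on the unit sphere:

import numpy as np

verts = fullUnitSphere(10)
print(verts.shape)                                    # (324, 3), i.e. 4 * (10 - 1)**2 vertices
print(np.allclose(np.linalg.norm(verts, axis=1), 1))  # True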
def prompt_for_word_removal(words_to_ignore=None):
    """
    Prompts the user for words that should be ignored in keyword extraction.

    Parameters
    ----------
    words_to_ignore : str or list
        Words that should not be included in the output.

    Returns
    -------
    words_to_ignore, words_added : list, bool
        A new list of words to ignore and a boolean indicating if words have been added.
    """
    if words_to_ignore is None:
        words_to_ignore = []
    elif isinstance(words_to_ignore, str):
        words_to_ignore = [words_to_ignore]
    words_to_ignore = [w.replace("'", "") for w in words_to_ignore]

    words_added = False  # whether to run the models again
    more_words = True
    while more_words:
        more_words = input("\nShould words be removed [y/n]? ")
        if more_words == "y":
            new_words_to_ignore = input("Type or copy word(s) to be removed: ")
            # Remove commas if the user has used them to separate words,
            # as well as apostrophes.
            new_words_to_ignore = [
                char for char in new_words_to_ignore if char not in [",", "'"]
            ]
            new_words_to_ignore = "".join(new_words_to_ignore)

            if " " in new_words_to_ignore:
                new_words_to_ignore = new_words_to_ignore.split(" ")
            elif isinstance(new_words_to_ignore, str):
                new_words_to_ignore = [new_words_to_ignore]

            words_to_ignore += new_words_to_ignore
            words_added = True  # we need to run the models again

            more_words = False

        elif more_words == "n":
            more_words = False

        else:
            print("Invalid input")

    return words_to_ignore, words_added
65615f3fe5f0391f44d60e7e9a2990d8fea35bc0
3,651,684
import time
def wait_for_image_property(identifier, property, cmp_func, wait=20,
                            maxtries=10):
    """Wait for an image to have a given property.

    Raises TimeoutError on failure.

    :param identifier: the image identifier
    :param property: the name of the property
    :param cmp_func: predicate function accepting current value of the property
    :param wait: time (in seconds) between polls
    :param maxtries: maximum number of attempts
    :returns: True
    """
    logger.info('Waiting for {identifier} to be {property} using {cmp_func}'
                .format(**locals()))
    for _ in range(maxtries):
        output = image_show(identifier)
        current = openstack_parse_show(output, property)
        if cmp_func(current):
            return True
        else:
            time.sleep(wait)
    msg = 'Timeout while waiting for image {identifier} {property} using {fn}'\
        .format(identifier=identifier, property=property, fn=cmp_func)
    logger.info(msg)
    raise TimeoutError(msg)
27ad96fceb931a73deddb49fb40975dd295ebd36
3,651,685
import os
def make_sample_ensemble_seg_plot(model2, model3, sample_filenames, test_samples_fig, flag='binary'):
    """
    "make_sample_ensemble_seg_plot(model2, model3, sample_filenames, test_samples_fig, flag='binary')"
    This function uses two trained models to estimate the label image from each input image.
    It then uses a KL score to determine which one to return, and returns both images and labels as a list, as well as a list of which model's output is returned.
    INPUTS:
        * model2, model3: trained and compiled keras models
        * sample_filenames: [list] of strings
        * test_samples_fig [string]: filename to print figure to
        * flag [string]: either 'binary' or 'multiclass'
    OPTIONAL INPUTS: None
    GLOBAL INPUTS: None
    OUTPUTS:
        * imgs: [list] of images
        * lbls: [list] of label images
        * model_num: [list] of integers indicating which model's output was returned based on CRF KL divergence
    """
    plt.figure(figsize=(16,16))
    imgs = []
    lbls = []
    model_num = []
    for counter, f in enumerate(sample_filenames):
        image = seg_file2tensor(f)/255
        est_label1 = model2.predict(tf.expand_dims(image, 0), batch_size=1).squeeze()
        if flag == 'binary':
            est_label1[est_label1>0.5] = 1
            est_label1 = (est_label1*255).astype(np.uint8)
        else:
            est_label1 = tf.argmax(est_label1, axis=-1)

        est_label2 = model3.predict(tf.expand_dims(image, 0), batch_size=1).squeeze()
        if flag == 'binary':
            est_label2[est_label2>0.5] = 1
            est_label2 = (est_label2*255).astype(np.uint8)
        else:
            est_label2 = tf.argmax(est_label2, axis=-1)

        label = est_label1.numpy().astype('int')
        img = (image.numpy()*255).astype(np.uint8)
        est_labelA, kl1 = crf_refine(label, img)

        label = est_label2.numpy().astype('int')
        est_labelB, kl2 = crf_refine(label, img)
        del label

        # plt.subplot(221); plt.imshow(image); plt.imshow(est_label1, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 1 estimate', fontsize=6)
        # plt.subplot(222); plt.imshow(image); plt.imshow(est_label2, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 2 estimate', fontsize=6)
        # plt.subplot(223); plt.imshow(image); plt.imshow(est_labelA, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 1 CRF estimate ('+str(-np.log(-kl1))[:7]+')', fontsize=6)
        # plt.subplot(224); plt.imshow(image); plt.imshow(est_labelB, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 2 CRF estimate ('+str(-np.log(-kl2))[:7]+')', fontsize=6)
        # plt.savefig('crf-example'+str(counter)+'.png', dpi=600, bbox_inches='tight'); plt.close('all')
        #
        if kl1 < kl2:
            est_label = est_labelA.copy()
            model_num.append(1)
        else:
            est_label = est_labelB.copy()
            model_num.append(2)

        if flag == 'binary':
            plt.subplot(6,4,counter+1)
        else:
            plt.subplot(4,4,counter+1)
        name = sample_filenames[counter].split(os.sep)[-1].split('_')[0]
        plt.title(name, fontsize=10)
        plt.imshow(image)
        if flag == 'binary':
            plt.imshow(est_label, alpha=0.5, cmap=plt.cm.gray, vmin=0, vmax=1)
        else:
            plt.imshow(est_label, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3)

        plt.axis('off')
        imgs.append(image)
        lbls.append(est_label)

    # plt.show()
    plt.savefig(test_samples_fig, dpi=200, bbox_inches='tight')
    plt.close('all')
    return imgs, lbls, model_num
635f237659c6ba123c494175925d5ce5070b850c
3,651,686
def mock_requests_get_json_twice(mocker: MockerFixture) -> MagicMock: """Mock two pages of results returned from the parliament open data API.""" mock: MagicMock = mocker.patch("requests.get") mock.return_value.__enter__.return_value.json.side_effect = [ { "columnNames": ["column1", "column2"], "rowData": [["Lorem ipsum", "dolor sit amet"]], "hasMore": True, }, { "columnNames": ["column1", "column2"], "rowData": [["eripuit principes intellegam", "eos id"]], "hasMore": False, }, ] return mock
1c546963b5a2503c8d65d87ee373c2d2c5981b2a
3,651,687
def _get_rating_accuracy_stats(population, ratings):
    """
    Calculate how accurate our ratings were.

    :param population: iterable of the true values, one per employee
    :param ratings: iterable of estimated ratings, aligned with ``population``
    :return: tuple of (num_underestimates, num_correct, num_overestimates)
    """
    num_overestimates = 0
    num_underestimates = 0
    num_correct = 0
    for employee, rating in zip(population, ratings):
        if rating < employee:
            num_underestimates += 1
        elif rating > employee:
            num_overestimates += 1
        else:
            num_correct += 1

    return num_underestimates, num_correct, num_overestimates
6fefd6faf465a304acc692b465f575cc4c3a62e3
3,651,688
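A tiny worked example of the counting logic in _get_rating_accuracy_stats:

population = [3, 4, 5]  # true values
ratings = [3, 5, 4]     # estimates
print(_get_rating_accuracy_stats(population, ratings))  # (1, 1, 1): one under, one exact, one over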
import hashlib
def genb58seed(entropy=None):
    """
    Generate a random Family Seed for Ripple. (Private Key)

    entropy = String of any random data. Please ensure high entropy.

    ## Note: ecdsa library's randrange() uses os.urandom() to get its entropy.
    ##       This should be secure enough... but just in case, I added the ability
    ##       to include your own entropy in addition.
    """
    if entropy is None:
        entropy = int2data(ecdsa.util.randrange(2 ** 128), 16)
    else:
        entropy = hashlib.sha256(entropy + int2data(ecdsa.util.randrange(2 ** 128), 16)).digest()[:16]

    b58seed = data_to_address(entropy, 33)
    return b58seed
1bfbbbff5abffa2bac0fd2accf9480387ff2e8bb
3,651,689
def convert_nhwc_to_nchw(data: np.array) -> np.array: """Convert data to NCHW.""" return np.transpose(data, [0, 3, 1, 2])
5ca229d9dfcb388d3f3a487b51719eaa0dd8fdb6
3,651,690
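Shape check for the NHWC-to-NCHW transpose:

import numpy as np

batch = np.zeros((2, 224, 224, 3), dtype=np.float32)  # NHWC: batch, height, width, channels
print(convert_nhwc_to_nchw(batch).shape)              # (2, 3, 224, 224)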
def get_mfcc_features(wave_data: pd.Series, n_mfcc):
    """Compute MFCC features (plus first- and second-order deltas) for each
    waveform, after per-sample standardization and padding to a common length."""
    x = wave_data.apply(lambda d: (d-np.mean(d))/(np.std(d)))
    # x = wave_data
    x, max_length = utils.padding_to_max(x)
    features = []
    for i in range(x.shape[0]):
        t1 = mfcc(x[i], sr=16000, n_mfcc=n_mfcc)
        t2 = utils.diff(t1, axis=0)
        t3 = utils.diff(t1, axis=0, delta=2)
        t = np.concatenate([t1.T, t2.T, t3.T], axis=1).flatten()
        features.append(t)
    return np.array(features)
2f5fa5a4f752c4d5af963bd390868f98e886c0d9
3,651,691
def download_instance_func(instance_id): """Download a DICOM Instance as DCM""" file_bytes = client.orthanc.download_instance_dicom(instance_id) return flask.send_file(BytesIO(file_bytes), mimetype='application/dicom', as_attachment=True, attachment_filename=f'{instance_id}.dcm')
bbd506904096da9d73f3c0f33dd30ba869551025
3,651,692
def generate_random_initial_params(n_qubits, n_layers=1, topology='all', min_val=0., max_val=1., n_par=0, seed=None): """Generate random parameters for the QCBM circuit (iontrap ansatz). Args: n_qubits (int): number of qubits in the circuit. n_layers (int): number of entangling layers in the circuit. If n_layers=-1, you can specify a custom number of parameters (see below). topology (str): describes topology of qubits connectivity. min_val (float): minimum parameter value. max_val (float): maximum parameter value. n_par (int): specifies number of parameters to be generated in case of incomplete layers (i.e. n_layers=-1). seed (int): initialize random generator Returns: numpy.array: the generated parameters, stored in a 1D array. """ gen = np.random.RandomState(seed) assert(topology == 'all') n_params_layer_zero = 2*n_qubits n_params_per_layer = int((n_qubits*(n_qubits-1))/2) if n_layers==-1: n_params=n_par else: assert(n_layers>0) if n_par!=0: raise ValueError("If n_layers is specified, n_par is automatically computed.") n_params = n_params_layer_zero+n_layers*n_params_per_layer params = gen.uniform(min_val, max_val, n_params) return(params)
f3beaa9b36b704d8289c91c46895247275a69ef1
3,651,693
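number_of_friends reads a module-level friendships mapping that is not shown; with a hypothetical adjacency structure it behaves like this:

# Hypothetical data in the shape the snippet expects: user id -> list of friend ids.
friendships = {0: [1, 2], 1: [0], 2: [0]}
users = [{"id": 0, "name": "Hero"}, {"id": 1, "name": "Dunn"}]

print([number_of_friends(u) for u in users])  # [2, 1]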
def number_of_friends(user): """How many friends does this user have?""" user_id = user["id"] friend_ids = friendships[user_id] return len(friend_ids)
3f17dfb1e2c3829c650727d36a34a24885d4d77d
3,651,694
def get_serializer_class(format=None): """Convenience function returns serializer or raises SerializerNotFound.""" if not format: serializer = BaseSerializer() elif format == 'json-ld': serializer = JsonLDSerializer() elif format == 'json': serializer = JsonSerializer() else: raise SerializerNotFound(format) return serializer
7660ba2f7861773d6a4e8d5796facbbe96259503
3,651,695
from typing import Optional from typing import Any def get_or_create_mpc_section( mp_controls: "MpConfigControls", section: str, subkey: Optional[str] = None # type: ignore ) -> Any: """ Return (and create if it doesn't exist) a settings section. Parameters ---------- mp_controls : MpConfigControls The MP Config database. section : str The section name (top level settings item) subkey : Optional[str], optional Optional subkey to create, by default None Returns ------- Any The settings at that section[subkey] location. """ curr_section = mp_controls.get_value(section) if curr_section is None: mp_controls.set_value(section, {}) curr_section = mp_controls.get_value(section) if subkey and subkey not in curr_section: mp_controls.set_value(f"{section}.{subkey}", {}) return mp_controls.get_value(f"{section}.{subkey}") return mp_controls.get_value(section)
60b741f35e0a1c9fe924b472217e0e3b62a1d31e
3,651,696
import csv
def get_sql_table_headers(csv_dict_reader: csv.DictReader) -> list:
    """ This takes in a csv dictionary reader type, and returns a list of the
    headings needed to make a table """
    column_names = []
    for row in csv_dict_reader:
        for column in row:
            column_names.append('{} {} '.format(column, get_sql_type(row[column])))
    return column_names
b874ca3992eac45ed1708434a5adfd28fd96c1cd
3,651,697
from unittest.mock import call def greater_than(val1, val2): """Perform inequality check on two unsigned 32-bit numbers (val1 > val2)""" myStr = flip_string(val1) + flip_string(val2) call(MATH_32BIT_GREATER_THAN,myStr) return ord(myStr[0]) == 1
b9bba2aa776dc71320df736c654a5c0163827dff
3,651,698
from re import I def upsampling_2x_blocks(n_speakers, speaker_dim, target_channels, dropout): """Return a list of Layers that upsamples the input by 2 times in time dimension. Args: n_speakers (int): number of speakers of the Conv1DGLU layers used. speaker_dim (int): speaker embedding size of the Conv1DGLU layers used. target_channels (int): channels of the input and the output.(the list of layers does not change the number of channels.) dropout (float): dropout probability. Returns: List[Layer]: upsampling layers. """ upsampling_convolutions = [ Conv1DTranspose( target_channels, target_channels, 2, stride=2, param_attr=I.Normal(scale=np.sqrt(1. / (2 * target_channels)))), Conv1DGLU( n_speakers, speaker_dim, target_channels, target_channels, 3, dilation=1, std_mul=1., dropout=dropout), Conv1DGLU( n_speakers, speaker_dim, target_channels, target_channels, 3, dilation=3, std_mul=4., dropout=dropout) ] return upsampling_convolutions
e2a31c4ef7c392d86e5cf6ac96891b1a57a3692e
3,651,699