content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, values 0 to 3.66M
import json


def get_assay_description(assay_id, summary=True, attempts=10):
    """
    Get the description of an assay in JSON format.

    Parameters
    ----------
    assay_id : int
        The id of the bioassay.
    summary : bool, optional
        If true returns a summary of the description of the assay (default=True).
    attempts : int, optional
        number of times to try to download the data in case of failure (default=10).

    Returns
    -------
    dict
        A dictionary containing the assay description.
    """
    assay_url = base_url + "/assay/aid/{}".format(assay_id)
    if summary:
        description_url = assay_url + "/summary/JSON"
    else:
        description_url = assay_url + "/description/JSON"
    data = _get_data(description_url, attempts)
    return json.loads(data)
13ff3620a1ef3e7aa1c12bd5a9b5aa88b2fb297f
3,655,337
def acos(expr):
    """
    Arc cosine -- output in radians.

    It is the same as the :code:`arccos` Moodle math function.
    """
    return Expression('acos({0})'.format(str(expr)))
d064caaa037de619266e322f85ae09c2ba7d9d16
3,655,338
from datetime import datetime def annotate_genes(gene_df, annotation_gtf, lookup_df=None): """ Add gene and variant annotations (e.g., gene_name, rs_id, etc.) to gene-level output gene_df: output from map_cis() annotation_gtf: gene annotation in GTF format lookup_df: DataFrame with variant annotations, indexed by 'variant_id' """ gene_dict = {} print('['+datetime.now().strftime("%b %d %H:%M:%S")+'] Adding gene and variant annotations', flush=True) print(' * parsing GTF', flush=True) with open(annotation_gtf) as gtf: for row in gtf: row = row.strip().split('\t') if row[0][0]=='#' or row[2]!='gene': continue # get gene_id and gene_name from attributes attr = dict([i.split() for i in row[8].replace('"','').split(';') if i!='']) # gene_name, gene_chr, gene_start, gene_end, strand gene_dict[attr['gene_id']] = [attr['gene_name'], row[0], row[3], row[4], row[6]] print(' * annotating genes', flush=True) if 'group_id' in gene_df: gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df['group_id']], columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'], index=gene_df.index) else: gene_info = pd.DataFrame(data=[gene_dict[i] for i in gene_df.index], columns=['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand'], index=gene_df.index) gene_df = pd.concat([gene_info, gene_df], axis=1) assert np.all(gene_df.index==gene_info.index) col_order = ['gene_name', 'gene_chr', 'gene_start', 'gene_end', 'strand', 'num_var', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df', 'variant_id', 'tss_distance'] if lookup_df is not None: print(' * adding variant annotations from lookup table', flush=True) gene_df = gene_df.join(lookup_df, on='variant_id') # add variant information col_order += list(lookup_df.columns) col_order += ['ma_samples', 'ma_count', 'af', 'pval_nominal', 'slope', 'slope_se', 'pval_perm', 'pval_beta'] if 'group_id' in gene_df: col_order += ['group_id', 'group_size'] col_order += ['qval', 'pval_nominal_threshold'] gene_df = gene_df[col_order] print('done.', flush=True) return gene_df
562ef01380075a3e12eeaecdd6ab1e2285ddbc4f
3,655,339
import torch


def y_gate():
    """ Pauli y """
    return torch.tensor([[0, -1j], [1j, 0]]) + 0j
c0da0112233773e1c764e103599a591bb7a4a7f5
3,655,340
import tarfile


def extract_tarball(tarball, install_dir):
    """Extract tarball to a local path"""
    if not tarball.path.is_file():
        raise IOError(f"<info>{tarball.path}</info> is not a file!")
    try:
        with tarfile.open(tarball.path, "r:gz") as f_tarball:
            extraction_dir = [
                obj.name
                for obj in f_tarball.getmembers()
                if obj.isdir() and "/" not in obj.name
            ][0]
            f_tarball.extractall(install_dir)
    except tarfile.ReadError as exc:
        raise IOError(f"<info>{tarball.path}</info> is not a valid tarball!") from exc
    return install_dir / extraction_dir
da9deeb71da36c7c01611f3be7965a8c4a22dc41
3,655,341
def compose_matrix(scale=None, shear=None, angles=None, translation=None, perspective=None): """Calculates a matrix from the components of scale, shear, euler_angles, translation and perspective. Parameters ---------- scale : [float, float, float] The 3 scale factors in x-, y-, and z-direction. shear : [float, float, float] The 3 shear factors for x-y, x-z, and y-z axes. angles : [float, float, float] The rotation specified through the 3 Euler angles about static x, y, z axes. translation : [float, float, float] The 3 values of translation. perspective : [float, float, float, float] The 4 perspective entries of the matrix. Returns ------- list[list[float]] The 4x4 matrix that combines the provided transformation components. Examples -------- >>> trans1 = [1, 2, 3] >>> angle1 = [-2.142, 1.141, -0.142] >>> scale1 = [0.123, 2, 0.5] >>> M = compose_matrix(scale1, None, angle1, trans1, None) >>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M) >>> allclose(scale1, scale2) True >>> allclose(angle1, angle2) True >>> allclose(trans1, trans2) True """ M = [[1. if i == j else 0. for i in range(4)] for j in range(4)] if perspective is not None: P = matrix_from_perspective_entries(perspective) M = multiply_matrices(M, P) if translation is not None: T = matrix_from_translation(translation) M = multiply_matrices(M, T) if angles is not None: R = matrix_from_euler_angles(angles, static=True, axes="xyz") M = multiply_matrices(M, R) if shear is not None: H = matrix_from_shear_entries(shear) M = multiply_matrices(M, H) if scale is not None: S = matrix_from_scale_factors(scale) M = multiply_matrices(M, S) for i in range(4): for j in range(4): M[i][j] /= M[3][3] return M
a186919f8b6fc47637e7c20db30fbdd8e461e059
3,655,342
def dict_merge(set1, set2):
    """Joins two dictionaries."""
    return dict(list(set1.items()) + list(set2.items()))
d88a68720cb9406c46bdef40f46e461a80e588c0
3,655,343
def EucDistIntegral(a, b, x): """[summary] Calculate Integrated Euclidean distance. Args: a (float): a value b (float): b value x (float): x value Returns: val: Integration result """ asq = a * a bsq = b * b xsq = x * x dn = (6 * (1 + asq)**(3 / 2)) cx = (a * b + x + asq * x) / \ sqrt((bsq + 2 * a * b * x + (1 + asq) * xsq)) / sqrt((1 + asq)) if abs(abs(cx) - 1) <= 1E-9 or np.isnan(cx): c1 = x * b**2 else: c1 = b**3 * arctanh(np.float(cx)) c2 = sqrt(bsq + 2 * a * b * x + (1 + asq) * xsq) * \ (2 * b * x + 2 * asq * b * x + a**3 * xsq + a * (bsq + xsq)) if x == 0: c4 = 0 else: c3 = abs(x) / (b + a * x + sqrt(xsq + (b + a * x)**2)) if np.isnan(c3) or np.isinf(c3): if b == 0: c3 = 1 / (sign(x) * a + sqrt(asq + 1)) else: c3 = -2 * b / abs(x) c4 = (1 + asq) * x**3 * log(c3) return (c1 + sqrt(1 + asq) * (c2 - c4)) / dn
3da541356636e8be7f9264d9d59a29dd003c082b
3,655,344
def _VarintSize(value):
    """Compute the size of a varint value."""
    if value <= 0x7f: return 1
    if value <= 0x3fff: return 2
    if value <= 0x1fffff: return 3
    if value <= 0xfffffff: return 4
    if value <= 0x7ffffffff: return 5
    if value <= 0x3ffffffffff: return 6
    if value <= 0x1ffffffffffff: return 7
    if value <= 0xffffffffffffff: return 8
    if value <= 0x7fffffffffffffff: return 9
    return 10
4bd9b1c8d362f5e72e97f9f2c8e0d5711065291f
3,655,345
import requests


def send_to_hipchat(
        message,
        token=settings.HIPCHAT_API_TOKEN,
        room=settings.HIPCHAT_ROOM_ID,
        sender="Trello",
        color="yellow",
        notify=False):  # noqa
    """
    Send a message to HipChat.

    Returns the status code of the request. Should be 200.
    """
    payload = {
        'auth_token': token,
        'notify': notify,
        'color': color,
        'from': sender,
        'room_id': room,
        'message': message
    }
    return requests.post(HIPCHAT_API_URL, data=payload).status_code
138abbf59f561a4c5d21aea9976856dbd7a581ca
3,655,346
from cStringIO import StringIO import cgi def input(*requireds, **defaults): """ Returns a `storage` object with the GET and POST arguments. See `storify` for how `requireds` and `defaults` work. """ def dictify(fs): return dict([(k, fs[k]) for k in fs.keys()]) _method = defaults.pop('_method', 'both') e = ctx.env.copy() out = {} if _method.lower() in ['both', 'post']: a = {} if e['REQUEST_METHOD'] == 'POST': a = cgi.FieldStorage(fp = StringIO(data()), environ=e, keep_blank_values=1) a = dictify(a) out = dictadd(out, a) if _method.lower() in ['both', 'get']: e['REQUEST_METHOD'] = 'GET' a = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1)) out = dictadd(out, a) try: return storify(out, *requireds, **defaults) except KeyError: badrequest() raise StopIteration
0b3fcd9142dbcd3309b80837c6fc53abdf4aaad6
3,655,347
def nodes_and_edges_valid(dev, num_nodes, node_names, rep):
    """Asserts that nodes in a device ``dev`` are properly initialized, when there
    are ``num_nodes`` nodes expected, with names ``node_names``, using representation ``rep``."""
    if not set(dev._nodes.keys()) == {"state"}:
        return False
    if not len(dev._nodes["state"]) == num_nodes:
        return False
    for idx in range(num_nodes):
        if not dev._nodes["state"][idx].name == node_names[idx]:
            return False
    return edges_valid(dev, num_nodes=num_nodes, rep=rep)
ad6dbfdfd92114c9b041617a91ad30dbe8a8189f
3,655,348
def is_android(builder_cfg):
    """Determine whether the given builder is an Android builder."""
    return ('Android' in builder_cfg.get('extra_config', '') or
            builder_cfg.get('os') == 'Android')
74b1620ba2f6fff46495174158f734c5aa8da372
3,655,349
def twoSum(self, numbers, target):  # ! This approach works
    """
    :type numbers: List[int]
    :type target: int
    :rtype: List[int]
    """
    numbers_dict = {}
    for idn, v in enumerate(numbers):
        if target - v in numbers_dict:
            return [numbers_dict[target - v] + 1, idn + 1]
        numbers_dict[v] = idn
e2b93828b5db7256b9a1e90e7e21adad1ce0b4de
3,655,350
def not_after(cert):
    """
    Gets the naive datetime of the certificate's 'not_after' field.
    This field denotes the last date in time which the given certificate is valid.

    :return: Datetime
    """
    return cert.not_valid_after
4f084146908d70af5c2cdfa5151f0c26533ac7fe
3,655,351
from datetime import datetime


def parse_time_string(time_str: str) -> datetime.time:
    """Parses a string recognizable by TIME_REGEXP into a datetime.time object. If the
    string has an invalid format, a ValueError is raised."""
    match = TIME_REGEXP.match(time_str)
    if match is None:
        raise ValueError("time string {} has an invalid format".format(repr(time_str)))
    groups = match.groupdict()
    return datetime.time(int(groups["h"]), int(groups["m"]), int(groups["s"] or 0))
3238abcc6edb5a37c4a3d615b71e9dde6344f0ac
3,655,352
def get_roc_curve(y_true, y_score, title=None, with_plot=True):
    """
    Plot the [Receiver Operating Characteristic][roc] curve of the given
    true labels and confidence scores.

    [roc]: http://en.wikipedia.org/wiki/Receiver_operating_characteristic
    """
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_score)
    auc = np.trapz(tpr, fpr)
    fig = None
    if with_plot:
        fig = vislab.results_viz.plot_curve_with_area(
            fpr, tpr, auc, 'False Positive Rate', 'True Positive Rate', 'AUC')
        ax = fig.get_axes()[0]
        ax.plot([0, 1], [0, 1], 'k--')
        if title is not None:
            ax.set_title(title)
    return fig, fpr, tpr, auc
7635af1705c6bdaccce1e1c5e99719645026d436
3,655,354
import datetime


def read_err_songs():
    """ read song data from xml file to a list of dictionaries """
    songfile = open('/home/gabe/python/selfishmusic/errors.xml')
    soup = BS.BeautifulSoup(songfile.read())
    songsxml = soup.findAll('song')
    songs = []
    for song in songsxml:
        sd = {}
        sd['songnum'] = int(get_text(song.songnum))
        sd['title'] = get_text(song.title)
        sd['artist'] = get_text(song.artist)
        date = get_text(song.date)
        date = [x.strip(' ,') for x in date.split(' ')]
        sd['date'] = datetime.date(month=MONTHS.index(date[0]) + 1,
                                   day=int(date[1]),
                                   year=int(date[2]))
        sd['lyrics'] = get_text(song.lyrics)
        sd['found_title'] = get_text(song.found_title)
        sd['found_artist'] = get_text(song.found_artist)
        songs.append(sd)
    songfile.close()
    return songs
287c205c054045b3a88b74cf008e5a21037f9727
3,655,355
def word_value(word: str) -> int:
    """Returns the sum of the alphabetical positions of each letter in word."""
    return (0 if word == '' else
            word_value(word[:-1]) + alpha.letter_index_upper(word[-1]))
b964faa5a5792e003fb0859c1ffb0b25e63f6a75
3,655,356
def status():
    """ Returns json response of api status

    Returns:
        JSON: json object
    """
    status = {
        "status": "OK"
    }
    return jsonify(status)
d515e0628bb4c77ad83b0a26b758a3686663d329
3,655,357
def celcius_to_farenheit(x):
    """Convert Celsius to Fahrenheit."""
    farenheit = (9*x/5) + 32
    return farenheit
fa0041451c82b20283e4f20b501a6042ab19ec95
3,655,358
def CheckFlags(node_name, report_per_node, warnings, errors, flags, warning_helper, error_helper): """Check the status flags in each node and bookkeep the results. Args: node_name: Short name of the node. report_per_node: Structure to record warning/error messages per node. Its type should be collections.defaultdict(list). warnings: Structure to record nodes that raise each warning type. Its type should be collections.defaultdict(list). errors: Structure to record nodes that raise each error type. Its type should be collections.defaultdict(list). flags: The status flags to check against. warning_helper: The EnumHelper for warnings. error_helper: The EnumHelper for errors. Returns: True if there are any warnings/errors. """ any_warning_or_error = False if warning_helper: for warning_value in warning_helper.Values(): warning_name = warning_helper.ShortName(warning_value) if avionics_util.CheckWarning(flags, warning_value): report_per_node[node_name].append(('WARNING', warning_name)) warnings[warning_name].append(node_name) any_warning_or_error = True if error_helper: for error_value in error_helper.Values(): error_name = error_helper.ShortName(error_value) if avionics_util.CheckError(flags, error_value): report_per_node[node_name].append(('ERROR', error_name)) errors[error_name].append(node_name) any_warning_or_error = True return any_warning_or_error
63bac7bfa4e3fa9c3cc462f5400d68116dfb898d
3,655,359
def EnrollmentTransaction():
    """
    :return:
    """
    return b'\x20'
05adff34b6cf100d95e16ab837b38b26b6315b6a
3,655,360
def sentinel_id(vocabulary, return_value=None):
    """Token ID to use as a sentinel.

    By default, we use the last token in the vocabulary.

    Args:
        vocabulary: a t5.data.vocabularies.Vocabulary
        return_value: an optional integer
    Returns:
        an integer
    """
    if return_value is not None:
        return return_value
    return vocabulary.vocab_size - 1
08ad1116b7f41ba7070359675a0133f14b9917bd
3,655,361
from datetime import datetime
import urllib
import hmac
import hashlib
import base64


def create_signature(api_key, method, host, path, secret_key, get_params=None):
    """
    Create a signature.
    :param get_params: dict, extra parameters (url params) attached when using the GET method
    :return:
    """
    sorted_params = [
        ("AccessKeyId", api_key),
        ("SignatureMethod", "HmacSHA256"),
        ("SignatureVersion", "2"),
        ("Timestamp", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"))
    ]
    if get_params:
        sorted_params.extend(list(get_params.items()))
        sorted_params = list(sorted(sorted_params))
    encode_params = urllib.parse.urlencode(sorted_params)
    payload = [method, host, path, encode_params]
    payload = "\n".join(payload)
    payload = payload.encode(encoding="UTF8")
    secret_key = secret_key.encode(encoding="UTF8")
    digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
    signature = base64.b64encode(digest)
    params = dict(sorted_params)
    params["Signature"] = signature.decode("UTF8")
    return params
3e38bc883da9f5ebb311e1498f8cc73d1754c38b
3,655,362
def measure_fwhm(image, plot=True, printout=True): """ Find the 2D FWHM of a background/continuum subtracted cutout image of a target. The target should be centered and cropped in the cutout. Use lcbg.utils.cutout for cropping targets. FWHM is estimated using the sigmas from a 2D gaussian fit of the target's flux. The FWHM is returned as a tuple of the FWHM in the x and y directions. Parameters ---------- image : array like Input background/continuum subtracted cutout image. printout : bool Print out info. plot : bool To plot fit or not. Returns ------- tuple : array of floats FWHM in x and y directions. """ # Find FWHM # ---------- fitted_line = fit_gaussian2d(image) # Find fitted center x_mean, y_mean = [i.value for i in [fitted_line.x_mean, fitted_line.y_mean]] # Estimate FWHM using gaussian_sigma_to_fwhm x_fwhm = fitted_line.x_stddev * gaussian_sigma_to_fwhm y_fwhm = fitted_line.y_stddev * gaussian_sigma_to_fwhm # Find half max hm = fitted_line(x_mean, y_mean) / 2. # Find the mean of the x and y direction mean_fwhm = np.mean([x_fwhm, y_fwhm]) mean_fwhm = int(np.round(mean_fwhm)) # Print info about fit and FWHM # ------------------------------ if printout: print("Image Max: {}".format(image.max())) print("Amplitude: {}".format(fitted_line.amplitude.value)) print("Center: ({}, {})".format(x_mean, y_mean)) print("Sigma = ({}, {})".format(fitted_line.x_stddev.value, fitted_line.y_stddev.value, )) print("Mean FWHM: {} Pix ".format(mean_fwhm)) print("FWHM: (x={}, y={}) Pix ".format(x_fwhm, y_fwhm)) if plot: fig, [ax0, ax1, ax2, ax3] = plot_fit(image, fitted_line) # Make x and y grid to plot to y_arange, x_arange = np.mgrid[:image.shape[0], :image.shape[1]] # Plot input image with FWHM and center # ------------------------------------- ax0.imshow(image, cmap='gray_r') ax0.axvline(x_mean - x_fwhm / 2, c='c', linestyle="--", label="X FWHM") ax0.axvline(x_mean + x_fwhm / 2, c='c', linestyle="--") ax0.axhline(y_mean - y_fwhm / 2, c='g', linestyle="--", label="Y FWHM") ax0.axhline(y_mean + y_fwhm / 2, c='g', linestyle="--") ax0.set_title("Center and FWHM Plot") ax0.legend() # Plot X fit # ---------- ax2.axvline(x_mean, linestyle="-", label="Center") ax2.axvline(x_mean - x_fwhm / 2, c='c', linestyle="--", label="X FWHM") ax2.axvline(x_mean + x_fwhm / 2, c='c', linestyle="--") ax2.axhline(hm, c="black", linestyle="--", label="Half Max") ax2.legend() # Plot Y fit # ---------- ax3.axvline(y_mean, linestyle="-", label="Center") ax3.axvline(y_mean - y_fwhm / 2, c='g', linestyle="--", label="Y FWHM") ax3.axvline(y_mean + y_fwhm / 2, c='g', linestyle="--") ax3.axhline(hm, c="black", linestyle="--", label="Half Max") ax3.legend() plt.show() return np.array([x_fwhm, y_fwhm])
c2fdb3a10ffa575ffe6fdeb9e86a47ffaefea5c2
3,655,363
from .mappia_publisher import MappiaPublisherPlugin


def classFactory(iface):  # pylint: disable=invalid-name
    """Load MappiaPublisher class from file MappiaPublisher.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    #
    return MappiaPublisherPlugin()
1802094cb49c01b0c9c5ed8b45d3c77bcd9b746a
3,655,364
def mongo_insert_canary(mongo, db_name, coll_name, doc):
    """ Inserts a canary document with 'j' True. Returns 0 if successful. """
    LOGGER.info("Inserting canary document %s to DB %s Collection %s", doc, db_name, coll_name)
    coll = mongo[db_name][coll_name].with_options(
        write_concern=pymongo.write_concern.WriteConcern(j=True))
    res = coll.insert_one(doc)
    return 0 if res.inserted_id else 1
d82fe021db76972be19394688a07e9426bff82b7
3,655,365
from typing import Type


def is_dict_specifier(value):
    # type: (object) -> bool
    """ Check if value is a supported dictionary.

    Check if a parameter of the task decorator is a dictionary that specifies
    at least Type (and therefore can include things like Prefix, see binary
    decorator test for some examples).

    :param value: Decorator value to check.
    :return: True if value is a dictionary that specifies at least the Type of the key.
    """
    return isinstance(value, dict) and Type in value
e18ad83a1b79a8150dfda1c65f4ab7e72cc8c8c8
3,655,366
def parse_star_count(stars_str):
    """Parse strings like 40.3k and get the no. of stars as a number"""
    stars_str = stars_str.strip()
    return int(float(stars_str[:-1]) * 1000) if stars_str[-1] == 'k' else int(stars_str)
d47177f26656e6dc33d708a0c4824ff677f3387a
3,655,367
import shutil


def is_libreoffice_sdk_available() -> bool:
    """ do we have idlc somewhere (we suppose it is made available in current path var.) ? """
    return shutil.which("idlc") is not None
83f8b158bcf97aa875280b20e177895432116d21
3,655,368
def set_metrics_file(filenames, metric_type):
    """Create metrics from data read from a file.

    Args:
        filenames (list of str): Paths to files containing one json string
            per line (potentially base64 encoded)
        metric_type (ts_mon.Metric): any class deriving from ts_mon.Metric.
            For ex. ts_mon.GaugeMetric.

    Returns:
        metric (list of metric_type): the metric instances, filled.
    """
    if not filenames:
        return []

    metrics = []
    for filename in filenames:
        with open(filename, 'r') as f:
            lines = f.read()
        # Skip blank lines because it helps humans.
        lines = [line for line in lines.splitlines() if line.strip()]
        metrics.extend(set_metrics(lines, metric_type))
    return metrics
372ec1fcb4b50711b35e40936e63839d75689dee
3,655,369
def sortino_ratio_nb(returns, ann_factor, required_return_arr):
    """2-dim version of `sortino_ratio_1d_nb`.

    `required_return_arr` should be an array of shape `returns.shape[1]`."""
    result = np.empty(returns.shape[1], dtype=np.float_)
    for col in range(returns.shape[1]):
        result[col] = sortino_ratio_1d_nb(returns[:, col], ann_factor,
                                          required_return=required_return_arr[col])
    return result
2dfd6be1b7d3747c87484b22eb0cc0b0271c93a6
3,655,370
import re


def format_env_var(name: str, value: str) -> str:
    """
    Formats environment variable value.

    Formatter is chosen according to the kind of variable.

    :param name: name of environment variable
    :param value: value of environment variable
    :return: string representation of value in appropriate format
    """
    formatter = get_formatter(name)
    new = str(value)
    new = formatter(new)
    new = escape(new)
    new = re.sub("\n", "<br>", new)
    return new
030b16b897f2222d8465143b462f99ba344ba1eb
3,655,371
from typing import Counter


def evenly_divisible(n):
    """
    Idea:
        - Find factors of numbers 1 to n. Use DP to cache results bottom up.
        - Amongst all factors, we have to include max counts of prime factors.
        - For example, in 1 .. 10, 2 has to be included 3 times since 8 = 2 ^ 3
    """
    max_counts = Counter()
    for n in range(n, 1, -1):
        factors = prime_factorize(n)
        # Update max counts
        for k, v in factors.iteritems():
            max_counts[k] = max(max_counts[k], v)
    res = 1
    for k, v in max_counts.iteritems():
        res *= k ** v
    return res
68301a33751c2f3863092450235ca5c24b28379e
3,655,372
def do_open(user_input):
    """identical to io.open in PY3"""
    try:
        with open(user_input) as f:
            return f.read()
    except Exception:
        return None
72037207adecb2758c844c2f0c7233d834060111
3,655,374
def likely_solution(players):
    """ Return tuples of cards with the number of players who don't have them """
    likely = likely_solution_nums(players)
    return sorted([(ALLCARDS[n], ct) for n, ct in likely],
                  key=lambda tp: tp[1], reverse=True)
f0531f3188a38ec1b70ca48f95c9cfdc71d723b5
3,655,375
def cns_extended_inp(mtf_infile, pdb_outfile):
    """
    Create CNS input script (.inp) to create extended PDB file
    from molecular topology file (.mtf)

    Parameters
    ----------
    mtf_infile : str
        Path to .mtf topology file
    pdb_outfile : str
        Path where extended .pdb file will be stored

    Returns
    -------
    str:
        Input script
    """
    return _cns_render_template(
        "generate_extended",
        {
            "mtf_infile": mtf_infile,
            "pdb_outfile": pdb_outfile,
        }
    )
c850137db9a22fd48559228e3032bcd510c9d69b
3,655,376
def index(request, response_format='html'): """Sales index page""" query = Q(status__hidden=False) if request.GET: if 'status' in request.GET and request.GET['status']: query = _get_filter_query(request.GET) else: query = query & _get_filter_query(request.GET) orders = Object.filter_by_request( request, SaleOrder.objects.filter(query), mode="r") filters = OrderFilterForm(request.user.profile, '', request.GET) statuses = Object.filter_by_request(request, SaleStatus.objects, mode="r") massform = MassActionForm(request.user.profile) return render_to_response('sales/index', {'orders': orders, 'filters': filters, 'statuses': statuses, 'massform': massform }, context_instance=RequestContext(request), response_format=response_format)
afb47a5c9094c9ff125c05c3588712d1875c69f3
3,655,377
def team_points_leaders(num_results=None, round_name=None): """Returns the team points leaders across all groups, as a dictionary profile__team__name and points. """ size = team_normalize_size() if size: entries = score_mgr.team_points_leaders(round_name=round_name) else: entries = score_mgr.team_points_leaders(num_results=num_results, round_name=round_name) if entries: if size: for entry in entries: team = Team.objects.get(name=entry["profile__team__name"]) if team.size: entry["points"] = int(entry["points"] * float(size / team.size)) # resort the entries after the normalization entries = sorted(entries, key=lambda e: e["points"], reverse=True) return entries[:num_results] else: return entries else: results = Team.objects.all().extra( select={'profile__team__name': 'name', 'points': 0}).values( 'profile__team__name', 'points') if num_results: results = results[:num_results] return results
56b72b28f74f94e428b668b785b3dbd5b0c7c378
3,655,379
def with_color(text, color, bold=False):
    """
    Return a ZSH color-formatted string.

    Arguments
    ---------
    text: str
        text to be colored
    color: str
        ZSH color code
    bold: bool
        whether or not to make the text bold

    Returns
    -------
    str
        string with ZSH color-coded text
    """
    color_fmt = '$fg_bold[{:s}]' if bold else '$fg[{:s}]'
    return '%{{{:s}%}}{:s}%{{$reset_color%}}'.format(
        color_fmt.format(color), text)
40c194d9de76ab504a25592cfb13407cb089da0a
3,655,380
def sample_test():
    """Return sample test json."""
    return get_sample_json("test.json")
9d135d4fd2f7eb52d16ff96332811e4141139a12
3,655,381
import warnings from io import StringIO def dataframe_from_inp(inp_path, section, additional_cols=None, quote_replace=' ', **kwargs): """ create a dataframe from a section of an INP file :param inp_path: :param section: :param additional_cols: :param skip_headers: :param quote_replace: :return: """ # format the section header for look up in headers OrderedDict sect = remove_braces(section).upper() # get list of all section headers in inp to use as section ending flags headers = get_inp_sections_details(inp_path, include_brackets=False) if sect not in headers: warnings.warn(f'{sect} section not found in {inp_path}') return pd.DataFrame() # extract the string and read into a dataframe start_string = format_inp_section_header(section) end_strings = [format_inp_section_header(h) for h in headers.keys()] s = extract_section_of_file(inp_path, start_string, end_strings, **kwargs) # replace occurrences of double quotes "" s = s.replace('""', quote_replace) # and get the list of columns to use for parsing this section # add any additional columns needed for special cases (build instructions) additional_cols = [] if additional_cols is None else additional_cols cols = headers[sect]['columns'] + additional_cols if headers[sect]['columns'][0] == 'blob': # return the whole row, without specific col headers return pd.read_csv(StringIO(s), delim_whitespace=False) else: try: df = pd.read_csv(StringIO(s), header=None, delim_whitespace=True, skiprows=[0], index_col=0, names=cols) except IndexError: print(f'failed to parse {section} with cols: {cols}. head:\n{s[:500]}') raise return df
8eaefdc08c7de3991f5a85cfe5001a6dcd0aaf7b
3,655,382
def compositional_stratified_splitting(dataset, perc_train): """Given the dataset and the percentage of data you want to extract from it, method will apply stratified sampling where X is the dataset and Y is are the category values for each datapoint. In the case each structure contains 2 types of atoms, the category will be constructed as such: number of atoms of type 1 + number of atoms of type 2 * 100. Parameters ---------- dataset: [Data] A list of Data objects representing a structure that has atoms. subsample_percentage: float Percentage of the dataset. Returns ---------- [Data] Subsample of the original dataset constructed using stratified sampling. """ dataset_categories = create_dataset_categories(dataset) dataset, dataset_categories = duplicate_unique_data_samples( dataset, dataset_categories ) sss_train = sklearn.model_selection.StratifiedShuffleSplit( n_splits=1, train_size=perc_train, random_state=0 ) trainset, val_test_set = generate_partition(sss_train, dataset, dataset_categories) val_test_dataset_categories = create_dataset_categories(val_test_set) val_test_set, val_test_dataset_categories = duplicate_unique_data_samples( val_test_set, val_test_dataset_categories ) sss_valtest = sklearn.model_selection.StratifiedShuffleSplit( n_splits=1, train_size=0.5, random_state=0 ) valset, testset = generate_partition( sss_valtest, val_test_set, val_test_dataset_categories ) return trainset, valset, testset
b57a0b7d651e6f9be4182fec8c918438dcae9b7a
3,655,383
def is_inside_line_segment(x, y, x0, y0, x1, y1):
    """Return True if the (x, y) lies inside the line segment defined by
    (x0, y0) and (x1, y1)."""
    # Create two vectors.
    v0 = np.array([ x0-x, y0-y ]).reshape((2,1))
    v1 = np.array([ x1-x, y1-y ]).reshape((2,1))

    # Inner product.
    prod = v0.transpose().dot(v1)

    if ( prod <= 0 ):
        return True
    else:
        return False
b653c542d3d573857199d90257e9e36e6c45ccdc
3,655,384
def transition_soil_carbon(area_final, carbon_final, depth_final, transition_rate, year, area_initial, carbon_initial, depth_initial): """This is the formula for calculating the transition of soil carbon .. math:: (af * cf * df) - \ \\frac{1}{(1 + tr)^y} * \ [(af * cf * df) - \ (ai * ci * di)] where * :math:`af` is area_final * :math:`cf` is carbon_final * :math:`df` is depth_final * :math:`tr` is transition_rate * :math:`y` is year * :math:`ai` is area_initial * :math:`ci` is carbon_initial * :math:`di` is depth_initial Args: area_final (float): The final area of the carbon carbon_final (float): The final amount of carbon per volume depth_final (float): The final depth of carbon transition_rate (float): The rate at which the transition occurs year (float): The amount of time in years overwhich the transition occurs area_initial (float): The intial area of the carbon carbon_initial (float): The iniital amount of carbon per volume depth_initial (float): The initial depth of carbon Returns: float: Transition amount of soil carbon """ return (area_final * carbon_final * depth_final) - \ (1/((1 + transition_rate) ** year)) * \ ((area_final * carbon_final * depth_final) - \ (area_initial * carbon_initial * depth_initial))
bfbf83f201eb8b8b0be0ec6a8722e850f6084e95
3,655,385
def _snr_approx(array, source_xy, fwhm, centery, centerx):
    """
    array - frame convolved with top hat kernel
    """
    sourcex, sourcey = source_xy
    rad = dist(centery, centerx, sourcey, sourcex)
    ind_aper = draw.circle(sourcey, sourcex, fwhm/2.)
    # noise : STDDEV in convolved array of 1px wide annulus (while
    # masking the flux aperture) * correction of # of resolution elements
    ind_ann = draw.circle_perimeter(int(centery), int(centerx), int(rad))
    array2 = array.copy()
    array2[ind_aper] = array[ind_ann].mean()   # quick-n-dirty mask
    n2 = (2*np.pi*rad)/fwhm - 1
    noise = array2[ind_ann].std()*np.sqrt(1+(1/n2))
    # signal : central px minus the mean of the pxs (masked) in 1px annulus
    signal = array[sourcey, sourcex] - array2[ind_ann].mean()
    snr = signal / noise
    return sourcey, sourcex, snr
6f055444163c03d0bcc61107db2045b968f06b52
3,655,388
def create_pipeline(training_set, validation_set, test_set): """ Create a pipeline for the training, validation and testing set Parameters: training_set: Training data set validation_set: Validation data set test_set: Test data set Returns: batch_size: Batch size image_size: Image dimensions (width, height) training_batches: Batches of training data set validation_batches: Batches of validation data set testing_batches: Batches of test data set """ # Define batch size and image size batch_size = 64 image_size = 224 # Define function to convert images to appropriate format, resize to fit the input layer and normalize it def format_image(image, label): image = tf.cast(image, tf.float32) image = tf.image.resize(image, [image_size, image_size]) image /= 255 return image, label # Define batches, while modifying images according to above function as well as batch and prefetch them training_batches = training_set.map(format_image).batch(batch_size).prefetch(1) validation_batches = validation_set.map(format_image).batch(batch_size).prefetch(1) testing_batches = test_set.map(format_image).batch(batch_size).prefetch(1) return batch_size, image_size, training_batches, validation_batches, testing_batches
a6af6ff83180a0a11bfc3bacefd6a2e2261aaeed
3,655,389
from typing import Tuple
from typing import Any


def invalid_request() -> Tuple[Any, int]:
    """Invalid request API response."""
    return jsonify({API.Response.KEY_INFO: API.Response.VAL_INVALID_REQUEST}), 400
76a81f8c85014822f4fa306c917a06d92a89ea70
3,655,390
from pathlib import Path from typing import List from typing import Dict from typing import Any def _add_hyperparameters( ranges_path: Path, defaults_path: Path ) -> List[Dict[str, Any]]: """Returns a list of hyperparameters in a format that is compatible with the json reader of the ConfigSpace API. The list is created from two files: a hp_space file that defines the ranges of the hyperparameters and an options file that defines the default values of the hyperparameters. Both are in json format. Parameters ---------- ranges_path: Path Path to the hp_space file defaults_path: Path Path to the options file Returns ------- List A list of hyperparameters """ # load the ranges of the hyperparameters as a dict ranges_dict = load_data(ranges_path) ranges_dict = flatten_dictionary(ranges_dict) # load the default values of the hyperparameters as a dict defaults_dict = load_data(defaults_path) defaults_dict = flatten_dictionary(defaults_dict) hyperparameter_list = _add_ranges(ranges_dict) hyperparameter_list = _add_defaults(hyperparameter_list, defaults_dict) return hyperparameter_list
9609d3f31ffaee69148360966b1040f1970399b3
3,655,391
def setup(app): """Setup extension.""" app.add_domain(StuffDomain) app.connect("builder-inited", generate_latex_preamble) app.connect("config-inited", init_numfig_format) app.add_css_file("stuff.css") app.add_enumerable_node( StuffNode, "stuff", html=(html_visit_stuff_node, html_depart_stuff_node), singlehtml=(html_visit_stuff_node, html_depart_stuff_node), latex=(latex_visit_stuff_node, latex_depart_stuff_node), ) app.add_node( nodes.caption, override=True, html=(html_visit_caption_node, html_depart_caption_node), singlehtml=(html_visit_caption_node, html_depart_caption_node), latex=(latex_visit_caption_node, latex_depart_caption_node), ) app.add_node( ContentNode, html=(html_visit_content_node, html_depart_content_node), singlehtml=(html_visit_content_node, html_depart_content_node), latex=(latex_visit_content_node, latex_depart_content_node), ) return {"version": __version__, "parallel_read_safe": True}
3c7a5d36c835e7339876cdf88673d79e5f76b590
3,655,392
from pathlib import Path
import typing


def has_checksum(path: Path, csum: str, csum_fun: typing.Optional[Checksum] = None) -> bool:
    """
    :return: True if the file at the path `path` has given checksum
    """
    return get_checksum(path, csum_fun=csum_fun) == csum
e9bed6e0d82745113412e6dace5869aa32aa4fc9
3,655,393
import numpy as np


def remove_outliers(column):
    """
    :param column: list of numbers
    :return:
    """
    if len(column) < 1:
        return []

    clean_column = []
    q1 = np.percentile(column, 25)
    q3 = np.percentile(column, 75)
    #k = 1.5
    k = 2
    # [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
    lower_bound = q1 - k*(q3-q1)
    upper_bound = q3 + k*(q3-q1)
    for c in column:
        if c >= lower_bound and c <= upper_bound:
            clean_column.append(c)
    return clean_column
04c1e736e27ffeaef528f25fd303d0f27c3a94ac
3,655,394
def read_photons(photonfile, ra0, dec0, tranges, radius, verbose=0, colnames=['t', 'x', 'y', 'xa', 'ya', 'q', 'xi', 'eta', 'ra', 'dec', 'flags']): """ Read a photon list file and return a python dict() with the expected format. :param photonfile: Name of the photon event file to use. :type photonfile: str :param ra0: Right ascension of the targeted sky position, in degrees. :type ra0: float :param dec0: Declination of the targeted sky position, in degrees. :type dec0: float :param tranges: Set of time ranges from which to retrieve photon events, in GALEX time units :type tranges: list :param radius: The radius, in degrees, defining a cone on the sky that is centered on ra0 and dec0, from which to extract photons. :type radius: float :param verbose: Verbosity level, a value of 0 is minimum verbosity. :type verbose: int :param colnames: Labels of the columns found in the photon event file. :type colnames: list :returns: dict -- The set of photon events and their properties. """ # [Future]: Consider moving this method to 'dbasetools'. if verbose: mc.print_inline('Reading photon list file: {f}'.format(f=photonfile)) data = pd.io.parsers.read_csv(photonfile, names=colnames) ra, dec = np.array(data['ra']), np.array(data['dec']) angsep = mc.angularSeparation(ra0, dec0, ra, dec) ix = np.array([]) for trange in tranges: cut = np.where((angsep <= radius) & (np.isfinite(angsep)))[0] ix = np.concatenate((ix, cut), axis=0) events = {'t':np.array(data['t'][ix])/tscale, 'ra':np.array(data['ra'][ix]), 'dec':np.array(data['dec'][ix]), 'xi':np.array(data['xi'][ix]), 'eta':np.array(data['eta'][ix]), 'x':np.array(data['x'][ix]), 'y':np.array(data['y'][ix])} return events
c83958f8ae541e5df564c5ce53dd40593c9dfc3e
3,655,396
def normElec(surf, electrode, normdist, NaN_as_zeros=True): """ Notes ----- When `normway` is a scalar, it takes the normal of the points of the mesh which are closer than `normway`. However, some points have a normal of (0, 0, 0) (default assigned if the vertex does not belong to any triangle). projectElectrodes.m includes those (0, 0, 0) in the calculation, but it might not be correct. See l. 138 (there are no NaN in normals but only (0, 0, 0)). To replicate the matlab behavior, make sure that `NaN_as_zeros` is True. """ dvect = norm(electrode - surf['pos'], axis=1) # l. 104-112 of projectElectrodes.m closevert = dvect < normdist # l. 120 of projectElectrodes.m normal = surf['pos_norm'][closevert, :].mean(axis=0) # l. 144 of projectElectrodes.m normals2av = surf['pos_norm'][closevert, :].copy() if NaN_as_zeros: normals2av[isnan(normals2av)] = 0 normal = nanmean(normals2av, axis=0) return normal
d449f4518c589a2a68b64ca812d964cb6249694e
3,655,397
def filter_sources(sources, release):
    """Check if a source has already been consumed. If it has not, add it
    to the sources dict.
    """
    source, version, dist, arch = parse_release(release)
    if source not in sources.keys():
        sources[source] = {version: {dist: [arch]}}
        return True
    elif version not in sources[source].keys():
        sources[source][version] = {dist: [arch]}
        return True
    elif dist not in sources[source][version]:
        sources[source][version][dist] = [arch]
        return True
    elif arch not in sources[source][version][dist]:
        sources[source][version][dist].append(arch)
        return True
    return False
661d379291170a4994c0813d24820007e47bd092
3,655,398
from typing import Union from typing import Dict from typing import Any async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]): """ Train a machine learning model. Provide records to the model to train it. The model should be already instantiated. Parameters ---------- model : Model Machine Learning model to use. See :doc:`/plugins/dffml_model` for models options. *args : list Input data for training. Could be a ``dict``, :py:class:`Record`, filename, one of the data :doc:`/plugins/dffml_source`, or a filename with the extension being one of the data sources. Examples -------- >>> import asyncio >>> from dffml import * >>> >>> model = SLRModel( ... features=Features( ... Feature("Years", int, 1), ... ), ... predict=Feature("Salary", int, 1), ... directory="tempdir", ... ) >>> >>> async def main(): ... await train( ... model, ... {"Years": 0, "Salary": 10}, ... {"Years": 1, "Salary": 20}, ... {"Years": 2, "Salary": 30}, ... {"Years": 3, "Salary": 40}, ... ) >>> >>> asyncio.run(main()) """ sources = _records_to_sources(*args) async with sources as sources, model as model: async with sources() as sctx, model() as mctx: return await mctx.train(sctx)
9a8e1648247a8eb3c8354c324ac2c48a52617899
3,655,399
def setup_model_and_optimizer(args): """Setup model and optimizer.""" print ("setting up model...") model = get_model(args) print ("setting up optimizer...") optimizer = get_optimizer(model, args) print ("setting up lr scheduler...") lr_scheduler = get_learning_rate_scheduler(optimizer, args) if DEEPSPEED_WRAP and args.deepspeed: print_rank_0("DeepSpeed is enabled.") print ("Calling deepspeed.initialize with our model, optimizer and scheduler") model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize( model=model, optimizer=optimizer, args=args, lr_scheduler=lr_scheduler, mpu=mpu, dist_init_required=False ) print ("We've wrapped our model, optimizer and scheduler in DeepSpeed") if args.load is not None: print_rank_0("Load checkpoint from " + args.load) args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args, deepspeed=DEEPSPEED_WRAP and args.deepspeed) print_rank_0("Checkpoint loaded") else: args.iteration = 0 print ("returning our model, optimizer and scheduler") return model, optimizer, lr_scheduler
9283ec825b55ff6619ac2ee2f7ac7cce9e4bced7
3,655,401
def ensure_str(origin, decode=None):
    """
    Ensure the value is a string, for display and completion.
    Then add double quotes.

    Note: this method does not handle nil; make sure to check for (nil)
    outside of this method.
    """
    if origin is None:
        return None
    if isinstance(origin, str):
        return origin
    if isinstance(origin, int):
        return str(origin)
    elif isinstance(origin, list):
        return [ensure_str(b) for b in origin]
    elif isinstance(origin, bytes):
        if decode:
            return origin.decode(decode)
        return _literal_bytes(origin)
    else:
        raise Exception(f"Unknown type: {type(origin)}, origin: {origin}")
0409bc75856b012cf3063d9ed2530c2d7d5bf3e4
3,655,402
from typing import Union from typing import Dict from typing import Any from typing import Optional def restore( collection: str, id: Union[str, int, Dict[str, Any]] ) -> Optional[Dict[str, Any]]: """Restrieve cached data from database. :param collection: The collection to be retrieved. Same name as API commands. :type collection: str :param id: The unique identifier for a particular collection. This varies by command. :type id: Union[str, int] :return: The retrieved data if exists, else None. :rtype: Optional[Dict[str, Any]] """ db = _get_connection() if not db: return None if not isinstance(id, dict): id = dict(_id=id) return db[collection].find_one(id, dict(_id=0))
a0b2ccb995661b5c3286dee3b3f5f250cb728011
3,655,403
def encode_bits(data, number):
    """Turn bits into n bytes of modulation patterns"""
    # 0000 00BA gets encoded as:
    # 128 64 32 16 8 4 2 1
    #   1  B  B  0 1 A A 0
    # i.e. a 0 is a short pulse, a 1 is a long pulse
    #print("modulate_bits %s (%s)" % (ashex(data), str(number)))

    shift = number-2
    encoded = []
    for i in range(int(number/2)):
        bits = (data >> shift) & 0x03
        #print(" shift %d bits %d" % (shift, bits))
        encoded.append(ENCODER[bits])
        shift -= 2
    #print(" returns:%s" % ashex(encoded))
    return encoded
0299a30c4835af81e97e518e116a51fa08006999
3,655,404
from typing import Tuple def _tf_get_negs( all_embed: "tf.Tensor", all_raw: "tf.Tensor", raw_pos: "tf.Tensor", num_neg: int ) -> Tuple["tf.Tensor", "tf.Tensor"]: """Get negative examples from given tensor.""" if len(raw_pos.shape) == 3: batch_size = tf.shape(raw_pos)[0] seq_length = tf.shape(raw_pos)[1] else: # len(raw_pos.shape) == 2 batch_size = tf.shape(raw_pos)[0] seq_length = 1 raw_flat = _tf_make_flat(raw_pos) total_candidates = tf.shape(all_embed)[0] all_indices = tf.tile( tf.expand_dims(tf.range(0, total_candidates, 1), 0), (batch_size * seq_length, 1), ) shuffled_indices = tf.transpose( tf.random.shuffle(tf.transpose(all_indices, (1, 0))), (1, 0) ) neg_ids = shuffled_indices[:, :num_neg] bad_negs = _tf_get_bad_mask(raw_flat, all_raw, neg_ids) if len(raw_pos.shape) == 3: bad_negs = tf.reshape(bad_negs, (batch_size, seq_length, -1)) neg_embed = _tf_sample_neg(batch_size * seq_length, all_embed, neg_ids) if len(raw_pos.shape) == 3: neg_embed = tf.reshape( neg_embed, (batch_size, seq_length, -1, all_embed.shape[-1]) ) return neg_embed, bad_negs
9cef1cf3fc869108d400704f8cd90d432382ac2e
3,655,406
def load_pdb(path): """ Loads all of the atomic positioning/type arrays from a pdb file. The arrays can then be transformed into density (or "field") tensors before being sent through the neural network. Parameters: path (str, required): The full path to the pdb file being voxelized. Returns: dictionary: A dictionary containing the following arrays from the pdb file: num_atoms, atom_types, positions, atom_type_set, xcoords, ycoords, zcoords, residues, residue_set """ pdb = PandasPdb().read_pdb(path) # This just creates a dataframe from the pdb file using biopandas #print('This is vars',vars(pdb)) pdf = pdb.df['ATOM'] # atomic coordinates x_coords = pdf['x_coord'].values y_coords = pdf['y_coord'].values z_coords = pdf['z_coord'].values # create an array containing tuples of x,y,z for every atom positions = [] for i, x in enumerate(x_coords): position_tuple = (x_coords[i], y_coords[i], z_coords[i]) positions.append(position_tuple) positions = np.array(positions) # names of all the atoms contained in the protein atom_types = pdf['atom_name'].values num_atoms = len(atom_types) atom_type_set = np.unique(atom_types) num_atom_types = len(atom_type_set) # residue names residue_names = pdf['residue_name'].values residue_set = np.unique(residue_names) protein_dict = {'x_coords':x_coords, 'y_coords':y_coords, 'z_coords':z_coords, 'positions':positions, 'atom_types':atom_types, 'num_atoms':num_atoms, 'atom_type_set':atom_type_set, 'num_atom_types':num_atom_types, 'residues':residue_names, 'residue_set':residue_set} # add a value to the dictionary, which is all of the atomic coordinates just # shifted to the origin protein_dict = shift_coords(protein_dict) return protein_dict
aa7fe0f338119b03f00a2acb727608afcd5c1e0d
3,655,409
def __filter_handler(query_set, model, params):
    """
    Handle user-provided filtering requests.

    Args:
        query_set: SQLAlchemy query set to be filtered.
        model: Data model from which given query set is generated.
        params: User-provided filter params, with format {"query": [...], ...}.
            For query format see "__build_filter_exp" function.

    Returns:
        A query set with user-provided filters applied.
    """
    query = params.get("query")
    if query:
        filter_exp = __build_filter_exp(query, model)
        return query_set.filter(filter_exp)
    else:
        return query_set
c91b7d55795399f453106d0dda52e80a0c998075
3,655,411
def split_data_set(data_set, axis, value):
    """
    Split the data set on a given feature, keeping the rows whose value for that
    feature equals the specified value.
    (Because the split is done on that feature, it is no longer needed in later
    splits, so it is removed from the rows of the new list.)
    :param data_set: the data set to split; each row is a list whose last element
        is the label and whose other elements are features
    :param axis: the feature (its index) to split the data set on
    :param value: the feature value to keep (rows are kept when the feature equals this value)
    :return:
    >>>myDat = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    >>>split_data_set(myDat,0,1)
    [[1, 'yes'], [1, 'yes'], [0, 'no']]
    >>>split_data_set(myDat,0,0)
    [[1, 'no'], [1, 'no']]
    """
    # Create a new list object
    ret_data_set = []
    for feature_vec in data_set:
        if feature_vec[axis] == value:
            # Extract: drop the selected feature and build a new feature + label row
            reduced_feature_vec = feature_vec[:axis]
            reduced_feature_vec.extend(feature_vec[axis + 1:])
            ret_data_set.append(reduced_feature_vec)
    return ret_data_set
f90fdffee3bbee4b4477e371a9ed43094051126a
3,655,412
import ast import six import types import inspect import textwrap def get_ast(target_func_or_module): """ See :func:``bettertimeit`` for acceptable types. :returns: an AST for ``target_func_or_module`` """ if isinstance(target_func_or_module, ast.AST): return target_func_or_module if not isinstance(target_func_or_module, (six.string_types, six.binary_type)): handled_types = ( types.ModuleType, types.FunctionType, getattr(types, "UnboundMethodType", types.MethodType), types.MethodType, ) if not isinstance(target_func_or_module, handled_types): raise TypeError("Don't know how to handle objects of types '%s'" % type(target_func_or_module)) target_func_or_module = inspect.getsource(target_func_or_module) target_func_or_module = textwrap.dedent(target_func_or_module) return ast.parse(target_func_or_module)
929a8f1b915850c25369edf0dcf0dc8bc2fe16e9
3,655,413
from typing import List from re import T from typing import Callable from typing import Tuple def enumerate_spans(sentence: List[T], offset: int = 0, max_span_width: int = None, min_span_width: int = 1, filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]: """ Given a sentence, return all token spans within the sentence. Spans are `inclusive`. Additionally, you can provide a maximum and minimum span width, which will be used to exclude spans outside of this range. Finally, you can provide a function mapping ``List[T] -> bool``, which will be applied to every span to decide whether that span should be included. This allows filtering by length, regex matches, pos tags or any Spacy ``Token`` attributes, for example. Parameters ---------- sentence : ``List[T]``, required. The sentence to generate spans for. The type is generic, as this function can be used with strings, or Spacy ``Tokens`` or other sequences. offset : ``int``, optional (default = 0) A numeric offset to add to all span start and end indices. This is helpful if the sentence is part of a larger structure, such as a document, which the indices need to respect. max_span_width : ``int``, optional (default = None) The maximum length of spans which should be included. Defaults to len(sentence). min_span_width : ``int``, optional (default = 1) The minimum length of spans which should be included. Defaults to 1. filter_function : ``Callable[[List[T]], bool]``, optional (default = None) A function mapping sequences of the passed type T to a boolean value. If ``True``, the span is included in the returned spans from the sentence, otherwise it is excluded.. """ max_span_width = max_span_width or len(sentence) filter_function = filter_function or (lambda x: True) spans: List[Tuple[int, int]] = [] for start_index in range(len(sentence)): last_end_index = min(start_index + max_span_width, len(sentence)) first_end_index = min(start_index + min_span_width - 1, len(sentence)) for end_index in range(first_end_index, last_end_index): start = offset + start_index end = offset + end_index # add 1 to end index because span indices are inclusive. if filter_function(sentence[slice(start_index, end_index + 1)]): spans.append((start, end)) return spans
68df595e4fd55d2b36645660df6fa9198a8d28ef
3,655,414
def fixed_mu(mu, data, qty, comp='muAI', beads_2_M=1):
    """
    """
    return fixed_conc(mu*np.ones([len(data.keys())]), data, qty, comp=comp,
                      beads_2_M=beads_2_M)
f7241e8b1534d5d537a1817bce4f019e5a4081a7
3,655,415
def error_nrmse(y_true, y_pred, time_axis=0): """ Computes the Normalized Root Mean Square Error (NRMSE). The NRMSE index is computed separately on each channel. Parameters ---------- y_true : np.array Array of true values. If must be at least 2D. y_pred : np.array Array of predicted values. If must be compatible with y_true' time_axis : int Time axis. All other axes define separate channels. Returns ------- NRMSE : np.array Array of r_squared value. """ SSE = np.mean((y_pred - y_true)**2, axis=time_axis) RMSE = np.sqrt(SSE) NRMSE = RMSE/np.std(y_true, axis=time_axis) return NRMSE
39461cdf0337c9d681d4247168ba32d7a1cbd364
3,655,416
import shutil


def rmdir_empty(f):
    """Returns a count of the number of directories it has deleted"""
    if not f.is_dir():
        return 0
    removable = True
    result = 0
    for i in f.iterdir():
        if i.is_dir():
            result += rmdir_empty(i)
            removable = removable and not i.exists()
        else:
            removable = removable and (i.name == '.DS_Store')
    if removable:
        items = list(f.iterdir())
        assert not items or items[0].name == '.DS_Store'
        print(f)
        shutil.rmtree(f)
        result += 1
    return result
f2dba5bb7e87c395886574ca5f3844a8bab609d9
3,655,417
def generic_exception_json_response(code):
    """
    Turns an unhandled exception into a JSON payload to respond to a service call
    """
    payload = {
        "error": "TechnicalException",
        "message": "An unknown error occured",
        "code": code
    }
    resp = make_response(jsonify(payload), code)
    resp.headers["Content-type"] = "application/json"
    return resp
fc2f0edfc774a56e6b6ccfc8a746b37ad19f6536
3,655,418
def UnN(X, Z, N, sampling_type, kernel="prod"):
    """Computes block-wise complete U-statistic."""
    def fun_block(x, z):
        return Un(x, z, kernel=kernel)

    return UN(X, Z, N, fun_block, sampling_type=sampling_type)
962788706d3b4d71a0f213f925e89fd78f220791
3,655,419
def delete_card(request):
    """Delete card"""
    return delete_container_element(request)
128b521ed89077ebae019942147fc3b4af1a5cdf
3,655,420
def get_plot_grid_size(num_plots, fewer_rows=True): """ Returns the number of rows and columns ideal for visualizing multiple (identical) plots within a single figure Parameters ---------- num_plots : uint Number of identical subplots within a figure fewer_rows : bool, optional. Default = True Set to True if the grid should be short and wide or False for tall and narrow Returns ------- nrows : uint Number of rows ncols : uint Number of columns """ assert isinstance(num_plots, Number), 'num_plots must be a number' # force integer: num_plots = int(num_plots) if num_plots < 1: raise ValueError('num_plots was less than 0') if fewer_rows: nrows = int(np.floor(np.sqrt(num_plots))) ncols = int(np.ceil(num_plots / nrows)) else: ncols = int(np.floor(np.sqrt(num_plots))) nrows = int(np.ceil(num_plots / ncols)) return nrows, ncols
e83f14db347cd679e9e7b0761d928cd563444712
3,655,422
def check_source(module): """ Check that module doesn't have any globals. Example:: def test_no_global(self): result, line = check_source(self.module) self.assertTrue(result, "Make sure no code is outside functions.\\nRow: " + line) """ try: source = module.__file__ except Exception: raise Exception('Varmista, että koodin suoritus onnistuu') allowed = [ "import ", "from ", "def ", "class ", " ", "\t", "#", "if __name__", "@", ] with open(source) as file: for line in file.readlines(): if line.strip() == "": continue for prefix in allowed: if line.startswith(prefix): break else: return (False, line) return (True, "")
6bc012892d6ec7bb6788f20a565acac0f6d1c662
3,655,423
def pre_processing(X):
    """ Center and sphere data."""
    eps = 1e-18
    n = X.shape[0]
    cX = X - np.mean(X, axis=0)  # centering
    cov_mat = 1.0/n * np.dot(cX.T, cX)
    eigvals, eigvecs = eigh(cov_mat)
    D = np.diag(1./np.sqrt(eigvals+eps))
    W = np.dot(np.dot(eigvecs, D), eigvecs.T)  # whitening matrix
    wcX = np.dot(cX, W)
    return wcX
802ce958c6616dcf03de5842249be8480e6a5a7c
3,655,424
def get_as_tags(bundle_name, extension=None, config="DEFAULT", attrs=""):
    """
    Get a list of formatted <script> & <link> tags for the assets in the
    named bundle.

    :param bundle_name: The name of the bundle
    :param extension: (optional) filter by extension, eg. "js" or "css"
    :param config: (optional) the name of the configuration
    :param attrs: (optional) further attributes on the tags

    :return: a list of formatted tags as strings
    """
    bundle = _get_bundle(bundle_name, extension, config)
    return _render_tags(bundle, attrs)
ec54184ff2b13bd4f37de8395276685191535948
3,655,425
def devpiserver_get_credentials(request):
    """Search request for X-Remote-User header.

    Returns a tuple with (X-Remote-User, '') if credentials could be
    extracted, or None if no credentials were found.

    The first plugin to return credentials is used, the order of plugin
    calls is undefined.
    """
    if 'X-Remote-User' in request.headers:
        remote_user = request.headers['X-Remote-User']
        threadlog.info("Found X-Remote-User in request: %s", remote_user)
        return remote_user, ''
bc6ccaa52b719c25d14784c758d6b78efeae104d
3,655,427
def vader_sentiment( full_dataframe, grading_column_name, vader_columns=COLUMN_NAMES, logger=config.LOGGER ): """apply vader_sentiment analysis to dataframe Args: full_dataframe (:obj:`pandas.DataFrame`): parent dataframe to apply analysis to grading_column_name (str): column with the data to grade vader_columns (:obj:`list`. optional): names to map vader results to ['neu', 'pos', 'compound', 'neg'] logger (:obj:`logging.logger`, optional): logging handle Returns; (:obj:`pandas.DataFrame`): updated dataframe with vader sentiment """ logger.info('applying vader sentiment analysis to `%s`', grading_column_name) logger.info('--applying vader_lexicon') vader_df = map_vader_sentiment( full_dataframe[grading_column_name], column_names=vader_columns ) logger.info('--merging results into original dataframe') joined_df = full_dataframe.merge( vader_df, how='left', on=grading_column_name ) return joined_df
43572857ecc382f800b243ee12e6f3fe3b1f5d5a
3,655,428
def _overlayPoints(points1, points2): """Given two sets of points, determine the translation and rotation that matches them as closely as possible. Parameters ---------- points1 (numpy array of simtk.unit.Quantity with units compatible with distance) - reference set of coordinates points2 (numpy array of simtk.unit.Quantity with units compatible with distance) - set of coordinates to be rotated Returns ------- translate2 - vector to translate points2 by in order to center it rotate - rotation matrix to apply to centered points2 to map it on to points1 center1 - center of points1 Notes ----- This is based on W. Kabsch, Acta Cryst., A34, pp. 828-829 (1978). """ if len(points1) == 0: return (mm.Vec3(0, 0, 0), np.identity(3), mm.Vec3(0, 0, 0)) if len(points1) == 1: return (points1[0], np.identity(3), -1*points2[0]) # Compute centroids. center1 = unit.sum(points1)/float(len(points1)) center2 = unit.sum(points2)/float(len(points2)) # Compute R matrix. R = np.zeros((3, 3)) for p1, p2 in zip(points1, points2): x = p1-center1 y = p2-center2 for i in range(3): for j in range(3): R[i][j] += y[i]*x[j] # Use an SVD to compute the rotation matrix. (u, s, v) = lin.svd(R) return (-1*center2, np.dot(u, v).transpose(), center1)
c3d1df9569705bcee33e112596e8ab2a332e947e
3,655,429
def return_request(data):
    """
    Arguments:
        data

    Return if call detect:
        list[dict1, dict2, ...]:
            dict = {
                "feature": feature
            }

    Return if call extract:
        list[dict1, dict2, ...]:
            dict = {
                "confidence_score": predict probability,
                "class": face,
                "bounding_box": [xmin, ymin, xmax, ymax],
                "keypoints": {'left_eye': (x,y), 'right_eye':(x,y), 'nose': (x,y), 'mouth_left': (x,y), 'mouth_right': (x,y)}
            }
    """
    contents = []

    try:
        boxs = data['predictions']
        print(type(boxs))
        print(boxs)
        # for box in boxs:
        #     contents.append({
        #         "confidence_score": box[4],
        #         "class": 'face',
        #         "bounding_box": [box[0], box[1], box[2], box[3]]
        #     })
    except Exception:
        pass
    try:
        features = data['features']
        for feature in features:
            contents.append({
                "feature": feature
            })
    except Exception:
        pass
    return contents
11887921c89a846ee89bc3cbb79fb385382262fa
3,655,430
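A small runnable check of return_request above (assuming it is in scope): with only a "features" key present, the "predictions" branch is skipped and each feature is wrapped in its own dict.

print(return_request({"features": [[0.12, 0.54, 0.33]]}))
# [{'feature': [0.12, 0.54, 0.33]}]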
def get_recent_messages_simple(e: TextMessageEventObject):
    """
    Command to get the most recent messages with default count.

    This command has a cooldown of ``Bot.RecentActivity.CooldownSeconds`` seconds.

    This command will get the most recent ``Bot.RecentActivity.DefaultLimitCountLink`` messages
    without the message that called this command.

    :param e: message event that called this command

    :return: default count of most recent messages with a link to the recent activity page
    """
    return get_recent_messages(e, Bot.RecentActivity.DefaultLimitCountLink)
58d03a4b34254dc532ad6aa53747ea730446cd31
3,655,431
def parse_systemctl_units(stdout:str, stderr:str, exitcode:int) -> dict: """ UNIT LOAD ACTIVE SUB DESCRIPTION mono-xsp4.service loaded active running LSB: Mono XSP4 motd-news.service loaded inactive dead Message of the Day ● mountkernfs.service masked inactive dead mountkernfs.service systemd-machine-id-commit.service loaded inactive dead Commit a transient machine-id on disk ● systemd-modules-load.service loaded failed failed Load Kernel Modules systemd-networkd-resolvconf-update.service loaded inactive dead Update resolvconf for networkd DNS sysinit.target loaded active active System Initialization ● syslog.target not-found inactive dead syslog.target time-sync.target loaded active active System Time Synchronized LOAD = Reflects whether the unit definition was properly loaded. ACTIVE = The high-level unit activation state, i.e. generalization of SUB. SUB = The low-level unit activation state, values depend on unit type. 354 loaded units listed. To show all installed unit files use 'systemctl list-unit-files'. """ if exitcode != 0: raise Exception() # split into list of lines lines = LineList(stdout) assert isinstance(lines, LineList) # now we must separate a trailing description. lineNumbers = lines.getLineNumbersOfEmptyLines() assert lineNumbers assert lineNumbers[0] > 0 del lines[lineNumbers[0]:] # get column split positions wordPos = [ 0 ] + getPositionsOfWords(lines[0]) table = lines.createDataTableFromColumns(wordPos, bLStrip=True, bRStrip=True, bFirstLineIsHeader=True, columnDefs=[ ColumnDef("MARK", _parseMark), ColumnDef("UNIT"), ColumnDef("LOAD"), ColumnDef("ACTIVE"), ColumnDef("SUB"), ColumnDef("DESCRIPTION"), ]) # build output matrix: use service names as keys ret = {} for record in table: key = record[1] pos = key.rfind(".") category = key[pos+1:] + "s" # pluralize the category key = key[:pos] if category not in ret: ret[category] = {} ret[category][key] = record return ret
d9e7e4c71f418311799345c7dacfb9655912475f
3,655,432
def resnet_model_fn(is_training, feature, label, data_format, params): """Build computation tower (Resnet). Args: is_training: true if is training graph. feature: a Tensor. label: a Tensor. data_format: channels_last (NHWC) or channels_first (NCHW). params: params for the model to consider Returns: A tuple with the loss for the tower, the gradients and parameters, and predictions. """ num_layers = params.num_layers batch_norm_decay = params.batch_norm_decay batch_norm_epsilon = params.batch_norm_epsilon weight_decay = params.weight_decay model = cifar10_with_resnet_model.ResNetCifar10( num_layers, batch_norm_decay=batch_norm_decay, batch_norm_epsilon=batch_norm_epsilon, is_training=is_training, data_format=data_format) logits = model.forward_pass(feature, input_data_format='channels_last') predictions = { 'classes': tf.argmax(input=logits, axis=1), 'probabilities': tf.nn.softmax(logits) } loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=label) loss = tf.reduce_mean(loss) model_params = tf.trainable_variables() loss += weight_decay * tf.add_n( [tf.nn.l2_loss(v) for v in model_params]) gradients = tf.gradients(loss, model_params) return loss, zip(gradients, model_params), predictions
70f30b4c5b4485ed1c4f362cc7b383cb192c57c4
3,655,433
import numpy as np

def permutate_touched_latent_class(untouched_classes, class_info_np, gran_lvl_info):
    """Keep the latent classes of the given (untouched) classes fixed and permute the rest,
    which preserves H(Y)."""
    # get untouched instance index
    untouched_instance_index = []
    for i in untouched_classes:
        index = np.where(class_info_np == i)[0]
        untouched_instance_index.append(index)
    untouched_instance_index_np = np.concatenate(untouched_instance_index)

    # permutate touched id
    my_gran_lvl_info = gran_lvl_info * np.ones(gran_lvl_info.shape) # replicate the gran_lvl_info
    untouched_latent_class_np = my_gran_lvl_info[untouched_instance_index_np]

    touched_index = np.delete(np.arange(my_gran_lvl_info.shape[0]), untouched_instance_index_np, 0) # exclude untouched index
    touched_latent_class = my_gran_lvl_info[touched_index]

    my_gran_lvl_info[touched_index] = np.random.permutation(touched_latent_class)
    return my_gran_lvl_info.astype(np.int32)
ebae2213508260474a9e9c581f6b42fd81006a22
3,655,434
from datetime import datetime
import pytz
import numpy as np
import pandas as pd

def get_interarrival_times(arrival_times, period_start):
    """
    Given a list of report dates, it returns the list corresponding to the interarrival times.

    :param arrival_times: List of arrival times.
    :param period_start: Start of the observation period, used for the first interarrival time.
    :return: List of interarrival times.
    """
    interarrival_times = []

    for position, created_date in enumerate(arrival_times):
        if position > 0:
            distance = created_date - arrival_times[position - 1]
            interarrival_times.append(get_distance_in_hours(distance))
        else:
            if isinstance(created_date, np.datetime64):
                created_date = datetime.utcfromtimestamp(created_date.tolist() / 1e9)
                created_date = pytz.utc.localize(created_date)

            distance = get_distance_in_hours(created_date - period_start)
            if distance > 0:
                interarrival_times.append(distance)

    return pd.Series(data=interarrival_times)
b6d345ff73e16d8c7502509ddd36e2a1d7f12252
3,655,435
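A usage sketch for get_interarrival_times above. The module-level helper get_distance_in_hours is not shown in the snippet, so a plausible stand-in (timedelta converted to hours) is defined here purely for illustration.

from datetime import datetime, timedelta
import pytz

def get_distance_in_hours(delta):
    # hypothetical stand-in for the helper assumed by the snippet above
    return delta.total_seconds() / 3600.0

period_start = pytz.utc.localize(datetime(2024, 1, 1))
arrivals = [period_start + timedelta(hours=h) for h in (2, 5, 9)]
print(get_interarrival_times(arrivals, period_start).tolist())  # [2.0, 3.0, 4.0]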
def _indexOp(opname): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ def wrapper(self, other): func = getattr(self.view(np.ndarray), opname) return func(other) return wrapper
a9bdccca9d0bc1ffa2334132c6cfd4b965b95878
3,655,436
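_indexOp is a small factory that forwards an operator to the underlying ndarray view. A minimal sketch of how such wrappers are typically attached to an ndarray subclass (assuming _indexOp and numpy are in scope):

import numpy as np

class MyIndex(np.ndarray):
    __lt__ = _indexOp('__lt__')

idx = np.array([1, 5, 3]).view(MyIndex)
print(idx < 4)  # [ True False  True]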
import numpy as np

def safe_log(a):
    """
    Return the element-wise log of an array, checking for negative
    array elements and avoiding divide-by-zero errors.
    """
    if np.any(a < 0):
        raise ValueError('array contains negative components')

    return np.log(a + 1e-12)
7ac5f01272f4c90110c4949aba8dfb9f783c82b9
3,655,437
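A brief check of safe_log above (assuming it and numpy are in scope): zeros are nudged by 1e-12 before the log, and negative entries raise.

import numpy as np

print(safe_log(np.array([1.0, np.e, 0.0])))  # roughly [0., 1., -27.63]
try:
    safe_log(np.array([-1.0]))
except ValueError as err:
    print(err)  # array contains negative components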
def make_batch_keys(args, extras=None): """depending on the args, different data are used by the listener.""" batch_keys = ['objects', 'tokens', 'target_pos'] # all models use these if extras is not None: batch_keys += extras if args.obj_cls_alpha > 0: batch_keys.append('class_labels') if args.lang_cls_alpha > 0: batch_keys.append('target_class') return batch_keys
a86c2a5cff58f811a67cbdd5eed322c86aa3e0e0
3,655,441
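An illustrative call to make_batch_keys above; args only needs the two alpha attributes, so a SimpleNamespace stands in for the real argument object (an assumption, not the project's actual args class).

from types import SimpleNamespace

args = SimpleNamespace(obj_cls_alpha=0.5, lang_cls_alpha=0.0)
print(make_batch_keys(args, extras=['context']))
# ['objects', 'tokens', 'target_pos', 'context', 'class_labels']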
from typing import List

def magic_split(value: str, sep=",", open="(<", close=")>"):
    """Split the value according to the given separator, but keep together elements
    enclosed by the given open/close characters. Useful to split C++ function
    signatures, since type names can contain the separator character.

    Examples:
        - magic_split("a,b,c", sep=",") -> ["a", "b", "c"]
        - magic_split("a<b,c>,d(e,<k,c>),p") -> ["a<b,c>", "d(e,<k,c>)", "p"]

    Args:
        value: String to split.
        sep: Separator to use.
        open: List of opening characters.
        close: List of closing characters. Order must match open.

    Returns:
        The list of split parts from value.
    """
    i, j = 0, 0
    s: List[int] = []
    r = []
    while i < len(value):
        j = i + 1
        while j < len(value):
            c = value[j]

            # Separator found and the stack is empty:
            if c == sep and not s:
                break

            # Check close/open:
            if c in open:
                s.append(open.index(c))
            elif c in close:
                # The stack might be empty if the separator is also an opening element:
                if not s and sep in open and j + 1 == len(value):
                    pass
                else:
                    t = s.pop()
                    if t != close.index(c):
                        raise ValueError(
                            "Found closing element {} for opening element {}.".format(
                                c, open[t]
                            )
                        )
            j += 1
        r.append(value[i:j])
        i = j + 1

    return r
9f152c9cfa82778dcf5277e3342b5cab25818a55
3,655,443
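The docstring examples of magic_split above, as runnable checks (assuming the function is in scope):

print(magic_split("a,b,c"))                # ['a', 'b', 'c']
print(magic_split("a<b,c>,d(e,<k,c>),p"))  # ['a<b,c>', 'd(e,<k,c>)', 'p']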
def update_group_annotation(name=None, annotation_name=None, x_pos=None, y_pos=None, angle=None, opacity=None, canvas=None, z_order=None, network=None, base_url=DEFAULT_BASE_URL): """Update Group Annotation Updates a group annotation, changing the given properties. Args: name (UUID or str): Single UUID or str naming group object annotation_name (UUID or str): Name of annotation by UUID or name x_pos (int): X position in pixels from left; default is center of current view y_pos (int): Y position in pixels from top; default is center of current view angle (float): Angle of text orientation; default is 0.0 (horizontal) canvas (str): Canvas to display annotation, i.e., foreground (default) or background z_order (int): Arrangement order specified by number (larger values are in front of smaller values); default is 0 network (SUID or str or None): Name or SUID of the network. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: dict: A named list of annotation properties, including UUID Raises: CyError: if invalid name requests.exceptions.HTTPError: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> update_group_annotation(annotation_name='Group 1', angle=180) {'canvas': 'foreground', 'rotation': '180.0', 'name': 'Group 1', 'x': '2450.0', 'y': '1883.0', 'z': '0', 'type': 'org.cytoscape.view.presentation.annotations.GroupAnnotation', 'uuid': 'b9bf3184-3c5a-4e8b-9651-4bc4403af158', 'memberUUIDs': 'bb3061c5-d8d5-4fca-ac5c-9b7bf8fb9fd0,32f89c1d-e987-4867-9b8a-787aaac6e165,ec73aad8-b00b-4f4d-9361-a4b93f70c8f8'} >>> update_group_annotation(name='2c0a77f8-a6d0-450d-b6ee-1bfe3c8f8aea', annotation_name=group_uuid, x_pos=101, y_pos=201, angle=180, canvas='foreground') {'canvas': 'foreground', 'rotation': '180.0', 'name': 'Group 1', 'x': '101.0', 'y': '201.0', 'z': '0', 'type': 'org.cytoscape.view.presentation.annotations.GroupAnnotation', 'uuid': '2c0a77f8-a6d0-450d-b6ee-1bfe3c8f8aea', 'memberUUIDs': '8872c2f6-42ad-4b6a-8fb9-1d1b13da504d,2c830227-7f6a-4e58-bbef-2070f1b5a603,8d04e34d-86b8-486f-9927-581184cbe03e'} """ cmd_string, net_suid = _build_base_cmd_string('annotation update group', network, base_url) # a good start cmd_string += _get_annotation_name_cmd_string(annotation_name, 'Must provide the UUID or name of group') # x and y position cmd_string += _get_x_y_pos_cmd_string(x_pos, y_pos, net_suid, base_url) # optional params cmd_string += _get_angle_cmd_string(angle) cmd_string += _get_name_cmd_string(name, network, base_url) cmd_string += _get_canvas_cmd_string(canvas) cmd_string += _get_z_order_cmd_string(z_order) # execute command res = commands.commands_post(cmd_string, base_url=base_url) return res
fd515728ee3a3dece381bb65e2c6816b9c96b41e
3,655,444
import urllib def _extract_properties(properties_str): """Return a dictionary of properties from a string in the format ${key1}={value1}&${key2}={value2}...&${keyn}={valuen} """ d = {} kv_pairs = properties_str.split("&") for entry in kv_pairs: pair = entry.split("=") key = urllib.parse.unquote(pair[0]).lstrip("$") value = urllib.parse.unquote(pair[1]) d[key] = value return d
4f22cae8cbc2dd5b73e6498d5f8e6d10e184f91c
3,655,445
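A quick check of _extract_properties above (assuming it is in scope): keys lose their leading "$" and values are URL-decoded.

print(_extract_properties("$colour=dark%20blue&$size=42"))
# {'colour': 'dark blue', 'size': '42'}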
from typing import Any def first_fail_second_succeed(_: Any, context: Any) -> str: """ Simulate Etherscan saying for the first time 'wait', but for the second time 'success'. """ context.status_code = 200 try: if first_fail_second_succeed.called: # type: ignore return '{ "status": "1", "result" : "Pass - Verified", "message" : "" }' except AttributeError: # first time pass first_fail_second_succeed.called = True # type: ignore return '{ "status": "0", "result" : "wait for a moment", "message" : "" }'
5feb3188bdee2d0d758584709df13dc876c37391
3,655,446
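A short demonstration of the mock above (assuming it is in scope); the state lives on the function object itself, so the first call reports the wait message and the second reports success.

from types import SimpleNamespace

ctx = SimpleNamespace(status_code=None)
print(first_fail_second_succeed(None, ctx))  # ... "wait for a moment" ...
print(first_fail_second_succeed(None, ctx))  # ... "Pass - Verified" ...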
def get_ipsw_url(device, ios_version, build): """Get URL of IPSW by specifying device and iOS version.""" json_data = fw_utils.get_json_data(device, "ipsw") if build is None: build = fw_utils.get_build_id(json_data, ios_version, "ipsw") fw_url = fw_utils.get_firmware_url(json_data, build) if fw_url is None: print("[w] could not get IPSW url, exiting...") return fw_url
75b9d85d93b03b1ebda681aeb51ac1c9b0a30474
3,655,447
from typing import Any def escape_parameter(value: Any) -> str: """ Escape a query parameter. """ if value == "*": return value if isinstance(value, str): value = value.replace("'", "''") return f"'{value}'" if isinstance(value, bytes): value = value.decode("utf-8") return f"'{value}'" if isinstance(value, bool): return "TRUE" if value else "FALSE" if isinstance(value, (int, float)): return str(value) return f"'{value}'"
00b706681b002a3226874f04e74acbb67d54d12e
3,655,448
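Some quick checks of escape_parameter above (assuming it is in scope):

print(escape_parameter("O'Brien"))  # 'O''Brien'
print(escape_parameter(True))       # TRUE
print(escape_parameter(3.5))        # 3.5
print(escape_parameter("*"))        # *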
def Get_Query(Fq):
    """ Read the next query from the open file object Fq.

    Lines containing "--" are skipped as comments; reading stops once the
    accumulated text contains a ";" or the end of file is reached.

    Returns a tuple (EoF, Ok, Q): whether EOF was hit, whether a complete
    query was read, and the query text itself.
    """
    Q = ""
    EoF = False
    Ok = False
    while True:
        l = Fq.readline()
        if ("--" in l):  # skip comment line
            continue
        elif l == "":
            EoF = True
            break
        else:
            Q += l
            if ";" in Q:
                Ok = True
                break
    return EoF, Ok, Q
a1850799f7c35e13a5b61ba8ebbed5d49afc08df
3,655,449
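A usage sketch for Get_Query above (assuming it is in scope), reading ';'-terminated statements from an in-memory file:

import io

sql = io.StringIO("SELECT 1;\n-- a comment\nSELECT 2;\n")
while True:
    eof, ok, query = Get_Query(sql)
    if ok:
        print(repr(query))  # 'SELECT 1;\n' then 'SELECT 2;\n'
    if eof:
        break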
from urllib.parse import unquote_plus, urlparse

def get_path_segments(url):
    """
    Return a list of path segments from a `url` string. This list may be empty.
    """
    path = unquote_plus(urlparse(url).path)
    segments = [seg for seg in path.split("/") if seg]

    if len(segments) <= 1:
        segments = []

    return segments
fe8daff2269d617516a22f7a2fddc54bd76c5025
3,655,450
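Quick checks of get_path_segments above (assuming it is in scope); note that a path with a single segment is deliberately treated as empty.

print(get_path_segments("https://example.com/a/b%20c/d"))  # ['a', 'b c', 'd']
print(get_path_segments("https://example.com/only"))       # []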
import typing from pathlib import Path def get_config_file(c: typing.Union[str, ConfigFile, None]) -> typing.Optional[ConfigFile]: """ Checks if the given argument is a file or a configFile and returns a loaded configFile else returns None """ if c is None: # See if there's a config file in the current directory where Python is being run from current_location_config = Path("flytekit.config") if current_location_config.exists(): logger.info(f"Using configuration from Python process root {current_location_config.absolute()}") return ConfigFile(str(current_location_config.absolute())) # If not, see if there's a config in the user's home directory home_dir_config = Path(Path.home(), ".flyte", "config") # _default_config_file_name in main.py if home_dir_config.exists(): logger.info(f"Using configuration from home directory {home_dir_config.absolute()}") return ConfigFile(str(home_dir_config.absolute())) # If not, see if the env var that flytectl sandbox tells the user to set is set, # or see if there's something in the default home directory location flytectl_path = Path(Path.home(), ".flyte", "config.yaml") flytectl_path_from_env = getenv(FLYTECTL_CONFIG_ENV_VAR, None) if flytectl_path_from_env: flytectl_path = Path(flytectl_path_from_env) if flytectl_path.exists(): logger.info(f"Using flytectl/YAML config {flytectl_path.absolute()}") return ConfigFile(str(flytectl_path.absolute())) # If not, then return None and let caller handle return None if isinstance(c, str): return ConfigFile(c) return c
6e176167a81bccaa0e0f4570c918fcb32a406edb
3,655,451
def get_index(): """Redirects the index to /form """ return redirect("/form")
e52323397156a5a112e1d6b5d619136ad0fea3f0
3,655,453
def _register_network(network_id: str, chain_name: str): """Register a network. """ network = factory.create_network(network_id, chain_name) cache.infra.set_network(network) # Inform. utils.log(f"registered {network.name_raw} - metadata") return network
8e9b84670057974a724df1387f4a8cf9fc886a56
3,655,454