Dataset columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import web3
from web3.middleware import geth_poa_middleware


def make_w3(gateway_config=None):
    """
    Create a Web3 instance configured as a ready-to-use gateway to the blockchain.

    :param gateway_config: Blockchain gateway configuration.
    :type gateway_config: dict

    :return: Configured Web3 instance.
    :rtype: :class:`web3.Web3`
    """
    if gateway_config is None or gateway_config['type'] == 'auto':
        w3 = web3.Web3()
    elif gateway_config['type'] == 'user':
        request_kwargs = gateway_config.get('http_options', {})
        w3 = web3.Web3(web3.Web3.HTTPProvider(gateway_config['http'], request_kwargs=request_kwargs))
    elif gateway_config['type'] == 'infura':
        request_kwargs = gateway_config.get('http_options', {})
        project_id = gateway_config['key']
        # project_secret = gateway_config['secret']
        http_url = 'https://{}.infura.io/v3/{}'.format(gateway_config['network'], project_id)
        w3 = web3.Web3(web3.Web3.HTTPProvider(http_url, request_kwargs=request_kwargs))

        # https://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority
        if gateway_config.get('network', None) == 'rinkeby':
            # This middleware is required to connect to geth --dev or the Rinkeby public network.
            # Inject the PoA compatibility middleware at the innermost layer.
            w3.middleware_onion.inject(geth_poa_middleware, layer=0)
    else:
        raise RuntimeError('invalid blockchain gateway type "{}"'.format(gateway_config['type']))

    return w3
85119161c7842319718e7075192b277f810b4328
3,654,046
def _log2_ratio_to_absolute(log2_ratio, ref_copies, expect_copies, purity=None):
    """Transform a log2 ratio to absolute linear scale (for an impure sample).

    Does not round to an integer absolute value here.

    Math::

        log2_ratio = log2(ncopies / ploidy)
        2^log2_ratio = ncopies / ploidy
        ncopies = ploidy * 2^log2_ratio

    With rescaling for purity::

        let v = log2 ratio value, p = tumor purity,
            r = reference ploidy, x = expected ploidy,
            n = tumor ploidy ("ncopies" above);

        v = log_2(p*n/r + (1-p)*x/r)
        2^v = p*n/r + (1-p)*x/r
        n*p/r = 2^v - (1-p)*x/r
        n = (r*2^v - x*(1-p)) / p

    If purity adjustment is skipped (p=1; e.g. if germline or if scaling for
    heterogeneity was done beforehand)::

        n = r*2^v
    """
    if purity and purity < 1.0:
        ncopies = (ref_copies * 2 ** log2_ratio - expect_copies * (1 - purity)) / purity
    else:
        ncopies = _log2_ratio_to_absolute_pure(log2_ratio, ref_copies)
    return ncopies
939a9e4ccb0a1fe9c8c2e6f369bb23c556f04a14
3,654,047
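A minimal worked example for `_log2_ratio_to_absolute` above, using hypothetical values (diploid reference and expected copies, 50% purity); it exercises only the purity branch, so no other helpers are needed:

# n = (r*2^v - x*(1-p)) / p with v=1.0, r=2, x=2, p=0.5
ncopies = _log2_ratio_to_absolute(log2_ratio=1.0, ref_copies=2, expect_copies=2, purity=0.5)
assert ncopies == (2 * 2**1.0 - 2 * (1 - 0.5)) / 0.5  # == 6.0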
def fixt():
    """
    Create an Exchange object that will be re-used during testing.
    """
    mesh = df.UnitCubeMesh(10, 10, 10)
    S3 = df.VectorFunctionSpace(mesh, "DG", 0)
    Ms = 1
    A = 1
    m = df.Function(S3)
    exch = ExchangeDG(A)
    exch.setup(S3, m, Ms)
    return {"exch": exch, "m": m, "A": A, "S3": S3, "Ms": Ms}
4e46550f1ef9e821e459612b82c0410b7459b09b
3,654,048
def touch_emulator(ev, x, y):
    """
    This emulates a touch-screen device, like a tablet or smartphone.
    """
    if ev.type == pygame.MOUSEBUTTONDOWN:
        if ev.button != 1:
            return None, x, y
    elif ev.type == pygame.MOUSEBUTTONUP:
        if ev.button != 1:
            return None, x, y
        move = pygame.event.Event(pygame.MOUSEMOTION,
                                  {"pos": (0, 0), "rel": (0, 0), "buttons": (0, 0, 0)})
        renpy.display.interface.pushed_event = move
    elif ev.type == pygame.MOUSEMOTION:
        if not ev.buttons[0]:
            x = 0
            y = 0
    elif ev.type == pygame.KEYDOWN:
        if ev.key not in TOUCH_KEYS:
            return None, x, y
    elif ev.type == pygame.KEYUP:
        if ev.key not in TOUCH_KEYS:
            return None, x, y

    return ev, x, y
826b17c7bc9089acebdf3e1ea64fa0613e13e8ea
3,654,049
def invert_dict(d):
    """
    Invert dictionary by switching keys and values.

    Parameters
    ----------
    d : dict
        python dictionary

    Returns
    -------
    dict
        Inverted python dictionary
    """
    return dict((v, k) for k, v in d.items())
c70bfdb5ffa96cf07b1a4627aa484e3d5d0f4fea
3,654,051
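A quick sanity check for `invert_dict` above (with Python 3.7+ insertion-ordered dicts, duplicate values collapse to the last key seen):

assert invert_dict({'a': 1, 'b': 2}) == {1: 'a', 2: 'b'}
assert invert_dict({'x': 1, 'y': 1}) == {1: 'y'}  # duplicate values keep only one key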
def atom_present_in_geom(geom, b, tol=DEFAULT_SYM_TOL):
    """Function used by set_full_point_group() to scan a given geometry
    and determine if an atom is present at a given location.
    """
    for i in range(len(geom)):
        a = [geom[i][0], geom[i][1], geom[i][2]]
        if distance(b, a) < tol:
            return True
    return False
ad4afd6ca3d419b69ef502d64e3e66635485d340
3,654,052
import locale


def atof(value):
    """
    locale.atof() on unicode string fails in some environments, like Czech.
    """
    if isinstance(value, unicode):
        value = value.encode("utf-8")
    return locale.atof(value)
b0b2d2ea70c5e631ad2a1d25eb5c55d06cbdac1e
3,654,053
def r_group_list(mol, core_mol):
    """
    This takes a mol and the common core and finds all the R-groups by
    replacing the atoms in the ligand (which make up the common core) with
    nothing.

    This fragments the ligand and from those fragments we are able to
    determine what our R-groups are. For any common core atom which touched
    the fragment a * will replace that atom in the fragments.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule
    :param rdkit.Chem.rdchem.Mol core_mol: an rdkit molecule for the shared
        common core

    Returns:
    :returns: rdkit.Chem.rdchem.Mol replace_core_mol: an rdkit molecule with
        the common core removed from a ligand; it fragments the mol and can be
        used to make lists of R-groups
    """
    # This returns all the mol frags for a particular compound against the
    # core molecule
    replace_core_mol = Chem.ReplaceCore(
        mol, core_mol, labelByIndex=True, replaceDummies=True, requireDummyMatch=False
    )

    if len(replace_core_mol.GetAtoms()) == 0:
        # This means that the mol either did not contain the core_mol or the
        # core_mol is the same mol as the mol. ie) if mol_string
        # ="[10000N-]=[10001N+]=[10002N][10003CH]1[10004O][10005CH]([10006CH2][10007OH])[10008CH]([10013OH])[10009CH]([10012OH])[10010CH]1[10011OH]"
        # and core_string
        # ="[10000NH]=[10001N+]=[10002N][10003CH]1[10004O][10005CH]([10006CH2][10007OH])[10008CH]([10013OH])[10009CH]([10012OH])[10010CH]1[10011OH]"
        # the only difference is the H's which means it can be replaced within
        # because it is the same mol. This is rare but does occur.
        return None

    return replace_core_mol
16df0f54a5bf374bd1e77d3443baa42aab2dd231
3,654,054
def set_execution_target(backend_id='simulator'):
    """
    Used to run jobs on real hardware.

    :param backend_id: device name. The list of available devices depends on the provider.

    Example usage:
        set_execution_target(backend_id='arn:aws:braket:::device/quantum-simulator/amazon/sv1')
    """
    global device
    if backend_id is None or backend_id == "":
        device = None
    elif backend_id == "simulator":
        device = LocalSimulator()
    else:
        device = AwsDevice(backend_id)

    if verbose:
        print(f"... using Braket device = {device}")

    # create an informative device name
    device_name = device.name
    device_str = str(device)
    if device_str.find(":device/") > 0:
        idx = device_str.rindex(":device/")
        device_name = device_str[idx + 8:-1]
    metrics.set_plot_subtitle(f"Device = {device_name}")

    return device
2efb44723a804e39d998ddc3e7a7c3bdaa0440db
3,654,055
def segregate(str):
    """3.1 Basic code point segregation"""
    base = bytearray()
    extended = set()
    for c in str:
        if ord(c) < 128:
            base.append(ord(c))
        else:
            extended.add(c)
    extended = sorted(extended)
    return bytes(base), extended
e274393735bf4f1d51a75c73351848cbfdd5f81f
3,654,056
def count_tetrasomic_indivs(lis) -> dict:
    """ Count number of times that a chromosome is tetrasomic (present in four copies)

    :returns counts_of_tetrasomic_chromosomes"""
    counts_of_tetrasomic_chromosomes = {k: 0 for k in chr_range}

    for kary_group in lis:
        for index, chr_type in enumerate(chr_range):
            if kary_group[index // 2].count(chr_type) == 4:
                counts_of_tetrasomic_chromosomes[chr_type] += 1

    return counts_of_tetrasomic_chromosomes
598cd10bdbaeea061be0e259de756beba9d248b7
3,654,057
def make_image_grid(images: np.ndarray, nrow: int = 1) -> np.ndarray:
    """Concatenate multiple images into a single image.

    Args:
        images (np.array): Images can be:
            - A 4D mini-batch image of shape [B, C, H, W].
            - A 3D RGB image of shape [C, H, W].
            - A 2D grayscale image of shape [H, W].
        nrow (int): Number of images in each row of the grid. Final grid size
            is `[B / nrow, nrow]`. Default: `1`.

    Returns:
        cat_image (np.ndarray): Concatenated image.
    """
    # NOTE: Type checking
    if images.ndim == 3:
        return images

    # NOTE: Conversion (just to be sure)
    if is_channel_first(images):
        images = to_channel_last(images)

    b, c, h, w = images.shape
    ncols = nrow
    nrows = (b // nrow) if (b // nrow) > 0 else 1
    cat_image = np.zeros((c, int(h * nrows), w * ncols))
    for idx, im in enumerate(images):
        j = idx // ncols
        i = idx % ncols
        cat_image[:, j * h: j * h + h, i * w: i * w + w] = im
    return cat_image
1b367392c275a44e5c23ce19c96f5727015285f5
3,654,058
def drive_time_shapes(drive_time):
    """Simplify JSON response into a dictionary of point lists."""
    isochrones = {}
    try:
        for shape in drive_time['response']['isoline']:
            uid = str(int(shape['range'] / 60)) + ' minutes'
            points = shape['component'][0]['shape']
            point_list = array_to_points(points)
            isochrones[uid] = point_list
    except KeyError:
        print(drive_time)

    return isochrones
f8f5074d5326ba598c083fdcc228bdfb69f427a5
3,654,059
def compute_transformation_sequence_case_1(cumprod, local_shape, ind, sharded_leg_pos, pgrid): """ Helper function for `pravel`, see `pravel` for more details. """ ops = [] ndev = np.prod(pgrid) if ndev % cumprod[ind - 1] != 0: raise ValueError("reshaping not possible") remainder = ndev // cumprod[ind - 1] # the local leg has to be divisible by the remainder, # otherwise we can't place the sharded legs that need to be # localized at their respective positions if local_shape[sharded_leg_pos] % remainder != 0: raise ValueError( f"tensor.shape[{sharded_leg_pos}] = {local_shape[sharded_leg_pos]}" f" is not divisible by a local remainder of {remainder}. " f"Try using a different shape for the input tensor") if np.prod(local_shape[sharded_leg_pos:]) % remainder != 0: raise ValueError("reshaping not possible 2") # the first index group contains all legs that are going to be sharded # the second index group contain is swapped with the currently sharded legs # the third group remains unchanged orig_left_shape = tuple(local_shape[:sharded_leg_pos],) + (remainder,) orig_right_shape = (local_shape[sharded_leg_pos] // remainder,) + tuple( local_shape[sharded_leg_pos + 1:]) shape_1 = orig_left_shape + (ndev,) + (np.prod(orig_right_shape) // ndev,) ops.append(('reshape', [local_shape, shape_1])) ops.append(('pswapaxes', { 'axis_name': AXIS_NAME, 'axis': sharded_leg_pos + 1 })) # the previously sharded legs are now localized at position # sharded_leg_pos + 1 we now split off the legs that need # to be distributed again and move them to the right of their # corresponding local legs shape_2 = orig_left_shape + tuple(pgrid) + ( np.prod(orig_right_shape) // ndev,) l = list(range(len(shape_2))) left = l[:len(orig_left_shape)] right = l[len(orig_left_shape):] perm_1 = misc.flatten([[r, l] for l, r in zip(left, right[:len(left)]) ]) + right[len(left):] shape_3 = (np.prod(shape_2[:2 * len(orig_left_shape)]),) + tuple( shape_2[2 * len(orig_left_shape):]) ops.append(('reshape', [shape_1, shape_2])) ops.append(('transpose', perm_1)) perm_shape_2 = [shape_2[p] for p in perm_1] ops.append(('reshape', [perm_shape_2, shape_3])) ops.append(('pswapaxes', {'axis_name': AXIS_NAME, 'axis': 0})) # swap the first local axis with the sharded one # now we have the harded legs in the right order # next we need to fix the order of the localized legs perm_2 = list(range(1, len( pgrid[sharded_leg_pos:]))) + [0] + [len(pgrid[sharded_leg_pos:])] shape_4 = tuple(pgrid[sharded_leg_pos + 1:]) + orig_right_shape ops.append(('transpose', perm_2)) perm_shape_3 = [shape_3[p] for p in perm_2] ops.append(('reshape', [perm_shape_3, shape_4])) p = len(pgrid[sharded_leg_pos + 1:]) left = list(range(p)) right = list(range(p + 1, len(shape_4))) perm_3 = [p] + misc.flatten([[l, r] for l, r in zip(left, right[:len(left)]) ]) + right[len(left):] ops.append(('transpose', perm_3)) perm_shape_4 = [shape_4[p] for p in perm_3] shape_5 = misc.maybe_ravel_shape(perm_shape_4) ops.append(('reshape', [perm_shape_4, shape_5])) return ops
0f88967a2fcc132af753a2c5dfbf2a9b8087877a
3,654,060
def fix_join_words(text: str) -> str:
    """
    Replace all joined ``urdu`` words with separate words

    Args:
        text (str): raw ``urdu`` text

    Returns:
        str: returns a ``str`` object containing normalized text.
    """
    for key, value in WORDS_SPACE.items():
        text = text.replace(key, value)
    return text
a13040b420db5b0daf27f3a2f6a1e93a9188c312
3,654,061
import statistics def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db): """normalize expression for raw expression data (only for non-baseline data)""" #avg_true_const_exp_db[affygene] = [avg_const_exp] temp_avg_const_exp_db={} for probeset in array_raw_group_values: conditions = len(array_raw_group_values[probeset][y]); break #number of raw expresson values to normalize for affygene in gene_db: ###This is blank when there are no constitutive or the above condition is implemented if affygene in constitutive_gene_db: probeset_list = constitutive_gene_db[affygene] z = 1 else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present probeset_list = gene_db[affygene] z = 0 x = 0 while x < conditions: ### average all exp values for constitutive probesets for each conditionF exp_list=[] for probeset in probeset_list: try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis except KeyError: continue exp_list.append(exp_val) try: avg_const_exp = statistics.avg(exp_list) except Exception: avg_const_exp = 'null' if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null': if z == 1: try: avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: avg_const_exp_db[affygene] = [avg_const_exp] try: temp_avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp] elif avg_const_exp != 'null': ###*** try: avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: avg_const_exp_db[affygene] = [avg_const_exp] try: temp_avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp] x += 1 if analysis_method == 'ANOVA': global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {} for affygene in gene_db: probeset_list = gene_db[affygene] for probeset in probeset_list: while x < group_size: new_ratios = [] ### Calculate expression ratios relative to constitutive expression exp_val = array_raw_group_values[probeset][y][x] const_exp_val = temp_avg_const_exp_db[affygene][x] ###Since the above dictionary is agglomerating all constitutive expression values for permutation, ###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary) #non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val) #non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val) #non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val log_exp_ratio = exp_val - const_exp_val try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio) except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio] return avg_const_exp_db
38d5d39cb6a5532b84f08ddd0fbb27335e45897b
3,654,063
def parse_rummager_topics(results):
    """
    Parse topics from rummager results
    """
    pages = []

    for result in results:
        pages.append(
            Topic(
                name=result['title'],
                base_path=result['slug'],
                document_type=DocumentType[result['format']]
            )
        )

    return pages
d88355014c4a74e1ca7ca2ca1389850cba550612
3,654,064
def format_last_online(last_online):
    """
    Return the upper limit in seconds that a profile may have been online.

    If last_online is an int, return that int. Otherwise if last_online is a
    str, convert the string into an int.

    Returns
    ----------
    int
    """
    if isinstance(last_online, str):
        if last_online.lower() in ('day', 'today'):
            last_online_int = 86400  # 3600 * 24
        elif last_online.lower() == 'week':
            last_online_int = 604800  # 3600 * 24 * 7
        elif last_online.lower() == 'month':
            last_online_int = 2678400  # 3600 * 24 * 31
        elif last_online.lower() == 'year':
            last_online_int = 31536000  # 3600 * 24 * 365
        elif last_online.lower() == 'decade':
            last_online_int = 315360000  # 3600 * 24 * 365 * 10
        else:  # Defaults any other strings to last hour
            last_online_int = 3600
    else:
        last_online_int = last_online
    return last_online_int
335ed9a37062964b785c75246c9f23f678b4a90e
3,654,065
from datetime import datetime import pkg_resources import requests import json def get_currency_cross_historical_data(currency_cross, from_date, to_date, as_json=False, order='ascending', interval='Daily'): """ This function retrieves recent historical data from the introduced `currency_cross` from Investing via Web Scraping. The resulting data can it either be stored in a :obj:`pandas.DataFrame` or in a :obj:`json` file, with `ascending` or `descending` order. Args: currency_cross (:obj:`str`): name of the currency cross to retrieve recent historical data from. from_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, from where data is going to be retrieved. to_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, until where data is going to be retrieved. as_json (:obj:`bool`, optional): optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`). order (:obj:`str`, optional): optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`). interval (:obj:`str`, optional): value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`. Returns: :obj:`pandas.DataFrame` or :obj:`json`: The function returns a either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved recent data from the specified currency_cross via argument. The dataset contains the open, high, low, close and volume values for the selected currency_cross on market days. The return data is case we use default arguments will look like:: Date || Open | High | Low | Close | Currency -----||------|------|-----|-------|--------- xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxxxx but if we define `as_json=True`, then the output will be:: { name: name, recent: [ dd/mm/yyyy: { 'open': x, 'high': x, 'low': x, 'close': x, 'currency' : x }, ... ] } Raises: ValueError: argument error. IOError: stocks object/file not found or unable to retrieve. RuntimeError: introduced currency_cross does not match any of the indexed ones. ConnectionError: if GET requests does not return 200 status code. IndexError: if currency_cross information was unavailable or not found. 
Examples: >>> investpy.get_currency_cross_historical_data(currency_cross='EUR/USD', from_date='01/01/2018', to_date='01/01/2019') Open High Low Close Currency Date 2018-01-01 1.2003 1.2014 1.1995 1.2010 USD 2018-01-02 1.2013 1.2084 1.2003 1.2059 USD 2018-01-03 1.2058 1.2070 1.2001 1.2014 USD 2018-01-04 1.2015 1.2090 1.2004 1.2068 USD 2018-01-05 1.2068 1.2085 1.2021 1.2030 USD """ if not currency_cross: raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.") if not isinstance(currency_cross, str): raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.") try: datetime.strptime(from_date, '%d/%m/%Y') except ValueError: raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.") try: datetime.strptime(to_date, '%d/%m/%Y') except ValueError: raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.") start_date = datetime.strptime(from_date, '%d/%m/%Y') end_date = datetime.strptime(to_date, '%d/%m/%Y') if start_date >= end_date: raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.") if not isinstance(as_json, bool): raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.") if order not in ['ascending', 'asc', 'descending', 'desc']: raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.") if not interval: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") if not isinstance(interval, str): raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") if interval not in ['Daily', 'Weekly', 'Monthly']: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") date_interval = { 'intervals': [], } flag = True while flag is True: diff = end_date.year - start_date.year if diff > 20: obj = { 'start': start_date.strftime('%m/%d/%Y'), 'end': start_date.replace(year=start_date.year + 20).strftime('%m/%d/%Y'), } date_interval['intervals'].append(obj) start_date = start_date.replace(year=start_date.year + 20) else: obj = { 'start': start_date.strftime('%m/%d/%Y'), 'end': end_date.strftime('%m/%d/%Y'), } date_interval['intervals'].append(obj) flag = False interval_limit = len(date_interval['intervals']) interval_counter = 0 data_flag = False resource_package = 'investpy' resource_path = '/'.join(('resources', 'currency_crosses', 'currency_crosses.csv')) if pkg_resources.resource_exists(resource_package, resource_path): currency_crosses = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path)) else: raise FileNotFoundError("ERR#0060: currency_crosses file not found or errored.") if currency_crosses is None: raise IOError("ERR#0050: currency_crosses not found or unable to retrieve.") currency_cross = currency_cross.strip() currency_cross = currency_cross.lower() if unidecode.unidecode(currency_cross) not in [unidecode.unidecode(value.lower()) for value in currency_crosses['name'].tolist()]: raise RuntimeError("ERR#0054: the introduced currency_cross " + str(currency_cross) + " does not exists.") id_ = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'id'] name = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'name'] currency = currency_crosses.loc[(currency_crosses['name'].str.lower() == 
currency_cross).idxmax(), 'second'] final = list() header = name + ' Historical Data' for index in range(len(date_interval['intervals'])): interval_counter += 1 params = { "curr_id": id_, "smlID": str(randint(1000000, 99999999)), "header": header, "st_date": date_interval['intervals'][index]['start'], "end_date": date_interval['intervals'][index]['end'], "interval_sec": interval, "sort_col": "date", "sort_ord": "DESC", "action": "historical_data" } head = { "User-Agent": get_random(), "X-Requested-With": "XMLHttpRequest", "Accept": "text/html", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", } url = "https://www.investing.com/instruments/HistoricalDataAjax" req = requests.post(url, headers=head, data=params) if req.status_code != 200: raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.") root_ = fromstring(req.text) path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr") result = list() if path_: for elements_ in path_: info = [] for nested_ in elements_.xpath(".//td"): info.append(nested_.get('data-real-value')) if elements_.xpath(".//td")[0].text_content() == 'No results found': if interval_counter < interval_limit: data_flag = False else: raise IndexError("ERR#0055: currency_cross information unavailable or not found.") else: data_flag = True if data_flag is True: currency_cross_date = datetime.fromtimestamp(int(info[0])) currency_cross_date = date(currency_cross_date.year, currency_cross_date.month, currency_cross_date.day) currency_cross_close = float(info[1].replace(',', '')) currency_cross_open = float(info[2].replace(',', '')) currency_cross_high = float(info[3].replace(',', '')) currency_cross_low = float(info[4].replace(',', '')) result.insert(len(result), Data(currency_cross_date, currency_cross_open, currency_cross_high, currency_cross_low, currency_cross_close, None, currency)) if data_flag is True: if order in ['ascending', 'asc']: result = result[::-1] elif order in ['descending', 'desc']: result = result if as_json is True: json_ = {'name': name, 'historical': [value.currency_cross_as_json() for value in result] } final.append(json_) elif as_json is False: df = pd.DataFrame.from_records([value.currency_cross_to_dict() for value in result]) df.set_index('Date', inplace=True) final.append(df) else: raise RuntimeError("ERR#0004: data retrieval error while scraping.") if as_json is True: return json.dumps(final[0], sort_keys=False) elif as_json is False: return pd.concat(final)
6a01f89b128842497e76d0a3497b204ac6641080
3,654,066
def load_target(target_name, base_dir, cloud=False):
    """load_target

    Load target from local storage or cloud.

    Parameters
    ----------
    target_name : str
        target name
    base_dir : str
        project base directory
    cloud : bool, optional
        load from GCS, by default False

    Returns
    -------
    y_train: pd.DataFrame
        target data
    """
    if cloud:
        y_train = load_cloud_target(target_name, base_dir)
    else:
        y_train = load_local_target(target_name, base_dir)

    return y_train
2ea76be87afdf524b45f26e9f8271ec973e0951a
3,654,067
def gradient(pixmap, ca, cb, eff, ncols):
    """
    Returns a gradient with the start and end colors.
    eff should be Gradient.Vertical or Gradient.Horizontal
    """
    x = 0
    y = 0

    rca = ca.red()
    rDiff = cb.red() - rca
    gca = ca.green()
    gDiff = cb.green() - gca
    bca = ca.blue()
    bDiff = cb.blue() - bca

    rl = rca << 16
    gl = gca << 16
    bl = bca << 16

    # Integer (floor) division keeps the original Python 2 behaviour of '/'.
    if eff == Gradient.Vertical:
        rcdelta = (1 << 16) // (pixmap.height() * rDiff)
        gcdelta = (1 << 16) // (pixmap.height() * gDiff)
        bcdelta = (1 << 16) // (pixmap.height() * bDiff)
    else:
        print((1 << 16), pixmap.width() * rDiff)
        rcdelta = (1 << 16) // (pixmap.width() * rDiff)
        gcdelta = (1 << 16) // (pixmap.width() * gDiff)
        bcdelta = (1 << 16) // (pixmap.width() * bDiff)

    p = QPainter(pixmap)

    # these for-loops could be merged, but the if's in the inner loop
    # would make it slow
    if eff == Gradient.Vertical:
        for y in range(pixmap.height()):
            rl += rcdelta
            gl += gcdelta
            bl += bcdelta
            p.setPen(QColor(rl >> 16, gl >> 16, bl >> 16))
            p.drawLine(0, y, pixmap.width() - 1, y)
    else:
        for x in range(pixmap.width()):
            rl += rcdelta
            gl += gcdelta
            bl += bcdelta
            p.setPen(QColor(rl >> 16, gl >> 16, bl >> 16))
            p.drawLine(x, 0, x, pixmap.height() - 1)

    return pixmap
63406959617a7192c35e05b8efc81dcedfa7d54a
3,654,068
def option_to_text(option):
    """Converts, for example, 'no_override' to 'no override'."""
    return option.replace('_', ' ')
4b7febe0c4500aa23c368f83bbb18902057dc378
3,654,069
def login(email, password):
    """
    :desc: Logs a user in.
    :param: email - Email of the user - required
            password - Password of the user - required
    :return: `dict`
    """
    if email == '' or password == '':
        return {'success': False, 'message': 'Email/Password field left blank.'}

    resp = {'success': False}
    data = {'email': email, 'password': password}

    session = get_session()
    session.cookies = LWPCookieJar(filename=COOKIES_FILE_PATH)
    resp_obj = session.post(LOGIN_URL, data=data)

    if resp_obj.status_code == 200:
        if resp_obj.url == BASE_URL:
            session.cookies.save(ignore_expires=True, ignore_discard=True)
            resp['success'] = True
            resp['message'] = 'Successfully Logged In!'
        else:
            resp['message'] = 'Incorrect credentials'
    else:
        resp['message'] = 'Stackoverflow is probably down. Please try again.'

    return resp
3ea350984d2c4206d66136e283b4784e08606352
3,654,070
def _cons8_89(m8, L88, L89, d_gap, k, Cp, h_gap):
    """dz constraint for edge gap sc touching edge, corner gap sc"""
    term1 = 2 * h_gap * L88 / m8 / Cp   # conv to inner/outer ducts
    term2 = k * d_gap / m8 / Cp / L88   # cond to adj bypass edge
    term3 = k * d_gap / m8 / Cp / L89   # cond to adj bypass corner
    return 1 / (term1 + term2 + term3)
b6e8b6331be394e9a10659029143997b097fae86
3,654,071
def categories_split(df):
    """
    Separate the categories into their own columns.
    """
    ohe_categories = pd.DataFrame(df.categories.str.split(';').apply(
        lambda x: {e.split('-')[0]: int(e.split('-')[1]) for e in x}).tolist())
    return df.join(ohe_categories).drop('categories', axis=1)
93e6b1dc384162b63fbf5775d168c0e693829f97
3,654,072
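A small illustration of `categories_split` above, assuming `pd` is pandas and the `categories` column uses the `name-flag` encoding the function expects:

import pandas as pd

df = pd.DataFrame({'id': [1, 2],
                   'categories': ['related-1;request-0', 'related-0;request-1']})
out = categories_split(df)
# out now has columns: id, related, request (the 'categories' column is dropped)
print(out)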
def build_received_request(qparams, variant_id=None, individual_id=None, biosample_id=None):
    """Fills the `receivedRequest` part with the request data"""
    request = {
        'meta': {
            'requestedSchemas': build_requested_schemas(qparams),
            'apiVersion': qparams.apiVersion,
        },
        'query': build_received_query(qparams, variant_id, individual_id, biosample_id),
    }

    return request
bfb0131f3ead563ffd1840119b6f7297a466d4dc
3,654,073
def is_router_bgp_configured_with_four_octet(
    device, neighbor_address, vrf, max_time=35, check_interval=10
):
    """ Verifies that router bgp has been enabled with four octet capability
        and is in the established state

        Args:
            device('obj'): device to check
            vrf('vrf'): vrf to check under
            neighbor_address('str'): neighbor address to match
            max_time('int'): maximum time to wait
            check_interval('int'): how often to check

        Returns:
            True
            False
        Raise:
            None
    """
    log.info(
        "Verifying {} has bgp configured with four octet capability".format(
            device.hostname
        )
    )

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        out = device.parse("show ip bgp neighbors")
        if out:
            if vrf in out.get("vrf", {}):
                for neighbor in out["vrf"][vrf].get("neighbor", {}):
                    if neighbor_address in neighbor:
                        neighbor_dict = out["vrf"][vrf]["neighbor"][neighbor]
                        if (
                            "established"
                            in neighbor_dict.get("session_state", "").lower()
                        ):
                            if "bgp_negotiated_capabilities" in neighbor_dict \
                                    and "advertised and received" in neighbor_dict[
                                        "bgp_negotiated_capabilities"
                                    ].get("four_octets_asn", ""):
                                return True
        timeout.sleep()

    return False
870600a1a5c68d5a4080d8a18966ddc107ae8a72
3,654,074
import six


def cluster_absent(
        name='localhost',
        quiet=None):
    """
    Machine is not running as a cluster node

    quiet:
        execute the command in quiet mode (no output)
    """
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if __salt__['crm.status']():
        ret['result'] = True
        ret['comment'] = 'Cluster is already not running'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Cluster node {} would be removed'.format(name)
        ret['changes']['name'] = name
        return ret

    try:
        # Here starts the actual process
        result = __salt__['crm.cluster_remove'](
            host=name, force=True, quiet=quiet)
        if result:
            ret['changes']['name'] = name
            ret['comment'] = 'Error removing cluster node'
            ret['result'] = False
            return ret

        ret['changes']['name'] = name
        ret['comment'] = 'Cluster node removed'
        ret['result'] = True
        return ret
    except exceptions.CommandExecutionError as err:
        ret['comment'] = six.text_type(err)
        return ret
f19c4c18cd0812ee2f4426a458cfb49e4faba5e0
3,654,076
def link_symbols_in_code_blocks(path, blocks, symbols):
    """Link symbols appearing in a sequence of blocks."""
    return [link_symbols_in_code_block(path, block, symbols) for block in blocks]
4185e9a1c9b0c8ff2748e80390763b089e9f8831
3,654,077
def cem_model_factory(
        env, network=mlp, network_params={},
        input_shape=None,
        min_std=1e-6, init_std=1.0, adaptive_std=False,
        model_file_path=None, name='cem'):
    """
    Model for gradient method
    """

    def build_graph(model, network=network,
                    input_shape=input_shape,
                    network_params=network_params):
        policy = make_policy(
            env, 'pi', model, network_params=network_params,
            input_shape=input_shape,
            init_std=init_std, adaptive_std=adaptive_std,
            min_std=min_std, network=network)
        model['policy'] = policy
        model.add_output_node(policy.distribution.output_node)

        var_list = policy.get_trainable_variables()
        shapes = map(tf_utils.var_shape, var_list)
        total_size = sum(np.prod(shape) for shape in shapes)
        model['theta'] = tf.placeholder(tf.float32, [total_size])

        var_list = policy.get_trainable_variables()
        model['gf'] = tf_utils.flatten_vars(var_list)
        model['sff'] = tf_utils.setfromflat(var_list, model['theta'])

    if model_file_path is not None:
        return Model.load(model_file_path, name)
    return Model(env, build_graph, empty_feed_dict, name=name)
e9327a4f3711e19e71cc16658d6e93acba29da47
3,654,078
def get_job(job_id: UUID) -> Job:
    """
    Get job by ID.

    Args:
        job_id (UUID): ID of the job to be returned.

    Returns:
        Job
    """
    return JobRepository.get_one_by_id(model_id=job_id)
53e70843ce18e77b17e79bac83ba0225d6087e23
3,654,079
import pytz


def localize_datetime(input_df, timezone=DEFAULT_TIMEZONE,
                      tms_gmt_col=DEFAULT_TMS_GMT_COL):
    """
    Convert datetime column from UTC to another timezone.
    """
    tmz = pytz.timezone(timezone)
    df = input_df.copy()
    return (df.set_index(tms_gmt_col)
              .tz_localize(pytz.utc)  # UTC time
              .tz_convert(tmz))
0d6f8638199f0ccfcf61e025b38dbe84d9eab8ff
3,654,081
import contextlib
import socket


def get_available_port() -> int:
    """Finds and returns an available port on the system."""
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(('', 0))
        _, port = sock.getsockname()
        return int(port)
c86de127fb237662052b8ce010e99d271836e1ef
3,654,082
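A brief usage sketch for `get_available_port` above; note the port is only guaranteed free at the moment of the call, so another process could still grab it before you bind:

import socket

port = get_available_port()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', port))  # normally succeeds, since the port was free a moment ago
server.close()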
def prettyprint_float(val, digits):
    """Print a floating-point value in a nice way."""
    format_string = "%." + f"{digits:d}" + "f"
    return (format_string % val).rstrip("0").rstrip(".")
ba62671d9cb8061744fbf1e070e76c31d0ba185d
3,654,083
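Example outputs for `prettyprint_float` above:

assert prettyprint_float(3.14159, 3) == "3.142"
assert prettyprint_float(2.5000, 4) == "2.5"   # trailing zeros stripped
assert prettyprint_float(3.0, 2) == "3"        # trailing dot stripped too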
from datetime import date
from typing import Any
from typing import Dict


def year_page(archive_id: str, year: int) -> Any:
    """
    Get year page for archive.

    Parameters
    ----------
    archive : str
        Must be an arXiv archive identifier.
    year: int
        Must be a two or four digit year.

    Returns
    -------
    dict
        Search result response data.
    int
        HTTP status code.
    dict
        Headers to add to the response.
    """
    thisYear = date.today().year

    if year is None:
        year = thisYear

    if year > thisYear:
        # 307 because year might be valid in the future
        return {}, status.HTTP_307_TEMPORARY_REDIRECT, {'Location': '/'}

    if year < 100:
        if year >= 91:
            year = 1900 + year
        else:
            year = 2000 + year

    if archive_id not in taxonomy.ARCHIVES:
        raise BadRequest("Unknown archive.")
    else:
        archive = taxonomy.ARCHIVES[archive_id]

    listing_service = get_listing_service()
    month_listing = listing_service.monthly_counts(archive_id, year)

    for month in month_listing['month_counts']:
        month['art'] = ascii_art_month(archive_id, month)  # type: ignore
        month['yymm'] = f"{month['year']}-{month['month']:02}"  # type: ignore
        month['url'] = url_for('browse.list_articles',  # type: ignore
                               context=archive_id,
                               subcontext=f"{month['year']}{month['month']:02}")

    response_data: Dict[str, Any] = {
        'archive_id': archive_id,
        'archive': archive,
        'months': month_listing['month_counts'],
        'listing': month_listing,
        'year': str(year),
        'stats_by_year': stats_by_year(archive_id, archive, years_operating(archive), year)
    }
    response_headers: Dict[str, Any] = {}

    response_status = status.HTTP_200_OK

    return response_data, response_status, response_headers
9bd609718d782d3cca185929ebacebd0e353bb10
3,654,084
import scipy


def vert_polyFit2(data, z, bin0, step=1, deg=2):
    """
    Use a vertical polynomial fit to clean up the data.
    """
    data = np.squeeze(data)
    z = np.squeeze(z)

    dz = np.nanmean(np.gradient(np.squeeze(z)))
    bin1 = int(np.ceil(bin0 / dz))

    fits = []
    zFits = []
    bins = []
    for i in range(len(z)):
        if 2 * i + bin1 < len(z):
            bins.append(np.arange(i, 2 * i + bin1 + 1))
            mask = np.isfinite(data[i:2 * i + bin1])
            dataIn = data[i:2 * i + bin1]
            zIn = z[i:2 * i + bin1]
            dataIn = dataIn[mask]
            if dataIn.size == 0:
                fits.append(np.nan)
                zFits.append(np.nan)
            else:
                zIn = zIn[mask]
                zFits.append(np.nanmean(z[i:2 * i + bin1]))
                p = scipy.polyfit(zIn, dataIn, deg)
                fits.append(np.nanmean(scipy.polyval(p, z[i:2 * i + bin1][mask])))

    fits = np.hstack(fits)
    zFits = np.hstack(zFits)
    mask2 = np.isfinite(fits)
    P = scipy.interpolate.interp1d(zFits[mask2], fits[mask2], fill_value='extrapolate')
    fitrev = P(z)

    return fitrev
ceeeac26b9eba625164a055deb96741c6d99702e
3,654,085
import requests


def is_downloadable(url):
    """
    Does the url contain a downloadable resource
    """
    h = requests.head(url, allow_redirects=True)
    header = h.headers
    content_type = header.get('content-type')
    print(content_type)
    if 'text' in content_type.lower():
        return False
    if 'html' in content_type.lower():
        return False
    return True
74ccff9d967a3763c852a23d8775970ac9ff9e10
3,654,086
def dataframe_is_one_query_target_pair(dataframe):
    """
    Make sure there is only one query sequence and reference sequence in the
    given dataframe. Used to check that we aren't aggregating % identity
    numbers across bin alignment pairs.

    :param dataframe:
    :return:
    """
    num_query_bins = len(dataframe['query bin'].unique())
    num_ref_bins = len(dataframe['ref bin'].unique())
    if not num_query_bins == 1:
        "Dataframe has a mix of {} query bins: {}".format(
            num_query_bins, dataframe['query bin'].unique())
    if not num_ref_bins == 1:
        "Dataframe has a mix of {} reference bins: {}".format(
            num_query_bins, dataframe['ref bin'].unique())
    if (num_query_bins == 1) & (num_ref_bins == 1):
        return True
    else:
        return False
8a8aba9f4b2eaaca6971bf5c158d043a033d0ec8
3,654,087
def update_api_key(
    self,
    name: str,
    permission: str,
    expiration: int,
    active: bool,
    key: str = None,
    description: str = None,
    ip_list: str = None,
) -> bool:
    """Update existing API key on Orchestrator

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - apiKey
          - PUT
          - /apiKey/{name}

    :param name: API Key Name
    :type name: str
    :param permission: API Key privileges. Allowed values are
        'net_read_write' for RW and 'net_read' for RO
    :type permission: str
    :param expiration: API Key expiration as a UNIX timestamp. Key will
        automatically become inactive on the expiration date.
    :type expiration: int
    :param active: API Key state is active (True) or inactive (False).
        Inactive keys cannot be used to make requests.
    :type active: bool
    :param key: API Key value, defaults to None
    :type key: str, optional
    :param description: API Key description, defaults to None
    :type description: str, optional
    :param ip_list: List of allowed IPs to make requests with this API Key.
        Leave blank to allow all IPs. Defaults to None
    :type ip_list: str, optional
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    api_key_entry = {
        "name": name,
        "permission": permission,
        "expiration": expiration,
        "active": active,
    }
    if key is not None:
        api_key_entry["key"] = key
    if description is not None:
        api_key_entry["description"] = description
    if ip_list is not None:
        api_key_entry["ip_list"] = ip_list

    return self._put(
        "/apiKey/{}".format(name),
        data=api_key_entry,
        expected_status=[204],
        return_type="bool",
    )
9e37062475c3b83ab86c51355442cf6de0df1c34
3,654,088
def cleanGender(x):
    """
    This is a helper function that will help clean up the gender variable.
    """
    if x in ['female', 'mostly_female']:
        return 'female'
    if x in ['male', 'mostly_male']:
        return 'male'
    if x in ['couple']:
        return 'couple'
    else:
        return 'unknownGender'
23d71f2307aa829312f4a1d2a002ae2b55556050
3,654,089
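A quick check of `cleanGender` above:

assert cleanGender('mostly_female') == 'female'
assert cleanGender('male') == 'male'
assert cleanGender('andy') == 'unknownGender'  # anything unrecognized falls through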
def get_servers():
    """
    Retrieve all the discord servers in the database

    :return: List of servers
    """
    session = Session()
    servers = session.query(Server).all()
    return servers
3953867d18c2e282ee11190a3ee1303126b2394e
3,654,090
def wait_till_postgres_responsive(url):
    """Check if something responds to ``url``"""
    engine = sa.create_engine(url)
    conn = engine.connect()
    conn.close()
    return True
645c98799fa7d0347fc52850b7f3813fec74968c
3,654,091
def get_string_display(attr1, attr2, helper1, helper2, attribute_mode): """ get the attribute mode for string attribute mode can be: 'base', 'full', 'partial', 'masked' Note that some attribute does not have partial mode, in this case, partial mode will return masked mode Remeber to call has_partial_mode function to check if it actually has partial mode! Example: Input: attr1: '1000151475' attr2: '1000151575' helper1: '*******4**' helper2: '*******5**' attribute_mode: 'partial' Output: ['*******<span style="color:red">4</span>**', '*******<span style="color:red">5</span>**'] """ if attribute_mode == 'base': attr1_display = attr1 attr2_display = attr2 return [attr1_display, attr2_display] elif attribute_mode == 'full': if not attr1 or not attr2: if not attr1: attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">' else: attr1_display = attr1 if not attr2: attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">' else: attr2_display = attr2 else: if '*' not in helper1 and '*' not in helper2: attr1_display = attr1 attr2_display = attr2 else: attr1_display = '' attr2_display = '' i = 0 j = 0 k = 0 while k < len(helper1): if helper1[k] == '*': attr1_display += attr1[i] attr2_display += attr2[j] k += 1 i += 1 j += 1 elif k+1 < len(helper1) and i+1 < len(attr1) and j+1 < len(attr2) and \ helper1[k] not in ['*', '_', '?'] and helper1[k+1] not in ['*', '_', '?'] and attr1[i] == attr2[j+1] and attr1[i+1] == attr2[j]: attr1_display += '<span class="transpose_text">' + attr1[i] + attr1[i+1] + '</span>' attr2_display += '<span class="transpose_text">' + attr2[j] + attr2[j+1] + '</span>' k += 2 i += 2 j += 2 elif helper1[k] == '_' or helper1[k] == '?': attr2_display += '<span class="indel_text">' + attr2[j] + '</span>' k += 1 j += 1 elif helper2[k] == '_' or helper2[k] == '?': attr1_display += '<span class="indel_text">' + attr1[i] + '</span>' k += 1 i += 1 else: attr1_display += '<span class="replace_text">' + attr1[i] + '</span>' attr2_display += '<span class="replace_text">' + attr2[j] + '</span>' k += 1 i += 1 j += 1 return [attr1_display, attr2_display] elif attribute_mode == 'partial': if not attr1 or not attr2: if not attr1: attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">' else: attr1_display = '*'*len(attr1) if not attr2: attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">' else: attr2_display = '*'*len(attr2) else: if '*' not in helper1 and '*' not in helper2: attr1_display = len(attr1)*'@' attr2_display = len(attr2)*'&' elif helper1 == helper2: attr1_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">' attr2_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">' else: attr1_display = '' attr2_display = '' i = 0 j = 0 k = 0 while k < len(helper1): if helper1[k] == '*': attr1_display += '*' attr2_display += '*' k += 1 i += 1 j += 1 elif k+1 < len(helper1) and i+1 < len(attr1) and j+1 < len(attr2) and \ helper1[k] not in ['*', '_', '?'] and helper1[k+1] not in ['*', '_', '?'] and attr1[i] == attr2[j+1] and attr1[i+1] == attr2[j]: attr1_display += '<span class="transpose_text">' + attr1[i] + attr1[i+1] + '</span>' attr2_display += '<span class="transpose_text">' + attr2[j] + attr2[j+1] + '</span>' k += 2 i += 2 j += 2 elif helper1[k] == '_' or helper1[k] == '?': attr2_display += '<span class="indel_text">' + attr2[j] + '</span>' k += 1 j += 1 
elif helper2[k] == '_' or helper2[k] == '?': attr1_display += '<span class="indel_text">' + attr1[i] + '</span>' k += 1 i += 1 else: attr1_display += '<span class="replace_text">' + attr1[i] + '</span>' attr2_display += '<span class="replace_text">' + attr2[j] + '</span>' k += 1 i += 1 j += 1 return [attr1_display, attr2_display] elif attribute_mode == 'masked': if not attr1 or not attr2: if not attr1: attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">' else: attr1_display = '*'*len(attr1) if not attr2: attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">' else: attr2_display = '*'*len(attr2) else: if '*' not in helper1 and '*' not in helper2: attr1_display = len(attr1)*'@' attr2_display = len(attr2)*'&' elif helper1 == helper2: attr1_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">' attr2_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">' else: attr1_display = '' attr2_display = '' i = 0 j = 0 k = 0 while k < len(helper1): if helper1[k] == '*': attr1_display += '*' attr2_display += '*' k += 1 i += 1 j += 1 elif k+1 < len(helper1) and i+1 < len(attr1) and j+1 < len(attr2) and \ helper1[k] not in ['*', '_', '?'] and helper1[k+1] not in ['*', '_', '?'] and attr1[i] == attr2[j+1] and attr1[i+1] == attr2[j]: attr1_display += '<span class="transpose_text">' + '@&' + '</span>' attr2_display += '<span class="transpose_text">' + '&@' + '</span>' k += 2 i += 2 j += 2 elif helper1[k] == '_' or helper1[k] == '?': attr2_display += '<span class="indel_text">' + '&' + '</span>' k += 1 j += 1 elif helper2[k] == '_' or helper2[k] == '?': attr1_display += '<span class="indel_text">' + '@' + '</span>' k += 1 i += 1 else: attr1_display += '<span class="replace_text">' + '@' + '</span>' attr2_display += '<span class="replace_text">' + '&' + '</span>' k += 1 i += 1 j += 1 return [attr1_display, attr2_display]
fa61332f82310ece349309f378126a4b3179483f
3,654,092
import re


def is_doi(identifier: str) -> bool:
    """Validates if identifier is a valid DOI

    Args:
        identifier (str): potential doi string

    Returns:
        bool: true if identifier is a valid DOI
    """
    doi_patterns = [
        r"(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?![\"&\'])\S)+)",
        r"(10.\d{4,9}/[-._;()/:A-Z0-9]+)",
        r"(10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d)",
        r"(10.1021/\w\w\d+)",
        r"(10.1207/[\w\d]+\&\d+_\d+)",
    ]
    for pattern in doi_patterns:
        match = bool(re.match(pattern, identifier))
        if match:
            return True
    return False
5c0bfe0527adbf53e89d302ee05feb80d285db64
3,654,093
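A short usage sketch for `is_doi` above (the first identifier matches the general DOI pattern, the second does not):

assert is_doi("10.1000/xyz123") is True
assert is_doi("not-a-doi") is False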
def meshgrid_flatten(*X):
    """
    Functionally same as numpy.meshgrid() with different output format.
    Function np.meshgrid() takes n 1d ndarrays of size N_1,...,N_n, and
    returns X_1,...,X_n n-dimensional arrays of shape (N_1, N_2,... N_n).
    This returns instead a 2d array of shape (N_1*...*N_n, n).
    """
    if len(X) == 1:  # Because np.meshgrid() can't handle
        return np.array([X[0]]).T  # less than 2 arguments

    return np.vstack(
        map(lambda x: x.flatten(),
            mylib_meshgrid.meshgrid(*X, indexing='ij'))
    ).T
a7136a7a4dadb6449fd5079c78f15b13da3721dd
3,654,094
from typing import Union
import copy


def transform_scale(
    features,
    factor: float,
    origin: Union[str, list] = "centroid",
    mutate: bool = False,
):
    """
    Scale a GeoJSON from a given point by a factor of scaling
    (ex: factor=2 would make the GeoJSON 200% larger).
    If a FeatureCollection is provided, the origin point will be
    calculated based on each individual Feature.

    :param features: GeoJSON to be scaled
    :param factor: of scaling, positive or negative values greater than 0
    :param origin: Point from which the scaling will occur
        (string options: sw/se/nw/ne/center/centroid)
    :param mutate: allows GeoJSON input to be mutated
        (significant performance increase if true)
    :return: Scaled Geojson

    Example :-

    >>> from turfpy.transformation import transform_scale
    >>> from geojson import Polygon, Feature
    >>> f = Feature(geometry=Polygon([[[0,29],[3.5,29],[2.5,32],[0,29]]]))
    >>> transform_scale(f, 3, origin=[0, 29])
    """
    if not features:
        raise Exception("geojson is required")

    if not factor:
        raise Exception("invalid factor")

    if not mutate:
        features = copy.deepcopy(features)

    if features["type"] == "FeatureCollection":

        def _callback_feature_each(feature, feature_index):
            nonlocal factor, origin, features
            features["features"][feature_index] = scale(feature, factor, origin)

        feature_each(features, _callback_feature_each)

        return features

    return scale(features, factor, origin)
bacc6a365dbed0531d4516a736dd9ca2937b8cad
3,654,095
import importlib


def create_agent(opt):
    """Create an agent from the options model, model_params and model_file.

    The input is either of the form
    "parlai.agents.ir_baseline.agents/IrBaselineAgent" (i.e. the path
    followed by the class name) or else just 'IrBaseline' which
    assumes the path above, and a class name suffixed with 'Agent'.
    """
    dir_name = opt['model']
    if ':' in dir_name:
        s = dir_name.split(':')
        module_name = s[0]
        class_name = s[1]
    else:
        module_name = "parlai.agents.%s.agents" % (dir_name)
        words = opt['model'].split('_')
        class_name = ''
        for w in words:
            class_name += (w[0].upper() + w[1:])
        class_name += 'Agent'
        print(class_name)
    my_module = importlib.import_module(module_name)
    model_class = getattr(my_module, class_name)
    return model_class(opt)
6f5793ee0af7ed677f47c27ba5b94ad6f80ea957
3,654,096
def check_version(actver, version, cmp_op):
    """
    Check version string of an active module against a required version.

    If dev/prerelease tags result in TypeError for string-number comparison,
    it is assumed that the dependency is satisfied. Users on dev branches are
    responsible for keeping their own packages up to date.

    Copyright (C) 2013 The IPython Development Team
    Distributed under the terms of the BSD License.
    """
    if isinstance(actver, tuple):
        actver = '.'.join([str(i) for i in actver])

    # Hacks needed so that LooseVersion understands that (for example)
    # version = '3.0.0' is in fact bigger than actver = '3.0.0rc1'
    if is_stable_version(version) and not is_stable_version(actver) and \
            actver.startswith(version) and version != actver:
        version = version + 'zz'
    elif is_stable_version(actver) and not is_stable_version(version) and \
            version.startswith(actver) and version != actver:
        actver = actver + 'zz'

    try:
        if cmp_op == '>':
            return LooseVersion(actver) > LooseVersion(version)
        elif cmp_op == '>=':
            return LooseVersion(actver) >= LooseVersion(version)
        elif cmp_op == '=':
            return LooseVersion(actver) == LooseVersion(version)
        elif cmp_op == '<':
            return LooseVersion(actver) < LooseVersion(version)
        elif cmp_op == '<=':
            return LooseVersion(actver) <= LooseVersion(version)
        else:
            return False
    except TypeError:
        return True
4d2cf92c412659044ad226aeeadb9145ceb75241
3,654,097
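Example calls for `check_version` above; this assumes `LooseVersion` (e.g. from `distutils.version`) and the module's own `is_stable_version` helper are in scope, as in the original source:

assert check_version('3.0.1', '3.0.0', '>')      # plain stable versions compare numerically
assert check_version('3.0.0', '3.0.0rc1', '>=')  # prerelease handled by the 'zz' suffix hack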
def dfa_intersection(dfa_1: dict, dfa_2: dict) -> dict:
    """ Returns a DFA accepting the intersection of the DFAs in input.

    Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and
    :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two DFAs.
    Then there is a DFA :math:`A_∧` that runs simultaneously both
    :math:`A_1` and :math:`A_2` on the input word and accepts when
    both accept.
    It is defined as:

    :math:`A_∧ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, F_1 × F_2 )`

    where

    :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff
    :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2} = ρ_2 (s_2 , a)`

    Implementation proposed guarantees the resulting DFA has only
    **reachable** states.

    :param dict dfa_1: first input DFA;
    :param dict dfa_2: second input DFA.
    :return: *(dict)* representing the intersected DFA.
    """
    intersection = {
        'alphabet': dfa_1['alphabet'].intersection(dfa_2['alphabet']),
        'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
        'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
        'accepting_states': set(),
        'transitions': dict()
    }

    boundary = set()
    boundary.add(intersection['initial_state'])
    while boundary:
        (state_dfa_1, state_dfa_2) = boundary.pop()
        if state_dfa_1 in dfa_1['accepting_states'] \
                and state_dfa_2 in dfa_2['accepting_states']:
            intersection['accepting_states'].add((state_dfa_1, state_dfa_2))

        for a in intersection['alphabet']:
            if (state_dfa_1, a) in dfa_1['transitions'] \
                    and (state_dfa_2, a) in dfa_2['transitions']:
                next_state_1 = dfa_1['transitions'][state_dfa_1, a]
                next_state_2 = dfa_2['transitions'][state_dfa_2, a]
                if (next_state_1, next_state_2) not in intersection['states']:
                    intersection['states'].add((next_state_1, next_state_2))
                    boundary.add((next_state_1, next_state_2))
                intersection['transitions'][(state_dfa_1, state_dfa_2), a] = \
                    (next_state_1, next_state_2)

    return intersection
ea69f3cda2bd28f5b70d1724ffdd628daf1beffa
3,654,098
def change_status(request, page_id):
    """
    Switch the status of a page.
    """
    perm = PagePermission(request.user).check('change', method='POST')
    if perm and request.method == 'POST':
        page = Page.objects.get(pk=page_id)
        page.status = int(request.POST['status'])
        page.invalidate()
        page.save()
        return HttpResponse(str(page.status))
    raise Http404
b65775d91c69cf4ac4d5a59d128581011986f1e7
3,654,099
def str2bytes(s):
    """
    Returns byte string representation of product state.

    Parameters
    ----------
    s : str
        Representation of a product state, in terms of a string.
    """
    return bitarray2bytes(str2bitarray(s))
defb9f471ba6108a0d667b6f4e9522c8b6f38649
3,654,100
import re


def find_links(text, image_only=False):
    """
    Find Markdown links in text and return a match object.

    Markdown links are expected to have the form [some txt](A-url.ext)
    or ![Alt text](cool-image.png).

    Parameters
    ----------
    text : str
        Text in which to search for links.
    image_only : bool
        If ``True``, find only markdown image links, i.e. those that
        begin with an exclamation mark.

    Returns
    -------
    list
        List of ``re.Match`` objects, one for each link found. Each object
        has two named groups, 'link_text', which contains the part between
        the square brackets, and 'link_url', which is the URL (or file name
        for an image).
    """
    if image_only:
        markdown_link = \
            re.compile(r"!\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
                       flags=re.MULTILINE)
    else:
        markdown_link = \
            re.compile(r"!?\[(?P<link_text>.+?\n*?.*?)\]\((?P<link_url>.+?)\)",
                       flags=re.MULTILINE)
    groups = [m for m in markdown_link.finditer(text)]
    return groups
5f96672b48d3d911faf2e398c86f622676263d73
3,654,101
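A small demonstration of `find_links` above:

text = "See ![logo](logo.png) and the [docs](https://example.com)."
all_links = find_links(text)
image_links = find_links(text, image_only=True)
assert len(all_links) == 2 and len(image_links) == 1
assert all_links[1].group('link_url') == 'https://example.com'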
def least_one_row(data_frame):
    """
    checking at least one row in dataframe

    Input: pandas dataframe
    Output: True or False
    """
    # A pandas DataFrame cannot be used directly in a boolean context,
    # so check for None and emptiness explicitly.
    if data_frame is not None and not data_frame.empty:
        return True
    return False
a72cbd3d504140547233481ec8340a8510e35f52
3,654,102
import math
from collections import defaultdict


def generate_label_colors(labels: list, colors: list, palette='Set2'):
    """Matches labels with colors

    If there are more labels than colors, repeat and cycle through colors.
    """
    label_colors = defaultdict(dict)
    num_repeats = math.ceil(len(labels) / len(colors))
    for label in enumerate(labels):
        label_colors[label[1]] = (colors * num_repeats)[label[0]]
    return {**label_colors}
8b4b35498d4478604e81987b127ab099ebb0e70b
3,654,104
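A quick example of `generate_label_colors` above cycling colors when labels outnumber them:

assert generate_label_colors(['a', 'b', 'c'], ['red', 'blue']) == {'a': 'red', 'b': 'blue', 'c': 'red'}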
def index():
    """
    View root page function that returns the index page and its data
    """
    # getting top headlines in sources
    topheadlines_sources = get_sources('top-headlines')
    business_sources = get_sources('business')
    entertainment_sources = get_sources('entertainment')

    title = 'Home - Welcome to your online News room'

    # print(topheadlines_sources.articles)
    # search_source = request.args.get(source_query)
    # if search_source:
    #     return redirect(url_for('search', source_name=search_source))
    # else:

    return render_template('index.html',
                           title=title,
                           topheadlines_sources=topheadlines_sources,
                           business_sources=business_sources,
                           entertainment_sources=entertainment_sources)
df3c5d0471cde998f6ea5a0de2b41ab16ef775c6
3,654,106
def start_ltm(tup, taus, w=0.1, add_coh=False, use_cv=False,
              add_const=False, verbose=False, **kwargs):
    """Calculate the lifetime density map for given data.

    Parameters
    ----------
    tup : datatuple
        tuple with wl, t, data
    taus : list of floats
        Used to build the basis vectors.
    w : float, optional
        Sigma used for calculating the basis vectors, by default 0.1.
    add_coh : bool, optional
        If true, coherent contributions are added to the basis.
        By default False.
    use_cv : bool, optional
        Whether to use cross-validation, by default False
    add_const : bool, optional
        Whether to add an explicit constant, by default False
    verbose : bool, optional
        Whether to be verbose, by default False

    Returns
    -------
    tuple of (linear_model, coefs, fit, alphas)
        The linear model is the used sklearn model. Coefs is the array of
        the coefficients, fit contains the resulting fit and alphas is an
        array of the applied alpha values when using cv.
    """
    X = _make_base(tup, taus, w=w, add_const=add_const, add_coh=add_coh)

    if not use_cv:
        mod = lm.ElasticNet(**kwargs, l1_ratio=0.98)
    else:
        mod = lm.ElasticNetCV(**kwargs, l1_ratio=0.98)

    mod.fit_intercept = not add_const
    mod.warm_start = 1

    coefs = np.empty((X.shape[1], tup.data.shape[1]))
    fit = np.empty_like(tup.data)
    alphas = np.empty(tup.data.shape[1])

    for i in range(tup.data.shape[1]):
        if verbose:
            print(i, 'ha', end=';')
        mod.fit(X, tup.data[:, i])
        coefs[:, i] = mod.coef_.copy()
        fit[:, i] = mod.predict(X)
        if hasattr(mod, 'alpha_'):
            alphas[i] = mod.alpha_
    return mod, coefs, fit, alphas
d24d2fdc9740a12766b5424a20c98f4ab14222eb
3,654,107
def manhattan_distance(origin, destination):
    """Return the Manhattan distance between the origin and the destination.

    @type origin: Location
    @type destination: Location
    @rtype: int

    >>> pt1 = Location(1, 2)
    >>> pt2 = Location(3, 4)
    >>> print(manhattan_distance(pt1, pt2))
    4
    """
    return (abs(origin.row - destination.row) +
            abs(origin.column - destination.column))
0bcfd7767e44b0dcc47890dc4bcb2c054abb4bde
3,654,108
from typing import Dict from typing import List import logging def find_best_resampler( features_train: pd.DataFrame, labels_train: pd.DataFrame, parameters: Dict ) -> List: """Compare several resamplers and find the best one to handle imbalanced labels. Args: features_train: Training data of independent features. labels_train: Training data of next month payment default status. parameters: Parameters defined in parameters.yml. Returns: A list containing the best resampler and the search CV results as DataFrame. """ col_dict = _get_column_dtype(features_train) if labels_train.shape[0] == features_train.shape[0]: labels_train.index = features_train.index # Create transformers for each dtype transformers = [ ("num_n_trans", StandardScaler(), col_dict["num_normal"]), ( "num_s_trans", QuantileTransformer(random_state=parameters["random_state"]), col_dict["num_skewed"], ), ("ordi_trans", "passthrough", col_dict["ordinal"]), ("bool_pass", "passthrough", col_dict["boolean"]), ( "cat_trans", JamesSteinEncoder(random_state=parameters["random_state"], return_df=False), col_dict["category"], ), ] transformers = _remove_unused_transformers(transformers) # Combine the transformers as preprocessor preprocessor = ColumnTransformer(transformers=transformers) num_cols = col_dict["num_normal"] + col_dict["num_skewed"] nomi_cols = col_dict["ordinal"] + col_dict["boolean"] + col_dict["category"] # Extract target target_train = labels_train["DEFAULT_PAY"] # Initalize samplers smotenc_smpl = SMOTENC( categorical_features=[ x for x in range(len(num_cols), len(num_cols) + len(nomi_cols)) ], n_jobs=-1, ) ro_smpl = RandomOverSampler() enn_smpl = EditedNearestNeighbours(n_jobs=-1) tl_smpl = TomekLinks(n_jobs=-1) ru_smpl = RandomUnderSampler() # Initalize classifier clf = ExtraTreesClassifier(max_depth=10, n_jobs=-1) # Create parameter grid param_grid = { "sampler": [None, smotenc_smpl, ro_smpl, enn_smpl, tl_smpl, ru_smpl], "classifier": [clf], } # Create classifier pipeline resampler = PipelineImb( steps=[ ("preprocessor", preprocessor), ("sampler", smotenc_smpl), ("classifier", clf), ] ) # Start grid search search_cv = GridSearchCV( resampler, param_grid=param_grid, scoring=[ "precision", "recall", "f1", "roc_auc", ], refit="f1", error_score=0, verbose=2, ) timer_start = timer() search_cv.fit(features_train, target_train) timer_end = timer() # Log search duration logger = logging.getLogger(__name__) logger.info( f"Best resampler search elapsed time : {_get_time_delta(timer_end - timer_start)}." ) # Save search result as DataFrame search_results = pd.DataFrame(search_cv.cv_results_).sort_values( by=["rank_test_f1"] ) # Remove unused steps from resampler resampler = search_cv.best_estimator_ resampler.set_params( steps=_remove_unused_steps(steps=resampler.steps, remove_clf=True) ) return [resampler, search_results]
29c14261e0c5131c8fad653bb286d03c73b8ddd7
3,654,110
def grid(dim, num): """Build a one-dim grid of num points""" if dim.type == "categorical": return categorical_grid(dim, num) elif dim.type == "integer": return discrete_grid(dim, num) elif dim.type == "real": return real_grid(dim, num) elif dim.type == "fidelity": return fidelity_grid(dim, num) else: raise TypeError( "Grid Search only supports `real`, `integer`, `categorical` and `fidelity`: " f"`{dim.type}`\n" "For more information on dimension types, see " "https://orion.readthedocs.io/en/stable/user/searchspace.html" )
1d59936882cd15372e0c13c02d80cbe739650134
3,654,111
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types): """ Path of functions between two types """ if not isinstance(source, type): source = type(source) if not isinstance(target, type): target = type(target) for cls in concatv(source.mro(), _virtual_superclasses): if cls in graph: source = cls break # If both source and target are Out-Of-Core types then restrict ourselves # to the graph of out-of-core types if ooc_types: oocs = tuple(ooc_types) if issubclass(source, oocs) and issubclass(target, oocs): graph = graph.subgraph([n for n in graph.nodes() if issubclass(n, oocs)]) with without_edges(graph, excluded_edges) as g: pth = nx.shortest_path(g, source=source, target=target, weight='cost') edge = adjacency(graph) def path_part(src, tgt): node = edge[src][tgt] return PathPart(src, tgt, node['func'], node['cost']) return map(path_part, pth, pth[1:])
6bdf2adbfc754dc5350406570bc865ac17c088ce
3,654,112
def is_resource_sufficient(order_ingredients):
    """Return True when every required ingredient is available, False otherwise."""
    for item in order_ingredients:
        if order_ingredients[item] > resources[item]:
            print(f"Sorry, not enough {item} to make coffee.")
            return False
    return True
758ab17760aac8f32b4d5fb93e42e01bc780507b
3,654,113
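A usage sketch for is_resource_sufficient above. The function reads a module-level resources dict, so a hypothetical inventory is defined here for illustration; the quantities are made up.

resources = {"water": 300, "milk": 200, "coffee": 100}   # hypothetical machine inventory

latte = {"water": 200, "milk": 150, "coffee": 24}
print(is_resource_sufficient(latte))       # True: every required amount fits the inventory

big_order = {"water": 500, "milk": 150, "coffee": 24}
print(is_resource_sufficient(big_order))   # False, after printing which ingredient is short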
import requests


def get_gh_releases_api(project, version=None):
    """Fetch release data for a project from the GitHub releases API.

    If version is given, only that release is requested; otherwise all
    releases of the repository are returned as parsed JSON.
    """
    # https://developer.github.com/v3/auth/
    # self.headers = {'Authorization': 'token %s' % self.api_token}
    # https://api.github.com/repos/pygame/stuntcat/releases/latest
    repo = get_repo_from_url(project.github_repo)
    if not repo:
        return

    url = f'https://api.github.com/repos/{repo}/releases'
    if version is not None:
        url += f'/{version}'

    if Config.GITHUB_RELEASES_OAUTH is None:
        headers = {}
    else:
        headers = {'Authorization': 'token %s' % Config.GITHUB_RELEASES_OAUTH}

    resp = requests.get(
        url,
        headers=headers
    )
    if resp.status_code != 200:
        raise ValueError('github api failed')

    data = resp.json()
    return data
348857ab557277f7b26cb93866284ac899746524
3,654,114
import random import logging def weak_move(board): """Weak AI - makes a random valid move. Args: board: (Board) The game board. Returns: Array: Our chosen move. """ valid_moves = _get_moves(board, Square.black) # Return a random valid move our_move = valid_moves[random.randrange(0, len(valid_moves))] logging.info('Weak AI chose r%sc%s', our_move[0], our_move[1]) return our_move
6c978b58cca58baadaab5417b27adbf4444d59ff
3,654,115
def flow_to_image(flow): """ Input: flow: Output: Img array: Description: Transfer flow map to image. Part of code forked from flownet. """ out = [] maxu = -999. maxv = -999. minu = 999. minv = 999. maxrad = -1 for i in range(flow.shape[0]): u = flow[i, :, :, 0] v = flow[i, :, :, 1] idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7) u[idxunknow] = 0 v[idxunknow] = 0 maxu = max(maxu, np.max(u)) minu = min(minu, np.min(u)) maxv = max(maxv, np.max(v)) minv = min(minv, np.min(v)) rad = np.sqrt(u ** 2 + v ** 2) maxrad = max(maxrad, np.max(rad)) u = u / (maxrad + np.finfo(float).eps) v = v / (maxrad + np.finfo(float).eps) img = compute_color(u, v) out.append(img) return np.float32(np.uint8(out))
b7ed9cf684b4b818397f0329f3c7de1dbfa2ecd8
3,654,116
def parse_model_value(value, context): """ do interpolation first from context, "x is {size}" with size = 5 will be interpolated to "x is 5" then return interpolated string :param value: :param context: :return: """ return value.format(**context)
58cee6092bc03debe636ae8fa47878727457d334
3,654,117
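A quick example of parse_model_value above; it is simply str.format driven by a context dict.

context = {"size": 5, "unit": "cm"}
print(parse_model_value("x is {size}{unit}", context))   # -> "x is 5cm"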
def apply_tropomi_operator( filename, n_elements, gc_startdate, gc_enddate, xlim, ylim, gc_cache, build_jacobian, sensi_cache, ): """ Apply the tropomi operator to map GEOS-Chem methane data to TROPOMI observation space. Arguments filename [str] : TROPOMI netcdf data file to read n_elements [int] : Number of state vector elements gc_startdate [datetime64] : First day of inversion period, for GEOS-Chem and TROPOMI gc_enddate [datetime64] : Last day of inversion period, for GEOS-Chem and TROPOMI xlim [float] : Longitude bounds for simulation domain ylim [float] : Latitude bounds for simulation domain gc_cache [str] : Path to GEOS-Chem output data build_jacobian [log] : Are we trying to map GEOS-Chem sensitivities to TROPOMI observation space? sensi_cache [str] : If build_jacobian=True, this is the path to the GEOS-Chem sensitivity data Returns output [dict] : Dictionary with one or two fields: - obs_GC : GEOS-Chem and TROPOMI methane data - TROPOMI methane - GEOS-Chem methane - TROPOMI lat, lon - TROPOMI lat index, lon index If build_jacobian=True, also include: - K : Jacobian matrix """ # Read TROPOMI data TROPOMI = read_tropomi(filename) if TROPOMI == None: print(f"Skipping {filename} due to file processing issue.") return TROPOMI # We're only going to consider data within lat/lon/time bounds, with QA > 0.5, and with safe surface albedo values sat_ind = filter_tropomi(TROPOMI, xlim, ylim, gc_startdate, gc_enddate) # Number of TROPOMI observations n_obs = len(sat_ind[0]) print("Found", n_obs, "TROPOMI observations.") # If need to build Jacobian from GEOS-Chem perturbation simulation sensitivity data: if build_jacobian: # Initialize Jacobian K jacobian_K = np.zeros([n_obs, n_elements], dtype=np.float32) jacobian_K.fill(np.nan) # Initialize a list to store the dates we want to look at all_strdate = [] # For each TROPOMI observation for k in range(n_obs): # Get the date and hour iSat = sat_ind[0][k] # lat index jSat = sat_ind[1][k] # lon index time = pd.to_datetime(str(TROPOMI["utctime"][iSat])) strdate = time.round("60min").strftime("%Y%m%d_%H") all_strdate.append(strdate) all_strdate = list(set(all_strdate)) # Read GEOS_Chem data for the dates of interest all_date_gc = read_all_geoschem(all_strdate, gc_cache, build_jacobian, sensi_cache) # Initialize array with n_obs rows and 6 columns. 
    # Columns are TROPOMI CH4, GEOSChem CH4, longitude, latitude, II, JJ
    obs_GC = np.zeros([n_obs, 6], dtype=np.float32)
    obs_GC.fill(np.nan)

    # For each TROPOMI observation:
    for k in range(n_obs):

        # Get GEOS-Chem data for the date of the observation:
        iSat = sat_ind[0][k]
        jSat = sat_ind[1][k]
        p_sat = TROPOMI["pressures"][iSat, jSat, :]
        dry_air_subcolumns = TROPOMI["dry_air_subcolumns"][iSat, jSat, :]  # mol m-2
        apriori = TROPOMI["methane_profile_apriori"][iSat, jSat, :]  # mol m-2
        avkern = TROPOMI["column_AK"][iSat, jSat, :]
        time = pd.to_datetime(str(TROPOMI["utctime"][iSat]))
        strdate = time.round("60min").strftime("%Y%m%d_%H")
        GEOSCHEM = all_date_gc[strdate]

        # Find GEOS-Chem lats & lons closest to the corners of the TROPOMI pixel
        longitude_bounds = TROPOMI["longitude_bounds"][iSat, jSat, :]
        latitude_bounds = TROPOMI["latitude_bounds"][iSat, jSat, :]
        corners_lon_index = []
        corners_lat_index = []
        for l in range(4):
            iGC = nearest_loc(longitude_bounds[l], GEOSCHEM["lon"])
            jGC = nearest_loc(latitude_bounds[l], GEOSCHEM["lat"])
            corners_lon_index.append(iGC)
            corners_lat_index.append(jGC)
        # If the tolerance in nearest_loc() is not satisfied, skip the observation
        if np.nan in corners_lon_index + corners_lat_index:
            continue
        # Get lat/lon indexes and coordinates of GEOS-Chem grid cells closest to the TROPOMI corners
        ij_GC = [(x, y) for x in set(corners_lon_index) for y in set(corners_lat_index)]
        gc_coords = [(GEOSCHEM["lon"][i], GEOSCHEM["lat"][j]) for i, j in ij_GC]

        # Compute the overlapping area between the TROPOMI pixel and GEOS-Chem grid cells it touches
        overlap_area = np.zeros(len(gc_coords))
        dlon = GEOSCHEM["lon"][1] - GEOSCHEM["lon"][0]
        dlat = GEOSCHEM["lat"][1] - GEOSCHEM["lat"][0]
        # Polygon representing TROPOMI pixel
        polygon_tropomi = Polygon(np.column_stack((longitude_bounds, latitude_bounds)))
        # For each GEOS-Chem grid cell that touches the TROPOMI pixel:
        for gridcellIndex in range(len(gc_coords)):
            # Define polygon representing the GEOS-Chem grid cell
            coords = gc_coords[gridcellIndex]
            geoschem_corners_lon = [
                coords[0] - dlon / 2,
                coords[0] + dlon / 2,
                coords[0] + dlon / 2,
                coords[0] - dlon / 2,
            ]
            geoschem_corners_lat = [
                coords[1] - dlat / 2,
                coords[1] - dlat / 2,
                coords[1] + dlat / 2,
                coords[1] + dlat / 2,
            ]
            polygon_geoschem = Polygon(
                np.column_stack((geoschem_corners_lon, geoschem_corners_lat))
            )
            # Calculate overlapping area as the intersection of the two polygons
            if polygon_geoschem.intersects(polygon_tropomi):
                overlap_area[gridcellIndex] = polygon_tropomi.intersection(
                    polygon_geoschem
                ).area

        # If there is no overlap between GEOS-Chem and TROPOMI, skip to next observation:
        if sum(overlap_area) == 0:
            continue

        # =======================================================
        #       Map GEOS-Chem to TROPOMI observation space
        # =======================================================

        # Otherwise, initialize tropomi virtual xch4 and virtual sensitivity as zero
        area_weighted_virtual_tropomi = 0  # virtual tropomi xch4
        area_weighted_virtual_tropomi_sensitivity = 0  # virtual tropomi sensitivity

        # For each GEOS-Chem grid cell that touches the TROPOMI pixel:
        for gridcellIndex in range(len(gc_coords)):

            # Get GEOS-Chem lat/lon indices for the cell
            iGC, jGC = ij_GC[gridcellIndex]

            # Get GEOS-Chem pressure edges for the cell
            p_gc = GEOSCHEM["PEDGE"][iGC, jGC, :]

            # Get GEOS-Chem methane for the cell
            gc_CH4 = GEOSCHEM["CH4"][iGC, jGC, :]

            # Get merged GEOS-Chem/TROPOMI pressure grid for the cell
            merged = merge_pressure_grids(p_sat, p_gc)

            # Remap GEOS-Chem methane to TROPOMI pressure levels
            sat_CH4 = remap(
                gc_CH4,
merged["data_type"], merged["p_merge"], merged["edge_index"], merged["first_gc_edge"], ) # ppb # Convert ppb to mol m-2 sat_CH4_molm2 = sat_CH4 * 1e-9 * dry_air_subcolumns # mol m-2 # Derive the column-averaged XCH4 that TROPOMI would see over this ground cell # using eq. 46 from TROPOMI Methane ATBD, Hasekamp et al. 2019 virtual_tropomi_gridcellIndex = ( sum(apriori + avkern * (sat_CH4_molm2 - apriori)) / sum(dry_air_subcolumns) * 1e9 ) # ppb # Weight by overlapping area (to be divided out later) and add to sum area_weighted_virtual_tropomi += ( overlap_area[gridcellIndex] * virtual_tropomi_gridcellIndex ) # ppb m2 # If building Jacobian matrix from GEOS-Chem perturbation simulation sensitivity data: if build_jacobian: # Get GEOS-Chem perturbation sensitivities at this lat/lon, for all vertical levels and state vector elements sensi_lonlat = GEOSCHEM["Sensitivities"][iGC, jGC, :, :] # Map the sensitivities to TROPOMI pressure levels sat_deltaCH4 = remap_sensitivities( sensi_lonlat, merged["data_type"], merged["p_merge"], merged["edge_index"], merged["first_gc_edge"], ) # mixing ratio, unitless # Tile the TROPOMI averaging kernel avkern_tiled = np.transpose(np.tile(avkern, (n_elements, 1))) # Tile the TROPOMI dry air subcolumns dry_air_subcolumns_tiled = np.transpose( np.tile(dry_air_subcolumns, (n_elements, 1)) ) # mol m-2 # Derive the change in column-averaged XCH4 that TROPOMI would see over this ground cell tropomi_sensitivity_gridcellIndex = np.sum( avkern_tiled * sat_deltaCH4 * dry_air_subcolumns_tiled, 0 ) / sum( dry_air_subcolumns ) # mixing ratio, unitless # Weight by overlapping area (to be divided out later) and add to sum area_weighted_virtual_tropomi_sensitivity += ( overlap_area[gridcellIndex] * tropomi_sensitivity_gridcellIndex ) # m2 # Compute virtual TROPOMI observation as weighted mean by overlapping area # i.e., need to divide out area [m2] from the previous step virtual_tropomi = area_weighted_virtual_tropomi / sum(overlap_area) # Save actual and virtual TROPOMI data obs_GC[k, 0] = TROPOMI["methane"][ iSat, jSat ] # Actual TROPOMI methane column observation obs_GC[k, 1] = virtual_tropomi # Virtual TROPOMI methane column observation obs_GC[k, 2] = TROPOMI["longitude"][iSat, jSat] # TROPOMI longitude obs_GC[k, 3] = TROPOMI["latitude"][iSat, jSat] # TROPOMI latitude obs_GC[k, 4] = iSat # TROPOMI index of longitude obs_GC[k, 5] = jSat # TROPOMI index of latitude if build_jacobian: # Compute TROPOMI sensitivity as weighted mean by overlapping area # i.e., need to divide out area [m2] from the previous step jacobian_K[k, :] = area_weighted_virtual_tropomi_sensitivity / sum( overlap_area ) # Output output = {} # Always return the coincident TROPOMI and GEOS-Chem data output["obs_GC"] = obs_GC # Optionally return the Jacobian if build_jacobian: output["K"] = jacobian_K return output
c449ddaf8113a3adfcd0e501cacc245bcf0af495
3,654,118
import yaml from typing import cast def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike: """Load an analysis configuration from a file. Args: yaml: YAML object to use in loading the configuration. filename: Filename of the YAML configuration file. Returns: dict-like object containing the loaded configuration """ with open(filename, "r") as f: config = yaml.load(f) return cast(DictLike, config)
6c3b9b54b6e22b40c61c901b2bcb3b6af4847214
3,654,119
def device_list(request): """ :param request: :return: """ device_list = True list = Device.objects.all() return render(request, "back/device_list.html", locals())
f4892f40831d25182b55414a666fbd62d6d978ef
3,654,120
def L008_eval(segment, raw_stack, **kwargs): """ This is a slightly odd one, because we'll almost always evaluate from a point a few places after the problem site """ # We need at least two segments behind us for this to work if len(raw_stack) < 2: return True else: cm1 = raw_stack[-1] cm2 = raw_stack[-2] if cm2.name == 'comma': if cm1.name not in ['whitespace', 'newline']: # comma followed by something that isn't whitespace! return cm2 elif cm1.raw not in ['\n', ' '] and not segment.is_comment: return cm1 return True
71c42999ffc76bd28a61b640cf85086b0b9e8d69
3,654,121
def overwrite_ruffus_args(args, config): """ :param args: :param config: :return: """ if config.has_section('Ruffus'): cmdargs = dict() cmdargs['draw_horizontally'] = bool cmdargs['flowchart'] = str cmdargs['flowchart_format'] = str cmdargs['forced_tasks'] = lambda x: x.split() cmdargs['history_file'] = str cmdargs['jobs'] = int cmdargs['just_print'] = bool cmdargs['key_legend_in_graph'] = bool cmdargs['log_file'] = str cmdargs['recreate_database'] = bool cmdargs['target_tasks'] = lambda x: x.split() cmdargs['touch_files_only'] = bool cmdargs['use_threads'] = bool cmdargs['verbose'] = lambda x: x.split() for k, v in config.items('Ruffus'): try: args.__setattr__(k, cmdargs[k](v)) except KeyError: pass return args
6f947c362a37bfdc6df53c861783604999621a88
3,654,122
def read_sfr_df(): """Reads and prepares the sfr_df Parameters: Returns: sfr_df(pd.DataFrame): dataframe of the fits file mosdef_sfrs_latest.fits """ sfr_df = read_file(imd.loc_sfrs_latest) sfr_df['FIELD_STR'] = [sfr_df.iloc[i]['FIELD'].decode( "utf-8").rstrip() for i in range(len(sfr_df))] return sfr_df
9d0d16929ffd5043853096c01cafa00747104b9f
3,654,123
def redshift(x, vo=0., ve=0., def_wlog=False):
    """
    x: The measured wavelength.
    vo: Speed of the observer [km/s].
    ve: Speed of the emitter [km/s].

    Returns:
      The emitted wavelength l'.

    Notes:
      f_m = f_e (Wright & Eastman 2014)
    """
    if np.isnan(vo):
        vo = 0   # propagate nan as zero (@calibration in fib B)
    a = (1.0 + vo/c) / (1.0 + ve/c)
    if def_wlog:
        return x + np.log(a)   # logarithmic
        # return x + a         # logarithmic + approximation v << c
    else:
        return x * a
        # return x / (1.0 - v/c)
0dee71d862d2dc4252033964a9adcb4428c5dfa9
3,654,124
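A usage sketch for redshift above. It assumes numpy is imported as np and that the module-level speed of light c is given in km/s; the constant below is a hypothetical stand-in for that module value.

import numpy as np

c = 299792.458                       # km/s; stand-in for the module-level constant
wl = np.array([5000.0, 6000.0])      # measured wavelengths

# Observer moving at +30 km/s, emitter at rest: wavelengths scale by (1 + vo/c)
print(redshift(wl, vo=30.0, ve=0.0))

# With def_wlog=True the input is ln(wavelength), so the shift becomes additive
print(redshift(np.log(wl), vo=30.0, ve=0.0, def_wlog=True))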
import mmap def overlay_spectra_plot(array, nrow=5,ncol=5,**kwargs): """ Overlay spectra on a collapsed cube. Parameters ---------- array : 3D numpy array nrow : int Number of rows in the figure. ncol : int Number of columns in the figure. **kwargs : dict Keyword arguments passed to `ax.plot` for the spectra Returns ------- fig : matplotlib.figure.Figure The figure object. """ cube = np.nan_to_num(array) fig,ax = plt.subplots(subplot_kw={'projection':mmap.wcs},figsize=(10,10)) fig.set_constrained_layout(False) collapsed_cube = np.nanmean(cube,axis=2) vmin,vmax = np.percentile(collapsed_cube[collapsed_cube>0], [0.1,99.9]) ax.imshow(collapsed_cube,cmap='Greys',norm=mpl.colors.LogNorm(vmin=vmin,vmax=vmin)) w = 1/ncol # in figure coords h = 1/nrow # in figure coords dr,dc = collapsed_cube.shape # create grid of inset_axes on figure for i in range(nrow): for j in range(ncol): b,l = i*h, j*w #print(f'left:{l:0.1f} col: {j} bottom:{b:0.1f} row:{i}') bl = [b,l] ax2 = ax.inset_axes([l,b,w,h]) ax2.set_xticks([]) ax2.set_yticks([]) ax2.set_facecolor('none') #ax.add_patch(mpl.patches.Rectangle([l,b],w,h,transform=ax.transAxes,color='r',alpha=0.5)) #ju.annotate(f'row:{i} col:{j}',l,b,ha='left',va='bottom',ax=ax,transform='axes') #print(f'{int(b*dr)}:{int((b+h)*dr)},{int(l*dc)}:{int((l+w)*dc)}') line = np.nanmean(mmap.co[sl][int(b*dr):int((b+h)*dr),int(l*dc):int((l+w)*dc),vsl],axis=(0,1)) ax2.plot(mmap.v[vsl],ju.scale_ptp(line),'r',lw=1,**kwargs) ax2.set_ylim(ax2.get_ylim()[0],max(ax2.get_ylim()[1],.3)) #ax2.set_axis_off() #ax.add_patch(mpl.patches.Rectangle([bl[0],bl[1]],w*dc,h*dr,transform=ax.transData,alpha=0.25)) return fig
8abbbbe7667c57bea50575a58bf11c3b080c8608
3,654,125
def digest_from_rsa_scheme(scheme, hash_library=DEFAULT_HASH_LIBRARY): """ <Purpose> Get digest object from RSA scheme. <Arguments> scheme: A string that indicates the signature scheme used to generate 'signature'. Currently supported RSA schemes are defined in `securesystemslib.keys.RSA_SIGNATURE_SCHEMES` hash_library: The crypto library to use for the given hash algorithm (e.g., 'hashlib'). <Exceptions> securesystemslib.exceptions.FormatError, if the arguments are improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if an unsupported hashing algorithm is specified, or digest could not be generated with given the algorithm. securesystemslib.exceptions.UnsupportedLibraryError, if an unsupported library was requested via 'hash_library'. <Side Effects> None. <Returns> Digest object e.g. hashlib.new(algorithm) or PycaDiggestWrapper object """ # Are the arguments properly formatted? If not, raise # 'securesystemslib.exceptions.FormatError'. securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme) # Get hash algorithm from rsa scheme (hash algorithm id is specified after # the last dash; e.g. rsassa-pss-sha256 -> sha256) hash_algorithm = scheme.split('-')[-1] return digest(hash_algorithm, hash_library)
6eaf10657a0e80f2ddfa5eacbcc1bac72437ca51
3,654,126
def table(content, accesskey:str ="", class_: str ="", contenteditable: str ="", data_key: str="", data_value: str="", dir_: str="", draggable: str="", hidden: str="", id_: str="", lang: str="", spellcheck: str="", style: str="", tabindex: str="", title: str="", translate: str=""): """ Returns a table.\n `content`: Contents of the table.\n """ g_args = global_args(accesskey, class_, contenteditable, data_key, data_value, dir_, draggable, hidden, id_, lang, spellcheck, style, tabindex, title, translate) return f"<table {g_args}>{content}</table>\n"
b27cf1b1897bdbc764fff76edc2e53fa0aca7861
3,654,128
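A usage sketch for table above; global_args is a helper from the same module that renders the global HTML attributes, so this assumes that helper is in scope.

rows = "<tr><th>Name</th><th>Qty</th></tr><tr><td>Coffee</td><td>2</td></tr>"
html = table(rows, id_="orders", class_="striped")
print(html)   # "<table ...>" with the id/class attributes, the rows, and "</table>"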
def _ev_match( output_dir, last_acceptable_entry_index, certificate, entry_type, extra_data, certificate_index): """Matcher function for the scanner. Returns the certificate's hash if it is a valid, non-expired, EV certificate, None otherwise.""" # Only generate whitelist for non-precertificates. It is expected that if # a precertificate was submitted then the issued SCT would be embedded # in the final certificate. if entry_type != client_pb2.X509_ENTRY: return None # No point in including expired certificates. if certificate.is_expired(): return None # Do not include entries beyond the last entry included in the whitelist # generated on January 1st, 2015. if certificate_index > last_acceptable_entry_index: return None # Only include certificates that have an EV OID. matching_policies = find_matching_policies(certificate) if not matching_policies: return None # Removed the requirement that the root of the chain matches the root that # should be used for the EV policy OID. # See https://code.google.com/p/chromium/issues/detail?id=524635 for # details. # Matching certificate if output_dir: _write_cert_and_chain( output_dir, certificate, extra_data, certificate_index) return calculate_certificate_hash(certificate)
acd8416546d5f687fd1bfc1f0edfc099cde4408d
3,654,129
def axis_ratio_disklike(scale=0.3, truncate=0.2): """Sample (one minus) the axis ratio of a disk-like galaxy from the Rayleigh distribution Parameters ---------- scale : float scale of the Rayleigh distribution; the bigger, the smaller the axis ratio truncate : float the minimum value of the axis ratio Note ---- The default parameters are used in Lenspop ([1]_) and are expected for elliptical sources. References ---------- .. [1] Collett, Thomas E. "The population of galaxy–galaxy strong lenses in forthcoming optical imaging surveys." The Astrophysical Journal 811.1 (2015): 20. Returns ------- float the axis ratio """ q = 0.0 while q < truncate: q = 1.0 - np.random.rayleigh(scale, size=None) return q
d001ef0b2f5896f4e7f04f314cd4e71ffd97a277
3,654,130
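A small example of axis_ratio_disklike above, assuming numpy is imported as np in the defining module; it draws a few axis ratios and checks they respect the truncation floor.

import numpy as np

np.random.seed(0)                       # for a reproducible illustration
q_samples = [axis_ratio_disklike(scale=0.3, truncate=0.2) for _ in range(5)]
print(q_samples)                        # five draws, each guaranteed to be >= 0.2
assert all(q >= 0.2 for q in q_samples)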
import numpy


def rk4(y0, t0, te, N, deriv, filename=None):
    """
    General RK4 driver for N coupled differential eq's, fixed stepsize

    Input:
     - y0: Vector containing initial values for y
     - t0: Initial time
     - te: Ending time
     - N: Number of steps
     - deriv: See rk4_step
     - filename: Optional, use if you want to write data to file at each step.
       Format used: t y[0] y[1] ... (%10.15E)

    Output:
    If filename=None, return tuple containing:
     - time: Array of times at which it has iterated over
     - yout: N*len(y0) numpy array containing y for each timestep
    If filename specified, None is returned.
    """
    h = (te - t0) / float(N)
    t = t0

    if filename is None:
        # Setup arrays
        time = numpy.zeros(N)
        yout = []

        # Initial values
        yout.append(y0)
        time[0] = t0
        t = t0

        # Loop over timesteps
        for i in range(1, N):
            yout.append(rk4_step(yout[i - 1], t, h, deriv))
            t = t0 + h * i
            time[i] = t

        return (time, yout)
    else:
        ofile = open(filename, 'w')
        # Format string used for output file
        ostring = "%20.8E " + ("%20.8E " * len(y0)) + "\n"

        # Initial values
        y = y0
        t = t0
        foo = [t]
        foo[1:] = y
        ofile.write(ostring % tuple(foo))

        while (t < te):
            y = rk4_step(y, t, h, deriv)
            t += h
            foo = [t]
            foo[1:] = y
            ofile.write(ostring % tuple(foo))
        ofile.close()
        return None
93b7255fc95f06f765df12930efcf89338970ee6
3,654,131
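A usage sketch for rk4 above. The companion rk4_step is not included in this snippet, so a minimal classical Runge-Kutta step is provided here purely as an illustrative stand-in; its signature is inferred from how rk4 calls it.

import numpy

def rk4_step(y, t, h, deriv):
    # Hypothetical stand-in: one classical RK4 step for dy/dt = deriv(y, t)
    k1 = deriv(y, t)
    k2 = deriv(y + 0.5 * h * k1, t + 0.5 * h)
    k3 = deriv(y + 0.5 * h * k2, t + 0.5 * h)
    k4 = deriv(y + h * k3, t + h)
    return y + (h / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)

deriv = lambda y, t: -y                      # simple decay, exact solution exp(-t)
time, yout = rk4(numpy.array([1.0]), 0.0, 2.0, 200, deriv)
print(yout[-1], numpy.exp(-time[-1]))        # the two values should agree closely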
from typing import Dict from typing import Tuple def create_txt_substitute_record_rule_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]: """ Args: client: Client object args: Usually demisto.args() Returns: Outputs """ name = args.get('name') rp_zone = args.get('rp_zone') comment = args.get('comment') text = args.get('text') infoblox_object_type = 'record:rpz:txt' raw_response = client.create_substitute_record_rule(infoblox_object_type, name=name, rp_zone=rp_zone, comment=comment, text=text) rule = raw_response.get('result', {}) fixed_keys_rule_res = {RESPONSE_TRANSLATION_DICTIONARY.get(key, string_to_context_key(key)): val for key, val in rule.items()} title = f'{INTEGRATION_NAME} - Response Policy Zone rule: {name} has been created:' context = { f'{INTEGRATION_CONTEXT_NAME}.ModifiedResponsePolicyZoneRules(val.Name && val.Name === obj.Name)': fixed_keys_rule_res} human_readable = tableToMarkdown(title, fixed_keys_rule_res, headerTransform=pascalToSpace) return human_readable, context, raw_response
ada3c412ec166eedd04edb2219396da6aef967ea
3,654,132
def pot_rho_linear(SP, t, rho0=1025, a=2e-4, b=7e-4, SP0=35, t0=15): """ Potential density calculated using a linear equation of state: Parameters ---------- SP : array-like Salinity [g/kg] t : array-like Temperature [°C] rho0 : float, optional Constant density [kg/m^3] a : float, optional Thermal expansion coefficient [1/°C] b : float, optional saline expansion coefficient [kg/g] SP0 : float, optional Constant salinity [g/kg] t0 : float, optional Constant temperature [°C] Returns ------- pot_rho : ndarray Potential density [kg/m^3] """ return rho0 * (1 - a * (t - t0) + b * (SP - SP0))
47dd8248239d2147ff50d1b179d3fc4392c173cb
3,654,133
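A short, self-contained example of pot_rho_linear above; it reproduces the reference density at (SP0, t0) and shows the effect of warming and of extra salt.

import numpy as np

SP = np.array([35.0, 35.0, 36.0])   # g/kg
t = np.array([15.0, 20.0, 15.0])    # degrees C

rho = pot_rho_linear(SP, t)
print(rho)   # [1025.0, slightly lighter (warmer), slightly denser (saltier)]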
async def create_assc(conn : asyncpg.Connection, name : str, type : str, base : str, leader : int) -> Association: """Create an association with the fields given. type must be 'Brotherhood','College', or 'Guild'. """ psql = """ SELECT assc_id FROM associations WHERE assc_name = $1; """ if await conn.fetchval(psql, name) is not None: raise Checks.NameTaken(name) psql1 = """ WITH rows AS ( INSERT INTO associations (assc_name, assc_type, leader_id, assc_icon, base) VALUES ($1, $2, $3, $4, $5) RETURNING assc_id ) SELECT assc_id FROM rows; """ psql2 = """ UPDATE players SET assc = $1, guild_rank = 'Leader' WHERE user_id = $2; """ psql3 = """ INSERT INTO brotherhood_champions (assc_id) VALUES ($1); """ assc_id = await conn.fetchval( psql1, name, type, leader, Vars.DEFAULT_ICON, base) await conn.execute(psql2, assc_id, leader) if type == "Brotherhood": await conn.execute(psql3, assc_id) return await get_assc_by_id(conn, assc_id)
3089b6033e31325d7b3942d9d887b89cec21ca1c
3,654,135
def get_path_from_query_string(req): """Gets path from query string Args: req (flask.request): Request object from Flask Returns: path (str): Value of "path" parameter from query string Raises: exceptions.UserError: If "path" is not found in query string """ if req.args.get('path') is None: raise exceptions.UserError('Path not found in query string') return req.args.get('path')
7e279e8e33dbbaa6ceb18d4b9a61723826522ec3
3,654,137
import numpy as np
import scipy.special


def entropy_grassberger(n, base=None):
    """Estimate the entropy of a discrete distribution from counts per category.

    n: array of counts
    base: base in which to measure the entropy (default: nats)
    """
    N = np.sum(n)
    entropy = np.log(N) - np.sum(n * scipy.special.digamma(n + 1e-20)) / N
    if base:
        entropy /= np.log(base)
    return entropy
1dc5ced1f5bb43bce30fa9501632825648b19cb8
3,654,138
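A small example of entropy_grassberger above on a histogram of counts; with base=2 the estimate is expressed in bits.

import numpy as np

counts = np.array([50, 30, 20])             # observed counts per category
print(entropy_grassberger(counts))          # entropy estimate in nats
print(entropy_grassberger(counts, base=2))  # same estimate expressed in bits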
def get_param_layout(): """Get layout for causality finding parameters definition window Parameters ---------- Returns ------- `List[List[Element]]` Layout for causality finding parameters window """ box = [ [ sg.Text('Parameters') ], [ sg.Text(' Epochs: '),sg.Input(key=cte.EPOCHS,size=(10,1), default_text="1000"), sg.Text(' Kernel Size: '),sg.Input(key=cte.KERNEL,size=(10,1), default_text="4") ], [ sg.Text(' Depth: '),sg.Input(key=cte.LEVEL,size=(10,1), default_text="1"), sg.Text(' Learning Rate: '),sg.Input(key=cte.RATE,size=(10,1), default_text="0.01") ], [ sg.Text(' Dilation: '),sg.Input(key=cte.DILATION,size=(10,1), default_text="4"), sg.Text(' Significance: '),sg.Input(key=cte.SIGNIFICANCE,size=(10,1), default_text="0.8") ], [ sg.Text('Optimizer: '),sg.Input(key=cte.OPTIMIZER,size=(10,1), default_text="Adam"), sg.Text(' Log Interval: '),sg.Input(key=cte.LOGINT,size=(10,1), default_text="500") ], [sg.Button('Create Causal Graph', key=cte.CREATE)] ] return box
e09db05b848e71449d7d17004793e8ce167dca1a
3,654,139
def senti_histplot(senti_df): """histogram plot for sentiment""" senti_hist = ( alt.Chart(senti_df) .mark_bar() .encode(alt.Y(cts.SENTI, bin=True), x="count()", color=cts.SENTI,) .properties(height=300, width=100) ).interactive() return senti_hist
731fda9cf5af49fdbec7d1a16edbf65148e67d5a
3,654,140
def pd_df_sampling(df, coltarget="y", n1max=10000, n2max=-1, isconcat=1): """ DownSampler :param df: :param coltarget: binary class :param n1max: :param n2max: :param isconcat: :return: """ df1 = df[df[coltarget] == 0].sample(n=n1max) n2max = len(df[df[coltarget] == 1]) if n2max == -1 else n2max df0 = df[df[coltarget] == 1].sample(n=n2max) if isconcat: df2 = pd.concat((df1, df0)) df2 = df2.sample(frac=1.0, replace=True) return df2 else: print("y=1", n2max, "y=0", len(df1)) return df0, df1
2cec90c189d00a8cd3ec19224fa7a2685c135bf2
3,654,141
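A usage sketch for pd_df_sampling above on a toy imbalanced frame; note that n1max must not exceed the number of majority-class rows, because that sample() call is made without replacement.

import pandas as pd

df = pd.DataFrame({
    "y": [0] * 20 + [1] * 5,
    "x": range(25),
})

balanced = pd_df_sampling(df, coltarget="y", n1max=5, n2max=-1)
print(balanced["y"].value_counts())   # roughly 5 of each class (final resample is with replacement)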
def get_deliverer(batch_size, max_staleness, session):
    """
    Helper function that returns the correct deliverer class for the
    batch_size and max_staleness parameters
    """
    if batch_size < 1:
        return SimpleDeliverer(session)
    else:
        return BatchDeliverer(session, batch_size, max_staleness)
544740a5f38befc4d8123e7835ba758feac2d35b
3,654,143
import copy def trace_fweight_deprecated(fimage, xinit, ltrace=None, rtraceinvvar=None, radius=3.): """ Python port of trace_fweight.pro from IDLUTILS Parameters: ----------- fimage: 2D ndarray Image for tracing xinit: ndarray Initial guesses for x-trace invvar: ndarray, optional Inverse variance array for the image radius: float, optional Radius for centroiding; default to 3.0 """ # Init nx = fimage.shape[1] ny = fimage.shape[0] ncen = len(xinit) xnew = copy.deepcopy(xinit) xerr = np.zeros(ncen) + 999. ycen = np.arange(ny, dtype=int) invvar = 0. * fimage + 1. x1 = xinit - radius + 0.5 x2 = xinit + radius + 0.5 ix1 = np.floor(x1).astype(int) ix2 = np.floor(x2).astype(int) fullpix = int(np.maximum(np.min(ix2-ix1)-1, 0)) sumw = np.zeros(ny) sumxw = np.zeros(ny) sumwt = np.zeros(ny) sumsx1 = np.zeros(ny) sumsx2 = np.zeros(ny) qbad = np.array([False]*ny) if invvar is None: invvar = np.zeros_like(fimage) + 1. # Compute for ii in range(0,fullpix+3): spot = ix1 - 1 + ii ih = np.clip(spot,0,nx-1) xdiff = spot - xinit # wt = np.clip(radius - np.abs(xdiff) + 0.5,0,1) * ((spot >= 0) & (spot < nx)) sumw = sumw + fimage[ycen,ih] * wt sumwt = sumwt + wt sumxw = sumxw + fimage[ycen,ih] * xdiff * wt var_term = wt**2 / (invvar[ycen,ih] + (invvar[ycen,ih] == 0)) sumsx2 = sumsx2 + var_term sumsx1 = sumsx1 + xdiff**2 * var_term #qbad = qbad or (invvar[ycen,ih] <= 0) qbad = np.any([qbad, invvar[ycen,ih] <= 0], axis=0) # Fill up good = (sumw > 0) & (~qbad) if np.sum(good) > 0: delta_x = sumxw[good]/sumw[good] xnew[good] = delta_x + xinit[good] xerr[good] = np.sqrt(sumsx1[good] + sumsx2[good]*delta_x**2)/sumw[good] bad = np.any([np.abs(xnew-xinit) > radius + 0.5, xinit < radius - 0.5, xinit > nx - 0.5 - radius], axis=0) if np.sum(bad) > 0: xnew[bad] = xinit[bad] xerr[bad] = 999.0 # Return return xnew, xerr
e927113477a277ceb9acc8ce6af8bd1689c2913c
3,654,144
from datetime import datetime def home(): """Renders the card page.""" cardStack = model.CardStack() return render_template( 'cards.html', title ='POSTIN - Swipe', cardSet = cardStack.cardList, year=datetime.now().year, )
203021b1da4833418aafd3e3e20964e3b765a816
3,654,145
def index(request): """ User profile page. """ user = request.user profile = user.userprofile context = collect_view_data(request, 'profile') context['user'] = user context['profile'] = profile context['uform'] = UserForm(request, request.user, init=True) context['upform'] = UserProfileForm(request, profile, init=True) context['pform'] = ChangePasswordForm(request.user) context['sform'] = SSHKeyForm(request, request.user) context['ssh_keys'] = request.user.usersshkey_set.all().order_by('id') context['email_aform'] = EmailActivationProfileForm(profile.email_token) context['phone_aform'] = PhoneActivationProfileForm(profile.phone_token) return render(request, 'gui/profile/profile.html', context)
1a8cc98ba476e21986f79ec8e662bb222df79fae
3,654,147
def user_confirm_email(token): """Confirm a user account using his email address and a token to approve. Parameters ---------- token : str The token associated with an email address. """ try: email = ts.loads(token, max_age=86400) except Exception as e: logger.error(str(e)) abort(404) user = User.query.filter_by(email=email).one_or_none() if user is None: flash( 'You did not sign-up yet to RAMP. Please sign-up first.', category='error' ) return redirect(url_for('auth.sign_up')) elif user.access_level in ('user', 'admin'): flash( "Your account is already approved. You don't need to confirm your " "email address", category='error' ) return redirect(url_for('auth.login')) elif user.access_level == 'asked': flash( "Your email address already has been confirmed. You need to wait " "for an approval from a RAMP administrator", category='error' ) return redirect(url_for('general.index')) User.query.filter_by(email=email).update({'access_level': 'asked'}) db.session.commit() admin_users = User.query.filter_by(access_level='admin') for admin in admin_users: subject = 'Approve registration of {}'.format( user.name ) body = body_formatter_user(user) url_approve = ('http://{}/sign_up/{}' .format(app.config['DOMAIN_NAME'], user.name)) body += 'Click on the link to approve the registration ' body += 'of this user: {}'.format(url_approve) send_mail(admin.email, subject, body) flash( "An email has been sent to the RAMP administrator(s) who will " "approve your account" ) return redirect(url_for('auth.login'))
3f26a4872af9759165d0592ac8d966f2e27a9bf6
3,654,148
def num_zeros_end(num): """ Counts the number of zeros at the end of the number 'num'. """ iszero = True num_zeros = 0 i = len(num)-1 while (iszero == True) and (i != 0): if num[i] == "0": num_zeros += 1 elif num[i] != "0": iszero = False i -= 1 return num_zeros
f227cce65e26a0684a10755031a4aeff2156015a
3,654,149
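num_zeros_end above operates on the string form of a number; a couple of quick checks:

print(num_zeros_end("1030400"))   # 2
print(num_zeros_end("123"))       # 0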
from typing import List def batch_summarize(texts: List[str]) -> List[str]: """Summarizes the texts (local mode). :param texts: The texts to summarize. :type texts: List[str] :return: The summarized texts. :rtype: List[str] """ if _summarizer is None: load_summarizer() assert _summarizer is not None tokenizer = get_summarizer_tokenizer() prompts = [summarizer_prompt.format(text=text) for text in texts] information = { "prompt_length": max(len(tokenizer.encode(prompt)) for prompt in prompts) } parameters = format_parameters_to_local(summarizer_parameters, information) response = _summarizer(prompts, **parameters) return [ cut_on_stop(choices[0]["generated_text"], summarizer_parameters["stop"]) for choices in response ]
7c05b8f612faab808fbeb1ef7c21f8b3b2487be5
3,654,150
def Vp_estimation(z, T, x, g=param.g): """ Estimation of the Vp profile from the results of solving the system. """ DT = T - T[-1] # temperature variation in the layer compared to T[ICB] drhoP = -param.rhoH**2. * g * z / Seismic_observations.calcK(z) drhoT = -param.rhoH * param.alpha * DT # *(Mp*h+X*Mx) rhoL = (param.rhoD - (1 - x[0]) * param.rhoH - drhoT[0] - drhoP[0]) / x[0] # print rhoL # rhoL2=x[0]/(1/(rhoD-drhoT[0]-drhoP[1])-(1-x[0])/rhoH) # print rhoL rho_new = x * rhoL + (1 - x) * param.rhoH + drhoT + drhoP Vp_new = np.sqrt(Seismic_observations.calcK(z) / rho_new) return rho_new, Vp_new
4f1e1936cc98cfd84d87a651c8deac5bb7aa39e0
3,654,151
def pp_date(dt): """ Human-readable (i.e. pretty-print) dates, e.g. for spreadsheets: See http://docs.python.org/tutorial/stdlib.html e.g. 31-Oct-2011 """ d = date_to_datetime(dt) return d.strftime('%d-%b-%Y')
a6c8cd97785212afebb2b8948117815f5553dc24
3,654,152
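A usage sketch for pp_date above, assuming the module's date_to_datetime helper accepts a datetime.date (it is defined elsewhere and simply normalises dates to datetimes).

import datetime

print(pp_date(datetime.date(2011, 10, 31)))   # -> '31-Oct-2011'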
import copy
import math


def optimizer_p(cd, path, i, obs, path_penalty):
    """Optimizer of the current path.

    Reduce the piece-wise path length in the free space of the environment.
    """
    p_tmp = copy.deepcopy(path)
    p_tmp[i].x = p_tmp[i].x + cd[0]
    p_tmp[i].y = p_tmp[i].y + cd[1]
    r1 = math.sqrt((p_tmp[i-1].x - p_tmp[i].x)**2 + (p_tmp[i-1].y - p_tmp[i].y)**2)
    r2 = math.sqrt((p_tmp[i+1].x - p_tmp[i].x)**2 + (p_tmp[i+1].y - p_tmp[i].y)**2)
    penalty1 = 0
    penalty2 = 0
    if obs:  # penalise path segments that pass too close to any obstacle
        for o in obs:
            d1 = check_obst(p_tmp[i-1].x, p_tmp[i-1].y, p_tmp[i].x, p_tmp[i].y, o[0].x, o[0].y)
            if d1 < o[1]:
                penalty1 = max(penalty1, (o[1] - d1) * path_penalty)
            d2 = check_obst(p_tmp[i].x, p_tmp[i].y, p_tmp[i+1].x, p_tmp[i+1].y, o[0].x, o[0].y)
            if d2 < o[1]:
                penalty2 = max(penalty2, (o[1] - d2) * path_penalty)
    return r1 + r2 + abs(r1 - r2) + penalty1 + penalty2
da126e3e7c0013748b1bc5b39c1b51aa2bf0d68b
3,654,153
import base64
from email.mime.text import MIMEText


def create_message(sender_address, receiver_address, subject, email_content):
    """Create a message for an email.

    Args:
      sender_address: Email address of the sender.
      receiver_address: Email address of the receiver.
      subject: The subject of the email message.
      email_content: The HTML body of the email message.

    Returns:
      An object containing a base64url encoded email object.
    """
    message = MIMEText(email_content, 'html')
    message['to'] = receiver_address
    message['from'] = sender_address
    message['subject'] = subject
    # return {'raw': base64.urlsafe_b64encode(message.as_string())}
    b64_bytes = base64.urlsafe_b64encode(message.as_bytes())
    b64_string = b64_bytes.decode()
    return {'raw': b64_string}
3970272fda9650b5b59de9a57b2579374088b5c4
3,654,154
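A self-contained example of create_message above; the addresses are placeholders, and the returned raw payload is the base64url-encoded MIME message that an email API can send.

msg = create_message(
    sender_address="[email protected]",       # placeholder addresses
    receiver_address="[email protected]",
    subject="Weekly report",
    email_content="<p>All systems <b>nominal</b>.</p>",
)
print(sorted(msg.keys()))      # ['raw']
print(msg["raw"][:40], "...")  # start of the base64url-encoded MIME message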
def get_latest_revision_number(request, package_id): """ returns the latest revision number for given package """ package = get_object_or_404(Package, id_number=package_id) return HttpResponse(simplejson.dumps({ 'revision_number': package.latest.revision_number}))
3f8053656cbd7e08336a4632f1deaf43e58bc3eb
3,654,156
from typing import Mapping def _make_mesh_tensors(inputs: Mapping[K, np.ndarray]) -> Mapping[K, tf.Tensor]: """ Computes tensors that are the Cartesian product of the inputs. This is around 20x faster than constructing this in Python. Args: inputs: A mapping from keys to NumPy arrays. Returns: A mapping from keys to a Tensor, which takes on values from the corresponding input NumPy array. Computing the Tensors should yield NumPy arrays equal to `{k: itertools.product(inputs.values())[i] for i, k in enumerate(inputs.keys())}`. """ # SOMEDAY(adam): messy, this would be much nicer in TF2 API # SOMEDAY(adam): v.dtype may not always match the dtype expected by the models. # e.g. `stable_baselines.common.input` always maps `MultiDiscrete` to `int32` even though # Gym reports it as `int64`. The dtypes match with `Box` though, which is the only thing we # need so far, so ignoring this (change should possibly be made in Stable Baselines). phs = {k: tf.placeholder(v.dtype, shape=v.shape) for k, v in inputs.items()} # Increase dimensions for broadcasting # So first tensor will be a[:, None, ..., None], # second tensor b[None, :, None, ..., None], ..., # final tensor z[None, ..., None, :]. tensors = {} for i, (k, ph) in enumerate(phs.items()): t = ph for j in range(len(phs)): if i != j: t = tf.expand_dims(t, axis=j) tensors[k] = t target_shape = tuple((len(v) for v in inputs.values())) tensors = { k: tf.broadcast_to(t, target_shape + inputs[k].shape[1:]) for k, t in tensors.items() } target_len = np.product(target_shape) tensors = {k: tf.reshape(t, (target_len,) + inputs[k].shape[1:]) for k, t in tensors.items()} handles = {k: tf.get_session_handle(t) for k, t in tensors.items()} feed_dict = {ph: inputs[k] for k, ph in phs.items()} return tf.get_default_session().run(handles, feed_dict=feed_dict)
65a97e7f7d85668acd2af50ba9ed745190181018
3,654,157
def make_counters(): """Creates all of the VariantCounters we want to track.""" def _gt_selector(*gt_types): return lambda v: variantutils.genotype_type(v) in gt_types return VariantCounters([ ('All', lambda v: True), ('SNPs', variantutils.is_snp), ('Indels', variantutils.is_indel), ('BiAllelic', variantutils.is_biallelic), ('MultiAllelic', variantutils.is_multiallelic), ('HomRef', _gt_selector(variantutils.GenotypeType.hom_ref)), ('Het', _gt_selector(variantutils.GenotypeType.het)), ('HomAlt', _gt_selector(variantutils.GenotypeType.hom_var)), ('NonRef', _gt_selector(variantutils.GenotypeType.het, variantutils.GenotypeType.hom_var)), ])
b7a943f045018556a2a5d0dbf5e093906d10242a
3,654,158