Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def get_install_task_flavor(job_config):
    """
    Pokes through the install task's configuration (including its overrides) to
    figure out which flavor it will want to install.

    Only looks at the first instance of the install task in job_config.
    """
    project, = job_config.get('project', 'ceph'),
    tasks = job_config.get('tasks', dict())
    overrides = job_config.get('overrides', dict())
    install_overrides = overrides.get('install', dict())
    project_overrides = install_overrides.get(project, dict())
    first_install_config = dict()
    for task in tasks:
        if task.keys()[0] == 'install':
            first_install_config = task.values()[0] or dict()
            break
    first_install_config = copy.deepcopy(first_install_config)
    deep_merge(first_install_config, install_overrides)
    deep_merge(first_install_config, project_overrides)
    return get_flavor(first_install_config)
5,355,800
def equalize_hist(image, nbins=256):
    """Return image after histogram equalization.

    Parameters
    ----------
    image : array
        Image array.
    nbins : int
        Number of bins for image histogram.

    Returns
    -------
    out : float array
        Image array after histogram equalization.

    Notes
    -----
    This function is adapted from [1]_ with the author's permission.

    References
    ----------
    .. [1] http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
    .. [2] http://en.wikipedia.org/wiki/Histogram_equalization
    """
    image = img_as_float(image)
    cdf, bin_centers = cumulative_distribution(image, nbins)
    out = np.interp(image.flat, bin_centers, cdf)
    return out.reshape(image.shape)
5,355,801
def entry_point():
    """gallerycrawler command line utilities."""
5,355,802
def printMV(*args, **kwargs):
    """Print the given text, prefixed with the application's acronym."""
    print("[MV]", *args, **kwargs)
5,355,803
def add9336(rh):
    """
    Adds a 9336 (FBA) disk to virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function            - 'CHANGEVM'
          subfunction         - 'ADD9336'
          userid              - userid of the virtual machine
          parms['diskPool']   - Disk pool
          parms['diskSize']   - size of the disk in blocks or bytes.
          parms['fileSystem'] - Linux filesystem to install on the disk.
          parms['mode']       - Disk access mode
          parms['multiPW']    - Multi-write password
          parms['readPW']     - Read password
          parms['vaddr']      - Virtual address
          parms['writePW']    - Write password

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.add9336")

    results, blocks = generalUtils.cvtToBlocks(rh, rh.parms['diskSize'])
    if results['overallRC'] != 0:
        # message already sent. Only need to update the final results.
        rh.updateResults(results)

    if results['overallRC'] == 0:
        parms = [
            "-T", rh.userid,
            "-v", rh.parms['vaddr'],
            "-t", "9336",
            "-a", "AUTOG",
            "-r", rh.parms['diskPool'],
            "-u", "1",
            "-z", blocks,
            "-f", "1"]
        hideList = []
        if 'mode' in rh.parms:
            parms.extend(["-m", rh.parms['mode']])
        else:
            parms.extend(["-m", 'W'])
        if 'readPW' in rh.parms:
            parms.extend(["-R", rh.parms['readPW']])
            hideList.append(len(parms) - 1)
        if 'writePW' in rh.parms:
            parms.extend(["-W", rh.parms['writePW']])
            hideList.append(len(parms) - 1)
        if 'multiPW' in rh.parms:
            parms.extend(["-M", rh.parms['multiPW']])
            hideList.append(len(parms) - 1)

        results = invokeSMCLI(rh, "Image_Disk_Create_DM", parms,
                              hideInLog=hideList)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results from invokeSMCLI

    if (results['overallRC'] == 0 and 'fileSystem' in rh.parms):
        # Install the file system
        results = installFS(
            rh,
            rh.parms['vaddr'],
            rh.parms['mode'],
            rh.parms['fileSystem'],
            "9336")

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if (results['overallRC'] == 0 and results['rs'] == 0):
            # Add the disk to the active configuration.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr'],
                "-m", rh.parms['mode']]

            results = invokeSMCLI(rh, "Image_Disk_Create", parms)
            if results['overallRC'] == 0:
                rh.printLn("N", "Added dasd " + rh.parms['vaddr'] +
                           " to the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.add9336, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
5,355,804
def test_compute_fluctuations_rs(tsig_white):
    """Tests fluctuation analysis for the RS method."""

    # Test white noise: expected RS of 0.5
    t_scales, flucs, exp = compute_fluctuations(tsig_white, FS_HIGH, method='rs')
    assert np.isclose(exp, 0.5, atol=0.1)
5,355,805
def window_data(s1, s2, s5, s6, s7, s8, sat, ele, azi, seconds, edot, f, az1, az2, e1, e2, satNu, pfitV, pele):
    """
    author kristine m. larson
    also calculates the scale factor for various GNSS frequencies.
    currently returns meanTime in UTC hours and mean azimuth in degrees
    cf, which is the wavelength/2
    currently works for GPS, GLONASS, GALILEO, and Beidou
    new: pele are the elevation angle limits for the polynomial fit. these are applied
    before you start windowing the data
    """
    cunit = 1
    dat = []; x = []; y = []
    # get scale factor
    # added glonass, 101 and 102
    if (f == 1) or (f == 101) or (f == 201):
        dat = s1
    if (f == 2) or (f == 20) or (f == 102) or (f == 302):
        dat = s2
    if (f == 5) or (f == 205):
        dat = s5
    # these are galileo frequencies (via RINEX definition)
    if (f == 206) or (f == 306):
        dat = s6
    if (f == 207) or (f == 307):
        dat = s7
    if (f == 208):
        dat = s8
    # get the scaling factor for this frequency and satellite number
    # print(f,satNu)
    cf = arc_scaleF(f, satNu)

    # if not, frequency does not exist, will be tripped by Nv
    # remove the direct signal component
    if (cf > 0):
        x, y, sat, azi, seconds, edot = removeDC(dat, satNu, sat, ele, pele, azi, az1, az2, edot, seconds)

    Nv = len(y); Nvv = 0
    # some defaults in case there are no data in this region
    meanTime = 0.0; avgAzim = 0.0; avgEdot = 1; Nvv = 0
    avgEdot_fit = 1; delT = 0.0
    # no longer have to look for specific satellites. some minimum number of points required
    if Nv > 30:
        model = np.polyfit(x, y, pfitV)
        fit = np.polyval(model, x)
        # redefine x and y as old variables
        ele = x
        dat = y - fit
        # ok - now figure out what is within the more restricted elevation angles
        x = ele[(ele > e1) & (ele < e2) & (azi > az1) & (azi < az2)]
        y = dat[(ele > e1) & (ele < e2) & (azi > az1) & (azi < az2)]
        ed = edot[(ele > e1) & (ele < e2) & (azi > az1) & (azi < az2)]
        a = azi[(ele > e1) & (ele < e2) & (azi > az1) & (azi < az2)]
        t = seconds[(ele > e1) & (ele < e2) & (azi > az1) & (azi < az2)]
        sumval = np.sum(y)
        if sumval == 0:
            x = []; y = []; Nv = 0; Nvv = 0
        # since units were changed to volts/volts, the zeros got changed to 1 values
        if sumval == Nv:
            x = []; y = []; Nv = 0; Nvv = 0

        Nvv = len(y)
        # calculate average time in UTC (actually it is GPS time) in hours and average azimuth
        # this is fairly arbitrary, but can't be so small you can't fit a polynomial to it
        if (Nvv > 10):
            dd = np.diff(t)
            # edot, in radians/sec
            model = np.polyfit(t, x*np.pi/180, 1)
            # edot in radians/second
            avgEdot_fit = model[0]
            avgAzim = np.mean(a)
            meanTime = np.mean(t)/3600
            avgEdot = np.mean(ed)
            # delta Time in minutes
            delT = (np.max(t) - np.min(t))/60
            # average tan(elev)
            cunit = np.mean(np.tan(np.pi*x/180))

    # return tan(e)/edot, in units of radians/hour now. used for RHdot correction
    if avgEdot == 0:
        outFact1 = 0
    else:
        outFact1 = cunit/(avgEdot*3600)
    outFact2 = cunit/(avgEdot_fit*3600)
    # was debugging
    #if (satNu == 25) and (f == 20):
    #    print('Edot,cunit,az', avgEdot_fit, cunit, avgAzim, outFact2)
    return x, y, Nvv, cf, meanTime, avgAzim, outFact1, outFact2, delT
5,355,806
def get_related(user, kwargs):
    """
    Get related model from user's input.
    """
    for item in user.access_extra:
        if item[1] in kwargs:
            related_model = apps.get_model(item[0], item[1])
            kwargs[item[1]] = related_model.objects.get(pk=get_id(kwargs[item[1]]))
    return kwargs
5,355,807
def _drive_help(message: Message, cmd1: str, cmd2: str) -> None:
    """Return help for the jira search command."""
    botsend(message, HELP.format(cmd1, cmd2, DEFAULT_PROJECT))
5,355,808
def make_logical(n_tiles=1):
    """
    Make a toy dataset with three labels that represent the logical functions:
    OR, XOR, AND (functions of the 2D input).
    """
    pat = np.array([
        # X  X  Y  Y  Y
        [0, 0, 0, 0, 0],
        [0, 1, 1, 1, 0],
        [1, 0, 1, 1, 0],
        [1, 1, 1, 0, 1]
    ], dtype=int)

    N, E = pat.shape
    D = 2
    L = E - D

    pat2 = np.zeros((N, E))
    pat2[:, 0:L] = pat[:, D:E]
    pat2[:, L:E] = pat[:, 0:D]
    pat2 = np.tile(pat2, (n_tiles, 1))
    np.random.shuffle(pat2)

    Y = np.array(pat2[:, 0:L], dtype=float)
    X = np.array(pat2[:, L:E], dtype=float)
    return X, Y
5,355,809
def get_idmap_etl(
        p_idmap: object,
        p_etl_id: str,
        p_source_table: object = None
):
    """
    Generates the ETL script for an Idmap table.

    :param p_idmap: Idmap class object
    :param p_etl_id: id of the ETL process
    :param p_source_table: source table to load into the idmap
        (if not specified, a list with the ETL for all source tables is returned)
    """
    l_source_table_id = None
    if p_source_table:
        l_source_table_id = p_source_table.id
    l_etl = []
    l_idmap_nk_column = None
    l_idmap_rk_column = None
    l_etl_column = None
    for i_attribute in _get_table_attribute_property(p_table=p_idmap):
        if i_attribute.attribute_type == C_RK:
            l_idmap_rk_column = i_attribute.id
        if i_attribute.attribute_type == C_NK:
            l_idmap_nk_column = i_attribute.id
        if i_attribute.attribute_type == C_ETL_ATTR:
            l_etl_column = i_attribute.id
    for i_source_table in p_idmap.entity.source_table:
        if l_source_table_id and l_source_table_id != i_source_table.id:
            # skip the source table if it is not the one specified
            continue
        l_column_nk_sql = ""
        # build the script that concatenates the natural keys
        # sort the list of natural keys by name
        l_source_attribute_nk = sorted(p_idmap.source_attribute_nk, key=lambda nk: nk.name)
        for i_column_nk in l_source_attribute_nk:
            if i_source_table.id == i_column_nk.source_table.id:
                l_column_nk_sql = l_column_nk_sql + "CAST(" + '"' + str(i_column_nk.id) + '"' + " AS VARCHAR(4000))\n\t\t||'@@'||\n\t\t"
        l_column_nk_sql = l_column_nk_sql[:-14]
        l_source_id = i_source_table.source.source_id
        # generate the ETL for each source table
        l_etl.append(
            Connection().dbms.get_idmap_etl(
                p_idmap_id=p_idmap.id,
                p_idmap_rk_id=l_idmap_rk_column,
                p_idmap_nk_id=l_idmap_nk_column,
                p_etl_id=l_etl_column,
                p_etl_value=p_etl_id,
                p_source_table_id=i_source_table.id,
                p_attribute_nk=l_column_nk_sql,
                p_source_id=l_source_id,
                p_max_rk=str(p_idmap.max_rk)
            )
        )
    return l_etl
5,355,810
def gs_tie(men, women, preftie):
    """
    Gale-Shapley algorithm, modified to exclude unacceptable matches

    Inputs: men (list of men's names)
            women (list of women's names)
            pref (dictionary of preferences mapping names to list of sets of
                  preferred names in sorted order)
    Output: dictionary of stable matches
    """
    rank = {}
    for w in women:
        rank[w] = {}
        i = 1
        for m in preftie[w]:
            rank[w][tuple(m)] = i
            i += 1
    #print(rank)

    prefpointer = {}
    for m in men:
        prefpointer[m] = 0

    freemen = set(men)
    S = {}

    while(freemen) and prefpointer[m] < len(preftie[m]):
        m = freemen.pop()
        w = preftie[m][prefpointer[m]]
        w = tuple(w)
        #print(m + ' ' + str(w))
        prefpointer[m] += 1
        #print(m + ' ' + str(prefpointer[m]))
        for i in range(len(w)):
            if w[i] not in S:
                S[w[i]] = m
                #print(w[i])
            else:
                mprime = S[w[i]]
                if m in rank[w[i]] and rank[w[i]][m] < rank[w[i]][mprime]:
                    S[w[i]] = m
                    freemen.add(mprime)
                else:
                    freemen.add(m)
    #print(S)
    return S
5,355,811
def text_to_docs(text_id):
    """
    Query a text against the OSP corpus.

    Args:
        text_id (int): A text row id.
    """
    row = Text.get(Text.id == text_id)

    doc_ids = set()
    for tokens in row.queries:

        # Execute the query.
        results = config.es.search(
            index='document',
            request_timeout=90,
            body={
                'fields': [],
                'size': 1000000,
                'filter': {
                    'query': {
                        'match_phrase': {
                            'body': {
                                'query': ' '.join(tokens),
                                'slop': 5,
                            }
                        }
                    }
                }
            }
        )

        # Fail the job if the result is incomplete.
        if results['timed_out']:
            raise TimeoutError()

        # Register the doc ids.
        if results['hits']['total'] > 0:
            for hit in results['hits']['hits']:
                doc_ids.add(int(hit['_id']))

    # Build doc -> text links.
    citations = []
    for doc_id in doc_ids:
        citations.append({
            'document': doc_id,
            'text': row.id,
            'tokens': row.hash_tokens,
        })

    # Bulk-insert the results.
    if citations:
        Citation.insert_many(citations).execute()
5,355,812
def is_iterable(value):
    """Return True if the object is an iterable type."""
    return hasattr(value, '__iter__')
5,355,813
def get_search_app_by_model(model):
    """
    :returns: a single search app (by django model)
    :param model: django model for the search app
    :raises LookupError: if it can't find the search app
    """
    for search_app in get_search_apps():
        if search_app.queryset.model is model:
            return search_app
    raise LookupError(f'search app for {model} not found.')
5,355,814
def opcode_Tj(renderer, string=b''):
    """Show a text string and move the position based on its length"""
    renderer.render_text(string)
5,355,815
def create_model_and_store_checkpoint(config: ModelConfigBase, checkpoint_path: Path,
                                      weights_only: bool = True) -> None:
    """
    Creates a Lightning model for the given model configuration, and stores it as a checkpoint file.
    If a GPU is available, the model is moved to the GPU before storing.
    The trainer properties `current_epoch` and `global_step` are set to fixed non-default values.

    :param config: The model configuration.
    :param checkpoint_path: The path and filename of the checkpoint file.
    """
    container = InnerEyeContainer(config)
    trainer, _ = create_lightning_trainer(container)
    model = create_lightning_model(config)
    if machine_has_gpu:
        model = model.cuda()  # type: ignore
    trainer.model = model
    # Before saving, the values for epoch and step are incremented. Save them here in such a way that we can assert
    # easily later. We can't mock that because otherwise the mock object would be written to disk (that fails)
    trainer.fit_loop.current_epoch = FIXED_EPOCH - 1  # type: ignore
    trainer.fit_loop.global_step = FIXED_GLOBAL_STEP - 1  # type: ignore
    # In PL, it is the Trainer's responsibility to save the model. Checkpoint handling refers back to the trainer
    # to get a save_func. Mimicking that here.
    trainer.save_checkpoint(checkpoint_path, weights_only=weights_only)
5,355,816
def prct_overlap(adata, key_1, key_2, norm=False, ax_norm="row", sort_index=False):
    """
    % or cell count corresponding to the overlap of different cell types
    between 2 sets of annotations/clusters.

    Parameters
    ----------
    adata: AnnData object
    key_1: observational key corresponding to one cell division/one set of clusters
    key_2: observational key corresponding to one cell division/one set of clusters
    norm: normalise the ratio to the cell numbers given the total number of
        cells per cluster in key_1

    Return
    ------
    Table containing the ratio of cells within a cluster
    """
    data_1 = adata.obs[key_1].tolist()
    data_2 = adata.obs[key_2].tolist()

    count = {k: [] for k in list(set(data_1))}
    #count = {k: [] for k in sorted(list(set(data_1)))}

    i = 0
    for index in data_1:
        count[index].append(data_2[i])
        i += 1

    total_matrix = []
    for key, value in count.items():
        value = sorted(value)
        curr_key_list = []
        for element in sorted(list(set(data_2))):
            curr_count = 0
            for v in value:
                if element == v:
                    curr_count += 1
            curr_key_list.append(curr_count)
        curr_sum = sum(curr_key_list)
        #total_matrix.append([x/curr_sum for x in curr_key_list])
        total_matrix.append(curr_key_list)

    if norm and ax_norm == "row":
        total_matrix = []
        for key, value in count.items():
            value = sorted(value)
            curr_key_list = []
            for element in sorted(list(set(data_2))):
                curr_count = 0
                for v in value:
                    if element == v:
                        curr_count += 1
                curr_key_list.append(curr_count)
            curr_sum = sum(curr_key_list)
            total_matrix.append([x/curr_sum for x in curr_key_list])
    elif norm:
        print("""error in the argument ax_norm, or it is 'col' and I haven't
            figured out how to do that yet; here is the heatmap with no
            normalisation""")

    if sort_index:
        data_heatmap = pd.DataFrame(data=np.matrix(total_matrix),
                                    index=list(set(data_1)),
                                    columns=sorted(list(set(data_2)))).sort_index()
    else:
        data_heatmap = pd.DataFrame(data=np.matrix(total_matrix),
                                    index=list(set(data_1)),
                                    columns=sorted(list(set(data_2))))

    return(data_heatmap)
5,355,817
def keep_category(df, colname, pct=0.05, n=5):
    """
    Keep a pct or number of every level of a categorical variable

    Parameters
    ----------
    pct : float
        Keep at least pct of the nb of observations having a specific category
    n : int
        Keep at least n of the variables having a specific category

    Returns
    --------
    Returns an index of rows to keep
    """
    tokeep = []
    nmin = df.groupby(colname).apply(lambda x: x.sample(
        max(1, min(x.shape[0], n, int(x.shape[0] * pct)))).index)
    for index in nmin:
        tokeep += index.tolist()
    return pd.Index(tokeep)
5,355,818
def remove_scope_from_name(name, scope):
    """
    Args:
        name (str): full name of the tf variable with all the scopes

    Returns:
        (str): full name of the variable with the scope removed
    """
    result = name.split(scope)[1]
    result = result[1:] if result[0] == '/' else result
    return result.split(":")[0]
5,355,819
async def get_timers_matching(ctx, name_str, channel_only=True, info=False):
    """
    Interactively get a guild timer matching the given string.

    Parameters
    ----------
    name_str: str
        Name or partial name of a group timer in the current guild or channel.
    channel_only: bool
        Whether to match against the groups in the current channel or those in the whole guild.
    info: bool
        Whether to display some summary info about the timer in the selector.

    Returns: Timer

    Raises
    ------
    cmdClient.lib.UserCancelled:
        Raised if the user manually cancels the selection.
    cmdClient.lib.ResponseTimedOut:
        Raised if the user fails to respond to the selector within `120` seconds.
    """
    # Get the full timer list
    if channel_only:
        timers = ctx.client.interface.get_channel_timers(ctx.ch.id)
    else:
        timers = ctx.client.interface.get_guild_timers(ctx.guild.id)

    # If there are no timers, quit early
    if not timers:
        return None

    # Build a list of matching timers
    name_str = name_str.strip()
    timers = [timer for timer in timers if name_str.lower() in timer.name.lower()]

    if len(timers) == 0:
        return None
    elif len(timers) == 1:
        return timers[0]
    else:
        if info:
            select_from = [timer.oneline_summary() for timer in timers]
        else:
            select_from = [timer.name for timer in timers]

        try:
            selected = await ctx.selector("Multiple matching groups found, please select one.", select_from)
        except ResponseTimedOut:
            raise ResponseTimedOut("Group selection timed out.") from None
        except UserCancelled:
            raise UserCancelled("User cancelled group selection.") from None

        return timers[selected]
5,355,820
def comprehension_array(size=1000000):
    """Fills an array that is handled by Python via list comprehension."""
    return [random() * i for i in range(size)]
5,355,821
def handle_args():
    """Handles command-line parameters."""
    global MODE
    global VERBOSE

    parser = argparse.ArgumentParser(description='Sync Gettext messages to Google Sheets.')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('action', choices=['push', 'pull'])
    args = parser.parse_args()

    MODE = Mode.PULL if args.action == 'pull' else Mode.PUSH
    VERBOSE = args.verbose
5,355,822
def alignment(alpha, p, treatment):
    """Alignment confounding function.

    Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis
    for causal effects." Political Analysis 22.2 (2014): 169-182.
    https://www.mattblackwell.org/files/papers/causalsens.pdf

    Args:
        alpha (np.array): a confounding values vector
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
    """
    assert p.shape[0] == treatment.shape[0]
    adj = alpha * (1 - p) * treatment + alpha * p * (1 - treatment)
    return adj
5,355,823
def percentiles(a, pcts, axis=None):
    """Like scoreatpercentile but can take and return array of percentiles.

    Parameters
    ----------
    a : array
        data
    pcts : sequence of percentile values
        percentile or percentiles to find score at
    axis : int or None
        if not None, computes scores over this axis

    Returns
    -------
    scores: array
        array of scores at requested percentiles
        first dimension is length of object passed to ``pcts``
    """
    scores = []
    try:
        n = len(pcts)
    except TypeError:
        pcts = [pcts]
        n = 0
    for i, p in enumerate(pcts):
        if axis is None:
            score = stats.scoreatpercentile(a.ravel(), p)
        else:
            score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
        scores.append(score)
    scores = np.asarray(scores)
    if not n:
        scores = scores.squeeze()
    return scores
5,355,824
def single_request(gh, kname='CVE exploit', page=1, per_page=50):
    """
    Parse a single page of repository data and extract the CVE and exploit tags.

    :return cve_list: list, CVE ids in each page obtained by searching github.com
    """
    cve = dict()
    url = "https://api.github.com/search/repositories?q={key_name}&sort=updated&order=desc&page={page}&per_page={per_page}".format(key_name=kname, page=page, per_page=per_page)
    r = gh.call_to_the_api(url)
    if r:
        content = r.text
        js = json.loads(content)
        items = js['items']
        total_count = js['total_count']
        cve_add = single_parser(gh, items)
        if cve_add:
            cve = {**cve, **cve_add}
        return total_count, cve
    else:
        return False, False
5,355,825
def do_setup():
    """Set-up folder structure and check flags."""

    path_analysis = Path(cfg.get("LST1", "ANALYSIS_DIR")) / options.directory
    path_dl1 = Path(cfg.get("LST1", "DL1_DIR")) / options.directory
    path_dl2 = Path(cfg.get("LST1", "DL2_DIR")) / options.directory
    path_sub_analysis = path_analysis / options.prod_id
    path_dl1sub = path_dl1 / options.prod_id
    path_dl2_sub = path_dl2 / options.prod_id

    if Path(LOG_FILENAME).exists() and not options.append:
        CONFIG_FLAGS["Go"] = False
        log.info(f"File {LOG_FILENAME} already exists.")
        log.info(f"You must rename/remove {LOG_FILENAME} to produce a clean provenance.")
        log.info("You can also set --append flag to append captured provenance.")
        return

    CONFIG_FLAGS["TearSubAnalysis"] = (
        False if path_sub_analysis.exists() or options.provenance else path_sub_analysis
    )
    CONFIG_FLAGS["TearAnalysis"] = (
        False if path_analysis.exists() or options.provenance else path_analysis
    )
    CONFIG_FLAGS["TearSubDL1"] = (
        False if path_dl1sub.exists() or options.provenance else path_dl1sub
    )
    CONFIG_FLAGS["TearSubDL2"] = (
        False if path_dl2_sub.exists() or options.provenance else path_dl2_sub
    )
    CONFIG_FLAGS["TearDL1"] = (
        False if path_dl1.exists() or options.provenance else path_dl1
    )
    CONFIG_FLAGS["TearDL2"] = (
        False if path_dl2.exists() or options.provenance else path_dl2
    )

    if options.provenance and not options.force:
        if path_sub_analysis.exists():
            CONFIG_FLAGS["Go"] = False
            log.info(f"Folder {path_sub_analysis} already exists.")
        if path_dl1sub.exists():
            CONFIG_FLAGS["Go"] = False
            log.info(f"Folder {path_dl1sub} already exists.")
        if path_dl2_sub.exists():
            CONFIG_FLAGS["Go"] = False
            log.info(f"Folder {path_dl2_sub} already exists.")
        if not CONFIG_FLAGS["Go"]:
            log.info("You must enforce provenance files overwrite with --force flag.")
            return

    path_sub_analysis.mkdir(parents=True, exist_ok=True)
    path_dl1sub.mkdir(parents=True, exist_ok=True)
    path_dl2_sub.mkdir(parents=True, exist_ok=True)
5,355,826
def ownerOf(tokenId: bytes) -> UInt160:
    """
    Get the owner of the specified token.

    The parameter tokenId SHOULD be a valid NFT. If not, this method SHOULD throw an exception.

    :param tokenId: the token for which to check the ownership
    :type tokenId: ByteString
    :return: the owner of the specified token.
    :raise AssertionError: raised if `tokenId` is not a valid NFT.
    """
    owner = get_owner_of(tokenId)
    debug(['ownerOf: ', owner])
    return owner
5,355,827
def test_dequque_after():
    """Validating if front node was removed and didn't have the same value"""
    assert new_queue.dequeue() != 'emma'
    assert new_queue._size == 0
5,355,828
def stuff_context(sites, rup, dists):
    """
    Function to fill a rupture context with the contents of all of the
    other contexts.

    Args:
        sites (SiteCollection): A SiteCollection object.
        rup (RuptureContext): A RuptureContext object.
        dists (DistanceContext): A DistanceContext object.

    Returns:
        RuptureContext: A new RuptureContext whose attributes are all of
        the elements of the three inputs.
    """
    ctx = RuptureContext()

    for name in [name for name in vars(sites) if not name.startswith("__")]:
        setattr(ctx, name, getattr(sites, name))
    for name in [name for name in vars(rup) if not name.startswith("__")]:
        setattr(ctx, name, getattr(rup, name))
    for name in [name for name in vars(dists) if not name.startswith("__")]:
        setattr(ctx, name, getattr(dists, name))

    return ctx
5,355,829
def number_to_float(value):
    """The INDI spec allows a number of different number formats, given any, this returns a float

    :param value: A number string of a float, integer or sexagesimal
    :type value: String
    :return: The number as a float
    :rtype: Float
    """
    # negative is True, if the value is negative
    negative = value.startswith("-")
    if negative:
        value = value.lstrip("-")
    # Is the number provided in sexagesimal form?
    if value == "":
        parts = [0, 0, 0]
    elif " " in value:
        parts = value.split(" ")
    elif ":" in value:
        parts = value.split(":")
    elif ";" in value:
        parts = value.split(";")
    else:
        # not sexagesimal
        parts = [value, "0", "0"]
    # Any missing parts should have zero
    if len(parts) == 2:
        # assume seconds are missing, set to zero
        parts.append("0")
    assert len(parts) == 3
    number_strings = list(x if x else "0" for x in parts)
    # convert strings to integers or floats
    number_list = []
    for part in number_strings:
        try:
            num = int(part)
        except ValueError:
            num = float(part)
        number_list.append(num)
    floatvalue = number_list[0] + (number_list[1]/60) + (number_list[2]/360)
    if negative:
        floatvalue = -1 * floatvalue
    return floatvalue
5,355,830
def spg_line_search_step_length(current_step_length, delta, f_old, f_new,
                                sigma_one=0.1, sigma_two=0.9):
    """Return next step length for line search."""
    step_length_tmp = (-0.5 * current_step_length ** 2 * delta /
                       (f_new - f_old - current_step_length * delta))

    next_step_length = 0
    if sigma_one <= step_length_tmp <= sigma_two * current_step_length:
        next_step_length = step_length_tmp
    else:
        next_step_length = 0.5 * current_step_length
    return next_step_length
5,355,831
def group_by_iter(metrics_dict):
    """
    Restructure our metrics dictionary to have the last list store all the trials' values \
    for a given iteration, instead of all the iterations' values for a given trial.

    :param metrics_dict: data for an experiment (output of parse_party_data)
    :type metrics_dict: `dict[list[list[np.array]]]`
    :return: A new, reorganized dict
    :rtype: `dict[list[list[np.array]]]`
    """
    # TODO: more pythonic, pandas-thonic, or numpy-thonic way of doing this?
    metrics_gbi = {}
    # look into the metrics...
    for (metric_key, metric_llist) in metrics_dict.items():
        metrics_gbi[metric_key] = []
        # ... for each party...
        for (party_idx, metric_for_party) in enumerate(metric_llist):
            metrics_gbi[metric_key] += [[]]
            # ... for each trial...
            for metric_for_trial in metric_for_party:
                # ... and finally for each iter.
                for (iter_idx, iter_val) in enumerate(metric_for_trial):
                    if len(metrics_gbi[metric_key][party_idx]) <= iter_idx:
                        metrics_gbi[metric_key][party_idx] += [[]]
                    metrics_gbi[metric_key][party_idx][iter_idx] += [iter_val]

    return metrics_gbi
5,355,832
def calc_pv_invest(area, kw_to_area=0.125, method='EuPD'):
    """
    Calculate PV investment cost in Euro

    Parameters
    ----------
    area : float
        Photovoltaic area
    kw_to_area : float, optional
        Ratio of peak power to area (default: 0.125)
        For instance, 0.125 means 0.125 kWp / m2 area
        (http://www.solaranlagen-portal.com/photovoltaik/leistung)
    method : str, optional
        Method to calculate cost (default: 'EuPD')
        Options:
        - 'sap':
          Based on: Solaranlagenportal
          http://www.solaranlagen-portal.com/photovoltaik/kosten
        - 'EuPD':
          Based on: EuPD Research, Photovoltaik-Preismonitor Deutschland:
          German PV ModulePriceMonitor.

    Returns
    -------
    pv_invest : float
        Investment cost of the PV system in Euro
    """
    assert method in ['sap', 'EuPD'], 'Unknown method'
    assert area > 0, 'Area has to be larger than zero.'
    assert kw_to_area > 0, 'kWp / area ratio has to be larger than zero.'

    if method == 'sap':
        kw_peak = area * kw_to_area  # kW peak load
        # kw_peak * (spec_price + spec_install_cost) + inverter cost
        pv_invest = kw_peak * (1100 + 120) + 2000

    if method == 'EuPD':
        kw_peak = area * kw_to_area  # kW peak load
        # kw_peak * (spec_cost) + inverter cost
        pv_invest = kw_peak * 1400 + 2000

    return pv_invest
5,355,833
def procrustes_2d(x, y, n_restart=10, scale=True):
    """Align two sets of coordinates using an affine transformation.

    Attempts to find the affine transformation (composed of a rotation
    matrix `r` and a transformation vector `t`) for `y` such that
    `y_affine` closely matches `x`. Closeness is measured using MSE.

    y_affine = np.matmul(y, r) + t

    This algorithm only works with 2D coordinates (i.e., n_dim=2).

    Arguments:
        x: The first set of points.
            shape = (n_point, n_dim)
        y: The second set of points.
            shape = (n_point, n_dim)
        n_restart (optional): A scalar indicating the number of
            restarts for the optimization routine.
        scale (optional): Boolean indicating if scaling is permitted
            in the affine transformation.

    Returns:
        r: A rotation matrix.
            shape=(n_dim, n_dim)
        t: A transformation vector.
            shape=(1, n_dim)
    """
    n_dim = 2

    def assemble_r_t(params):
        # Assemble valid rotation matrix.
        s = params[3] * np.eye(n_dim)
        r = rotation_matrix(params[2])
        r = np.matmul(s, r)
        f = np.array([[np.sign(params[4]), 0], [0, np.sign(params[5])]])
        r = np.matmul(f, r)
        # Assemble translation vector.
        t = np.array([params[0], params[1]])
        t = np.expand_dims(t, axis=0)
        return r, t

    # In order to avoid impossible rotation matrices, perform optimization
    # on rotation components separately (theta, scaling, mirror).
    def objective_fn(params, x, y):
        r, t = assemble_r_t(params)
        # Apply affine transformation.
        y_affine = np.matmul(y, r) + t
        # loss = np.mean(np.sum((x - y_affine)**2, axis=1)) TODO
        # Loss is defined as MAE, since MSE chases outliers and can result
        # in ridiculous solutions.
        loss = np.mean(np.sum(np.abs(x - y_affine), axis=1))
        return loss

    # t_0, t_1, theta, scaling, flip
    params_best = np.array((0., 0., 0., 1.))
    loss_best = np.inf
    for _ in range(n_restart):
        (x0, y0) = np.random.rand(2) - .5
        theta0 = 2 * np.pi * np.random.rand(1)
        if scale:
            s0 = np.random.rand(1) + .5
            s_bnds = (0., None)
        else:
            s0 = 1
            s_bnds = (1., 1.)
        # Perform a flip on some restarts.
        if np.random.rand(1) < .5:
            fx0 = -.1
        else:
            fx0 = .1
        if np.random.rand(1) < .5:
            fy0 = -.1
        else:
            fy0 = .1
        params0 = np.array((x0, y0, theta0, s0, fx0, fy0))
        bnds = (
            (None, None),
            (None, None),
            (0., 2*np.pi),
            s_bnds,
            (-.1, .1),
            (-.1, .1)
        )
        res = minimize(objective_fn, params0, args=(x, y), bounds=bnds)
        params_candidate = res.x
        loss_candidate = res.fun

        if loss_candidate < loss_best:
            loss_best = loss_candidate
            params_best = params_candidate

    r, t = assemble_r_t(params_best)
    return r, t
5,355,834
def samps2ms(samples: float, sr: int) -> float:
    """samples to milliseconds given a sampling rate"""
    return (samples / sr) * 1000.0
5,355,835
def dump_json(json_filepath: str, json_dict: Dict[Any, Any]):
    """
    Function to serialize a Python dictionary into a JSON file.
    The pretty printing is enabled by default.

    Parameters
    ----------
    json_filepath : str
        Path to the JSON file to save to
    json_dict : Dict[Any, Any]
        Dictionary to be serialized
    """
    with open(json_filepath, "w+") as write_file:
        json.dump(json_dict, write_file, indent=4, sort_keys=True)
5,355,836
def nice_year(dt, lang=None, bc=False):
    """Format a datetime to a pronounceable year.

    For example, generate 'nineteen-hundred and eighty-four' for year 1984

    Args:
        dt (datetime): date to format (assumes already in local timezone)
        lang (string): the language to use, use Mycroft default language if
            not provided
        bc (bool): put B.C. after the year (python does not support dates
            B.C. in datetime)
    Returns:
        (str): The formatted year string
    """
    return lingua_franca.format.nice_year(dt, lang, bc)
5,355,837
def unstub():
    """Unstubs all stubbed methods and functions"""
    mock_registry.unstub_all()
5,355,838
def get_results(job_id):
    """
    Get the result of the job based on its id
    """
    try:
        job = Job.fetch(job_id, connection=conn)

        if job.is_finished:
            return jsonify({
                "status": "finished",
                "data": job.result
            }), 200
        elif job.is_failed:
            return jsonify({
                "status": "failed"
            }), 200
        else:
            return jsonify({
                "status": "in-progress"
            }), 200
    except NoSuchJobError:
        return jsonify({
            "msg": "job id does not exist"
        }), 404
5,355,839
def get_clean_dict(obj: HikaruBase) -> dict:
    """
    Turns an instance of a HikaruBase into a dict without values of None

    This function returns a Python dict object that represents the hierarchy
    of objects starting at ``obj`` and recursing into any nested objects.
    The returned dict **does not** include any key/value pairs where the value
    of the key is None or empty.

    If you wish to instead have a dict with all key/value pairs even when there
    is no useful value then you should use the dataclasses module's ``asdict()``
    function on obj.

    :param obj: some api_version_group of subclass of HikaruBase
    :return: a dict representation of the obj instance, but if any value in
        the dict was originally None, that key:value is removed from the
        returned dict, hence it is a minimal representation
    :raises TypeError: if the supplied obj is not a HikaruBase (dataclass),
        or if obj is not an instance of a HikaruBase subclass
    """
    if not isinstance(obj, HikaruBase):
        raise TypeError("obj must be a kind of HikaruBase")
    initial_dict = asdict(obj)
    clean_dict = _clean_dict(initial_dict)
    return clean_dict
5,355,840
def _check_that_field_invisible_if_activatable_group_active_and_not(sdk_client: ADCMClient, path, app):
    """Check that field invisible if activatable group active and not."""
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)

    group_name = path.split("/")[-1]
    with allure.step('Check that field is visible if activatable group is not active'):
        group_active = config.group_is_active_by_name(group_name)
        assert group_active
        fields = config.get_field_groups()
        for field in fields:
            assert not field.is_displayed(), field.get_attribute("class")
        group_names = config.get_group_elements()
        assert len(group_names) == 1
        assert group_names[0].text == group_name
        assert group_names, group_names
        config.show_advanced()
        assert config.advanced
    with allure.step('Check that field invisible if activatable group active'):
        config.activate_group_by_name(group_name)
        group_active = config.group_is_active_by_name(group_name)
        assert group_active
        group_names = config.get_group_elements()
        assert group_names, group_names
        assert len(group_names) == 1
        assert group_names[0].text == group_name
        fields = config.get_field_groups()
        for field in fields:
            assert not field.is_displayed(), field.get_attribute("class")
5,355,841
def generate_html_frieze(type, value):
    """
    Gets the data to be able to generate the frieze.
    Calls the function to actually generate HTML.

    Input:
    - Type (session or dataset) of the second input
    - A SQLAlchemy DB session or a dataset (list of mappings)
    Output:
    - The HTML to be displayed
    """
    if type == "session":
        session = value
        mappings = list(get_all_mappings(session))
    elif type == "dataset":
        mappings = value

    holes_raw = calc_all_holes("dataset", mappings)
    holes = []
    for hole in holes_raw:
        holes.append(
            {
                "devices_id": -1000,
                "id": -1000,
                "iova": None,
                "phys_addr": hole[0],
                "size": hole[1],
            }
        )
    for hole in holes:
        hole["devices_id"] = -1

    try:
        mappings = add_device_info(mappings, session)
    except:
        session = create_session()
        mappings = add_device_info(mappings, session)

    mappings_as_dict = []
    for m in mappings:
        mappings_as_dict.append(m.__dict__)

    memory_state = sorted(
        mappings_as_dict + holes, key=lambda mapping: mapping["phys_addr"]
    )
    memory_state = unify_common_space(memory_state)

    html_frieze = create_html_from_memory_state(memory_state, session)
    return html_frieze
5,355,842
def get_folder_name(path, prefix=''):
    """
    Look at the current path and change the name of the experiment
    if it is repeated

    Args:
        path (string): folder path
        prefix (string): prefix to add

    Returns:
        string: unique path to save the experiment
    """
    if prefix == '':
        prefix = path.split('/')[-1]
        path = '/'.join(path.split('/')[:-1])

    folders = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]

    if prefix not in folders:
        path = os.path.join(path, prefix)
    elif not os.path.isdir(os.path.join(path, '{}_0'.format(prefix))):
        path = os.path.join(path, '{}_0'.format(prefix))
    else:
        n = sorted([int(f.split('_')[-1]) for f in folders if '_' in f[-2:]])[-1]
        path = os.path.join(path, '{}_{}'.format(prefix, n+1))

    return path
5,355,843
def instantiate_model(model_to_train: str, dataset_directory: str,
                      performance_directory: str, gpu: Optional[bool] = None):
    """
    A function to create the instance of the imported Class, Classifier.

    Args:
        model_to_train (str): name of the pretrained model to train
        dataset_directory (str): Directory containing the data
        performance_directory (str): The directory where the generated text,
            checkpoints and model_stats will be saved.
        gpu (bool): Boolean indicating availability of a GPU

    Returns:
        None.
    """
    file = get_latest_exp(performance_directory)
    if file is not None:
        filename = re.findall('\\\\([^\\\\]+)\.txt', file)
        exp_no = int((re.findall('_([0-9]+)', filename[0]))[0])
        exp_no += 1
    else:
        exp_no = 1

    Model = Classifier(exp_no, model_to_train, dataset_directory,
                       performance_directory, gpu=gpu)
    return Model
5,355,844
def is_probably_prime(x: int) -> bool:
    """probabilistic primality test (relatively low certainty)"""
    raise NotImplementedError("not implemented!")
5,355,845
def generate_hmac_key():
    """
    Generates a key for use in the :func:`~securitylib.advanced_crypto.hmac` function.

    :returns: :class:`str` -- The generated key, in byte string.
    """
    return generate_secret_key(HMAC_KEY_MINIMUM_LENGTH)
5,355,846
def get_args():
    """! Command line parser for Utterance level classification Leave one
    speaker out schema pipeline -- Find Best Models"""
    parser = argparse.ArgumentParser(
        description='Utterance level classification Leave one '
                    'speaker out schema pipeline -- Find Best Models'
    )
    parser.add_argument('-i', '--input_features_paths', nargs='+',
                        help='File paths of the features you want to '
                             'concatenate and the classify')
    args = parser.parse_args()
    return args
5,355,847
def main() -> None:
    """Take user numerical grade input and provide letter grade equivalent.
    Continue until user enters 'n'"""
    print('Letter Grade Converter')
    continue_character: str = 'y'
    while(continue_character.lower() == 'y'):
        print()
        numerical_grade: int = int(input('Enter numerical grade: '))
        for grade in GRADE_MINIMUMS:
            if numerical_grade >= GRADE_MINIMUMS[grade]:
                print(f'Letter grade: {grade}\n')
                break
        continue_character: str = input('Continue? (y/n): ')
    print('Bye!')
5,355,848
def uscensus(location, **kwargs):
    """US Census Provider

    Params
    ------
    :param location: Your search location you want geocoded.
    :param benchmark: (default=4) Use the following:
        > Public_AR_Current or 4
        > Public_AR_ACSYYYY or 8
        > Public_AR_Census2010 or 9
    :param vintage: (default=4) Use the following:
        > Current_Current or 4
        > Census2010_Current or 410
        > ACS2013_Current or 413
        > ACS2014_Current or 414
        > ACS2015_Current or 415
        > Current_ACS2015 or 8
        > Census2010_ACS2015 or 810
        > ACS2013_ACS2015 or 813
        > ACS2014_ACS2015 or 814
        > ACS2015_ACS2015 or 815
        > Census2010_Census2010 or 910
        > Census2000_Census2010 or 900
    :param method: (default=geocode) Use the following:
        > geocode
        > reverse

    API Reference
    -------------
    https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
    """
    return get(location, provider='uscensus', **kwargs)
5,355,849
def log_error_and_raise(message, exception, logger):
    """
    logs an 'error' message and subsequently throws an exception
    """
    logger.error(message)
    raise exception(message)
5,355,850
def getAllItemsWithName(name, cataloglist):
    """Searches the catalogs in a list for all items matching a given name.

    Returns:
      list of pkginfo items; sorted with newest version first. No precedence
      is given to catalog order.
    """

    def compare_item_versions(a, b):
        """Internal comparison function for use with sorting"""
        return cmp(munkicommon.MunkiLooseVersion(b['version']),
                   munkicommon.MunkiLooseVersion(a['version']))

    itemlist = []
    # we'll throw away any included version info
    name = nameAndVersion(name)[0]

    munkicommon.display_debug1('Looking for all items matching: %s...' % name)
    for catalogname in cataloglist:
        if not catalogname in CATALOG.keys():
            # in case catalogname refers to a non-existent catalog...
            continue
        # is name in the catalog name table?
        if name in CATALOG[catalogname]['named']:
            versionsmatchingname = CATALOG[catalogname]['named'][name]
            for vers in versionsmatchingname.keys():
                if vers != 'latest':
                    indexlist = CATALOG[catalogname]['named'][name][vers]
                    for index in indexlist:
                        thisitem = CATALOG[catalogname]['items'][index]
                        if not thisitem in itemlist:
                            munkicommon.display_debug1(
                                'Adding item %s, version %s from catalog %s...' %
                                (name, thisitem['version'], catalogname))
                            itemlist.append(thisitem)

    if itemlist:
        # sort so latest version is first
        itemlist.sort(compare_item_versions)
    return itemlist
5,355,851
def reco_source_position_sky(cog_x, cog_y, disp_dx, disp_dy, focal_length, pointing_alt, pointing_az):
    """
    Compute the reconstructed source position in the sky

    Parameters
    ----------
    cog_x: `astropy.units.Quantity`
    cog_y: `astropy.units.Quantity`
    disp: DispContainer
    focal_length: `astropy.units.Quantity`
    pointing_alt: `astropy.units.Quantity`
    pointing_az: `astropy.units.Quantity`

    Returns
    -------
    """
    src_x, src_y = disp_to_pos(disp_dx, disp_dy, cog_x, cog_y)
    return camera_to_sky(src_x, src_y, focal_length, pointing_alt, pointing_az)
5,355,852
def segment_annotations(table, num, length, step=None):
    """ Generate a segmented annotation table by stepping across the audio files,
        using a fixed step size (step) and fixed selection window size (length).

        Args:
            table: pandas DataFrame
                Annotation table.
            num: int
                Number of segments
            length: float
                Selection length in seconds.
            step: float
                Selection step size in seconds. If None, the step size is set
                equal to the selection length.

        Returns:
            df: pandas DataFrame
                Annotations table
    """
    if step is None:
        step = length

    segs = []
    for n in range(num):
        # select annotations that overlap with segment
        t1 = n * step
        t2 = t1 + length
        a = table[(table.start < t2) & (table.end > t1)].copy()
        if len(a) > 0:
            # shift and crop annotations
            a['start'] = a['start'].apply(lambda x: max(0, x - t1))
            a['end'] = a['end'].apply(lambda x: min(length, x - t1))
            a['sel_id'] = n  # map to segment
            segs.append(a)

    df = pd.concat(segs)
    df.set_index(keys=['sel_id'], inplace=True, append=True)
    df = df.swaplevel()
    df = df.sort_index()
    return df
5,355,853
def com_google_fonts_check_name_typographicsubfamilyname(ttFont, expected_style):
    """Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries."""
    failed = False
    nametable = ttFont['name']
    win_name = nametable.getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                                 PlatformID.WINDOWS,
                                 WindowsEncodingID.UNICODE_BMP,
                                 WindowsLanguageID.ENGLISH_USA)
    mac_name = nametable.getName(NameID.TYPOGRAPHIC_SUBFAMILY_NAME,
                                 PlatformID.MACINTOSH,
                                 MacintoshEncodingID.ROMAN,
                                 MacintoshLanguageID.ENGLISH)

    if all([win_name, mac_name]):
        if win_name.toUnicode() != mac_name.toUnicode():
            failed = True
            yield FAIL,\
                  Message("mismatch",
                          f'TYPOGRAPHIC_SUBFAMILY_NAME entry'
                          f' for Win "{win_name.toUnicode()}"'
                          f' and Mac "{mac_name.toUnicode()}" do not match.')

    if expected_style.is_ribbi:
        if win_name and win_name.toUnicode() != expected_style.win_style_name:
            failed = True
            yield FAIL,\
                  Message("bad-win-name",
                          f'TYPOGRAPHIC_SUBFAMILY_NAME entry'
                          f' for Win "{win_name.toUnicode()}"'
                          f' must be "{expected_style.win_style_name}".'
                          f' Please note, since the font style is RIBBI,'
                          f' this record can be safely deleted.')

        if mac_name and mac_name.toUnicode() != expected_style.mac_style_name:
            failed = True
            yield FAIL,\
                  Message("bad-mac-name",
                          f'TYPOGRAPHIC_SUBFAMILY_NAME entry'
                          f' for Mac "{mac_name.toUnicode()}"'
                          f' must be "{expected_style.mac_style_name}".'
                          f' Please note, since the font style is RIBBI,'
                          f' this record can be safely deleted.')

    if expected_style.typo_style_name:
        if not win_name:
            failed = True
            yield FAIL,\
                  Message("missing-typo-win",
                          f'TYPOGRAPHIC_SUBFAMILY_NAME for Win is missing.'
                          f' It must be "{expected_style.typo_style_name}".')
        elif win_name.toUnicode() != expected_style.typo_style_name:
            failed = True
            yield FAIL,\
                  Message("bad-typo-win",
                          f'TYPOGRAPHIC_SUBFAMILY_NAME for Win'
                          f' "{win_name.toUnicode()}" is incorrect.'
                          f' It must be "{expected_style.typo_style_name}".')

        if mac_name and mac_name.toUnicode() != expected_style.typo_style_name:
            failed = True
            yield FAIL,\
                  Message("bad-typo-mac",
                          f'TYPOGRAPHIC_SUBFAMILY_NAME for Mac'
                          f' "{mac_name.toUnicode()}" is incorrect.'
                          f' It must be "{expected_style.typo_style_name}".'
                          f' Please note, this record can be safely deleted.')

    if not failed:
        yield PASS, "TYPOGRAPHIC_SUBFAMILY_NAME entries are all good."
5,355,854
def get_vdw_style(vdw_styles, cut_styles, cutoffs):
    """Get the VDW_Style section of the input file

    Parameters
    ----------
    vdw_styles : list
        list of vdw_style for each box, one entry per box
    cut_styles : list
        list of cutoff_style for each box, one entry per box. For a
        box with vdw_style == 'none', the cutoff style is None
    cutoffs : list
        list with cutoffs for each box, one entry per box. For a box
        with vdw_style == 'none', the cutoff is None
    """
    assert len(vdw_styles) == len(cut_styles)
    assert len(vdw_styles) == len(cutoffs)
    valid_vdw_styles = ["lj", "none"]
    valid_cut_styles = {vstyle: [] for vstyle in valid_vdw_styles}
    valid_cut_styles["lj"].append("cut")
    valid_cut_styles["lj"].append("cut_tail")
    valid_cut_styles["lj"].append("cut_switch")
    valid_cut_styles["lj"].append("cut_shift")
    valid_cut_styles["none"].append(None)
    for vdw_style in vdw_styles:
        if vdw_style not in valid_vdw_styles:
            raise ValueError(
                "Unsupported vdw_style: {}. Supported options "
                "include {}".format(vdw_style, vdw_styles)
            )
    for cut_style, vdw_style in zip(cut_styles, vdw_styles):
        if cut_style not in valid_cut_styles[vdw_style]:
            raise ValueError(
                "Unsupported cutoff style: {}. Supported "
                "options for the selected vdw_style ({}) include "
                "{}".format(cut_style, vdw_style, valid_cut_styles[vdw_style])
            )

    for cut_style, cutoff in zip(cut_styles, cutoffs):
        if cut_style == "cut_switch":
            if not isinstance(cutoff, np.ndarray) or len(cutoff) != 2:
                raise ValueError(
                    'Style "cut_switch" requires an inner '
                    "and outer cutoff. Use the "
                    "cutoffs=[inner_cut,outer_cut] "
                    "kwargs option."
                )

    inp_data = """
# VDW_Style"""
    for vdw_style, cut_style, cutoff in zip(vdw_styles, cut_styles, cutoffs):
        if vdw_style == "none":
            inp_data += """
{vdw_style}""".format(
                vdw_style=vdw_style
            )
        else:
            if cut_style == "cut_switch":
                inner_cutoff = cutoff[0]
                outer_cutoff = cutoff[1]
                inp_data += """
{vdw_style} {cut_style} {inner_cutoff} {outer_cutoff}""".format(
                    vdw_style=vdw_style,
                    cut_style=cut_style,
                    inner_cutoff=inner_cutoff,
                    outer_cutoff=outer_cutoff,
                )
            else:
                inp_data += """
{vdw_style} {cut_style} {cutoff}""".format(
                    vdw_style=vdw_style, cut_style=cut_style, cutoff=cutoff
                )

    inp_data += """
!------------------------------------------------------------------------------
"""

    return inp_data
5,355,855
def load_yaml_file(file):
    """
    Loads a yaml file from file system.
    @param file Path to file to be loaded.
    """
    try:
        with open(file, 'r') as yaml:
            kwargs = ruamel.yaml.round_trip_load(yaml, preserve_quotes=True)
            return kwargs
    except subprocess.CalledProcessError as e:
        print("error")
        return(e.output.decode("utf-8"))
5,355,856
def test_arbitrage_make_compatible_quantity_increments_with_imcompatible_increments() -> None:
    """Should raise `ImcompatibleQuantityIncrementsError`.

    Bid order quantity less than ask order quantity.
    Different and imcompatible quantity increments.
    Make compatible.
    """
    ask = ArbitragePayload(
        symbol=SymbolInfo(quantity_increment=Decimal("0.03"), fee=Decimal("0.1")),
        order=OrderInfo(price=Decimal("10.5"), quantity=Decimal("100.15")),
    )
    bid = ArbitragePayload(
        symbol=SymbolInfo(quantity_increment=Decimal("0.1"), fee=Decimal("0.1")),
        order=OrderInfo(price=Decimal("11.5"), quantity=Decimal("50.3")),
    )

    with pytest.raises(ImcompabileQuantityIncrementsError):
        arbitrage(ask=ask, bid=bid)
5,355,857
def freduce(x, axis=None):
    """
    Reduces a spectrum to positive frequencies only
    Works on the last dimension (contiguous in c-stored array)

    :param x: numpy.ndarray
    :param axis: axis along which to perform reduction (last axis by default)
    :return: numpy.ndarray
    """
    if axis is None:
        axis = x.ndim - 1
    siz = list(x.shape)
    siz[axis] = int(np.floor(siz[axis] / 2 + 1))
    return np.take(x, np.arange(0, siz[axis]), axis=axis)
5,355,858
def init_pretraining_params(exe, pretraining_params_path, main_program):
    """load params of pretrained model, NOT including moment, learning_rate"""
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cann't be found." % pretraining_params_path

    def _existed_params(var):
        if not isinstance(var, fluid.framework.Parameter):
            return False
        return os.path.exists(os.path.join(pretraining_params_path, var.name))

    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=_existed_params)
    print("Load pretraining parameters from {}.".format(
        pretraining_params_path))
5,355,859
def sort_shipping_methods(request):
    """Sorts shipping methods after drag 'n drop.
    """
    shipping_methods = request.POST.get("objs", "").split('&')
    assert (isinstance(shipping_methods, list))
    if len(shipping_methods) > 0:
        priority = 10
        for sm_str in shipping_methods:
            sm_id = sm_str.split('=')[1]
            sm_obj = ShippingMethod.objects.get(pk=sm_id)
            sm_obj.priority = priority
            sm_obj.save()
            priority = priority + 10

    result = json.dumps({
        "message": _(u"The shipping methods have been sorted."),
    }, cls=LazyEncoder)

    return HttpResponse(result, content_type='application/json')
5,355,860
def appendRecordData(record_df, record):
    """
    Args:
        record_df (pd.DataFrame):
        record (vcf.model._Record):

    Returns:
        (pd.DataFrame): record_df with an additional row of record (SNP) data.
    """
    # Alternate allele bases
    if len(record.ALT) == 0:
        alt0, alt1 = np.nan, np.nan
    elif len(record.ALT) == 1:
        alt0, alt1 = record.ALT[0], np.nan

    varIdentifier = pd.Series(record.ID, name="varIdentifier")
    df = pd.DataFrame(
        data={"refBase": record.REF, "altAllele0": alt0, "altAllele1": alt1},
        index=varIdentifier)

    record_df = record_df.append(df, ignore_index=False)
    return record_df
5,355,861
def get_removed_channels_from_file(fn):
    """
    Load a list of removed channels from a file.

    Raises
    ------
    * NotImplementedError if the file format isn't supported.

    Parameters
    ----------
    fn : str
        Filename

    Returns
    -------
    to_remove : list of str
        List of channels to remove.
    """
    assert isinstance(fn, str)
    if fn.endswith('.mat'):
        # try:
        data = loadmat(fn)
        # except: for old .mat files in hdf5 format...
        assert('CHANNAMES' in data), f"{fn} must contain CHANNAMES!"
        assert('CHANACTIVE' in data), f"{fn} must contain CHANACTIVE!"
        channel_active = data['CHANACTIVE'].flatten()
        channel_names = np.array(
            [str(i[0]) for i in data['CHANNAMES'].flatten()],
        )
        idx = np.argwhere(channel_active == 0).flatten()
        return channel_names[idx].tolist()
    else:
        raise NotImplementedError(f"Cannot load file: {fn}")
5,355,862
def invertHomogeneous(M, range_space_homogeneous=False, A_property=None):
    """ Return the inverse transformation of a homogeneous matrix.

    A homogeneous matrix :math:`M` represents the transformation
    :math:`y = A x + b` in homogeneous coordinates. More precisely,

    .. math::
        M \tilde{x} =
        \left[ \begin{matrix} A & b \end{matrix} \right]
        \left[ \begin{matrix} x \\ 1 \end{matrix} \right]

    Its inverse is the homogeneous matrix that represents the transformation
    :math:`x = A^{-1} ( y - b )`.

    Parameters
    ----------
    M : numpy array of float, shape (num_dims, num_dims + 1) or (num_dims + 1, num_dims + 1)
        Matrix representing an affine transformation in homogeneous coordinates.
        if ``M.shape == (num_dims + 1, num_dims + 1)``, its last row is
        :math:`[0 1]` so that its output is also in homogeneous coordinates.
    range_space_homogeneous : bool, optional
        If True, the output has an extra row :math:`[ 0 1 ]` appended to the
        bottom so that its range space is also expressed in homogeneous
        coordinates.
    A_property : {'diag', 'ortho'}, optional
        Special property of the submatrix `A` that could make inversion easier.
        If no argument is given, this function just calls `m.np.linalg.pinv`.

    Returns
    -------
    M_inverse : numpy array of float, shape (num_dims, num_dims + 1) or (num_dims + 1, num_dims + 1)
        Inverse transformation corresponding to input `M`.
    """
    if A_property is None:
        invert = m.np.pinv
    elif A_property == 'diag':
        def invert(x):
            return m.np.diag(1 / m.np.diag(A))
    elif A_property == 'ortho':
        invert = m.np.transpose
    else:
        err_str = f"Can't parse keyword argument 'A_property={A_property}'"
        raise ValueError(err_str)

    A, b = fromHomogeneous(M)
    A_inverse = invert(A)
    b_inverse = -A_inverse @ b

    M_inverse = homogeneousMatrix(
        A_inverse, b_inverse,
        range_space_homogeneous=range_space_homogeneous
    )
    return M_inverse
5,355,863
def rename(level_folder: str) -> int:
    """Rename a custom level folder to the correct name."""
    prefix = load_info(level_folder)[PREFIX].strip()
    suffix = load_info(level_folder)[SUFFIX].strip()
    prefix = prefix.translate(str.maketrans('', '', string.punctuation))
    suffix = suffix.translate(str.maketrans('', '', string.punctuation))
    new_name = f'{prefix} {CONNECT} {suffix}'.strip()
    if new_name != level_folder or FORCED:
        os.rename(MAIN_FOLDER + f'/{level_folder}', MAIN_FOLDER + f'/{new_name}')
        print(f"'{level_folder}' is renamed to '{new_name}'.")
        return 1
    return 0
5,355,864
def in_line_mention(feature, features, mentions, line):
    """
    Set *feature* to `True` for mentions that occur on *line*

    Args:
        feature: feature name
        features: mapping from (lgname, lgcode) pair to features to values
        mentions: list of language mentions
        line: FrekiLine object to inspect
    """
    for m in get_window(mentions, line.lineno, line.lineno):
        features[(m.name, m.code)][feature] = True
5,355,865
def show_department(department_id):
    """
    Returns rendered template to show department with its employees.

    :param department_id: department id
    :return: rendered template to show department with its employees
    """
    url = f'{HOST}api/department/{department_id}'
    department = requests.get(url).json()
    return render_template('department.html', department=department)
5,355,866
def archive_scan():
    """
    Returns a dictionary of conversion functions to apply to the parameters of archive_scan.py
    """
    # Dictionary of default value setters, type converters and other applied functions
    d_applied_functions = {
        'favor': [bool_converter, favor_default],
        'cnn': [bool_converter],
        'gpd': [bool_converter],
        'model-name': [apply_default_model_name],
        'weights': [apply_default_weights],
        'features-number': [int_converter],
        'waveform-duration': [float_converter],
        'start': [utc_datetime_converter, start_date_default],
        'end': [utc_datetime_converter, end_date_default],
        'database': [database_filler],
        'threshold': [threshold_converter],
        'batch-size': [int_converter],
        'frequency': [float_converter],
        'trace-size': [float_converter, trace_size_converter],
        'shift': [int_converter],
        'generate-s-files': [string_trimmer],
        'detections-for-event': [int_converter],
        'generate-waveforms': [string_trimmer],
        'register-events': [string_trimmer],
        'no-filter': [bool_converter],
        'no-detrend': [bool_converter],
        'trace-normalization': [bool_converter],
        'wavetool-waveforms': [bool_converter],
        'detection-stations': [bool_converter],
        'plot-positives': [bool_converter],
        'silence-wavetool': [bool_converter],
        'plot-positives-original': [bool_converter],
        'print-scores': [bool_converter],
        'print-precision': [int_converter],
        'combine-events-range': [float_converter],
        'time': [bool_converter],
        'cpu': [bool_converter],
        'print-files': [bool_converter],
        'channel-order': [channel_order_converter],
        'print-params': [bool_converter],
    }

    return d_applied_functions
5,355,867
def esOperador(o):
    """Return True if 'o' is an operator."""
    return o == "+" or o == "-" or o == "/" or o == "*"
5,355,868
def valid_identity(identity):
    """Determines whether or not the provided identity is a valid value."""
    valid = (identity == "homer") or (identity == "sherlock")
    return valid
5,355,869
def is_align_flow(*args):
    """
    is_align_flow(ea) -> bool
    """
    return _ida_nalt.is_align_flow(*args)
5,355,870
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the OpenTherm Gateway binary sensors."""
    if discovery_info is None:
        return
    gw_vars = hass.data[DATA_OPENTHERM_GW][DATA_GW_VARS]
    sensor_info = {
        # [device_class, friendly_name]
        gw_vars.DATA_MASTER_CH_ENABLED: [
            None, "Thermostat Central Heating Enabled"],
        gw_vars.DATA_MASTER_DHW_ENABLED: [
            None, "Thermostat Hot Water Enabled"],
        gw_vars.DATA_MASTER_COOLING_ENABLED: [
            None, "Thermostat Cooling Enabled"],
        gw_vars.DATA_MASTER_OTC_ENABLED: [
            None, "Thermostat Outside Temperature Correction Enabled"],
        gw_vars.DATA_MASTER_CH2_ENABLED: [
            None, "Thermostat Central Heating 2 Enabled"],
        gw_vars.DATA_SLAVE_FAULT_IND: [
            DEVICE_CLASS_PROBLEM, "Boiler Fault Indication"],
        gw_vars.DATA_SLAVE_CH_ACTIVE: [
            DEVICE_CLASS_HEAT, "Boiler Central Heating Status"],
        gw_vars.DATA_SLAVE_DHW_ACTIVE: [
            DEVICE_CLASS_HEAT, "Boiler Hot Water Status"],
        gw_vars.DATA_SLAVE_FLAME_ON: [
            DEVICE_CLASS_HEAT, "Boiler Flame Status"],
        gw_vars.DATA_SLAVE_COOLING_ACTIVE: [
            DEVICE_CLASS_COLD, "Boiler Cooling Status"],
        gw_vars.DATA_SLAVE_CH2_ACTIVE: [
            DEVICE_CLASS_HEAT, "Boiler Central Heating 2 Status"],
        gw_vars.DATA_SLAVE_DIAG_IND: [
            DEVICE_CLASS_PROBLEM, "Boiler Diagnostics Indication"],
        gw_vars.DATA_SLAVE_DHW_PRESENT: [None, "Boiler Hot Water Present"],
        gw_vars.DATA_SLAVE_CONTROL_TYPE: [None, "Boiler Control Type"],
        gw_vars.DATA_SLAVE_COOLING_SUPPORTED: [None, "Boiler Cooling Support"],
        gw_vars.DATA_SLAVE_DHW_CONFIG: [
            None, "Boiler Hot Water Configuration"],
        gw_vars.DATA_SLAVE_MASTER_LOW_OFF_PUMP: [
            None, "Boiler Pump Commands Support"],
        gw_vars.DATA_SLAVE_CH2_PRESENT: [
            None, "Boiler Central Heating 2 Present"],
        gw_vars.DATA_SLAVE_SERVICE_REQ: [
            DEVICE_CLASS_PROBLEM, "Boiler Service Required"],
        gw_vars.DATA_SLAVE_REMOTE_RESET: [None, "Boiler Remote Reset Support"],
        gw_vars.DATA_SLAVE_LOW_WATER_PRESS: [
            DEVICE_CLASS_PROBLEM, "Boiler Low Water Pressure"],
        gw_vars.DATA_SLAVE_GAS_FAULT: [
            DEVICE_CLASS_PROBLEM, "Boiler Gas Fault"],
        gw_vars.DATA_SLAVE_AIR_PRESS_FAULT: [
            DEVICE_CLASS_PROBLEM, "Boiler Air Pressure Fault"],
        gw_vars.DATA_SLAVE_WATER_OVERTEMP: [
            DEVICE_CLASS_PROBLEM, "Boiler Water Overtemperature"],
        gw_vars.DATA_REMOTE_TRANSFER_DHW: [
            None, "Remote Hot Water Setpoint Transfer Support"],
        gw_vars.DATA_REMOTE_TRANSFER_MAX_CH: [
            None, "Remote Maximum Central Heating Setpoint Write Support"],
        gw_vars.DATA_REMOTE_RW_DHW: [
            None, "Remote Hot Water Setpoint Write Support"],
        gw_vars.DATA_REMOTE_RW_MAX_CH: [
            None, "Remote Central Heating Setpoint Write Support"],
        gw_vars.DATA_ROVRD_MAN_PRIO: [
            None, "Remote Override Manual Change Priority"],
        gw_vars.DATA_ROVRD_AUTO_PRIO: [
            None, "Remote Override Program Change Priority"],
        gw_vars.OTGW_GPIO_A_STATE: [None, "Gateway GPIO A State"],
        gw_vars.OTGW_GPIO_B_STATE: [None, "Gateway GPIO B State"],
        gw_vars.OTGW_IGNORE_TRANSITIONS: [None, "Gateway Ignore Transitions"],
        gw_vars.OTGW_OVRD_HB: [None, "Gateway Override High Byte"],
    }
    sensors = []
    for var in discovery_info:
        device_class = sensor_info[var][0]
        friendly_name = sensor_info[var][1]
        entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, var, hass=hass)
        sensors.append(OpenThermBinarySensor(entity_id, var, device_class, friendly_name))
    async_add_entities(sensors)
5,355,871
def avro_rdd(ctx, sqlContext, hdir, date=None, verbose=None):
    """
    Parse avro-snappy files on HDFS
    :returns: a Spark RDD object
    """
    if date is None:
        date = time.strftime("year=%Y/month=%-m/day=%-d", time.gmtime(time.time() - 60*60*24))
        path = '%s/%s' % (hdir, date)
    elif len(str(date)) == 8:  # YYYYMMDD
        ddd = dt.strptime(str(date), "%Y%m%d")
        date = time.strftime("year=%Y/month=%-m/day=%-d", ddd.utctimetuple())
        path = '%s/%s' % (hdir, date)
    else:
        path = hdir
        if date:
            path = '%s/%s' % (hdir, date)
    print("### hdir", path, type(path))
    if isinstance(path, list):
        afiles = path
    else:
        # get avro files from HDFS
        afiles = avro_files(path, verbose=verbose)
    print("### avro_files", afiles)

    # define newAPIHadoopFile parameters, java classes
    aformat = "org.apache.avro.mapreduce.AvroKeyInputFormat"
    akey = "org.apache.avro.mapred.AvroKey"
    awrite = "org.apache.hadoop.io.NullWritable"
    aconv = "org.apache.spark.examples.pythonconverters.AvroWrapperToJavaConverter"

    rdd = []
    # load data from HDFS
    if len(afiles) == 0:
        rdd = ctx.emptyRDD()
    else:
        rdd = ctx.union([ctx.newAPIHadoopFile(f, aformat, akey, awrite, aconv) for f in afiles])

    # the records are stored as [(dict, None), (dict, None)], therefore we take first element
    # and assign them to new rdd
    avro_rdd = rdd.map(lambda x: x[0])
    records = avro_rdd.take(1)  # take function will return list of records
    if verbose:
        print("### avro records", records, type(records))
    return avro_rdd
5,355,872
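# Hypothetical usage sketch for avro_rdd above (added for illustration, not part of the
# original snippet). It assumes a working PySpark installation, that avro_rdd/avro_files
# are importable, and that the AvroWrapperToJavaConverter class is on the Spark classpath;
# the HDFS path is a placeholder.
from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext(appName="avro-reader")
sql_ctx = SQLContext(sc)
records = avro_rdd(sc, sql_ctx, "hdfs:///path/to/avro/data", date=20200101, verbose=True)
print(records.take(1))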
def generate_gesture_trace(position):
    """
    Generate the swipe trace for a gesture captcha.
    :param position:
    :return:
    """
    x = []
    y = []
    for i in position:
        x.append(int(i.split(',')[0]))
        y.append(int(i.split(',')[1]))
    trace_x = []
    trace_y = []
    for _ in range(0, 2):
        tepx = [x[_], x[_ + 1], x[_ + 2]]
        tepy = [y[_], y[_ + 1], y[_ + 2]]
        [a, b, c] = get_func(tepx, tepy)
        if _ == 0:
            for i in range(x[0], x[1]):
                trace_x.append(i)
                trace_y.append(a * i * i + b * i + c)
            for i in range(x[1], x[2]):
                trace_x.append(i)
                if random.randint(1, 5) == 1:
                    trace_y.append((float(y[2] - y[1]) / (x[2] - x[1])) * (i - x[1]) + y[1] + random.randint(-1, 1))
                else:
                    trace_y.append((float(y[2] - y[1]) / (x[2] - x[1])) * (i - x[1]) + y[1])
        else:
            for i in range(x[2], x[3]):
                trace_x.append(i)
                trace_y.append(a * i * i + b * i + c)
    trace_x = [int(i) for i in trace_x]
    trace_y = [int(i) for i in trace_y]
    last_trace_x = []
    last_trace_y = []
    plot_line(trace_x, trace_y, [0, 280], [0, 158])
    xx = 0
    while xx < len(trace_x) - 1:
        last_trace_x.append(trace_x[xx])
        last_trace_y.append(trace_y[xx])
        xx += random.randint(1, 4)
    last_trace_x.append(trace_x[-1])
    last_trace_y.append(trace_y[-1])
    timestamp_list = []
    timestamp = random.randint(180, 220)
    for i in range(len(last_trace_x)):
        t = random.randint(5, 10)
        timestamp += t
        timestamp_list.append(timestamp)
        i += 1
    trace = [{
        'p': ','.join([str(last_trace_x[0]), str(last_trace_y[0])]),
        't': 1
    }]
    for i in range(len(last_trace_x)):
        trace.append({
            'p': ','.join([str(last_trace_x[i]), str(last_trace_y[i])]),
            't': timestamp_list[i]
        })
    trace.append({
        'p': ','.join([str(last_trace_x[-1]), str(last_trace_y[-1])]),
        't': timestamp_list[-1] + random.randint(50, 100)
    })
    return x[3] - x[0], trace
5,355,873
def parse_arguments():
    """parse_arguments"""
    parser = argparse.ArgumentParser(description="MindSpore Tensorflow weight transfer")
    parser.add_argument("--pretrained", default=None, type=str)
    parser.add_argument("--name", default="imagenet22k", choices=["imagenet22k", ])
    args = parser.parse_args()
    return args
5,355,874
def mad_daub_noise_est(x, c=0.6744):
    """ Estimate the statistical dispersion of the noise with Median Absolute
    Deviation on the first order detail coefficients of the 1d-Daubechies
    wavelets transform.
    """
    try:
        _, cD = pywt.wavedec(x, pywt.Wavelet('db3'), level=1)
    except ValueError:
        cD = pywt.wavedec(x, pywt.Wavelet('db3'), level=0)
    return mad(cD, c=c)
5,355,875
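# Hypothetical usage sketch for mad_daub_noise_est above (illustration only). Assumes
# numpy, pywt and the `mad` helper are available in the snippet's module.
import numpy as np

rng = np.random.RandomState(0)
signal = np.sin(np.linspace(0, 4 * np.pi, 512)) + 0.1 * rng.randn(512)
sigma_hat = mad_daub_noise_est(signal)
print("estimated noise dispersion:", sigma_hat)  # roughly 0.1 for this synthetic signal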
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.

    From Django's "django/template/defaultfilters.py".
    Copied from:
    https://github.com/django/django/blob/a6b3938afc0204093b5356ade2be30b461a698c5/django/utils/text.py#L394
    """
    import unicodedata
    value = str(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value.lower()).strip()
    return re.sub(r'[-\s]+', '-', value)
5,355,876
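# Hypothetical usage of slugify above (illustration only); mirrors Django's behaviour.
print(slugify("Hello, World!"))                      # -> hello-world
print(slugify("Grüße aus Köln"))                     # non-ASCII is normalized or dropped
print(slugify("Grüße aus Köln", allow_unicode=True))  # keeps unicode word characters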
def get_categories() -> dict:
    """
    :return: dictionary with a hierarchy of all categories
    """
    with open("../src/categories.json", "r", encoding="utf-8") as f:
        return json.load(f)
5,355,877
def test_set_parameters_fusion(backend):
    """Check gate fusion when ``circuit.set_parameters`` is used."""
    c = Circuit(2)
    c.add(gates.RX(0, theta=0.1234))
    c.add(gates.RX(1, theta=0.1234))
    c.add(gates.CNOT(0, 1))
    c.add(gates.RY(0, theta=0.1234))
    c.add(gates.RY(1, theta=0.1234))
    fused_c = c.fuse()
    K.assert_allclose(fused_c(), c())

    c.set_parameters(4 * [0.4321])
    fused_c.set_parameters(4 * [0.4321])
    K.assert_allclose(fused_c(), c())
5,355,878
def assert_cylindrical_isclose(cyl1: Cylindrical, cyl2: Cylindrical) -> None:
    """Checks two cylindricals are roughly equal."""
    assert isclose(cyl1.p, cyl2.p)
    assert isclose(cyl1.phi, cyl2.phi)
    assert isclose(cyl1.z, cyl2.z)
5,355,879
def check_file_integrity(indir, outdir):
    """ Parse files in dir and check integrity """
    dic_files = {}
    dic_param = {}
    dic_integ = {}
    for f in os.listdir(indir):
        path = os.path.join(indir, f)
        #if os.path.isdir(path)==True:
        #    print (str(f) + "is a dir" )
        #elif os.path.isfile(path):
        if os.path.isfile(path):
            #dic_param['size']=Path(path).stat().st_size
            dic_param['size'] = os.path.getsize(path)
            md5hasher = FileHash('md5')
            dic_param['md5'] = md5hasher.hash_file(path)
            dic_files[f] = dic_param
            #print( f + " : It is a normal file")
            # Reinitialize dict
            dic_param = {}
        #else:
        #    print(f + "It is a special file (socket, FIFO, device file)" )
    #print (dic_files)
    return dic_files
5,355,880
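# Hypothetical usage of check_file_integrity above (illustration only). Assumes the
# `filehash` package (FileHash) is installed, as in the snippet; the paths are
# placeholders, and note that the outdir argument is currently unused by the function.
import json

report = check_file_integrity("/tmp", "/tmp/out")
print(json.dumps(report, indent=2))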
def get_ax(rows=1, cols=1, size=16):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.

    Adjust the size attribute to control how big to render images
    """
    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
    return ax


### LOAD VALIDATION SET
5,355,881
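# Hypothetical usage of get_ax above (illustration only); assumes matplotlib.pyplot is
# imported as `plt` in the surrounding module, as the snippet does.
ax = get_ax(rows=1, cols=2, size=8)
ax[0].plot([0, 1, 2], [0, 1, 4])
ax[1].imshow([[0, 1], [1, 0]], cmap="gray")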
def build_arg_parser():
    """
    Builds an argparse object to handle command-line arguments passed in.
    """
    parser = argparse.ArgumentParser(description="Loads an ontology file in " +
                                     "OBO file format into a Neo4j graph database.")
    parser.add_argument('-i', '--input_obo_file', required=True,
                        help="The input OBO file")
    parser.add_argument('-s', '--neo4j_server_address', required=True,
                        help="The address to the Neo4j server. Must include port number")
    parser.add_argument('-t', '--typedefs', default="is_a",
                        help="Typedefs that are present in this ontology. These will be used " +
                             "to define the types of relationships supported in the input ontology")
    parser.add_argument('-r', '--root_node', required=True, action="append",
                        default=[], help='DOID\'s for any root nodes in this ontology')
    args = parser.parse_args()
    return args
5,355,882
def check_layer_hessians_psd(layers):
    """Check layer Hessians for positive semi-definiteness."""
    for l in layers:
        if isinstance(l, HBPLinear):
            assert all_eigvals_nonnegative(
                matrix_from_mvp(l.bias.hvp, dims=2 * (l.bias.numel(),))
            )
            assert all_eigvals_nonnegative(
                matrix_from_mvp(l.weight.hvp, dims=2 * (l.weight.numel(),))
            )
5,355,883
def createSynthModel():
    """Return the modeling mesh, the porosity distribution and the
       parametric mesh for inversion.
    """
    # Create the synthetic model
    world = mt.createCircle(boundaryMarker=-1, segments=64)
    tri = mt.createPolygon([[-0.8, -0], [-0.5, -0.7], [0.7, 0.5]],
                           isClosed=True, area=0.0015)
    c1 = mt.createCircle(radius=0.2, pos=[-0.2, 0.5], segments=32,
                         area=0.0025, marker=3)
    c2 = mt.createCircle(radius=0.2, pos=[0.32, -0.3], segments=32,
                         area=0.0025, marker=3)

    poly = mt.mergePLC([world, tri, c1, c2])

    poly.addRegionMarker([0.0, 0, 0], 1, area=0.0015)
    poly.addRegionMarker([-0.9, 0, 0], 2, area=0.0015)

    c = mt.createCircle(radius=0.99, segments=16, start=np.pi, end=np.pi*3)
    [poly.createNode(p.pos(), -99) for p in c.nodes()]

    mesh = pg.meshtools.createMesh(poly, q=34.4, smooth=[1, 10])
    mesh.scale(1.0/5.0)
    mesh.rotate([0., 0., 3.1415/3])
    mesh.rotate([0., 0., 3.1415])

    petro = pg.solver.parseArgToArray([[1, 0.9], [2, 0.6], [3, 0.3]],
                                      mesh.cellCount(), mesh)

    # Create the parametric mesh that only reflect the domain geometry
    world = mt.createCircle(boundaryMarker=-1, segments=32, area=0.0051)
    paraMesh = pg.meshtools.createMesh(world, q=34.0, smooth=[1, 10])
    paraMesh.scale(1.0/5.0)

    return mesh, paraMesh, petro
5,355,884
def edits_dir():
    """
    Return the directory for the editable files (used by the website).
    """
    return _mkifnotexists("")
5,355,885
def get_expected(stage, test_block_config, sessions):
    """Get expected responses for each type of request

    Though only 1 request can be made, it can cause multiple responses.

    Because we need to subscribe to MQTT topics, which might be formatted from
    keys from included files, the 'expected'/'response' needs to be formatted
    BEFORE running the request.

    Args:
        stage (dict): test stage
        sessions (dict): all available sessions

    Returns:
        dict: mapping of request type: expected response dict
    """
    plugins = load_plugins(test_block_config)

    expected = {}

    for p in plugins:
        if p.plugin.response_block_name in stage:
            logger.debug("Getting expected response for %s", p.name)
            plugin_expected = p.plugin.get_expected_from_request(stage, test_block_config, sessions[p.name])
            expected[p.name] = plugin_expected

    return expected
5,355,886
def convolve_hrf(X, onsets, durations, n_vol, tr, ops=100):
    """
    Convolve each X's column iteratively with HRF and align with the timeline of BOLD signal

    Parameters
    ----------
    X[array]: [n_event, n_sample]
    onsets[array_like]: in sec. size = n_event
    durations[array_like]: in sec. size = n_event
    n_vol[int]: the number of volumes of BOLD signal
    tr[float]: repeat time in second
    ops[int]: oversampling number per second

    Returns
    -------
    X_hrfed[array]: the result after convolution and alignment
    """
    assert np.ndim(X) == 2, 'X must be a 2D array'
    assert X.shape[0] == len(onsets) and X.shape[0] == len(durations), \
        'The length of onsets and durations should be matched with the number of events.'
    assert ops in (10, 100, 1000), 'Oversampling rate must be one of the (10, 100, 1000)!'

    # unify the precision
    decimals = int(np.log10(ops))
    onsets = np.round(np.asarray(onsets), decimals=decimals)
    durations = np.round(np.asarray(durations), decimals=decimals)
    tr = np.round(tr, decimals=decimals)

    n_clipped = 0  # the number of clipped time points earlier than the start point of response
    onset_min = onsets.min()
    if onset_min > 0:
        # The earliest event's onset is later than the start point of response.
        # We supplement it with zero-value event to align with the response.
        X = np.insert(X, 0, np.zeros(X.shape[1]), 0)
        onsets = np.insert(onsets, 0, 0, 0)
        durations = np.insert(durations, 0, onset_min, 0)
        onset_min = 0
    elif onset_min < 0:
        print("The earliest event's onset is earlier than the start point of response.\n"
              "We clip the earlier time points after hrf_convolution to align with the response.")
        n_clipped = int(-onset_min * ops)

    # do convolution in batches for trade-off between speed and memory
    batch_size = int(100000 / ops)
    bat_indices = np.arange(0, X.shape[-1], batch_size)
    bat_indices = np.r_[bat_indices, X.shape[-1]]

    vol_t = (np.arange(n_vol) * tr * ops).astype(int)  # compute volume acquisition timing
    n_time_point = int(((onsets + durations).max() - onset_min) * ops)
    X_hrfed = np.zeros([n_vol, 0])
    for idx, bat_idx in enumerate(bat_indices[:-1]):
        X_bat = X[:, bat_idx:bat_indices[idx+1]]
        # generate X raw time course
        X_tc = np.zeros((n_time_point, X_bat.shape[-1]), dtype=np.float32)
        for i, onset in enumerate(onsets):
            onset_start = int(onset * ops)
            onset_end = int(onset_start + durations[i] * ops)
            X_tc[onset_start:onset_end, :] = X_bat[i, :]

        # generate hrf kernel
        hrf = spm_hrf(tr, oversampling=tr*ops)
        hrf = hrf[:, np.newaxis]

        # convolve X raw time course with hrf kernal
        X_tc_hrfed = convolve(X_tc, hrf, method='fft')
        X_tc_hrfed = X_tc_hrfed[n_clipped:, :]

        # downsample to volume timing
        X_hrfed = np.c_[X_hrfed, X_tc_hrfed[vol_t, :]]

        print('hrf convolution: sample {0} to {1} finished'.format(bat_idx+1, bat_indices[idx+1]))

    return X_hrfed
5,355,887
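# Hypothetical usage sketch for convolve_hrf above (illustration only). Assumes numpy,
# scipy.signal.convolve and an spm_hrf implementation (e.g. from nilearn) are imported
# in the snippet's module under those names.
import numpy as np

onsets = [0.0, 10.0, 20.0]      # event onsets in seconds
durations = [1.0, 1.0, 1.0]
X = np.eye(3)                   # one regressor column per event
X_hrfed = convolve_hrf(X, onsets, durations, n_vol=15, tr=2.0, ops=100)
print(X_hrfed.shape)            # -> (15, 3)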
def flatten(x, params):
    """
    Plain ol' 2D flatten

    :param x: input tensor
    :param params: {dict} hyperparams (sub-selection)
    :return: output tensor
    """
    return layers.Flatten()(x)
5,355,888
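# Hypothetical usage of flatten above (illustration only). Assumes the `layers` module
# used by the snippet is Keras (e.g. tensorflow.keras.layers).
from tensorflow.keras import Input, Model

inp = Input(shape=(8, 8, 3))
out = flatten(inp, params={})   # params is unused by this wrapper
model = Model(inp, out)
model.summary()                 # flattened output shape should be (None, 192)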
def xml_unescape(text):
    """
    Do the inverse of `xml_escape`.

    Parameters
    ----------
    text: str
        The text to be unescaped.

    Returns
    -------
    unescaped_text: str
    """
    return unescape(text, xml_unescape_table)
5,355,889
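# Hypothetical usage of xml_unescape above (illustration only). The exact result depends
# on the module-level xml_unescape_table, which is not shown here.
print(xml_unescape("3 &lt; 5 &amp;&amp; 5 &gt; 3"))   # typically -> 3 < 5 && 5 > 3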
def p_entry_open_close(t):
    """entry : comment
             | open
             | close
             | note
             | balance
             | pad"""
    t[1].build()
    t[0] = t[1]
5,355,890
def test_validate_variants(dummy_data):
    """Ensure that only valid variants are being returned in the correct format"""
    bam_files, vcf_files = dummy_data
    registry = Registry(bam_files, vcf_files)
    random_start = bam_files[2]
    test_colony = Colony(random_start, registry)
    print(random_start)
    print(registry.colony_list)
    print(registry.export_dataframe())
    # TODO: More rigorous testing
    assert len(test_colony.valid_variants) == 1, "Incorrect variant filtering."
5,355,891
def tseb_pt(T_air, T_rad, u, p, z, Rs_1, Rs24, vza, zs, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, albedo, ndvi, lai, clump, hc, time, t_rise, t_end, leaf_width, a_PT_in=1.32, iterations=35): """Priestley-Taylor TSEB Calculates the Priestley Taylor TSEB fluxes using a single observation of composite radiometric temperature and using resistances in series. Parameters ---------- T_air : ee.Image Air temperature (Kelvin). T_rad : ee.Image Radiometric composite temperature (Kelvin). u : ee.Image Wind speed above the canopy (m s-1). p : ee.Image Atmospheric pressure (kPa) z : ee.Image Elevation (m) Rs_1 : ee.Image Overpass insolation (w m-2) Rs24 : ee.Image Daily insolation (w m-2) vza : float View Zenith Angle (radians). zs : ee.Image Solar Zenith Angle (radians). aleafv : ee.Image aleafn : ee.Image aleafl : ee.Image adeadv : ee.Image adeadn : ee.Image adeadl : ee.Image albedo : ee.Image ndvi : ee.Image Normalized Difference Vegetation Index lai : ee.Image Effective Leaf Area Index (m2 m-2). clump : ee.Image hc : ee.Image Canopy height (m). time t_rise : ee.Image t_end : ee.Image leaf_width : ee.Image Average/effective leaf width (m) a_PT_in : float, optional Priestley Taylor coefficient for canopy potential transpiration (the default is 1.32). iterations: int, optional Number of iterations of main calculation (the default is 35) Returns ------- ET : ee.Image Evapotranspiration (mm). References ---------- .. [Norman1995] J.M. Norman, W.P. Kustas, & K.S. Humes (1995), Source approach for estimating soil and vegetation energy fluxes in observations of directional radiometric surface temperature, Agricultural and Forest Meteorology, Volume 77, Issues 3-4, Pages 263-293, http://dx.doi.org/10.1016/0168-1923(95)02265-Y. .. [Kustas1999] W.P. Kustas, & J.M. Norman (1999), Evaluation of soil and vegetation heat flux predictions using a simple two-source model with radiometric temperatures for partial canopy cover, Agricultural and Forest Meteorology, Volume 94, Issue 1, Pages 13-29, http://dx.doi.org/10.1016/S0168-1923(99)00005-2. """ # print('\nINPUTS') # print('T_rad: {:20.14f}'.format(float(utils.image_value(T_rad).values()[0]))) # print('T_air: {:20.14f}'.format(float(utils.image_value(T_air).values()[0]))) # print('u: {:20.14f}'.format(float(utils.image_value(u).values()[0]))) # print('Rs_1: {:20.14f}'.format(float(utils.image_value(Rs_1).values()[0]))) # print('Rs24: {:20.14f}'.format(float(utils.image_value(Rs24).values()[0]))) # # print('vza: {:20.14f}'.format(float(utils.image_value(vza).values()[0]))) # print('zs: {:20.14f}'.format(float(utils.image_value(zs).values()[0]))) # print('albedo: {:20.14f}'.format(float(utils.image_value(albedo).values()[0]))) # print('ndvi: {:20.14f}'.format(float(utils.image_value(ndvi).values()[0]))) # print('lai: {:20.14f}'.format(float(utils.image_value(lai).values()[0]))) # print('clump: {:20.14f}'.format(float(utils.image_value(clump).values()[0]))) # print('hc: {:20.14f}'.format(float(utils.image_value(hc).values()[0]))) # print('time: {:20.14f}'.format(float(utils.image_value(time).values()[0]))) # print('t_rise: {:20.14f}'.format(float(utils.image_value(t_rise).values()[0]))) # print('t_end: {:20.14f}'.format(float(utils.image_value(t_end).values()[0]))) # ************************************************************************ # Correct Clumping Factor f_green = 1. 
# LAI for leaf spherical distribution F = lai.expression('lai * clump', {'lai': lai, 'clump': clump}) # Fraction cover at nadir (view=0) fc = F.expression('1.0 - exp(-0.5 * F)', {'F': F}) \ .clamp(0.01, 0.9) # LAI relative to canopy projection only lai_c = lai.expression('lai / fc', {'lai': lai, 'fc': fc}) # Houborg modification (according to Anderson et al. 2005) fc_q = lai \ .expression('1 - (exp(-0.5 * F / cos(vza)))', {'F': F, 'vza': vza}) \ .clamp(0.05, 0.90) # Brutsaert (1982) z0m = hc.expression('hc * 0.123', {'hc': hc}) # CGM - add(0) is to mimic numpy copy, check if needed z0h = z0m.add(0) d_0 = hc.expression('hc * (2.0 / 3.0)', {'hc': hc}) # Correction of roughness parameters for bare soils (F < 0.1) d_0 = d_0.where(F.lte(0.1), 0.00001) z0m = z0m.where(F.lte(0.1), 0.01) z0h = z0h.where(F.lte(0.1), 0.0001) # Correction of roughness parameters for water bodies # (NDVI < 0 and albedo < 0.05) water_mask = ndvi.lte(0).And(albedo.lte(0.05)) d_0 = d_0.where(water_mask, 0.00001) z0m = z0m.where(water_mask, 0.00035) z0h = z0h.where(water_mask, 0.00035) # Check to avoid division by 0 in the next computations z0h = z0h.where(z0h.eq(0), 0.001) z0m = z0m.where(z0m.eq(0), 0.01) # DEADBEEF # z_u = ee.Number(50.0) # z_t = ee.Number(50.0) z_u = ee.Image.constant(50.0) z_t = ee.Image.constant(50.0) # z_u = lai.multiply(0).add(50) # z_t = lai.multiply(0).add(50) # Parameters for In-Canopy Wind Speed Extinction leaf = lai.expression( '(0.28 * (F ** (0.66667)) * (hc ** (0.33333)) * ' '(leaf_width ** (-0.33333)))', {'F': F, 'hc': hc, 'leaf_width': leaf_width}) leaf_c = lai.expression( '(0.28 * (lai_c ** (0.66667)) * (hc ** (0.33333)) * ' '(leaf_width ** (-0.33333)))', {'lai_c': lai_c, 'hc': hc, 'leaf_width': leaf_width}) leaf_s = lai.expression( '(0.28 * (0.1 ** (0.66667)) * (hc ** (0.33333)) * ' '(leaf_width ** (-0.33333)))', {'hc': hc, 'leaf_width': leaf_width}) # ************************************************************************ # Atmospheric Parameters # Saturation vapour pressure [kPa] (FAO56 3-8) e_s = T_air.expression( '0.6108 * exp((17.27 * (T_air - 273.16)) / ((T_air - 273.16) + 237.3))', {'T_air': T_air}) # Slope of the saturation vapor pressure [kPa] (FAO56 3-9) Ss = T_air.expression( '4098. * e_s / (((T_air - 273.16) + 237.3) ** 2)', {'e_s': e_s, 'T_air': T_air}) # Latent heat of vaporization (~2.45 at 20 C) [MJ kg-1] (FAO56 3-1) lambda1 = T_air.expression( '(2.501 - (2.361e-3 * (T_air - 273.16)))', {'T_air': T_air}) # Psychrometric constant [kPa C-1] (FAO56 3-10) g = p.expression('1.615E-3 * p / lambda1', {'p': p, 'lambda1': lambda1}) # ************************************************************************ # Initialization of a_PT = albedo.multiply(0).add(a_PT_in) # a_PT = ee.Image.constant(a_PT_in) # a_PT = mask.multiply(a_PT) # CGM - This was also being computed inside albedo_separation function below # Commented out from here for now. # e_atm = T_air.expression( # '1.0 - (0.2811 * (exp(-0.0003523 * ((T_air - 273.16) ** 2))))', # {'T_air': T_air}) Rs_c, Rs_s, albedo_c, albedo_s = tseb_utils.albedo_separation( albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, zs) # CGM - Moved emissivity calculation to separate function. # I removed the Rs0 check. e_atm = tseb_utils.emissivity(T_air) # p = T_air.expression( # '101.3 * (((T_air - (0.0065 * z)) / T_air) ** 5.26)', # {'T_air': T_air, 'z': z}) # Density of air? 
(kg m-3) r_air = T_air.expression( '101.3 * (((T_air - (0.0065 * z)) / T_air) ** 5.26) / 1.01 / T_air / 0.287', {'T_air': T_air, 'z': z}) cp = ee.Number(1004.16) # cp = ee.Image.constant(1004.16) # Assume neutral conditions on first iteration (use T_air for Ts and Tc) # CGM - Using lai for F to match Python code u_attr = tseb_utils.compute_u_attr( u=u, d0=d_0, z0m=z0m, z_u=z_u, fm=0) r_ah = tseb_utils.compute_r_ah( u_attr=u_attr, d0=d_0, z0h=z0h, z_t=z_t, fh=0) # CGM - Why is this function is passing "lai" to "F"? r_s = tseb_utils.compute_r_s( u_attr=u_attr, T_s=T_air, T_c=T_air, hc=hc, F=lai, d0=d_0, z0m=z0m, leaf=leaf, leaf_s=leaf_s, fm_h=0) r_x = tseb_utils.compute_r_x( u_attr=u_attr, hc=hc, F=lai, d0=d_0, z0m=z0m, xl=leaf_width, leaf_c=leaf_c, fm_h=0) # r_ah, r_s, r_x, u_attr = tseb_utils.compute_resistance( # u, T_air, T_air, hc, lai, d_0, z0m, z0h, z_u, z_t, leaf_width, leaf, # leaf_s, leaf_c, 0, 0, 0) T_c = T_air # DEADBEEF - In IDL, this calculation is in C, not K? T_s = lai.expression( '((T_rad - 273.16) - (fc_q * (T_c - 273.16))) / (1 - fc_q) + 273.16', {'T_rad': T_rad, 'T_c': T_c, 'fc_q': fc_q}) # T_s = lai.expression( # '(T_rad - (fc_q * T_c)) / (1 - fc_q)', # {'T_rad': T_rad, 'T_c': T_c, 'fc_q': fc_q}) # CGM - Initialize to match T_air shape # This doesn't seem to do anything, commenting out for now # H_iter = T_air.multiply(0).add(200.16) EF_s = T_air.multiply(0) # print('\nF: {:20.14f}'.format(float(utils.image_value(F).values()[0]))) # print('fc: {:20.14f}'.format(float(utils.image_value(fc).values()[0]))) # print('lai_c: {:20.14f}'.format(float(utils.image_value(lai_c).values()[0]))) # print('fc_q: {:20.14f}'.format(float(utils.image_value(fc_q).values()[0]))) # print('z0h: {:20.14f}'.format(float(utils.image_value(z0h).values()[0]))) # print('z0m: {:20.14f}'.format(float(utils.image_value(z0m).values()[0]))) # print('leaf: {:20.14f}'.format(float(utils.image_value(leaf).values()[0]))) # print('leaf_c: {:20.14f}'.format(float(utils.image_value(leaf_c).values()[0]))) # print('leaf_s: {:20.14f}'.format(float(utils.image_value(leaf_s).values()[0]))) # print('e_s: {:20.14f}'.format(float(utils.image_value(e_s).values()[0]))) # print('Ss: {:20.14f}'.format(float(utils.image_value(Ss).values()[0]))) # print('lambda1: {:20.14f}'.format(float(utils.image_value(lambda1).values()[0]))) # print('p: {:20.14f}'.format(float(utils.image_value(p).values()[0]))) # print('z: {:20.14f}'.format(float(utils.image_value(z).values()[0]))) # print('g: {:20.14f}'.format(float(utils.image_value(g).values()[0]))) # print('a_PT: {:20.14f}'.format(float(utils.image_value(a_PT).values()[0]))) # print('Rs_c: {:20.14f}'.format(float(utils.image_value(Rs_c).values()[0]))) # print('Rs_s: {:20.14f}'.format(float(utils.image_value(Rs_s).values()[0]))) # print('albedo_c: {:20.14f}'.format(float(utils.image_value(albedo_c).values()[0]))) # print('albedo_s: {:20.14f}'.format(float(utils.image_value(albedo_s).values()[0]))) # print('e_atm: {:20.14f}'.format(float(utils.image_value(e_atm).values()[0]))) # print('r_air: {:20.14f}'.format(float(utils.image_value(r_air).values()[0]))) # print('cp: {:20.14f}'.format(float(cp.getInfo()))) # print('d_0: {:20.14f}'.format(float(utils.image_value(d_0).values()[0]))) # print('z0m: {:20.14f}'.format(float(utils.image_value(z0m).values()[0]))) # print('z0h: {:20.14f}'.format(float(utils.image_value(z0h).values()[0]))) # print('u_attr: {:20.14f}'.format(float(utils.image_value(u_attr).values()[0]))) # print('r_ah: 
{:20.14f}'.format(float(utils.image_value(r_ah).values()[0]))) # print('r_s: {:20.14f}'.format(float(utils.image_value(r_s).values()[0]))) # print('r_x: {:20.14f}'.format(float(utils.image_value(r_x).values()[0]))) # print('T_c: {:20.14f}'.format(float(utils.image_value(T_c).values()[0]))) # print('T_s: {:20.14f}'.format(float(utils.image_value(T_s).values()[0]))) # print('EF_s: {:20.14f}'.format(float(utils.image_value(EF_s).values()[0]))) # print('Iterations: {}'.format(iterations)) # ************************************************************************ # Start Loop for Stability Correction and Water Stress def iter_func(n, prev): # Extract inputs from previous iteration a_PT_iter = ee.Image(ee.Dictionary(prev).get('a_PT')) EF_s_iter = ee.Image(ee.Dictionary(prev).get('EF_s')) r_ah_iter = ee.Image(ee.Dictionary(prev).get('r_ah')) r_s_iter = ee.Image(ee.Dictionary(prev).get('r_s')) r_x_iter = ee.Image(ee.Dictionary(prev).get('r_x')) T_c_iter = ee.Image(ee.Dictionary(prev).get('T_c')) T_s_iter = ee.Image(ee.Dictionary(prev).get('T_s')) u_attr_iter = ee.Image(ee.Dictionary(prev).get('u_attr')) Rn_c = tseb_utils.compute_Rn_c( albedo_c, T_air, T_c_iter, T_s_iter, e_atm, Rs_c, F) Rn_s = tseb_utils.compute_Rn_s( albedo_s, T_air, T_c_iter, T_s_iter, e_atm, Rs_s, F) Rn = Rn_c.add(Rn_s) # Rn_s, Rn_c, Rn = tseb_utils.compute_Rn( # albedo_c, albedo_s, T_air, T_c_iter, T_s_iter, e_atm, Rs_c, Rs_s, F) G = tseb_utils.compute_G0( Rn, Rn_s, albedo, ndvi, t_rise, t_end, time, EF_s_iter) LE_c = albedo \ .expression( 'f_green * (a_PT * Ss / (Ss + g)) * Rn_c', {'f_green': f_green, 'a_PT': a_PT_iter, 'Ss': Ss, 'g': g, 'Rn_c': Rn_c}) \ .max(0) H_c = albedo.expression( 'Rn_c - LE_c', {'Rn_c': Rn_c, 'LE_c': LE_c}) T_c_iter = tseb_utils.temp_separation_tc( H_c, fc_q, T_air, T_rad, r_ah_iter, r_s_iter, r_x_iter, r_air, cp) T_s_iter = tseb_utils.temp_separation_ts(T_c_iter, fc_q, T_air, T_rad) T_ac = tseb_utils.temp_separation_tac( T_c_iter, T_s_iter, fc_q, T_air, r_ah_iter, r_s_iter, r_x_iter) # T_c_iter, T_s_iter, T_ac = tseb_utils.temp_separation( # H_c, fc_q, T_air, T_rad, r_ah_iter, r_s_iter, r_x_iter, r_air, cp) H_s = albedo.expression( 'r_air * cp * (T_s - T_ac) / r_s', {'r_air': r_air, 'cp': cp, 'T_s': T_s_iter, 'T_ac': T_ac, 'r_s': r_s_iter}) H_c = albedo.expression( 'r_air * cp * (T_c - T_ac) / r_x', {'r_air': r_air, 'cp': cp, 'T_c': T_c_iter, 'T_ac': T_ac, 'r_x': r_x_iter}) H = albedo.expression('H_s + H_c', {'H_s': H_s, 'H_c': H_c}) LE_s = albedo.expression( 'Rn_s - G - H_s', {'Rn_s': Rn_s, 'G': G, 'H_s': H_s}) LE_c = albedo.expression('Rn_c - H_c', {'Rn_c': Rn_c, 'H_c': H_c}) # CGM - Is there a reason this isn't up with the H calculation? H = H.where(H.eq(0), 10.0) # CGM - This wont doing anything at this position in the code. # Commenting out for now. # r_ah_iter = r_ah_iter.where(r_ah_iter.eq(0), 10.0) # CGM - This doesn't seem to do anything, commenting out for now # mask_iter = H_iter.divide(H).lte(1.05).And(H_iter.divide(H).gte(0.95)) # chk_iter = np.sum(mask_iter) / np.size(mask_iter) fh = tseb_utils.compute_stability_fh( H, T_rad, u_attr_iter, r_air, z_t, d_0, cp) fm = tseb_utils.compute_stability_fm( H, T_rad, u_attr_iter, r_air, z_u, d_0, z0m, cp) fm_h = tseb_utils.compute_stability_fm_h( H, T_rad, u_attr_iter, r_air, hc, d_0, z0m, cp) # CGM - z0h is not used in this function, should it be? 
# fm, fh, fm_h = tseb_utils.compute_stability( # H, T_rad, r_air, cp, u_attr, z_u, z_t, hc, d_0, z0m, z0h) u_attr_iter = tseb_utils.compute_u_attr( u=u, d0=d_0, z0m=z0m, z_u=z_u, fm=fm) r_ah_iter = tseb_utils.compute_r_ah( u_attr=u_attr_iter, d0=d_0, z0h=z0h, z_t=z_t, fh=fh) r_s_iter = tseb_utils.compute_r_s( u_attr=u_attr_iter, T_s=T_s_iter, T_c=T_c_iter, hc=hc, F=lai, d0=d_0, z0m=z0m, leaf=leaf, leaf_s=leaf_s, fm_h=fm_h) # CGM - Why is this function is passing "lai" to "F"? r_x_iter = tseb_utils.compute_r_x( u_attr=u_attr_iter, hc=hc, F=lai, d0=d_0, z0m=z0m, xl=leaf_width, leaf_c=leaf_c, fm_h=fm_h) # r_ah_iter, r_s_iter, r_x_iter, u_attr_iter = tseb_utils.compute_resistance( # u, T_s_iter, T_c_iter, hc, lai, d_0, z0m, z0h, z_u, z_t, # leaf_width, leaf, leaf_s, leaf_c, fm, fh, fm_h) a_PT_iter = a_PT_iter \ .where(LE_s.lte(0), a_PT_iter.subtract(0.05)) \ .where(a_PT_iter.lte(0), 0.01) den_s = albedo.expression('Rn_s - G', {'Rn_s': Rn_s, 'G': G}) den_s = den_s.updateMask(den_s.neq(0)) # den_s[den_s == 0.] = np.nan EF_s_iter = albedo.expression( 'LE_s / den_s', {'LE_s': LE_s, 'den_s': den_s}) return ee.Dictionary({ 'a_PT': a_PT_iter, 'EF_s': EF_s_iter, 'G': G, 'H_c': H_c, 'H_s': H_s, 'LE_c': LE_c, 'LE_s': LE_s, 'Rn_c': Rn_c, 'Rn_s': Rn_s, 'r_ah': r_ah_iter, 'r_s': r_s_iter, 'r_x': r_x_iter, 'T_ac': T_ac, 'T_c': T_c_iter, 'T_s': T_s_iter, 'u_attr': u_attr_iter}) # Iterate the function n times # CGM - Iteration count is an input to the function input_images = ee.Dictionary({ 'a_PT': a_PT, 'EF_s': EF_s, 'G': ee.Image(0), 'H_c': ee.Image(0), 'H_s': ee.Image(0), 'LE_c': ee.Image(0), 'LE_s': ee.Image(0), 'Rn_c': ee.Image(0), 'Rn_s': ee.Image(0), 'r_ah': r_ah, 'r_s': r_s, 'r_x': r_x, 'T_ac': ee.Image(0), 'T_c': T_c, 'T_s': T_s, 'u_attr': u_attr }) iter_output = ee.Dictionary( ee.List.sequence(1, iterations).iterate(iter_func, input_images)) # Unpack the iteration output a_PT = ee.Image(iter_output.get('a_PT')) Rn_c = ee.Image(iter_output.get('Rn_c')) Rn_s = ee.Image(iter_output.get('Rn_s')) G = ee.Image(iter_output.get('G')) H_c = ee.Image(iter_output.get('H_c')) H_s = ee.Image(iter_output.get('H_s')) LE_c = ee.Image(iter_output.get('LE_c')) LE_s = ee.Image(iter_output.get('LE_s')) # T_ac = ee.Image(iter_output.get('T_ac')) # T_c = ee.Image(iter_output.get('T_c')) # T_s = ee.Image(iter_output.get('T_s')) # r_ah = ee.Image(iter_output.get('r_ah')) # r_s = ee.Image(iter_output.get('r_s')) # r_x = ee.Image(iter_output.get('r_x')) # print('\na_PT: {:20.14f}'.format(utils.image_value(a_PT).values()[0])) # print('Rn_c: {:20.14f}'.format(utils.image_value(Rn_c).values()[0])) # print('Rn_s: {:20.14f}'.format(utils.image_value(Rn_s).values()[0])) # print('G: {:20.14f}'.format(utils.image_value(G).values()[0])) # print('H_c: {:20.14f}'.format(utils.image_value(H_c).values()[0])) # print('H_s: {:20.14f}'.format(utils.image_value(H_s).values()[0])) # print('LE_c: {:20.14f}'.format(utils.image_value(LE_c).values()[0])) # print('LE_s: {:20.14f}'.format(utils.image_value(LE_s).values()[0])) # print('r_ah: {:20.14f}'.format(utils.image_value(r_ah).values()[0])) # print('r_s: {:20.14f}'.format(utils.image_value(r_s).values()[0])) # print('r_x: {:20.14f}'.format(utils.image_value(r_x).values()[0])) # print('T_ac: {:20.14f}'.format(utils.image_value(T_ac).values()[0])) # print('T_c: {:20.14f}'.format(utils.image_value(T_c).values()[0])) # print('T_s: {:20.14f}'.format(utils.image_value(T_s).values()[0])) # ************************************************************************ # Check Energy Balance Closure ind = 
a_PT.lte(0.01) LE_s = LE_s.where(ind, 1.0) LE_c = LE_c.where(ind, 1.0) G = G.where(ind, Rn_s.subtract(H_s)) ind = LE_s.gt(Rn_s) LE_s = LE_s.where(ind, Rn_s) H_s = H_s.where(ind, Rn_s.subtract(G).subtract(LE_s)) # CGM - Check order of operations ind = LE_c.gt(Rn_c.add(100)) # CGM - Not used below since LE_c is recomputed LE_c = LE_c.where(ind, Rn_c.add(100)) H_c = H_c.where(ind, -100) LE_s = albedo.expression( 'Rn_s - G - H_s', {'Rn_s': Rn_s, 'G': G, 'H_s': H_s}) LE_c = albedo.expression('Rn_c - H_c', {'Rn_c': Rn_c, 'H_c': H_c}) # The latent heat of vaporization is 2.45 MJ kg-1 # Assume Rs24 is still in W m-2 day-1 and convert to MJ kg-1 # CGM - Leaving out scaling value for now ET = albedo \ .expression( '((LE_c + LE_s) / Rs_1) * (Rs24 / 2.45) * scaling', {'LE_c': LE_c, 'LE_s': LE_s, 'Rs_1': Rs_1, 'Rs24': Rs24.multiply(0.0864 / 24.0), 'scaling': 1}) \ .max(0.01) # print('\nRn_c: {:20.14f}'.format(utils.image_value(Rn_c).values()[0])) # print('Rn_s: {:20.14f}'.format(utils.image_value(Rn_s).values()[0])) # print('G: {:20.14f}'.format(utils.image_value(G).values()[0])) # print('H_c: {:20.14f}'.format(utils.image_value(H_c).values()[0])) # print('H_s: {:20.14f}'.format(utils.image_value(H_s).values()[0])) # print('LE_c: {:20.14f}'.format(utils.image_value(LE_c).values()[0])) # print('LE_s: {:20.14f}'.format(utils.image_value(LE_s).values()[0])) # print('\nET: {:20.14f}'.format(utils.image_value(ET).values()[0])) return ET
5,355,892
def GetPrivateIpv6GoogleAccessTypeMapper(messages, hidden=False):
    """Returns a mapper from text options to the PrivateIpv6GoogleAccess enum.

    Args:
        messages: The message module.
        hidden: Whether the flag should be hidden in the choice_arg
    """
    help_text = """
Sets the type of private access to Google services over IPv6.

PRIVATE_IPV6_GOOGLE_ACCESS_TYPE must be one of:

    bidirectional
        Allows Google services to initiate connections to GKE pods in this
        cluster. This is not intended for common use, and requires previous
        integration with Google services.

    disabled
        Default value. Disables private access to Google services over IPv6.

    outbound-only
        Allows GKE pods to make fast, secure requests to Google services
        over IPv6. This is the most common use of private IPv6 access.

    $ gcloud alpha container clusters create \
        --private-ipv6-google-access-type=disabled
    $ gcloud alpha container clusters create \
        --private-ipv6-google-access-type=outbound-only
    $ gcloud alpha container clusters create \
        --private-ipv6-google-access-type=bidirectional
"""
    return arg_utils.ChoiceEnumMapper(
        '--private-ipv6-google-access-type',
        messages.NetworkConfig.PrivateIpv6GoogleAccessValueValuesEnum,
        _GetPrivateIPv6CustomMappings(),
        hidden=hidden,
        help_str=help_text)
5,355,893
def register(base_command):
    """
    Registers `leapp upgrade`
    """
    base_command.add_sub(upgrade)
5,355,894
def calc_director(moi):
    """
    Calculate the director from a moment of inertia.

    The director is the dominant eigenvector of the MOI tensor

    Parameters:
    -----------
    moi : list
        3x3 array; MOItensor

    Returns:
    --------
    director : list
        3 element list of director vector
    """
    w, v = np.linalg.eig(moi)
    director = v[:, np.argmin(w)]
    return director
5,355,895
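# Hypothetical usage of calc_director above (illustration only); assumes numpy as np.
import numpy as np

# Diagonal MOI tensor: the smallest moment is about the x axis, so the director
# should come out (anti)parallel to [1, 0, 0].
moi = np.diag([1.0, 5.0, 10.0])
print(calc_director(moi))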
def _solve_upper_triangular(A, b):
    """ Solves Ax=b when A is upper triangular. """
    return solve_triangular(A, b, lower=False)
5,355,896
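# Hypothetical usage of _solve_upper_triangular above (illustration only); assumes
# `solve_triangular` is scipy.linalg.solve_triangular, as the call signature suggests.
import numpy as np

A = np.array([[2.0, 1.0], [0.0, 3.0]])   # upper triangular
b = np.array([5.0, 6.0])
x = _solve_upper_triangular(A, b)
print(x)                                  # -> [1.5, 2.0]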
def check_create_account_key(key):
    """
    Returns the user_id if the reset key is valid (matches a user_id and that
    user does not already have an account). Otherwise returns None.
    """
    query = sqlalchemy.text("""
        SELECT user_id
        FROM members
        WHERE create_account_key = :k
            AND user_id NOT IN (SELECT user_id FROM users)
        """)
    result = flask.g.db.execute(query, k=key).first()

    if result is not None:
        return result['user_id']
    else:
        return None
5,355,897
def find_gaia_files_hp(nside, pixlist, neighbors=True):
    """Find full paths to Gaia healpix files in a set of HEALPixels.

    Parameters
    ----------
    nside : :class:`int`
        (NESTED) HEALPixel nside.
    pixlist : :class:`list` or `int`
        A set of HEALPixels at `nside`.
    neighbors : :class:`bool`, optional, defaults to ``True``
        Also return files corresponding to all neighbors that touch the
        pixels in `pixlist` to prevent edge effects (e.g. a Gaia source
        is 1 arcsec outside of `pixlist` and so in an adjacent pixel).

    Returns
    -------
    :class:`list`
        A list of all Gaia files that need to be read in to account for
        objects in the passed list of pixels.

    Notes
    -----
        - The environment variable $GAIA_DIR must be set.
    """
    # ADM the resolution at which the healpix files are stored.
    filenside = _get_gaia_nside()

    # ADM check that the GAIA_DIR is set and retrieve it.
    gaiadir = _get_gaia_dir()
    hpxdir = os.path.join(gaiadir, 'healpix')

    # ADM work with pixlist as an array.
    pixlist = np.atleast_1d(pixlist)

    # ADM determine the pixels that touch the passed pixlist.
    pixnum = nside2nside(nside, filenside, pixlist)

    # ADM if neighbors was sent, then retrieve all pixels that touch each
    # ADM pixel covered by the provided locations, to prevent edge effects...
    if neighbors:
        pixnum = add_hp_neighbors(filenside, pixnum)

    # ADM reformat in the Gaia healpix format used by desitarget.
    gaiafiles = [os.path.join(hpxdir, 'healpix-{:05d}.fits'.format(pn)) for pn in pixnum]

    return gaiafiles
5,355,898
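# Hypothetical usage of find_gaia_files_hp above (illustration only); requires the
# $GAIA_DIR environment variable to point at a desitarget-style Gaia healpix directory.
files = find_gaia_files_hp(nside=32, pixlist=[1303, 1304], neighbors=True)
print(len(files), "healpix files to read")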
def _crc16(data, start=_CRC16_START):
    """Compute CRC16 for bytes/bytearray/memoryview data"""
    crc = start
    for b in data:
        crc ^= b << 8
        for _ in range(8):
            crc = ((crc << 1) & 0xFFFF) ^ _CRC16_POLY if crc & 0x8000 else (crc << 1)
    return crc
5,355,899
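# Hypothetical usage of _crc16 above (illustration only). The numeric result depends on
# the module-level _CRC16_START and _CRC16_POLY constants, which are not shown here.
checksum = _crc16(b"123456789")
print(hex(checksum))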