Dataset columns:
  content: string, length 35 to 762k characters
  sha1: string, length 40
  id: int64, range 0 to 3.66M
import math


def _get_process_num_examples(builder, split, process_batch_size, process_index,
                              process_count, drop_remainder):
    """Returns the number of examples in a given process's split."""
    process_split = _get_process_split(
        split,
        process_index=process_index,
        process_count=process_count,
        drop_remainder=drop_remainder)
    num_examples = builder.info.splits[process_split].num_examples

    if drop_remainder:
        device_batch_size = process_batch_size // jax.local_device_count()
        num_examples = (
            math.floor(num_examples / device_batch_size) * device_batch_size)

    return num_examples
a0621a6146e919db78b0ff5e7a5ae6d3c1bb68a6
3,656,513
def export_python_function(earth_model):
    """
    Exports model as a pure python function, with no numpy/scipy/sklearn dependencies.
    :param earth_model: Trained pyearth model
    :return: A function that accepts an iterator over examples, and returns an iterator over transformed examples
    """
    i = 0
    accessors = []
    for bf in earth_model.basis_:
        if not bf.is_pruned():
            accessors.append(bf.func_factory(earth_model.coef_[0, i]))
            i += 1

    def func(example_iterator):
        return [sum(accessor(row) for accessor in accessors) for row in example_iterator]

    return func
593d8cf9f1156359f2276f0481e02a2d00d8ffde
3,656,514
def ehi(data, thr_95, axis=0, keepdims=False):
    """
    Calculate Excessive Heat Index (EHI).

    Parameters
    ----------
    data: list/array
        1D/2D array of daily temperature timeseries
    thr_95: float
        95th percentile daily mean value from climatology
    axis: int
        The axis along which the calculation is applied (default 0).
    keepdims: boolean
        If data is 2d (time in third dimension) and keepdims is set to True,
        calculation is applied to the zeroth axis (time) and returns a 2d
        array of freq-int dists. If set to False (default) all values are
        collectively assembled before calculation.

    Returns
    -------
    EHI: float
        Excessive heat index
    """
    def ehi_calc(pdata, thr_95):
        if all(np.isnan(pdata)):
            print("All data missing/masked!")
            ehi = np.nan
        else:
            # run_mean = moving_average(pdata, 3)
            rmean = run_mean(pdata, 3)
            ehi = ((rmean > thr_95)).sum()
        return ehi

    if keepdims:
        EHI = np.apply_along_axis(ehi_calc, axis, data, thr_95)
    else:
        EHI = ehi_calc(data, thr_95)

    return EHI
b56166dc070c9f44ce0d8197526c09ba2f95995c
3,656,515
def get_disable_migration_module():
    """ get disable migration """
    class DisableMigration:
        def __contains__(self, item):
            return True

        def __getitem__(self, item):
            return None

    return DisableMigration()
d44a26c5e597f23dbc2434488baf54ebccc5010c
3,656,517
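Editor's usage sketch for the get_disable_migration_module entry above: the returned object is commonly assigned to Django's MIGRATION_MODULES setting to skip migrations while running tests; the settings-module name below is illustrative and not part of the entry.

# settings_test.py (illustrative) -- skip migrations during test runs.
# The object claims to contain every app label and maps each one to None,
# which Django interprets as "this app has no migrations".
MIGRATION_MODULES = get_disable_migration_module()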
def __sbox_bytes(data, sbox):
    """S-Box substitution of a list of bytes"""
    return [__sbox_single_byte(byte, sbox) for byte in data]
db4999ada745c07127d9eff66841877a157839ec
3,656,519
def load_config_with_kwargs(cls, kwargs):
    """Takes a marshmallow class and dict of parameter values and
    appropriately instantiates the schema."""
    assert_is_a_marshmallow_class(cls)
    schema = cls.Schema()
    fields = schema.fields.keys()
    return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), {
        k: v for k, v in kwargs.items() if k not in fields
    }
9058becb8ae387ad012554ff0afe7ac5fcbf62f7
3,656,520
def split_rows(sentences, column_names):
    """
    Creates a list of sentences where each sentence is a list of lines.
    Each line is a dictionary of columns.
    :param sentences:
    :param column_names:
    :return:
    """
    new_sentences = []
    root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']
    start = [dict(zip(column_names, root_values))]
    for sentence in sentences:
        rows = sentence.split('\n')
        sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']
        sentence = start + sentence
        new_sentences.append(sentence)
    return new_sentences
444733a9c169bedae8dc0045cd696cafed7085e2
3,656,521
def _rollup_date(dts, interval=None):
    """format date/time string based on interval spec'd for summation

    For Daily, it returns just the date. No time or timezone.

    For Hourly, it returns an ISO-8601 datetime range. This provides
    previously missing clarity around whether the rainfall amount shown was
    for the period starting at the returned datetime or the period preceding
    it (the latter being the correct approach for datetimes but not dates.)
    """
    if interval == INTERVAL_DAILY:
        # strip the time entirely from the datetime string. Timezone is lost.
        return parse(dts).strftime("%Y-%m-%d")
    elif interval == INTERVAL_HOURLY:
        # set the minutes, seconds, and microsecond to zeros. Timezone is preserved.
        # This method returns the total for the hour, e.g a
        # rainfall total of 1 inch with a timestamp of "2020-04-07T10:00:00-04:00"
        # is actually 1 inch for intervals within the 10 o'clock hour.
        # return parse(dts).replace(minute=0, second=0, microsecond=0).isoformat()
        # NOTE: It may be more appropriate to use a timedelta+1 hour here,
        # if the rainfall is to be interpreted as the total *up to* a point in time.
        # Because we're looking at accumulation, we want timestamps that
        # represent rainfall accumulated during the previous fifteen minutes
        # within the hour represented. So in a list of [1:00, 1:15, 1:30, 1:45,
        # 2:00], we scratch the 1:00 since it represents accumulation from
        # 12:45 to 1:00, outside our hour of interest. Everything else represents
        # rain recorded between >1 and <=2 o'clock. We can get that by
        # bumping everything back 15 minutes, then generating the hourly.
        # start_dt = parse(dts).replace(minute=0, second=0, microsecond=0)
        start_dt = parse(dts)
        start_dt = start_dt - timedelta(minutes=MIN_INTERVAL)
        start_dt = start_dt.replace(minute=0, second=0, microsecond=0)
        end_dt = start_dt + timedelta(hours=1)
        end_dt.replace(minute=0, second=0, microsecond=0)
        return "{0}/{1}".format(start_dt.isoformat(), end_dt.isoformat())
    else:
        # return it as-is
        return dts
12f74d9becfa52c626d33174cb628dc9e0112c07
3,656,523
def offset_compensation(time_signal):
    """ Offset compensation filter. """
    return lfilter([1., -1], [1., -0.999], time_signal)
0fc423646071dc07bf88f88698f3248fa302a41e
3,656,524
from typing import Callable from re import T from typing import cast def _alias(default: Callable) -> Callable[[T], T]: """ Decorator which re-assigns a function `_f` to point to `default` instead. Since global function calls in Python are somewhat expensive, this is mainly done to reduce a bit of overhead involved in the functions calls. For example, consider the below example:: def f2(o): return o def f1(o): return f2(o) Calling function `f1` will incur some additional overhead, as opposed to simply calling `f2`. Now assume we wrap `f1` with the `_alias` decorator:: def f2(o): return o @_alias(f2) def f1(o): ... This will essentially perform the assignment of `f1 = f2`, so calling `f1()` in this case has no additional function overhead, as opposed to just calling `f2()`. """ def new_func(_f: T) -> T: return cast(T, default) return new_func
f286472a7f14428ea5243d54a671b9d3d743c9ef
3,656,526
def test_image(filename):
    """
    Return the absolute path to image file having *filename* in test_files
    directory.
    """
    return absjoin(thisdir, 'test_files', filename)
bda20e51a495e56f8ebf373819e60ebdea3da535
3,656,527
import difflib def menu( ticker: str, start: str, interval: str, stock: pd.DataFrame, ): """Sector and Industry Analysis Menu""" sia_controller = SectorIndustryAnalysisController(ticker, start, interval, stock) sia_controller.call_help(None) while True: # Get input command from user if session and gtff.USE_PROMPT_TOOLKIT: completer = NestedCompleter.from_nested_dict( {c: None for c in sia_controller.CHOICES} ) an_input = session.prompt( f"{get_flair()} (stocks)>(sia)> ", completer=completer, ) else: an_input = input(f"{get_flair()} (stocks)>(sia)> ") try: process_input = sia_controller.switch(an_input) if process_input is not None: return process_input except SystemExit: print("The command selected doesn't exist\n") similar_cmd = difflib.get_close_matches( an_input, sia_controller.CHOICES, n=1, cutoff=0.7 ) if similar_cmd: print(f"Did you mean '{similar_cmd[0]}'?\n") continue
5c3d13d292525abdb5c7f98a2467274c2172cf8f
3,656,528
def fname_template(orun, detname, ofname, nevts, tsec=None, tnsec=None):
    """Replaces parts of the file name specified as
       #src, #exp, #run, #evts, #type, #date, #time, #fid, #sec, #nsec
       with actual values
    """
    template = replace(ofname, '#src', detname)
    template = replace(template, '#exp', orun.expt)
    template = replace(template, '#run', 'r%04d' % orun.runnum)
    template = replace(template, '#type', '%s')
    t_sec = tsec if tsec is not None else int(orun.timestamp >> 32 & 0xFFFFFFFF)
    t_nsec = tnsec if tnsec is not None else int(orun.timestamp & 0xFFFFFFFF)
    template = replace(template, '#date', str_tstamp('%Y-%m-%d', t_sec))
    template = replace(template, '#time', str_tstamp('%H%M%S', t_sec))
    template = replace(template, '#sec', '%d' % t_sec)
    template = replace(template, '#nsec', '%09d' % t_nsec)
    template = replace(template, '#evts', 'e%06d' % nevts)
    if not '%s' in template:
        template += '-%s'
    return template
7f38b638d89a7f99ab36b4e08369cfc7f22bb575
3,656,529
def opt_checked(method):
    """Like `@checked`, but it is legal to not specify the value. In this
    case, the special `Unset` value is passed to the validation function.
    Storing `Unset` causes the key to not be emitted during serialization."""
    return Checked(method.__name__, method.__doc__, method, True)
5d34db8fcc602dc51d69c128a1855eef44c81453
3,656,530
from datetime import datetime


def _metadata(case_study):
    """Collect metadata in a dictionary."""
    return {
        'creation_date': datetime.strftime(datetime.now(), '%c'),
        'imagery': case_study.imagery,
        'latitude': case_study.lat,
        'longitude': case_study.lon,
        'area_of_interest': case_study.aoi_latlon.wkt,
        'crs': str(case_study.crs),
        'country': case_study.country
    }
eb16892135326662029fe568922f2871f016090e
3,656,531
def CoP_constraints_ds( m, foot_angles, next_support_foot_pos, stateX, stateY, N=16, dt=0.1, h=1.0, g=9.81, tPf=8, ): """ INPUTS m (int): remaining time steps in current foot step; foot_angles ([N, 1] vector): containing the orientations in radians of the foot steps at each time step; next_support_foot_pos ([2, 1] vec): next support foot position; stateX ([3, 1] matrix): position, velocity, acceleration of CoM along x-axis; stateY ([3, 1] matrix): position, velocity, acceleration of CoM along y-axis; N (int): is the length of the preview horizon; dt (float): time step size; h (float): CoM height; g (float): gravitational acceleration; tPf (int): time steps per foot step; Also calls a function that load the data for the foot edge normal vectors and edge to center distances; OUTPUTS leftHandSide: size [ef*N, 2N+2l] Matrix, where l is the number of remaining foots steps contained in the preview horizon and ef is the number of edges in the robot foot, e being the number of the edges of the foot, using a rectangular foot, ef=4; rightHandSide: size [ef*N, 1] Matrix; """ Uz = get_Uz(N=N) FutureStepsMat = stepsInFutureStepsMat(m, N=N) middleMat_diag = np.hstack((Uz, -FutureStepsMat[:, 1:])) middleMat = block_diag(middleMat_diag, middleMat_diag) Sz = get_Sz(N=N) rightVecX = FutureStepsMat[:, :1] * next_support_foot_pos[0] - Sz @ stateX rightVecY = FutureStepsMat[:, :1] * next_support_foot_pos[1] - Sz @ stateY rightVex = np.vstack((rightVecX, rightVecY)) # set_trace() for i in range(N): RotMat = angle2RotMat(foot_angles[i]) if i < m: d, b = init_double_support_CoP() else: d, b = rectangular_foot_CoP() # (Rd^T)^T = dR^T dRot = d @ RotMat.T if i == 0: DMatX = block_diag(dRot[:, :1]) DMatY = block_diag(dRot[:, 1:]) bVec = b else: DMatX = block_diag(DMatX, dRot[:, :1]) DMatY = block_diag(DMatY, dRot[:, 1:]) bVec = np.vstack((bVec, b)) DMat = np.hstack((DMatX, DMatY)) leftHandSide = DMat @ middleMat rightHandSide = bVec + DMat @ rightVex return leftHandSide, rightHandSide
647e9313b79523ae41ab47a61501c1b356d43785
3,656,532
# NOTE: the builtin io module has no imread; scikit-image's io is intended here.
from skimage import io


def HARRIS(img_path):
    """
    extract Harris features
    :param img_path:
    :return:
    :Version:1.0
    """
    img = io.imread(img_path)
    img = skimage.color.rgb2gray(img)
    img = (img - np.mean(img)) / np.std(img)
    feature = corner_harris(img, method='k', k=0.05, eps=1e-06, sigma=1)
    return feature.reshape(feature.shape[0] * feature.shape[1])
5c11c9e5b2947b0ddeb2e1780d11be4020fe53a4
3,656,533
def http_req(blink, url='http://example.com', data=None, headers=None, reqtype='get', stream=False, json_resp=True, is_retry=False): """ Perform server requests and check if reauthorization neccessary. :param blink: Blink instance :param url: URL to perform request :param data: Data to send (default: None) :param headers: Headers to send (default: None) :param reqtype: Can be 'get' or 'post' (default: 'get') :param stream: Stream response? True/FALSE :param json_resp: Return JSON response? TRUE/False :param is_retry: Is this a retry attempt? True/FALSE """ if reqtype == 'post': req = Request('POST', url, headers=headers, data=data) elif reqtype == 'get': req = Request('GET', url, headers=headers) else: raise BlinkException(ERROR.REQUEST) prepped = req.prepare() response = blink.session.send(prepped, stream=stream) if json_resp and 'code' in response.json(): if is_retry: raise BlinkAuthenticationException( (response.json()['code'], response.json()['message'])) else: headers = attempt_reauthorization(blink) return http_req(blink, url=url, data=data, headers=headers, reqtype=reqtype, stream=stream, json_resp=json_resp, is_retry=True) if json_resp: return response.json() return response
0596f82752292216235e9d9f3b14bb01f053d0d7
3,656,535
def make_dataset(path, seq_length, mem_length, local_rank, lazy=False, xl_style=False, shuffle=True, split=None, tokenizer=None, tokenizer_type='CharacterLevelTokenizer', tokenizer_model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_converage=1.0, non_binary_cols=None, sample_one_document=False, pre_tokenize=False, **kwargs): """function to create datasets+tokenizers for common options""" if split is None: split = [1.] if non_binary_cols is not None: # multilabel dataset support (only for csvs) label_key = non_binary_cols # make tokenizer for dataset if tokenizer is None: tokenizer = make_tokenizer(tokenizer_type, None, tokenizer_model_path, vocab_size, model_type, pad_token, character_converage, **kwargs) # get one or multiple datasets and concatenate if isinstance(path, str): ds = get_dataset(path, tokenizer=tokenizer, pre_tokenize=pre_tokenize, local_rank=local_rank) else: ds = [get_dataset(p, tokenizer=tokenizer, pre_tokenize=pre_tokenize, local_rank=local_rank) for p in path] ds = ConcatDataset(ds) ds_type = '' if 'ds_type' in kwargs: ds_type = kwargs['ds_type'] # Split dataset into train/val/test (and wrap bert dataset) if should_split(split): ds = split_ds(ds, split, shuffle=shuffle) if ds_type.lower() == 'bert': presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False ds = [bert_sentencepair_dataset(d, max_seq_len=seq_length, presplit_sentences=presplit_sentences) if d is not None else None for d in ds] elif ds_type.lower() == 'gpt2': if xl_style: ds = [XLDataset(d, tokenizer, max_seq_len=seq_length, mem_len=mem_length, sample_across_doc=not sample_one_document) if d is not None else None for d in ds] else: ds = [GPT2Dataset(d, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document) if d is not None else None for d in ds] else: if ds_type.lower() == 'bert': presplit_sentences = kwargs['presplit_sentences'] if 'presplit_sentences' in kwargs else False ds = bert_sentencepair_dataset(ds, max_seq_len=seq_length, presplit_sentences=presplit_sentences) elif ds_type.lower() == 'gpt2': if xl_style: ds = XLDataset(ds, tokenizer, max_seq_len=seq_length, mem_len=mem_length, sample_across_doc=not sample_one_document) else: ds = GPT2Dataset(ds, tokenizer, max_seq_len=seq_length, sample_across_doc=not sample_one_document) return ds, tokenizer
419e50d3dab13d9aa1f096b99a598c52441bb2ae
3,656,536
import re


def fix_reference_name(name, blacklist=None):
    """Return a syntax-valid Python reference name from an arbitrary name"""
    name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
    while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
        if not re.match(r'[a-zA-Z]', name[0]):
            name = name[1:]
            continue
    name = str(name)
    if not name:
        name = "data"
    if blacklist is not None and name in blacklist:
        get_new_name = lambda index: name + ('_%03d' % index)
        index = 0
        while get_new_name(index) in blacklist:
            index += 1
        name = get_new_name(index)
    return name
2f1a291fc7ac9816bc2620fceeeaf90a1bb3fd4a
3,656,537
from hybridq.gate.gate import _available_gates


def get_available_gates() -> tuple[str, ...]:
    """
    Return available gates.
    """
    return tuple(_available_gates)
f4d9e8d617675174f97d7d1cc3d6ea8bdadab725
3,656,540
def main():
    """
    Entry point

    Collect all reviews from the file system (FS) &
    Dump it into JSON representation back to the FS

    Returns:
        int: The status code
    """
    collector = Collector()
    return collector.collect()
d6d15227fe37522357a3f1706cf446026e277a32
3,656,541
def __parse_tokens(sentence: spacy.tokens.Doc) -> ParsedUniversalDependencies: """Parses parts of speech from the provided tokens.""" #tokenize # remove the stopwards, convert to lowercase #bi/n-grams adj = __get_word_by_ud_pos(sentence, "ADJ") adp = __get_word_by_ud_pos(sentence, "ADP") adv = __get_word_by_ud_pos(sentence, "ADV") aux = __get_word_by_ud_pos(sentence, "AUX") verb = __get_word_by_ud_pos(sentence, "VERB") cconj = __get_word_by_ud_pos(sentence, "CCONJ") det = __get_word_by_ud_pos(sentence, "DET") intj = __get_word_by_ud_pos(sentence, "INTJ") noun = __get_word_by_ud_pos(sentence, "NOUN") num = __get_word_by_ud_pos(sentence, "NUM") part = __get_word_by_ud_pos(sentence, "PART") pron = __get_word_by_ud_pos(sentence, "PRON") propn = __get_word_by_ud_pos(sentence, "PROPN") punct = __get_word_by_ud_pos(sentence, "PUNCT") sconj = __get_word_by_ud_pos(sentence, "SCONJ") sym = __get_word_by_ud_pos(sentence, "SYM") verb = __get_word_by_ud_pos(sentence, "VERB") x = __get_word_by_ud_pos(sentence, "X") return ParsedUniversalDependencies( adj = adj, adp = adp, adv = adv, aux = aux, cconj = cconj, det = det, intj = intj, noun = noun, num = num, part = part, pron = pron, propn = propn, punct = punct, sconj = sconj, sym = sym, verb = verb, x = x)
86553239aaac9d89203722f3853989ba0f95b8e3
3,656,542
from datetime import datetime def main(): """ In this main function, we connect to the database, and we create position table and intern table and after that we create new position and new interns and insert the data into the position/intern table """ database = r"interns.db" sql_drop_positions_table=""" DROP TABLE positions """ sql_drop_interns_table=""" DROP TABLE interns """ sql_create_positions_table = """ CREATE TABLE IF NOT EXISTS positions ( name text PRIMARY KEY, description text ); """ sql_create_interns_table = """CREATE TABLE IF NOT EXISTS interns ( id integer PRIMARY KEY, last_name text NOT NULL, first_name text NOT NULL, position_applied text NOT NULL, school text NOT NULL, program text NOT NULL, date_of_entry text NOT NULL, FOREIGN KEY (position_applied) REFERENCES positions (name) ON UPDATE NO ACTION );""" # create a database connection conn = create_connection(database) # create tables if conn is not None: #drop interns table before everything else drop_table(conn, sql_drop_interns_table) #drop positions table before everything else drop_table(conn, sql_drop_positions_table) # create projects table create_table(conn, sql_create_positions_table) # create tasks table create_table(conn, sql_create_interns_table) else: print("Error! cannot create the database connection.") with conn: #create position-later on change the check condition position=("Software Development Intern", "This position is for software development intern"); create_position(conn, position) #create interns: intern_1=("A","B","Software Development Intern","GWU","Data Analytics",datetime.datetime.now()) intern_2=("C","D","Software Development Intern","GWU","Data Analytics",datetime.datetime.now()) create_intern(conn,intern_1) create_intern(conn,intern_2) conn.commit() conn.close() return database
89b88d681b4f4eaeada0a8e8de5a3dadad1ddd15
3,656,543
from typing import Tuple def parse_date(month: int, day: int) -> Tuple[int, int, int]: """Parse a date given month and day only and convert to a tuple. Args: month (int): 1-index month value (e.g. 1 for January) day (int): a day of the month Returns: Tuple[int, int, int]: (year, month, day) """ if month < config.TODAY.month: # Note that if you have not yet recorded/cached the current # records, you should comment out the +1. The +1 is only # meant to increment for future events that happen in # the new year. year = config.TODAY.year + 1 elif month - config.TODAY.month > 1: # I realized that on June 10th, 2020, the schedule for UQs was # posted June 10th but included June 9th (which had passed). # There is a distinct possibility that this will happen again, # when the schedule is posted on New Year's Day (around there) # and includes a day for December. Because events are only # at most a month away in the future, we should check whether # the difference in months is greater than 1. # e.g. 12 - 1 > 1 to represent December of previous year and # January of the current year year = config.TODAY.year - 1 else: year = config.TODAY.year return year, month, day
d9ebb40061c14c9a2b1336465921cea0d5c756a8
3,656,544
def usgs_perlite_parse(*, df_list, source, year, **_): """ Combine, parse, and format the provided dataframes :param df_list: list of dataframes to concat and format :param source: source :param year: year :return: df, parsed and partially formatted to flowbyactivity specifications """ data = {} row_to_use = ["Quantity", "Mine production2"] prod = "" name = usgs_myb_name(source) des = name dataframe = pd.DataFrame() col_name = usgs_myb_year(YEARS_COVERED['perlite'], year) for df in df_list: for index, row in df.iterrows(): if df.iloc[index]["Production"].strip() == "Mine production2": prod = "production" elif df.iloc[index]["Production"].strip() == \ "Imports for consumption:3": prod = "import" elif df.iloc[index]["Production"].strip() == "Exports:3": prod = "export" if df.iloc[index]["Production"].strip() in row_to_use: product = df.iloc[index]["Production"].strip() data = usgs_myb_static_variables() data["SourceName"] = source data["Year"] = str(year) data["Unit"] = "Thousand Metric Tons" data["FlowAmount"] = str(df.iloc[index][col_name]) if str(df.iloc[index][col_name]) == "W": data["FlowAmount"] = WITHDRAWN_KEYWORD data["Description"] = des data["ActivityProducedBy"] = name data['FlowName'] = name + " " + prod dataframe = dataframe.append(data, ignore_index=True) dataframe = assign_fips_location_system( dataframe, str(year)) return dataframe
8b9b1dcf3312cb59f5a27873e791c4bc744599bc
3,656,545
import warnings def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): # token from https://github.com/bioinf-jku/TTUR/blob/master/fid.py """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1 : Numpy array containing the activations of the pool_3 layer of the inception net ( like returned by the function 'get_predictions') for generated samples. -- mu2 : The sample mean over activations of the pool_3 layer, precalcualted on an representive data set. -- sigma1: The covariance matrix over activations of the pool_3 layer for generated samples. -- sigma2: The covariance matrix over activations of the pool_3 layer, precalcualted on an representive data set. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths" assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions" diff = mu1 - mu2 # product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps warnings.warn(msg) offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) #raise ValueError("Imaginary component {}".format(m)) print('FID is fucked up') covmean = covmean.real tr_covmean = np.trace(covmean) return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
0f22ce0a99e9b8f2ffca7af4a190c020f376ce8c
3,656,546
def _svdvals_eig(x):  # pragma: no cover
    """SVD-decomposition via eigen, but return singular values only.
    """
    if x.shape[0] > x.shape[1]:
        s2 = np.linalg.eigvalsh(dag(x) @ x)
    else:
        s2 = np.linalg.eigvalsh(x @ dag(x))
    return s2**0.5
af47405994cf8fa1504fcb898b7621483eb1e346
3,656,547
def get_3d_object_section(target_object):
    """Returns the 3D bounding section that includes the given object (e.g. an STL mesh).
    """
    target_object = target_object.flatten()

    x_min = min(target_object[0::3])
    x_max = max(target_object[0::3])
    y_min = min(target_object[1::3])
    y_max = max(target_object[1::3])
    z_min = min(target_object[2::3])
    z_max = max(target_object[2::3])

    return [x_min, x_max, y_min, y_max, z_min, z_max]
e11d62ad06ada005d16803b2f440ac700e272599
3,656,548
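Editor's usage sketch for the get_3d_object_section entry above: it reads flattened x/y/z triples, so any (N, 3) array of vertices works; the array below is purely illustrative.

import numpy as np

# Three vertices of a triangle; the function flattens the array and reads
# x, y, z with a stride of 3, returning the axis-aligned bounding box.
vertices = np.array([[0.0, 1.0, 2.0],
                     [3.0, -1.0, 0.5],
                     [1.0, 4.0, -2.0]])
print(get_3d_object_section(vertices))  # [0.0, 3.0, -1.0, 4.0, -2.0, 2.0]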
def make_row(filename, num_cols, col_names): """ Given a genome file, create and return a row of kmer counts to be inerted into the mer matrix. """ # Filepath thefile = str(filename[0]) # Get the genome id from the filepath genomeid = filename[0].split('/')[-1] genomeid = genomeid.split('.')[-2] # Create a temp row to fill and return (later placed in the kmer_matrix) temp_row = [0]*num_cols # Walk through the file for record in SeqIO.parse(thefile, "fasta"): # Retrieve the sequence as a string kmerseq = record.seq #kmerseq = kmerseq._get_seq_str_and_check_alphabet(kmerseq) kmerseq = str(kmerseq) # Retrieve the kmer count as an int kmercount = record.id kmercount = int(kmercount) if kmercount>255: kmercount = 255 # Lookup the seq in the column list for the index col_index = col_names[kmerseq] # Put the kmercount in the right spot in the row temp_row[col_index] = kmercount return genomeid,temp_row
59ed16c4a19da95145ed56164bc35ef24bc7f6bc
3,656,550
def analytic_overlap_NM( DQ: float, w1: float, w2: float, n1: int, n2: int ) -> float: """Compute the overlap between two displaced harmonic oscillators. This function computes the overlap integral between two harmonic oscillators with frequencies w1, w2 that are displaced by DQ for the quantum numbers n1, n2. The integral is computed using an analytic formula for the overlap of two displaced harmonic oscillators. The method comes from B.P. Zapol, Chem. Phys. Lett. 93, 549 (1982). Parameters ---------- DQ : float displacement between harmonic oscillators in amu^{1/2} Angstrom w1, w2 : float frequencies of the harmonic oscillators in eV n1, n2 : integer quantum number of the overlap integral to calculate Returns ------- np.longdouble overlap of the two harmonic oscillator wavefunctions """ w = np.double(w1 * w2 / (w1 + w2)) rho = np.sqrt(factor) * np.sqrt(w / 2) * DQ sinfi = np.sqrt(w1) / np.sqrt(w1 + w2) cosfi = np.sqrt(w2) / np.sqrt(w1 + w2) Pr1 = (-1)**n1 * np.sqrt(2 * cosfi * sinfi) * np.exp(-rho**2) Ix = 0. k1 = n2 // 2 k2 = n2 % 2 l1 = n1 // 2 l2 = n1 % 2 for kx in range(k1+1): for lx in range(l1+1): k = 2 * kx + k2 l = 2 * lx + l2 # noqa: E741 Pr2 = (fact(n1) * fact(n2))**0.5 / \ (fact(k)*fact(l)*fact(k1-kx)*fact(l1-lx)) * \ 2**((k + l - n2 - n1) / 2) Pr3 = (sinfi**k)*(cosfi**l) # f = hermval(rho, [0.]*(k+l) + [1.]) f = herm(np.float64(rho), k+l) Ix = Ix + Pr1*Pr2*Pr3*f return Ix
f0eba159f1bfb3fd05b1a825170e03e02587ef32
3,656,551
def init_manager(mocker):
    """Fixture to initialize a style constant."""
    mocker.patch.object(manager.StyleManager, "__init__", lambda x: None)

    def _create():
        return manager.StyleManager()

    return _create
da7838352c0a8c13acfcd0d345f78e329978409c
3,656,552
def GaussLegendre(f, n):
    """Gauss-Legendre integration on [-1, 1] with n points."""
    x, w = numint.GaussLegendre(n)
    I = np.dot(f(x), w)
    return I
73fcd257e92852b56fcec7d0f21cbbcf87afdb51
3,656,553
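Editor's sketch of the quadrature rule the GaussLegendre entry above relies on: the numint.GaussLegendre helper is not included in this entry, but NumPy's leggauss returns equivalent nodes and weights under the usual convention, which is assumed here.

import numpy as np

# Nodes x and weights w for n-point Gauss-Legendre quadrature on [-1, 1];
# np.polynomial.legendre.leggauss plays the role assumed for numint.GaussLegendre.
def gauss_legendre_rule(n):
    return np.polynomial.legendre.leggauss(n)

x, w = gauss_legendre_rule(3)
print(np.dot(x**2, w))  # integral of x^2 over [-1, 1] = 2/3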
from typing import List
from typing import Dict
from typing import OrderedDict


def directory_item_groups(
    items: List[Item], level: int
) -> Dict[str, List[Item]]:
    """Split items into groups per directory at the given level.

    The level is relative to the root directory, which is at level 0.
    """
    module_items = OrderedDict()
    for item in items:
        module_items.setdefault(item.parent_path(level), []).append(item)
    return module_items
2a8e8138097ad48417f9988059a0ed19d63e4877
3,656,554
def mergeSort(x):
    """ Function to sort an array using merge sort algorithm """
    if len(x) == 0 or len(x) == 1:
        return x
    else:
        middle = len(x) // 2
        a = mergeSort(x[:middle])
        b = mergeSort(x[middle:])
        return merge(a, b)
9187209cd9e679c790d0cddc18d58e6edc3e6d3a
3,656,555
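The mergeSort entry above calls a merge helper that is not included in this entry; a minimal sketch of such a helper, assuming plain Python lists, could look like this.

def merge(a, b):
    """Merge two already-sorted lists into one sorted list."""
    result = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            result.append(a[i])
            i += 1
        else:
            result.append(b[j])
            j += 1
    # One of the two lists is exhausted; append the remainder of the other.
    result.extend(a[i:])
    result.extend(b[j:])
    return result

print(mergeSort([5, 2, 4, 1, 3]))  # [1, 2, 3, 4, 5]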
from typing import Union from typing import Optional from typing import Dict from typing import Any async def join( db, query: Union[dict, str], document: Optional[Dict[str, Any]] = None, session: Optional[AsyncIOMotorClientSession] = None, ) -> Optional[Dict[str, Any]]: """ Join the otu associated with the supplied ``otu_id`` with its sequences. If an OTU is passed, the document will not be pulled from the database. :param db: the application database client :param query: the id of the otu to join or a Mongo query. :param document: use this otu document as a basis for the join :param session: a Motor session to use for database operations :return: the joined otu document """ # Get the otu entry if a ``document`` parameter was not passed. document = document or await db.otus.find_one(query, session=session) if document is None: return None cursor = db.sequences.find({"otu_id": document["_id"]}, session=session) # Merge the sequence entries into the otu entry. return virtool.otus.utils.merge_otu(document, [d async for d in cursor])
d01dc90855692a149a279fbad9b8777d4a850a7d
3,656,556
import time import networkx import math def cp_solve(V, E, lb, ub, col_cov, cuts=[], tl=999999): """Solves a partial problem with a CP model. Args: V: List of vertices (columns). E: List of edges (if a transition between two columns is allowed). col_cov: Matrix of the zone coverages of the columns (c[i][j] == 1 if zone i is covered by column j). Returns: - Objective value of the best Hamiltonian path, -1 if there is no Hamiltonian path within the LB/UB limits, -2 if the graph is not connected (this latter case has been removed). - A feasible solution for this objective value. """ cp_start_time = time.time() num_cols = len(V) num_zones = len(col_cov) # First, check if the graph is disconnected (in which case no # Hamiltonian path exists). G = networkx.Graph() G.add_nodes_from(V) G.add_edges_from(E) # # If the graph is not connected, no Hamiltonian path can exist. # if not networkx.is_connected(G): # return -2, [] # Variables. model = cp_model.CpModel() x = [model.NewIntVar(0, num_cols-1, 'x'+str(i)) for i in range(num_rounds)] # Alternative for GCC, since the constraint is not available in OR-Tools. x_occs = [] for i in range(num_cols): occs = [] for j in range(num_rounds): boolvar = model.NewBoolVar('') model.Add(x[j] == i).OnlyEnforceIf(boolvar) model.Add(x[j] != i).OnlyEnforceIf(boolvar.Not()) occs.append(boolvar) x_occs.append(sum(occs)) # if mp_integer: # model.AddLinearConstraint(x_occs[i], 1, num_rounds-num_cols+1) # Add the CP cuts. for cut in cuts: model.Add(sum(x_occs[i] for i in range(num_cols) if i in cut) <= num_rounds-1) # Objective. if ub == 9999: ub = num_rounds+1 phi = model.NewIntVar(int(lb), math.floor(ub)-1, 'phi') coverages = [model.NewIntVar(0, num_rounds, 'c'+str(i)) for i in range(num_zones)] for i in range(num_zones): model.Add(cp_model.LinearExpr.ScalProd(x_occs, col_cov[i]) == coverages[i]) phi_low = model.NewIntVar(0, num_rounds, 'phi_low') phi_high = model.NewIntVar(0, num_rounds, 'phi_high') model.AddMinEquality(phi_low, coverages) model.AddMaxEquality(phi_high, coverages) model.Add(phi == phi_high-phi_low) model.Minimize(phi) # Regular constraint (Hamiltonian path). # For the initial state, we use a dummy node which is connected to # all other nodes. dummy = max(V)+1 start = dummy end = V arcs = [(dummy, i, i) for i in V] for e in E: arcs.append((e[0], e[1], e[1])) # Node self-loops for v in V: arcs.append((v, v, v)) # If there is only one vertex then a Hamiltonian path exists. if len(V) > 1: model.AddAutomaton(x, start, end, arcs) # Solve the model. solver = cp_model.CpSolver() solver.parameters.max_time_in_seconds = tl status = solver.Solve(model) #assert status == cp_model.OPTIMAL or status == cp_model.INFEASIBLE or status == cp_model.FEASIBLE if status == cp_model.OPTIMAL: solution = [solver.Value(x[i]) for i in range(num_rounds)] return solver.ObjectiveValue(), solution, time.time()-cp_start_time elif status == cp_model.INFEASIBLE or status == cp_model.UNKNOWN: return -1, [], time.time()-cp_start_time elif status == cp_model.FEASIBLE: return solver.ObjectiveValue(), [], time.time()-cp_start_time
6ad8ca02fcf119192e3aad4881a4eb9e0adf30d0
3,656,557
def file_exists(path: Text):
    """
    Returns true if file exists at path.

    Args:
        path (str): Local path in filesystem.
    """
    return file_io.file_exists_v2(path)
9d9acf36ad0276a4fa440a54ed859b24e6bfee4e
3,656,558
import requests
import json


def _get_page_num_detail():
    """
    Eastmoney - Data Center - Featured Data - Institutional Research - Institutional Research Details
    http://data.eastmoney.com/jgdy/xx.html
    :return: int, total number of pages of institutional research details
    """
    url = "http://data.eastmoney.com/DataCenter_V3/jgdy/xx.ashx"
    params = {
        "pagesize": "5000",
        "page": "1",
        "js": "var SZGpIhFb",
        "param": "",
        "sortRule": "-1",
        "sortType": "0",
        "rt": "52581407",
    }
    res = requests.get(url, params=params)
    data_json = json.loads(res.text[res.text.find("={") + 1:])
    return data_json["pages"]
84c32485637cb481f1ebe6fe05609e5b545daece
3,656,559
def freeze_session(
        session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.
    """
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(set(v.op.name for v in tf.global_variables())
                                .difference(keep_var_names or []))
        output_names = output_names or []
        output_names += [v.op.name for v in tf.global_variables()]
        # Graph -> GraphDef ProtoBuf
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
        return frozen_graph
ad8335110c139b73fb0c5cebb56dbdeea702a751
3,656,560
def send_mail(subject, body, recipient_list, bcc_list=None, from_email=None, connection=None, attachments=None, fail_silently=False, headers=None, cc_list=None, dc1_settings=None, content_subtype=None): """ Like https://docs.djangoproject.com/en/dev/topics/email/#send-mail Attachment is a list of tuples (filename, content, mime_type), where mime_type can be None. """ if not dc1_settings: dc1_settings = DefaultDc().settings shadow_email = dc1_settings.SHADOW_EMAIL # Global bcc if shadow_email: if bcc_list: bcc_list = list(bcc_list) bcc_list.append(shadow_email) else: bcc_list = [shadow_email] bcc_list = set(bcc_list) # Default "From:" header if not from_email: from_email = dc1_settings.DEFAULT_FROM_EMAIL # Compose message msg = EmailMessage(subject, body, from_email, recipient_list, bcc_list, connection=connection, attachments=attachments, headers=headers, cc=cc_list) if content_subtype: msg.content_subtype = content_subtype # Send mail if attachments: logger.info('Sending mail to "%s" with subject "%s" and attachments "%s"', recipient_list, subject, [i[0] for i in attachments]) else: logger.info('Sending mail to "%s" with subject "%s"', recipient_list, subject) return msg.send(fail_silently=fail_silently)
36389b7f7e0906aa92ce06c66c4f51faa2643e31
3,656,561
def distinct_by_t(func):
    """
    Transformation for Sequence.distinct_by
    :param func: distinct_by function
    :return: transformation
    """
    def distinct_by(sequence):
        distinct_lookup = {}
        for element in sequence:
            key = func(element)
            if key not in distinct_lookup:
                distinct_lookup[key] = element
        return distinct_lookup.values()

    return Transformation("distinct_by({0})".format(name(func)), distinct_by, None)
3e2811b9f1b69b5c45f65a561b7f67ae477c8825
3,656,562
def _get_partition_info(freq_unit):
    """
    Map the platform's frequency unit to TDW's cycle unit and partition format.

    :param freq_unit: frequency unit of the scheduling period
    :return: TDW cycle unit, partition value format
    """
    if freq_unit == "m":
        # minute-level task
        cycle_unit = "I"
        partition_value = ""
    elif freq_unit == "H":
        # hourly task
        cycle_unit = "H"
        partition_value = "YYYYMMDDHH"
    elif freq_unit == "d":
        # daily task
        cycle_unit = "D"
        partition_value = "YYYYMMDD"
    elif freq_unit == "w":
        # weekly task
        cycle_unit = "W"
        partition_value = "YYYYMMDD"
    elif freq_unit == "M":
        # monthly task
        cycle_unit = "M"
        partition_value = "YYYYMM"
    elif freq_unit == "O":
        # one-off task
        cycle_unit = "O"
        partition_value = ""
    else:
        # other tasks
        cycle_unit = "R"
        partition_value = ""
    return cycle_unit, partition_value
1f7df3364a21018daa8d3a61507ee59c467c8ffc
3,656,564
from typing import Any


def metadata_property(k: str) -> property:
    """
    Make metadata fields available directly on a base class.
    """
    def getter(self: MetadataClass) -> Any:
        return getattr(self.metadata, k)

    def setter(self: MetadataClass, v: Any) -> None:
        return setattr(self.metadata, k, v)

    return property(getter, setter)
22d3ab3c8a7029564083a6ba544acd69f2ee5491
3,656,565
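Editor's usage sketch for the metadata_property entry above; the Record and Metadata classes are illustrative stand-ins for the MetadataClass the annotations assume.

from dataclasses import dataclass

@dataclass
class Metadata:
    title: str = ""

class Record:
    def __init__(self):
        self.metadata = Metadata()

    # Each metadata field becomes a pass-through property on the owning class.
    title = metadata_property("title")

r = Record()
r.title = "hello"          # writes through to r.metadata.title
print(r.metadata.title)    # hello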
import torch


def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an RGB image.

    Args:
        img (Tensor): Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        Tensor: Contrast adjusted image.
    """
    if not F._is_tensor_image(img):
        raise TypeError('tensor is not a torch image.')

    mean = torch.mean(rgb_to_grayscale(img).to(torch.float))

    return _blend(img, mean, contrast_factor)
740c68fe269229329cd37d25424178a74f5ac7fc
3,656,566
def license_wtfpl():
    """
    Create a license object called WTF License.
    """
    return mixer.blend(cc.License, license_name="WTF License")
d202d605fe84556c553fdc7cf70c5815eb1dbee4
3,656,567
import copy def _add_embedding_column_map_fn( k_v, original_example_key, delete_audio_from_output, audio_key, label_key, speaker_id_key): """Combine a dictionary of named embeddings with a tf.train.Example.""" k, v_dict = k_v if original_example_key not in v_dict: raise ValueError( f'Original key not found: {original_example_key} vs {v_dict.keys()}') ex_l = v_dict[original_example_key] assert len(ex_l) == 1, (len(ex_l), k_v[0], ex_l) ex = copy.deepcopy(ex_l[0]) # Beam does not allow modifying the input. assert isinstance(ex, tf.train.Example), type(ex) for name, embedding_l in v_dict.items(): if name == original_example_key: continue assert len(embedding_l) == 1, embedding_l embedding = embedding_l[0] assert isinstance(embedding, np.ndarray) assert embedding.ndim == 2, embedding.ndim # Store the embedding 2D shape and store the 1D embedding. The original # embedding can be recovered with `emb.reshape(feature['shape'])`. ex = _add_embedding_to_tfexample(ex, embedding, f'embedding/{name}') if delete_audio_from_output: ex.features.feature.pop(audio_key, None) # Assert that the label is present. If it's a integer, convert it to bytes. if label_key: if label_key not in ex.features.feature: raise ValueError(f'Label not found: {label_key} vs {ex.features.feature}') lbl_feat = ex.features.feature[label_key] if lbl_feat.int64_list.value: lbl_val_as_bytes = str(lbl_feat.int64_list.value[0]).encode('utf-8') ex.features.feature.pop(label_key, None) ex.features.feature[label_key].bytes_list.value.append(lbl_val_as_bytes) # If provided, assert that the speaker_id field is present, and of type # `bytes`. if speaker_id_key: feats = ex.features.feature assert speaker_id_key in feats, (speaker_id_key, feats.keys()) assert feats[speaker_id_key].bytes_list.value, feats[speaker_id_key] return k, ex
710fd658b0f1d830c8e4e97d473b02f54a0d4414
3,656,568
def modelf(input_shape): """ Function creating the model's graph in Keras. Argument: input_shape -- shape of the model's input data (using Keras conventions) Returns: model -- Keras model instance """ X_input = Input(shape = input_shape) ### START CODE HERE ### # Step 1: CONV layer (≈4 lines) X = Conv1D(196, kernel_size = 15, strides = 4)(X_input) # CONV1D X = BatchNormalization()(X) # Batch normalization X = Activation("relu")(X) # ReLu activation X = Dropout(0.8)(X) # dropout (use 0.8) # Step 2: First GRU Layer (≈4 lines) X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences) X = Dropout(0.8)(X) # dropout (use 0.8) X = BatchNormalization()(X) # Batch normalization # Step 3: Second GRU Layer (≈4 lines) X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences) X = Dropout(0.8)(X) # dropout (use 0.8) X = BatchNormalization()(X) # Batch normalization X = Dropout(0.8)(X) # dropout (use 0.8) # Step 4: Time-distributed dense layer (≈1 line) X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed (sigmoid) ### END CODE HERE ### model = Model(inputs = X_input, outputs = X) return model
d8beaf7335e19c66ea3913ed019647d9e42f92d1
3,656,569
def get_mbed_official_psa_release(target=None):
    """
    Creates a list of PSA targets with default toolchain and
    artifact delivery directory.

    :param target: Ask for specific target, None for all targets.
    :return: List of tuples (target, toolchain, delivery directory).
    """
    psa_targets_release_list = []
    psa_secure_targets = [t for t in TARGET_NAMES if
                          Target.get_target(t).is_PSA_secure_target]
    if target is not None:
        if target not in psa_secure_targets:
            raise Exception("{} is not a PSA secure target".format(target))
        psa_targets_release_list.append(_get_target_info(target))
    else:
        for t in psa_secure_targets:
            # Use the loop variable here; the original passed `target`,
            # which is always None in this branch.
            psa_targets_release_list.append(_get_target_info(t))

    return psa_targets_release_list
0f260c1d57b0d21d911fcd6998fadee0791600de
3,656,570
def match_l2(X, Y, match_rows=False, normalize=True):
    """Return the minimum Frobenius distance between X and Y over permutations
    of columns (or rows)."""
    res = _match_factors(X, Y, l2_similarity, match_rows)
    res['score'] = np.sqrt(-res['score'])
    if normalize:
        res['score'] = res['score'] / np.linalg.norm(X, 'fro')
    return res
181ecde4c0837b69f7a37287bcf9e768fdaa3e58
3,656,571
import time import scipy def doNMFDriedger(V, W, L, r = 7, p = 10, c = 3, plotfn = None, plotfnw = None): """ Implement the technique from "Let It Bee-Towards NMF-Inspired Audio Mosaicing" :param V: M x N target matrix :param W: An M x K matrix of template sounds in some time order\ along the second axis :param L: Number of iterations :param r: Width of the repeated activation filter :param p: Degree of polyphony; i.e. number of values in each column\ of H which should be un-shrunken :param c: Half length of time-continuous activation filter """ N = V.shape[1] K = W.shape[1] tic = time.time() H = np.random.rand(K, N) print("H.shape = ", H.shape) print("Time elapsed H initializing: %.3g"%(time.time() - tic)) errs = np.zeros(L+1) errs[0] = getKLError(V, W.dot(H)) if plotfnw: plt.figure(figsize=(12, 3)) plotfnw(W) plt.savefig("Driedger_W.svg", bbox_inches='tight') if plotfn: res=4 plt.figure(figsize=(res*2, res*2)) for l in range(L): print("NMF Driedger iteration %i of %i"%(l+1, L)) iterfac = 1-float(l+1)/L tic = time.time() #Step 1: Avoid repeated activations print("Doing Repeated Activations...") MuH = scipy.ndimage.filters.maximum_filter(H, size=(1, r)) H[H<MuH] = H[H<MuH]*iterfac #Step 2: Restrict number of simultaneous activations print("Restricting simultaneous activations...") #Use partitions instead of sorting for speed colCutoff = -np.partition(-H, p, 0)[p, :] H[H < colCutoff[None, :]] = H[H < colCutoff[None, :]]*iterfac #Step 3: Supporting time-continuous activations if c > 0: print("Supporting time-continuous activations...") di = K-1 dj = 0 for k in range(-H.shape[0]+1, H.shape[1]): z = np.cumsum(np.concatenate((np.zeros(c), np.diag(H, k), np.zeros(c)))) x2 = z[2*c::] - z[0:-2*c] H[di+np.arange(len(x2)), dj+np.arange(len(x2))] = x2 if di == 0: dj += 1 else: di -= 1 #KL Divergence Version WH = W.dot(H) WH[WH == 0] = 1 VLam = V/WH WDenom = np.sum(W, 0) WDenom[WDenom == 0] = 1 H = H*((W.T).dot(VLam)/WDenom[:, None]) print("Elapsed Time H Update %.3g"%(time.time() - tic)) errs[l+1] = getKLError(V, W.dot(H)) #Output plots every 20 iterations if plotfn and ((l+1)==L or (l+1)%20 == 0): plt.clf() plotfn(V, W, H, l+1, errs) plt.savefig("NMFDriedger_%i.png"%(l+1), bbox_inches = 'tight') return H
3b3b0fe9388992bdd87cfa6b4cb0748f4502adc7
3,656,572
def extract_red(image):
    """ Returns the red channel of the input image. It is highly recommended to make a copy of the input
    image in order to avoid modifying the original array. You can do this by calling:
    temp_image = np.copy(image)

    Args:
        image (numpy.array): Input RGB (BGR in OpenCV) image.

    Returns:
        numpy.array: Output 2D array containing the red channel.
    """
    # Since Red is last index, we want all rows, columns, and the last channel.
    return np.copy(image[:, :, 2])
0f591099e439a038ef8e75d65e4eb26c200018d0
3,656,573
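Editor's usage sketch for the extract_red entry above, using a small synthetic BGR image so the expected output is easy to verify by hand.

import numpy as np

# 2x2 BGR image: blue=10, green=20, red=30 everywhere.
image = np.full((2, 2, 3), [10, 20, 30], dtype=np.uint8)
red = extract_red(image)
print(red)        # [[30 30] [30 30]]
print(red.shape)  # (2, 2)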
def _cleaned_data_to_key(cleaned_data):
    """
    Return a tuple representing a unique key for the cleaned data of an InteractionCSVRowForm.
    """
    # As an optimisation we could just track the pk for model instances,
    # but that is omitted for simplicity
    key = tuple(cleaned_data.get(field) for field in DUPLICATE_FIELD_MAPPING)
    if all(key):
        return key

    # Some of the fields are missing (this happens if they did not pass validation)
    return None
aa08e0cafd0ac4ba3749db65208655dc51671997
3,656,574
def schedule_for_cleanup(request, syn):
    """Returns a closure that takes an item that should be scheduled for cleanup.
    The cleanup will occur after the module tests finish to limit the residue left behind
    if a test session should be prematurely aborted for any reason."""

    items = []

    def _append_cleanup(item):
        items.append(item)

    def cleanup_scheduled_items():
        _cleanup(syn, items)

    request.addfinalizer(cleanup_scheduled_items)

    return _append_cleanup
ccbdba1a1f8dea0f13e5717d0743739d599e22e6
3,656,575
import base64


def unpickle_context(content, pattern=None):
    """
    Unpickle the context from the given content string or return None.
    """
    pickle = get_pickle()
    if pattern is None:
        pattern = pickled_context_re
    match = pattern.search(content)
    if match:
        return pickle.loads(base64.standard_b64decode(match.group(1)))
    return None
87fa831b038329313364d512107129f69db136ad
3,656,576
def ask_openid(request, openid_url, redirect_to, on_failure=None, sreg_request=None): """ basic function to ask openid and return response """ on_failure = on_failure or signin_failure trust_root = getattr( settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/' ) if xri.identifierScheme(openid_url) == 'XRI' and getattr( settings, 'OPENID_DISALLOW_INAMES', False ): msg = _("i-names are not supported") return on_failure(request, msg) consumer = Consumer(request.session, DjangoOpenIDStore()) try: auth_request = consumer.begin(openid_url) except DiscoveryFailure: msg = _("The password or OpenID was invalid") return on_failure(request, msg) if sreg_request: auth_request.addExtension(sreg_request) redirect_url = auth_request.redirectURL(trust_root, redirect_to) return HttpResponseRedirect(redirect_url)
bb5deefc32d1c4253d518eeead34b290e028a051
3,656,577
import torch


def get_accuracy_ANIL(logits, targets):
    """Compute the accuracy (after adaptation) of MAML on the test/query points

    Parameters
    ----------
    logits : `torch.FloatTensor` instance
        Outputs/logits of the model on the query points. This tensor has shape
        `(num_examples, num_classes)`.

    targets : `torch.LongTensor` instance
        A tensor containing the targets of the query points. This tensor has
        shape `(num_examples,)`.

    Returns
    -------
    accuracy : `torch.FloatTensor` instance
        Mean accuracy on the query points
    """
    _, predictions = torch.max(logits, dim=-1)
    return torch.mean(predictions.eq(targets).float())
2ab61284da6d9cd96c066061823570d64567e9f3
3,656,578
import logging


def stream_logger():
    """ sets up the logger for the Simpyl object to log to the output
    """
    logger = logging.Logger('stream_handler')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logger.addHandler(handler)
    return logger
45f5af00a0006cc8155bb4a134cce531e51e646a
3,656,579
def sql_coordinate_frame_lookup_key(bosslet_config, coordinate_frame):
    """
    Get the lookup key that identifies the coordinate frame specified.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        coordinate_frame: Identifies coordinate frame.

    Returns:
        coordinate_set(str): Coordinate Frame lookup key.
    """
    query = "SELECT id FROM coordinate_frame WHERE name = %s"
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query, (coordinate_frame,))
        coordinate_set = cursor.fetchall()
        if len(coordinate_set) != 1:
            raise Exception(
                "Can't find coordinate frame: {}".format(coordinate_frame))
        else:
            LOGGER.info("{} coordinate frame id: {}".format(coordinate_frame, coordinate_set[0][0]))

    return coordinate_set[0][0]
8bf7db01b171e13b0066a806eb097dec4a59c04e
3,656,580
def entry_from_resource(resource, client, loggers): """Detect correct entry type from resource and instantiate. :type resource: dict :param resource: One entry resource from API response. :type client: :class:`~google.cloud.logging.client.Client` :param client: Client that owns the log entry. :type loggers: dict :param loggers: A mapping of logger fullnames -> loggers. If the logger that owns the entry is not in ``loggers``, the entry will have a newly-created logger. :rtype: :class:`~google.cloud.logging.entries._BaseEntry` :returns: The entry instance, constructed via the resource """ if 'textPayload' in resource: return TextEntry.from_api_repr(resource, client, loggers) if 'jsonPayload' in resource: return StructEntry.from_api_repr(resource, client, loggers) if 'protoPayload' in resource: return ProtobufEntry.from_api_repr(resource, client, loggers) return EmptyEntry.from_api_repr(resource, client, loggers)
0519ad63c11e04ca890288953440272de224b9db
3,656,581
def make_preprocesser(training_data): """ Constructs a preprocessing function ready to apply to new dataframes. Crucially, the interpolating that is done based on the training data set is remembered so it can be applied to test datasets (e.g the mean age that is used to fill in missing values for 'Age' will be fixed based on the mean age within the training data set). Summary by column: ['PassengerId', 'Survived', # this is our target, not a feature 'Pclass', # keep as is: ordinal value should work, even though it's inverted (higher number is lower class cabin) 'Name', # omit (could try some fancy stuff like inferring ethnicity, but skip for now) 'Sex', # code to 0 / 1 'Age', # replace missing with median 'SibSp', 'Parch', 'Ticket', # omit (doesn't seem like low hanging fruit, could look more closely for pattern later) 'Fare', # keep, as fare could be finer grained proxy for socio economic status, sense of entitlement / power in getting on boat 'Cabin', # one hot encode using first letter as cabin as the cabin sector 'Embarked'] # one hot encode Params: df: pandas.DataFrame containing the training data Returns: fn: a function to preprocess a dataframe (either before training or fitting a new dataset) """ def pick_features(df): return df[['PassengerId', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']] # save median Age so we can use it to fill in missing data consistently # on any dataset median_age_series = training_data[['Age', 'Fare']].median() def fix_missing(df): return df.fillna(median_age_series) def map_sex(df): df['Sex'] = df['Sex'].map({'male': 0, 'female': 1}) return df def one_hot_cabin(df): def cabin_sector(cabin): if isinstance(cabin, str): return cabin[0].lower() else: return cabin df[['cabin_sector']] = df[['Cabin']].applymap(cabin_sector) one_hot = pd.get_dummies(df['cabin_sector'], prefix="cabin_sector") interesting_cabin_sectors = ["cabin_sector_{}".format(l) for l in 'bcde'] for column, _ in one_hot.iteritems(): if column.startswith('cabin_sector_') and column not in interesting_cabin_sectors: one_hot = one_hot.drop(column, axis=1) df = df.join(one_hot) df = df.drop('Cabin', axis=1) df = df.drop('cabin_sector', axis=1) return df def one_hot_embarked(df): one_hot = pd.get_dummies(df['Embarked'], prefix="embarked") df = df.join(one_hot) df = df.drop('Embarked', axis=1) return df # We want standard scaling fit on the training data, so we get a scaler ready # for application now. It needs to be applied to data that already has the other # pre-processing applied. training_data_all_but_scaled = map_sex(fix_missing(pick_features(training_data))) stdsc = StandardScaler() stdsc.fit(training_data_all_but_scaled[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']]) def scale_df(df): df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']] = \ stdsc.transform(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']]) df[['Sex']] = df[['Sex']].applymap(lambda x: 1 if x == 1 else -1) for column, _ in df.iteritems(): if column.startswith('cabin_sector_') or column.startswith('embarked_'): df[[column]] = df[[column]].applymap(lambda x: 1 if x == 1 else -1) return df def preprocess(df, scale=True): """ Preprocesses a dataframe so it is ready for use with a model (either for training or prediction). Params: scale: whether to apply feature scaling. E.g with random forests feature scaling isn't necessary. """ all_but_scaled = one_hot_embarked(one_hot_cabin(map_sex(fix_missing(pick_features(df))))) if scale: return scale_df(all_but_scaled) else: return all_but_scaled return preprocess
480ba5b02e5347e768bd5b2cdbc8b19af1ddee8c
3,656,582
def get_breakeven_prob(predicted, threshold=0):
    """
    This function calculates the probability of a stock being above a certain
    threshold, which can be defined as a value (final stock price) or a return
    rate (percentage change).
    """
    predicted0 = predicted.iloc[0, 0]
    predicted = predicted.iloc[-1]
    predList = list(predicted)

    over = [(i * 100) / predicted0 for i in predList if ((i - predicted0) * 100) / predicted0 >= threshold]
    less = [(i * 100) / predicted0 for i in predList if ((i - predicted0) * 100) / predicted0 < threshold]

    return (len(over) / (len(over) + len(less)))
a1cededbe7a0fbe7ffe19e9b873f55c8ce369590
3,656,583
def trim_whitespace(sub_map, df, source_col, op_col):
    """Trims whitespace on all values in the column"""
    df[op_col] = df[op_col].transform(
        lambda x: x.strip() if not pd.isnull(x) else x)
    return df
649a48cbb9246d4842555b5a21bc4d638a00ca00
3,656,584
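Editor's usage sketch for the trim_whitespace entry above; sub_map and source_col are not used by the function body, so placeholders are passed for them here.

import pandas as pd

df = pd.DataFrame({"name": ["  alice ", "bob", None]})
df = trim_whitespace(sub_map=None, df=df, source_col="name", op_col="name")
print(df["name"].tolist())  # ['alice', 'bob', None]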
from re import A from re import T def beneficiary(): """ RESTful CRUD controller """ # Normally only used in Report # - make changes as component of Project s3db.configure("project_beneficiary", deletable = False, editable = False, insertable = False, ) list_btn = A(T("Beneficiary Report"), _href=URL(c="project", f="beneficiary", args="report", vars=get_vars), _class="action-btn") #def prep(r): # if r.method in ("create", "create.popup", "update", "update.popup"): # # Coming from Profile page? # location_id = r.get_vars.get("~.(location)", None) # if location_id: # field = r.table.location_id # field.default = location_id # field.readable = field.writable = False # if r.record: # field = r.table.location_id # field.comment = None # field.writable = False # return True #s3.prep = prep return s3_rest_controller(hide_filter=False)
ec34dd0989154bcfe2ace8506fe1cbe9c1ba9c49
3,656,585
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    #cfg.merge_from_file(args.config_file)
    #cfg.merge_from_file(model_zoo.get_config_file("/data/mostertrij/tridentnet/detectron2/configs/COCO-Detection/my_script_faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
    cfg.merge_from_file("/data/mostertrij/tridentnet/detectron2/configs/COCO-Detection/my_script_faster_rcnn_X_101_32x8d_FPN_3x.yaml")
    DATASET_NAME = "LGZ_v5_more_rotations"
    cfg.DATASETS.TRAIN = (f"{DATASET_NAME}_train",)
    cfg.DATASETS.VAL = (f"{DATASET_NAME}_val",)
    cfg.DATASETS.TEST = (f"{DATASET_NAME}_test",)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
a3053945cd6680c220fe8ea87189943c44558d8d
3,656,586
def get_distinct_quotation_uid(*args, **kwargs):
    """
    Get distinct quotation user ids
    :param args:
    :param kwargs:
    :return: List
    """
    field = 'uid'
    return map(lambda x: getattr(x, field),
               db_instance.get_distinct_field(Quotation, field, *args, **kwargs))
5a8fe7252f6ac233b69e57c0baac0f1f2d3f51ff
3,656,587
import pathlib


def present_from(ref: pathlib.Path, obs: pathlib.Path) -> pathlib.Path:
    """Build a somehow least surprising difference folder from ref and obs."""
    ref_code = ref.parts[-1]
    if obs.is_file():
        return pathlib.Path(*obs.parts[:-1], f'diff-of-{obs.parts[-1]}')
    present = pathlib.Path(*obs.parts[:-1], f'diff-of-{ref_code}_{obs.parts[-1]}')
    present.mkdir(parents=True, exist_ok=True)
    return present
59ae1eefaeacc9ddfac773c0c88974b98757d4a2
3,656,588
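A small sketch of present_from above. The paths below are hypothetical; because obs.is_file() consults the real filesystem, the observation file has to exist for the file branch to trigger, and the directory branch creates the diff folder as a side effect:

import tempfile
from pathlib import Path

scratch = Path(tempfile.mkdtemp())
ref = scratch / 'baselines' / 'case-042'

obs_file = scratch / 'runs' / 'report.csv'
obs_file.parent.mkdir(parents=True)
obs_file.write_text('x,y\n')
print(present_from(ref, obs_file))  # .../runs/diff-of-report.csv (path only, nothing created)

obs_dir = scratch / 'runs' / 'artifacts'
print(present_from(ref, obs_dir))   # .../runs/diff-of-case-042_artifacts (created on disk)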
def dataQ_feeding(filename_queue, feat_dim, seq_len): """ Reads and parse the examples from alignment dataset Args: filename_queue: A queue of strings with the filenames to read from. Returns: An object representing a single example, with the following fields: MFCC sequence: 200 * 39 dimensions """ class MFCCRECORD(object): pass result = MFCCRECORD() ### use the line reader ### reader = tf.TextLineReader() #values = [] #for i in range(NUM_UP_TO): # key, value = reader.read(filename_queue) # values.append(value) key, value = reader.read(filename_queue) ### try to read NUM_UP_TO lines in one time ### ### read the csv file into features ### # seq = [] record_defaults = [[1.] for i in range(feat_dim*seq_len)] # for value in values: # seq.append(tf.decode_csv(value, record_defaults=record_defaults)) tmp_result = tf.decode_csv(value, record_defaults=record_defaults) ### so we have (NUM_UP_TO, seq_len *feat_dim ) ### ### reshape it into (NUM_UP_TO, seq_len, feat_dim) ### ### result.mfcc: sequence ### mfcc = tf.cast(tf.reshape(tmp_result, shape=(seq_len , \ feat_dim)),tf.float32) ### result.rev_mfcc: reverse of sequence ### # result.rev_mfcc = tf.reverse(result.mfcc, [False, True]) return mfcc, mfcc
23d3e81bdd266f6cebe9bdff2160c4b7294e648c
3,656,589
def dummy_backend(_, **kwargs): """ Dummy backend always returning stats with 0 """ return _default_statement()
875adb50540029022b28de6388738d1e5ba01e30
3,656,590
def comp_mass(self): """Compute the mass of the Frame Parameters ---------- self : Frame A Frame object Returns ------- Mfra: float Mass of the Frame [kg] """ Vfra = self.comp_volume() # Mass computation return Vfra * self.mat_type.struct.rho
b78ef02f045c1f624b3277ec3e358921b3ea5c02
3,656,591
def write_DS9reg(x, y, filename=None, coord='IMAGE', ptype='x', size=20, c='green', tag='all', width=1, text=None): """Write a region file for ds9 for a list of coordinates. Taken from Neil Crighton's barak.io Parameters ---------- x, y : arrays of floats, shape (N,) The coordinates. These may be image or WCS. Please make sure to update the coord keyword accordingly. filename : str, optional A filename to write to. coord : str (`IMAGE` or `J2000`) The coordinate type: `IMAGE` (pixel coordinates) or `J2000` (celestial coordinates). ptype : str or np.array of shape (N,) DS9 point type (e.g. `circle`, `box`, `diamond`, `cross`, `x`, `arrow`, `boxcircle`) size : int or np.array of shape (N,) DS9 point size. c : str or np.array of shape (N,) point colour: `cyan` `blue` `magenta` `red` `green` `yellow` `white` `black`}. tag : str or np.array of shape (N,) DS9 tag. e.g. 'all' width : int or np.array of shape (N,) DS9 width text : str or np.array of shape (N,) Text """ header = ['global font="helvetica 10 normal" select=1 highlite=1 ' 'edit=0 move=1 delete=1 include=1 fixed=0 source\n'] header.append(coord + '\n') x = np.array(x) y = np.array(y) if isinstance(ptype, basestring): ptype = [ptype] * len(x) if isinstance(size, int): size = [size] * len(x) if isinstance(width, int): width = [width] * len(x) if isinstance(text, basestring): text = [text] * len(x) elif text is None: text = list(range(len(x))) if isinstance(tag, basestring): tag = [tag] * len(x) if isinstance(c, basestring): c = [c] * len(x) regions = [] # fmt = ('point(%12.8f,%12.8f) # \ # point=%s %s width=%s text={%s} color=%s tag={%s}\n') for i in xrange(len(x)): s = 'point({:.8f},{:.8f}) # point={} {} width={} text={{{}}} color={} tag={}\n'\ .format(x[i], y[i], ptype[i], size[i], width[i], text[i], c[i], tag[i]) regions.append(s) if filename is not None: fh = open(filename,'w') fh.writelines(header + regions) fh.close() return header, regions
9e2c67c8a681ba7abdd55e7f456079b32ed50688
3,656,592
def checkInputDataValid(lstX:list=None,lstY:list=None,f:object=None)->(int,tuple):
    """
    Validate lists of feature matrices and target vectors for consistent shapes.

    :param lstX: list of 2D numpy arrays (samples x features)
    :param lstY: list of 1D numpy arrays of targets, one per entry in lstX
    :param f: log file handle passed through to msg2log
    :return: int, (int, list, int, int): the status (0 on success, -1 on error) and the tuple (k, lstN, p, total_n)
    """
    ret=-1
    rettuple=(-1,[],-1,-1)
    if lstX is None or lstY is None:
        msg = "No input lists of arrays"
        msg2log(None, msg, f)
        return ret,rettuple
    if not lstX or not lstY:
        msg = "Empty input lists of arrays"
        msg2log(None, msg, f)
        return ret,rettuple
    k=len(lstX)
    k1=len(lstY)
    if (k1 != k):
        msg = "The input lists have a different number of items: {} vs {}".format(k,k1)
        msg2log(None, msg, f)
        return ret,rettuple
    lstP=[]
    lstN=[]
    lstNy=[]
    for item in lstX:
        X:np.array=item
        (n,p)=X.shape
        lstP.append(p)
        lstN.append(n)
    for item in lstY:
        y:np.array=item
        (n,)=y.shape
        lstNy.append(n)
    p=lstP[0]
    for i in range(len(lstP)):
        if p!=lstP[i]:
            msg="The feature numbers are different: {} vs {}".format(p,lstP[i])
            msg2log(None,msg,f)
            return ret,rettuple
    if lstN!=lstNy:
        msg="Different sample sizes:\n{}\n{}".format(lstN,lstNy)
        msg2log(None, msg, f)
        return ret,rettuple
    rettuple=(k,lstN,p,sum(lstN))
    ret=0
    return ret,rettuple
daacee0ee3803c02c04fe2b7213c6f8d408b39f6
3,656,593
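A sanity-check sketch of checkInputDataValid above with two consistent (X, y) pairs; the shapes are made up, and f=None stands in for the log file handle that msg2log would receive on an error path:

import numpy as np

lstX = [np.zeros((10, 3)), np.zeros((7, 3))]   # two design matrices, 3 features each
lstY = [np.zeros(10), np.zeros(7)]             # matching target vectors

ret, (k, lstN, p, n_total) = checkInputDataValid(lstX, lstY, f=None)
print(ret, k, lstN, p, n_total)  # 0 2 [10, 7] 3 17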
def parseManualTree(node): """Parses a tree of the manual Main_Page and returns it through a list containing tuples: [(title, href, [(title, href, [...]), ...]), ...]""" if node.nodeType != Node.ELEMENT_NODE: return [] result = [] lastadded = None for e in node.childNodes: if e.nodeType == Node.ELEMENT_NODE: if e.localName == "ol": assert lastadded != None for i in xrange(len(result)): if result[i][:2] == lastadded: result[i] = lastadded + (parseManualTree(e),) elif e.localName == "a": href, title = parseAnchor(e) lastadded = title, href result.append((title, href, None)) return result
6b62e9ad3b3ef4f3a0c6c60a931f1f2e940fe0f9
3,656,594
from typing import Union from typing import List from typing import Dict from typing import Optional from typing import Tuple import tqdm def validation_by_method(mapping_input: Union[List, Dict[str, List]], graph: nx.Graph, kernel: Matrix, k: Optional[int] = 100 ) -> Tuple[Dict[str, list], Dict[str, list]]: """Repeated holdout validation by diffustion method. :param mapping_input: List or value dictionary of labels {'label':value}. :param graph: Network as a graph object. :param kernel: Network as a kernel. :param k: Iterations for the repeated_holdout validation. """ auroc_metrics = defaultdict(list) auprc_metrics = defaultdict(list) for _ in tqdm(range(k)): input_diff, validation_diff = _get_random_cv_split_input_and_validation( mapping_input, kernel ) scores_z = diffuse_raw(graph=None, scores=input_diff, k=kernel, z=True) scores_raw = diffuse_raw(graph=None, scores=input_diff, k=kernel, z=False) scores_page_rank = generate_pagerank_baseline(graph, kernel) method_validation_scores = { 'raw': (validation_diff, scores_raw ), 'z': (validation_diff, scores_z ), 'random': ( validation_diff, _generate_random_score_ranking(kernel) ), 'page_rank': ( validation_diff, scores_page_rank ), } for method, validation_set in method_validation_scores.items(): try: auroc, auprc = _get_metrics(*validation_set) except ValueError: auroc, auprc = (0, 0) print(f'ROC AUC unable to calculate for {validation_set}') auroc_metrics[method].append(auroc) auprc_metrics[method].append(auprc) return auroc_metrics, auprc_metrics
b7ce9e72af55dc6d111948cb393f5e07b7fedd68
3,656,595
def get_about_agent():
    """
    This method returns general information about the agent, such as its name and its 'about' text.

    Args:
        token: Authentication token.
    """
    data = request.get_json()
    if "token" in data:
        channel = get_channel_id(data["token"])
        if channel is not None:
            agent = channel.agent
            return {"about": agent.about, "name": agent.name}
        else:
            return {"message": "token is not correct", "status": False}
    else:
        return {"message": "token is not correct", "status": False}
ca4301a9de5d4cb711892a221d4c984489c1e329
3,656,596
def RZ(angle, invert): """Return numpy array with rotation gate around Z axis.""" gate = np.zeros(4, dtype=complex).reshape(2, 2) if not invert: gate[0, 0] = np.cos(-angle/2) + np.sin(-angle/2) * 1j gate[1, 1] = np.cos(angle/2) + np.sin(angle/2) * 1j else: gate[0, 0] = np.cos(-angle/2) - np.sin(-angle/2) * 1j gate[1, 1] = np.cos(angle/2) - np.sin(angle/2) * 1j return gate
d99839fa49d92edea8d98653fd7a38861e6f49d8
3,656,598
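A quick numerical check of RZ above: composing a rotation with its inverse should give the identity, since the gate is just diag(exp(-i*angle/2), exp(+i*angle/2)):

import numpy as np

gate = RZ(np.pi / 2, invert=False)
gate_inv = RZ(np.pi / 2, invert=True)

print(np.round(gate, 3))                        # diag(0.707-0.707j, 0.707+0.707j)
print(np.allclose(gate @ gate_inv, np.eye(2)))  # True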
def _create_unicode(code: str) -> str:
    """
    Prepend the escaping Unicode sequence to the given color code.

    :param code: Color code, preferably an ASCII escape color code.
    :return: The full ANSI escape sequence for the code.
    """
    return u'\u001b[{}m'.format(code)
523973766d4f18daca8870e641ac77967b715532
3,656,599
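A usage sketch for _create_unicode above, assuming the codes follow the usual ANSI SGR numbering (31 = red foreground, 0 = reset):

RED = _create_unicode('31')
RESET = _create_unicode('0')

print(RED + 'error: something went wrong' + RESET)  # prints in red on ANSI-capable terminals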
def compact_axis_angle_from_matrix(R):
    """Compute compact axis-angle from rotation matrix.

    This operation is called the logarithmic map. Note that there are two
    possible solutions for the rotation axis when the angle is 180 degrees
    (pi). We usually assume active rotations.

    Parameters
    ----------
    R : array-like, shape (3, 3)
        Rotation matrix

    Returns
    -------
    a : array-like, shape (3,)
        Axis of rotation and rotation angle: angle * (x, y, z). The angle is
        constrained to [0, pi].
    """
    a = axis_angle_from_matrix(R)
    return compact_axis_angle(a)
a7493a5ed1c622b9cbec6e9f0771e62f7f4712e2
3,656,600
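A hypothetical check of compact_axis_angle_from_matrix above, assuming the module's axis_angle_from_matrix and compact_axis_angle helpers are available as in the snippet: a 90-degree rotation about the z-axis should map to roughly [0, 0, pi/2]:

import numpy as np

R = np.array([[0., -1., 0.],
              [1.,  0., 0.],
              [0.,  0., 1.]])
print(compact_axis_angle_from_matrix(R))  # approximately [0, 0, 1.5708]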
def _generate_IPRange(Range): """ IP range to CIDR and IPNetwork type Args: Range: IP range Returns: an array with CIDRs """ if len(Range.rsplit('.')) == 7 and '-' in Range and '/' not in Range: if len(Range.rsplit('-')) == 2: start_ip, stop_ip = Range.rsplit('-') if isIP(start_ip) and isIP(stop_ip): return iprange_to_cidrs(start_ip, stop_ip) else: return [] else: return [] elif len(Range.rsplit('.')) == 4 and '-' not in Range and '/' in Range: return IPNetwork(Range) else: return []
d86f8db8e87313b12f35669ee25cc3f3d229c631
3,656,601
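A sketch of _generate_IPRange above, assuming netaddr's iprange_to_cidrs/IPNetwork and the module's isIP helper are available as in the snippet:

print(_generate_IPRange('192.168.0.1-192.168.0.255'))  # list of CIDRs covering the dashed range
print(_generate_IPRange('10.0.0.0/24'))                 # IPNetwork('10.0.0.0/24')
print(_generate_IPRange('not-an-ip-range'))             # [] for anything it cannot parse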
def is_dict_homogeneous(data): """Returns True for homogeneous, False for heterogeneous. An empty dict is homogeneous. ndarray behaves like collection for this purpose. """ if len(data) == 0: return True k0, v0 = next(iter(data.items())) ktype0 = type(k0) vtype0 = type(v0) if ktype0 in collection_types or ktype0 == np.ndarray or vtype0 in collection_types or vtype0 == np.ndarray: return False for k, v in data.items(): ktype = type(k) vtype = type(v) if (ktype != ktype0 or ktype in collection_types or ktype == np.ndarray) or \ (vtype != vtype0 or vtype in collection_types or vtype == np.ndarray): return False return True
921e66639cd6a8584e99e14852158594b1001ef9
3,656,602
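A few quick checks of is_dict_homogeneous above; collection_types is assumed to be the module-level tuple of container types (e.g. list, tuple, set, dict) referenced by the function:

import numpy as np

print(is_dict_homogeneous({}))                    # True  (empty dict counts as homogeneous)
print(is_dict_homogeneous({'a': 1, 'b': 2}))      # True  (str -> int throughout)
print(is_dict_homogeneous({'a': 1, 'b': 'two'}))  # False (mixed value types)
print(is_dict_homogeneous({'a': np.arange(3)}))   # False (ndarray values behave like collections)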
from typing import Union from typing import Callable from re import T from typing import Generator from typing import Any def translate(item: Union[Callable[P, T], Request]) -> Union[Generator[Any, Any, None], Callable[P, T]]: """Override current language with one from language header or 'lang' parameter. Can be used as a context manager or a decorator. If a function is decorated, one of the parameters for the function must be a `rest_framework.Request` object. """ if not isinstance(item, Request): @wraps(item) def decorator(*args: P.args, **kwargs: P.kwargs) -> Any: request = None for arg in chain(args, kwargs.values()): if isinstance(arg, Request): request = arg break if request is None: raise ValueError("No Request-object in function parameters.") with override(get_language(request)): return item(*args, **kwargs) # type: ignore return decorator @contextmanager def context_manager(request: Request) -> Generator[Any, Any, None]: with override(get_language(request)): yield return context_manager(item)
5042cc77efb1477444f8f9611055fb3e183cf3d3
3,656,603
def get_all(isamAppliance, check_mode=False, force=False, ignore_error=False): """ Retrieving the current runtime template files directory contents """ return isamAppliance.invoke_get("Retrieving the current runtime template files directory contents", "/mga/template_files?recursive=yes", ignore_error=ignore_error)
9ff291b63471b57b110885c35939c8afe3d2f0d8
3,656,604
def main(args, out, err): """ This wraps GURepair's real main function so that we can handle exceptions and trigger our own exit commands. This is the entry point that should be used if you want to use this file as a module rather than as a script. """ cleanUpHandler = BatchCaller(args.verbose, out) gr_instance = GPURepairInstance(args, out, err, cleanUpHandler) def handleTiming(exitCode): if gr_instance.time: print(gr_instance.getTiming(exitCode), file = out) def doCleanUp(timing, exitCode): if timing: # We must call this before cleaning up globals # because it depends on them cleanUpHandler.register(handleTiming, exitCode) # We should call this last. cleanUpHandler.call() try: returnCode = gr_instance.invoke() except Exception: # Something went very wrong doCleanUp(timing = False, exitCode = 0) # It doesn't matter what the exitCode is raise doCleanUp(timing = True, exitCode = returnCode) # Do this outside try block so we don't call twice! return returnCode
c506306a93804ab60c1a6805e9c53a0fd9dd7cfd
3,656,607
def generateLouvainCluster(edgeList): """ Louvain Clustering using igraph """ Gtmp = nx.Graph() Gtmp.add_weighted_edges_from(edgeList) W = nx.adjacency_matrix(Gtmp) W = W.todense() graph = Graph.Weighted_Adjacency( W.tolist(), mode=ADJ_UNDIRECTED, attr="weight", loops=False) # ignore the squiggly underline, not errors louvain_partition = graph.community_multilevel( weights=graph.es['weight'], return_levels=False) size = len(louvain_partition) hdict = {} count = 0 for i in range(size): tlist = louvain_partition[i] for j in range(len(tlist)): hdict[tlist[j]] = i count += 1 listResult = [] for i in range(count): listResult.append(hdict[i]) return listResult, size
c171474bdd81456cbbc488b0a8cb826f881419ec
3,656,609
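A small sketch of generateLouvainCluster above on a toy weighted edge list with two obvious communities; this assumes networkx and python-igraph (Graph, ADJ_UNDIRECTED) are imported as in the snippet, and the exact label numbering can vary between runs:

edges = [
    (0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0),   # first triangle
    (3, 4, 1.0), (4, 5, 1.0), (3, 5, 1.0),   # second triangle
    (2, 3, 0.1),                             # weak bridge between them
]
labels, n_clusters = generateLouvainCluster(edges)
print(labels, n_clusters)  # e.g. [0, 0, 0, 1, 1, 1] 2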
def exprvars(name, *dims): """Return a multi-dimensional array of expression variables. The *name* argument is passed directly to the :func:`pyeda.boolalg.expr.exprvar` function, and may be either a ``str`` or tuple of ``str``. The variadic *dims* input is a sequence of dimension specs. A dimension spec is a two-tuple: (start index, stop index). If a dimension is given as a single ``int``, it will be converted to ``(0, stop)``. The dimension starts at index ``start``, and increments by one up to, but not including, ``stop``. This follows the Python slice convention. For example, to create a 4x4 array of expression variables:: >>> vs = exprvars('a', 4, 4) >>> vs farray([[a[0,0], a[0,1], a[0,2], a[0,3]], [a[1,0], a[1,1], a[1,2], a[1,3]], [a[2,0], a[2,1], a[2,2], a[2,3]], [a[3,0], a[3,1], a[3,2], a[3,3]]]) """ return _vars(Expression, name, *dims)
6b65872029de938d37c9e968f696587e2a03ff8c
3,656,610
def cell_segmenter(im, thresh='otsu', radius=20.0, image_mode='phase', area_bounds=(0,1e7), ecc_bounds=(0, 1)):
    """
    This function segments a given image via thresholding and returns a labeled segmentation mask.

    Parameters
    ----------
    im : 2d-array
        Image to be segmented. This may be of either float or integer data type.
    thresh : int, float, or 'otsu'
        Value used during the thresholding operation. This can either be a
        numeric value (int or float) or 'otsu', in which case the threshold
        is determined automatically using Otsu's thresholding method.
    radius : float
        Radius of the gaussian blur used for background subtraction. Default value is 20.
    image_mode : 'phase' or 'fluorescence'
        Mode of microscopy used to capture the image. If 'phase', objects with
        intensity values *lower* than the provided threshold will be selected.
        If 'fluorescence', values *greater* than the provided threshold will be
        selected. Default value is 'phase'.
    area_bounds : tuple of ints
        Range of areas of acceptable objects. This should be provided in units of square pixels.
    ecc_bounds : tuple of floats
        Range of eccentricity values of acceptable objects. These values should range between 0.0 and 1.0.

    Returns
    -------
    im_labeled : 2d-array, int
        Labeled segmentation mask.
    """
    # Apply a median filter to remove hot pixels.
    med_selem = skimage.morphology.square(3)
    im_filt = skimage.filters.median(im, selem=med_selem)

    # Perform gaussian subtraction.
    im_sub = bg_subtract(im_filt, radius)

    # Determine the thresholding method.
    if thresh == 'otsu':
        thresh = skimage.filters.threshold_otsu(im_sub)

    # Determine the image mode and apply threshold.
    if image_mode == 'phase':
        im_thresh = im_sub < thresh
    elif image_mode == 'fluorescence':
        im_thresh = im_sub > thresh
    else:
        raise ValueError("Image mode not recognized. Must be 'phase'"
                         " or 'fluorescence'.")

    # Label the objects.
    im_label = skimage.measure.label(im_thresh)

    # Apply the area and eccentricity bounds.
    im_filt = area_ecc_filter(im_label, area_bounds, ecc_bounds)

    # Remove objects touching the border.
    im_border = skimage.segmentation.clear_border(im_filt, buffer_size=5)

    # Relabel the image.
    im_border = im_border > 0
    im_label = skimage.measure.label(im_border)

    return im_label
f9a8fa3c29cbb213ed67c3df93106a81f53ae985
3,656,611
from datetime import datetime def generate_report(start_date, end_date): """Generate the text report""" pgconn = get_dbconn('isuag', user='nobody') days = (end_date - start_date).days + 1 totalobs = days * 24 * 17 df = read_sql(""" SELECT station, count(*) from sm_hourly WHERE valid >= %s and valid < %s GROUP by station ORDER by station """, pgconn, params=(start_date, end_date + datetime.timedelta(days=1)), index_col='station') performance = min([100, df['count'].sum() / float(totalobs) * 100.]) return """ Iowa Environmental Mesonet Data Delivery Report =============================================== Dataset: ISU Soil Moisture Network Performance Period: %s thru %s Reported Performance: %.1f%% Reporting Platforms: %.0f Additional Details ================== Total Required Obs: %.0f (24 hourly obs x 17 platforms x %.0f days) Observations Delivered: %.0f Report Generated: %s .END """ % (start_date.strftime("%d %b %Y"), end_date.strftime("%d %b %Y"), performance, len(df.index), totalobs, days, df['count'].sum(), datetime.datetime.now().strftime("%d %B %Y %H:%M %p"))
f71b5ab58922b9018abc1868661f88c268de8f94
3,656,612
import pathlib
def whole(eventfile,par_list,tbin_size,mode,ps_type,oversampling,xlims,vlines):
    """
    Plot the entire power spectrum without any cuts to the data.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI,)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    mode - whether we want to show or save the plot.
    ps_type - obtain power spectrum through the periodogram method ('period') or
    the manual FFT way ('manual') or both ('both')
    oversampling - whether to perform oversampling. Array will consist of
    [True/False, oversampling factor]
    xlims - a list or array: first entry = True/False as to whether to impose an xlim;
    second and third entry correspond to the desired x-limits of the plot
    vlines - a list or array: first entry = True/False as to whether to draw a vertical
    line in the plot; second entry is the equation for the vertical line
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if mode != 'show' and mode != 'save':
        raise ValueError("Mode should either be 'show' or 'save'!")
    if ps_type != 'period' and ps_type != 'manual' and ps_type != 'both':
        raise ValueError("ps_type should either be 'period', 'manual', or 'both'!")
    if type(oversampling) != list and type(oversampling) != np.ndarray:
        raise TypeError("oversampling should either be a list or an array!")
    if type(xlims) != list and type(xlims) != np.ndarray:
        raise TypeError("xlims should either be a list or an array!")
    if type(vlines) != list and type(vlines) != np.ndarray:
        raise TypeError("vlines should either be a list or an array!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
    times = data_dict['TIME']
    counts = np.ones(len(times))

    shifted_t = times-times[0]
    t_bins = np.linspace(0,np.ceil(shifted_t[-1]),int(np.ceil(shifted_t[-1])*1/tbin_size+1))
    summed_data, bin_edges, binnumber = stats.binned_statistic(shifted_t,counts,statistic='sum',bins=t_bins) #binning the time values in the data

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    if ps_type == 'period':
        plt.figure()
        pdgm_f,pdgm_ps = Lv2_ps_method.pdgm(t_bins,summed_data,xlims,vlines,True,oversampling)
        plt.title('Power spectrum for ' + obj_name + ', ObsID: ' + str(obsid) + '\n Periodogram method' + '\n Includes whole time interval and energy range',fontsize=12)

        if mode == 'show':
            plt.show()
        elif mode == 'save':
            filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_pdgm.pdf'
            plt.savefig(parent_folder+'/'+filename,dpi=900)
            plt.close()

        return pdgm_f, pdgm_ps

    if ps_type == 'manual':
        plt.figure()
        manual_f,manual_ps = Lv2_ps_method.manual(t_bins,summed_data,xlims,vlines,True,oversampling)
        plt.title('Power spectrum for ' + obj_name + ', ObsID ' + str(obsid) + '\n Manual FFT method' + '\n Includes whole time interval and energy range',fontsize=12)

        if mode == 'show':
            plt.show()
        elif mode == 'save':
            filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_manual.pdf'
            plt.savefig(parent_folder+'/'+filename,dpi=900)
            plt.close()

        return manual_f, manual_ps

    if ps_type == 'both':
        pdgm_f,pdgm_ps = Lv2_ps_method.pdgm(t_bins,summed_data,xlims,vlines,False,oversampling)
        manual_f,manual_ps = Lv2_ps_method.manual(t_bins,summed_data,xlims,vlines,False,oversampling)

        fig, (ax1,ax2) = plt.subplots(2,1)
        fig.suptitle('Power spectra for ' + obj_name + ', ObsID ' + str(obsid) + '\n both periodogram and manual FFT method' + '\n Includes whole time interval and energy range', fontsize=12)

        ax1.semilogy(pdgm_f,pdgm_ps,'b-')#/np.mean(pdgm_ps),'b-') #periodogram; arrays already truncated!
        ax1.set_xlabel('Hz',fontsize=12)
        ax1.set_ylabel('Normalized power spectrum',fontsize=10)

        ax2.semilogy(manual_f,manual_ps,'r-')#/np.mean(manual_ps),'r-') #manual FFT; arrays already truncated!
        ax2.set_xlabel('Hz',fontsize=12)
        ax2.set_ylabel('Normalized power spectrum',fontsize=10)

        if xlims[0] == True:
            ax1.set_xlim([xlims[1],xlims[2]])
            ax2.set_xlim([xlims[1],xlims[2]])
        if vlines[0] == True:
            ax1.axvline(x=vlines[1],color='k',alpha=0.5,lw=0.5)
            ax2.axvline(x=vlines[1],color='k',alpha=0.5,lw=0.5)
        ax2.axhline(y=2,color='k',alpha=0.3,lw=0.3)

        plt.subplots_adjust(hspace=0.2)

        if mode == 'show':
            plt.show()
        elif mode == 'save':
            filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_both.pdf'
            plt.savefig(parent_folder+'/'+filename,dpi=900)
            plt.close()

        return pdgm_f, pdgm_ps, manual_f, manual_ps
77b51cc8774bdb1b670e2a6b56a9cd65213f70de
3,656,613
def handle_postback(): """Handles a postback.""" # we need to set an Access-Control-Allow-Origin for use with the test AJAX postback sender # in normal operations this is NOT needed response.set_header('Access-Control-Allow-Origin', '*') args = request.json loan_id = args['request_token'] merchant_loan_id = args.get('merchant_transaction_id') action = args['updates'].get('action') if action == 'refund': # process a refund amount = args['updates']['amount'] return handle_refund(loan_id, amount) loan_status = args['updates']['status'] return handle_status_update(loan_id, loan_status)
59683921b7a21f50c2905c47c33036fd75ce54f4
3,656,614
def get_bb_bev_from_obs(dict_obs, pixor_size=128): """Input dict_obs with (B,H,W,C), return (B,H,W,3)""" vh_clas = tf.squeeze(dict_obs['vh_clas'], axis=-1) # (B,H,W,1) # vh_clas = tf.gather(vh_clas, 0, axis=-1) # (B,H,W) vh_regr = dict_obs['vh_regr'] # (B,H,W,6) decoded_reg = decode_reg(vh_regr, pixor_size) # (B,H,W,8) lidar = dict_obs['lidar'] B = vh_regr.shape[0] images = [] for i in range(B): corners, _ = pixor_postprocess(vh_clas[i], decoded_reg[i]) # (N,4,2) image = get_bev(lidar, corners, pixor_size) # (H,W,3) images.append(image) images = tf.convert_to_tensor(images, dtype=np.uint8) # (B,H,W,3) return images
d286ec0c3132c2dcb931cb941fd247810c0ce1cf
3,656,615
def get_hard_edges(obj): """ :param str obj: :returns: all hard edges from the given mesh in a flat list :rtype: list of str """ return [obj + '.e[' + str(i) + ']' for i, edgeInfo in enumerate(cmds.polyInfo(obj + '.e[*]', ev=True)) if edgeInfo.endswith('Hard\n')]
67de22469a38e55e88d21f1853280138795a04cb
3,656,616
def make_system(l=70): """ Making and finalizing a kwant.builder object describing the system graph of a closed, one-dimensional wire with l number of sites. """ sys = kwant.Builder() lat = kwant.lattice.chain() sys[(lat(x) for x in range(l))] = onsite sys[lat.neighbors()] = hopping return sys.finalized()
fa3d25933fd086519569cbb24ff77bf3c86c1303
3,656,617
from typing import Type from pathlib import Path from typing import Dict def _gen_test_methods_for_rule( rule: Type[CstLintRule], fixture_dir: Path, rules_package: str ) -> TestCasePrecursor: """Aggregates all of the cases inside a single CstLintRule's VALID and INVALID attributes and maps them to altered names with a `test_` prefix so that 'unittest' can discover them later on and an index postfix so that individual tests can be selected from the command line. :param CstLintRule rule: :param Path fixture_dir: :param str rules_package: :returns: :rtype: TestCasePrecursor """ valid_tcs = {} invalid_tcs = {} requires_fixtures = False fixture_paths: Dict[str, Path] = {} fixture_subdir: Path = get_fixture_path(fixture_dir, rule.__module__, rules_package) if issubclass(rule, CstLintRule): if rule.requires_metadata_caches(): requires_fixtures = True if hasattr(rule, "VALID"): for idx, test_case in enumerate(getattr(rule, "VALID")): name = f"test_VALID_{idx}" valid_tcs[name] = test_case if requires_fixtures: fixture_paths[name] = fixture_subdir / f"{rule.__name__}_VALID_{idx}.json" if hasattr(rule, "INVALID"): for idx, test_case in enumerate(getattr(rule, "INVALID")): name = f"test_INVALID_{idx}" invalid_tcs[name] = test_case if requires_fixtures: fixture_paths[name] = fixture_subdir / f"{rule.__name__}_INVALID_{idx}.json" return TestCasePrecursor( rule=rule, test_methods={**valid_tcs, **invalid_tcs}, fixture_paths=fixture_paths, )
5a12d84bdcff039179ef9b9f1105e6beecccbf05
3,656,618
def evaluate_score_batch( predicted_classes=[], # list, len(num_classes), str(code) predicted_labels=[], # shape (num_examples, num_classes), T/F for each code predicted_probabilities=[], # shape (num_examples, num_classes), prob. [0-1] for each code raw_ground_truth_labels=[], # list(('dx1', 'dx2'), ('dx1', 'dx3'), ...) weights_file="evaluation-2020/weights.csv", normal_class="426783006", equivalent_classes=[ ["713427006", "59118001"], ["284470004", "63593006"], ["427172004", "17338001"], ], ): """This is a helper function for getting auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric without needing the directories of labels and prediction outputs. It is useful for directly calculating the scores given the classes, predicted labels, and predicted probabilities. """ label_classes, labels = _load_labels( raw_ground_truth_labels, normal_class=normal_class, equivalent_classes_collection=equivalent_classes, ) output_classes, binary_outputs, scalar_outputs = _load_outputs( predicted_classes, predicted_labels, predicted_probabilities, normal_class=normal_class, equivalent_classes_collection=equivalent_classes, ) classes, labels, binary_outputs, scalar_outputs = organize_labels_outputs( label_classes, output_classes, labels, binary_outputs, scalar_outputs ) weights = load_weights(weights_file, classes) # Only consider classes that are scored with the Challenge metric. indices = np.any(weights, axis=0) # Find indices of classes in weight matrix. classes = [x for i, x in enumerate(classes) if indices[i]] labels = labels[:, indices] scalar_outputs = scalar_outputs[:, indices] binary_outputs = binary_outputs[:, indices] weights = weights[np.ix_(indices, indices)] auroc, auprc = compute_auc(labels, scalar_outputs) accuracy = compute_accuracy(labels, binary_outputs) f_measure = compute_f_measure(labels, binary_outputs) f_beta_measure, g_beta_measure = compute_beta_measures( labels, binary_outputs, beta=2 ) challenge_metric = compute_challenge_metric( weights, labels, binary_outputs, classes, normal_class ) return ( auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric, )
314f94433704cc2986df9082a749caaf52738f08
3,656,619
import tqdm def gauss_kernel(model_cell, x, y, z, sigma=1): """ Convolute aligned pixels given coordinates `x`, `y` and values `z` with a gaussian kernel to form the final image. Parameters ---------- model_cell : :class:`~colicoords.cell.Cell` Model cell defining output shape. x : :class:`~numpy.ndarray` Array with combined x-coordinates of aligned pixels. y : :class:`~numpy.ndarray` Array with combined y-coordinates of aligned pixels. z : :class:`~numpy.ndarray` Array with pixel values of aligned pixels. sigma : :obj:`float` Sigma of the gaussian kernel. Returns ------- output : :class:`~numpy.ndarray` Output aligned image. """ output = np.empty(model_cell.data.shape) coords = np.array([x, y]) for index in tqdm(np.ndindex(model_cell.data.shape), desc='Gaussian kernel', total=np.product(model_cell.data.shape)): xi, yi = index xp, yp = model_cell.coords.x_coords[xi, yi], model_cell.coords.y_coords[xi, yi] dist = distance.cdist(np.array([[xp, yp]]), coords.T).squeeze() bools = dist < 5*sigma weights = gauss_2d(x[bools], y[bools], xp, yp, sigma=sigma) avg = np.average(z[bools], weights=weights) output[xi, yi] = avg return output
0ff61121fbf330e3e15862b82b0929ae3b8748f9
3,656,621
def get_configs_from_multiple_files(): """Reads training configuration from multiple config files. Reads the training config from the following files: model_config: Read from --model_config_path train_config: Read from --train_config_path input_config: Read from --input_config_path Returns: model_config: model_pb2.DetectionModel train_config: train_pb2.TrainConfig input_config: input_reader_pb2.InputReader """ train_config = train_pb2.TrainConfig() with tf.gfile.GFile(FLAGS.train_config_path, 'r') as f: text_format.Merge(f.read(), train_config) model_config = model_pb2.DetectionModel() with tf.gfile.GFile(FLAGS.model_config_path, 'r') as f: text_format.Merge(f.read(), model_config) input_config = input_reader_pb2.InputReader() with tf.gfile.GFile(FLAGS.input_config_path, 'r') as f: text_format.Merge(f.read(), input_config) return model_config, train_config, input_config
4f561235568667a6fe71d77c23769ea8878ebe20
3,656,622
def line_to_numbers(line: str) -> t.List[int]:
    """Split a spreadsheet line into a list of numbers.

    raises: ValueError
    """
    return list(map(int, line.split()))
fce9af5e1c213fd91f0edf8d7fa5877f15374908
3,656,623
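Two quick examples of line_to_numbers above; str.split() with no argument handles any run of whitespace, and a non-numeric cell raises ValueError as documented:

print(line_to_numbers('5 1 9 5'))     # [5, 1, 9, 5]
print(line_to_numbers('  7\t5  3 '))  # [7, 5, 3]
# line_to_numbers('5 x 9') would raise ValueError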
def bits_to_amps(bits): """helper function to convert raw data from usb device to amps""" return bits*BITS_TO_AMPS_SLOPE + BITS_TO_AMPS_Y_INTERCEPT
5653582987b6a7924c11f037badc1a61541c6ca2
3,656,624
def time_difference(t_early, t_later): """ Compute the time difference between t_early and t_later Parameters: t_early: np.datetime64, list or pandas series. t_later: np.datetime64, list or pandas series. """ if type(t_early) == list: t1 = np.array(t_early) elif type(t_early) == pd.Series: t1 = np.array(t_early.tolist()) else: t1 = np.array([t_early]) if type(t_later) == list: t2 = np.array(t_later) elif type(t_later) == pd.Series: t2 = np.array(t_later.tolist()) else: t2 = np.array([t_later]) timedelta2float = np.vectorize(lambda x: x / np.timedelta64(3600, 's')) t_diff = timedelta2float(t2 - t1) return t_diff
0d4e6bac3aed2e5a2848c4289dadc92120a4f7a1
3,656,627
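A usage sketch of time_difference above with pandas Series input; the result is the elapsed time in hours as floats:

import pandas as pd

t_early = pd.Series(pd.to_datetime(['2021-01-01 00:00', '2021-01-01 06:00']))
t_later = pd.Series(pd.to_datetime(['2021-01-01 12:00', '2021-01-02 06:00']))

print(time_difference(t_early, t_later))  # [12. 24.]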
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True): """ Convolutional block with two convolutions followed by batch normalisation (if True) and with ReLU activations. input_tensor: A tensor. Input tensor on which the convolutional block acts. n_filters: An integer. Number of filters in this block. kernel_size: An integer. Size of convolutional kernel. batchnorm: A bool. Perform batch normalisation after each convolution if True. :return: A tensor. The output of the operation. """ # first convolutional layer x = layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal", padding="same")(input_tensor) if batchnorm: x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) # second convolutional layer x = layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal", padding="same")(x) if batchnorm: x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) return x
8bb435ed1e091fff26d49290a8ca6d0c9c12ec67
3,656,628
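A hypothetical way to wire conv2d_block above into a small encoder, assuming layers refers to tf.keras.layers as in the snippet; the input shape and filter counts here are arbitrary:

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(128, 128, 3))
c1 = conv2d_block(inputs, n_filters=16, kernel_size=3, batchnorm=True)  # (128, 128, 16)
p1 = layers.MaxPooling2D((2, 2))(c1)                                    # (64, 64, 16)

model = tf.keras.Model(inputs, p1)
model.summary()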