content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def model_predict(test_data: FeatureVector): """ Endpoint to make a prediction with the model. The endpoint `model/train` should have been used before this one. Args: test_data (FeatureVector): A unit vector of feature """ try: y_predicted = api.ml_model.predict_proba(test_data.to_numpy()) except NotFittedError: raise HTTPException( status_code=500, detail="This LogisticRegression instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\nUse `model/train` endpoint with 10 examples before", ) y_pred_label = np.argmax(y_predicted, axis=1).astype(np.int32) y_pred_score = np.max(y_predicted, axis=1) return Prediction(label=y_pred_label, probability=y_pred_score)
5,357,300
def print_header(args, argv, preamble='CIFAR10', printfn=print, log=open(os.devnull, 'w'), first=('model','dataset','epoch','batchsize','resume','out')): """ Prints the arguments and header, and returns a logging print function """ def logprint(*args, file=log, **kwargs): if printfn: printfn(*args, **kwargs) print(*args, file=file, **kwargs) file.flush() vargs = vars(args) args_sorted = sorted(vargs.items()) logprint('{' + ', '.join("'{}':{}".format(k,repr(v)) for k,v, in args_sorted) + '}') logprint(' '.join(argv)) logprint('') logprint(preamble) logprint('') logprint('Arguments: ') def print_arg(arg): logprint(' {:20}: {},'.format("'%s'"%arg,repr(vargs[arg]))) for arg in first: print_arg(arg) logprint('') for arg,_ in args_sorted: if arg in first: continue print_arg(arg) logprint('') return logprint
5,357,301
def search_folders(project, folder_name=None, return_metadata=False): """Folder name based case-insensitive search for folders in project. :param project: project name :type project: str :param folder_name: the new folder's name :type folder_name: str. If None, all the folders in the project will be returned. :param return_metadata: return metadata of folders instead of names :type return_metadata: bool :return: folder names or metadatas :rtype: list of strs or dicts """ if not isinstance(project, dict): project = get_project_metadata_bare(project) team_id, project_id = project["team_id"], project["id"] result_list = [] params = { 'team_id': team_id, 'project_id': project_id, 'offset': 0, 'name': folder_name, 'is_root': 0 } total_folders = 0 while True: response = _api.send_request( req_type='GET', path='/folders', params=params ) if not response.ok: raise SABaseException( response.status_code, "Couldn't search folders " + response.text ) response = response.json() results_folders = response["data"] for r in results_folders: if return_metadata: result_list.append(r) else: result_list.append(r["name"]) total_folders += len(results_folders) if response["count"] <= total_folders: break params["offset"] = total_folders return result_list
5,357,302
def tempo_para_percorrer_uma_distancia(distancia, velocidade): """ Receives a distance and a movement speed, and returns the hours it would take to cover that distance in a straight line""" horas = distancia / velocidade return round(horas,2)
5,357,303
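A minimal usage sketch for tempo_para_percorrer_uma_distancia above (illustrative only; assumes the function is in scope):
# 150 km at 60 km/h takes 2.5 hours
assert tempo_para_percorrer_uma_distancia(150, 60) == 2.5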
def getCorrection(start, end, pos): """Correct the angle for the trajectory adjustment Function to get the correct angle correction when the robot deviates from it's estimated trajectory. Args: start: The starting position of the robot. end: The position the robot is supposed to arrive. pos: The current position of the robot. Returns: An angle in radians between -pi and pi to correct the robot trajectory and arrive succesfully at end position. """ (xs, ys) = start (xe, ye) = end (xp, yp) = pos # Discard edge cases with no sense assert(xs != xe or ys != ye) assert(xp != xe or yp != ye) assert(xs != xp or ys != yp) # First get the line equation from start to end points. # line equation follows the following pattern: y = m * x + b m = 0.0 b = 0.0 if abs(xe - xs) > PRECISION: m = (ye - ys) / (xe - xs) b = ys - m * xs else: m = 1 b = - xs # Get the perpendicular line equation to the first line mp = 0.0 bp = 0.0 if abs(xe - xs) < PRECISION: bp = yp elif abs(m) < PRECISION: mp = 1 bp = - xp else: mp = - 1 / m bp = yp - mp * xp # Get the point at the intersection of the two lines xi = 0.0 yi = 0.0 if abs(xe - xs) < PRECISION: xi = b yi = bp elif abs(m) < PRECISION: xi = bp yi = b else: xi = - (bp - b) / (mp - m) yi = m * xi + b # Get the distance between the tree points dist_pi = math.sqrt((xp - xi) * (xp - xi) + (yp - yi) * (yp - yi)) dist_pe = math.sqrt((xp - xe) * (xp - xe) + (yp - ye) * (yp - ye)) dist_sp = math.sqrt((xs - xp) * (xs - xp) + (ys - yp) * (ys - yp)) # Get the offset angles alpha and beta alpha = math.asin(dist_pi / dist_pe) beta = math.asin(dist_pi / dist_sp) return - (alpha + beta)
5,357,304
def decontainerize_parameter(params_obj): """ Given a set of parameters already ran through containerize_parameter(), reverse what was done. """ params = ['reference_data_path', 'test_data_path', 'results_dir'] # Set each of the params back to their original value. for p in params: orig_attr_value = getattr(params_obj, 'orig_{}'.format(p)) setattr(params_obj, p, orig_attr_value) delattr(params_obj, 'orig_{}'.format(p))
5,357,305
def main(input_filepath, output_filepath): """ Runs data processing scripts to turn raw data from (../raw) into cleaned data ready to be analyzed (saved in ../processed). """ logger = logging.getLogger(__name__) logger.info('making final data set from raw data...') df = load_csv_file_to_df(input_filepath) df = handle_na_and_duplicates(df) df = clean_dataframe(df) df = organize_columns(df) df = concat_abilities(df) out_str = create_monsters_string(df) create_text_output_file(out_str, output_filepath) logger.info('Output file created!') return None
5,357,306
def string2symbols(s): """ Convert string to list of chemical symbols. Args: s: Returns: """ i = None n = len(s) if n == 0: return [] c = s[0] if c.isdigit(): i = 1 while i < n and s[i].isdigit(): i += 1 return int(s[:i]) * string2symbols(s[i:]) if c == "(": p = 0 for i, c in enumerate(s): if c == "(": p += 1 elif c == ")": p -= 1 if p == 0: break j = i + 1 while j < n and s[j].isdigit(): j += 1 if j > i + 1: m = int(s[i + 1 : j]) else: m = 1 return m * string2symbols(s[1:i]) + string2symbols(s[j:]) if c.isupper(): i = 1 if 1 < n and s[1].islower(): i += 1 j = i while j < n and s[j].isdigit(): j += 1 if j > i: m = int(s[i:j]) else: m = 1 return m * [s[:i]] + string2symbols(s[j:]) else: raise ValueError
5,357,307
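A short illustrative check of string2symbols above (hypothetical inputs; assumes the function is in scope). Digits multiply the preceding symbol or parenthesised group:
assert string2symbols("H2O") == ["H", "H", "O"]
assert string2symbols("Mg(OH)2") == ["Mg", "O", "H", "O", "H"]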
def set_meta(target, keys, overwrite=False): """Write metadata keys to .md file. TARGET can be a media file or an album directory. KEYS are key/value pairs. Ex, to set the title of test.jpg to "My test image": sigal set_meta test.jpg title "My test image" """ if not os.path.exists(target): sys.stderr.write("The target {} does not exist.\n".format(target)) sys.exit(1) if len(keys) < 2 or len(keys) % 2 > 0: sys.stderr.write("Need an even number of arguments.\n") sys.exit(1) if os.path.isdir(target): descfile = os.path.join(target, 'index.md') else: descfile = os.path.splitext(target)[0] + '.md' if os.path.exists(descfile) and not overwrite: sys.stderr.write("Description file '{}' already exists. " "Use --overwrite to overwrite it.\n".format(descfile)) sys.exit(2) with open(descfile, "w") as fp: for i in range(len(keys)//2): k, v = keys[i*2:(i+1)*2] fp.write("{}: {}\n".format(k.capitalize(), v)) print("{} metadata key(s) written to {}".format(len(keys)//2, descfile))
5,357,308
def coe2rv(a, e, i, node, w, v, MU=Earth.mu, degrees=True): """Given the classical orbital elements (a, e, i, node, w, v), this returns the position (R) and the velocity (V) in an ECI frame - Semimajor-axis (a)[km]: orbit size - Eccentricity (e): orbit shape (0=circle, 1=line) - Inclination (i)[deg]: orbital plane inclination measure from ascending node - Argument of Perigee (w)[deg]: orbit orientation - Ascending Node (Omega)[deg]: location of ascending node - True Anomaly (v)[deg]: location of satellite in orbit relative to perigee - Mean Anomaly (M)[deg]: fictitious angle that varies linearly with time return: R(x,y,z)[km], V(x,y,z)[km/sec] """ # MU = MU/1000/1000/1000 # FIXME? if degrees: i *= deg2rad node *= deg2rad w *= deg2rad v *= deg2rad p = a*(1-e**2) # p = semi-latus rectum (semiparameter) R = np.zeros(3) V = np.zeros(3) sv = sin(v) cv = cos(v) det = 1/(1+e*cv) smup = np.sqrt(MU/p) ### Position Coordinates in Perifocal Coordinate System # R[0] = p*cv / (1+e*cv) # x-coordinate (km) # R[1] = p*sv / (1+e*cv) # y-coordinate (km) # R[2] = 0 # z-coordinate (km) # V[0] = -sqrt(MU/p) * sv # velocity in x (km/s) # V[1] = sqrt(MU/p) * (e+cv) # velocity in y (km/s) # V[2] = 0 # velocity in z (km/s) R[0] = p*cv * det # x-coordinate (km) R[1] = p*sv * det # y-coordinate (km) R[2] = 0 # z-coordinate (km) V[0] = -smup * sv # velocity in x (km/s) V[1] = smup * (e+cv) # velocity in y (km/s) V[2] = 0 # velocity in z (km/s) r313 = R313(-node, -i, -w) # Perifocal -> xyz R = r313.dot(R) V = r313.dot(V) return (R,V,)
5,357,309
def get_dictionary(filename, dict_size=2000): """ Read the tweets and return a list of the 'max_words' most common words. """ all_words = [] with open(filename, 'r') as csv_file: r = csv.reader(csv_file, delimiter=',', quotechar='"') for row in r: tweet = row[3] if len(tweet) <= MAX_TWEET_CHARS: words = preprocess(tweet).split() all_words += words # Make the dictionary out of only the N most common words word_counter = Counter(all_words) dictionary, _ = zip(*word_counter.most_common(min(dict_size, len(word_counter)))) return dictionary
5,357,310
def find_last_index(l, x): """Returns the last index of element x within the list l""" for idx in reversed(range(len(l))): if l[idx] == x: return idx raise ValueError("'{}' is not in list".format(x))
5,357,311
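A minimal sketch of find_last_index above (illustrative only; assumes the function is in scope):
# The last occurrence wins; a missing element raises ValueError
assert find_last_index([1, 2, 3, 2], 2) == 3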
def download_query_alternative(user, password, queryid, batch_size=500): """ This is an alternative implementation of the query downloader. The original implementation only used a batch size of 20 as this allowed for using plain LOC files. Unfortunately this is a bit slow and causes more load on the web server due to a lot of small requests. With the modified implementation, the batch size can be chosen by the user. This is accomplished by using an in-memory extraction of the downloaded ZIP file. Additionally this code uses an XML parser instead of a regex to retrieve the data. :param user: The name of the user to log in with. :type user: str :param password: The password to use for the login. :type password: str :param queryid: The ID of the search query to retrieve the cache codes for. :type queryid: int :param batch_size: The batch size to use for the requests. This must at least be 1 and cannot exceed 500. The upper bound is due to the limits used by the Opencaching.de site. :type batch_size: int :return: The list of cache codes retrieved from the query. :rtype: list[str] :raises ValueError: Some of the input values are invalid. """ # Check the specified batch size. if not 0 < batch_size <= 500: raise ValueError("Invalid batch size.") # Use a custom header. headers = { "User-agent": "opencaching-de_statistics " + "[https://github.com/FriedrichFroebel/opencaching-de_statistics]" } # Try to log in. session = requests.Session() response = session.post( "https://www.opencaching.de/login.php", data={ "action": "login", "target": "query.php", "email": user.encode("utf-8"), "password": password.encode("utf-8"), }, headers=headers, ) # Check if the login has been successful. if "32x32-search.png" not in response.text: raise ValueError("Login failed (bad response).") # Prepare our status variables. oc_codes = [] batch_start = 0 while True: # Build the current URL, then retrieve the data. # In contrast to the original version, we enforce ZIP files here. url = ( f"https://www.opencaching.de/search.php?queryid={queryid}&output=loc" + f"&startat={batch_start}&count={batch_size}&zip=1" ) response = session.get(url, headers=headers) # Check if the request has been successful. # If there has been an error, return the list of OC codes found until now. if response.status_code != 200: print(f"-- Terminating due to bad status code: {response.status_code}") break # Check if we got a ZIP file (in fact this should always be the case). # The first check uses the magic number for non-empty ZIP archives. if response.text.startswith("PK\x03\x04") and not response.text.startswith( "<?xml" ): # This is a zip file, so uncompress it. zip_file = zipfile.ZipFile(io.BytesIO(response.content)) # The ZIP files normally have one file only, so we just retrieve the first # one here. files = zip_file.namelist() if files: filename = files[0] xml_data = zip_file.read(filename) # If this is not a ZIP file or the ZIP file has no content, assume that it has # been a plain XML file. if not xml_data: xml_data = response.text # Parse the XML data. tree = ElementTree.fromstring(xml_data) # Get the name tags from the XML tree and retrieve the ID attribute for this # tag. # If the ID attribute is missing, the corresponding entry will be `None`. new_oc_codes = [name.get("id") for name in tree.iter("name")] # Remove all the `None` elements. new_oc_codes = list(filter(None, new_oc_codes)) # We have reached the end of the results. if not new_oc_codes: break # Add the new codes to the existing list and move on to the next request. 
oc_codes = oc_codes + new_oc_codes batch_start += batch_size return oc_codes
5,357,312
def parse_template(templ_str, event): """ Parses a template string and find the corresponding element in an event data structure. This is a highly simplified version of the templating that is supported by the Golang template code - it supports only a single reference to a sub element of the event structure. """ matches = TEMPLATE_RE.search(templ_str) tokens = matches.group(1).split('.') ref = event loc = [] for token in tokens: token = token.strip() # Skip the blank tokens if not token: continue if token not in ref: disp_loc = "event" + ''.join(["['{}']".format(_) for _ in loc]) err = "Could not find '{}' in {}".format(token, disp_loc) raise RuntimeError(err) ref = ref[token] loc.append(token) return ref
5,357,313
def current_time_hhmm() -> str: """ Uses the time library to get the current time in hours and minutes Args: None Returns: str(time.gmtime().tm_hour) + ":" + str(time.gmtime().tm_min) (str): Current time formatted as hour:minutes """ logger.info('Getting current time') return str(time.gmtime().tm_hour) + ":" + str(time.gmtime().tm_min)
5,357,314
def mae_loss(output, target): """Creates a criterion that measures the mean absolute error (l1 loss) between each element in the input :math:`output` and target :math:`target`. The loss can be described as: .. math:: \\ell(x, y) = L = \\operatorname{mean}(\\{l_1,\\dots,l_N\\}^\\top), \\quad l_n = \\left| x_n - y_n \\right|, where :math:`N` is the batch size. :math:`output` and :math:`target` are tensors of arbitrary shapes with a total of :math:`n` elements each. :param output: The output of the model or our predictions :type output: torch.Tensor :param target: The expected output or our labels :type target: typing.Union[torch.Tensor] :return: torch.Tensor :rtype: torch.Tensor """ ## TODO 4: Implement L1 loss. Use PyTorch operations. # Use PyTorch operations to return a PyTorch tensor. return torch.sum(torch.abs(output - target))/output.numel() #return nn.functional.l1_loss(output, target)
5,357,315
def test_assign_id_in_html(app: Sphinx) -> None: """It assigns an ID to notes automatically in HTML.""" app.build() tree = cached_parse(os.path.join(app.outdir, "index.html")) notes = tree.find_all("div", class_="note") assert len(notes) == 3 # first note is not inside a section assert notes[0]["id"] == "undefined-note-1" # second note is inside a section 'Test' assert notes[1]["id"] == "test-note-2" # third note has an explicit label ``foo`` assert notes[2]["id"] == "foo"
5,357,316
def aistracker_from_json(filepath, debug=True): """ get an aistracker object from a debug messages JSON that was previously exported from pyaisnmea Args: filepath(str): full path to json file debug(bool): save all message payloads and decoded attributes into messagelog Raises: NoSuitableMessagesFound: if there are no AIS messages in the file Returns: aistracker(ais.AISTracker): object that keeps track of all the ships we have seen messagelog(allmessages.AISMessageLog): object with all the AIS messages """ messagelog = allmessages.AISMessageLog() aistracker = ais.AISTracker() msgnumber = 1 for line in open_file_generator(filepath): try: linemsgdict = json.loads(line) payload = linemsgdict['payload'] msgtime = linemsgdict['rxtime'] msg = aistracker.process_message(payload, timestamp=msgtime) if debug: messagelog.store(msgnumber, payload, msg) msgnumber += 1 except (ais.UnknownMessageType, ais.InvalidMMSI, json.decoder.JSONDecodeError, KeyError, binary.NoBinaryData) as err: AISLOGGER.debug(str(err)) continue if aistracker.messagesprocessed == 0: raise NoSuitableMessagesFound('No AIS messages detected in this file') return (aistracker, messagelog)
5,357,317
def configure(node): """ Generates the script to set the hostname in a node """ script = [] script.append(Statements.exec("hostname %s" % node.getName())) script.append(Statements.createOrOverwriteFile( "/etc/hostname", [node.getName()])) script.append(Statements.exec( "sed -i 's/127.0.0.1/127.0.0.1\t%s/' /etc/hosts" % node.getName())) return script
5,357,318
def register_preprocess(function_name : str, prep_function): """ Register a preprocessing function for use in delta. Parameters ---------- function_name: str Name of the preprocessing function. prep_function: A function of the form prep_function(data, rectangle, bands_list), where data is an input numpy array, rectangle a `delta.imagery.rectangle.Rectangle` specifying the region covered by data, and bands_list is an integer list of bands loaded. The function must return a numpy array. """ global __prep_funcs __prep_funcs[function_name] = prep_function
5,357,319
def processOptional(opt): """ Processes the optional element 50% of the time, skips it the other 50% of the time """ rand = random.random() if rand <= 0.5: return '' else: return processRHS(opt.option)
5,357,320
def remove_non_paired_trials(df): """Remove non-paired trials from a dataset. This function will remove any trials from the input dataset df that do not have a matching pair. A matching pair are trial conditions A->B and B->A. """ # Define target combinations start_pos = np.concatenate(df['startPos'].to_numpy()) end_pos = np.concatenate(df['targPos'].to_numpy()) targ_comb = np.concatenate([start_pos, end_pos], axis=1) uni_targ_comb = np.unique(targ_comb, axis=0) # Convert target combinations to trial conditions start_cond = get_targ_cond(df['startPos']) end_cond = get_targ_cond(df['targPos']) targ_cond = [''.join([s, e]) for s, e in zip(start_cond, end_cond)] mask = get_targ_pairs(start_cond, end_cond) # Remove non-paired targets df = df[np.array(mask)] targ_cond = [tc for tc, m in zip(targ_cond, mask) if m] # Put other target information into a dict for easy access. This is # redundant and probably unnecessary, but is being done just in case this # information may be useful later on. targ_info = { 'start_pos': start_pos, 'end_pos': end_pos, 'targ_comb': targ_comb, 'uni_targ_comb': uni_targ_comb } return df, targ_cond, targ_info
5,357,321
def compute_weighted_means_ds(ds, shp, ds_name='dataset', time_range=None, column_names=[], averager=False, df_output=pd.DataFrame(), output=None, land_only=False, time_stat=False, ): """ Compute spatial weighted mean of xr.Dataset Parameters ---------- ds: xr.DataSet shp: gp.GeoDataFrame gp.GeoDataFrame containing the information needed for xesmf's spatial averaging ds_name: str (optional) Name of the dataset will be written to the pd.DataFrame as an extra column time_range: list (optional) List containing start and end date to select from ``ds`` column_names: list (optional) Extra column names of the pd.DataFrame; the information is read from global attributes of ``ds`` averager: str, xesmf.SpatialAverager (optional) Use CORDEX domain name to calculate a xesmf.SpatialAverager object or use user-given one. df_output: pd.DataFrame (optional) pd.DataFrame to be concatenated with the newly created pd.DataFrame output: str (optional) Name of the output directory path or file land_only: bool (optional) Consider only land points\n !!!This is NOT implemented yet!!!\n As workaround write land sea mask in ``ds['mask']``. xesmf's spatial averager automatically considers ``ds['mask']``. time_stat: str or list (optional) Do some time statistics on ``ds``\n !!!This is NOT implemented yet!!! Returns ------- DataFrame : pd.DataFrame pandas Dataframe containing time series of spatial averages. Example ------- To calculate time series of spatial averages for several 'Bundeländer':\n - select Schleswig-Holstein, Hamburg, Bremen and Lower Saxony\n - Merge those regions to one new region calles NortSeaCoast\n - Select time slice from 2007 to 2009\n - Set CORDEX specific result DataFrame column names\n :: import xarray as xr import xweights as xw path = '/work/kd0956/CORDEX/data/cordex/output/EUR-11/CLMcom/MIROC-MIROC5/rcp85/r1i1p1/CLMcom-CCLM4-8-17/v1/mon/tas/v20171121/' netcdffile = path + 'tas_EUR-11_MIROC-MIROC5_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-201012.nc' ds = xr.open_dataset(netcdffile) df = xw.compute_weighted_means_ds(ds, 'states', subregions=['01_Schleswig-Holstein, '02_Hamburg', '03_Niedersachsen', '04_Bremen'], merge_column=['all', 'NorthSeaCoast'], time_range=['2007-01-01','2009-12-31'], column_names=['institute_id', 'driving_model_id', 'experiment_id', 'driving_model_ensemlbe_member', 'model_id', 'rcm_version_id'], ) """ if land_only: """ Not clear how to find right lsm file for each ds Then write lsm file to ds['mask'] The rest is done by xesmf """ NotImplementedError if not isinstance(ds, xr.Dataset): return df_output if time_range: ds = ds.sel(time=slice(time_range[0], time_range[1])) column_dict = {column:ds.attrs[column] if hasattr(ds, column) else None for column in column_names} try: out = spatial_averager(ds, shp, savg=averager) except: return df_output drop = [i for i in out.coords if not out[i].dims] out = out.drop(labels=drop) if time_stat: """ Not sure if it is usefull to implement here or do it seperately after using xweights """ NotImplementedError df_output = concat_dataframe(df_output, out, column_dict=column_dict, name=ds_name) if output: write_to_csv(df_output, output) return df_output
5,357,322
def action_remove(indicator_id, date, analyst): """ Remove an action from an indicator. :param indicator_id: The ObjectId of the indicator to update. :type indicator_id: str :param date: The date of the action to remove. :type date: datetime.datetime :param analyst: The user removing the action. :type analyst: str :returns: dict with keys "success" (boolean) and "message" (str) if failed. """ indicator = Indicator.objects(id=indicator_id).first() if not indicator: return {'success': False, 'message': 'Could not find Indicator'} try: indicator.delete_action(date) indicator.save(username=analyst) return {'success': True} except ValidationError as e: return {'success': False, 'message': str(e)}
5,357,323
def repack_orb_to_dalton(A, norb, nclosed, nact, nvirt): """Repack a [norb, norb] matrix into a [(nclosed*nact) + (nclosed*nvirt) + (nact*nvirt)] vector for contraction with the CI Hamiltonian. """ assert norb == nclosed + nact + nvirt assert A.shape == (norb, norb) # These might be available in the global namespace, but this # function should work on its own. range_closed = list(range(0, nclosed)) range_act = list(range(nclosed, nclosed + nact)) range_virt = list(range(nclosed + nact, nclosed + nact + nvirt)) indices_rohf_closed_act = [(i, t) for i in range_closed for t in range_act] indices_rohf_closed_virt = [(i, a) for i in range_closed for a in range_virt] indices_rohf_act_virt = [(t, a) for t in range_act for a in range_virt] B = np.zeros( len(indices_rohf_closed_act) + len(indices_rohf_closed_virt) + len(indices_rohf_act_virt) ) for (i, t) in indices_rohf_closed_act: it = (t - nclosed) * nclosed + i B[it] += A[i, t] for (i, a) in indices_rohf_closed_virt: ia = i * nvirt + a - nclosed - nact + (nclosed * nact) B[ia] += A[i, a] for (t, a) in indices_rohf_act_virt: ta = (t - nclosed) * nvirt + a - nclosed - nact + (nclosed * nact) + (nclosed * nvirt) B[ta] += A[t, a] return B
5,357,324
def get_config(section="MAIN", filename="config.ini"): """ Function to retrieve all information from token file. Usually retrieves from config.ini """ try: config = ConfigParser() with open(filename) as config_file: config.read_file(config_file) return config[section] except FileNotFoundError: print("No configuration file found, check 'config_sample.ini'") raise FileNotFoundError
5,357,325
def _worst_xt_by_core(cores) -> float: """ Assigns a default worst crosstalk value based on the number of cores """ worst_crosstalks_by_core = {7: -84.7, 12: -61.9, 19: -54.8} # Cores: Crosstalk in dB worst_xt = worst_crosstalks_by_core.get(cores) # Worst aggregate intercore XT return worst_xt
5,357,326
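A minimal sketch of _worst_xt_by_core above (illustrative only; assumes the function is in scope):
# Known core counts map to a worst-case crosstalk in dB; unsupported counts fall through to None
assert _worst_xt_by_core(7) == -84.7
assert _worst_xt_by_core(4) is None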
def process_files(files): """Decode METAR lines from the given files.""" for file in files: fh = open(file, "r") for line in fh.readlines(): process_line(line)
5,357,327
async def test_config_entry_retry(hass: HomeAssistant) -> None: """Test that a config entry can be retried.""" config_entry = MockConfigEntry( domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS ) config_entry.add_to_hass(hass) with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True): await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}}) await hass.async_block_till_done() assert config_entry.state == ConfigEntryState.SETUP_RETRY
5,357,328
def _write_title(file: TextIO) -> None: """ Writes the title of test specifications Args: file (TextIO): test spec file """ title = "Test Specification" file.write(title + "\n") file.write("=" * len(title) + "\n") file.write("\n") file.write("This file lists all test cases' specifications.\n") file.write("\n")
5,357,329
def write_status_unsafe(status, message, status_file, tempdir=None): """Write a JSON structure to a file non-atomically""" with open(status_file, "w") as fh: json.dump(status, fh)
5,357,330
def _CheckUploadStatus(status_code): """Validates that HTTP status for upload is 2xx.""" return status_code // 100 == 2
5,357,331
def load(path: str, **kwargs) -> BELGraph: """Read a BEL graph. :param path: The path to a BEL graph in any of the formats with extensions described below :param kwargs: The keyword arguments are passed to the importer function :return: A BEL graph. This is the universal loader, which means any file path can be given and PyBEL will look up the appropriate load function. Allowed extensions are: - bel - bel.nodelink.json - bel.cx.json - bel.jgif.json The previous extensions also support gzipping. Other allowed extensions that don't support gzip are: - bel.pickle / bel.gpickle / bel.pkl - indra.json """ for extension, importer in IMPORTERS.items(): if path.endswith(extension): return importer(path, **kwargs) raise InvalidExtensionError(path=path)
5,357,332
def predict_on_matrix(input_matrix: List[List[Any]], provided_columns_names: Optional[List[str]] = None) \ -> Tuple[List[List[Any]], Tuple[str, ...]]: """ Make prediction on a Matrix of values :param input_matrix: data for prediction :param provided_columns_names: (Optional). Name of columns for provided matrix. :return: result matrix and result column names """ raise Exception(f'Expected error')
5,357,333
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state): """ Wait for all the instances in the cluster to reach a designated state. cluster_instances: a list of boto.ec2.instance.Instance cluster_state: a string representing the desired state of all the instances in the cluster value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as 'running', 'terminated', etc. (would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250) """ sys.stdout.write( "Waiting for cluster to enter '{s}' state.".format(s=cluster_state) ) sys.stdout.flush() start_time = datetime.now() num_attempts = 0 while True: time.sleep(10*( 1.*(num_attempts>0) + 0.1)) # seconds for i in cluster_instances: i.update() statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances]) if cluster_state == 'ssh-ready': if all(i.state == 'running' for i in cluster_instances) and \ all(s.system_status.status == 'ok' for s in statuses) and \ all(s.instance_status.status == 'ok' for s in statuses) and \ is_cluster_ssh_available(cluster_instances, opts): break else: if all(i.state == cluster_state for i in cluster_instances): break num_attempts += 1 sys.stdout.write(".") sys.stdout.flush() sys.stdout.write("\n") end_time = datetime.now() print "Cluster is now in '{s}' state. Waited {t} seconds.".format( s=cluster_state, t=(end_time - start_time).seconds )
5,357,334
def smooth_l1_loss( prediction: oneflow._oneflow_internal.BlobDesc, label: oneflow._oneflow_internal.BlobDesc, beta: float = 1.0, name: Optional[str] = None, ) -> oneflow._oneflow_internal.BlobDesc: """This operator computes the smooth l1 loss. The equation is: .. math:: & out = \\frac{(\\beta*x)^2}{2}, \\left|x\\right|<\\frac{1}{{\\beta}^2} & out = \\left|x\\right|-\\frac{0.5}{{\\beta}^2}, otherwise Args: prediction (oneflow._oneflow_internal.BlobDesc): The prediction Blob label (oneflow._oneflow_internal.BlobDesc): The label Blob beta (float, optional): The :math:`\\beta` in the equation. Defaults to 1.0. name (Optional[str], optional): The name for the operation. Defaults to None. Returns: oneflow._oneflow_internal.BlobDesc: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def smooth_l1_loss_Job(prediction: tp.Numpy.Placeholder((5, )), label: tp.Numpy.Placeholder((5, )) ) -> tp.Numpy: return flow.smooth_l1_loss(prediction=prediction, label=label) prediction = np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32) label = np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32) out = smooth_l1_loss_Job(prediction, label) # out [0.02 0.12499999 1.7 0.005 0.17999998] """ op = ( flow.user_op_builder( name if name is not None else id_util.UniqueStr("SmoothL1Loss_") ) .Op("smooth_l1_loss") .Input("prediction", [prediction]) .Input("label", [label]) .Output("loss") ) op.Attr("beta", float(beta)) return op.Build().InferAndTryRun().RemoteBlobList()[0]
5,357,335
def get_model_fields(model, concrete=False): # type: (Type[Model], Optional[bool]) -> List[Field] """ Gets model field :param model: Model to get fields for :param concrete: If set, returns only fields with column in model's table :return: A list of fields """ if not hasattr(model._meta, 'get_fields'): # Django 1.8+ if concrete: res = model._meta.concrete_fields else: res = model._meta.fields + model._meta.many_to_many else: res = model._meta.get_fields() if concrete: # Many to many fields have concrete flag set to True. Strange. res = [f for f in res if getattr(f, 'concrete', True) and not getattr(f, 'many_to_many', False)] return res
5,357,336
def loss_fun(para): """ This is the loss function """ return -data_processing(my_cir(para))
5,357,337
def update_user_history(user_id, expire_seconds): """Update user requests number history on every task submission""" if not redis.exists(user_id): redis.set(user_id, 1, expire_seconds) else: redis.set(user_id, int(redis.get(user_id)) + 1, expire_seconds)
5,357,338
def GetDynTypeMgr(): """Get the dynamic type manager""" return _gDynTypeMgr
5,357,339
def identifyEntity(tweet, entities): """ Identify the target entity of the tweet from the list of entities :param tweet: :param entities: :return: """ best_score = 0 # best score over all entities targetEntity = "" # the entity corresponding to the best score for word in tweet: for entity in entities: cur_score = 0 # the score for the current entity if word == entity: cur_score = 1 # set the current score to 1 in case the entity name is mentioned in the tweet for entity_related_word in entities[entity]: if word == entity_related_word: cur_score = cur_score + 1 # increment the current score by 1 in case a related term to # the current entity is mentioned in the tweet if cur_score > best_score: # update the best score and the target entity best_score = cur_score targetEntity = entity return targetEntity
5,357,340
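A small illustrative call for identifyEntity above (hypothetical data; assumes the function is in scope):
# "apple" is named directly and one of its related terms ("iphone") also appears
tweet = ["i", "love", "my", "apple", "iphone"]
entities = {"apple": ["iphone", "mac"], "google": ["android", "pixel"]}
assert identifyEntity(tweet, entities) == "apple"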
def angle_between(a, b): """ compute angle in radian between a and b. Throws an exception if a or b has zero magnitude. :param a: :param b: :return: """ # TODO: check if extreme value that can make the function crash-- use "try" # from numpy.linalg import norm # from numpy import dot # import math arccosInput = np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b) # sct.printv(arccosInput) arccosInput = 1.0 if arccosInput > 1.0 else arccosInput arccosInput = -1.0 if arccosInput < -1.0 else arccosInput sign_angle = np.sign(np.cross(a, b)) # sct.printv(sign_angle) return sign_angle * acos(arccosInput) # @xl_func("numpy_row v1, numpy_row v2: float") # def py_ang(v1, v2): # """ Returns the angle in radians between vectors 'v1' and 'v2' """ # cosang = np.dot(a, b) # sinang = la.norm(np.cross(a, b)) # return np.arctan2(sinang, cosang)
5,357,341
def _apply_D_loss(scores_fake, scores_real, loss_func): """Compute Discriminator losses and normalize loss values Arguments --------- scores_fake : list discriminator scores of generated waveforms scores_real : list discriminator scores of groundtruth waveforms loss_func : object object of target discriminator loss """ loss = 0 real_loss = 0 fake_loss = 0 if isinstance(scores_fake, list): # multi-scale loss for score_fake, score_real in zip(scores_fake, scores_real): total_loss, real_loss, fake_loss = loss_func( score_fake=score_fake, score_real=score_real ) loss += total_loss real_loss += real_loss fake_loss += fake_loss # normalize loss values with number of scales (discriminators) # loss /= len(scores_fake) # real_loss /= len(scores_real) # fake_loss /= len(scores_fake) else: # single scale loss total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real) loss = total_loss return loss, real_loss, fake_loss
5,357,342
def render_html(options): """Start a Flask server to generate HTML report on request.""" # spin up the Flask server config = ProdConfig config.SQLALCHEMY_DATABASE_URI = options['database'] report_options = options['report'] config.CHANJO_PANEL_NAME = report_options.get('panel_name') config.CHANJO_LANGUAGE = report_options.get('language') config.CHANJO_PANEL = report_options.get('panel') config.DEBUG = report_options.get('debug') app = create_app(config=config) host = report_options.get('host', '0.0.0.0') port = report_options.get('port', 5000) click.echo(click.style("open browser to: http://{}:{}".format(host, port), fg='blue')) app.run(host=host, port=port)
5,357,343
def _write_int(ofile, i, indent_level, dicts, key, print_type): """ Writes 'i' to 'ofile'. If 'print_type' is true, the M2K type followed by a colon is written first. """ # appease pychecker if indent_level or dicts or key: pass if print_type: ofile.write(_dec_types[_get_int_type_index(i)]) ofile.write(':') ofile.write(str(i))
5,357,344
def _TestCase3(iTolerance): """ This is test case function #3. |br| Args: iTolerance: maximum tolerance of a difference between an expected value and a real value Returns: Nothing """ tStart = rxcs.console.module_progress('LNA test (case 3) SNR') # Define the input signal mSig = np.random.randn(1000, 1000) vCoef = np.array([3, 1, 0.01, 10]) mExp = 3 * mSig + mSig**2 + 0.01*mSig**3 + 10*mSig**4 _checkLNA(mSig, vCoef, mExp, iTolerance) rxcs.console.module_progress_done(tStart) rxcs.console.info('case 3 OK!')
5,357,345
def get_temp_dir(): """ Get path to the temp directory. Returns: str: The path to the temp directory. """ return fix_slashes( tempfile.gettempdir() )
5,357,346
def async_parser(_, objconf, skip=False, **kwargs): """Asynchronously parses the pipe content Args: _ (None): Ignored objconf (obj): The pipe configuration (an Objectify instance) skip (bool): Don't parse the content kwargs (dict): Keyword arguments Kwargs: assign (str): Attribute to assign parsed content (default: content) stream (dict): The original item Returns: Iter[dict]: The stream of items Examples: >>> from riko import get_path >>> from riko.bado import react >>> from riko.bado.mock import FakeReactor >>> from meza.fntools import Objectify >>> from meza.compat import decode >>> >>> def run(reactor): ... callback = lambda x: print(decode(next(x)['content'][:32])) ... url = get_path('cnn.html') ... conf = {'url': url, 'start': '<title>', 'end': '</title>'} ... objconf = Objectify(conf) ... kwargs = {'stream': {}, 'assign': 'content'} ... d = async_parser(None, objconf, **kwargs) ... return d.addCallbacks(callback, logger.error) >>> >>> try: ... react(run, _reactor=FakeReactor()) ... except SystemExit: ... pass ... CNN.com International - Breaking """ if skip: stream = kwargs["stream"] else: url = get_abspath(objconf.url) content = yield io.async_url_read(url) parsed = get_string(content, objconf.start, objconf.end) detagged = get_text(parsed) if objconf.detag else parsed splits = detagged.split(objconf.token) if objconf.token else [detagged] stream = ({kwargs["assign"]: chunk} for chunk in splits) return_value(stream)
5,357,347
def pick_op(r, maxr, w, maxw): """Choose a read or a write operation""" if r == maxr or random.random() >= float(w) / maxw: return "write" else: return "read"
5,357,348
def sim_nochange(request): """ Return a dummy YATSM model container with a no-change dataset "No-change" dataset is simply a timeseries drawn from samples of one standard normal. """ X, Y, dates = _sim_no_change_data() return setup_dummy_YATSM(X, Y, dates, [0])
5,357,349
def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'): """use builtin patch to apply <patchobj> to the working directory. returns whether patch was applied with fuzz factor.""" if files is None: files = {} if eolmode is None: eolmode = ui.config('patch', 'eol', 'strict') if eolmode.lower() not in eolmodes: raise util.Abort(_('Unsupported line endings type: %s') % eolmode) eolmode = eolmode.lower() try: fp = open(patchobj, 'rb') except TypeError: fp = patchobj if cwd: curdir = os.getcwd() os.chdir(cwd) try: ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode) finally: if cwd: os.chdir(curdir) if fp != patchobj: fp.close() if ret < 0: raise PatchError return ret > 0
5,357,350
def test_convert_str_to_datetime(dummy_df, dummy_df_datetime): """Parse strings to datetime format.""" actual = clean.convert_str_to_datetime(dummy_df, colname="reviewdate", datetime_format="%B %d %Y") expected = deepcopy(dummy_df_datetime) # Preserve original dtypes expected["reviewdate"] = expected["reviewdate"].astype("datetime64") expected["recordlabel"] = expected["recordlabel"].astype("float64") pd.testing.assert_frame_equal(actual, expected)
5,357,351
def test_register_interface_extend_task(collector, int_decl): """Test extending a task by adding interfaces. """ collector.contributions['exopy.Task'] = TaskInfos() task, _ = int_decl task.task = 'exopy.Task' task.register(collector, {}) assert collector.contributions['exopy.Task'].interfaces
5,357,352
def get_queryset_descendants(nodes, include_self=False, add_to_result=None): """ Database query for descendants. If there are no nodes, an empty queryset is returned. :param nodes: list of tree nodes whose descendants should be found :param include_self: whether to include the original list of nodes in the result :param add_to_result: list of node keys that should additionally be included in the result :return: list of nodes (QuerySet), ordered in tree-traversal order """ if not nodes: # HACK: Emulate MPTTModel.objects.none(), because MPTTModel is abstract return EmptyQuerySet(MPTTModel) filters = [] model_class = nodes[0].__class__ if include_self: for n in nodes: if n.get_descendant_count(): lft, rght = n.lft - 1, n.rght + 1 filters.append(Q(tree_id=n.tree_id, lft__gt=lft, rght__lt=rght)) else: filters.append(Q(pk=n.pk)) else: for n in nodes: if n.get_descendant_count(): lft, rght = n.lft, n.rght filters.append(Q(tree_id=n.tree_id, lft__gt=lft, rght__lt=rght)) if add_to_result: if len(add_to_result) > 1: filters.append(Q(id__in=add_to_result)) else: filters.append(Q(pk=add_to_result[0])) if filters: return model_class.objects.filter(reduce(operator.or_, filters)) else: # HACK: Emulate model_class.objects.none() return model_class.objects.filter(id__isnull=True)
5,357,353
def op_mul(lin_op, args): """Applies the linear operator to the arguments. Parameters ---------- lin_op : LinOp A linear operator. args : list The arguments to the operator. Returns ------- NumPy matrix or SciPy sparse matrix. The result of applying the linear operator. """ # Constants convert directly to their value. if lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]: result = lin_op.data # No-op is not evaluated. elif lin_op.type is lo.NO_OP: return None # For non-leaves, recurse on args. elif lin_op.type is lo.SUM: result = sum(args) elif lin_op.type is lo.NEG: result = -args[0] elif lin_op.type is lo.MUL: coeff = mul(lin_op.data, {}) result = coeff*args[0] elif lin_op.type is lo.DIV: divisor = mul(lin_op.data, {}) result = args[0]/divisor elif lin_op.type is lo.SUM_ENTRIES: result = np.sum(args[0]) elif lin_op.type is lo.INDEX: row_slc, col_slc = lin_op.data result = args[0][row_slc, col_slc] elif lin_op.type is lo.TRANSPOSE: result = args[0].T elif lin_op.type is lo.CONV: result = conv_mul(lin_op, args[0]) elif lin_op.type is lo.PROMOTE: result = np.ones(lin_op.size)*args[0] elif lin_op.type is lo.DIAG_VEC: val = intf.from_2D_to_1D(args[0]) result = np.diag(val) else: raise Exception("Unknown linear operator.") return result
5,357,354
def comparator(x, y): """ default comparator :param x: :param y: :return: """ if x < y: return -1 elif x > y: return 1 return 0
5,357,355
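A minimal sketch showing how the cmp-style comparator above can be used for sorting (illustrative only; assumes the function is in scope):
import functools
# A cmp-style function is adapted into a key function via functools.cmp_to_key
assert sorted([3, 1, 2], key=functools.cmp_to_key(comparator)) == [1, 2, 3]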
def test_json_parser_input_error(sdc_builder, sdc_executor): """Test JSON parser processor with an invalid input value. The pipeline would look like: dev_raw_data_source >> json_parser >> trash """ pipeline_builder = sdc_builder.get_pipeline_builder() dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source') dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='{ "A" }') json_parser = pipeline_builder.add_stage('JSON Parser', type='processor') json_parser.set_attributes(field_to_parse='/text', target_field='result') trash = pipeline_builder.add_stage('Trash') dev_raw_data_source >> json_parser >> trash pipeline = pipeline_builder.build() sdc_executor.add_pipeline(pipeline) snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot sdc_executor.stop_pipeline(pipeline) #JSONP_03 - Cannot parse the JSON field assert 'JSONP_03' == snapshot[json_parser.instance_name].error_records[0].header['errorCode']
5,357,356
def set_trait(age, age_risk_map, sex, sex_risk_map, race, race_risk_map): """ A trait occurs based on some mix of """ if age in age_risk_map: risk_from_age = age_risk_map[age] else: risk_from_age = 0 if sex in sex_risk_map: risk_from_sex = sex_risk_map[sex] else: risk_from_sex = 0 if race in race_risk_map: risk_from_race = race_risk_map[race] else: risk_from_race = 0 # probability of trait prob_trait = 1 - (1 - risk_from_age) * (1 - risk_from_sex) * (1 - risk_from_race) prob_not_trait = 1 - prob_trait resident_trait = np.random.choice(np.arange(1,3), p=[prob_not_trait,prob_trait]) return resident_trait
5,357,357
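A small worked example of the risk-combination formula used in set_trait above (hypothetical risk values):
# prob_trait = 1 - (1 - 0.10) * (1 - 0.20) * (1 - 0.00) = 1 - 0.72 = 0.28
prob_trait = 1 - (1 - 0.10) * (1 - 0.20) * (1 - 0.00)
assert abs(prob_trait - 0.28) < 1e-9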
def isvalid(save_path, file): """ Returns true if the file described by the parameters is a file with the appropriate file extension. """ return os.path.isfile(os.path.join(save_path, file)) and \ str(file).endswith('.meta')
5,357,358
def test_yang_tree(): """ check that the tree is consistent with the yang """ res = subprocess.run(['pyang', '-f', 'tree', '--tree-line-length', '69', '-p', IETF_DIR, YANG_FILE], stdout=subprocess.PIPE) treefile = Path(YANG_FILE).with_suffix('.tree') tree = open(treefile, 'r').read() assert res.stdout.decode('utf-8') == tree, "YANG tree rendering differs" # remove downloaded yang files for url in urls: base_name, file = url filename = file.split('/')[-1] unlink(f'{IETF_DIR}/{filename}')
5,357,359
def tidy_expression(expr, design=None): """Converts expression matrix into a tidy 'long' format.""" df_long = pd.melt( _reset_index( expr, name='gene'), id_vars=['gene'], var_name='sample') if design is not None: df_long = pd.merge( df_long, _reset_index( design, name='sample'), on='sample', how='left') return df_long
5,357,360
def model_remote_to_local(remote_timestamps, local_timestamps, debug=False): """for timestamps""" a1=remote_timestamps[:,np.newaxis] a2=np.ones( (len(remote_timestamps),1)) A = np.hstack(( a1,a2)) b = local_timestamps[:,np.newaxis] x,resids,rank,s = np.linalg.lstsq(A,b) if debug: print 'in model_remote_to_local: N=%d, resids=%s'%( len(remote_timestamps),resids) gain = x[0,0] offset = x[1,0] return gain,offset
5,357,361
def get_optimizer(name): """Get an optimizer generator that returns an optimizer according to lr.""" if name == 'adam': def adam_opt_(lr): return tf.keras.optimizers.Adam(lr=lr) return adam_opt_ else: raise ValueError('Unknown optimizer %s.' % name)
5,357,362
def get_multi(response: Response, common: dict = Depends(common_parameters)) -> List[ShopToPriceSchema]: """List prices for a shop""" query_result, content_range = shop_to_price_crud.get_multi( skip=common["skip"], limit=common["limit"], filter_parameters=common["filter"], sort_parameters=common["sort"], ) response.headers["Content-Range"] = content_range for result in query_result: result.half = result.price.half if result.price.half and result.use_half else None result.one = result.price.one if result.price.one and result.use_one else None result.two_five = result.price.two_five if result.price.two_five and result.use_two_five else None result.five = result.price.five if result.price.five and result.use_five else None result.joint = result.price.joint if result.price.joint and result.use_joint else None result.piece = result.price.piece if result.price.piece and result.use_piece else None return query_result
5,357,363
async def test_sensor_empty( hass: HomeAssistant, config_entry: ConfigEntry, vehicle_type: str ): """Test for Renault sensors with empty data from Renault.""" entity_registry = mock_registry(hass) device_registry = mock_device_registry(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() mock_vehicle = MOCK_VEHICLES[vehicle_type] check_device_registry(device_registry, mock_vehicle["expected_device"]) expected_entities = mock_vehicle[Platform.SENSOR] assert len(entity_registry.entities) == len(expected_entities) _check_and_enable_disabled_entities(entity_registry, expected_entities) await hass.config_entries.async_reload(config_entry.entry_id) await hass.async_block_till_done() check_entities_no_data(hass, entity_registry, expected_entities, STATE_UNKNOWN)
5,357,364
def send_crash(request, machine_config_info, crashlog): """ Save houdini crashes """ machine_config = get_or_save_machine_config( machine_config_info, get_ip_address(request), datetime.datetime.now()) save_crash(machine_config, crashlog, datetime.datetime.now()) return True
5,357,365
def read_csv_file(filename): """Read csv file into a numpy array """ header_info = {} # Make this Py2.x and Py3.x compatible if sys.version_info[0] < 3: infile = open(filename, 'rb') else: infile = open(filename, 'r', newline='', encoding='utf8') with infile as csvfile: # Make this Py2.x and Py3.x compatible if sys.version_info[0] < 3: data = csv.reader((line.replace(b'\0', b'') for line in csvfile), delimiter=b',') mynext = data.next else: data = csv.reader((line.replace('\0', '') for line in csvfile), delimiter=',') mynext = data.__next__ temp_row = mynext() header_info['timestamp'] = temp_row[0] header_info['file'] = temp_row[1] header_info['title'] = mynext()[1] header_info['model'] = mynext()[1] header_info['serial_number'] = mynext()[1] temp_row = mynext() header_info['center_freq'] = float(temp_row[1]) temp_row = mynext() header_info['span_freq'] = float(temp_row[1]) temp_row = mynext() header_info['resolution_bw'] = float(temp_row[1]) temp_row = mynext() header_info['video_bw'] = float(temp_row[1]) temp_row = mynext() header_info['ref_level'] = float(temp_row[1]) temp_row = mynext() header_info['sweep_time'] = float(temp_row[1]) temp_row = mynext() header_info['num_points'] = int(temp_row[1]) temp_row = mynext() # Skip blank line 12 temp_row = mynext() # Skip blank line 13 temp_row = mynext() num_traces = len(temp_row) - 1 header_info['num_traces'] = num_traces temp_row = mynext() header_info['frequency'] = temp_row[0] data_array = [] if num_traces == 1: for row in data: data_array.append((float(row[0]), float(row[1]))) data = np.array( data_array, dtype={'names': ('frequency', 'amplitude'), 'formats': ('f8', 'f8')}) elif num_traces == 2: for row in data: data_array.append((float(row[0]), [float(row[1]), float(row[2])])) data = np.array( data_array, dtype={'names': ('frequency', 'amplitude'), 'formats': ('f8', '2f8')}) elif num_traces == 3: for row in data: data_array.append((float(row[0]), [float(row[1]), float(row[2]), float(row[3])])) data = np.array( data_array, dtype={'names': ('frequency', 'amplitude'), 'formats': ('f8', '3f8')}) return (header_info, data)
5,357,366
def apply_HAc_dense(A_C, A_L, A_R, Hlist): """ Construct the dense effective Hamiltonian HAc and apply it to A_C. For testing. """ d, chi, _ = A_C.shape HAc = HAc_dense(A_L, A_R, Hlist) HAc_mat = HAc.reshape((d*chi*chi, d*chi*chi)) A_Cvec = A_C.flatten() A_C_p = np.dot(HAc_mat, A_Cvec).reshape(A_C.shape) return A_C_p
5,357,367
def min_index(array, i, j): """Helper function for selection sort. Returns the index of the smallest element in 'array' between 'i' and 'j'-1. """ index = i for k in range(i, j): if array[k] < array[index]: index = k return index
5,357,368
def filterControlChars(value, replacement=' '): """ Returns string value with control chars being supstituted with replacement character >>> filterControlChars(u'AND 1>(2+3)\\n--') u'AND 1>(2+3) --' """ return filterStringValue(value, PRINTABLE_CHAR_REGEX, replacement)
5,357,369
def get_deobfuscator(var_names) -> str: """Creates a deobfuscator for the given set of var names. Args: var_names (list): List of variable names from the `obfuscate` function. Returns: str: Deobfuscator """ return f'\n\ngetattr(getattr(__main__, [x for x in dir(__main__) if x.startswith(\'__b\')][0]), (lambda: "ArithmeticError" and "AssertionError" and "AttributeError" and "BaseException" and "BlockingIOError" and "BrokenPipeError" and "BufferError" and "BytesWarning" and "ChildProcessError" and "ConnectionAbortedError" and "ConnectionError" and "ConnectionRefusedError" and "ConnectionResetError" and "DeprecationWarning" and "EOFError" and "Ellipsis" and "EnvironmentError" and "Exception" and "False" and "FileExistsError" and "FileNotFoundError" and "FloatingPointError" and "FutureWarning" and "GeneratorExit" and "IOError" and "ImportError" and "ImportWarning" and "IndentationError" and "IndexError" and "InterruptedError" and "IsADirectoryError" and "KeyError" and "KeyboardInterrupt" and "LookupError" and "MemoryError" and "ModuleNotFoundError" and "NameError" and "None" and "NotADirectoryError" and "NotImplemented" and "NotImplementedError" and "OSError" and "OverflowError" and "PendingDeprecationWarning" and "PermissionError" and "ProcessLookupError" and "RecursionError" and "ReferenceError" and "ResourceWarning" and "RuntimeError" and "RuntimeWarning" and "StopAsyncIteration" and "StopIteration" and "SyntaxError" and "SyntaxWarning" and "SystemError" and "SystemExit" and "TabError" and "TimeoutError" and "True" and "TypeError" and "UnboundLocalError" and "UnicodeDecodeError" and "UnicodeEncodeError" and "UnicodeError" and "UnicodeTranslateError" and "UnicodeWarning" and "UserWarning" and "ValueError" and "Warning" and "WindowsError" and "ZeroDivisionError" and "__build_class__" and "__debug__" and "__doc__" and "__import__" and "__loader__" and "__name__" and "__package__" and "__spec__" and "abs" and "all" and "any" and "ascii" and "bin" and "bool" and "breakpoint" and "bytearray" and "bytes" and "callable" and "chr" and "classmethod" and "compile" and "complex" and "copyright" and "credits" and "delattr" and "dict" and "dir" and "divmod" and "enumerate" and "eval" and "fdlr" and "exit" and "filter" and "float" and "format" and "frozenset" and "getattr" and "globals" and "hasattr" and "hash" and "help" and "hex" and "id" and "input" and "int" and "isinstance" and "issubclass" and "iter" and "len" and "license" and "list" and "locals" and "map" and "max" and "memoryview" and "min" and "next" and "object" and "oct" and "open" and "ord" and "pow" and "print" and "property" and "quit" and "range" and "repr" and "reversed" and "round" and "set" and "setattr" and "slice" and "sorted" and "staticmethod" and "str" and "sum" and "super" and "tuple" and "type" and "vars" and "zip" and "exec")())(\'\'.join([getattr(binascii, [x for x in dir(binascii) if x.startswith(chr(97)+str((()==())+([]==[]))+chr(98))][0])(globals().get(var_name)).decode() for var_name in {var_names}]))'
5,357,370
def write_trt_rpc(cell_ID, cell_time, lon, lat, area, rank, hmin, hmax, freq, fname, timeformat='%Y%m%d%H%M'): """ writes the rimed particles column data for a TRT cell Parameters ---------- cell_ID : array of ints the cell ID cell_time : array of datetime the time step lon, lat : array of floats the latitude and longitude of the center of the cell area : array of floats the area of the cell rank : array of floats the rank of the cell hmin, hmax : array of floats Minimum and maximum altitude of the rimed particle column freq : array of floats Frequency of the species constituting the rime particle column within the limits of it fname : str file name where to store the data Returns ------- fname : str the name of the file where data has written """ hmin = hmin.filled(fill_value=get_fillvalue()) hmax = hmax.filled(fill_value=get_fillvalue()) freq = freq.filled(fill_value=get_fillvalue()) with open(fname, 'w', newline='') as csvfile: fieldnames = [ 'traj_ID', 'yyyymmddHHMM', 'lon', 'lat', 'area', 'RANKr', 'hmin', 'hmax', 'freq'] writer = csv.DictWriter(csvfile, fieldnames) writer.writeheader() for i, traj_ID_el in enumerate(cell_ID): writer.writerow({ 'traj_ID': traj_ID_el, 'yyyymmddHHMM': cell_time[i].strftime(timeformat), 'lon': lon[i], 'lat': lat[i], 'area': area[i], 'RANKr': rank[i], 'hmin': hmin[i], 'hmax': hmax[i], 'freq': freq[i] }) csvfile.close() return fname
5,357,371
def get_tag(string: str) -> Tag: """Get the tag.""" return Tag.objects.get(tag=string)
5,357,372
def average_precision(gt, pred): """ Computes the average precision. This function computes the average prescision at k between two lists of items. Parameters ---------- gt: set A set of ground-truth elements (order doesn't matter) pred: list A list of predicted elements (order does matter) Returns ------- score: double The average precision over the input lists """ if not gt: return 0.0 score = 0.0 num_hits = 0.0 for i,p in enumerate(pred): if p in gt and p not in pred[:i]: num_hits += 1.0 score += num_hits / (i + 1.0) return score / max(1.0, len(gt))
5,357,373
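A small worked check of average_precision above (hypothetical ground truth and prediction; assumes the function is in scope):
# Hits at ranks 1 and 3 give (1/1 + 2/3) / |gt| = 5/9
assert abs(average_precision({1, 2, 3}, [1, 4, 2]) - 5.0 / 9.0) < 1e-9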
def rotate_coo(x, y, phi): """Rotate the coordinates in the *.coo files for data sets containing images at different PAs. """ # Rotate around center of image, and keep origin at center xin = 512. yin = 512. xout = 512. yout = 512. cos = math.cos(math.radians(phi)) sin = math.sin(math.radians(phi)) xrot = (x - xin) * cos - (y - yin) * sin + xout yrot = (x - xin) * sin + (y - yin) * cos + yout return [xrot, yrot]
5,357,374
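A minimal sketch of rotate_coo above (illustrative only; assumes the function and its math import are in scope). Rotating the point (512, 612) by 90 degrees about the image centre (512, 512) gives approximately (412, 512):
xr, yr = rotate_coo(512.0, 612.0, 90.0)
assert abs(xr - 412.0) < 1e-9 and abs(yr - 512.0) < 1e-9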
def database(): """ View MongoDB Configuration """ christisMongoconfigPath = get_mongo_configuration_location() if (not Path(christisMongoconfigPath).is_file()): typer.echo("ERROR: The cli.yaml file can't be found please use CLI to generate it!",err=True) raise typer.Exit(code=1) typer.echo("Open CLI Configuration File in {0}".format(christisMongoconfigPath)) typer.echo("\n") with open(christisMongoconfigPath) as mongo: # Here f is the file-like object read_data = mongo.read() typer.echo(read_data)
5,357,375
def get_spectral_info(self): """ Return the channel values """ if self.method=='scouse': self.specx=self.scouseobject.xtrim self.specy=self.my_spectrum.spectrum[self.scouseobject.trimids] self.specrms=self.my_spectrum.rms else: self.specx = self.individual[self.index,0,:] self.specy = self.individual[self.index,1,:] from scousepy.noisy import getnoise noisy=getnoise(self.specx, self.specy) if np.isfinite(noisy.rms): self.specrms = noisy.rms else: self.specrms = 0.0 print('') print(colors.fg._yellow_+"Warning: Could not compute rms. "+noisy.flag+". "+colors._endc_)
5,357,376
def ip_block_array(): """ Return an ipBlock array instance fixture """ return ['10.0.0.1', '10.0.0.2', '10.0.0.3']
5,357,377
def check_term_source_refs_usage(i_df, dir_context): """Checks Term Source REF linkages in investigation, study and assay files :param i_df: An investigation DataFrame :param dir_context: Path to where the investigation file is found :return: None """ check_term_source_refs_in_investigation(i_df) check_term_source_refs_in_assay_tables(i_df, dir_context)
5,357,378
def load_model(model_name, dir_loc=None, alive_bar_on=True): """Load local model_name=model_s if present, else fetch from hf.co.""" if dir_loc is None: dir_loc = "" dir_loc = Path(dir_loc).absolute().as_posix() file_loc = f"{dir_loc}/{model_name}" if Path(file_loc).exists(): if alive_bar_on: with alive_bar( 1, title=f" Loading {dir_loc}/{model_name}, takes ~30 secs ...", length=3, ) as progress_bar: model = joblib.load(file_loc) # model_s = pickle.load(open(file_loc, "rb")) progress_bar() # pylint: disable=not-callable else: logger.info("Loading %s/%s, takes ~30 secs ...", dir_loc, model_name) model = joblib.load(file_loc) else: logger.info( "Fetching and caching %s from huggingface.co... " "The first time may take a while depending on your net.", model_name, ) if alive_bar_on: with alive_bar( 1, title=" Subsequent loading takes ~2-3 secs ...", length=3 ) as progress_bar: try: model = joblib.load( cached_download(hf_hub_url("mikeee/model_s", model_name)) ) except Exception as exc: logger.error(exc) raise progress_bar() # pylint: disable=not-callable else: try: model = joblib.load( cached_download(hf_hub_url("mikeee/model_s", model_name)) ) except Exception as exc: logger.error(exc) raise return model
5,357,379
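A possible call site for `load_model` above; the artefact name is hypothetical, and the first call may download from huggingface.co as described in the docstring. The returned object is whatever joblib serialised (for example an sklearn estimator).

# Hypothetical model name -- substitute the artefact you actually need.
model = load_model("model_s", alive_bar_on=False)
# e.g. preds = model.predict(features)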
def get_dev_requirements() -> Generator: """Yield package name and version for Python developer requirements.""" return get_versions("DEVELOPMENT")
5,357,380
def initialize_logging_errors_to_console(logger): """Log errors to the console, in a simple single-line format.""" ch = logging.StreamHandler() ch.setLevel(logging.ERROR) ch.setFormatter(logging.Formatter('Error: %(asctime)s - %(message)s')) logger.addHandler(ch)
5,357,381
def _ndb_key_to_cloud_key(ndb_key): """Convert a ndb.Key to a cloud entity Key.""" return datastore.Key( ndb_key.kind(), ndb_key.id(), project=utils.get_application_id())
5,357,382
def inference(H, images, train=True):
    """Build the MNIST model up to where it may be used for inference.

    Parameters
    ----------
    H: dict of hyperparameters; only H['arch']['num_channels'] and
       H['arch']['num_classes'] are used here.
    images: Images placeholder, from inputs().
    train: whether the network is used for train or inference

    Returns
    -------
    softmax_linear: Output tensor with the computed logits.
    """
    num_filter_1 = 32
    num_filter_2 = 64

    # First Convolutional Layer
    with tf.variable_scope('Conv1') as scope:
        # Adding Convolutional Layers
        W_conv1 = weight_variable(
            'weights', [5, 5, H['arch']['num_channels'], num_filter_1])
        b_conv1 = bias_variable('biases', [num_filter_1])

        h_conv1 = tf.nn.relu(
            conv2d(images, W_conv1) + b_conv1, name=scope.name)
        _activation_summary(h_conv1)

    # First Pooling Layer
    h_pool1 = max_pool_2x2(h_conv1, name='pool1')

    # Second Convolutional Layer
    with tf.variable_scope('Conv2') as scope:
        W_conv2 = weight_variable(
            'weights', [5, 5, num_filter_1, num_filter_2])
        b_conv2 = bias_variable('biases', [num_filter_2])
        h_conv2 = tf.nn.relu(
            conv2d(h_pool1, W_conv2) + b_conv2, name=scope.name)
        _activation_summary(h_conv2)

    # Second Pooling Layer
    h_pool2 = max_pool_2x2(h_conv2, name='pool2')

    # Find correct dimension
    dim = 1
    for d in h_pool2.get_shape()[1:].as_list():
        dim *= d

    # Adding Fully Connected Layers
    with tf.variable_scope('fc1') as scope:
        W_fc1 = weight_variable('weights', [dim, 1024])
        b_fc1 = bias_variable('biases', [1024])

        h_pool2_flat = tf.reshape(h_pool2, [-1, dim])
        h_fc1 = tf.nn.relu(
            tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name=scope.name)
        _activation_summary(h_fc1)

    # Adding Dropout
    if train:
        h_fc1 = tf.nn.dropout(h_fc1, 0.5, name='dropout')

    with tf.variable_scope('logits') as scope:
        W_fc2 = weight_variable('weights', [1024, H['arch']['num_classes']])
        b_fc2 = bias_variable('biases', [H['arch']['num_classes']])
        logits = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, name=scope.name)
        _activation_summary(logits)

    return logits
5,357,383
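A rough sketch of wiring up `inference` above in TF1-style graph mode. The hyperparameter dict only needs the keys the function actually reads; the placeholder shape is an assumption for MNIST-sized input, and the helpers (weight_variable, conv2d, etc.) must be in scope as in the original module.

import tensorflow as tf  # TF 1.x graph-mode API, as used above

# Only the keys read inside inference() are required here.
H = {'arch': {'num_channels': 1, 'num_classes': 10}}

images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')
logits = inference(H, images, train=False)  # dropout disabled at inference time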
def _find_modules_and_directories(top_level_directory): """ Recursive helper function to find all python files included in top level package. This will recurse down the directory paths of any package to find all modules and subpackages in order to create an exhaustive list of all python files within a given package. :param top_level_directory: Path to the top level of a python package. :type top_level_directory: str :return: Returns a list of paths to all python files within that package. :rtype: list [str] """ modules = [] directories = [] for item in os.listdir(top_level_directory): if item.endswith(".py"): modules.append(os.path.join(top_level_directory, item)) elif os.path.isdir(os.path.join(top_level_directory, item)): directories.append(os.path.join(top_level_directory, item)) for directory in directories: modules.extend(_find_modules_and_directories(directory)) return modules
5,357,384
def UniqueLattice(lattice_vectors, ind):
    """ Takes `lattice_vectors`, called as `lattice_vectors(index, ind)` to obtain the
    two lattice-vector tuples, and `ind`, a list with the genes of an individual.
    Returns a list with two tuples, representing the equivalent lattice vectors with
    the smallest cell circumference. """
    x_1 = lattice_vectors(0, ind)
    x_2 = lattice_vectors(1, ind)

    lattices = [[(x_1[0]+x_2[0] if (x_1[0]+x_2[0]) > 0 else (x_1[0]-x_2[0]), x_1[1]+x_2[1] if (x_1[1]+x_2[1]) > 0 else x_1[1]-x_2[1]), x_2],
                [(x_1[0]-x_2[0] if (x_1[0]-x_2[0]) > 0 else x_1[0]+x_2[0], x_1[1]-x_2[1] if (x_1[1]-x_2[1]) > 0 else x_1[1]+x_2[1]), x_2],
                [x_1, (x_1[0]+x_2[0] if (x_1[0]+x_2[0]) > 0 else x_1[0]-x_2[0], x_1[1]+x_2[1] if (x_1[1]+x_2[1]) > 0 else x_1[1]-x_2[1])],
                [x_1, (x_1[0]-x_2[0] if (x_1[0]-x_2[0]) > 0 else x_1[0]+x_2[0], x_1[1]-x_2[1] if (x_1[1]-x_2[1]) > 0 else x_1[1]+x_2[1])]]

    lattice_radius = []
    for lat in lattices:
        point_1 = lat[0]
        point_2 = lat[1]
        # Circumcentre of the triangle (origin, point_1, point_2):
        # m_a is the slope of the chord point_1 -> point_2,
        # m_b is the slope of the chord origin -> point_2.
        m_a = (point_2[1]-point_1[1])/(point_2[0]-point_1[0])
        m_b = point_2[1]/point_2[0]
        # Parentheses around the denominator are required; without them the
        # expression would be divided by 2 and then multiplied by (m_b - m_a).
        x = (m_a*m_b*(point_1[1]) + m_b*(point_1[0]+point_2[0]) - m_a*(point_2[0])) / (2*(m_b-m_a))
        # Perpendicular bisector of point_1 -> point_2 through its midpoint.
        y = (-1 / m_a) * (x - (point_1[0]+point_2[0])/2) + (point_1[1]+point_2[1])/2
        radius_1 = np.sqrt((x-point_1[0])**2 + (y-point_1[1])**2)
        radius_2 = np.sqrt((x-point_2[0])**2 + (y-point_2[1])**2)
        if radius_1 >= radius_2:
            lattice_radius.append(radius_1)
        else:
            lattice_radius.append(radius_2)
    return lattices[lattice_radius.index(min(lattice_radius))]
5,357,385
def register_errors(app: Flask):
    """Register the required error handlers on the Flask application instance `app`."""

    @app.errorhandler(400)  # Bad Request: the request syntax is invalid and the server cannot understand it
    def bad_request(e):
        return render_template('error.html', description=e.description, code=e.code), 400

    @app.errorhandler(404)  # Not Found: the server cannot find the requested resource (page)
    def page_not_found(e):
        return render_template('error.html', description=e.description, code=e.code), 404

    @app.errorhandler(500)  # Internal Server Error: the server hit an internal error and cannot complete the request
    def internal_server_error(e):
        return render_template('error.html', description="Internal server error, the request could not be completed!", code="500"), 500

    @app.errorhandler(CSRFError)  # CSRF validation failed
    def csrf_error_handle(e):
        return render_template('error.html', description=e.description, code=e.code), 400
5,357,386
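A minimal sketch of hooking `register_errors` above into an application; it assumes an `error.html` template that accepts `description` and `code`, and that CSRFError and render_template are imported in the same module as the function.

from flask import Flask

app = Flask(__name__)
register_errors(app)  # 400/404/500 and CSRF errors now render error.html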
def websafe_encode(data): """Encodes a byte string into websafe-base64 encoding. :param data: The input to encode. :return: The encoded string. """ return urlsafe_b64encode(data).replace(b'=', b'').decode('ascii')
5,357,387
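A quick check of `websafe_encode` above: standard urlsafe base64 with the `=` padding stripped.

from base64 import urlsafe_b64decode

encoded = websafe_encode(b"hello")
print(encoded)  # aGVsbG8

# Decoding needs the padding restored first.
padded = encoded + "=" * (-len(encoded) % 4)
print(urlsafe_b64decode(padded))  # b'hello'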
def gravatar(email: Union[str, list]) -> str:
    """Converts the e-mail address provided into a gravatar URL.

    Accepts a single address or a list of addresses; the first valid address
    (in sorted order) is used.

    Args:
        email: e-mail address (or list of addresses) to convert.

    Returns:
        Gravatar URL, or None if no valid e-mail address is found.
    """
    if email is None:
        email = []
    elif isinstance(email, str):
        email = [email]
    email.sort()
    for _email in email:
        if validators.email(_email):
            return gravatar_url(_email)
    return None
5,357,388
def test_cutmix_batch_fail7(): """ Test CutMixBatch op We expect this to fail because labels are not in one-hot format """ logger.info("test_cutmix_batch_fail7") # CutMixBatch Images data1 = ds.Cifar10Dataset(DATA_DIR, num_samples=10, shuffle=False) cutmix_batch_op = vision.CutMixBatch(mode.ImageBatchFormat.NHWC) data1 = data1.batch(5, drop_remainder=True) data1 = data1.map(input_columns=["image", "label"], operations=cutmix_batch_op) with pytest.raises(RuntimeError) as error: images_cutmix = np.array([]) for idx, (image, _) in enumerate(data1): if idx == 0: images_cutmix = image else: images_cutmix = np.append(images_cutmix, image, axis=0) error_message = "CutMixBatch: Label's must be in one-hot format and in a batch" assert error_message in str(error.value)
5,357,389
def bar_2_MPa(value):
    """
    converts pressure in bar to MPa

    :param value: pressure value in bar
    :return: pressure value in MPa
    """
    return value * const.bar / const.mega
5,357,390
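A worked check of `bar_2_MPa` above, assuming the module-level `const` is `scipy.constants` (whose `bar` is 1e5 Pa and `mega` is 1e6): 1 bar is 10^5 Pa, so 10 bar is exactly 1 MPa.

print(bar_2_MPa(10))   # 1.0
print(bar_2_MPa(2.5))  # 0.25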
def poll_all_bme680(bme_config, bme_sensor, pi_id, pi_name, engine, mqtt_client=None):
    """
    Poll all bme680 sensors listed in the config file for this pi
    Save resulting records to the database specified by `engine`
    """
    if bme_sensor is not None:
        for location, details in bme_config.iterrows():
            bme_pin = int(details.pin)
            data = poll_bme680(bme_sensor, bme_pin)
            data = add_local_pi_info(data, pi_id, pi_name, location)
            save_readings_to_db(data, engine)
            if mqtt_client and data:
                publish_readings_from_dict(data, location, mqtt_client)
5,357,391
def walk(x, y, model, theta, conditions=None, var2=0.01,
         mov=100, d=1, tol=1e-3, mode=True):
    """Executes the walker implementation.

    Parameters
    ----------
    x : np.ndarray
        An $(m, n)$ dimensional array for (cols, rows).
    y : np.ndarray
        An $n$ dimensional array that will be compared with model's output.
    model : function
        A Python function defined by the user. This function should receive
        two arguments $(x, theta)$.
    theta : np.ndarray
        The array containing the model's parameters.
    conditions : list
        A list containing $2n$-conditions for the (min, max) range of the $n$
        parameters.
    var2 : float
        Variance parameter passed to the likelihood function. By default it
        is set to `0.01`.
    mov : int
        Number of movements that walker will perform. By default it is set
        to `100`.
    d : float
        Size of the Gaussian step for the walker.
    tol : float
        Convergence criterion for the log-likelihood. By default it is set
        to `1e-3`.
    mode : bool
        By default it is set to `True`.

    Returns
    -------
    theta : np.array
        An ndarray with the updated theta values.
    nwalk : list
        Updates of theta for each movement performed by the walker.
    y0 : float
        The log-likelihood value.
    """
    greach = False
    nwalk = []

    for i in range(mov):
        nwalk.append(theta)
        theta_new = update_theta(theta, d)
        if not greach:
            y0 = fun_like(x, y, model, theta, conditions, var2)
            y1 = fun_like(x, y, model, theta_new, conditions, var2)
            if y0 <= tol and mode:
                print('Goal reached!')
                greach = True
                return theta, nwalk, y0
            else:
                if y1 <= tol and mode:
                    print('Goal reached!')
                    greach = True
                    return theta_new, nwalk, y1
                else:
                    ratio = y0 / y1
                    boltz = np.random.rand(1)
                    prob = np.exp(-ratio)
                    if y1 < y0:
                        theta = theta_new
                        theta_new = update_theta(theta, d)
                    else:
                        if prob > boltz:
                            theta = theta_new
                            theta_new = update_theta(theta, d)
                        else:
                            theta_new = update_theta(theta, d)
    if mode:
        print('Maximum number of iterations reached!')
        print(f'The log-likelihood is: {y0}')
    return theta, nwalk, y0
5,357,392
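A hedged usage sketch for `walk` above, fitting a straight line. The helpers `update_theta` and `fun_like` are referenced but not shown here, and the layout of `conditions` is an assumption, so this only illustrates the call signature.

import numpy as np

def linear_model(x, theta):
    # theta = [slope, intercept]
    return theta[0] * x + theta[1]

x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + np.random.normal(0, 0.1, x.size)

theta0 = np.array([1.0, 0.0])
conditions = [-5.0, 5.0, -5.0, 5.0]  # assumed (min, max) per parameter -- check fun_like
theta_best, trace, loglike = walk(x, y, linear_model, theta0,
                                  conditions=conditions, mov=500, d=0.1)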
def izbor_letov():
    """Main page."""
    # Get the user and any message from the cookie
    (username, ime, priimek) = get_potnik()
    c.execute("SELECT distinct drzava FROM lokacija ORDER BY drzava")
    drzave = c.fetchall()
    drzava_kje = bottle.request.forms.drzava_kje
    mesto_kje = bottle.request.forms.mesto_kje
    letalisce_kje = bottle.request.forms.letalisce_kje
    drzava_kam = bottle.request.forms.drzava_kam
    mesto_kam = bottle.request.forms.mesto_kam
    letalisce_kam = bottle.request.forms.letalisce_kam
    if "None" in [drzava_kje, mesto_kje, letalisce_kje, drzava_kam, mesto_kam, letalisce_kam]:
        return bottle.template("main.html", ime=ime, username=username,
                               napaka="Please fill in all the fields!", drzave=drzave)
    elif letalisce_kje == letalisce_kam:
        return bottle.template("main.html", ime=ime, username=username,
                               napaka="The departure and arrival airports must differ; please fill in the form again.",
                               drzave=drzave)
    else:
        izbor = get_leti(letalisce_kje, letalisce_kam, drzava_kje, drzava_kam)
        leti_mesto = get_leti_mesto(mesto_kje, drzava_kje, mesto_kam, drzava_kam)
        leti_mesto_drzava = get_leti_mesto_drzava(mesto_kje, drzava_kje, mesto_kam, drzava_kam)
        if izbor == []:
            return bottle.template("leti.html", ime=ime, username=username,
                                   letalisce_kje=letalisce_kje, letalisce_kam=letalisce_kam,
                                   napaka="There are no known flights for the route \""+letalisce_kje+" ("+mesto_kje+", "+drzava_kje+") : "+letalisce_kam+" ("+mesto_kam+", "+drzava_kam+")\". Please try again with another nearby airport.",
                                   leti_mesto=leti_mesto, leti_mesto_drzava=leti_mesto_drzava, izbor=izbor)
        else:
            return bottle.template("leti.html", ime=ime, username=username,
                                   letalisce_kje=letalisce_kje, letalisce_kam=letalisce_kam,
                                   napaka=None, leti_mesto_drzava=leti_mesto_drzava,
                                   izbor=izbor, leti_mesto=leti_mesto)
5,357,393
def generate_data_from_cvs(csv_file_paths):
    """Generate data from a list of CSV specifications.

    Each item in `csv_file_paths` holds a path to a CSV file, a column name,
    and a label.

    `csv_file_paths`: A list of (CSV file path, column name, label) items
    """
    data = []
    for item in csv_file_paths:
        values = read_csv(item[0], item[1])
        data.append([
            item[2],
            values
        ])
    return data
5,357,394
def log_density_gaussian(x, mu, logvar):
    """Calculates log density of a gaussian.

    Parameters
    ----------
    x: torch.Tensor or np.ndarray or float
        Value(s) at which to evaluate the log density.

    mu: torch.Tensor or np.ndarray or float
        Mean.

    logvar: torch.Tensor or np.ndarray or float
        Log variance.
    """
    normalization = - 0.5 * (math.log(2 * math.pi) + logvar)
    inv_var = torch.exp(-logvar)
    log_density = normalization - 0.5 * ((x - mu)**2 * inv_var)
    return log_density
5,357,395
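A small check of `log_density_gaussian` above: at x equal to mu the quadratic term vanishes, so the log density reduces to the normalisation constant -0.5*(log(2*pi) + logvar).

import torch

x = torch.tensor([0.0, 1.0])
mu = torch.tensor([0.0, 0.0])
logvar = torch.zeros(2)  # unit variance

print(log_density_gaussian(x, mu, logvar))
# tensor([-0.9189, -1.4189]) i.e. -0.5*log(2*pi) and -0.5*log(2*pi) - 0.5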
def parse_path(path: Optional[str] = None, root: str = '/') \ -> Iterator[str]: """Parse PATH variable :param path: PATH string to parse, default to the ``PATH`` environment variable :param root: Path to prepend to all paths found :return: Iterator over the processed paths """ if path is None: path = os.environ.get('PATH') if path is None: return logger.debug("Parsing path %s", path) for k in path.split(':'): if not k: yield normpath(os.getcwd()) else: yield normpath(root + '/' + k)
5,357,396
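A couple of concrete calls to `parse_path` above, assuming the module-level `normpath` is `os.path.normpath`; note that an empty PATH component expands to the current working directory, per POSIX convention.

import os

print(list(parse_path("/usr/bin:/usr/local/bin")))
# ['/usr/bin', '/usr/local/bin']

# An empty leading component maps to the current working directory.
print(list(parse_path(":/usr/bin")))
# [os.path.normpath(os.getcwd()), '/usr/bin']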
def invalid_hexadecimal(statement):
    """Identifies a problem caused by an invalid character in a hexadecimal number."""
    if statement.highlighted_tokens:  # Python 3.10
        prev = statement.bad_token
        wrong = statement.next_token
    else:
        prev = statement.prev_token
        wrong = statement.bad_token

    if not (prev.immediately_before(wrong) and prev.string.lower().startswith("0x")):
        return {}

    hint = _("Did you make a mistake in writing a hexadecimal integer?\n")
    cause = _(
        "It looks like you used an invalid character (`{character}`) in a hexadecimal number.\n\n"
        "Hexadecimal numbers are base 16 integers that use the symbols `0` to `9`\n"
        "to represent values 0 to 9, and the letters `a` to `f` (or `A` to `F`)\n"
        "to represent values 10 to 15.\n"
        "In Python, hexadecimal numbers start with either `0x` or `0X`,\n"
        "followed by the characters used to represent the value of that integer.\n"
    ).format(character=wrong.string[0])
    return {"cause": cause, "suggest": hint}
5,357,397
def test_multi_regex_text_strip(): """Test with multiple character regex delimiter""" std_tests_strip('!@', re.compile(r'!@')) std_tests_strip('!@', re.compile(r'!@'), block_size=1)
5,357,398
async def clear(ctx): """Clears requests made by you. Also removes stale requests.""" if not cog_Admin.ADMIN_bot_enabled: return clear_requests(remove_all=True) s = ctx.message.author.name + ', your requests and all stale requests have been removed.' await bot.say(s)
5,357,399