content: string (lengths 35–762k)
sha1: string (length 40)
id: int64 (0–3.66M)
def trunc_artist(df: pd.DataFrame, artist: str, keep: float = 0.5, random_state: int = None): """ Keeps only the requested portion of songs by the artist (this method is not in use anymore) """ data = df.copy() df_artist = data[data.artist == artist] data = data[data.artist != artist] orig_length = len(df_artist) try: df_artist = df_artist.sample(int(len(df_artist) * keep), random_state=random_state) except ValueError: pass new_length = len(df_artist) print("Truncating data for {artist}, original length = {orig}, new length = {new}".format(artist=artist, orig=orig_length, new=new_length)) data = pd.concat([data, df_artist]) return data.reset_index(drop=True)
7157e223bdf87d0463820565e40eade3e1725ae5
3,658,400
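# A minimal usage sketch for trunc_artist above, assuming pandas is installed
# and the function is in scope; the toy frame and artist name are illustrative.
import pandas as pd

songs = pd.DataFrame({"artist": ["a"] * 4 + ["b"] * 2, "title": list("ABCDEF")})
reduced = trunc_artist(songs, "a", keep=0.5, random_state=0)
print(len(reduced[reduced.artist == "a"]))  # 2 of the 4 songs by "a" remain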
async def test_postprocess_results(original, expected): """Test Application._postprocess_results.""" callback1_called = False callback2_called = False app = Application("testing") @app.result_postprocessor async def callback1(app, message): nonlocal callback1_called callback1_called = True return message + 1 @app.result_postprocessor async def callback2(app, message): nonlocal callback2_called callback2_called = True # Nothing is returned out of Application._postprocess_results so # the assertion needs to happen inside a callback. assert message == expected await app._postprocess_results([original]) assert callback1_called assert callback2_called
9c2a6bdfcb281d62959135be01693baaaf266780
3,658,401
def toContinuousCategory( oX: pd.DataFrame, features: list = [], drop: bool = True, int_: bool = True, float_: bool = True, quantile: bool = True, nbin: int = 10, inplace: bool = True, verbose: bool = True, ) -> pd.DataFrame: """ Transforms any float or continuous integer values of a pandas dataframe to category values. Parameters: oX: dataset Keywords: features: default: [] The column names to be transformed from continuous to category. drop: default: True If True then the original feature/column will be removed. int_: default: True Set int_=False if integer values are not continuous and should not be transformed into category. float_: default: True Set float_=False if float values are not continuous and should not be transformed into category. quantile: default: True Use quantile bins. Quantile binning is similar to min-max scaling: v/(maxy-miny); it works on any scale. If False, use fixed-width bins. nbin: default: 10 ``nbin`` can be an integer for the number of bins, an array of quantiles, e.g. [0, .25, .5, .75, 1.], or an array of fixed-width bin boundaries, i.e. [0., 4., 10, 100]. verbose: default: True True: output False: silent inplace: default: True True: replace 1st argument with resulting dataframe False: return a new dataframe, leaving the dataframe X unchanged Returns: pd.DataFrame Raises: TypeError("requires boolean type.") Note: Binning, also known as quantization, is used for transforming continuous numeric features (``np.number`` type) into ``category`` type. These categories group the continuous values into bins. Each bin represents a range of continuous numeric values. Specific strategies of binning data include fixed-width (``quantile=False``) and adaptive binning (``quantile=True``). Datasets that are used as ``train``, ``valid``, and ``test`` must have the same bin widths and labels and thus the same categories. Assumes **paso** data cleaning steps (such as removal of Null and NA values) have already been applied. Fixed-width binning only works, WITHOUT SCALING, with datasets with multiple features for tree-based models such as CART, random forest, xgboost, lightgbm, catboost, etc. Deep learning using neural nets won't work. Quantile binning is similar to min-max scaling: v/(maxy-miny) works on any scale. **Statistical problems with linear binning.** Binning increases type I and type II error; (a simple proof is that as the number of bins approaches infinity, information loss approaches zero). In addition, changing the number of bins will alter the bin distribution shape, unless the distribution is uniformly flat. **Quantile binning can only be used with a singular data set.** Transforming a continuous feature into a category feature based on percentiles (QUANTILES) is WRONG if you have train and test data sets. Quantiles are based on the data set and will be different unless each data set's distribution is equal. In the limit where there are only two bins, almost no relationship can be modeled; we are essentially doing a t-test. **If there are nonlinear or even nonmonotonic relationships between features**, and you need linear binning, not quantile, use ``quantile=False`` and specify the bin width (``delta``) or fixed bin boundaries of any distribution of cuts you wish with ``nbin`` = [ cut-1, cut-2...cut-n ] **If you want quantile binning.** Despite the above warnings, your use case may require quantile binning. Quantile-based binning is a fairly good strategy to use for adaptive binning. Quantiles are specific values or cut-points which partition the continuous valued distribution of a feature into discrete contiguous bins or intervals. Thus, q-quantiles partition a numeric attribute into q equal (percentage-width) partitions. Well-known examples of quantiles include the 2-quantile, the median, which divides the data distribution into two equal (percentage-width) bins; 4-quantiles, the standard quartiles, four equal (percentage-width) bins; and 10-quantiles, deciles, ten equal (percentage-width) bins. **You may want to look for outliers AFTER applying a Gaussian transformation.** """ _fun_name = toContinuousCategory.__name__ # todo put in decorator if inplace: X = oX else: X = oX.copy() validate_bool_kwarg(int_, "int_") validate_bool_kwarg(float_, "float_") # handles float, continuous integer. set int_=False if not continuous # any other dataframe value type left as is. if features == []: features = X.columns for nth, feature in enumerate(features): if (float_ and X[feature].dtype == float) or (int_ and X[feature].dtype == int): nbin = _must_be_list_tuple_int(nbin) if quantile: # quantile is similar to min-max scaling: v/(maxy-miny) # works on any scale X[feature + "q"] = pd.qcut(X[feature], nbin, duplicates="drop") else: # fixed-width bin, only works, WITHOUT SCALING, with datasets with multiple features # for tree-based models such as CART, random forest, xgboost, lightgbm, X[feature + "fw"] = pd.cut(X[feature], nbin, duplicates="drop") # drop original features; no drop when int_=False or float_=False if drop: X.drop(features, axis=1, inplace=True) if verbose: logger.info("{} features:: {}".format(_fun_name, features)) return X
bc8bc9c339d998e4e3c337a039421ec835a6f16f
3,658,402
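# A short standalone sketch of the two binning strategies toContinuousCategory
# delegates to pd.qcut/pd.cut; the data is illustrative.
import pandas as pd

values = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 100])
print(pd.qcut(values, 3, duplicates="drop"))  # adaptive: roughly equal counts per bin
print(pd.cut(values, 3, duplicates="drop"))   # fixed width: the outlier skews the bins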
def task_migrate(): """Create django databases""" return { 'actions': ['''cd CCwebsite && python3 manage.py migrate'''] }
d0d146c2e628abbe33714ae0ff6a546aab9842cc
3,658,403
import numpy def distance_to_arc(alon, alat, aazimuth, plons, plats): """ Calculate the closest distance between a great circle arc and a point (or a collection of points). :param float alon, alat: Arc reference point longitude and latitude, in decimal degrees. :param aazimuth: Arc azimuth (an angle between direction to north and the arc in clockwise direction), measured at the reference point, in decimal degrees. :param float plons, plats: Longitudes and latitudes of points to measure distance. Either scalar values or numpy arrays of decimal degrees. :returns: Distance in km, a scalar value or numpy array depending on ``plons`` and ``plats``. A distance is negative if the target point lies on the right hand side of the arc. Solves a spherical triangle formed by the reference point, the target point and a projection of the target point to the reference great circle arc. """ azimuth_to_target = azimuth(alon, alat, plons, plats) distance_to_target = geodetic_distance(alon, alat, plons, plats) # find an angle between an arc and a great circle arc connecting # arc's reference point and a target point t_angle = (azimuth_to_target - aazimuth + 360) % 360 # in a spherical right triangle cosine of the angle of a cathetus # augmented to pi/2 is equal to sine of an opposite angle times # sine of hypotenuse, see # http://en.wikipedia.org/wiki/Spherical_trigonometry#Napier.27s_Pentagon angle = numpy.arccos( (numpy.sin(numpy.radians(t_angle)) * numpy.sin(distance_to_target / EARTH_RADIUS)) ) return (numpy.pi / 2 - angle) * EARTH_RADIUS
e8868a2ce9125cc75e587a8a408f5b479b6a198a
3,658,404
def model_predict(test_data: FeatureVector): """ Endpoint to make a prediction with the model. The endpoint `model/train` should have been used before this one. Args: test_data (FeatureVector): A single feature vector """ try: y_predicted = api.ml_model.predict_proba(test_data.to_numpy()) except NotFittedError: raise HTTPException( status_code=500, detail="This LogisticRegression instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\nUse `model/train` endpoint with 10 examples before", ) y_pred_label = np.argmax(y_predicted, axis=1).astype(np.int32) y_pred_score = np.max(y_predicted, axis=1) return Prediction(label=y_pred_label, probability=y_pred_score)
c8b473d09092e03be85e986287350dd3115cf88d
3,658,405
import os def print_header(args, argv, preamble='CIFAR10', printfn=print, log=open(os.devnull, 'w'), first=('model','dataset','epoch','batchsize','resume','out')): """ Prints the arguments and header, and returns a logging print function """ def logprint(*args, file=log, **kwargs): if printfn: printfn(*args, **kwargs) print(*args, file=file, **kwargs) file.flush() vargs = vars(args) args_sorted = sorted(vargs.items()) logprint('{' + ', '.join("'{}':{}".format(k,repr(v)) for k,v, in args_sorted) + '}') logprint(' '.join(argv)) logprint('') logprint(preamble) logprint('') logprint('Arguments: ') def print_arg(arg): logprint(' {:20}: {},'.format("'%s'"%arg,repr(vargs[arg]))) for arg in first: print_arg(arg) logprint('') for arg,_ in args_sorted: if arg in first: continue print_arg(arg) logprint('') return logprint
c1213f441696dbabedafe9888a681cf64bab4249
3,658,406
def search_folders(project, folder_name=None, return_metadata=False): """Folder name based case-insensitive search for folders in project. :param project: project name :type project: str :param folder_name: folder name to search for. If None, all the folders in the project will be returned. :type folder_name: str :param return_metadata: return metadata of folders instead of names :type return_metadata: bool :return: folder names or metadata :rtype: list of strs or dicts """ if not isinstance(project, dict): project = get_project_metadata_bare(project) team_id, project_id = project["team_id"], project["id"] result_list = [] params = { 'team_id': team_id, 'project_id': project_id, 'offset': 0, 'name': folder_name, 'is_root': 0 } total_folders = 0 while True: response = _api.send_request( req_type='GET', path='/folders', params=params ) if not response.ok: raise SABaseException( response.status_code, "Couldn't search folders " + response.text ) response = response.json() results_folders = response["data"] for r in results_folders: if return_metadata: result_list.append(r) else: result_list.append(r["name"]) total_folders += len(results_folders) if response["count"] <= total_folders: break params["offset"] = total_folders return result_list
cf8a9d95efcdb90d0891ef4ca588edf6375ed2af
3,658,407
def tempo_para_percorrer_uma_distancia(distancia, velocidade): """ Receives a distance and a movement speed, and returns the hours it would take to travel that distance in a straight line""" horas = distancia / velocidade return round(horas,2)
e7754e87e010988284a6f89497bb1c5582ea0e85
3,658,408
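# Usage sketch for tempo_para_percorrer_uma_distancia, assuming the function
# above is in scope: 150 km at 60 km/h.
print(tempo_para_percorrer_uma_distancia(150, 60))  # 2.5 (hours)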
import math def getCorrection(start, end, pos): """Correct the angle for the trajectory adjustment Function to get the correct angle correction when the robot deviates from its estimated trajectory. Args: start: The starting position of the robot. end: The position the robot is supposed to arrive. pos: The current position of the robot. Returns: An angle in radians between -pi and pi to correct the robot trajectory and arrive successfully at end position. """ (xs, ys) = start (xe, ye) = end (xp, yp) = pos # Discard edge cases with no sense assert(xs != xe or ys != ye) assert(xp != xe or yp != ye) assert(xs != xp or ys != yp) # First get the line equation from start to end points. # line equation follows the following pattern: y = m * x + b m = 0.0 b = 0.0 if abs(xe - xs) > PRECISION: m = (ye - ys) / (xe - xs) b = ys - m * xs else: m = 1 b = - xs # Get the perpendicular line equation to the first line mp = 0.0 bp = 0.0 if abs(xe - xs) < PRECISION: bp = yp elif abs(m) < PRECISION: mp = 1 bp = - xp else: mp = - 1 / m bp = yp - mp * xp # Get the point at the intersection of the two lines xi = 0.0 yi = 0.0 if abs(xe - xs) < PRECISION: xi = b yi = bp elif abs(m) < PRECISION: xi = bp yi = b else: xi = - (bp - b) / (mp - m) yi = m * xi + b # Get the distance between the three points dist_pi = math.sqrt((xp - xi) * (xp - xi) + (yp - yi) * (yp - yi)) dist_pe = math.sqrt((xp - xe) * (xp - xe) + (yp - ye) * (yp - ye)) dist_sp = math.sqrt((xs - xp) * (xs - xp) + (ys - yp) * (ys - yp)) # Get the offset angles alpha and beta alpha = math.asin(dist_pi / dist_pe) beta = math.asin(dist_pi / dist_sp) return - (alpha + beta)
9f1073cb4c071abfecac20c85c56e5fb1638de6e
3,658,409
import logging def main(input_filepath, output_filepath): """ Runs data processing scripts to turn raw data from (../raw) into cleaned data ready to be analyzed (saved in ../processed). """ logger = logging.getLogger(__name__) logger.info('making final data set from raw data...') df = load_csv_file_to_df(input_filepath) df = handle_na_and_duplicates(df) df = clean_dataframe(df) df = organize_columns(df) df = concat_abilities(df) out_str = create_monsters_string(df) create_text_output_file(out_str, output_filepath) logger.info('Output file created!') return None
fe799a34f9cb5811228853469dbff92592a87e69
3,658,410
def string2symbols(s): """ Convert string to list of chemical symbols. Args: s: chemical formula string, e.g. 'H2SO4' or 'Ca(OH)2'. Returns: list of element symbols with all multipliers and groups expanded. """ i = None n = len(s) if n == 0: return [] c = s[0] if c.isdigit(): i = 1 while i < n and s[i].isdigit(): i += 1 return int(s[:i]) * string2symbols(s[i:]) if c == "(": p = 0 for i, c in enumerate(s): if c == "(": p += 1 elif c == ")": p -= 1 if p == 0: break j = i + 1 while j < n and s[j].isdigit(): j += 1 if j > i + 1: m = int(s[i + 1 : j]) else: m = 1 return m * string2symbols(s[1:i]) + string2symbols(s[j:]) if c.isupper(): i = 1 if 1 < n and s[1].islower(): i += 1 j = i while j < n and s[j].isdigit(): j += 1 if j > i: m = int(s[i:j]) else: m = 1 return m * [s[:i]] + string2symbols(s[j:]) else: raise ValueError
1f08ba5c02536f4b67c9bd573c0dde8fbe46dc74
3,658,411
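# Usage sketch for string2symbols, assuming the function above is in scope;
# multipliers and parenthesized groups expand recursively.
print(string2symbols("H2SO4"))    # ['H', 'H', 'S', 'O', 'O', 'O', 'O']
print(string2symbols("Ca(OH)2"))  # ['Ca', 'O', 'H', 'O', 'H']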
def coe2rv(a, e, i, node, w, v, MU=Earth.mu, degrees=True): """Given the classical orbital elements (a, e, i, node, w, v), this returns the position (R) and the velocity (V) in an ECI frame - Semimajor-axis (a)[km]: orbit size - Eccentricity (e): orbit shape (0=circle, 1=line) - Inclination (i)[deg]: inclination of the orbital plane, measured at the ascending node - Argument of Perigee (w)[deg]: orbit orientation - Ascending Node (Omega)[deg]: location of ascending node - True Anomaly (v)[deg]: location of satellite in orbit relative to perigee - Mean Anomaly (M)[deg]: fictitious angle that varies linearly with time return: R(x,y,z)[km], V(x,y,z)[km/sec] """ # MU = MU/1000/1000/1000 # FIXME? if degrees: i *= deg2rad node *= deg2rad w *= deg2rad v *= deg2rad p = a*(1-e**2) # p = semi-latus rectum (semiparameter) R = np.zeros(3) V = np.zeros(3) sv = sin(v) cv = cos(v) det = 1/(1+e*cv) smup = np.sqrt(MU/p) ### Position Coordinates in Perifocal Coordinate System # R[0] = p*cv / (1+e*cv) # x-coordinate (km) # R[1] = p*sv / (1+e*cv) # y-coordinate (km) # R[2] = 0 # z-coordinate (km) # V[0] = -sqrt(MU/p) * sv # velocity in x (km/s) # V[1] = sqrt(MU/p) * (e+cv) # velocity in y (km/s) # V[2] = 0 # velocity in z (km/s) R[0] = p*cv * det # x-coordinate (km) R[1] = p*sv * det # y-coordinate (km) R[2] = 0 # z-coordinate (km) V[0] = -smup * sv # velocity in x (km/s) V[1] = smup * (e+cv) # velocity in y (km/s) V[2] = 0 # velocity in z (km/s) r313 = R313(-node, -i, -w) # Perifocal -> xyz R = r313.dot(R) V = r313.dot(V) return (R,V,)
489ba6c1e484fa054063dddbeff5b686b35c0458
3,658,412
import csv from collections import Counter def get_dictionary(filename, dict_size=2000): """ Read the tweets and return a list of the 'dict_size' most common words. """ all_words = [] with open(filename, 'r') as csv_file: r = csv.reader(csv_file, delimiter=',', quotechar='"') for row in r: tweet = row[3] if len(tweet) <= MAX_TWEET_CHARS: words = preprocess(tweet).split() all_words += words # Make the dictionary out of only the N most common words word_counter = Counter(all_words) dictionary, _ = zip(*word_counter.most_common(min(dict_size, len(word_counter)))) return dictionary
20917b0c9cda18d5436b438e0cdcf0c83d464899
3,658,413
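# The core of get_dictionary is Counter.most_common; a standalone sketch with
# illustrative data.
from collections import Counter

words = "to be or not to be".split()
dictionary, _ = zip(*Counter(words).most_common(3))
print(dictionary)  # ('to', 'be', 'or')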
def find_last_index(l, x): """Returns the last index of element x within the list l""" for idx in reversed(range(len(l))): if l[idx] == x: return idx raise ValueError("'{}' is not in list".format(x))
f787b26dd6c06507380bf2e336a58887d1f1f7ea
3,658,414
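# Usage sketch for find_last_index, assuming the function above is in scope.
print(find_last_index([1, 2, 3, 2], 2))  # 3
# (equivalently, without an explicit loop: len(l) - 1 - l[::-1].index(x))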
import requests import zipfile import io from xml.etree import ElementTree def download_query_alternative(user, password, queryid, batch_size=500): """ This is an alternative implementation of the query downloader. The original implementation only used a batch size of 20 as this allowed for using plain LOC files. Unfortunately this is a bit slow and causes more load on the web server due to a lot of small requests. With the modified implementation, the batch size can be chosen by the user. This is accomplished by using an in-memory extraction of the downloaded ZIP file. Additionally this code uses an XML parser instead of a regex to retrieve the data. :param user: The name of the user to log in with. :type user: str :param password: The password to use for the login. :type password: str :param queryid: The ID of the search query to retrieve the cache codes for. :type queryid: int :param batch_size: The batch size to use for the requests. This must at least be 1 and cannot exceed 500. The upper bound is due to the limits used by the Opencaching.de site. :type batch_size: int :return: The list of cache codes retrieved from the query. :rtype: list[str] :raises ValueError: Some of the input values are invalid. """ # Check the specified batch size. if not 0 < batch_size <= 500: raise ValueError("Invalid batch size.") # Use a custom header. headers = { "User-agent": "opencaching-de_statistics " + "[https://github.com/FriedrichFroebel/opencaching-de_statistics]" } # Try to log in. session = requests.Session() response = session.post( "https://www.opencaching.de/login.php", data={ "action": "login", "target": "query.php", "email": user.encode("utf-8"), "password": password.encode("utf-8"), }, headers=headers, ) # Check if the login has been successful. if "32x32-search.png" not in response.text: raise ValueError("Login failed (bad response).") # Prepare our status variables. oc_codes = [] batch_start = 0 while True: # Build the current URL, then retrieve the data. # In contrast to the original version, we enforce ZIP files here. url = ( f"https://www.opencaching.de/search.php?queryid={queryid}&output=loc" + f"&startat={batch_start}&count={batch_size}&zip=1" ) response = session.get(url, headers=headers) # Check if the request has been successful. # If there has been an error, return the list of OC codes found until now. if response.status_code != 200: print(f"-- Terminating due to bad status code: {response.status_code}") break # Check if we got a ZIP file (in fact this should always be the case). # The check uses the magic number for non-empty ZIP archives, applied to the # raw bytes to avoid text-decoding issues. xml_data = None if response.content.startswith(b"PK\x03\x04"): # This is a zip file, so uncompress it. zip_file = zipfile.ZipFile(io.BytesIO(response.content)) # The ZIP files normally have one file only, so we just retrieve the first # one here. files = zip_file.namelist() if files: filename = files[0] xml_data = zip_file.read(filename) # If this is not a ZIP file or the ZIP file has no content, assume that it has # been a plain XML file. if not xml_data: xml_data = response.text # Parse the XML data. tree = ElementTree.fromstring(xml_data) # Get the name tags from the XML tree and retrieve the ID attribute for this # tag. # If the ID attribute is missing, the corresponding entry will be `None`. new_oc_codes = [name.get("id") for name in tree.iter("name")] # Remove all the `None` elements. new_oc_codes = list(filter(None, new_oc_codes)) # We have reached the end of the results. if not new_oc_codes: break # Add the new codes to the existing list and move on to the next request. oc_codes = oc_codes + new_oc_codes batch_start += batch_size return oc_codes
2de7c3b453809c86093d1884438613985f7041b3
3,658,415
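# A standalone sketch of the XML step used above: LOC documents list caches as
# <name id="..."> tags. The sample document here is illustrative, not the real
# Opencaching.de LOC schema.
from xml.etree import ElementTree

loc = '<loc version="1.0"><waypoint><name id="OC1234"/></waypoint></loc>'
tree = ElementTree.fromstring(loc)
print([name.get("id") for name in tree.iter("name")])  # ['OC1234']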
def parse_template(templ_str, event): """ Parses a template string and find the corresponding element in an event data structure. This is a highly simplified version of the templating that is supported by the Golang template code - it supports only a single reference to a sub element of the event structure. """ matches = TEMPLATE_RE.search(templ_str) tokens = matches.group(1).split('.') ref = event loc = [] for token in tokens: token = token.strip() # Skip the blank tokens if not token: continue if token not in ref: disp_loc = "event" + ''.join(["['{}']".format(_) for _ in loc]) err = "Could not find '{}' in {}".format(token, disp_loc) raise RuntimeError(err) ref = ref[token] loc.append(token) return ref
ec5c3822c390cbb4beff6428b91cd8b12157f2e3
3,658,416
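# Usage sketch for parse_template, assuming TEMPLATE_RE is a module-level
# pattern like the one below (an assumption; the real pattern lives elsewhere).
import re

TEMPLATE_RE = re.compile(r"\{\{(.*?)\}\}")
event = {"payload": {"user": "alice"}}
print(parse_template("{{ .payload.user }}", event))  # alice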
import time def current_time_hhmm() -> str: """ Uses the time library to get the current UTC time in hours and minutes Args: None Returns: time.strftime("%H:%M", time.gmtime()) (str): Current time formatted as HH:MM, zero-padded """ logger.info('Getting current time') return time.strftime("%H:%M", time.gmtime())
c7902ac8a8fb2528bacf6a5bc8459865604dd204
3,658,417
import torch def mae_loss(output, target): """Creates a criterion that measures the mean absolute error (l1 loss) between each element in the input :math:`output` and target :math:`target`. The loss can be described as: .. math:: \\ell(x, y) = L = \\operatorname{mean}(\\{l_1,\\dots,l_N\\}^\\top), \\quad l_n = \\left| x_n - y_n \\right|, where :math:`N` is the batch size. :math:`output` and :math:`target` are tensors of arbitrary shapes with a total of :math:`n` elements each. :param output: The output of the model or our predictions :type output: torch.Tensor :param target: The expected output or our labels :type target: torch.Tensor :return: The mean absolute error :rtype: torch.Tensor """ # Use PyTorch operations to return a PyTorch tensor. return torch.mean(torch.abs(output - target)) #return nn.functional.l1_loss(output, target)
159c50cf673750c1d27b8ad8b2a5bbde3bb76111
3,658,418
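# A quick check of mae_loss against PyTorch's built-in L1 loss, assuming the
# function above is in scope.
import torch

out = torch.tensor([1.0, 2.0, 3.0])
tgt = torch.tensor([2.0, 2.0, 5.0])
print(mae_loss(out, tgt))                     # tensor(1.)
print(torch.nn.functional.l1_loss(out, tgt))  # tensor(1.)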
import json def aistracker_from_json(filepath, debug=True): """ get an aistracker object from a debug messages JSON that was previously exported from pyaisnmea Args: filepath(str): full path to json file debug(bool): save all message payloads and decoded attributes into messagelog Raises: NoSuitableMessagesFound: if there are no AIS messages in the file Returns: aistracker(ais.AISTracker): object that keeps track of all the ships we have seen messagelog(allmessages.AISMessageLog): object with all the AIS messages """ messagelog = allmessages.AISMessageLog() aistracker = ais.AISTracker() msgnumber = 1 for line in open_file_generator(filepath): try: linemsgdict = json.loads(line) payload = linemsgdict['payload'] msgtime = linemsgdict['rxtime'] msg = aistracker.process_message(payload, timestamp=msgtime) if debug: messagelog.store(msgnumber, payload, msg) msgnumber += 1 except (ais.UnknownMessageType, ais.InvalidMMSI, json.decoder.JSONDecodeError, KeyError, binary.NoBinaryData) as err: AISLOGGER.debug(str(err)) continue if aistracker.messagesprocessed == 0: raise NoSuitableMessagesFound('No AIS messages detected in this file') return (aistracker, messagelog)
99426c11d33fc8bb00cdb4cfec51b60e8d8f481d
3,658,419
def configure(node): """ Generates the script to set the hostname in a node """ script = [] script.append(Statements.exec("hostname %s" % node.getName())) script.append(Statements.createOrOverwriteFile( "/etc/hostname", [node.getName()])) script.append(Statements.exec( "sed -i 's/127.0.0.1/127.0.0.1\t%s/' /etc/hosts" % node.getName())) return script
b0acf0f6a1363f1c7ad5a8e6dce6cb5d45586135
3,658,420
import random def processOptional(opt): """ Processes the optional element 50% of the time, skips it the other 50% of the time """ rand = random.random() if rand <= 0.5: return '' else: return processRHS(opt.option)
bda8130952f11f4df9342764d749dd6c93109d8e
3,658,421
def remove_non_paired_trials(df): """Remove non-paired trials from a dataset. This function will remove any trials from the input dataset df that do not have a matching pair. A matching pair are trial conditions A->B and B->A. """ # Define target combinations start_pos = np.concatenate(df['startPos'].to_numpy()) end_pos = np.concatenate(df['targPos'].to_numpy()) targ_comb = np.concatenate([start_pos, end_pos], axis=1) uni_targ_comb = np.unique(targ_comb, axis=0) # Convert target combinations to trial conditions start_cond = get_targ_cond(df['startPos']) end_cond = get_targ_cond(df['targPos']) targ_cond = [''.join([s, e]) for s, e in zip(start_cond, end_cond)] mask = get_targ_pairs(start_cond, end_cond) # Remove non-paired targets df = df[np.array(mask)] targ_cond = [tc for tc, m in zip(targ_cond, mask) if m] # Put other target information into a dict for easy access. This is # redundant and probably unnecessary, but is being done just in case this # information may be useful later on. targ_info = { 'start_pos': start_pos, 'end_pos': end_pos, 'targ_comb': targ_comb, 'uni_targ_comb': uni_targ_comb } return df, targ_cond, targ_info
30b5b86d9354c55dd2514114dc1180f397f2e56c
3,658,422
def compute_weighted_means_ds(ds, shp, ds_name='dataset', time_range=None, column_names=[], averager=False, df_output=pd.DataFrame(), output=None, land_only=False, time_stat=False, ): """ Compute spatial weighted mean of xr.Dataset Parameters ---------- ds: xr.DataSet shp: gp.GeoDataFrame gp.GeoDataFrame containing the information needed for xesmf's spatial averaging ds_name: str (optional) Name of the dataset will be written to the pd.DataFrame as an extra column time_range: list (optional) List containing start and end date to select from ``ds`` column_names: list (optional) Extra column names of the pd.DataFrame; the information is read from global attributes of ``ds`` averager: str, xesmf.SpatialAverager (optional) Use CORDEX domain name to calculate a xesmf.SpatialAverager object or use user-given one. df_output: pd.DataFrame (optional) pd.DataFrame to be concatenated with the newly created pd.DataFrame output: str (optional) Name of the output directory path or file land_only: bool (optional) Consider only land points\n !!!This is NOT implemented yet!!!\n As a workaround, write the land-sea mask into ``ds['mask']``. xesmf's spatial averager automatically considers ``ds['mask']``. time_stat: str or list (optional) Do some time statistics on ``ds``\n !!!This is NOT implemented yet!!! Returns ------- DataFrame : pd.DataFrame pandas Dataframe containing time series of spatial averages. Example ------- To calculate time series of spatial averages for several 'Bundesländer':\n - select Schleswig-Holstein, Hamburg, Bremen and Lower Saxony\n - Merge those regions to one new region called NorthSeaCoast\n - Select time slice from 2007 to 2009\n - Set CORDEX specific result DataFrame column names\n :: import xarray as xr import xweights as xw path = '/work/kd0956/CORDEX/data/cordex/output/EUR-11/CLMcom/MIROC-MIROC5/rcp85/r1i1p1/CLMcom-CCLM4-8-17/v1/mon/tas/v20171121/' netcdffile = path + 'tas_EUR-11_MIROC-MIROC5_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-201012.nc' ds = xr.open_dataset(netcdffile) df = xw.compute_weighted_means_ds(ds, 'states', subregions=['01_Schleswig-Holstein', '02_Hamburg', '03_Niedersachsen', '04_Bremen'], merge_column=['all', 'NorthSeaCoast'], time_range=['2007-01-01','2009-12-31'], column_names=['institute_id', 'driving_model_id', 'experiment_id', 'driving_model_ensemble_member', 'model_id', 'rcm_version_id'], ) """ if land_only: """ Not clear how to find the right lsm file for each ds Then write the lsm file to ds['mask'] The rest is done by xesmf """ raise NotImplementedError if not isinstance(ds, xr.Dataset): return df_output if time_range: ds = ds.sel(time=slice(time_range[0], time_range[1])) column_dict = {column:ds.attrs[column] if hasattr(ds, column) else None for column in column_names} try: out = spatial_averager(ds, shp, savg=averager) except Exception: return df_output drop = [i for i in out.coords if not out[i].dims] out = out.drop(labels=drop) if time_stat: """ Not sure if it is useful to implement here or do it separately after using xweights """ raise NotImplementedError df_output = concat_dataframe(df_output, out, column_dict=column_dict, name=ds_name) if output: write_to_csv(df_output, output) return df_output
e575d17eefe8de66c0b6fd63abcf5d3bd6cac6ae
3,658,423
def action_remove(indicator_id, date, analyst): """ Remove an action from an indicator. :param indicator_id: The ObjectId of the indicator to update. :type indicator_id: str :param date: The date of the action to remove. :type date: datetime.datetime :param analyst: The user removing the action. :type analyst: str :returns: dict with keys "success" (boolean) and "message" (str) if failed. """ indicator = Indicator.objects(id=indicator_id).first() if not indicator: return {'success': False, 'message': 'Could not find Indicator'} try: indicator.delete_action(date) indicator.save(username=analyst) return {'success': True} except ValidationError as e: return {'success': False, 'message': str(e)}
806c818cd4c18624d9713a02d5c1826cab43a631
3,658,424
def repack_orb_to_dalton(A, norb, nclosed, nact, nvirt): """Repack a [norb, norb] matrix into a [(nclosed*nact) + (nclosed*nvirt) + (nact*nvirt)] vector for contraction with the CI Hamiltonian. """ assert norb == nclosed + nact + nvirt assert A.shape == (norb, norb) # These might be available in the global namespace, but this # function should work on its own. range_closed = list(range(0, nclosed)) range_act = list(range(nclosed, nclosed + nact)) range_virt = list(range(nclosed + nact, nclosed + nact + nvirt)) indices_rohf_closed_act = [(i, t) for i in range_closed for t in range_act] indices_rohf_closed_virt = [(i, a) for i in range_closed for a in range_virt] indices_rohf_act_virt = [(t, a) for t in range_act for a in range_virt] B = np.zeros( len(indices_rohf_closed_act) + len(indices_rohf_closed_virt) + len(indices_rohf_act_virt) ) for (i, t) in indices_rohf_closed_act: it = (t - nclosed) * nclosed + i B[it] += A[i, t] for (i, a) in indices_rohf_closed_virt: ia = i * nvirt + a - nclosed - nact + (nclosed * nact) B[ia] += A[i, a] for (t, a) in indices_rohf_act_virt: ta = (t - nclosed) * nvirt + a - nclosed - nact + (nclosed * nact) + (nclosed * nvirt) B[ta] += A[t, a] return B
05b356e9ded74c180d2a220f147cd69e91a5b597
3,658,425
def get_config(section="MAIN", filename="config.ini"): """ Function to retrieve all information from token file. Usually retrieves from config.ini """ try: config = ConfigParser() with open(filename) as config_file: config.read_file(config_file) return config[section] except FileNotFoundError: print("No configuration file found, check 'config_sample.ini'") raise
32d6c579b0ce002a601ea9041b54e9ce03858eb4
3,658,426
def _worst_xt_by_core(cores) -> float: """ Assigns a default worst crosstalk value based on the number of cores """ worst_crosstalks_by_core = {7: -84.7, 12: -61.9, 19: -54.8} # Cores: Crosstalk in dB worst_xt = worst_crosstalks_by_core.get(cores) # Worst aggregate intercore XT return worst_xt
331fdd7dc20db6909a6952483cfa9699f983a721
3,658,427
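# Usage sketch for _worst_xt_by_core, assuming the function above is in scope;
# unknown core counts fall through dict.get and return None, which callers
# must handle (or a default could be passed as dict.get's second argument).
print(_worst_xt_by_core(7))   # -84.7
print(_worst_xt_by_core(13))  # None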
def _CheckUploadStatus(status_code): """Validates that HTTP status for upload is 2xx.""" return status_code // 100 == 2
d799797af012e46945cf413ff54d2ee946d364ba
3,658,428
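# Why the floor division in the 2xx check above matters in Python 3: true
# division makes the comparison fail for anything but exactly 200.
print(204 / 100 == 2)   # False (2.04)
print(204 // 100 == 2)  # True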
def load(path: str, **kwargs) -> BELGraph: """Read a BEL graph. :param path: The path to a BEL graph in any of the formats with extensions described below :param kwargs: The keyword arguments are passed to the importer function :return: A BEL graph. This is the universal loader, which means any file path can be given and PyBEL will look up the appropriate load function. Allowed extensions are: - bel - bel.nodelink.json - bel.cx.json - bel.jgif.json The previous extensions also support gzipping. Other allowed extensions that don't support gzip are: - bel.pickle / bel.gpickle / bel.pkl - indra.json """ for extension, importer in IMPORTERS.items(): if path.endswith(extension): return importer(path, **kwargs) raise InvalidExtensionError(path=path)
871c7e3becac089758c94f7416def0020e63f9c1
3,658,429
from typing import Optional def smooth_l1_loss( prediction: oneflow._oneflow_internal.BlobDesc, label: oneflow._oneflow_internal.BlobDesc, beta: float = 1.0, name: Optional[str] = None, ) -> oneflow._oneflow_internal.BlobDesc: """This operator computes the smooth l1 loss. The equation is: .. math:: & out = \\frac{(\\beta*x)^2}{2}, \\left|x\\right|<\\frac{1}{{\\beta}^2} & out = \\left|x\\right|-\\frac{0.5}{{\\beta}^2}, otherwise Args: prediction (oneflow._oneflow_internal.BlobDesc): The prediction Blob label (oneflow._oneflow_internal.BlobDesc): The label Blob beta (float, optional): The :math:`\\beta` in the equation. Defaults to 1.0. name (Optional[str], optional): The name for the operation. Defaults to None. Returns: oneflow._oneflow_internal.BlobDesc: The result Blob For example: .. code-block:: python import oneflow as flow import numpy as np import oneflow.typing as tp @flow.global_function() def smooth_l1_loss_Job(prediction: tp.Numpy.Placeholder((5, )), label: tp.Numpy.Placeholder((5, )) ) -> tp.Numpy: return flow.smooth_l1_loss(prediction=prediction, label=label) prediction = np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32) label = np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32) out = smooth_l1_loss_Job(prediction, label) # out [0.02 0.12499999 1.7 0.005 0.17999998] """ op = ( flow.user_op_builder( name if name is not None else id_util.UniqueStr("SmoothL1Loss_") ) .Op("smooth_l1_loss") .Input("prediction", [prediction]) .Input("label", [label]) .Output("loss") ) op.Attr("beta", float(beta)) return op.Build().InferAndTryRun().RemoteBlobList()[0]
ddebf5ba77ca8e4d2a964e5c86e05a0b61db9ded
3,658,430
def get_model_fields(model, concrete=False): # type: (Type[Model], Optional[bool]) -> List[Field] """ Gets model field :param model: Model to get fields for :param concrete: If set, returns only fields with column in model's table :return: A list of fields """ if not hasattr(model._meta, 'get_fields'): # Django 1.8+ if concrete: res = model._meta.concrete_fields else: res = model._meta.fields + model._meta.many_to_many else: res = model._meta.get_fields() if concrete: # Many to many fields have concrete flag set to True. Strange. res = [f for f in res if getattr(f, 'concrete', True) and not getattr(f, 'many_to_many', False)] return res
9e9172b2e606041c6f9dbf3a991e79d73518227f
3,658,431
def loss_fun(para): """ This is the loss function """ return -data_processing(my_cir(para))
5703755e3f5547be933f85224c103c58acbeaabb
3,658,432
def GetDynTypeMgr(): """Get the dynamic type manager""" return _gDynTypeMgr
7acf02dd2072ea819c847f53fbf11e68146b2400
3,658,433
def identifyEntity(tweet, entities): """ Identify the target entity of the tweet from the list of entities :param tweet: :param entities: :return: """ best_score = 0 # best score over all entities targetEntity = "" # the entity corresponding to the best score for word in tweet: for entity in entities: cur_score = 0 # the score for the current entity if word == entity: cur_score = 1 # set the current score to 1 in case the entity name is mentioned in the tweet for entity_related_word in entities[entity]: if word == entity_related_word: cur_score = cur_score + 1 # increment the current score by 1 in case a related term to # the current entity is mentioned in the tweet if cur_score > best_score: # update the best score and the target entity best_score = cur_score targetEntity = entity return targetEntity
d6825dfddf01706ee266e0f1c82128a42bcb8554
3,658,434
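# Usage sketch for identifyEntity, assuming the function above is in scope;
# the tweet is pre-tokenized and entities maps entity names to related terms.
entities = {"acme": ["rocket", "anvil"], "initech": ["tps", "printer"]}
tweet = ["my", "printer", "jammed", "again"]
print(identifyEntity(tweet, entities))  # initech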
def angle_between(a, b): """ compute angle in radian between a and b. Throws an exception if a or b has zero magnitude. :param a: :param b: :return: """ # TODO: check if extreme value that can make the function crash-- use "try" # from numpy.linalg import norm # from numpy import dot # import math arccosInput = np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b) # sct.printv(arccosInput) arccosInput = 1.0 if arccosInput > 1.0 else arccosInput arccosInput = -1.0 if arccosInput < -1.0 else arccosInput sign_angle = np.sign(np.cross(a, b)) # sct.printv(sign_angle) return sign_angle * np.arccos(arccosInput) # @xl_func("numpy_row v1, numpy_row v2: float") # def py_ang(v1, v2): # """ Returns the angle in radians between vectors 'v1' and 'v2' """ # cosang = np.dot(a, b) # sinang = la.norm(np.cross(a, b)) # return np.arctan2(sinang, cosang)
c739915a75c36c26b7b3f002239de931653e4d09
3,658,435
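# Usage sketch for angle_between with 2-D vectors, where np.cross returns a
# signed scalar; assumes the function above is in scope.
import numpy as np

print(angle_between(np.array([1.0, 0.0]), np.array([0.0, 1.0])))   # ~ +1.5708
print(angle_between(np.array([1.0, 0.0]), np.array([0.0, -1.0])))  # ~ -1.5708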
def _apply_D_loss(scores_fake, scores_real, loss_func): """Compute Discriminator losses and normalize loss values Arguments --------- scores_fake : list discriminator scores of generated waveforms scores_real : list discriminator scores of groundtruth waveforms loss_func : object object of target discriminator loss """ loss = 0 real_loss = 0 fake_loss = 0 if isinstance(scores_fake, list): # multi-scale loss for score_fake, score_real in zip(scores_fake, scores_real): total_loss_, real_loss_, fake_loss_ = loss_func( score_fake=score_fake, score_real=score_real ) loss += total_loss_ real_loss += real_loss_ fake_loss += fake_loss_ # normalize loss values with number of scales (discriminators) # loss /= len(scores_fake) # real_loss /= len(scores_real) # fake_loss /= len(scores_fake) else: # single scale loss total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real) loss = total_loss return loss, real_loss, fake_loss
9432962af57193c07a268d00a3f1f01d372cb6a0
3,658,436
import tempfile def get_temp_dir(): """ Get path to the temp directory. Returns: str: The path to the temp directory. """ return fix_slashes( tempfile.gettempdir() )
3d0dd90c8187ac7b13913e7d4cd2b481c712fa6b
3,658,437
import random def pick_op(r, maxr, w, maxw): """Choose a read or a write operation""" if r == maxr or random.random() >= float(w) / maxw: return "write" else: return "read"
a45f53bf12538412b46f78e2c076966c26cf61ac
3,658,438
def sim_nochange(request): """ Return a dummy YATSM model container with a no-change dataset "No-change" dataset is simply a timeseries drawn from samples of one standard normal. """ X, Y, dates = _sim_no_change_data() return setup_dummy_YATSM(X, Y, dates, [0])
a39ba5824644764ae2aaf4e4d95c68d1c26bd132
3,658,439
import os def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'): """use builtin patch to apply <patchobj> to the working directory. returns whether patch was applied with fuzz factor.""" if files is None: files = {} if eolmode is None: eolmode = ui.config('patch', 'eol', 'strict') if eolmode.lower() not in eolmodes: raise util.Abort(_('Unsupported line endings type: %s') % eolmode) eolmode = eolmode.lower() try: fp = open(patchobj, 'rb') except TypeError: fp = patchobj if cwd: curdir = os.getcwd() os.chdir(cwd) try: ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode) finally: if cwd: os.chdir(curdir) if fp != patchobj: fp.close() if ret < 0: raise PatchError return ret > 0
64060526a6ed028dc48ebfbf447751a69590fb65
3,658,440
from functools import reduce import operator def get_queryset_descendants(nodes, include_self=False, add_to_result=None): """ Database query for descendants. If there are no nodes, an empty queryset is returned. :param nodes: list of tree nodes whose descendants should be found :param include_self: whether to include the original list of nodes in the result :param add_to_result: list of node keys to additionally include in the result :return: queryset of nodes, sorted in tree-traversal order """ if not nodes: # HACK: Emulate MPTTModel.objects.none(), because MPTTModel is abstract return EmptyQuerySet(MPTTModel) filters = [] model_class = nodes[0].__class__ if include_self: for n in nodes: if n.get_descendant_count(): lft, rght = n.lft - 1, n.rght + 1 filters.append(Q(tree_id=n.tree_id, lft__gt=lft, rght__lt=rght)) else: filters.append(Q(pk=n.pk)) else: for n in nodes: if n.get_descendant_count(): lft, rght = n.lft, n.rght filters.append(Q(tree_id=n.tree_id, lft__gt=lft, rght__lt=rght)) if add_to_result: if len(add_to_result) > 1: filters.append(Q(id__in=add_to_result)) else: filters.append(Q(pk=add_to_result[0])) if filters: return model_class.objects.filter(reduce(operator.or_, filters)) else: # HACK: Emulate model_class.objects.none() return model_class.objects.filter(id__isnull=True)
7de9fe6c146c9569bc78b714b75238b770f9157e
3,658,441
from operator import mul def op_mul(lin_op, args): """Applies the linear operator to the arguments. Parameters ---------- lin_op : LinOp A linear operator. args : list The arguments to the operator. Returns ------- NumPy matrix or SciPy sparse matrix. The result of applying the linear operator. """ # Constants convert directly to their value. if lin_op.type in [lo.SCALAR_CONST, lo.DENSE_CONST, lo.SPARSE_CONST]: result = lin_op.data # No-op is not evaluated. elif lin_op.type is lo.NO_OP: return None # For non-leaves, recurse on args. elif lin_op.type is lo.SUM: result = sum(args) elif lin_op.type is lo.NEG: result = -args[0] elif lin_op.type is lo.MUL: coeff = mul(lin_op.data, {}) result = coeff*args[0] elif lin_op.type is lo.DIV: divisor = mul(lin_op.data, {}) result = args[0]/divisor elif lin_op.type is lo.SUM_ENTRIES: result = np.sum(args[0]) elif lin_op.type is lo.INDEX: row_slc, col_slc = lin_op.data result = args[0][row_slc, col_slc] elif lin_op.type is lo.TRANSPOSE: result = args[0].T elif lin_op.type is lo.CONV: result = conv_mul(lin_op, args[0]) elif lin_op.type is lo.PROMOTE: result = np.ones(lin_op.size)*args[0] elif lin_op.type is lo.DIAG_VEC: val = intf.from_2D_to_1D(args[0]) result = np.diag(val) else: raise Exception("Unknown linear operator.") return result
a1f770d2132fc9c3a60d4de3c3d87f59a03241eb
3,658,442
def comparator(x, y): """ default comparator :param x: :param y: :return: """ if x < y: return -1 elif x > y: return 1 return 0
53fc36f1afc3347689a1230c5ee3ba25d90f1239
3,658,443
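# comparator follows the classic cmp contract, so it can drive sorting via
# functools.cmp_to_key; assumes the function above is in scope.
from functools import cmp_to_key

print(sorted([3, 1, 2], key=cmp_to_key(comparator)))  # [1, 2, 3]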
def set_trait(age, age_risk_map, sex, sex_risk_map, race, race_risk_map): """ A trait occurs based on some mix of age, sex, and race risk, combined as independent factors. """ if age in age_risk_map: risk_from_age = age_risk_map[age] else: risk_from_age = 0 if sex in sex_risk_map: risk_from_sex = sex_risk_map[sex] else: risk_from_sex = 0 if race in race_risk_map: risk_from_race = race_risk_map[race] else: risk_from_race = 0 # probability of trait prob_trait = 1 - (1 - risk_from_age) * (1 - risk_from_sex) * (1 - risk_from_race) prob_not_trait = 1 - prob_trait resident_trait = np.random.choice(np.arange(1,3), p=[prob_not_trait,prob_trait]) return resident_trait
fe9f6c75ae4d7f80c2da86af4315b35fe29df482
3,658,444
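# The probability combination in set_trait is a noisy-OR: the trait occurs
# unless every risk factor independently "misses". A worked check:
risks = [0.1, 0.2, 0.3]
p = 1.0
for r in risks:
    p *= 1 - r
print(1 - p)  # 0.496 = 1 - 0.9*0.8*0.7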
import os def isvalid(save_path, file): """ Returns true if the file described by the parameters is a file with the appropriate file extension. """ return os.path.isfile(os.path.join(save_path, file)) and \ str(file).endswith('.meta')
55f76212eaaae3be6706a01f3f28d24005d28f75
3,658,445
def tidy_expression(expr, design=None): """Converts expression matrix into a tidy 'long' format.""" df_long = pd.melt( _reset_index( expr, name='gene'), id_vars=['gene'], var_name='sample') if design is not None: df_long = pd.merge( df_long, _reset_index( design, name='sample'), on='sample', how='left') return df_long
7c904e13a55f38cc05309b5927f2fdbb23c3f8c9
3,658,446
def model_remote_to_local(remote_timestamps, local_timestamps, debug=False): """for timestamps""" a1=remote_timestamps[:,np.newaxis] a2=np.ones( (len(remote_timestamps),1)) A = np.hstack(( a1,a2)) b = local_timestamps[:,np.newaxis] x,resids,rank,s = np.linalg.lstsq(A,b) if debug: print('in model_remote_to_local: N=%d, resids=%s'%( len(remote_timestamps),resids)) gain = x[0,0] offset = x[1,0] return gain,offset
74e9a6e367be1e77be715c8f17818abaa268923e
3,658,447
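# A quick check of the least-squares clock model above, assuming numpy and the
# function are in scope: local = gain*remote + offset should be recovered
# exactly from noise-free data.
import numpy as np

remote = np.arange(5.0)
local = 2.0 * remote + 3.0
print(model_remote_to_local(remote, local))  # (2.0, 3.0) up to float error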
def get_optimizer(name): """Get an optimizer generator that returns an optimizer according to lr.""" if name == 'adam': def adam_opt_(lr): return tf.keras.optimizers.Adam(lr=lr) return adam_opt_ else: raise ValueError('Unknown optimizer %s.' % name)
8c97ee9f4b77d0fc80914ac7cbb49a448d48644a
3,658,448
from typing import List def get_multi(response: Response, common: dict = Depends(common_parameters)) -> List[ShopToPriceSchema]: """List prices for a shop""" query_result, content_range = shop_to_price_crud.get_multi( skip=common["skip"], limit=common["limit"], filter_parameters=common["filter"], sort_parameters=common["sort"], ) response.headers["Content-Range"] = content_range for result in query_result: result.half = result.price.half if result.price.half and result.use_half else None result.one = result.price.one if result.price.one and result.use_one else None result.two_five = result.price.two_five if result.price.two_five and result.use_two_five else None result.five = result.price.five if result.price.five and result.use_five else None result.joint = result.price.joint if result.price.joint and result.use_joint else None result.piece = result.price.piece if result.price.piece and result.use_piece else None return query_result
f97868e66c7743127d2d2951b732ff4c62708ae5
3,658,449
from datetime import datetime def send_crash(request, machine_config_info, crashlog): """ Save houdini crashes """ machine_config = get_or_save_machine_config( machine_config_info, get_ip_address(request), datetime.now()) save_crash(machine_config, crashlog, datetime.now()) return True
43e44950bdb4b6dc305bb1f36651daa31b4f813e
3,658,450
import sys import csv def read_csv_file(filename): """Read csv file into a numpy array """ header_info = {} # Make this Py2.x and Py3.x compatible if sys.version_info[0] < 3: infile = open(filename, 'rb') else: infile = open(filename, 'r', newline='', encoding='utf8') with infile as csvfile: # Make this Py2.x and Py3.x compatible if sys.version_info[0] < 3: data = csv.reader((line.replace(b'\0', b'') for line in csvfile), delimiter=b',') mynext = data.next else: data = csv.reader((line.replace('\0', '') for line in csvfile), delimiter=',') mynext = data.__next__ temp_row = mynext() header_info['timestamp'] = temp_row[0] header_info['file'] = temp_row[1] header_info['title'] = mynext()[1] header_info['model'] = mynext()[1] header_info['serial_number'] = mynext()[1] temp_row = mynext() header_info['center_freq'] = float(temp_row[1]) temp_row = mynext() header_info['span_freq'] = float(temp_row[1]) temp_row = mynext() header_info['resolution_bw'] = float(temp_row[1]) temp_row = mynext() header_info['video_bw'] = float(temp_row[1]) temp_row = mynext() header_info['ref_level'] = float(temp_row[1]) temp_row = mynext() header_info['sweep_time'] = float(temp_row[1]) temp_row = mynext() header_info['num_points'] = int(temp_row[1]) temp_row = mynext() # Skip blank line 12 temp_row = mynext() # Skip blank line 13 temp_row = mynext() num_traces = len(temp_row) - 1 header_info['num_traces'] = num_traces temp_row = mynext() header_info['frequency'] = temp_row[0] data_array = [] if num_traces == 1: for row in data: data_array.append((float(row[0]), float(row[1]))) data = np.array( data_array, dtype={'names': ('frequency', 'amplitude'), 'formats': ('f8', 'f8')}) elif num_traces == 2: for row in data: data_array.append((float(row[0]), [float(row[1]), float(row[2])])) data = np.array( data_array, dtype={'names': ('frequency', 'amplitude'), 'formats': ('f8', '2f8')}) elif num_traces == 3: for row in data: data_array.append((float(row[0]), [float(row[1]), float(row[2]), float(row[3])])) data = np.array( data_array, dtype={'names': ('frequency', 'amplitude'), 'formats': ('f8', '3f8')}) return (header_info, data)
8aa8c7bb1aeda1b85c99e09ee7033753ffd6d9a2
3,658,451
def apply_HAc_dense(A_C, A_L, A_R, Hlist): """ Construct the dense effective Hamiltonian HAc and apply it to A_C. For testing. """ d, chi, _ = A_C.shape HAc = HAc_dense(A_L, A_R, Hlist) HAc_mat = HAc.reshape((d*chi*chi, d*chi*chi)) A_Cvec = A_C.flatten() A_C_p = np.dot(HAc_mat, A_Cvec).reshape(A_C.shape) return A_C_p
b13f9db7287fcdf275e8f7c9a7fb542e7b79323c
3,658,452
def min_index(array, i, j): """Helper function for selection sort. Returns the index of the smallest element in 'array' between indices 'i' and 'j'-1. """ index = i for k in range(i, j): if array[k] < array[index]: index = k return index
4c59362fac2e918ba5a0dfe9f6f1670b3e95d68c
3,658,453
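# min_index is the inner step of selection sort; a minimal sort built on it,
# assuming the function above is in scope.
def selection_sort(array):
    for i in range(len(array)):
        j = min_index(array, i, len(array))
        array[i], array[j] = array[j], array[i]
    return array

print(selection_sort([3, 1, 2]))  # [1, 2, 3]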
def filterControlChars(value, replacement=' '): """ Returns string value with control chars being substituted with replacement character >>> filterControlChars(u'AND 1>(2+3)\\n--') u'AND 1>(2+3) --' """ return filterStringValue(value, PRINTABLE_CHAR_REGEX, replacement)
a0f508d281f0c12311a5c2aa2f898def5eb38913
3,658,454
def get_deobfuscator(var_names) -> str: """Creates a deobfuscator for the given set of var names. Args: var_names (list): List of variable names from the `obfuscate` function. Returns: str: Deobfuscator """ return f'\n\ngetattr(getattr(__main__, [x for x in dir(__main__) if x.startswith(\'__b\')][0]), (lambda: "ArithmeticError" and "AssertionError" and "AttributeError" and "BaseException" and "BlockingIOError" and "BrokenPipeError" and "BufferError" and "BytesWarning" and "ChildProcessError" and "ConnectionAbortedError" and "ConnectionError" and "ConnectionRefusedError" and "ConnectionResetError" and "DeprecationWarning" and "EOFError" and "Ellipsis" and "EnvironmentError" and "Exception" and "False" and "FileExistsError" and "FileNotFoundError" and "FloatingPointError" and "FutureWarning" and "GeneratorExit" and "IOError" and "ImportError" and "ImportWarning" and "IndentationError" and "IndexError" and "InterruptedError" and "IsADirectoryError" and "KeyError" and "KeyboardInterrupt" and "LookupError" and "MemoryError" and "ModuleNotFoundError" and "NameError" and "None" and "NotADirectoryError" and "NotImplemented" and "NotImplementedError" and "OSError" and "OverflowError" and "PendingDeprecationWarning" and "PermissionError" and "ProcessLookupError" and "RecursionError" and "ReferenceError" and "ResourceWarning" and "RuntimeError" and "RuntimeWarning" and "StopAsyncIteration" and "StopIteration" and "SyntaxError" and "SyntaxWarning" and "SystemError" and "SystemExit" and "TabError" and "TimeoutError" and "True" and "TypeError" and "UnboundLocalError" and "UnicodeDecodeError" and "UnicodeEncodeError" and "UnicodeError" and "UnicodeTranslateError" and "UnicodeWarning" and "UserWarning" and "ValueError" and "Warning" and "WindowsError" and "ZeroDivisionError" and "__build_class__" and "__debug__" and "__doc__" and "__import__" and "__loader__" and "__name__" and "__package__" and "__spec__" and "abs" and "all" and "any" and "ascii" and "bin" and "bool" and "breakpoint" and "bytearray" and "bytes" and "callable" and "chr" and "classmethod" and "compile" and "complex" and "copyright" and "credits" and "delattr" and "dict" and "dir" and "divmod" and "enumerate" and "eval" and "fdlr" and "exit" and "filter" and "float" and "format" and "frozenset" and "getattr" and "globals" and "hasattr" and "hash" and "help" and "hex" and "id" and "input" and "int" and "isinstance" and "issubclass" and "iter" and "len" and "license" and "list" and "locals" and "map" and "max" and "memoryview" and "min" and "next" and "object" and "oct" and "open" and "ord" and "pow" and "print" and "property" and "quit" and "range" and "repr" and "reversed" and "round" and "set" and "setattr" and "slice" and "sorted" and "staticmethod" and "str" and "sum" and "super" and "tuple" and "type" and "vars" and "zip" and "exec")())(\'\'.join([getattr(binascii, [x for x in dir(binascii) if x.startswith(chr(97)+str((()==())+([]==[]))+chr(98))][0])(globals().get(var_name)).decode() for var_name in {var_names}]))'
0ad26818cd8a802aabb666631f096d5b2f6c47a0
3,658,455
import csv def write_trt_rpc(cell_ID, cell_time, lon, lat, area, rank, hmin, hmax, freq, fname, timeformat='%Y%m%d%H%M'): """ writes the rimed particles column data for a TRT cell Parameters ---------- cell_ID : array of ints the cell ID cell_time : array of datetime the time step lon, lat : array of floats the latitude and longitude of the center of the cell area : array of floats the area of the cell rank : array of floats the rank of the cell hmin, hmax : array of floats Minimum and maximum altitude of the rimed particle column freq : array of floats Frequency of the species constituting the rime particle column within the limits of it fname : str file name where to store the data Returns ------- fname : str the name of the file where data has written """ hmin = hmin.filled(fill_value=get_fillvalue()) hmax = hmax.filled(fill_value=get_fillvalue()) freq = freq.filled(fill_value=get_fillvalue()) with open(fname, 'w', newline='') as csvfile: fieldnames = [ 'traj_ID', 'yyyymmddHHMM', 'lon', 'lat', 'area', 'RANKr', 'hmin', 'hmax', 'freq'] writer = csv.DictWriter(csvfile, fieldnames) writer.writeheader() for i, traj_ID_el in enumerate(cell_ID): writer.writerow({ 'traj_ID': traj_ID_el, 'yyyymmddHHMM': cell_time[i].strftime(timeformat), 'lon': lon[i], 'lat': lat[i], 'area': area[i], 'RANKr': rank[i], 'hmin': hmin[i], 'hmax': hmax[i], 'freq': freq[i] }) return fname
fd634914a8c3d96d10d4dcc81514d492d6be899c
3,658,456
def get_tag(string: str) -> Tag: """Get the tag by its string value.""" return Tag.objects.get(tag=string)
816bbaecc4cf45e2fc75b1e428842b5502a353bc
3,658,457
def average_precision(gt, pred): """ Computes the average precision. This function computes the average precision between a set of ground-truth items and a list of predicted items. Parameters ---------- gt: set A set of ground-truth elements (order doesn't matter) pred: list A list of predicted elements (order does matter) Returns ------- score: double The average precision over the input lists """ if not gt: return 0.0 score = 0.0 num_hits = 0.0 for i,p in enumerate(pred): if p in gt and p not in pred[:i]: num_hits += 1.0 score += num_hits / (i + 1.0) return score / max(1.0, len(gt))
ca265471d073b6a0c7543e24ef0ba4f872737997
3,658,458
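# A worked example for average_precision, assuming the function above is in
# scope: hits at ranks 1 and 3 give (1/1 + 2/3) / 2 ~= 0.833.
print(average_precision({1, 2}, [1, 3, 2]))  # 0.8333...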
import math def rotate_coo(x, y, phi): """Rotate the coordinates in the *.coo files for data sets containing images at different PAs. """ # Rotate around center of image, and keep origin at center xin = 512. yin = 512. xout = 512. yout = 512. cos = math.cos(math.radians(phi)) sin = math.sin(math.radians(phi)) xrot = (x - xin) * cos - (y - yin) * sin + xout yrot = (x - xin) * sin + (y - yin) * cos + yout return [xrot, yrot]
a57a4c36119e96d757bd23f28a0790f6d68661fc
3,658,459
def ip_block_array(): """ Return an ipBlock array instance fixture """ return ['10.0.0.1', '10.0.0.2', '10.0.0.3']
c74756f34b97d2550cb238bd63e0c9505f3935d3
3,658,460
from pathlib import Path import joblib def load_model(model_name, dir_loc=None, alive_bar_on=True): """Load local model_name=model_s if present, else fetch from hf.co.""" if dir_loc is None: dir_loc = "" dir_loc = Path(dir_loc).absolute().as_posix() file_loc = f"{dir_loc}/{model_name}" if Path(file_loc).exists(): if alive_bar_on: with alive_bar( 1, title=f" Loading {dir_loc}/{model_name}, takes ~30 secs ...", length=3, ) as progress_bar: model = joblib.load(file_loc) # model_s = pickle.load(open(file_loc, "rb")) progress_bar() # pylint: disable=not-callable else: logger.info("Loading %s/%s, takes ~30 secs ...", dir_loc, model_name) model = joblib.load(file_loc) else: logger.info( "Fetching and caching %s from huggingface.co... " "The first time may take a while depending on your net.", model_name, ) if alive_bar_on: with alive_bar( 1, title=" Subsequent loading takes ~2-3 secs ...", length=3 ) as progress_bar: try: model = joblib.load( cached_download(hf_hub_url("mikeee/model_s", model_name)) ) except Exception as exc: logger.error(exc) raise progress_bar() # pylint: disable=not-callable else: try: model = joblib.load( cached_download(hf_hub_url("mikeee/model_s", model_name)) ) except Exception as exc: logger.error(exc) raise return model
1847e061c6980fd4fd185f79d48682cbf7cb14ff
3,658,461
from typing import Generator def get_dev_requirements() -> Generator: """Yield package name and version for Python developer requirements.""" return get_versions("DEVELOPMENT")
728658648d6bce6fecbf4c1bc6b6de42c315b3c0
3,658,462
from google.cloud import datastore


def _ndb_key_to_cloud_key(ndb_key):
    """Convert an ndb.Key to a Cloud Datastore entity Key."""
    # `utils` is assumed to be the application's own helper module.
    return datastore.Key(
        ndb_key.kind(), ndb_key.id(), project=utils.get_application_id())
ce71b0d13f2e37ded12bf87ad133492a9b68d0c7
3,658,463
def inference(H, images, train=True):
    """Build the MNIST model up to where it may be used for inference.

    The helpers used below (weight_variable, bias_variable, conv2d,
    max_pool_2x2, _activation_summary) and the TensorFlow 1.x `tf` import
    are assumed to be defined in the surrounding module.

    Parameters
    ----------
    H : dict
        Hyperparameter dictionary; H['arch'] holds num_channels and
        num_classes.
    images : tensor
        Images placeholder, from inputs().
    train : bool
        Whether the network is used for training or inference.

    Returns
    -------
    softmax_linear: Output tensor with the computed logits.
    """
    num_filter_1 = 32
    num_filter_2 = 64

    # First Convolutional Layer
    with tf.variable_scope('Conv1') as scope:
        # Adding Convolutional Layers
        W_conv1 = weight_variable(
            'weights', [5, 5, H['arch']['num_channels'], num_filter_1])
        b_conv1 = bias_variable('biases', [num_filter_1])
        h_conv1 = tf.nn.relu(
            conv2d(images, W_conv1) + b_conv1, name=scope.name)
        _activation_summary(h_conv1)

    # First Pooling Layer
    h_pool1 = max_pool_2x2(h_conv1, name='pool1')

    # Second Convolutional Layer
    with tf.variable_scope('Conv2') as scope:
        W_conv2 = weight_variable(
            'weights', [5, 5, num_filter_1, num_filter_2])
        b_conv2 = bias_variable('biases', [num_filter_2])
        h_conv2 = tf.nn.relu(
            conv2d(h_pool1, W_conv2) + b_conv2, name=scope.name)
        _activation_summary(h_conv2)

    # Second Pooling Layer
    h_pool2 = max_pool_2x2(h_conv2, name='pool2')

    # Find correct dimension
    dim = 1
    for d in h_pool2.get_shape()[1:].as_list():
        dim *= d

    # Adding Fully Connected Layers
    with tf.variable_scope('fc1') as scope:
        W_fc1 = weight_variable('weights', [dim, 1024])
        b_fc1 = bias_variable('biases', [1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, dim])
        h_fc1 = tf.nn.relu(
            tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name=scope.name)
        _activation_summary(h_fc1)

    # Adding Dropout
    if train:
        h_fc1 = tf.nn.dropout(h_fc1, 0.5, name='dropout')

    with tf.variable_scope('logits') as scope:
        W_fc2 = weight_variable('weights', [1024, H['arch']['num_classes']])
        b_fc2 = bias_variable('biases', [H['arch']['num_classes']])
        logits = tf.add(tf.matmul(h_fc1, W_fc2), b_fc2, name=scope.name)
        _activation_summary(logits)

    return logits
bf7e0f60bdc85d52fb6778cc40eedaa63c0387e3
3,658,464
import os


def _find_modules_and_directories(top_level_directory):
    """
    Recursive helper function to find all Python files included in the
    top-level package. This will recurse down the directory paths of any
    package to find all modules and subpackages, in order to create an
    exhaustive list of all Python files within a given package.

    :param top_level_directory: Path to the top level of a python package.
    :type top_level_directory: str
    :return: Returns a list of paths to all python files within that package.
    :rtype: list [str]
    """
    modules = []
    directories = []
    for item in os.listdir(top_level_directory):
        if item.endswith(".py"):
            modules.append(os.path.join(top_level_directory, item))
        elif os.path.isdir(os.path.join(top_level_directory, item)):
            directories.append(os.path.join(top_level_directory, item))
    for directory in directories:
        modules.extend(_find_modules_and_directories(directory))
    return modules
2aecb5974f83ce01b2a8e4a6fb8313399756c1d4
3,658,465
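A minimal usage sketch for the helper above, assuming a package directory named "my_package" exists on disk:

for module_path in _find_modules_and_directories("my_package"):
    print(module_path)  # path to each .py file under the package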
import numpy as np


def UniqueLattice(lattice_vectors, ind):
    """
    Takes a callable `lattice_vectors` that builds the i-th lattice vector
    (a tuple) from the genes of an individual `ind`, and the individual's
    genes. Returns a list with two tuples, representing the equivalent
    lattice vectors with the smallest cell circumference.
    """
    x_1 = lattice_vectors(0, ind)
    x_2 = lattice_vectors(1, ind)
    # Candidate equivalent lattices obtained by adding/subtracting the two
    # vectors, with signs flipped so the components stay positive.
    lattices = [[(x_1[0]+x_2[0] if (x_1[0]+x_2[0]) > 0 else (x_1[0]-x_2[0]),
                  x_1[1]+x_2[1] if (x_1[1]+x_2[1]) > 0 else x_1[1]-x_2[1]), x_2],
                [(x_1[0]-x_2[0] if (x_1[0]-x_2[0]) > 0 else x_1[0]+x_2[0],
                  x_1[1]-x_2[1] if (x_1[1]-x_2[1]) > 0 else x_1[1]+x_2[1]), x_2],
                [x_1, (x_1[0]+x_2[0] if (x_1[0]+x_2[0]) > 0 else x_1[0]-x_2[0],
                       x_1[1]+x_2[1] if (x_1[1]+x_2[1]) > 0 else x_1[1]-x_2[1])],
                [x_1, (x_1[0]-x_2[0] if (x_1[0]-x_2[0]) > 0 else x_1[0]+x_2[0],
                       x_1[1]-x_2[1] if (x_1[1]-x_2[1]) > 0 else x_1[1]+x_2[1])]]
    lattice_radius = []
    for lat in lattices:
        point_1 = lat[0]
        point_2 = lat[1]
        # Circumcenter of the triangle (origin, point_1, point_2). The
        # original expression divided by 2 and then multiplied by
        # (m_b - m_a), inverting the denominator; explicit parentheses
        # restore the intended division by 2*(m_b - m_a).
        m_a = (point_2[1]-point_1[1])/(point_2[0]-point_1[0])
        m_b = point_2[1]/point_2[0]
        x = (m_a*m_b*(point_1[1]) + m_b*(point_1[0]+point_2[0])
             - m_a*(point_2[0])) / (2*(m_b-m_a))
        y = (-1 / m_a) * (x - (point_1[0]-point_2[1])/2) + (point_1[1]-point_2[1])/2
        radius_1 = np.sqrt((x-point_1[0])**2 + (y-point_1[1])**2)
        radius_2 = np.sqrt((x-point_2[0])**2 + (y-point_2[1])**2)
        # keep the larger of the two radii for this candidate lattice
        lattice_radius.append(max(radius_1, radius_2))
    return lattices[lattice_radius.index(min(lattice_radius))]
e2474a54cf3351ff112ecb6d139eec8eac2ef1fa
3,658,466
from flask import Flask, render_template
from flask_wtf.csrf import CSRFError


def register_errors(app: Flask):
    """Register the required error handlers on the Flask application instance `app`."""
    @app.errorhandler(400)  # Bad Request: malformed request syntax the server cannot understand
    def bad_request(e):
        return render_template('error.html', description=e.description, code=e.code), 400

    @app.errorhandler(404)  # Not Found: the server cannot find the requested resource (page)
    def page_not_found(e):
        return render_template('error.html', description=e.description, code=e.code), 404

    @app.errorhandler(500)  # Internal Server Error: the server failed to complete the request
    def internal_server_error(e):
        return render_template('error.html', description="Internal server error, the request could not be completed.", code="500"), 500

    @app.errorhandler(CSRFError)  # CSRF validation failure
    def csrf_error_handle(e):
        return render_template('error.html', description=e.description, code=e.code), 400
27634a139aab88215b77e53a25758d6096571a09
3,658,467
from base64 import urlsafe_b64encode


def websafe_encode(data):
    """Encodes a byte string into websafe-base64 encoding (with the '='
    padding stripped).

    :param data: The input to encode.
    :return: The encoded string.
    """
    return urlsafe_b64encode(data).replace(b'=', b'').decode('ascii')
ed5b06d2fab3dcc64275cb0046cabd88f63894ec
3,658,468
from typing import Union


def gravatar(email: Union[str, list]) -> str:
    """Converts the e-mail address provided into a gravatar URL.

    Accepts a single address or a list of addresses; the first valid
    address (in sorted order) is used. `validators` and `gravatar_url`
    are assumed to be imported in the surrounding module.

    Args:
        email: e-mail address (or list of addresses) to convert.

    Returns:
        Gravatar URL, or None if no valid e-mail address is found.
    """
    if email is None:
        email = []
    elif isinstance(email, str):
        email = [email]
    email.sort()
    for _email in email:
        if validators.email(_email):
            return gravatar_url(_email)
    return None
8807eefd40472068310455c1c477933dbaa67be0
3,658,469
def bar_2_MPa(value):
    """
    converts pressure in bar to MPa

    :param value: pressure value in bar
    :return: pressure value in MPa
    """
    # `const` is assumed to be scipy.constants (const.bar = 1e5 Pa,
    # const.mega = 1e6), so the result is in megapascals, not pascals.
    return value * const.bar / const.mega
d6c8084a6603f74bd1fb11739e4f4d9100cf14de
3,658,470
import numpy as np


def walk(x, y, model, theta, conditions=None, var2=0.01, mov=100, d=1,
         tol=1e-3, mode=True):
    """Executes the walker implementation.

    The helpers `update_theta` and `fun_like` are assumed to be defined
    alongside this function.

    Parameters
    ----------
    x : np.ndarray
        An $(m, n)$ dimensional array for (cols, rows).
    y : np.ndarray
        An $n$ dimensional array that will be compared with the model's
        output.
    model : function
        A Python function defined by the user. This function should
        receive two arguments $(x, theta)$.
    theta : np.ndarray
        The array containing the model's parameters.
    conditions : list
        A list containing $2n$-conditions for the (min, max) range of the
        $n$ parameters.
    var2 : float
        Determines the step size of the walker. By default it is set to
        `0.01`.
    mov : int
        Number of movements that the walker will perform. By default it is
        set to `100`.
    d : float
        Size of the Gaussian step for the walker.
    tol : float
        Convergence criterion for the log-likelihood. By default it is set
        to `1e-3`.
    mode : bool
        By default it is set to `True`.

    Returns
    -------
    theta : np.array
        An ndarray with the updated theta values.
    nwalk : np.array
        Updates of theta for each movement performed by the walker.
    y0 : float
        The log-likelihood value.
    """
    greach = False
    nwalk = []

    for i in range(mov):
        nwalk.append(theta)
        theta_new = update_theta(theta, d)
        if not greach:
            y0 = fun_like(x, y, model, theta, conditions, var2)
            y1 = fun_like(x, y, model, theta_new, conditions, var2)
            if y0 <= tol and mode:
                print('Goal reached!')
                greach = True
                return theta, nwalk, y0
            else:
                if y1 <= tol and mode:
                    print('Goal reached!')
                    greach = True
                    return theta_new, nwalk, y1
                else:
                    # Metropolis-style acceptance: always accept a better
                    # step, otherwise accept with probability exp(-y0/y1).
                    ratio = y0 / y1
                    boltz = np.random.rand(1)
                    prob = np.exp(-ratio)
                    if y1 < y0:
                        theta = theta_new
                        theta_new = update_theta(theta, d)
                    else:
                        if prob > boltz:
                            theta = theta_new
                            theta_new = update_theta(theta, d)
                        else:
                            theta_new = update_theta(theta, d)
    if mode:
        print('Maximum number of iterations reached!')
        print(f'The log-likelihood is: {y0}')
    return theta, nwalk, y0
ef7386f4c7141edfcdeb041b47d741e186f207e2
3,658,471
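A hypothetical driver for the walker above, fitting y = a*x + b to noisy data; update_theta and fun_like are assumed to be defined alongside walk:

import numpy as np

def linear_model(x, theta):
    return theta[0] * x + theta[1]

x = np.linspace(0.0, 1.0, 50)
y = 2.0 * x + 0.5 + np.random.normal(0.0, 0.05, size=x.shape)
theta0 = np.array([1.0, 0.0])
theta_fit, trace, loglike = walk(x, y, linear_model, theta0, mov=500)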
def izbor_letov():
    """Main page: flight selection."""
    # Get the user (and any pending message) from the cookie
    (username, ime, priimek) = get_potnik()
    c.execute("SELECT distinct drzava FROM lokacija ORDER BY drzava")
    drzave = c.fetchall()
    drzava_kje = bottle.request.forms.drzava_kje
    mesto_kje = bottle.request.forms.mesto_kje
    letalisce_kje = bottle.request.forms.letalisce_kje
    drzava_kam = bottle.request.forms.drzava_kam
    mesto_kam = bottle.request.forms.mesto_kam
    letalisce_kam = bottle.request.forms.letalisce_kam
    if "None" in [drzava_kje, mesto_kje, letalisce_kje, drzava_kam, mesto_kam, letalisce_kam]:
        return bottle.template("main.html", ime=ime, username=username,
                               napaka="Prosimo, izpolnite vsa polja!", drzave=drzave)
    elif letalisce_kje == letalisce_kam:
        return bottle.template("main.html", ime=ime, username=username,
                               napaka="Začetno in končno letališče se morata razlikovati, prosimo ponovno izpolnite obrazec.",
                               drzave=drzave)
    else:
        izbor = get_leti(letalisce_kje, letalisce_kam, drzava_kje, drzava_kam)
        leti_mesto = get_leti_mesto(mesto_kje, drzava_kje, mesto_kam, drzava_kam)
        leti_mesto_drzava = get_leti_mesto_drzava(mesto_kje, drzava_kje, mesto_kam, drzava_kam)
        if izbor == []:
            return bottle.template("leti.html", ime=ime, username=username,
                                   letalisce_kje=letalisce_kje, letalisce_kam=letalisce_kam,
                                   napaka="Za relacijo \""+letalisce_kje+" ("+mesto_kje+", "+drzava_kje+") : "+letalisce_kam+" ("+mesto_kam+", "+drzava_kam+")\" ni znanih letov. "+" "+"Poizkusite ponovno s kakterim drugim letališčem v bližini.",
                                   leti_mesto=leti_mesto, leti_mesto_drzava=leti_mesto_drzava, izbor=izbor)
        else:
            return bottle.template("leti.html", ime=ime, username=username,
                                   letalisce_kje=letalisce_kje, letalisce_kam=letalisce_kam,
                                   napaka=None, leti_mesto_drzava=leti_mesto_drzava,
                                   izbor=izbor, leti_mesto=leti_mesto)
664de2c3cf2507ac43efa22105a51b1e14ad441a
3,658,472
def generate_data_from_cvs(csv_file_paths):
    """Generate data from a list of CSV specifications.

    Each item in `csv_file_paths` is a (csv_file_path, column_name, label)
    triple; `read_csv` is assumed to be a helper defined elsewhere in the
    module.

    `csv_file_paths`: A list of (CSV file path, column name, label) triples
    """
    data = []
    for item in csv_file_paths:
        values = read_csv(item[0], item[1])
        data.append([
            item[2],
            values
        ])
    return data
1c9f393a18edc9c2fcc3f28cdbeb71fb9c006731
3,658,473
import math

import torch


def log_density_gaussian(x, mu, logvar):
    """Calculates the log density of a Gaussian.

    Parameters
    ----------
    x: torch.Tensor or np.ndarray or float
        Point at which to evaluate the density.
    mu: torch.Tensor or np.ndarray or float
        Mean.
    logvar: torch.Tensor or np.ndarray or float
        Log variance.
    """
    normalization = - 0.5 * (math.log(2 * math.pi) + logvar)
    inv_var = torch.exp(-logvar)
    log_density = normalization - 0.5 * ((x - mu)**2 * inv_var)
    return log_density
3fdc751aa58b3ec82e1aa454f593879d5da4c310
3,658,474
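A sanity check of the density above against the closed form: a standard normal (mu = 0, logvar = 0) evaluated at x = 0 has log density -0.5 * log(2*pi) ≈ -0.9189.

import torch

x = torch.tensor(0.0)
mu = torch.tensor(0.0)
logvar = torch.tensor(0.0)  # variance = 1
print(log_density_gaussian(x, mu, logvar))  # tensor(-0.9189)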
def invalid_hexadecimal(statement):
    """Identifies a problem caused by an invalid character in a hexadecimal number."""
    # `_` is assumed to be the gettext-style translation function used
    # throughout the module.
    if statement.highlighted_tokens:  # Python 3.10
        prev = statement.bad_token
        wrong = statement.next_token
    else:
        prev = statement.prev_token
        wrong = statement.bad_token

    if not (prev.immediately_before(wrong) and prev.string.lower().startswith("0x")):
        return {}

    hint = _("Did you make a mistake in writing a hexadecimal integer?\n")
    cause = _(
        "It looks like you used an invalid character (`{character}`) in a hexadecimal number.\n\n"
        "Hexadecimal numbers are base 16 integers that use the symbols `0` to `9`\n"
        "to represent values 0 to 9, and the letters `a` to `f` (or `A` to `F`)\n"
        "to represent values 10 to 15.\n"
        "In Python, hexadecimal numbers start with either `0x` or `0X`,\n"
        "followed by the characters used to represent the value of that integer.\n"
    ).format(character=wrong.string[0])
    return {"cause": cause, "suggest": hint}
a0b252001dd1f0f466302a131c2a460743a8c197
3,658,475
def get_pool_name(pool_id): """Returns AS3 object name for TLS profiles related to pools :param pool_id: octavia pool id :return: AS3 object name """ return "{}{}".format(constants.PREFIX_TLS_POOL, pool_id)
2a850d48f52d822712cdfc3543532c9b0dd80fd6
3,658,476
def search_sliceable_by_yielded_chunks_for_str(sliceable, search_string, starting_index, down, case_insensitive): """This is the main entry point for everything in this module.""" for chunk, chunk_start_idx in search_chunk_yielder(sliceable, starting_index, down): found_at_chunk_idx = search_list_for_str(chunk, search_string, 0 if down else len(chunk) - 1, down, case_insensitive) if found_at_chunk_idx is not None: return found_at_chunk_idx + chunk_start_idx return None
7179179403098cd1d3993a35cf59c9162384ac4d
3,658,477
def split_page(array, limit, index):
    """
    Split the array into pages of the requested size and return the page
    at the given index.

    :param array: the array to split
    :param limit: the size of each page
    :param index: the 1-based page number to return
    :return: the requested slice of the array
    """
    end = index * limit
    start = end - limit
    return array[start:end]
ecce83d6e2e09d47e124536f294ece1e1631e6b6
3,658,478
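For example, page 2 of a ten-item list with three items per page:

items = list(range(10))
print(split_page(items, limit=3, index=2))  # [3, 4, 5]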
import numpy as np


def creatKdpCols(mcTable, wls):
    """
    Create the KDP column

    Parameters
    ----------
    mcTable: output from getMcSnowTable()
    wls: wavelength (iterable) [mm]

    Returns
    -------
    mcTable with an empty column 'sKDP_*' for storing the calculated KDP
    of a given wavelength.
    """
    for wl in wls:
        wlStr = '{:.2e}'.format(wl)
        mcTable['sKDP_{0}'.format(wlStr)] = np.ones_like(mcTable['time'])*np.nan
    return mcTable
9adc20c1ff94778bec4551156b5774863eb2203f
3,658,479
def get_products_by_user(user_openid, allowed_keys=None, filters=None): """Get all products that user can manage.""" return IMPL.get_products_by_user(user_openid, allowed_keys=allowed_keys, filters=filters)
458664aa75c5b423ccfb2a80287c565cae51e0d0
3,658,480
import numpy as np


def sample_from_ensemble(models, params, weights=None, fallback=False, default=None):
    """Sample a model in proportion to `weights` and execute it with `params`.

    If `fallback` is true, call a different model from the ensemble when the
    selected model throws an error. If `default` is not None, return it when
    all models fail. `ergo` is assumed to be imported in the surrounding
    module.
    """
    if len(models) > 1:
        model = ergo.random_choice(models, weights)
    else:
        model = models[0]
    try:
        result = model(**params)
        if np.isnan(result):
            # treat a NaN result like a failure so the fallback logic applies
            raise KeyError
        return result
    except (KeyError, IndexError):
        if fallback and len(models) > 1:
            models_copy = models.copy()
            weights_copy = weights.copy()
            i = models.index(model)
            del models_copy[i]
            del weights_copy[i]
            return sample_from_ensemble(
                models_copy, params, weights_copy, fallback, default
            )
        return default
c771108cb36cff2cb48af22a9efaad749d267ce0
3,658,481
def Flatten(matrix): """Flattens a 2d array 'matrix' to an array.""" array = [] for a in matrix: array += a return array
00389b4dd295274d8081331d6ae78f233f0b5b59
3,658,482
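A one-line demonstration of the single-level flattening performed by Flatten:

print(Flatten([[1, 2], [3], [4, 5]]))  # [1, 2, 3, 4, 5]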
def create_verification_token(
        data: dict
) -> VerificationTokenModel:
    """
    Save a Verification Token instance to the database.

    Args:
        data (dict): expects a 'user_id' key and an optional 'token_type'
            key (defaults to 'SMS').

    Returns:
        VerificationToken: Verification Token entity of
            VerificationTokenModel object

    Raises:
        None
    """
    orm_verification_token = VerificationTokenModel(
        user_id=data.get('user_id'),
        token_type=data.get('token_type', 'SMS'),
        token=True
    )
    orm_verification_token.save()

    return orm_verification_token
9008bc298c8e8075031f7e14e8cb0f288e894869
3,658,483
from typing import Sequence, Tuple, Union

import numpy as np


def _find_highest_cardinality(arrays: Union[int, Sequence, np.ndarray, Tuple]) -> int:
    """Find the highest cardinality among the given arrays.

    Args:
        arrays: a list of arrays or a single array

    Returns:
        The highest cardinality of the given arrays.
    """
    # scalars without __len__ are ignored; the trailing [1] is the floor value
    return max([len(array) for array in arrays if hasattr(array, "__len__")] + [1])
abe9ad85ffabb88f9097b9c2de97319f1342f586
3,658,484
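A small demonstration of the cardinality helper: scalars (no __len__) fall back to the floor value of 1, and the longest sequence wins.

import numpy as np

print(_find_highest_cardinality([3, [1, 2], np.ones(4)]))  # 4
print(_find_highest_cardinality([7, 42]))                  # 1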
import logging
from datetime import datetime, timedelta

import pytz


def get_yesterday() -> tuple:
    """Get yesterday's date and split it into year, month and day strings."""
    logging.debug("Starting get_yesterday function.")
    today = datetime.now(pytz.timezone("America/New_York"))
    yesterday = (today - timedelta(days=1)).strftime("%Y-%m-%d")
    year, month, day = yesterday.split("-")
    return year, month, day
1ccf514f0f121489d2e467c2f8bc3f8cc7715324
3,658,485
def rowmap(table, rowmapper, header, failonerror=False): """ Transform rows via an arbitrary function. E.g.:: >>> import petl as etl >>> table1 = [['id', 'sex', 'age', 'height', 'weight'], ... [1, 'male', 16, 1.45, 62.0], ... [2, 'female', 19, 1.34, 55.4], ... [3, 'female', 17, 1.78, 74.4], ... [4, 'male', 21, 1.33, 45.2], ... [5, '-', 25, 1.65, 51.9]] >>> def rowmapper(row): ... transmf = {'male': 'M', 'female': 'F'} ... return [row[0], ... transmf[row['sex']] if row['sex'] in transmf else None, ... row.age * 12, ... row.height / row.weight ** 2] ... >>> table2 = etl.rowmap(table1, rowmapper, ... header=['subject_id', 'gender', 'age_months', ... 'bmi']) >>> table2 +------------+--------+------------+-----------------------+ | subject_id | gender | age_months | bmi | +============+========+============+=======================+ | 1 | 'M' | 192 | 0.0003772112382934443 | +------------+--------+------------+-----------------------+ | 2 | 'F' | 228 | 0.0004366015456998006 | +------------+--------+------------+-----------------------+ | 3 | 'F' | 204 | 0.0003215689675106949 | +------------+--------+------------+-----------------------+ | 4 | 'M' | 252 | 0.0006509906805544679 | +------------+--------+------------+-----------------------+ | 5 | None | 300 | 0.0006125608384287258 | +------------+--------+------------+-----------------------+ The `rowmapper` function should accept a single row and return a single row (list or tuple). """ return RowMapView(table, rowmapper, header, failonerror=failonerror)
dabceae8171330d3f8c4cdba7b50be2106ad1438
3,658,486
def squeeze(dataset, how: str = 'day'): """ Squeezes the data in dataset by close timestamps Args: dataset (DataFrame) - the data to squeeze how (str) - one of 'second', 'minute', 'hour', 'day', 'month' (default day) Returns: dataset (DataFrame) - a dataframe where the indexes are squeezed together by closely related timestamps determined by parameter how """ return dataset.groupby(by = lambda ts: timestamp_floor(ts, how = how))
e41cbc4e054218b1f88ed0745fcc980df29ac8d4
3,658,487
def callback(): """ Process response for "Login" try from Dropbox API. If all OK - redirects to ``DROPBOX_LOGIN_REDIRECT`` url. Could render template with error message on: * oAuth token is not provided * oAuth token is not equal to request token * Error response from Dropbox API Default template to render is ``'dropbox/callback.html'``, you could overwrite it with ``DROPBOX_CALLBACK_TEMPLATE`` config var. """ # Initial vars dropbox = current_app.extensions['dropbox'] template = dropbox.DROPBOX_CALLBACK_TEMPLATE or 'dropbox/callback.html' # Get oAuth token from Dropbox oauth_token = request.args.get('oauth_token') if not oauth_token: return render_template(template, error_oauth_token=True) # oAuth token **should** be equal to stored request token try: key, secret = session.get(DROPBOX_REQUEST_TOKEN_KEY) or (None, None) except ValueError: return render_template(template, error_request_token=True) if oauth_token != key: return render_template(template, error_not_equal_tokens=True) # Do login with current request token try: dropbox.login(OAuthToken(key, secret)) except ErrorResponse as e: return render_template(template, error_response=True, error=e) # Redirect to resulted page redirect_to = safe_url_for(dropbox.DROPBOX_LOGIN_REDIRECT or '/') return redirect(redirect_to)
8b35d67d065a5ec65606b6e505cfccc51460fe1c
3,658,488
def get_ws_param(args, attr):
    """Get the corresponding warm-start parameter; if it does not exist,
    use the value of the general parameter."""
    assert hasattr(args, attr), 'Invalid warm start parameter!'
    val = getattr(args, attr)
    if hasattr(args, 'ws_' + attr):
        ws_val = getattr(args, 'ws_' + attr)
        if isinstance(ws_val, str):
            ws_val = ws_val.strip()
        if ws_val or isinstance(ws_val, list) or isinstance(ws_val, int) or isinstance(ws_val, float):
            val = ws_val
    return val
ea1d762654153602f8ad54048e54995c26304e40
3,658,489
def _redundant_relation(lex: lmf.Lexicon, ids: _Ids) -> _Result: """redundant relation between source and target""" redundant = _multiples(chain( ((s['id'], r['relType'], r['target']) for s, r in _sense_relations(lex)), ((ss['id'], r['relType'], r['target']) for ss, r in _synset_relations(lex)), )) return {src: {'type': typ, 'target': tgt} for src, typ, tgt in redundant}
cc32c55a35cd7056a249ad05bd0b483af18fcd3a
3,658,490
def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None): """ Creates a pymatgen PhononBandStructure from a band.yaml file. The labels will be extracted from the dictionary, if present. If the 'eigenvector' key is found the eigendisplacements will be calculated according to the formula: \\exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v and added to the object. Args: bands_path: path to the band.yaml file has_nac: True if the data have been obtained with the option --nac option. Default False. labels_dict: dict that links a qpoint in frac coords to a label. """ return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict)
40b135c09c829348d0693574b745ad5c114ec037
3,658,491
import subprocess
import logging
import ipaddress


def get_peer_ip(result_host_dic: dict):
    """
    Find peer multiaddresses based on peer ID.

    The `Address` class is assumed to be defined in the surrounding module.

    :param result_host_dic: {provider_peerID: who provides (peerID)}
    :return: dict {provider_peerID: Address[]}
    """
    provider_ip = {}
    for peer in result_host_dic.keys():
        process = subprocess.Popen(['/root/ipfs_bin/ipfs', 'dht', 'findpeer', peer],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            r_code = process.wait(timeout=300)
            if r_code != 0:
                logging.info(f"Error on IPFS findpeer with Peer {peer} and exit code {r_code}")
                provider_ip[peer] = []
                return provider_ip

            # case of no route found
            for line in process.stderr.readlines():
                if 'Error' in str(line):
                    logging.info(f"Error on IPFS findpeer with Peer {peer} output {str(line)}")
                    provider_ip[peer] = []
                    return provider_ip

            provider_ip[peer] = []
            with open(f'{peer}_ip.txt', 'w+') as stdout:
                for line in process.stdout.readlines():
                    line = line.decode('utf-8')
                    # store all peer addresses in the file
                    stdout.write(line)
                    line = line.replace("\n", "")
                    line = line.split("/")
                    ip_type = line[1]
                    ip_value = line[2]
                    protocol = line[3]
                    port = line[4]
                    if ip_type == 'ip6' and ip_value == '::1':
                        # local v6, ignore
                        continue
                    elif ip_type == 'ip4':
                        # exclude private ip addresses
                        if ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('10.0.0.0/8') or \
                                ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('172.16.0.0/12') or \
                                ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('127.0.0.0/8') or \
                                ipaddress.ip_address(ip_value) in ipaddress.IPv4Network('192.168.0.0/16'):
                            continue
                    # add valid address info
                    logging.info(f'Peer {peer} has external IP {ip_value}:{port}, {ip_type}, {protocol}')
                    if peer not in provider_ip.keys():
                        provider_ip[peer] = []
                    address = Address(ip_value, ip_type, port, protocol)
                    provider_ip[peer].append(address)
        except subprocess.TimeoutExpired:
            logging.info(f"Timeout for {peer}")
    return provider_ip
2bed8cf2be996d0d71516eb9c000ea7b2f0212b8
3,658,492
def LinterPath(): """Ascertain the dxl.exe path from this .py files path because sublime.packages_path is unavailable at startup.""" ThisPath = abspath(dirname(__file__)) if isfile(ThisPath): # We are in a .sublime-package file in the 'Installed Package' folder return abspath(join(ThisPath, '..', '..', 'Packages', 'DXL', 'Lint', 'dxl.exe')) else: # We are in a subfolder of the 'Packages' folder return abspath(join(ThisPath, '..', 'DXL', 'Lint', 'dxl.exe'))
5e7e8e5761b69ba3383b10af92f4d9a442bab69e
3,658,493
import base64


def encrypt_and_encode(data, key):
    """ Encrypts and encodes `data` using `key`. """
    # `aes_encrypt` is assumed to be defined in the surrounding module.
    return base64.urlsafe_b64encode(aes_encrypt(data, key))
b318e5e17c7a5b8f74036157ce547a3c0d68129c
3,658,494
import pandas as pd


def _get_undelimited_identifier(identifier):
    """
    Removes delimiters from the identifier if it is delimited.
    `_is_delimited_identifier` is assumed to be defined in the same module.
    """
    if pd.notna(identifier):
        identifier = str(identifier)
        if _is_delimited_identifier(identifier):
            return identifier[1:-1]
    return identifier
cd31b5cd2aea8f6c115fa117da30960f5f6dd8d8
3,658,495
import numpy as np


def build_movie_json(mongodb_result, hug_timer):
    """
    For reducing the duplicate lines in the 'get_goat_movies' function.
    TODO: Modify nodejs code if integrating this info!
    """
    combined_json_list = []
    movie_vote_quantities = []
    for result in mongodb_result:
        total_votes = int(result['goat_upvotes'] + result['goat_downvotes'])
        movie_vote_quantities.append(total_votes)

    # Mean (rather than median) vote quantity is used for identifying outliers
    mean_vote_quantity = np.mean(movie_vote_quantities)
    std_deviation = np.std(movie_vote_quantities)

    for result in mongodb_result:
        total_result_votes = int(result['goat_upvotes'] + result['goat_downvotes'])
        goat_score = int((result['goat_upvotes'] / total_result_votes)*100)  # % of votes that are upvotes

        absolute_diff = abs(total_result_votes - mean_vote_quantity)
        if absolute_diff <= 2*std_deviation:
            # Within 2 std deviations: don't punish the goat_score
            adjustment = 1
        else:
            # Beyond 2 std deviations: punish the score by 10% per extra std deviation
            adjustment = 1 - (((absolute_diff/std_deviation) - 2) * 0.1)

        adjusted_goat_score = int(goat_score * adjustment)

        combined_json_list.append({'imdbID': result['imdbID'],
                                   'year': result['year'],
                                   'title': result['title'],
                                   'imdb_rating': result['imdbRating'],
                                   'runtime': result['runtime'],
                                   'upvotes': result['goat_upvotes'],
                                   'downvotes': result['goat_downvotes'],
                                   'adjustment': adjustment,
                                   'goat_score': adjusted_goat_score})

    return combined_json_list
27eeac479911c24e46f7df2b34aa7d4897e4b94b
3,658,496
def has_product_been_used(uuid): """Check if this product has been used previously.""" existing = existing_processed_products() if not isinstance(existing, pd.DataFrame): return False has_uuid = not existing.query("uuid == @uuid").empty return has_uuid
f361c5177c0152179300d6c1356139ba8f7face9
3,658,497
def _FilterMemberData(
    mr, owner_ids, committer_ids, contributor_ids, indirect_member_ids,
    project):
  """Return a filtered list of members that the user can view.

  In most projects, everyone can view the entire member list.  But,
  some projects are configured to only allow project owners to see
  all members. In those projects, committers and contributors do not
  see any contributors.  Regardless of how the project is configured
  or the role that the user plays in the current project, we include
  any indirect members through user groups that the user has access
  to view.

  Args:
    mr: Commonly used info parsed from the HTTP request.
    owner_ids: list of user IDs for project owners.
    committer_ids: list of user IDs for project committers.
    contributor_ids: list of user IDs for project contributors.
    indirect_member_ids: list of user IDs for users who have an indirect
        role in the project via a user group, and that the logged in user
        is allowed to see.
    project: the Project we're interested in.

  Returns:
    A sorted list of owner, committer, and visible indirect member IDs.
    If the project displays contributors to non-owners, or the signed-in
    user has the necessary permissions, contributor IDs are included too.
  """
  visible_members_ids = set()

  # Everyone can view owners and committers
  visible_members_ids.update(owner_ids)
  visible_members_ids.update(committer_ids)

  # The list of indirect members is already limited to ones that the user
  # is allowed to see according to user group settings.
  visible_members_ids.update(indirect_member_ids)

  # If the user is allowed to view the list of contributors, add those too.
  if permissions.CanViewContributorList(mr, project):
    visible_members_ids.update(contributor_ids)

  return sorted(visible_members_ids)
be258b2d0559423a70fb5722734144f6a946b70e
3,658,498
def escape_name(name): """Escape sensor and request names to be valid Python identifiers.""" return name.replace('.', '_').replace('-', '_')
856b8fe709e216e027f5ab085dcab91604c93c2e
3,658,499
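A quick example of the escaping rule above:

print(escape_name("rx0.device-status"))  # 'rx0_device_status'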