Columns:
  content  string  (lengths 35 to 762k)
  sha1     string  (length 40)
  id       int64   (0 to 3.66M)
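A minimal sketch of iterating over rows with the schema above, assuming the dump is available as a Parquet file; the file name "corpus.parquet" is hypothetical and not part of the dataset itself.

import pandas as pd

# Load only the three documented columns; the file name is an assumption.
df = pd.read_parquet("corpus.parquet", columns=["content", "sha1", "id"])
for row in df.itertuples(index=False):
    assert len(row.sha1) == 40  # hex-encoded SHA-1 of the content
    print(row.id, len(row.content))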
from collections import namedtuple


def complete_session(session: namedtuple, speeches: list) -> dict:
    """
    This will result in loss of data because content will be reduced to
    speeches. HTML_classes, speaker_flow, speaker_role etc. will not be given
    any longer since it's assumed that speakers are either members of
    parliament or ministers. Another important reduction is that speeches
    have been stripped of annotations like applause or calls.

    Updated keys in speeches:
        date
        protocol_no
        agenda_item - topic
        speaker
        party - if mop, will be ministry if minister
        speech - complete speech; no hall action, no interruptions

    Updated keys in session:
        date
        period
        index
        content - all speeches of a single session

    Speeches are given as a list of complete sentences.
    """
    reduced_data = {}
    period = int(session.protocol_no.split('/')[0])
    index = int(session.protocol_no.split('/')[-1])
    reduced_data["date"] = session.date
    reduced_data["period"] = period
    reduced_data["index"] = index
    reduced_data["content"] = speeches
    return reduced_data
185e1518e48252fcdc222aeddf8f2ba30884c93e
3,656,861
import fileinput
import re


def replace_text_in_file(file_path, replace_this, for_that, case_insensitive=False,
                         is_regex=False, keep_copy=False, number_of_subs=0):
    """
    Replace a string or regex (if is_regex is set) from a file given in
    file_path with another string. This is a replacement for sed if needed.

    @param str file_path: path to the file to be changed
    @param str replace_this: string or regex to match and replace
    @param str for_that: string that will replace the match
    @param bool case_insensitive: flag to indicate if case is important
    @param bool is_regex: flag to indicate if replace_this is a regular
        expression or a plain string
    @param bool keep_copy: flag to keep copy of original file or not. The
        original file will be timestamped
    @param int number_of_subs: number of times to do the substitution.
        A zero means replace all
    @rtype: tuple
    """
    if not is_regex:
        replace_this = re.escape(replace_this)
    # duplicate_file_with_stamp is an external helper that copies the file
    # with a timestamp suffix.
    new_file_path = duplicate_file_with_stamp(file_path) if keep_copy else file_path
    replace_all = number_of_subs == 0
    for current_line in fileinput.input(file_path, inplace=True):
        if replace_all or number_of_subs > 0:
            current_line, num_subs_made = re.subn(
                replace_this, for_that, current_line,
                flags=(re.IGNORECASE if case_insensitive else 0),
                count=0 if replace_all else number_of_subs)
            if not replace_all:
                number_of_subs -= num_subs_made
        # With inplace=True, stdout is redirected into the file, so the
        # (possibly modified) line must be written back explicitly.
        print(current_line, end='')
    return file_path, new_file_path
12c978759fd5a31bacb396d3068ab762b442dd27
3,656,862
import mmcv


def load_annotations(ann_file):
    """Load the annotation according to ann_file into video_infos."""
    video_infos = []
    anno_database = mmcv.load(ann_file)
    for video_name in anno_database:
        video_info = anno_database[video_name]
        video_info['video_name'] = video_name
        video_infos.append(video_info)
    return video_infos
ac337917f313e5c695a5388c481e12787d7d78a0
3,656,863
from typing import List
from typing import Dict


def seq_hist(seq_lens: List[int]) -> Dict[int, int]:
    """Returns a dict of sequence_length/count key/val pairs.

    For each entry in the list of sequence lengths, tabulates the frequency
    of appearance in the list and returns the data as a dict. Useful for
    histogram operations on sequence length.
    """
    seq_count = {}
    for slen in seq_lens:
        if slen in seq_count:
            seq_count[slen] += 1
        else:
            seq_count[slen] = 1
    return seq_count
5778b7566d1b64e8db0e2dce6bbf53e06cdb196d
3,656,865
from typing import List

import numpy as np
# Circuit is assumed to come from pytket, which supports the opgroup keyword.
from pytket import Circuit


def clifford_canonical_F(
    pauli_layer: List[int], gamma: np.ndarray, delta: np.ndarray
) -> Circuit:
    """
    Returns a Hadamard free Clifford circuit using the canonical form of
    elements of the Borel group introduced in
    https://arxiv.org/abs/2003.09412. The canonical form has the structure
    O P CZ CX where O is a Pauli operator, P is a layer of sqrt(Z) gates,
    CZ is a layer of CZ gates, and CX is a layer of CX gates. The inputs
    describe on which qubits the gates in these layers act.

    :param pauli_layer: Description of which Pauli gate should act on each
        qubit. This is an element of {0,1,2,3}^n with 0 -> X, 1 -> Y,
        2 -> Z, 3 -> I (as implemented below).
    :type pauli_layer: List[int]
    :param gamma: Describes on which qubits CZ acts. In particular the
        circuit contains CZ_{i,j} if gamma[i][j]=1. The gates are ordered
        such that the control qubit index increases with time. The circuit
        includes S_i if gamma[i][i]=1.
    :type gamma: List[List[int]]
    :param delta: Describes on which qubits CX acts. In particular the
        circuit contains CX_{i,j} if delta[i][j]=1. The gates are ordered
        such that the control qubit index increases with time.
    :type delta: List[List[int]]
    :return: A Hadamard free Clifford circuit.
    :rtype: Circuit
    """
    circ = Circuit(len(pauli_layer))

    # Add layer of CX gates
    for j in range(len(delta)):
        for i in range(j):
            if delta[i][j]:
                circ.CX(i, j, opgroup="Clifford 2")

    # Add layer of CZ gates
    for j in range(len(gamma)):
        for i in range(j):
            if gamma[i][j]:
                circ.CZ(i, j, opgroup="Clifford 2")

    # Add layer of S gates
    for i in range(len(gamma)):
        if gamma[i][i]:
            circ.S(i, opgroup="Clifford 1")

    # Add Pauli gates; the value 3 adds no gate (identity).
    for i, gate in enumerate(pauli_layer):
        if gate == 0:
            circ.X(i, opgroup="Clifford 1")
        elif gate == 1:
            circ.Y(i, opgroup="Clifford 1")
        elif gate == 2:
            circ.Z(i, opgroup="Clifford 1")
    return circ
9818866b3196ccf9608f7ea8a17145bfa9ddb2d2
3,656,867
def calculate_second_moment_nondegenerate(
    mu1: float, mu2: float, sigma1: float, sigma2: float, a: float, alpha: float
) -> float:
    """The second (raw) moment of a random variable :math:`\\min(Y_1, Y_2)`.

    Args:
        mu1: mean of the first Gaussian random variable :math:`Y_1`
        mu2: mean of the second Gaussian random variable :math:`Y_2`
        sigma1: standard deviation of the first Gaussian random variable :math:`Y_1`
        sigma2: standard deviation of the second Gaussian random variable :math:`Y_2`
        a: value of a(X1, X2)
        alpha: value of alpha(X1, X2)

    Note:
        For a Gaussian variable, the relationship between the raw second
        moment, mean, and the standard deviation (which is calculated using
        the *central* moment) is

        .. math::

           \\nu_2 = \\nu_1^2 + \\sigma^2
    """
    # The first, second and third term
    first = (mu1 ** 2 + sigma1 ** 2) * numeric.normal_cdf(alpha)
    secnd = (mu2 ** 2 + sigma2 ** 2) * numeric.normal_cdf(-alpha)
    third = (mu1 + mu2) * a * numeric.normal_pdf(alpha)
    return first + secnd - third
74869b0b461777ae9cce658829c2c90ff9a4adff
3,656,868
from math import sqrt, sin, cos

# Quaternion component indices and zero-vector tolerance. The original code
# mistakenly imported X from the re module; these module-level constants are
# what the indexing below assumes (the tolerance value is an assumption).
X, Y, Z, W = 0, 1, 2, 3
Q_EPSILON = 1e-10


def q_make(x, y, z, angle):
    """q_make: make a quaternion given an axis and an angle (in radians)

    notes:
    - rotation is counter-clockwise when rotation axis vector is pointing at you
    - if angle or vector are 0, the identity quaternion is returned.

    double x, y, z : axis of rotation
    double angle   : angle of rotation about axis in radians
    """
    destQuat = [0.0, 0.0, 0.0, 0.0]

    # normalize vector
    length = sqrt(x * x + y * y + z * z)

    # if zero vector passed in, just return identity quaternion
    if length < Q_EPSILON:
        destQuat[X] = 0
        destQuat[Y] = 0
        destQuat[Z] = 0
        destQuat[W] = 1
        return destQuat

    x /= length
    y /= length
    z /= length

    cosA = cos(angle / 2.0)
    sinA = sin(angle / 2.0)

    destQuat[W] = cosA
    destQuat[X] = sinA * x
    destQuat[Y] = sinA * y
    destQuat[Z] = sinA * z
    return destQuat
bd95a3d6f89599297a089d75882aa319e474a1b7
3,656,869
def create_mssql_pymssql(username, password, host, port, database, **kwargs):  # pragma: no cover
    """
    create an engine connected to a mssql database using pymssql.
    """
    return create_engine(
        _create_mssql_pymssql(username, password, host, port, database),
        **kwargs
    )
a4d644839879ae374ba091f1b9e79fd210c03e3e
3,656,870
def get_right_list_elements(result):
    """Some of the results are empty - therefore the try-except. Others are
    lists with more than one element and only specific elements are relevant.

    Args:
        result (dict of lists): result of the xpath elements.

    Returns:
        dict of strs
    """
    for key in ["title", "ort", "merkmale", "weitere_eigenschaften", "beschreibung"]:
        try:
            result[key] = result[key][0]
        except (IndexError, KeyError):
            pass
    for key in ["preis", "anzahl_raeume", "wohnflaeche", "grundstuecksflaeche"]:
        try:
            result[key] = result[key][1]
        except (IndexError, KeyError):
            pass
    return result
b81e80363f82dfe43878b3d8cb319f7129ebfc50
3,656,871
import numpy as np


def gen_pixloc(frame_shape, xgap=0, ygap=0, ysize=1., gen=True):
    """
    Generate an array of physical pixel coordinates

    Parameters
    ----------
    frame_shape : tuple
        shape of the uniformly illuminated and normalized flat field frame
    xgap : int (optional)
    ygap : int (optional)
    ysize : float (optional)
    gen : bool, optional
        Only allows True right now

    Returns
    -------
    locations : ndarray
        A 3D array containing the x center, y center, x width and y width
        of each pixel. The returned array has a shape: frame.shape + (4,)
    """
    # dnum = settings.get_dnum(det)
    msgs.info("Deriving physical pixel locations on the detector")
    locations = np.zeros((frame_shape[0], frame_shape[1], 4))
    if gen:
        msgs.info("Pixel gap in the dispersion direction = {0:4.3f}".format(xgap))
        msgs.info("Pixel size in the dispersion direction = {0:4.3f}".format(1.0))
        xs = np.arange(frame_shape[0] * 1.0) * xgap
        xt = 0.5 + np.arange(frame_shape[0] * 1.0) + xs
        msgs.info("Pixel gap in the spatial direction = {0:4.3f}".format(ygap))
        msgs.info("Pixel size in the spatial direction = {0:4.3f}".format(ysize))
        ys = np.arange(frame_shape[1]) * ygap * ysize
        yt = ysize * (0.5 + np.arange(frame_shape[1] * 1.0)) + ys
        xloc, yloc = np.meshgrid(xt, yt)
        # xwid, ywid = np.meshgrid(xs, ys)
        msgs.info("Saving pixel locations")
        locations[:, :, 0] = xloc.T
        locations[:, :, 1] = yloc.T
        locations[:, :, 2] = 1.0
        locations[:, :, 3] = ysize
    else:
        msgs.error("Have not yet included an algorithm to automatically generate pixel locations")
    return locations
e09bb42cc0b003f6cedf5eed79ee65293aab13e2
3,656,872
import pandas as pd


def select(df: pd.DataFrame, time_key, from_time='00-00-00 00', to_time='99-01-01 00'):
    """
    :param df: input frame
    :param time_key: name of the time column to filter on
    :param from_time: inclusive lower bound on the time value
    :param to_time: exclusive upper bound on the time value
    :return: the selected rows, reindexed
    :rtype: pandas.DataFrame
    """
    select_index = (df[time_key] >= from_time) & (df[time_key] < to_time)
    return df.loc[select_index, :].reset_index(drop=True)
e925c2543bfabf9091fae18d9dc47c01364e1df8
3,656,873
def load_apogee_distances(dr=None, unit='distance', cuts=True, extinction=True, keepdims=False):
    """
    Load apogee distances (absolute magnitude from stellar model)

    :param dr: Apogee DR
    :type dr: int
    :param unit: which unit you want to get back

                   - "absmag" for absolute magnitude
                   - "fakemag" for fake magnitude
                   - "distance" for distance in parsec
    :type unit: string
    :param cuts: Whether to cut bad data (negative parallax and percentage
        error more than 20%), or a float to set the threshold
    :type cuts: Union[boolean, float]
    :param extinction: Whether to take extinction into account, only affect
        when unit is NOT 'distance'
    :type extinction: bool
    :param keepdims: Whether to preserve indices the same as APOGEE allstar
        DR14, no effect when cuts=False, set to -9999 for bad indices when
        cuts=True keepdims=True
    :type keepdims: boolean
    :return: numpy array of ra, dec, array, err_array
    :rtype: ndarrays
    :History:
        | 2018-Jan-25 - Written - Henry Leung (University of Toronto)
        | 2021-Jan-29 - Updated - Henry Leung (University of Toronto)
    """
    fullfilename = apogee_distances(dr=dr)
    with fits.open(fullfilename) as F:
        hdulist = F[1].data
        # Convert kpc to pc
        distance = hdulist['BPG_dist50'] * 1000
        dist_err = (hdulist['BPG_dist84'] - hdulist['BPG_dist16']) * 1000

    allstarfullpath = allstar(dr=dr)
    with fits.open(allstarfullpath) as F:
        k_mag = F[1].data['K']
        if extinction:
            k_mag = extinction_correction(k_mag, F[1].data['AK_TARG'])
        ra = F[1].data['RA']
        dec = F[1].data['DEC']

    # Bad index refers to nan index
    bad_index = np.argwhere(np.isnan(distance))

    if unit == 'distance':
        # removed astropy units because of -9999. is dimensionless, will have issues
        output = distance
        output_err = dist_err
    elif unit == 'absmag':
        absmag, absmag_err = mag_to_absmag(k_mag, 1 / distance * u.arcsec,
                                           (1 / distance) * (dist_err / distance))
        output = absmag
        output_err = absmag_err
    elif unit == 'fakemag':
        # fakemag requires parallax (mas)
        fakemag, fakemag_err = mag_to_fakemag(k_mag, 1000 / distance * u.mas,
                                              (1000 / distance) * (dist_err / distance))
        output = fakemag
        output_err = fakemag_err
    else:
        raise ValueError('Unknown unit')

    # Set the nan index to -9999. as they are bad and unknown. Not magic_number
    # as this is an APOGEE dataset
    output[bad_index], output_err[bad_index] = -9999., -9999.
    if cuts is False:
        pass
    else:
        distance[bad_index], dist_err[bad_index] = -9999., -9999.
        good_idx = ((dist_err / distance < (0.2 if cuts is True else cuts)) &
                    (distance != -9999.))

        if not keepdims:
            ra = ra[good_idx]
            dec = dec[good_idx]
            output = output[good_idx]
            output_err = output_err[good_idx]
        else:
            output[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.
            output_err[(dist_err / distance > (0.2 if cuts is True else cuts))] = -9999.

    return ra, dec, output, output_err
763d646c284cb056295ae57d7a7c9ced87964406
3,656,874
import json
import logging
from collections import defaultdict


def userstudy(config, data_train):
    """
    Update the model based on feedback from user study.

    - [config]: hyperparameters for model fine-tuning
    - [data_train]: data pool to sample from
    """
    def preprocess_data(doc, queries):
        """
        Create a new field in [doc] called [antecedent_map] which processes
        the user-labeled [antecedents]. Add all labeled spans to [queries].
        """
        ante_map = {}
        for entry in doc['antecedents']:
            span = tuple(entry[0])
            if entry[1] == -1:
                label = None
            elif entry[1] == 0:
                label = '0'
            else:
                label = [tuple(entry[1])]
            ante_map[span] = label
        doc['antecedent_map'] = ante_map
        del doc['antecedents']
        # update queries to know what has been queried
        queries[doc['doc_key']] = list(ante_map.keys())
        # return number of spans labeled
        return len(ante_map)

    # preprocess antecedents and get queries
    data_fp = config['userstudy'] / 'train_data.jsonl'
    data = []
    queries = defaultdict(list)
    num_queries = 0
    with open(data_fp, 'r') as f:
        for line in f:
            doc = json.loads(line)
            # update doc and queries
            n = preprocess_data(doc, queries)
            num_queries += n
            data.append(doc)

    # finetune model on data
    src_path = config['src_path']
    logging.info(
        f'Finetuning src model on {num_queries} queries from {len(data)} docs'
    )
    scores_dev, model = finetune_on_queries(config, data, config['userstudy'], src_path)

    # test model
    results_fp = config['userstudy'] / 'results_test.json'
    scores_test = eval_scores(model, config, "test")
    output_results(results_fp, config, 1, scores_test)
d3240142b55b202833364a9583b9c7c7e237bea2
3,656,875
import re


def clique_create(request):
    """
    Creates a new grouping in the database (this integration must be stored
    in the db to be useful)

    Arguments: /group-create "groupname" "@user1 @user2"
    """
    requesting_user_id = request.POST.get('user_id')
    args = re.findall(DOUBLE_QUOTE_ARG_REGEX, request.POST.get("text"))
    # Check to see if everything looks right
    if len(args) != 2:
        return make_clique_group_error("Error in arguments (Double quotes are required!). Usage:\n"
                                       "`/group-create \"groupName\" \"@user1 @user2\"`")
    if CliqueGroup.objects.filter(name=args[0]).count() > 0:
        return make_clique_group_error("This group <{}> already exists!".format(args[0]))
    # Move on to creating the group
    raw_group_members = re.findall(SLACK_ID_REGEX, args[1])
    group_users = []
    for slack_id in raw_group_members:
        try:
            group_users.append(CliqueUser.objects.get(slack_id=slack_id))
        except CliqueUser.DoesNotExist:
            # This is the first time that we've seen this user,
            # so we need to add them to the db
            new_user = CliqueUser(slack_id=slack_id)
            new_user.save()
            group_users.append(new_user)
    # Case where the owner is 1) new and 2) not in the group
    try:
        CliqueUser.objects.get(slack_id=requesting_user_id)
    except CliqueUser.DoesNotExist:
        # This is the first time that we've seen this user,
        # so we need to add them to the db
        CliqueUser(slack_id=requesting_user_id).save()
    new_group = CliqueGroup(
        creator=CliqueUser.objects.get(slack_id=requesting_user_id),
        name=args[0]
    )
    new_group.save()
    for clique_user in group_users:
        new_group.members.add(clique_user)
    new_group.save()
    # Testing response string
    resp_string = 'Group <{0}> has been created with users: '.format(args[0])
    resp_string += ' '.join(format_user(user.slack_id) for user in new_group.members.all())
    return JsonResponse({"replace_original": True, "text": resp_string})
b990079ad0685b8c524dec65166da40f0e664ef7
3,656,876
import warnings


def cg_atoms(atoms, units, sites, scale, scaleValue, siteMap, keepSingleAtoms, package):
    """
    Get positions for atoms in the coarse-grained structure and the final
    bond description. Returns a dictionary of the lattice, fractional
    coordinates, and bonds. Also provides the option to scale the lattice.

    Args
    ----
    atoms: pymatgen.core.Structure
        Pymatgen Structure object.
    units: list
        List of tuple(atomIndex, Image) for all atoms found in the building
        unit so far in the algorithm.
    sites: list
        Specifying atoms in each site-type. One list per site-type. I.e. for
        ZIF-8 (Zn(mIm)2) Zn is an A site, and the C, N, H (imidazolate ring)
        are B sites, so you would pass e.g. [["Zn"], ["C", "N", "H"]].
    scale: str
        Scaling method to be used. Currently supported:
            "min_xx": minimum bond length between any atoms.
            "min_ab": minimum bond length between building units.
            "avg_ab": average bond length between building units.
    scaleValue: float
        Length (Å) to scale the characteristic bond length (defined by
        "scale") to.
    siteMap: list
        A list of atoms to map each building unit to. Should be of the same
        length as the number of site-types. E.g. to map Zn(mIm)2 to a
        coarse-grained structure, siteMap = ["Si", "O"] would map all A
        sites (Zn) to Si, and all B sites (mIm) to O. If not set, will
        default to "Dummy Species" with labels DA, DB, DC, ... Note if
        creating an ASE Atoms object, real atoms must be used, and so
        siteMap *must* be set.
    keepSingleAtoms: bool
        If True, the chemical identity of the single atom building units
        will be preserved. E.g. for BIF-1-Li ( [LiB(im)]4 ) where Li and B
        are A sites, the final coarse-grained structure would keep the Li
        and B atoms, but add dummy species for the imidazolate units.
    package: str
        "pymatgen" or "ase". If set, will return the Structure/Atoms object
        of the specified package, respectively. As noted in siteMap, ASE
        requires that real elements are set for the Atoms object.
    """
    # Extract unit cell.
    lattice = atoms.lattice.copy()

    # Extract labels, positions, and images for each building unit.
    l, p, _ = zip(*[(l, *u.frac_img) for l, u in units.items()])

    # Extract bonds in format consistent with TopoCIF specification; i.e.
    # node1_label, node2_label, distance, sym_op1, x1, y1, z1, sym_op2,
    # x2, y2, z2, link_type, multiplicity. There will be a list of tuples,
    # one tuple per unit, and the length of each tuple will be the number
    # of bonds stored.
    b = [u.unit_bonds for u in units.values()]

    # Determine scaling type now, because can avoid calling next section
    # twice to calculate the bond distances if it is "min_xx" scaling.
    if scale is not None:
        scale = scale.lower()
        if scale == "min_xx":
            # Get all distances (ignoring self-distances along diagonal).
            d = lattice.get_all_distances(p, p)
            np.fill_diagonal(d, 1000)
            # Get scale factor and scale the lattice to the new volume.
            sf = (scaleValue / np.amin(d)) ** 3
            lattice = lattice.scale(lattice.volume * sf)
        elif scale in ["min_ab", "avg_ab"]:
            # Get the bond distances from the formatted bonds.
            _, d = format_bonds(lattice, l, p, b, return_lengths=True)
            # Get scale factor and scale the lattice to new volume.
            if scale == "min_ab":
                sf = (scaleValue / np.amin(d)) ** 3
            elif scale == "avg_ab":
                sf = (scaleValue / np.mean(d)) ** 3
            lattice = lattice.scale(lattice.volume * sf)
        else:
            warnings.warn(f"Scale method {scale} is not supported.")

    # Get the final TopoCIF-formatted bonds.
    b = format_bonds(lattice, l, p, b)

    # The atomMap must provide a one-to-one mapping for every site-type
    # in the structure.
    assert len(siteMap) == len(sites), "Provide a one-to-one " \
        "mapping of dummy-sites to atomic symbols " \
        f"({len(sites)} != {len(siteMap)})"

    # Relabel each atom with a new symbol.
    l, symbols, b = relabel(units, siteMap, keepSingleAtoms, b)

    # Sort structure information into a dictionary.
    s_info = {
        "lattice": lattice,
        "symbols": symbols,
        "labels": l,
        "frac_coords": p,
        "bonds": b
    }

    # If package specified return either a Pymatgen Structure object, or an
    # ASE Atoms object.
    s = py_structure(s_info["lattice"], s_info["symbols"], s_info["frac_coords"])
    if package is not None and package.lower() == "ase":
        s = AseAtomsAdaptor.get_atoms(s)

    return s_info, s
5cd2a6b8b0ce886b92912eb3dd8b14f0ea14f602
3,656,877
import numpy as np


def compute_msa_weights(msa, threshold=.8):
    """
    msa (Bio.Align.MultipleSeqAlignment): alignment for which sequence
        frequency based weights are to be computed
    threshold (float): sequence identity threshold for reweighting

    NOTE that columns where both sequences have a gap will not be taken
    into account when computing identity
    """
    seq_identities = np.zeros((len(msa), len(msa)))
    for i in range(len(msa)):
        for j in range(i + 1, len(msa)):
            seq_identities[i, j] = _compute_sequence_identity(msa[i], msa[j])
    seq_identities = seq_identities + np.diag(np.ones(len(msa)))
    ms = np.sum(seq_identities > threshold, 1)
    weights = 1. / ms
    return weights
13663501f57eef204533795c2271276a99b4a403
3,656,879
def search_storefront(client, phrase):
    """Execute storefront search on client matching phrase."""
    resp = client.get(reverse("search:search"), {"q": phrase})
    return [prod for prod, _ in resp.context["results"].object_list]
0509c39cd9adb0b4c6d0849c8b068dcb3455b807
3,656,880
def is_repo_in_config(config, repo, rev, hook_id):
    """Get if a repository is defined in a pre-commit configuration.

    Parameters
    ----------
    config : dict
      Pre-commit configuration dictionary.
    repo : str
      Repository to search.
    rev : str
      Repository tag revision.
    hook_id : str
      Hook identifier.

    Returns
    -------
    dict : Information about if the repository and the hook have been found.
    """
    response = {"repo_found": False, "hook_found": False, "same_rev": False}
    for repo_ in config["repos"]:
        if repo_["repo"] == repo:
            response["repo_found"] = True
            response["hook_found"] = hook_id in [hook["id"] for hook in repo_["hooks"]]
            response["same_rev"] = repo_["rev"] == rev
            break
    return response
855315c50f4bfe53a4f9b7a5d392bb539e364617
3,656,881
import transforms3d


def mat33_to_quat(mat):
    """
    Convert matrix to quaternion.

    :param mat: 3x3 matrix
    :return: list, quaternion [x, y, z, w]
    """
    wxyz = transforms3d.quaternions.mat2quat(mat)
    return [wxyz[1], wxyz[2], wxyz[3], wxyz[0]]
1dcedf919674895a1ed647314c328525e4068dfe
3,656,882
def reshape(x, new_shape):
    """
    Reshapes a tensor without changing its data.

    Args:
        x (Tensor): A tensor to be reshaped.
        new_shape (Union[int, list(int), tuple(int)]): The new shape should
            be compatible with the original shape. If the tuple has only one
            element, the result will be a 1-D tensor of that length. One
            shape dimension can be :math:`-1`. In this case, the value is
            inferred from the length of the tensor and remaining dimensions.

    Returns:
        Reshaped Tensor. Has the same data type as the original tensor `x`.

    Raises:
        TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
        ValueError: If new_shape is not compatible with the original shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> output = np.reshape(x, (3, 2))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (3, -1))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (6, ))
        >>> print(output)
        [-0.1  0.3  3.6  0.4  0.5 -3.2]
    """
    _check_input_tensor(x)
    return x.reshape(new_shape)
583ffc9e40c328586ec9d21b7a73cbc610eb5c29
3,656,883
import numpy as np


def update_depth(depth_grid, elapsed_ts, depth_factor):
    """Just in time update of depth for lake to pond

    Parameters
    ----------
    depth_grid: np.array like (float)
        grid of current lake depths
    elapsed_ts: float
        number of timesteps since start year
    depth_factor: float
        scaling factor applied to the elapsed-time term

    Returns
    -------
    np.array
        updated depth grid
    """
    new = np.zeros(depth_grid.shape)
    for row in range(depth_grid.shape[0]):
        # was shape[0]; wrong for non-square grids
        for col in range(depth_grid.shape[1]):
            new[row, col] = depth_grid[row, col] + (np.sqrt(elapsed_ts) / depth_factor)
    return new
e3fe2498421697ce584b385544a7501f54c02b85
3,656,884
def get_submissions(config, event_name, state='new'):
    """
    Retrieve a list of submissions and their associated files
    depending on their current status

    Parameters
    ----------
    config : dict
        configuration
    event_name : str
        name of the RAMP event
    state : str, optional
        state of the requested submissions (default is 'new')

    Returns
    -------
    List of tuples (int, List[str]) :
        (submission_id, [path to submission files on the db])

    Raises
    ------
    ValueError :
        when mandatory connection parameters are missing from config
    UnknownStateError :
        when the requested state does not exist in the database
    """
    if state not in STATES:
        raise UnknownStateError("Unrecognized state : '{}'".format(state))

    # Create database url
    db_url = URL(**config)
    db = create_engine(db_url)

    # Create a configured "Session" class
    Session = sessionmaker(db)

    # Link the relational model to the database
    Model.metadata.create_all(db)

    # Connect to the database and perform action
    with db.connect() as conn:
        session = Session(bind=conn)
        submissions = select_submissions_by_state(session, event_name, state)

        if not submissions:
            return []

        subids = [submission.id for submission in submissions]
        subfiles = [submission.files for submission in submissions]
        filenames = [[f.path for f in files] for files in subfiles]

    return list(zip(subids, filenames))
e724c44b00db489f27c42acf7b21ba06a4ce0def
3,656,885
def split_dataframe(df, size=10 * 1024 * 1024):
    """Splits huge dataframes (CSVs) into smaller segments of given size in bytes"""
    # size of each row
    row_size = df.memory_usage().sum() / len(df)
    # maximum number of rows in each segment
    row_limit = int(size // row_size)
    # number of segments
    seg_num = (len(df) + row_limit - 1) // row_limit
    # split df into segments
    segments = [df.iloc[i * row_limit: (i + 1) * row_limit] for i in range(seg_num)]
    return segments
46f34d388e6f596bfcf803b4569eb3015344bafb
3,656,886
from pathlib import Path
from typing import Optional


def convert_table_codes(input_filename: Path, output_filename: Path = None,
                        column: str = 'countryCode',
                        namespace: Optional[str] = None,
                        fuzzy: int = 0) -> Path:
    """
    Adds a 'regionCode' column to the given table containing iso-3 country codes.

    Parameters
    ----------
    input_filename: Path
    output_filename: Path
    column: str, default 'countryCode'
    namespace: {'iso2', 'iso3', 'm49'}; default None
    fuzzy: int; default 0
        The score to use when fuzzy matching when above 0. If 0, the regular
        code search is used instead.

    Returns
    -------
    path: Path
        Location of the output table.
    """
    table = load_table(input_filename)
    if column not in table.columns:
        message = "'{}' is not a valid column. Expected one of {}".format(
            column, list(table.columns))
        raise ValueError(message)

    old_values = table[column].values
    if fuzzy:
        new_values = [fuzzy_search(i, fuzzy) for i in old_values]
    else:
        new_values = [get_codes(i, namespace) for i in old_values]
    new_values = [(v['iso3'] if v else v) for v in new_values]
    table['regionCode'] = new_values

    if output_filename is None:
        output_filename = input_filename.with_suffix('.edited.tsv')
    elif output_filename.is_dir():
        output_filename = output_filename / input_filename.name
    opath = save_table(table, output_filename)
    return opath
1bba37fda512cf13e99a08a0ab7ebfdf10a4e330
3,656,887
def allow_view(user):
    """Is the current user allowed to view the user account?
    Yes, if current user is admin, staff or self.
    """
    if not flask.g.current_user:
        return False
    if flask.g.am_admin:
        return True
    if flask.g.am_staff:
        return True
    if flask.g.current_user['username'] == user['username']:
        return True
    return False
334e56796235e5bfab6f2220443c80fc5ff68a51
3,656,888
import datetime
import email.utils as eut
import time


def httptimestamp(inhttpdate):
    """
    Return timestamp from RFC1123 (HTTP/1.1).
    """
    dat = datetime.datetime(*eut.parsedate(inhttpdate)[:5])
    return int(time.mktime(dat.timetuple()))
acfcdbea1a9d331b7623478841c6cd1d45fd45bf
3,656,889
from datetime import date


def calculate_duration(start_date, end_date=None):
    """
    Calculate how many years and months have passed between start and end dates
    """
    # If end date not defined, use current date
    if not end_date:
        end_date = date.today()
    years = end_date.year - start_date.year
    months = end_date.month - start_date.month
    if months < 0:
        years = years - 1
        months = months + 12
    return years, months
d41c52d4d0274ce3b829b33f07c9730a34ab4cbd
3,656,890
import typing


def actives(apikey: str) -> typing.List[typing.Dict]:
    """
    Query FMP /actives/ API

    :param apikey: Your API key.
    :return: A list of dictionaries.
    """
    path = "actives"
    query_vars = {"apikey": apikey}
    return __return_json_v3(path=path, query_vars=query_vars)
de4eecc6f3006158407efd51d67b6a5ac40d2cfd
3,656,892
def print_unicodeinfo(val: int, key: str) -> str:
    """
    Formats the occurrence count, unicode character or guideline rule, and
    additional information into a single line

    :param val: count of the occurrences of key
    :param key: key (glyph or guideline rules)
    :return: formatted string
    """
    # repr() keeps control characters visible in the output.
    key_repr = repr(key) if controlcharacter_check(key) else key
    return f"{val:-6} {{{key_repr}}}{addinfo(key)}"
194bb1d03613e9708f8deea8c233d02dacd3e3b6
3,656,893
def qx_to_npx(df):
    """
    Return df with qx converted to npx.
    """
    df = 1 - df
    out = df.cumprod().shift()
    for i in df.index:
        out.loc[i, i] = 1
    return out
683a26f57dfb7ae1762df84f74186f0b88cb4688
3,656,894
def homepage(selenium, config):
    """Get homepage with selenium."""
    selenium.get(config.BASE_URL)
    selenium.set_window_size(config.WINDOW_WIDTH, config.WINDOW_HEIGHT)
    custom_click_cookie_rollbar(selenium, config.MAX_WAIT_TIME)
    return selenium
39217a38ac09d41093070ed06803e36485f04e2b
3,656,895
import torch


def _if_scalar_type_as(g, self, tensor):
    """
    Convert self into the same type of tensor, as necessary.

    We only support implicit casting for scalars, so we never actually need
    to insert an ONNX cast operator here; just fix up the scalar.
    """
    if isinstance(self, torch._C.Value):
        return self
    elif tensor.type().kind() == "TensorType" or tensor.type().kind() == "CompleteTensorType":
        ty = tensor.type().scalarType().lower()
        return getattr(self, ty)()
    else:
        return self
8e53bf67c8bbc78f142ffcb8027c0876eed951fe
3,656,896
import numpy as np


def read_images_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    # Image is an external namedtuple-style record defined alongside this
    # reader (COLMAP script convention).
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images
2aed7477e43bdcb73ad9eb866960b814278bbf0c
3,656,897
import re

# Assumed module-level pattern; a typical minimal email regex is used here.
EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")


def emailIsValid(email):
    """Return true if email is valid otherwise false"""
    return EMAIL_RE.match(email) is not None
d9e28b68e31f1ab95c63aa80cd4a2a461cbac852
3,656,898
def calculate_line_number(text):
    """Count the number of non-empty lines in the text"""
    return len([line for line in text.split("\n") if line.strip() != ""])
f35533945203ec2f47a89e7072ddd9b172f5554b
3,656,899
def links_at_node(shape):
    """Get link ids for each node.

    Parameters
    ----------
    shape : tuple of int
        Shape of grid of nodes.

    Returns
    -------
    (N, 4) ndarray of int
        Array of link ids.

    Examples
    --------
    >>> from landlab.grid.structured_quad.links import links_at_node
    >>> links_at_node((4, 3)) # doctest: +NORMALIZE_WHITESPACE
    array([[ 0,  2, -1, -1], [ 1,  3,  0, -1], [-1,  4,  1, -1],
           [ 5,  7, -1,  2], [ 6,  8,  5,  3], [-1,  9,  6,  4],
           [10, 12, -1,  7], [11, 13, 10,  8], [-1, 14, 11,  9],
           [15, -1, -1, 12], [16, -1, 15, 13], [-1, -1, 16, 14]])
    """
    (south_links, west_links) = _node_in_link_ids(shape)
    (north_links, east_links) = _node_out_link_ids(shape)
    return (
        np.vstack(
            (east_links.flat, north_links.flat, west_links.flat, south_links.flat)
        )
        .transpose()
        .copy()
    )
0f354530d5c6b415c886df25e1b15ba2477de8c9
3,656,900
def manage_addFancyContent(self, id, REQUEST=None):
    """Add the fancy fancy content."""
    id = self._setObject(id, FancyContent(id))
    return ''
47efd8df7d0ccc12894729d142a09a8a53562ff5
3,656,901
def convert_sentences(sentences, tokenizer):
    """
    Truncate each sentence to 512 bpes in order to fit on BERT and convert
    it to bpes.

    :param tokenizer: The BERT tokenizer we used in order to convert each
        sentence to ids.
    :param sentences: The tokenized sentences of the summary we are processing.
    :return: The ids of the summary sentences.
    """
    sentences_ids = []
    for i, sent in enumerate(sentences):
        if len(sent) > 512:
            # list.append returns None, so the original
            # `sentences[i][:511].append('[SEP]')` stored None; concatenation
            # keeps the truncated tokens plus the closing [SEP].
            sentences[i] = sentences[i][:511] + ['[SEP]']
        sentences_ids.append(tokenizer.convert_tokens_to_ids(sentences[i]))
    return sentences_ids
48cde2cba0af288bff9f49cb2ffc66dd22cfd952
3,656,902
from typing import Tuple
from typing import Union

import numpy as np


def imscale(image: Imagelike, scale: Union[float, Tuple[float, float]],
            **kwargs) -> np.ndarray:
    """Scale the given image.

    The result will be a new image scaled by the specified scale.
    """
    global _resizer
    if _resizer is None:
        _resizer = ImageResizer()
    return _resizer.scale(image, scale, **kwargs)
1c0949b445620febe1482ea4d32ae2dd4ac44e04
3,656,903
def _packages_info() -> dict:
    """Return a dict with installed packages version"""
    return Dependencies.installed_packages()
a4095968c7553aad017e97ab88322c616586961f
3,656,905
def _first(root: TreeNode) -> TreeNode:
    """Return the first node in "inorder" traversal order of the `root` subtree

    Args:
        root (TreeNode): root of subtree

    Returns:
        TreeNode: first node in subtree
    """
    if root.left is None:
        return root
    return _first(root.left)
6464b7b920b32d3e3fd309eb1a7de26bd21a5710
3,656,907
def get_library_version() -> str:
    """
    Returns the version of minecraft-launcher-lib
    """
    return __version__
aaa0703835cb00370bf30e96f2988f4c2e16bb51
3,656,908
import cv2
import numpy as np


def load_image(image):
    """reshape and convert image to fit the model"""
    img = cv2.imread(image)  # load image from path
    img = cv2.resize(img, (257, 257), interpolation=cv2.INTER_LINEAR)  # resize
    img = (np.float32(img) - 127.5) / 127.5  # convert to float and normalize
    img = img.reshape((1, 257, 257, 3))  # add batch dimension
    return img
642f1da152b7e852e46c57d4c2608e469ba7bddb
3,656,910
import numpy as np
import plotly.graph_objects as go


def hist_trigger_time_diff(df_dev):
    """Plot a histogram of the log of time differences between device triggers."""
    df = devices_trigger_time_diff(df_dev.copy())
    fig = go.Figure()
    trace = go.Histogram(
        x=np.log(df['row_duration'].dt.total_seconds() / 60),
        nbinsx=200,
    )
    fig.add_trace(trace)
    return fig
43ae70a2ff9a6b7f9927d91c88c2d540f7b8ca24
3,656,911
def verify_spec(spec_utid, proxy_utid):
    """
    For a specific unit test id (utid), compares the spec with the proxy
    """
    results = ''
    for key in spec_utid:
        results += '%s: spec=%s, proxy=%s (%s) *** ' % (
            key, spec_utid[key], proxy_utid[key],
            (spec_utid.get(key) == proxy_utid.get(key)))
    return results
b9854e23f0d88ed4f9abcc0c16236a2d543b9eb0
3,656,912
def lammps_created_gsd(job):
    """Check if mdtraj has converted the production run to a gsd trajectory for the job."""
    return job.isfile("trajectory-npt.gsd")
a66c899a20e9602098150f46067d5505572232c2
3,656,913
from datetime import datetime, timedelta


def neo4j_data_age(data, max_data_age=None):
    """
    Checks the noclook_last_seen property against datetime.datetime.now()
    and, if the difference is greater than max_data_age (hours)
    (django_settings.NEO4J_MAX_DATA_AGE will be used if max_data_age is not
    specified) and noclook_auto_manage is true, the data is said to be
    expired.

    Returns noclook_last_seen as a datetime and an "expired" boolean.
    """
    if not max_data_age:
        max_data_age = django_settings.NEO4J_MAX_DATA_AGE
    max_age = timedelta(hours=int(max_data_age))
    now = datetime.now()
    last_seen = isots_to_dt(data)
    expired = False
    if last_seen and (now - last_seen) > max_age and data.get('noclook_auto_manage', False):
        expired = True
    return last_seen, expired
77f703f972b7b67ec5de48c9f8a0aceef3cd0646
3,656,914
import optparse


def ProfileOptions(parser):
    """Build option group for profiling chrome.

    Args:
        parser: OptionParser object for parsing the command-line.

    Returns:
        Option group that contains profiling chrome options.
    """
    profile_options = optparse.OptionGroup(parser, 'Profile Chrome Options')
    browsers = sorted(util.get_supported_browsers().keys())
    profile_options.add_option('-b', '--browser',
                               help='Select among installed browsers. '
                                    'One of ' + ', '.join(browsers) +
                                    '. "stable" is used by default.',
                               type='choice', choices=browsers,
                               default='stable')
    profile_options.add_option('-t', '--time',
                               help=('Stops tracing after N seconds. '
                                     'Default is 5 seconds'),
                               default=5, metavar='N', type='int',
                               dest='trace_time')
    profile_options.add_option('-e', '--serial',
                               help='adb device serial number.',
                               type='string',
                               default=util.get_default_serial(),
                               dest='device_serial_number')
    profile_options.add_option('-f', '--trace_format',
                               help='Format of saved trace: proto, json, html.'
                                    ' Default is proto.',
                               default='proto', dest='trace_format')
    profile_options.add_option('-p', '--platform',
                               help='Device platform. Only Android is supported.',
                               default='android', dest='platform')
    profile_options.add_option('--buf-size',
                               help='Use a trace buffer size of N KB.',
                               type='int', metavar='N',
                               dest='trace_buf_size')
    profile_options.add_option(
        '--enable_profiler',
        help='Comma-separated string of profiling options to use. Supports '
             'options for memory or cpu or both. '
             'Ex: --enable_profiler=memory or --enable_profiler=memory,cpu.',
        dest='enable_profiler')
    profile_options.add_option('--chrome_categories',
                               help='Chrome tracing categories to record.',
                               type='string',
                               default=_DEFAULT_CHROME_CATEGORIES)
    profile_options.add_option(
        '--skip_symbolize',
        help='Skips symbolization after recording trace profile, if specified.',
        action='store_true', dest='skip_symbolize')
    profile_options.add_option('--compress',
                               help='Compress the resulting trace with gzip.',
                               action='store_true')
    # This is kept for backwards compatibility. Help is suppressed because this
    # should be specified through the newer |trace_format| flag.
    profile_options.add_option('--json', help=optparse.SUPPRESS_HELP,
                               dest='write_json')
    return profile_options
57b41cf7a629b566aec995be2d6181357000fc1c
3,656,915
def _clean_unicode(value):
    """Return the value as a unicode. (Python 2 code: str here is a byte string.)"""
    if isinstance(value, str):
        return value.decode('utf-8')
    else:
        return unicode(value)
be04bf30cecd7f25d0c39c05f6d5e6d995438c0b
3,656,916
def deslugify_province(prov):
    """
    Province slug to name, i.e. dashes to spaces and title case.
    KZN is a special case.
    """
    if prov == 'kwazulu-natal':
        return 'KwaZulu-Natal'
    return prov.replace('-', ' ').title()
8e88ea7325c3b911495780b4437bc02784fbad82
3,656,917
import re


def parse_vectors(vectors):
    """Basic cleanup of vector or vectors

    Strip out V from V#s. Similar to parse tables, this by no means
    guarantees a valid entry, just helps with some standard input formats

    Parameters
    ----------
    vectors : list of str or str
        A string or list of strings of vector names to be parsed

    Returns
    -------
    list of str
        vectors with unnecessary characters removed
    """
    def parse_vector(vector):
        """Strip string to numeric elements only"""
        if isinstance(vector, int):  # Already parsed earlier
            return vector
        return int(re.sub(r'\D', '', vector))

    if isinstance(vectors, str):
        return [parse_vector(vectors)]
    return [parse_vector(v) for v in vectors]
d2161e45bae51db21d7668ea6008ddb9ada16c4e
3,656,920
import numpy as np


def sort_slopes(sds):
    """Sort slopes from bottom to top then right to left"""
    sds = np.array(sds)
    scores = sds[:, 0, 1] + sds[:, 1, 1] * 1e6
    inds = np.argsort(scores)
    return sds[inds]
3bb62bf3be98176ae096bfe5f55b203173c3a425
3,656,921
def serialize_skycoord(o):
    """
    Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.

    Args:
        o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be
            serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    representation = o.representation.get_name()
    frame = o.frame.name

    r = o.represent_as('spherical')

    d = dict(
        _type='astropy.coordinates.SkyCoord',
        frame=frame,
        representation=representation,
        lon=r.lon,
        lat=r.lat)

    if len(o.distance.unit.to_string()):
        d['distance'] = r.distance

    return d
52830d9243cac36573c358f1579987eb43435892
3,656,923
def redis_sentinel(create_sentinel, sentinel, loop):
    """Returns Redis Sentinel client instance."""
    redis_sentinel = loop.run_until_complete(
        create_sentinel([sentinel.tcp_address], timeout=2, loop=loop))
    assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG'
    return redis_sentinel
3b779c9ef73e3bc5949afadbace34a9dcca1273a
3,656,924
from typing import Tuple
from typing import Dict

import numpy as np
import pandas as pd


def compute_features(
        seq_path: str,
        map_features_utils_instance: MapFeaturesUtils,
        social_features_utils_instance: SocialFeaturesUtils,
) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
    """Compute social and map features for the sequence.

    Args:
        seq_path (str): file path for the sequence whose features are to be
            computed.
        map_features_utils_instance: MapFeaturesUtils instance.
        social_features_utils_instance: SocialFeaturesUtils instance.

    Returns:
        merged_features (numpy array): SEQ_LEN x NUM_FEATURES
        map_feature_helpers (dict): Dictionary containing helpers for map features
    """
    args = parse_arguments()
    df = pd.read_csv(seq_path, dtype={"TIMESTAMP": str})

    # Get social and map features for the agent
    agent_track = df[df["OBJECT_TYPE"] == "AGENT"].values

    # Social features are computed using only the observed trajectory
    social_features = social_features_utils_instance.compute_social_features(
        df, agent_track, args.obs_len, args.obs_len + args.pred_len,
        RAW_DATA_FORMAT)

    # agent_track will be used to compute n-t distances for future trajectory,
    # using centerlines obtained from observed trajectory
    map_features, map_feature_helpers = map_features_utils_instance.compute_map_features(
        agent_track,
        args.obs_len,
        args.obs_len + args.pred_len,
        RAW_DATA_FORMAT,
        args.mode,
    )

    # Combine social and map features
    # If track is of OBS_LEN (i.e., if it's in test mode), use agent_track of
    # full SEQ_LEN, but keep (OBS_LEN+1) to (SEQ_LEN) indexes having None values
    if agent_track.shape[0] == args.obs_len:
        agent_track_seq = np.full(
            (args.obs_len + args.pred_len, agent_track.shape[1]), None)
        agent_track_seq[:args.obs_len] = agent_track
        merged_features = np.concatenate(
            (agent_track_seq, social_features, map_features), axis=1)
    else:
        merged_features = np.concatenate(
            (agent_track, social_features, map_features), axis=1)

    return merged_features, map_feature_helpers
bd8414b81bc3b1856773767d4f8db8897436ddf3
3,656,925
def summarizeTitlesByLength(titlesAlignments, limit=None):
    """
    Sort match titles by sequence length.

    @param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
    @param limit: An C{int} limit on the number of results to show.
    @return: An C{IPython.display.HTML} instance with match titles sorted by
        sequence length.
    """
    return _sortHTML(titlesAlignments, 'length', limit)
31f9a358032018b51910148dfaa82d4deb08191f
3,656,926
def _diff_tail(msg):
    """`msg` is an arbitrary length difference "path", which could be coming
    from any part of the mapping hierarchy and ending in any kind of
    selector tree. The last item is always the change message: add,
    replace, delete <blah>. The next to last should always be a selector
    key of some kind. Back up from there to find the first mapping tuple.
    """
    tail = []
    for part in msg[::-1]:
        if isinstance(part, tuple) and len(part) == 2 and \
                isinstance(part[0], str) and part[0].endswith("map"):
            tail.append(part[1])
            break
        else:
            tail.append(part)
    return tuple(reversed(tail))
224a4ca5f73b1f147c27599b62f0540480e40a0d
3,656,927
def select_standard_name(session, cluster, importance_table_name):
    """
    Use cluster members for a WHERE ... IN (...) query.
    SQLAlchemy handles the escaping of the cluster values; note that the
    table name itself is interpolated directly and must be trusted.
    """
    stmt = session.query('name from %s' % importance_table_name) \
        .filter(column('name').in_(list(cluster))) \
        .order_by('"count" DESC') \
        .limit(1)
    rv = session.execute(stmt)
    res = list(rv)
    return res[0][0]
173113f8abf6b675fefe7279cfa1e28579747085
3,656,928
def calculate_depth(experiment):
    """Calculate the minor, major, total depth

    Args:
        experiment (remixt.Experiment): experiment object

    Returns:
        pandas.DataFrame: read depth table with columns,
            'major', 'minor', 'total', 'length'
    """
    data = remixt.analysis.experiment.create_segment_table(experiment)

    data['segment_length'] = data['end'] - data['start'] + 1
    data['length_ratio'] = data['length'] / data['segment_length']
    data['allele_readcount'] = data['minor_readcount'] + data['major_readcount']

    data['high_quality'] = (
        (data['length'] > np.percentile(data['length'].values, 10)) &
        (data['allele_readcount'] > np.percentile(data['allele_readcount'].values, 10)) &
        (data['length_ratio'] > np.percentile(data['length_ratio'].values, 10)))

    phi = remixt.likelihood.estimate_phi(experiment.x)
    p = remixt.likelihood.proportion_measureable_matrix(phi)

    # Filter segments for which read depth calculation will be nan/inf
    data = data[(data['length'] > 0) & np.all(p > 0, axis=1)]

    data.rename(columns={
        'major_depth': 'major',
        'minor_depth': 'minor',
        'total_depth': 'total',
    }, inplace=True)

    data = data[[
        'chromosome', 'start', 'end', 'length',
        'major', 'minor', 'total', 'high_quality',
    ]]

    return data
d4db665eff37f6590a2362af8896db25b8ae758b
3,656,929
# The .at[] updates and the PRNG key below are JAX idioms, so the imports
# are jax.numpy and jax.random (the original `import random` was a bug).
import jax.numpy as np
from jax import random


def checkerboard(key, nsq, size, dtype=np.float32):
    """Create a checkerboard background image with random colors.

    NOTE: only supports a single value for nsq (number squares).

    Args:
        key: JAX PRNGKey.
        nsq (int): number of squares per side of the checkerboard.
        size (int): size of one side of the checkerboard in pixels.
        dtype: desired return data type.

    Returns:
        canvas (np.array): checkerboard background image.
    """
    assert size % nsq == 0
    sq = size // nsq
    color1, color2 = random.uniform(key, (2, 3), dtype=dtype)
    canvas = np.full((nsq, sq, nsq, sq, 3), color1, dtype=dtype)
    canvas = canvas.at[::2, :, 1::2, :, :].set(color2)
    canvas = canvas.at[1::2, :, ::2, :, :].set(color2)
    return canvas.reshape(sq * nsq, sq * nsq, 3)
4f6428450a05fcb92ba05e22e336d887860fb143
3,656,930
import torch


def choice(x, a):
    """Generate a random sample of size a from a tensor; non-tensor inputs
    are returned unchanged."""
    if torch.is_tensor(x):
        return x[torch.randint(len(x), (a,))]
    return x
af21321bcd12fe5f1a5eb59b8f0db14096899b5d
3,656,931
def correct_gene_names(df):
    """
    Fix datetime entries in Gene names
    """
    update_symbols = []
    for i, gs in enumerate(df.Gene_Symbol):
        if (not isinstance(gs, str)) or (':' in gs):
            update_symbols.append(mapping.get_name_from_uniprot(df.Uniprot_Id.iloc[i]))
        else:
            update_symbols.append(gs)
    df.Gene_Symbol = update_symbols
    return df
5ca1aa1da60f238f9c377640b9f1a350658ea9d0
3,656,932
def process_repl_args(args):
    """
    Process PANDA replay-related arguments.
    """
    assert False, 'Not implemented yet.'
    cmd = []
    cmd.extend(['-display', 'none'])
    return cmd
    # p_test "${panda_rr}-rr-snp" f "trace memory snapshot"
    # p_test "${panda_rr}-rr-nondet.log" f "trace nondet log"
    # -pandalog ${opts[-plog]} -replay $panda_rr
660495454f3b04f76d9aa0447262cb3a8c06b543
3,656,933
def choose(n, k):
    """
    A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
    """
    if 0 <= k <= n:
        ntok = 1
        ktok = 1
        for t in range(1, min(k, n - k) + 1):  # changed from xrange
            ntok *= n
            ktok *= t
            n -= 1
        return ntok // ktok
    else:
        return 0
22c8639b3e110673164faa1ea84d669d5f8816d4
3,656,934
import pickle


def _get_ReaLiSe_dataset(which="15"):
    """Load the preprocessed SIGHAN data released with ReaLiSe."""
    print("Loading ReaLiSe Dataset !")
    print("Hint: The data you are loading now is the preprocessed SIGHAN from ReaLiSe.")
    ddp_exec("os.system('date')")
    path = "../SE_tmp_back/milestone/ReaLiSe/data/"
    train_dataset = pickle.load(open(path + "trainall.times2.pkl", "rb"))
    eval_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
    test_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
    print("Hint: Using **SIGHAN" + which + "** for eval & test !")

    def trans2mydataset(features):
        new = []
        for feature in features:
            tmp = {}
            tmp["input_ids"] = feature["src_idx"][:128]
            tmp["labels"] = feature["tgt_idx"][:128]
            tmp["attention_mask"] = ([1] * len(tmp["input_ids"]))[:128]  # feature["lengths"]
            new.append(tmp)
        return mydataset(new)

    print("Loaded successfully !")
    ddp_exec("os.system('date')")
    print("over")
    return (trans2mydataset(train_dataset), trans2mydataset(eval_dataset),
            trans2mydataset(test_dataset))
418f443fa3e2094b5288bcaf3490780632b2922c
3,656,935
# get_random_bytes is assumed to come from pycryptodome; ATOM_LENGTH is a
# module-level constant defined elsewhere.
from Crypto.Random import get_random_bytes


def generate_check_phrase() -> bytes:
    """
    Generate check-phrase for connecting of auxiliary socket.

    :return: some array of ATOM_LENGTH bytes.
    """
    return get_random_bytes(ATOM_LENGTH)
9bcd270bd1f9c3a7943d4910c065cc9fdee02141
3,656,936
import pickle


def load_pickle(filename: str):
    """
    Load a file from disk.

    Parameters
    ----------
    filename: str
        Name of the file that is loaded.

    Returns
    -------
    The unpickled object.
    """
    # Use a context manager so the file handle is closed after loading.
    with open(filename, 'rb') as f:
        return pickle.load(f)
cae6710ba18664f244c55525c14a6bda0bea314d
3,656,937
import xarray as xr


def crop_to_reference(dataset: xr.Dataset, ref_dataset: xr.Dataset) -> xr.Dataset:
    """Crops horizontal coordinates to match reference dataset"""
    if "longitude" not in dataset.coords.keys():
        raise ValueError("Longitude is not a coordinate of dataset.")
    if "longitude" not in ref_dataset.coords.keys():
        raise ValueError("Longitude is not a coordinate of reference dataset.")
    if "latitude" not in dataset.coords.keys():
        raise ValueError("Latitude is not a coordinate of dataset.")
    if "latitude" not in ref_dataset.coords.keys():
        raise ValueError("Latitude is not a coordinate of reference dataset.")
    dataset = dataset.where(dataset.latitude == ref_dataset.latitude, drop=True) \
                     .where(dataset.longitude == ref_dataset.longitude, drop=True)
    return dataset
c915ec99dca5cd33531c049447e23e380590b1af
3,656,940
def parse_line(description, inline_comments=_INLINE_COMMENT_PREFIXES):
    """Parse a line and correctly add the description(s) to a collection"""
    # manually strip out the comments
    # py2 cannot ignore comments on a continuation line
    # https://stackoverflow.com/q/9110428/1177288
    #
    # PY3 can do it for you with 'inline_comment_prefixes' = '#;'
    if PY2:
        for comment_prefix in inline_comments:
            pos = description.find(comment_prefix)
            if pos != -1:
                # comment line or inline comment (after a space)
                if pos == 0 or description[pos - 1].isspace():
                    description = description[:pos]

    if not description:
        return None

    # there can be trailing commas if you copy from source code
    descriptions = description.strip(',').split(',')
    # strip all the spaces and quotes
    descriptions = [desc.strip().strip("'").strip('"').strip()
                    for desc in descriptions]
    return descriptions
6fc58aef5b103ce429ed82378bce81a4550abb0f
3,656,941
def target_frame():
    """Input frame."""
    return 'IAU_ENCELADUS'
8c57ab924a7b4471ac2f549493ebc176e853c652
3,656,942
import numpy as np


def cards(cs):
    """Parse cards"""
    # valueL and colorL are module-level arrays of card values and suits.
    cs = cs.split(' ')
    result = np.zeros([len(valueL), len(colorL)], int)
    for c in cs:
        result[np.where(valueL == c[0])[0][0], np.where(colorL == c[1])[0][0]] = 1
    return result
9db7aa3ae9b42fb7b3fcd67371bca02b455fd8e4
3,656,943
def _get_max_diag_idx(m, n_A, n_B, diags, start, percentage):
    """
    Determine the diag index for when the desired percentage of distances
    is computed

    Parameters
    ----------
    m : int
        Window size
    n_A : int
        The length of the time series or sequence for which to compute the
        matrix profile `T_A`
    n_B : int
        The length of the time series or sequence that contain your query
        subsequences of interest `T_B`
    diags : ndarray
        The diag of diagonals to process and compute
    start : int
        The (inclusive) diag index from which to start
    percentage : float
        Approximate percentage completed. The value is between 0.0 and 1.0.

    Returns
    -------
    max_diag_idx : int
        The diag index that corresponds to desired percentage of distances
        to compute
    n_dist_computed : int
        The number of distances computed
    """
    max_n_dist = 0
    for diag_idx in range(diags.shape[0]):
        k = diags[diag_idx]
        if k >= 0:
            max_n_dist += min(n_A - m + 1 - k, n_B - m + 1)
        else:
            max_n_dist += min(n_A - m + 1, n_B - m + 1 + k)

    n_dist_computed = 0
    for diag_idx in range(start, diags.shape[0]):
        k = diags[diag_idx]
        if k >= 0:
            n_dist_computed += min(n_A - m + 1 - k, n_B - m + 1)
        else:
            n_dist_computed += min(n_A - m + 1, n_B - m + 1 + k)
        if n_dist_computed / max_n_dist > percentage:  # pragma: no cover
            break

    max_diag_idx = diag_idx + 1

    return max_diag_idx, n_dist_computed
b6f86ee110ae4fa16638f86f2dcf324e7ebfb674
3,656,944
def get_argument_values(arg_defs, arg_asts, variables):
    """Prepares an object map of argument values given a list of argument
    definitions and list of argument AST nodes."""
    if arg_asts:
        arg_ast_map = {arg.name.value: arg for arg in arg_asts}
    else:
        arg_ast_map = {}

    result = {}
    for arg_def in arg_defs:
        name = arg_def.name
        value_ast = arg_ast_map.get(name)
        if value_ast:
            value_ast = value_ast.value

        value = value_from_ast(
            value_ast,
            arg_def.type,
            variables
        )
        if value is None:
            value = arg_def.default_value

        if value is not None:
            result[name] = value

    return result
0bad38e7155d04ac297e2112b8f9b70e5fcc18a0
3,656,945
def get_identifier(positioner_id, command_id, uid=0, response_code=0):
    """Returns a 29 bits identifier with the correct format.

    The CAN identifier format for the positioners uses an extended frame
    with 29-bit encoding so that the 11 higher bits correspond to the
    positioner ID, the 8 middle bits are the command number, the following
    6 bits are the unique identifier, and the 4 lower bits are the response
    code.

    Parameters
    ----------
    positioner_id : int
        The Id of the positioner to command, or zero for broadcast.
    command_id : int
        The ID of the command to send.
    uid : int
        The unique identifier
    response_code : int
        The response code.

    Returns
    -------
    identifier : `int`
        The decimal integer corresponding to the 29-bit identifier.

    Examples
    --------
    ::

        >>> get_identifier(5, 17, uid=5)
        1328128
        >>> bin(1328128)
        '0b101000100010000000000'

    """
    posid_bin = format(positioner_id, "011b")
    cid_bin = format(command_id, "08b")
    cuid_bin = format(uid, "06b")
    response_bin = format(int(response_code), "04b")

    identifier = posid_bin + cid_bin + cuid_bin + response_bin

    assert len(identifier) == 29

    return int(identifier, 2)
57a1ce7004186e8c1c88c06665311e71010705c4
3,656,946
def standardized(array):
    """Normalize the values in an array.

    Arguments:
        array (np.ndarray): Array of values to normalize.

    Returns:
        array with zero mean and unit standard deviation.
    """
    return (array - array.mean()) / max(1e-4, array.std())
1764dfd1e4e173d2ca081edeb8b7165a79d63b7d
3,656,947
import json


def newaddress(fn, passphrase, addr_type=0):
    """Create a new address in the wallet and return it as JSON."""
    wallet = Wallet(fn).fromFile(passphrase)
    # Address Types
    # addr_type == 0, deposit
    # addr_type == 1, change
    # addr_type == 2, staking
    # addr_type == 3, Dealer
    # Address types aren't programmatically important, but help to organize
    if addr_type is None:
        addr_type = 0
    k = wallet.create_address(save=True, addr_type=addr_type)
    d = {"new_address": (k.address_type(), k.address(), k.address(True))}
    return json.dumps(d, sort_keys=True, indent=4)
8afca8b83ea8464d3aeb02f5d2e406d2f5bebc53
3,656,948
import logging

import numpy as np


def index(args):
    """Handles the index step of the program."""
    if not args.index:  # build index
        logging.info(" Building index...")
        index_list = generate_index(args.input_dir)
        if not index_list:  # list is empty
            logging.error(" Empty index. Exiting...")
            return
        logging.info(" Index built!")
        if not args.no_index:  # save index
            np.save(args.dump_index, index_list)
            logging.info(" Index saved as: {}".format(args.dump_index))
        return index_list
    else:  # load index from file
        index_list = load_index(args.index)
        return index_list
5e8e37d387612eb81984c7bff48e747780475f78
3,656,949
def _output_object_or_file_map_configurator(prerequisites, args):
    """Adds the output file map or single object file to the command line."""
    return _output_or_file_map(
        output_file_map = prerequisites.output_file_map,
        outputs = prerequisites.object_files,
        args = args,
    )
7d362be5d6478764810ae3a9013ce1cb807efde3
3,656,951
def get_file_name():
    """This function asks the user for a file name and returns it"""
    f_name = input('Input your file name: ')
    return f_name
5d3e524ebe423410f721afb070bfba9d804ed19f
3,656,952
import itertools


def minimum_distance(geo1, geo2):
    """get the minimum distance between atoms in geo1 and those in geo2"""
    xyzs1 = coordinates(geo1)
    xyzs2 = coordinates(geo2)
    return min(cart.vec.distance(xyz1, xyz2)
               for xyz1, xyz2 in itertools.product(xyzs1, xyzs2))
ce6493d7e12bd3f48db209a01fe85eb4305835d0
3,656,954
def prepare():
    """
    Get the list of filtered tweets by target entity where each item contains
    the tweet with its original attributes when downloaded from Twitter

    :return: the list of filtered tweet objects
    """
    path = '../../Data.json'
    List = loadData(path)  # load data
    tweets = [List[i]['text'] for i in range(len(List))]  # text of each tweet
    tweets = [process(item, False) for item in tweets]  # processed tweets
    filtered_tweets = tweetsEntitiesMapping(tweets)  # filter by target entity
    ids_list = filtered_tweets[3]  # ids of the filtered tweets in the original list
    count = 0
    list_tweets = []  # store the filtered tweet objects
    for item in List:
        if count in ids_list:
            list_tweets.append(item)
        count = count + 1
    return list_tweets
0707993267bd6e76d432b08e947582f8a151f591
3,656,955
import requests


def deletecall(bam_url, api_call, call_parameters, delete_entity, header):
    """Send a DELETE request to the BAM REST API after user confirmation and return the response."""
    call_url = "http://" + bam_url + "/Services/REST/v1/" + api_call + "?"
    print("You are requesting to delete:")
    print(delete_entity)
    answer = input("Do you want to proceed (y (yes) or n (no))? ")
    try:
        if answer.lower() == "y":
            response = requests.delete(call_url, params=call_parameters, headers=header)
            return response.json()
        elif answer.lower() == "n":
            return "You aborted deletion"
        else:
            return "You entered an invalid character"
    except requests.exceptions.RequestException as e:
        print(e)
f6cffd225b9dd8d4d387b472d5ef522e2a48d738
3,656,957
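A hypothetical invocation might look like the following; the host name, object ID, and token are made up for illustration, and a `delete` endpoint taking an `objectId` parameter is assumed here rather than confirmed from the source.

header = {"Authorization": "BAMAuthToken: abc123"}  # fictitious token
params = {"objectId": 12345}                        # fictitious entity id
result = deletecall("bam.example.com", "delete", params,
                    "Host record: host1.example.com", header)
print(result)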
def haDecFromAzAlt (azAlt, lat): """Converts alt/az position to ha/dec position. Inputs: - azAlt (az, alt) (deg) - lat latitude (degrees); >0 is north of the equator, <0 is south Returns a tuple containing: - haDec (HA, Dec) (deg), a tuple; HA is in the range (-180, 180] - atPole true => object near the pole (see Error Conditions) Error Conditions: - If converted position is too near the north or south pole, atPole is set true and HA is some arbitrary value. Details: Sign conventions: - azimuth is 0 south and 90 east - ha/dec is the usual left-handed coordinate system History: 3/01 ROwen Converted to Python from TCC's sph_AzAlt2HADec 1-2. 2/02 ROwen Minor tweaks to header. 2002-07-02 ROwen Renamed from azAltToHADec. 2003-05-06 ROwen Changed HA range from [0, 360) to (-180, 180] """ # convert spherical az/alt (deg) to direction cosines azAltDC = dcFromSC (azAlt) # convert az/alt direction cosines to -ha/dec direction cosines negHADecDC = Cnv.haDecFromAzAlt (azAltDC, lat) # convert -ha/dec direction cosines to spherical -ha/dec (deg) ((negHA, dec), atPole) = scFromDC (negHADecDC) return ((opscore.RO.MathUtil.wrapCtr(-negHA), dec), atPole)
9387d6771dd3fd4754a874141679902954adbecf
3,656,958
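For a rough self-contained check of the conversion (without the TCC's direction-cosine helpers), the classical spherical-trig formulas can be used. The sketch below assumes the same sign conventions as the docstring (azimuth 0 south, 90 east) and skips the at-pole handling.

import numpy as np

def ha_dec_from_az_alt_sketch(az, alt, lat):
    # All angles in degrees; azimuth measured from south, positive east.
    azr, altr, latr = np.radians([az, alt, lat])
    dec = np.degrees(np.arcsin(
        np.sin(latr) * np.sin(altr) - np.cos(latr) * np.cos(altr) * np.cos(azr)))
    ha = np.degrees(np.arctan2(
        -np.sin(azr) * np.cos(altr),
        np.cos(azr) * np.cos(altr) * np.sin(latr) + np.sin(altr) * np.cos(latr)))
    return ha, dec

# An object rising due east (az=90, alt=0) sits on the celestial equator,
# 6 hours east of the meridian: HA = -90, Dec = 0.
print(ha_dec_from_az_alt_sketch(90.0, 0.0, 40.0))  # -> (-90.0, ~0.0)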
def get_description(expression, options=None):
    """Generates a human readable string for the Cron Expression
    Args:
        expression: The cron expression string
        options: Options to control the output description
    Returns:
        The cron expression description
    """
    descriptor = ExpressionDescriptor(expression, options)
    return descriptor.get_description(DescriptionTypeEnum.FULL)
b52bb4bda67074e5b9270f33f68892e371234dc4
3,656,959
def midpoint(close, length=None, offset=None, **kwargs): """Indicator: Midpoint""" # Validate arguments close = verify_series(close) length = int(length) if length and length > 0 else 1 min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length offset = get_offset(offset) # Calculate Result lowest = close.rolling(length, min_periods=min_periods).min() highest = close.rolling(length, min_periods=min_periods).max() midpoint = 0.5 * (lowest + highest) # Offset if offset != 0: midpoint = midpoint.shift(offset) # Handle fills if 'fillna' in kwargs: midpoint.fillna(kwargs['fillna'], inplace=True) if 'fill_method' in kwargs: midpoint.fillna(method=kwargs['fill_method'], inplace=True) # Name and Categorize it midpoint.name = f"MIDPOINT_{length}" midpoint.category = 'overlap' return midpoint
3b14546715bec61dfd73a70d4a83042366c1ef08
3,656,961
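Assuming the usual pandas-ta style helpers (`verify_series`, `get_offset`) are in scope, usage looks like this; the rolling midpoint of a monotone series is simply the average of each window's extremes.

import pandas as pd

close = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
mp = midpoint(close, length=3)
print(mp.tolist())  # [nan, nan, 2.0, 3.0, 4.0]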
from ..distributions.baseclass import Dist
import numpy


def quad_fejer(order, domain=(0, 1), growth=False, segments=1):
    """
    Generate the quadrature abscissas and weights in Fejer quadrature.

    Args:
        order (int, numpy.ndarray):
            Quadrature order.
        domain (chaospy.distributions.baseclass.Dist, numpy.ndarray):
            Either distribution or bounding of interval to integrate over.
        growth (bool):
            If True sets the growth rule for the quadrature rule to only
            include orders that enhance nested samples.
        segments (int):
            Split intervals into N subintervals and create a patched
            quadrature based on the segmented quadrature. Cannot be lower
            than `order`. If 0 is provided, default to square root of
            `order`. Nested samples only exist when the number of segments
            is fixed.

    Returns:
        (numpy.ndarray, numpy.ndarray):
            abscissas:
                The quadrature points for where to evaluate the model
                function with ``abscissas.shape == (len(dist), N)`` where
                ``N`` is the number of samples.
            weights:
                The quadrature weights with ``weights.shape == (N,)``.

    Example:
        >>> abscissas, weights = quad_fejer(3, (0, 1))
        >>> abscissas.round(4)
        array([[0.0955, 0.3455, 0.6545, 0.9045]])
        >>> weights.round(4)
        array([0.1804, 0.2996, 0.2996, 0.1804])
        >>> abscissas, weights = quad_fejer(3, (0, 1), segments=2)
        >>> abscissas.round(4)
        array([[0.125, 0.375, 0.625, 0.875]])
        >>> weights.round(4)
        array([0.2222, 0.2222, 0.2222, 0.2222])
    """
    if isinstance(domain, Dist):
        abscissas, weights = quad_fejer(
            order, (domain.lower, domain.upper), growth)
        weights *= domain.pdf(abscissas).flatten()
        weights /= numpy.sum(weights)
        return abscissas, weights

    order = numpy.asarray(order, dtype=int).flatten()
    lower, upper = numpy.array(domain)
    lower = numpy.asarray(lower).flatten()
    upper = numpy.asarray(upper).flatten()

    dim = max(lower.size, upper.size, order.size)
    order = order*numpy.ones(dim, dtype=int)
    lower = lower*numpy.ones(dim)
    upper = upper*numpy.ones(dim)
    segments = segments*numpy.ones(dim, dtype=int)

    if growth:
        order = numpy.where(order > 0, 2**(order+1)-2, 0)

    abscissas, weights = zip(*[_fejer(order_, segment)
                               for order_, segment in zip(order, segments)])

    return combine_quadrature(abscissas, weights, (lower, upper))
ec2472e134a2adab5cfa42703fdaafde844aee79
3,656,962
from pathlib import Path def probe(app: FastFlixApp, file: Path) -> Box: """ Run FFprobe on a file """ command = [ f"{app.fastflix.config.ffprobe}", "-v", "quiet", "-loglevel", "panic", "-print_format", "json", "-show_format", "-show_streams", f"{file}", ] result = execute(command) try: return Box.from_json(result.stdout) except BoxError: logger.error(f"Could not read output: {result.stdout} - {result.stderr}") raise FlixError(result.stderr)
055ac6003642bc78d1fcabbbb89765d1cacb3d80
3,656,963
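Outside FastFlix, essentially the same probe can be reproduced with the standard library alone; a minimal sketch, assuming `ffprobe` is on PATH:

import json
import subprocess

def probe_sketch(path):
    # Mirrors the flags used above: quiet JSON output with format and streams.
    out = subprocess.run(
        ["ffprobe", "-v", "quiet", "-print_format", "json",
         "-show_format", "-show_streams", path],
        capture_output=True, text=True, check=True,
    )
    return json.loads(out.stdout)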
import numpy as np


def is_standard_time_series(time_series, window=180):
    """
    Check the length of time_series. If window = 180, then the length of time_series should be 903.
    The mean value of the last window should be larger than 0.

    :param time_series: the time series to check, like [data_c, data_b, data_a]
    :type time_series: pandas.Series
    :param window: the length of window
    :return: True or False
    :return type: boolean
    """
    return len(time_series) == 5 * window + 3 and np.mean(time_series[(4 * window + 2):]) > 0
7fb3212c69efb076dbab9555cf1eab9698475f9b
3,656,964
def get_comment_type(token, comment_syntax):
    """
    Return the SQL-engine-related comment type for the token.
    """
    if is_block_comment(token):
        return comment_syntax.get_block_comment_type(token)
    elif is_line_comment(token):
        return comment_syntax.get_line_comment_type(token)
0ddd68b4cd12909c5689f5620b785ccb8a45cbeb
3,656,965
def get_country_code(country_name): """ Return the Pygal 2-digit country code for the given country.""" for code, name in COUNTRIES.items(): if name == country_name: return code # If the country wasn't found, return None. return None
485684fe01ade5e2ad558523ca839a468c083686
3,656,967
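If reverse lookups happen often, inverting the mapping once avoids the linear scan; a small sketch assuming the same `COUNTRIES` dict of Pygal codes:

NAME_TO_CODE = {name: code for code, name in COUNTRIES.items()}

def get_country_code_fast(country_name):
    # Returns None when the country isn't in the mapping, like the original.
    return NAME_TO_CODE.get(country_name)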
def get_divmod(up, down, minute=False, limit=2):
    """
    Get the rounded quotient of two numbers.
    :param up: dividend
    :param down: divisor
    :param minute: convert the result to minutes
    :param limit: number of decimal places to keep
    :return: quotient
    """
    if up == 0:
        return 0
    if down == 0:
        return 0
    if minute:
        return round(up / down / 60.0, limit)
    return round(float(up) / down, limit)
253304cde82fd4a3aa70737f4caabb20b5166349
3,656,968
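Quick usage check; with `minute=True` the quotient is further divided by 60, and a zero divisor is short-circuited to 0:

print(get_divmod(7, 3))                  # 2.33
print(get_divmod(3600, 2, minute=True))  # 30.0 (seconds -> minutes)
print(get_divmod(5, 0))                  # 0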
def find_kernel_base(): """Find the kernel base.""" return idaapi.get_fileregion_ea(0)
20315c1fecc8d2a4ecf7301ccedeca84d4027285
3,656,969
import tensorflow as tf


def get_padding(x, padding_value=0, dtype=tf.float32):
    """Return float tensor representing the padding values in x.

    Args:
        x: int tensor with any shape
        padding_value: int value that marks padding positions
        dtype: type of the output

    Returns:
        float tensor with same shape as x containing values 0 or 1.
        0 -> non-padding, 1 -> padding
    """
    with tf.name_scope("padding"):
        return tf.cast(tf.equal(x, padding_value), dtype=dtype)
d11650796b980a53a5790588ac123c5323b867bd
3,656,970
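Usage sketch: positions equal to the padding value map to 1.0, everything else to 0.0 (output shown approximately).

import tensorflow as tf

x = tf.constant([[7, 0, 3], [0, 0, 9]])
print(get_padding(x))
# tf.Tensor([[0. 1. 0.]
#            [1. 1. 0.]], shape=(2, 3), dtype=float32)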
import typing

import numpy as np

import vorpy.tensor


def canonical_symplectic_form_inverse(darboux_coordinates_shape: typing.Tuple[int, ...], *, dtype: typing.Any) -> np.ndarray:
    """
    Returns the inverse of canonical_symplectic_form(dtype=dtype).  See documentation for that function for more.

    In particular, the inverse of the canonical symplectic form is

        [ 0  I ]
        [ -I 0 ]

    The inverse of the canonical symplectic form is a section of TM \\wedge TM or can be thought of
    (as it is used here) as an alternating section of TM \\otimes TM and therefore "naturally converts"
    a covector field on M (i.e. a section of T^{*}M) into a vector field on M (i.e. a section of TM).
    This form is what's used in the definition of the symplectic gradient of a function.
    """
    validate_darboux_coordinates_shape_or_raise(darboux_coordinates_shape)

    assert vorpy.tensor.dimension_of_shape(darboux_coordinates_shape) % 2 == 0
    configuration_space_dimension = vorpy.tensor.dimension_of_shape(darboux_coordinates_shape) // 2

    omega_inv = vorpy.tensor.contract(
        'ik,jl',
        canonical_symplectic_form_abstract_inverse(dtype=dtype),
        np.eye(configuration_space_dimension, dtype=dtype),
        dtype=dtype,
    )
    assert omega_inv.shape == (2, configuration_space_dimension, 2, configuration_space_dimension)
    return omega_inv.reshape(darboux_coordinates_shape + darboux_coordinates_shape)
4ef3c820c7919fcd1bb7fda3fdf2482f3cd70c03
3,656,971
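For intuition, the underlying 2n x 2n matrix can be built directly with numpy; a sketch for the flat (2, n) Darboux shape, independent of the vorpy helpers:

import numpy as np

def flat_symplectic_form_inverse(n, dtype=float):
    # [[0, I], [-I, 0]] acting on stacked (q, p) coordinates.
    eye = np.eye(n, dtype=dtype)
    zero = np.zeros((n, n), dtype=dtype)
    return np.block([[zero, eye], [-eye, zero]])

print(flat_symplectic_form_inverse(2).astype(int))
# [[ 0  0  1  0]
#  [ 0  0  0  1]
#  [-1  0  0  0]
#  [ 0 -1  0  0]]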
def update_with_error(a, b, path=None): """Merges `b` into `a` like dict.update; however, raises KeyError if values of a key shared by `a` and `b` conflict. Adapted from: https://stackoverflow.com/a/7205107 """ if path is None: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): update_with_error(a[key], b[key], path + [str(key)]) elif a[key] == b[key]: pass # same leaf value elif a[key] is None: a[key] = b[key] elif (isinstance(a[key], (list, tuple)) and not isinstance(a[key], str) and isinstance(b[key], (list, tuple)) and not isinstance(b[key], str) and len(a[key]) == len(b[key]) and all((av is None or av == bv) for av, bv in zip(a[key], b[key]))): # yapf: disable a[key] = b[key] else: raise KeyError('Conflict at {}: {} vs. {}'.format('.'.join(path + [str(key)]), a[key], b[key])) else: a[key] = b[key] return a
201650bba4fcae21d353f88ff22a9559aea61ff4
3,656,972
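Two quick cases: compatible trees merge in place, conflicting leaves raise.

a = {"x": 1, "nested": {"y": None}}
update_with_error(a, {"nested": {"y": 2}, "z": 3})
print(a)  # {'x': 1, 'nested': {'y': 2}, 'z': 3}

try:
    update_with_error({"x": 1}, {"x": 2})
except KeyError as err:
    print(err)  # 'Conflict at x: 1 vs. 2'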
import re


def tokenize(sent):
    """Return the tokens of a sentence including punctuation.

    >>> tokenize("Bob dropped the apple. Where is the apple?")
    ["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
    """
    # The original pattern r"(\W+)?" can match the empty string, which makes
    # re.split split between every character on Python 3.7+; dropping the "?"
    # yields the same tokens while staying correct on modern Python.
    return [x.strip() for x in re.split(r"(\W+)", sent) if x and x.strip()]
09456d2ae7d590ba8d6373a27993a52c0693027b
3,656,973
def tree_unflatten(flat, tree, copy_from_tree=None): """Unflatten a list into a tree given the tree shape as second argument. Args: flat: a flat list of elements to be assembled into a tree. tree: a tree with the structure we want to have in the new tree. copy_from_tree: optional list of elements that we just copy from tree. This argument is used when the flat version does not contain all elements of the expected tree but just a subset, while the rest are filled from the tree itself. It allows to omit "unnecessary" elements. For example, consider trees (A, (B, X), X) and (X, (A, X), B) where X is some element we do not care about. Flattening the first tree and removing X will yield a flat list [A, B] and the second tree can then be reconstructed from this list and the tree (X, (E, X), E) with copy_from_tree=[X]. One example where this is used is the weights-tree of a model, where layers with no weights have () in the tree and we use copy_from_tree=[()] to restore a model from a file that only has a list of trainable weights. Returns: A pair (new_tree, rest_of_flat) where the new tree that has the structure of tree but with leaves from flat, and the remaining elements of flat if more were provided than the number of leaves of tree (useful for recursion). """ if copy_from_tree is not None and tree in copy_from_tree: return tree, flat if isinstance(tree, (list, tuple)): new_tree, rest = [], flat for t in tree: new_t, rest = tree_unflatten(rest, t, copy_from_tree=copy_from_tree) new_tree.append(new_t) new_tree = tuple(new_tree) if isinstance(tree, tuple) else new_tree return new_tree, rest if isinstance(tree, dict): new_tree, rest = {}, flat for k in tree: new_v, rest = tree_unflatten(rest, tree[k], copy_from_tree=copy_from_tree) new_tree[k] = new_v return new_tree, rest return flat[0], flat[1:]
711bc67a20835091360d0fbc64e0a8842eec53ba
3,656,974
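A small example: the flat list fills the tree's leaves left to right, and the second return value carries any leftovers.

flat = [1, 2, 3, 4]
tree = (0, (0, 0))       # leaf values in the template are ignored
new_tree, rest = tree_unflatten(flat, tree)
print(new_tree, rest)    # (1, (2, 3)) [4]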
def ByteOffsetToCodepointOffset( line_value, byte_offset ):
    """The API calls for byte offsets into the UTF-8 encoded version of the
    buffer. However, ycmd internally uses unicode strings. This means that
    when we need to walk 'characters' within the buffer, such as when checking
    for semantic triggers and similar, we must use codepoint offsets, rather
    than byte offsets.

    This method converts the |byte_offset|, which is a utf-8 byte offset, into
    a codepoint offset in the unicode string |line_value|."""
    byte_line_value = ToBytes( line_value )

    return len( ToUnicode( byte_line_value[ : byte_offset - 1 ] ) ) + 1
0a826157c43cb73a5dff31c20c906144b4a0eaa6
3,656,975
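The ToBytes/ToUnicode helpers come from ycmd's utils; with plain str/bytes the same 1-based conversion looks like this, using a 2-byte character to show the offsets diverging:

line = "héllo"            # 'é' occupies two bytes in UTF-8
byte_offset = 4           # 1-based byte offset of the first 'l'
codepoint_offset = len(line.encode("utf-8")[: byte_offset - 1].decode("utf-8")) + 1
print(codepoint_offset)   # 3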
import tweepy
from allauth.socialaccount.models import SocialApp
from django.shortcuts import get_object_or_404


def get_authed_tweepy(access_token, token_secret):
    """Returns an authed instance of the Twitter API wrapper tweepy for a given user."""
    social_app_twitter = get_object_or_404(SocialApp, provider='twitter')
    auth = tweepy.OAuthHandler(social_app_twitter.client_id, social_app_twitter.secret)
    auth.set_access_token(access_token, token_secret)
    return tweepy.API(auth)
33bbf0cabdf2bbd3fc543efc4d921119d29c7729
3,656,976
def suffix_for_status(status): """Return ``title`` suffix for given status""" suffix = STATUS_SUFFIXES.get(status) if not suffix: return '' return ' {}'.format(suffix)
a908d28c6e461dcc8277784e82e383642b5ecfa3
3,656,977