Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, values 0 to 3.66M)
def new_token():
    """Generate an access token for the user.

    This endpoint requires basic auth with nickname and password.
    """
    return jsonify({'token': generate_token(g.current_user['id'])})
07497cebfd29a133ab986c86b72b603975378ed8
3,658,800
def get_room_info(room_real_id: int, verify: utils.Verify = None, cookies=None):
    """
    Get live room information (title, description, etc.).

    :param room_real_id: real room ID
    :param verify:
    :return:
    """
    if verify is None:
        verify = utils.Verify()

    api = API["live"]["info"]["room_info"]
    if cookies is None:
        resp = utils.get(api["url"], {"room_id": room_real_id}, cookies=verify.get_cookies())
    else:
        resp = utils.get(api["url"], {"room_id": room_real_id}, cookies=cookies)
    return resp
a6aa07886034a5f8c539026f8afacaa149860252
3,658,801
def parse_raw(setup, id=None, first_line_is_header=(-1, 0, 1)):
    """Used in conjunction with lazy_import and parse_setup in order to make
    alterations before parsing.

    Parameters
    ----------
    setup : dict
        Result of h2o.parse_setup
    id : str, optional
        An id for the frame.
    first_line_is_header : int, optional
        -1, 0, 1 if the first line is to be used as the header

    Returns
    -------
    H2OFrame
    """
    if id:
        setup["destination_frame"] = _quoted(id).replace("%", ".").replace("&", ".")
    if first_line_is_header != (-1, 0, 1):
        if first_line_is_header not in (-1, 0, 1):
            raise ValueError("first_line_is_header should be -1, 0, or 1")
        setup["check_header"] = first_line_is_header
    fr = H2OFrame()
    fr._parse_raw(setup)
    return fr
56d490eeaa28258ee668ed5efcc0f8a869acaa2b
3,658,802
from datetime import datetime


def import_year(year: int = None) -> bool:
    """Downloads, extracts and imports the Losungen of a given year.

    The year defaults to the next year.
    """
    session: Session = SessionMaker()
    repo = TagesLosungRepository(session)
    # with `from datetime import datetime`, today's date comes from datetime.today()
    year = datetime.today().year + 1 if year is None else year
    losungen = repo.get_by_year(year)
    session.close()
    if losungen:
        return True  # Already imported
    if download_zip(year):
        extract_zip()
        import_xml()
        logger.info("Successfully imported Losungen for %i", year)
        return True
    logger.warning("Failed to download zip archive for %i", year)
    return False
a0e5933178f5d18332f0b231f7a6ec43c0651714
3,658,803
import re


def isURL(url: str) -> bool:
    """Check whether a given string is a URL."""
    return url is not None and re.match(urlregex, url) is not None
6d32fee1fa374c07214d2a75cc39b868338ffa1c
3,658,804
import numpy as np


def rmse(Y_true, Y_hat):
    """Returns root mean squared error.

    Args:
        Y_true : true outputs [N, (1)]
        Y_hat : predicted outputs [N, (1)]
    """
    if Y_true.ndim == 2:
        Y_true = Y_true[:, 0]
    if Y_hat.ndim == 2:
        Y_hat = Y_hat[:, 0]
    return np.sqrt(np.mean((Y_true - Y_hat) ** 2))
676d14a5058632fbf1cd40e4d60d5cfb4c46e137
3,658,805
def getAllDescWords(itemList):
    """Returns a list of "description words" for each item named in itemList."""
    itemList = list(set(itemList))  # make itemList unique
    descWords = []
    for item in itemList:
        descWords.extend(NYCitems[item][DESCWORDS])
    return list(set(descWords))
fb7ea77fac5aae3abc2e6dbcc1c3af7ac404b5c2
3,658,806
from one.api import ONE import ibllib.atlas as atlas from ibllib.atlas import Insertion import atlaselectrophysiology.load_histology as hist import numpy as np import matplotlib.pyplot as plt def plot_probe_trajectory_histology( x, y, subject_ID, axc, axs, provenance = 'Planned', project = 'ibl_neuropixel_brainwide_01', gr_percentile_min=0.2, rd_percentile_min=1, rd_percentile_max=99.99, font_size = 8, label_size = 8 ): """Plot slices of Histology data along the insertion at [x,y] for subject ID. Slices made in coronal and sagittal planes. The slices through the Histology data can be made along any of the provenances of the probe at [x,y] for subject ID - Planned, Micro-manipulator, Histology track, Ephys aligned histology track. axc : AxesSubplot, None MUST pass an AxesSubplot object for plotting to! For coronal plot. axs : AxesSubplot, None MUST pass an AxesSubplot object for plotting to! For sagittal plot. """ # connect to ONE one = ONE() # get list of all trajectories at [x,y], for project trajs = one.alyx.rest('trajectories', 'list', x=x, y=y, project=project) # keeping subjs and labs for look-up later if needed.. subjs = [sess['session']['subject'] for sess in trajs] labs = [sess['session']['lab'] for sess in trajs] #aidx = subjs.index(atlas_ID) sidx = subjs.index(subject_ID) # Fetch trajectory metadata for traj: traj = one.alyx.rest('trajectories', 'list', session=trajs[sidx]['session']['id'], probe=trajs[sidx]['probe_name'], provenance=provenance) if traj == []: raise Exception("No trajectory found with provenance: " + provenance) # get insertion object from ANY (the first) trajectory ins = Insertion.from_dict(traj[0]) axis_labels = np.array(['ml (µm)', 'dv (µm)', 'ap (µm)']) #fig1, ax1 = plt.subplots() # new figure and axes objects - CORONAL #fig2, ax2 = plt.subplots() # new figure and axes objects - SAGITTAL # set axes to local variables ax1 = axc ax2 = axs lab = labs[ sidx ] # this returns index in labs where subject_ID is in subjs hist_paths = hist.download_histology_data(subject_ID, lab) # create the brain atlases from the data ba_gr = atlas.AllenAtlas(hist_path=hist_paths[0]) # green histology channel autofl. ba_rd = atlas.AllenAtlas(hist_path=hist_paths[1]) # red histology channel cm-dii # CORONAL # implementing tilted slice here to modify its cmap # get tilted slice of the green and red channel brain atlases # using the .image data as this contains the signal gr_tslice, width, height, depth = ba_gr.tilted_slice(ins.xyz, 1, volume = ba_gr.image) rd_tslice, width, height, depth = ba_rd.tilted_slice(ins.xyz, 1, volume = ba_rd.image) gr_tslice_roi = gr_tslice[120:240, 150:300] # isolate large slice over thalamus for max pixel value rd_tslice_roi = rd_tslice[120:240, 150:300] width = width * 1e6 height = height * 1e6 depth = depth * 1e6 cmap = plt.get_cmap('bone') # get the transfer function from y-axis to squeezed axis for second axe ab = np.linalg.solve(np.c_[height, height * 0 + 1], depth) height * ab[0] + ab[1] # linearly scale the values in 2d numpy arrays to between 0-255 (8bit) # Using gr_tslice min and gr_tslice_roi max to scale autofl. 
# using rd_tslice min and percentile (99.99 default) to scale CM-DiI gr_in = np.interp(gr_tslice, (np.percentile(gr_tslice, gr_percentile_min), gr_tslice_roi.max()), (0, 255)) rd_in = np.interp(rd_tslice, (np.percentile(rd_tslice, rd_percentile_min), np.percentile(rd_tslice, rd_percentile_max)), (0, 255)) # join together red, green, blue numpy arrays to form a RGB image ALONG A NEW DIMENSION # NOTE need a blue component, have added a set of zeros as blue channel should be BLANK # NOTE2: converted to unit8 bit, as pyplot imshow() method only reads this format Z = np.stack([ rd_in.astype(dtype=np.uint8), gr_in.astype(dtype=np.uint8), np.zeros(np.shape(gr_tslice)).astype(dtype=np.uint8) ]) # transpose the columns to the FIRST one is LAST # i.e the NEW DIMENSION [3] is the LAST DIMENSION Zt = np.transpose(Z, axes=[1,2,0]) # can now add the RGB array to imshow() ax1.imshow(Zt, interpolation='none', aspect='auto', extent=np.r_[width, height], cmap=cmap, vmin=np.min(gr_in), vmax=np.max(gr_in) ) sec_ax = ax1.secondary_yaxis('right', functions=( lambda x: x * ab[0] + ab[1], lambda y: (y - ab[1]) / ab[0])) ax1.set_xlabel(axis_labels[0], fontsize=font_size) ax1.set_ylabel(axis_labels[1], fontsize=font_size) sec_ax.set_ylabel(axis_labels[2], fontsize=font_size) ax1.tick_params(axis='x', labelrotation = 90) ax1.tick_params(axis='x', labelsize = label_size) ax1.tick_params(axis='y', labelsize = label_size) sec_ax.tick_params(axis='y', labelsize = label_size) # SAGITTAL # implementing tilted slice here to modify its cmap # get tilted slice of the green and red channel brain atlases # using the .image data as this contains the signal gr_tslice, width, height, depth = ba_gr.tilted_slice(ins.xyz, 0, volume = ba_gr.image) rd_tslice, width, height, depth = ba_rd.tilted_slice(ins.xyz, 0, volume = ba_rd.image) width = width * 1e6 height = height * 1e6 depth = depth * 1e6 cmap = plt.get_cmap('bone') # get the transfer function from y-axis to squeezed axis for second axe ab = np.linalg.solve(np.c_[height, height * 0 + 1], depth) height * ab[0] + ab[1] # linearly scale the values in 2d numpy arrays to between 0-255 (8bit) # Using gr_tslice min and max to scale the image # weirdly rd_in has very large min and max (problem with the original data acquisition?) so best to scale whole RGB with gr_in/1.5! 
gr_in = np.interp(gr_tslice, (gr_tslice.min(), gr_tslice.max()), (0, 255)) rd_in = np.interp(rd_tslice, (gr_tslice.min(), gr_tslice.max()/1.5), (0, 255)) # join together red, green, blue numpy arrays to form a RGB image ALONG A NEW DIMENSION # NOTE need a blue component, have added a set of zeros as blue channel should be BLANK # NOTE2: converted to unit8 bit, as pyplot imshow() method only reads this format Z = np.stack([ rd_in.astype(dtype=np.uint8), gr_in.astype(dtype=np.uint8), np.zeros(np.shape(gr_tslice)).astype(dtype=np.uint8) ]) # transpose the columns to the FIRST one is LAST # i.e the NEW DIMENSION [3] is the LAST DIMENSION Zt = np.transpose(Z, axes=[1,2,0]) # can now add the RGB array to ax2 via imshow() ax2.imshow(Zt, interpolation='none', aspect='auto', extent=np.r_[width, height], cmap=cmap, vmin=np.min(gr_in), vmax=np.max(gr_in) ) #start = ins.xyz[:, 1] * 1e6 #end = ins.xyz[:, 2] * 1e6 #xCoords = np.array([start[0], end[0]]) sec_ax = ax2.secondary_yaxis('right', functions=( lambda x: x * ab[0] + ab[1], lambda y: (y - ab[1]) / ab[0])) ax2.set_xlabel(axis_labels[2], fontsize=font_size) ax2.set_ylabel(axis_labels[1], fontsize=font_size) sec_ax.set_ylabel(axis_labels[0], fontsize=font_size) ax2.tick_params(axis='x', labelrotation = 90) ax2.tick_params(axis='x', labelsize = label_size) ax2.tick_params(axis='y', labelsize = label_size) sec_ax.tick_params(axis='y', labelsize = label_size) plt.tight_layout() # tighten layout around xlabel & ylabel # add a line of the Insertion object onto ax1 (cax - coronal) # plotting PLANNED insertion #ax1.plot(ins.xyz[:, 0] * 1e6, ins.xyz[:, 2] * 1e6, colour, linewidth=linewidth) #ax2.plot(ins.xyz[:, 1] * 1e6, ins.xyz[:, 2] * 1e6, colour, linewidth=linewidth) return {'coronal-slice': ax1, 'sagittal-slice': ax2, 'x': x, 'y': y, 'provenance': provenance, 'subject_id': subject_ID }
a1f7722d0907ca3e11e0ea86a1927e08a92d1c84
3,658,807
def create_constrained_mechanical_system_from_component(structural_component, constant_mass=False,
                                                        constant_damping=False,
                                                        constraint_formulation='boolean',
                                                        **formulation_options):
    """
    Create a mechanical system from a component where the constraints are applied by a constraint formulation

    Parameters
    ----------
    structural_component : amfe.component.StructuralComponent
        Structural component describing the mechanical system
    constant_mass : bool
        Flag indicating if mass matrix is constant
    constant_damping : bool
        Flag indicating if damping matrix is constant
    constraint_formulation : str {'boolean', 'lagrange', 'nullspace_elimination'}
        String describing the constraint formulation that shall be used
    formulation_options : dict
        options passed to the set_options method of the constraint formulation

    Returns
    -------
    system : amfe.solver.translators.MechanicalSystem
    formulation : amfe.constraint.ConstraintFormulation
    """
    system_unconstrained = create_mechanical_system_from_structural_component(structural_component)
    constraint_formulation = _create_constraint_formulation(system_unconstrained, structural_component,
                                                            constraint_formulation, **formulation_options)
    if constant_mass:
        M = MemoizeConstant(constraint_formulation.M)
    else:
        M = constraint_formulation.M

    if constant_damping:
        D = MemoizeConstant(constraint_formulation.D)
    else:
        D = constraint_formulation.D

    f_int = constraint_formulation.f_int
    K = constraint_formulation.K
    f_ext = constraint_formulation.f_ext
    dimension = constraint_formulation.dimension

    system = MechanicalSystem(dimension, M, D, K, f_ext, f_int)
    return system, constraint_formulation
e661ba16a691266e60b14d4594db16e09d81c2e2
3,658,808
def parse_certificate_issuer_id(id):
    """
    :param id: The resource collection type.
    :type id: str
    :rtype: KeyVaultId
    """
    return parse_object_id('certificates/issuers', id)
919ad42ede4081c67c38f9d44945045d3f84bf87
3,658,809
def normalize_whitespace(
    text, no_line_breaks=False, strip_lines=True, keep_two_line_breaks=False
):
    """
    Given ``text`` str, replace one or more spacings with a single space, and one
    or more line breaks with a single newline. Also strip leading/trailing whitespace.
    """
    if strip_lines:
        text = "\n".join([x.strip() for x in text.splitlines()])

    if no_line_breaks:
        text = constants.MULTI_WHITESPACE_TO_ONE_REGEX.sub(" ", text)
    else:
        if keep_two_line_breaks:
            text = constants.NONBREAKING_SPACE_REGEX.sub(
                " ", constants.TWO_LINEBREAK_REGEX.sub(r"\n\n", text)
            )
        else:
            text = constants.NONBREAKING_SPACE_REGEX.sub(
                " ", constants.LINEBREAK_REGEX.sub(r"\n", text)
            )

    return text.strip()
46d60967f48cb2b14ee44eaa4979592b87e8d811
3,658,810
import numpy


def nancumprod(x1, **kwargs):
    """
    Return the cumulative product of array elements over a given axis treating
    Not a Numbers (NaNs) as one.

    For full documentation refer to :obj:`numpy.nancumprod`.

    Limitations
    -----------
    Parameter ``x`` is supported as :obj:`dpnp.ndarray`.
    Keyword arguments ``kwargs`` are currently unsupported.
    Otherwise the functions will be executed sequentially on CPU.
    Input array data types are limited by supported DPNP :ref:`Data types`.

    .. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis.

    Examples
    --------
    >>> import dpnp as np
    >>> a = np.array([1., np.nan])
    >>> result = np.nancumprod(a)
    >>> [x for x in result]
    [1.0, 1.0]
    >>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]])
    >>> result = np.nancumprod(b)
    >>> [x for x in result]
    [1.0, 2.0, 2.0, 8.0, 8.0, 48.0]

    """
    if not use_origin_backend(x1) and not kwargs:
        if not isinstance(x1, dparray):
            pass
        else:
            return dpnp_nancumprod(x1)

    return call_origin(numpy.nancumprod, x1, **kwargs)
e388081ca78decb8b05a6138173cb487a1c72c58
3,658,811
from typing import Dict
from typing import Any
from typing import List
import os


def _attr_manually_specified_tests_get_errors(
    yaml_path: str,
    yaml_entry: Dict[str, Any],
    tag: str,
    attr: str,
    grep_tags: List[str]
) -> List[str]:
    """Report incorrect manually-specified test attributes

    This function ensures that manually-specified tests
    refer to files that actually exist.

    Arguments:
        yaml_path: A path to a .drift-data.yml file
        yaml_entry: The YAML entry to validate
        tag: The region tag corresponding to the specified YAML entry
        attr: The attribute of the YAML entry to validate
        grep_tags: A list of tags existing (not necessarily parsed out of)
                   the source code

    Returns:
        A list of error messages for any invalid manually-specified tests
        (empty if they are all valid)
    """
    errors = []
    yaml_dirname = os.path.dirname(yaml_path)
    for test_path in yaml_entry.keys():
        if test_path in constants.RESERVED_YAML_KEYS:
            continue  # Skip non-filepaths

        if not os.path.isabs(test_path):
            test_path = os.path.join(yaml_dirname, test_path)

        if not os.path.exists(test_path):
            errors.append(
                cli_yaml_errors.MissingTestFileViolation(test_path, yaml_path))

    return errors
4bce9ecd8987cb300cefddc664c5f49a0ba1d8af
3,658,812
def error(data, mn, mx, confidence):
    """
    Compute the error components.
    :param data: the collected data.
    :param mn: the critical value (minimum).
    :param mx: the critical value (maximum).
    :param confidence: the confidence level.
    :return: (Dict) the dictionary of errors.
    """
    return errutils.error_two_tails(data, mn, mx, confidence)
31ba96b58a5017a3bd3a5166b460878a886f2bb3
3,658,813
import time


def retry_connection(f):
    """Decorator. Reconnect on failure."""
    def retry(*args, **kwargs):
        seconds_to_retry = 5
        success = False
        while not success:
            try:
                result = f(*args, **kwargs)
                success = True
                return result
            except Exception:
                print("{0}: {1} --> connection problems. retry in {2} seconds.".format(
                    curr_date(), f.__name__, seconds_to_retry))
                time.sleep(seconds_to_retry)
        # return None
    return retry
d9ccbe725f50a6061f77ac76d02e11c52dd91cb1
3,658,814
def shift_mean(x_mod, x_org):
    """
    Shift the mean value of `x_mod` such that it equals the mean of `x_org`.

    Parameters
    ----------
    x_org : ndarray
        The array which holds the "true" mean value.
    x_mod : ndarray
        The modified copy of `x_org` which must have its mean value shifted.

    Returns
    -------
    shifted_x_mod : ndarray
        A copy of `x_mod` with the same mean value as `x_org`.

    Examples
    --------
    For example,

    >>> import numpy as np
    >>> from magni.imaging.visualisation import shift_mean
    >>> x_org = np.arange(4).reshape(2, 2)
    >>> x_mod = np.ones((2, 2))
    >>> print('{:.1f}'.format(x_org.mean()))
    1.5
    >>> print('{:.1f}'.format(x_mod.mean()))
    1.0
    >>> shifted_x_mod = shift_mean(x_mod, x_org)
    >>> print('{:.1f}'.format(shifted_x_mod.mean()))
    1.5
    >>> np.set_printoptions(suppress=True)
    >>> shifted_x_mod
    array([[ 1.5,  1.5],
           [ 1.5,  1.5]])

    """
    @_decorate_validation
    def validate_input():
        _numeric('x_mod', ('integer', 'floating', 'complex'), shape=(-1, -1))
        _numeric('x_org', ('integer', 'floating', 'complex'), shape=x_mod.shape)

    validate_input()

    return x_mod + (x_org.mean() - x_mod.mean())
0f04e37a9434548cff77a1c92d7540595ee5a1cf
3,658,815
def conversation_detail(request, pk):
    """
    Retrieve, update or delete a conversation.
    """
    try:
        conversation = Conversation.objects.get(pk=pk)
    except Conversation.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = Conv_Serializer(conversation)
        return Response(serializer.data)

    elif request.method == 'PUT':
        serializer = Conv_Serializer(conversation, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    elif request.method == 'DELETE':
        conversation.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
5c4a0b20f38ca7b75415ecb88f25e9992e2a3e57
3,658,816
def purchase_products(product_id):
    """Purchase a product"""
    app.logger.info("Request to purchase product with id %s", product_id)
    check_content_type("application/json")
    product = Product.find(product_id)
    if not product:
        abort(
            status.HTTP_404_NOT_FOUND,
            "product with id '{}' was not found.".format(product_id)
        )
    return make_response(jsonify(product.serialize()), status.HTTP_200_OK)
c6681110ffaa25cab1ea2fd649c845c513b7b178
3,658,817
def process_alerts(data):
    """
    Returns a Pandas DataFrame from the API call.
    :return: A pandas DataFrame.
    """
    data_dicts = data.get("data", [])
    lines = []
    for data_dict in data_dicts:
        data_dict["alertDescription"] = helper.extract_json_field(
            data_dict.get("alertProps", {}), "description.descriptionId")
        description_dict = helper.extract_json_field(
            data_dict.get("alertProps", {}), "description.descriptionObj")
        data_dict.update(description_dict)

        alert_context = helper.extract_json_field(
            data_dict.get("keys", {}), "src.keys.alert")
        if alert_context:
            data_dict.update(alert_context)

        lines.append(data_dict)

    return pd.DataFrame(lines)
64a06486ebfde2610f11110b55a73a359fe8d0c0
3,658,818
def validate(df):
    """Validate the timeseries dataframe
    """
    err_msgs = []
    warn_msgs = []

    # check column names
    for col in EXP_COLS:
        if col not in df:
            err_msgs.append(f"**{col}** column missing")

    msgs = {
        "errors": err_msgs,
        "warnings": warn_msgs
    }
    is_valid_file = len(err_msgs) == 0
    return msgs, is_valid_file
74480413646d1f7480c7915cdd1d28116ace83c6
3,658,819
def _gcs_uri_rewriter(raw_uri):
    """Rewrite GCS file paths as required by the rewrite_uris method.

    The GCS rewriter performs no operations on the raw_path and simply returns
    it as the normalized URI. The docker path has the gs:// prefix replaced
    with gs/ so that it can be mounted inside a docker image.

    Args:
        raw_uri: (str) the raw GCS URI, prefix, or pattern.

    Returns:
        normalized: a cleaned version of the uri provided by command line.
        docker_path: the uri rewritten in the format required for mounting inside
                     a docker worker.
    """
    docker_path = raw_uri.replace('gs://', 'gs/', 1)
    return raw_uri, docker_path
6e476860cb175dd2936cc0c080d3be1d09e04b77
3,658,820
def remove_apostrophe(text):
    """Replace apostrophes in text with spaces"""
    return text.replace("'", " ")
c7d918e56646a247564a639462c4f4d26bb27fc4
3,658,821
def generate_initials(text):
    """
    Extract initials from a string

    Args:
        text(str): The string to extract initials from

    Returns:
        str: The initials extracted from the string
    """
    if not text:
        return None

    text = text.strip()
    if text:
        split_text = text.split(" ")
        if len(split_text) > 1:
            return (split_text[0][0] + split_text[-1][0]).upper()
        else:
            return split_text[0][0].upper()
    return None
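A brief usage sketch for the function above (hypothetical inputs; assumes generate_initials is in scope):

# Hypothetical inputs; assumes generate_initials() from the snippet above is importable.
assert generate_initials("Ada Lovelace") == "AL"   # first and last words
assert generate_initials(" grace ") == "G"         # single word, whitespace stripped
assert generate_initials("") is None               # empty input returns None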
709e53392c790585588da25290a80ab2d19309f8
3,658,822
def nmf_manifold_vec_update(X, U, V, k_to_W, k_to_D, k_to_L, k_to_feat_inds, n_steps=10, gamma=1.0, delta=1.0, i=0, verbose=False, norm_X=None): """ Perform <n_steps> update steps with a fixed Laplacian matrix for each latent factor Parameters ---------- X : np.array data to factor U : np.array previous setting of U to update V : np.array previous setting of V to update k_to_W : dict mapping of latent factor to weighted adjacency matrix k_to_D : dict mapping of latent factor to diagonal matrix that is the sum of W along a row (or column) k_to_L : dict mapping of latent factor to L = D - W n_steps : int number of update steps to perform gamma : float relative importance of manifold regularization term delta : float relative importance of ignoring manifold penalty i : int number of previous iterations verbose : bool if True, print objective function value after each iteration norm_X : float or None stored value of the norm of X """ obj_data = None m, k_latent = U.shape n, k_latent = V.shape for n_step in range(n_steps): U_up_num = X.dot(V) U_up_denom = U.dot((V.transpose().dot(V))) + U U = np.multiply(U, np.divide(U_up_num, U_up_denom, out=np.ones_like(U_up_num), where=U_up_denom!=0)) # 0 / 0 := 1 V_up_num_recon = X.transpose().dot(U) V_up_denom_recon = V.dot((U.transpose().dot(U))) # update each column vector of V separately to accomodate different Laplacians V_up_num_man = np.zeros((n, k_latent)) V_up_denom_man = np.zeros((n, k_latent)) V_up_num_ign = np.zeros((n, k_latent)) for k in range(k_latent): W = k_to_W[k] D = k_to_D[k] V_up_num_man[:,k] = gamma * W.dot(V[:,k]) V_up_denom_man[:,k] = gamma * D.dot(V[:,k]) nz_inds = k_to_feat_inds[k] V_up_num_ign[nz_inds,k] = delta * np.power(V[nz_inds,k] + 1, -2) V_up_num = V_up_num_recon + (V_up_num_man + V_up_num_ign) V_up_denom = V_up_denom_recon + V_up_denom_man V_up_denom[V_up_denom < EPSILON] = EPSILON V = np.multiply(V, np.divide(V_up_num, V_up_denom, out=np.ones_like(V_up_num), where=V_up_denom!=0)) V[V < EPSILON] = EPSILON obj_data = nmf_manifold_vec_obj(X, U, V, k_to_L, k_to_feat_inds, gamma=gamma, delta=delta) print(i+n_step+1, obj_data['obj']) if(verbose): print(obj_data) return U, V, obj_data
f1998d8ccd000892f441341240216ada5fd46a70
3,658,823
def check_xyz_species_for_drawing(xyz, species):
    """A helper function to avoid repetitive code"""
    if species is not None and xyz is None:
        xyz = species.final_xyz
    if species is not None and not isinstance(species, ARCSpecies):
        raise InputError('Species must be an ARCSpecies instance. Got {0}.'.format(type(species)))
    if species is not None and not species.final_xyz:
        raise InputError('Species {0} has an empty final_xyz attribute.'.format(species.label))
    return xyz
26caa32c55eee43dab53f85e442775095da92580
3,658,824
def GetUDPStreamSample(command_out, sending_vm, receiving_vm, request_bandwidth,
                       network_type, iteration):
    """Get a sample from the nuttcp string results.

    Args:
        command_out: the nuttcp output.
        sending_vm: vm sending the UDP packets.
        receiving_vm: vm receiving the UDP packets.
        request_bandwidth: the requested bandwidth in the nuttcp sample.
        network_type: the type of the network, external or internal.
        iteration: the run number of the test.

    Returns:
        sample from the results of the nuttcp tests.
    """
    data_line = command_out.split('\n')[0].split(' ')
    data_line = [val for val in data_line if val]

    actual_bandwidth = float(data_line[6])
    units = data_line[7]
    packet_loss = data_line[16]

    metadata = {
        'receiving_machine_type': receiving_vm.machine_type,
        'receiving_zone': receiving_vm.zone,
        'sending_machine_type': sending_vm.machine_type,
        'sending_zone': sending_vm.zone,
        'packet_loss': packet_loss,
        'bandwidth_requested': request_bandwidth,
        'network_type': network_type,
        'iteration': iteration
    }

    return sample.Sample('bandwidth', actual_bandwidth, units, metadata)
d9f0e75602768ee574d280215ebc78ebd67a520b
3,658,825
def setSwaggerParamDesc(swagger, searchParams):
    """
    Set the Swagger GET Parameter Description to what is stored in the search Parameters
    using helper function
    """
    for id in range(len(swagger['tags'])):
        # Paths are prefaced with forward slash
        idName = '/' + swagger['tags'][id]['name']
        # Filter out Capability statement
        if idName != '/CapabilityStatement':
            for paramId in range(len(swagger['paths'][idName]['get']['parameters'])):
                # Get the parameter name to use getParamDesc function
                paramName = swagger['paths'][idName]['get']['parameters'][paramId]['name']
                # Set description to what is returned from search Parameters
                swagger['paths'][idName]['get']['parameters'][paramId]['description'] = getParamDesc(searchParams, idName, paramName)

    swagger = removeFormatParam(swagger)
    return swagger
e83c4c713718d382e5ce6f2429d029d4eb9ae588
3,658,826
def parse_args(args=[], doc=False): """ Handle parsing of arguments and flags. Generates docs using help from `ArgParser` Args: args (list): argv passed to the binary doc (bool): If the function should generate and return manpage Returns: Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage """ parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}") parser.add_argument("username", help="Username of the new user to add") parser.add_argument("-p", dest="password", help="Password for the new user") parser.add_argument("-n", dest="noninteractive", action="store_false", help="Don't ask for user input") parser.add_argument("--version", action="store_true", help=f"print program version") args = parser.parse_args(args) arg_helps_with_dups = parser._actions arg_helps = [] [arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps] NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}" SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... " DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n" for item in arg_helps: # Its a positional argument if len(item.option_strings) == 0: # If the argument is optional: if item.nargs == "?": SYNOPSIS += f"[{item.dest.upper()}] " elif item.nargs == "+": SYNOPSIS += f"[{item.dest.upper()}]... " else: SYNOPSIS += f"{item.dest.upper()} " else: # Boolean flag if item.nargs == 0: if len(item.option_strings) == 1: DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n" else: DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n" elif item.nargs == "+": DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n" else: DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n" if doc: return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n" else: return args, parser
ca77aad1d31287f1394678db90c0857dbdae6a43
3,658,827
# array() and mean() below are numpy functions; the stdlib `array` module would not work here.
from numpy import array, mean


def interact(u, v):
    """Compute element-wise mean(s) from two arrays."""
    return tuple(mean(array([u, v]), axis=0))
9dd567568d5301dd62fcf19b7b4ac0130fc5b527
3,658,828
def part_allocation_count(build, part, *args, **kwargs):
    """ Return the total number of <part> allocated to <build> """
    return build.getAllocatedQuantity(part)
84c94ca4e1b1006e293851189d17f63fc992b420
3,658,829
import numpy as np
import scipy.stats as sp
import statsmodels.stats.multitest as smmt


def stat_threshold(Z, mce='fdr_bh', a_level=0.05, side='two', copy=True):
    """
    Threshold z maps

    Parameters
    ----------
    mce: multiple comparison error correction method, should be among the
    options below. [default: 'fdr_bh']. The options are from statsmodels packages:

        `b`, `bonferroni` : one-step correction
        `s`, `sidak` : one-step correction
        `hs`, `holm-sidak` : step down method using Sidak adjustments
        `h`, `holm` : step-down method using Bonferroni adjustments
        `sh`, `simes-hochberg` : step-up method (independent)
        `hommel` : closed method based on Simes tests (non-negative)
        `fdr_i`, `fdr_bh` : Benjamini/Hochberg (non-negative)
        `fdr_n`, `fdr_by` : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (Benjamini/Hochberg)
        'fdr_tsbky' : two stage fdr correction (Benjamini/Krieger/Yekutieli)
        'fdr_gbs' : adaptive step-down fdr correction (Gavrilov, Benjamini, Sarkar)
    """
    if copy:
        Z = Z.copy()

    if side == 'one':
        sideflag = 1
    elif side in ('two', 'double'):
        sideflag = 2

    Idx = np.triu_indices(Z.shape[0], 1)
    Zv = Z[Idx]

    Pv = sp.norm.cdf(-np.abs(Zv)) * sideflag

    [Hv, adjpvalsv] = smmt.multipletests(Pv, method=mce)[:2]

    adj_pvals = np.zeros(Z.shape)
    Zt = np.zeros(Z.shape)

    Zv[np.invert(Hv)] = 0
    Zt[Idx] = Zv
    Zt = Zt + Zt.T

    adj_pvals[Idx] = adjpvalsv
    adj_pvals = adj_pvals + adj_pvals.T
    adj_pvals[range(Z.shape[0]), range(Z.shape[0])] = 0

    return Zt, binarize(Zt), adj_pvals
3c582c0a59f8bd5544f8620870732562200f4f0a
3,658,830
def esmf_grid(lon, lat, periodic=False, mask=None): """ Create an ESMF.Grid object, for constructing ESMF.Field and ESMF.Regrid. Parameters ---------- lon, lat : 2D numpy array Longitute/Latitude of cell centers. Recommend Fortran-ordering to match ESMPy internal. Shape should be ``(Nlon, Nlat)`` for rectilinear grid, or ``(Nx, Ny)`` for general quadrilateral grid. periodic : bool, optional Periodic in longitude? Default to False. Only useful for source grid. mask : 2D numpy array, optional Grid mask. According to the ESMF convention, masked cells are set to 0 and unmasked cells to 1. Shape should be ``(Nlon, Nlat)`` for rectilinear grid, or ``(Nx, Ny)`` for general quadrilateral grid. Returns ------- grid : ESMF.Grid object """ # ESMPy expects Fortran-ordered array. # Passing C-ordered array will slow down performance. for a in [lon, lat]: warn_f_contiguous(a) warn_lat_range(lat) # ESMF.Grid can actually take 3D array (lon, lat, radius), # but regridding only works for 2D array assert lon.ndim == 2, 'Input grid must be 2D array' assert lon.shape == lat.shape, 'lon and lat must have same shape' staggerloc = ESMF.StaggerLoc.CENTER # actually just integer 0 if periodic: num_peri_dims = 1 else: num_peri_dims = None # ESMPy documentation claims that if staggerloc and coord_sys are None, # they will be set to default values (CENTER and SPH_DEG). # However, they actually need to be set explicitly, # otherwise grid._coord_sys and grid._staggerloc will still be None. grid = ESMF.Grid( np.array(lon.shape), staggerloc=staggerloc, coord_sys=ESMF.CoordSys.SPH_DEG, num_peri_dims=num_peri_dims, ) # The grid object points to the underlying Fortran arrays in ESMF. # To modify lat/lon coordinates, need to get pointers to them lon_pointer = grid.get_coords(coord_dim=0, staggerloc=staggerloc) lat_pointer = grid.get_coords(coord_dim=1, staggerloc=staggerloc) # Use [...] to avoid overwritting the object. Only change array values. lon_pointer[...] = lon lat_pointer[...] = lat # Follows SCRIP convention where 1 is unmasked and 0 is masked. # See https://github.com/NCPP/ocgis/blob/61d88c60e9070215f28c1317221c2e074f8fb145/src/ocgis/regrid/base.py#L391-L404 if mask is not None: # remove fractional values mask = np.where(mask == 0, 0, 1) # convert array type to integer (ESMF compat) grid_mask = mask.astype(np.int32) if not (grid_mask.shape == lon.shape): raise ValueError( 'mask must have the same shape as the latitude/longitude' 'coordinates, got: mask.shape = %s, lon.shape = %s' % (mask.shape, lon.shape) ) grid.add_item(ESMF.GridItem.MASK, staggerloc=ESMF.StaggerLoc.CENTER, from_file=False) grid.mask[0][:] = grid_mask return grid
8087cfbf0c4923338984913dcd1a421e3a46dd29
3,658,831
import logging

import numpy as np


def magic_series(grid):
    """
    Check if grid satisfies the definition
    series[k] == sum(series[i] == k)
    """
    logging.debug("Grid:\n{}".format(grid))
    magic = (grid.sum(1) == np.where(grid.T)[1])
    logging.debug("Magic check:\n{}".format(magic))
    return magic.all()
0deb972084f77e004c192d1c606d1ec34b193d61
3,658,832
def convert_to_numeral(decimal_integer: int, roman_format="brackets"): """Convert decimal to Roman numeral. roman_format is a str containing either 'brackets' or 'latex' The default option, 'brackets', converts 3,000,000,000 to [[MMM]] and 3,000,000 to [MMM]. 'latex' outputs a LaTeX formula for the numeral. """ def barfunction_latex(prefix: str, unbarred_string: str, num_of_bars: int, separator_size: int = 2): """Return a LaTeX-renderable representation of overline bars.""" bars_before = (r"\overline{" * num_of_bars) + r"\text{" bars_after = r"}" + ("}" * num_of_bars) if prefix: separation = f"\\hspace{{{separator_size}pt}}" else: separation = "" return prefix + separation + bars_before + unbarred_string + bars_after def barfunction_brackets(prefix: str, unbarred_string: str, num_of_bars: int): """Represent bars as (possibly nested) square brackets. For example, 3,000,000,000 is converted to [[MMM]]. """ bars_before = ("[" * num_of_bars) bars_after = ("]" * num_of_bars) return prefix + bars_before + unbarred_string + bars_after def latex_surround_with_dollars(string): """Surround LaTeX math expression with dollar signs.""" return "$" + string + "$" def list_occurring_roman_symbols(roman_symbols, integer_value): """List symbols that occur in Roman representation of number. + roman_symbols is [(int, str)], a list of tuples, each of which representing one Roman symbol and its corresponding integer value. For example, (3, 'III'). + integer_value is the value to be converted. Return: remainder, list_of_occurring_symbols + remainder: what remains from the number, which was too small to represent with the provided symbols + list_of_occurring_symbols: a list of the symbols present in the Roman representation of the number. """ remainder = integer_value list_of_occurring_symbols = [] for integer_value, str_roman_symbol in roman_symbols: repetitions, remainder = divmod(remainder, integer_value) list_of_occurring_symbols.append(str_roman_symbol * repetitions) return remainder, list_of_occurring_symbols def apply_barfunction(list_of_occurring_symbols, barfunction, numeral_string, num_of_bars): """Build up Roman numeral representation applying barfunction. The barfunction is only applied if list_of_occurring_symbols is not empty, otherwise the original numeral_string is returned untouched. """ unbarred_string = "".join(list_of_occurring_symbols) if unbarred_string: numeral_string = barfunction(numeral_string, unbarred_string, num_of_bars) return numeral_string if roman_format == 'latex': barfunction = barfunction_latex elif roman_format == 'brackets': barfunction = barfunction_brackets else: raise ValueError('roman_format should be either "latex" or "brackets"') remainder = decimal_integer numeral_string = "" for symbolset in ROMAN_NUMERAL_TABLE: num_of_bars = symbolset["bars"] symbols = symbolset["symbols"] remainder, list_of_occurring_symbols = list_occurring_roman_symbols( symbols, remainder) numeral_string = apply_barfunction(list_of_occurring_symbols, barfunction, numeral_string, num_of_bars) if roman_format == 'latex': return latex_surround_with_dollars(numeral_string) return numeral_string
ebfd2b323879bcca9e20be0d9598104bf0f31e33
3,658,833
def transpose(x):
    """Tensor transpose
    """
    return np.transpose(x)
286c7e36629ff8e38ad5d0233bd1f8fd823514f2
3,658,834
from typing import Optional
from typing import List
from typing import Tuple


def greedy_reduction_flat(m: Mat2) -> Optional[List[Tuple[int, int]]]:
    """Returns a list of tuples (r1,r2) that specify which row should be added to which other row
    in order to reduce one row of m to only contain a single 1.
    In contrast to :func:`greedy_reduction`, it performs the brute-force search starting with the
    highest indices, and places the row operations in such a way that the resulting depth is
    log_2 of the number of rows that have to be added together.
    Used in :func:`lookahead_extract_base`"""
    indicest = find_minimal_sums(m, True)
    if indicest is None:
        return indicest
    return flat_indices(m, list(indicest))[0]
85c8098dd6e727abe64c3d1410c63161309b5135
3,658,835
import numpy as np


def estimate_psd(vec, num_segs=DEFAULT_NUM_SEGS, overlap=DEFAULT_OVERLAP, dt=DEFAULT_DT,
                 tukey_alpha=DEFAULT_TUKEY_ALPHA, one_sided=True):
    """
    estimates the PSD using a DFT
    divides vec into "num_segs" with a fractional overlap of "overlap" between neighbors
    returns the average PSD from these samples (arithmetic mean)

    if one_sided, returns the one-sided PSD. Otherwise, returns the two-sided PSD
    (one half the one-sided PSD).

    WARNING: your logic on how to split segments may be fragile...
    """
    N = len(vec)
    if overlap > N - num_segs:
        raise ValueError("overlap is too big!")

    n = N / (1. + (num_segs - 1.) * (1. - overlap))  # number of entries per segment
    overlap = int(n * overlap)  # number of overlapping entries
    n = int(n)
    seglen = dt * n

    # compute dfts for each segment separately
    psds = np.empty((n // 2, num_segs), complex)
    for segNo in range(num_segs):
        start = segNo * (n - overlap)
        psds[:, segNo], freqs = dft(vec[start:start + n] * tukey(n, tukey_alpha), dt=dt)

    # average
    mean_psd = np.sum(psds.real**2 + psds.imag**2, axis=1) / (seglen * num_segs)
    if one_sided:
        mean_psd *= 2  # account for the power at negative frequencies in the one-sided PSD

    return mean_psd, freqs
1c2d8c51bfd75d617f75dbc4aa3304c05c36e899
3,658,836
def load_data(connection_string: str):
    """
    Load data from a source. Source could be:

    - A JSON File
    - A MongoDB

    Load data from a file
    ---------------------

    If you want to load data from a File, you must provide this connection string:

    >>> connection_string = "/path/to/my/file.json"

    or using URI format:

    >>> connection_string = "file:///path/to/my/file.json"

    Load file from a MongoDB
    ------------------------

    If you want to load data from a MongoDB database, you must provide a connection
    string like:

    >>> connection_string = "mongodb://mongo.example.com:27017"

    Or even more complicated:

    >>> connection_string = "mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test"

    :param connection_string:
    :type connection_string:
    :return:
    :rtype:
    """
    assert isinstance(connection_string, str)

    if connection_string.startswith("mongodb://"):
        data = _load_from_mongo(connection_string)
    elif connection_string.startswith("file://"):
        data = _load_from_file(connection_string)
    else:
        data = _load_from_file("file://{}".format(connection_string))

    # Load JSON info
    return APITest(**data)
abb806e62510077abf8a0b686a5882f637502275
3,658,837
import numpy as np


def himmelblau(xy):
    """
    Himmelblau's function, as a set of residuals (cost = sum(residuals**2))

    The standard Himmelblau's function is with data as [11, 7], and four minima
    at (3.0, 2.0), ~(-2.8, 3.1), ~(-3.8, -3.3), ~(3.6, -1.8).

    Himmelblau's function is a quadratic model in both x and y. Its data-space
    dimension (2) is equal to its model-space dimension (2), so there is only
    parameter-effect curvature.

    Parameters
    ----------
    xy : 2-element list-like
        The x,y parameters of the model.

    Returns
    -------
    2-element list-like
        The residuals of the model.

    Notes
    -----
    https://en.wikipedia.org/wiki/Himmelblau%27s_function
    """
    x, y = xy
    r1 = x*x + y
    r2 = y*y + x
    return np.array([r1, r2])
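A quick numeric check of the residual form above (assumes numpy is available; the point and data values come from the docstring):

# At the documented minimum (3.0, 2.0) the residuals equal the data [11, 7],
# so the least-squares cost sum((himmelblau(xy) - [11, 7])**2) vanishes there.
print(himmelblau([3.0, 2.0]))   # -> [11.  7.]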
6951c77afd39596e7a799fe413bc2fc96a4818c2
3,658,838
from typing import Dict


def parse_instrument_data(smoothie_response: str) -> Dict[str, bytearray]:
    """
    Parse instrument data.

    Args:
        smoothie_response: A string containing a mount prefix (L or R) followed by : and a hex string.

    Returns:
        mapping of the mount prefix to the hex string.
    """
    try:
        items = smoothie_response.split("\n")[0].strip().split(":")
        mount = items[0]
        if mount not in {"L", "R"}:
            raise ParseError(
                error_message=f"Invalid mount '{mount}'", parse_source=smoothie_response
            )
        # data received from Smoothieware is stringified HEX values
        # because of how Smoothieware handles GCODE messages
        data = bytearray.fromhex(items[1])
    except (ValueError, IndexError, TypeError, AttributeError):
        raise ParseError(
            error_message="Unexpected argument to parse_instrument_data",
            parse_source=smoothie_response,
        )
    return {mount: data}
59f02a5d83b600f5fb4104f72f860925487f6422
3,658,839
import itertools
import os


def count_frames(directory):
    """
    counts the number of consecutive pickled frames in directory

    Args:
        directory: str of directory

    Returns:
        0 for none, otherwise >0
    """
    for i in itertools.count(start=0):
        pickle_file = os.path.join(directory, f"{str(i).zfill(12)}.pickle")
        if not os.path.isfile(pickle_file):
            return i
94df6183e34a5d498493f20c03b00346bc38c50f
3,658,840
import numpy as np


def _volume_sum_check(props: PropsDict, sum_to=1, atol=1e-3) -> bool:
    """Check arrays all sum to no more than 1"""
    check_broadcastable(**props)
    sum_ar = np.zeros((1,))
    for prop in props:
        sum_ar = sum_ar + props[prop]
    try:
        assert sum_ar.max() <= sum_to + atol
    except AssertionError:
        raise ValueError(f"Volume fractions for {props.keys()} sum to greater than one")
    return True
631743276b833fd9ea58ae766614b851764ee771
3,658,841
import os def get_management_confs_in_domain(body=None): # noqa: E501 """get management configuration items and expected values in the domain get management configuration items and expected values in the domain # noqa: E501 :param body: domain info :type body: dict | bytes :rtype: ConfFiles """ if connexion.request.is_json: body = DomainName.from_dict(connexion.request.get_json()) # noqa: E501 # Check whether the domain exists domain = body.domain_name # check the input domain checkRes = Format.domainCheck(domain) if not checkRes: num = 400 base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.") return base_rsp, num isExist = Format.isDomainExist(domain) if not isExist: base_rsp = BaseResponse(400, "The current domain does not exist") return base_rsp, 400 # The parameters of the initial return value assignment expected_conf_lists = ConfFiles(domain_name = domain, conf_files = []) # get the path in domain domainPath = os.path.join(TARGETDIR, domain) # When there is a file path is the path of judgment for the configuration items for root, dirs, files in os.walk(domainPath): if len(files) > 0 and len(root.split('/')) > 3: if "hostRecord.txt" in files: continue for d_file in files: d_file_path = os.path.join(root, d_file) contents = Format.get_file_content_by_read(d_file_path) feature = os.path.join(root.split('/')[-1], d_file) yang_modules = YangModule() d_module = yang_modules.getModuleByFeature(feature) file_lists = yang_modules.getFilePathInModdule(yang_modules.module_list) file_path = file_lists.get(d_module.name()).split(":")[-1] conf = ConfFile(file_path = file_path, contents = contents) expected_conf_lists.conf_files.append(conf) print("expected_conf_lists is :{}".format(expected_conf_lists)) if len(expected_conf_lists.domain_name) > 0: base_rsp = BaseResponse(200, "Get management configuration items and expected " + "values in the domain succeccfully") else: base_rsp = BaseResponse(400, "The file is Null in this domain") return expected_conf_lists
95eace508c06be0b9cbf3d232ca2ccdbbaec467d
3,658,842
def small_view(data, attribute):
    """
    Extract a downsampled view from a dataset, for quick statistical summaries
    """
    shp = data.shape
    view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
    return data[attribute, view]
62273269f87cbe6803ef0b5a8e47a681ca1f4d26
3,658,843
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    # connecting with db
    db = connect()
    # creating a cursor object
    c = db.cursor()
    # get the scores table from the matches table using the below sql query
    query = '''
        SELECT wins_table.id, wins_table.team_name, wins_table.wins,
               wins_table.wins + loses_table.loses as total
        FROM
            (SELECT TEAMS.*,
                    (SELECT COUNT(*) FROM MATCHES WHERE MATCHES.winner = TEAMS.id) AS WINS
             FROM TEAMS) as wins_table,
            (SELECT TEAMS.*,
                    (SELECT COUNT(*) FROM MATCHES WHERE MATCHES.loser = TEAMS.id) AS LOSES
             FROM TEAMS) as loses_table
        WHERE wins_table.id = loses_table.id
        ORDER BY wins_table.wins desc;
    '''
    # execute the query
    c.execute(query)
    # query result
    result = c.fetchall()
    # closing the connection with the database
    db.close()
    return result
c6554d1ff34dd08f756d1ad19665deacac4467de
3,658,844
def get_all_feature_names(df: pd.DataFrame, target: str = None) -> list:
    """Get a list of all feature names in a dataframe.

    Args:
        df (pd.DataFrame): dataframe of features and target variable
        target (str): name of target column in df

    Returns:
        all_feature_names (list): list of all feature names
    """
    # if using the main df
    if target in df.columns.tolist():
        df = df.loc[:, ~df.columns.isin([target])]
        all_feature_names = df.columns.tolist()
    # if using samples_df with true and predicted labels
    else:
        df = df.loc[:, ~df.columns.isin(['true_label', 'predicted_label'])]
        all_feature_names = df.columns.tolist()

    return all_feature_names
b0b1964832c6f56200a3d7fbbccd1030e9c52a93
3,658,845
import os
import tempfile


def skipUnlessAddressSanitizer(func):
    """Decorate the item to skip test unless Clang -fsanitize=address is supported."""

    def is_compiler_with_address_sanitizer(self):
        compiler_path = self.getCompiler()
        compiler = os.path.basename(compiler_path)
        f = tempfile.NamedTemporaryFile()

        if lldbplatformutil.getPlatform() == 'windows':
            return "ASAN tests not compatible with 'windows'"

        cmd = "echo 'int main() {}' | %s -x c -o %s -" % (compiler_path, f.name)
        if os.popen(cmd).close() is not None:
            return None  # The compiler cannot compile at all, let's *not* skip the test

        cmd = "echo 'int main() {}' | %s -fsanitize=address -x c -o %s -" % (compiler_path, f.name)
        if os.popen(cmd).close() is not None:
            return "Compiler cannot compile with -fsanitize=address"

        return None

    return skipTestIfFn(is_compiler_with_address_sanitizer)(func)
e7bc085fc508aae526859d0f690db43ae7e7e865
3,658,846
import random

import numpy as np
import pandas as pd


def generate_enhancer_promoter_pair(ep_df):
    """ """
    std_ep_pair = ep_df[['chrom-Enh', 'chromStart', 'chromEnd', 'TSS']]
    min_ep_gap = abs((std_ep_pair['chromEnd'] - std_ep_pair['chromStart']).min())
    max_ep_gap = abs((std_ep_pair['chromEnd'] - std_ep_pair['chromStart']).max())

    fake_samples = []
    for enhancer in std_ep_pair[['chrom-Enh', 'chromStart', 'chromEnd']].values:
        for promoter in std_ep_pair['TSS'].values:
            gap = abs(enhancer[-1] - promoter)
            if gap > min_ep_gap and gap < max_ep_gap:
                current_sample = np.r_[enhancer, promoter]
                fake_samples.append(current_sample)

    fake_samples = random.sample(fake_samples, std_ep_pair.shape[0])
    fake_ep_pair = pd.DataFrame(fake_samples, columns=['chrom-Enh', 'chromStart', 'chromEnd', 'TSS'])
    return std_ep_pair, fake_ep_pair
b87906e6e2d5a23a729aa3f9b19fcd086db2e7c8
3,658,847
from typing import Union
from typing import Tuple
from typing import Dict


def constant_lrs(
    draw, return_kwargs: bool = False
) -> Union[
    st.SearchStrategy[lr_scheduler_pb2.ConstantLR],
    st.SearchStrategy[Tuple[lr_scheduler_pb2.ConstantLR, Dict]],
]:
    """Returns a SearchStrategy for a ConstantLR plus maybe the kwargs."""
    kwargs: Dict = {}

    # initialise and return
    all_fields_set(lr_scheduler_pb2.ConstantLR, kwargs)
    constant_lr = lr_scheduler_pb2.ConstantLR(**kwargs)
    if not return_kwargs:
        return constant_lr
    return constant_lr, kwargs
d7354717a052de2852ea61e55b1b2c3e3df19010
3,658,848
def get_read_only_storage_manager():
    """Get the current Flask app's read only storage manager, create if necessary"""
    return current_app.config.setdefault('read_only_storage_manager', ReadOnlyStorageManager())
cd5dac64a834ac98accb6824d5e971d763acc677
3,658,849
def __parse_sql(sql_rows):
    """
    Parse sqlite3 database output. Modify this function if you have a different
    database setup.

    Helper function for sql_get().

    Parameters:
        sql_rows (str): output from SQL SELECT query.

    Returns:
        dict
    """
    column_names = ['id', 'requester', 'item_name', 'custom_name', 'quantity',
                    'crafting_discipline', 'special_instruction', 'status', 'rarity',
                    'resource_provided', 'pub-date', 'crafter', 'stats']
    request_dict = {
        str(row[0]): {column_names[i]: row[i] for i, _ in enumerate(column_names)}
        for row in sql_rows
    }
    return request_dict
09c61da81af069709dd020b8643425c4c6964137
3,658,850
import random
from itertools import chain, combinations

import networkx as nx
import scipy.special


def _generate_to(qubo, seed, oct_upper_bound, bias=0.5):
    """
    Given a QUBO, an upper bound on oct, and a bias of bipartite vertices,
    generate an Erdos-Renyi graph such that oct_upper_bound number of
    vertices form an OCT set and the remaining vertices are partitioned into
    partites (left partite set with probability of "bias"). Edges between
    the partite sets are then removed.
    """
    # Compute parameters needed for ER
    n = qubo.order()
    p = qubo.size() / scipy.special.binom(n, 2)

    # Generate graph
    graph = nx.erdos_renyi_graph(n=n, p=p, seed=seed)
    random.seed(seed)

    # Compute partite sets on the remaining vertices
    nodes = list(graph.nodes())[oct_upper_bound:]
    partite1 = set()
    partite2 = set()
    for node in nodes:
        if random.random() < bias:
            partite1.add(node)
        else:
            partite2.add(node)

    # Remove edges within a partite set
    for edge in chain(combinations(partite1, 2), combinations(partite2, 2)):
        if graph.has_edge(*edge):
            graph.remove_edge(*edge)

    # Name the graph
    graph.graph['name'] = '{}-{}-{}'.format(qubo.graph['name'], 'to', seed)

    # Sanitize the graph and return
    graph = reset_labels(graph)
    return graph
653aedbd44bf87a9908c8abcf2c9480b836f4a03
3,658,851
def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'): """Return the sqlhelp object to create the table. @param fields: which fields to put in the create. Defaults to all. @param extraFields: A sequence of tuples containing (name,sql type) for additional fields @param addCoastGuardFields: Add the extra fields that come after the NMEA check some from the USCG N-AIS format @type addCoastGuardFields: bool @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres') @return: An object that can be used to generate a return @rtype: sqlhelp.create """ if fields is None: fields = fieldList c = sqlhelp.create('waterlevel',dbType=dbType) c.addPrimaryKey() if 'MessageID' in fields: c.addInt ('MessageID') if 'RepeatIndicator' in fields: c.addInt ('RepeatIndicator') if 'UserID' in fields: c.addInt ('UserID') if 'Spare' in fields: c.addInt ('Spare') if 'dac' in fields: c.addInt ('dac') if 'fid' in fields: c.addInt ('fid') if 'month' in fields: c.addInt ('month') if 'day' in fields: c.addInt ('day') if 'hour' in fields: c.addInt ('hour') if 'min' in fields: c.addInt ('min') if 'stationid' in fields: c.addVarChar('stationid',7) if 'waterlevel' in fields: c.addInt ('waterlevel') if 'datum' in fields: c.addInt ('datum') if 'sigma' in fields: c.addInt ('sigma') if 'source' in fields: c.addInt ('source') if addCoastGuardFields: # c.addInt('cg_s_rssi') # Relative signal strength indicator # c.addInt('cg_d_strength') # dBm receive strength # c.addVarChar('cg_x',10) # Idonno c.addInt('cg_t_arrival') # Receive timestamp from the AIS equipment 'T' c.addInt('cg_s_slotnum') # Slot received in c.addVarChar('cg_r',15) # Receiver station ID - should usually be an MMSI, but sometimes is a string c.addInt('cg_sec') # UTC seconds since the epoch c.addTimestamp('cg_timestamp') # UTC decoded cg_sec - not actually in the data stream return c
0a9bbbed4dd9c20e1126716bb64e2279d4ab29b6
3,658,852
def _section_cohort_management(course, access):
    """ Provide data for the corresponding cohort management section """
    course_key = course.id
    ccx_enabled = hasattr(course_key, 'ccx')

    section_data = {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        'ccx_is_enabled': ccx_enabled,
        'course_cohort_settings_url': reverse(
            'course_cohort_settings',
            kwargs={'course_key_string': str(course_key)}
        ),
        'cohorts_url': reverse('cohorts', kwargs={'course_key_string': str(course_key)}),
        'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': str(course_key)}),
        'verified_track_cohorting_url': reverse(
            'verified_track_cohorting',
            kwargs={'course_key_string': str(course_key)}
        ),
    }
    return section_data
161f01b96952b8538d737c13718d455b69542b51
3,658,853
def rivers_by_station_number(stations, N):
    """Returns a list of N tuples of the form (river name, number of stations on the river).
    These tuples are sorted in decreasing order of station numbers. If many rivers have the
    same number of stations as the 'Nth' river, these are also included."""
    riversList = stations_by_river(stations)  # Get list of rivers to consider
    riverNumber = []
    for River in riversList:
        riverNumber.append((River, len(riversList[River])))  # Get tuple of (river name, number of stations)

    riverNumber.sort(key=lambda x: x[1], reverse=True)  # Sort into decreasing numerical order

    # This code is used to include any rivers with equal number of stations to the 'final' one being output.
    extraStations = 0
    # search through next few rivers to see how many have the same number of stations
    for i in range(N, len(riverNumber)):
        if riverNumber[i][1] == riverNumber[N-1][1]:
            extraStations += 1
        else:
            break  # as items pre-sorted, once the number is not equal can exit
    N += extraStations  # adjust value of N

    return riverNumber[:N]
5f958116ae833d2ad4921662f753ca8f30a0af73
3,658,854
import json


def load_default_data() -> dict[str, str]:
    """Finds and opens a .json file with streamer data.

    Reads from the file and assigns the data to streamer_list.

    Args:
        None

    Returns:
        A dict mapping keys (Twitch usernames) to their corresponding URLs.
        Each row is represented as a separate streamer. For example:

        {
            "GMHikaru": "https://www.twitch.tv/GMHikaru"
        }
    """
    # raw string avoids treating the backslashes in the Windows-style path as escapes
    with open(r"statum\static\streamers.json", "r") as default_streamers:
        streamer_list: dict[str, str] = json.load(default_streamers)
        default_streamers.close()
    return streamer_list
bfeef64922fb4144228e031b9287c06525c4254d
3,658,855
def get_value_key(generator, name):
    """
    Return a key for the given generator and name pair.

    If name is None, no key is generated.
    """
    if name is not None:
        return f"{generator}+{name}"
    return None
0ad630299b00a23d029ea15543982125b792ad53
3,658,856
import math

import librosa
import numpy as np


def wav_to_log_spectrogram_clips(wav_file):
    """Convert audio into a logarithmic spectrogram, then chop it into 2D segments of 100 frames."""

    # convert audio into spectrogram
    sound, sr = librosa.load(wav_file, sr=SR, mono=True)
    stft = librosa.stft(sound, n_fft=N_FFT, hop_length=HOP_LEN, win_length=WIN_LEN)
    mag, phase = librosa.magphase(stft)
    db_spectro = librosa.amplitude_to_db(mag)

    # chop magnitude of spectrogram into clips, each has 1025 bins, 100 frames
    db_spectro_clips = np.empty((0, FREQ_BINS, 100))
    for i in range(math.floor(mag.shape[1] / 100)):
        db_spectro_clips = np.concatenate(
            (db_spectro_clips, db_spectro[np.newaxis, :, i * 100: (i + 1) * 100]))

    return db_spectro_clips
51ccf7d5687005f3eb01f382d37b6d7e09e45730
3,658,857
def get_title(mods):
    """
    Function takes the objects MODS and extracts and returns the text of the title.
    """
    title = mods.find("{{{0}}}titleInfo/{{{0}}}title".format(MODS_NS))
    if title is not None:
        return title.text
652a9cc61c8d2538c80818759666022b19058074
3,658,858
def get_from_identity(session, key, passive):
    """Look up the given key in the given session's identity map,
    check the object for expired state if found.
    """
    instance = session.identity_map.get(key)
    if instance is not None:

        state = attributes.instance_state(instance)

        # expired - ensure it still exists
        if state.expired:
            if not passive & attributes.SQL_OK:
                # TODO: no coverage here
                return attributes.PASSIVE_NO_RESULT
            elif not passive & attributes.RELATED_OBJECT_OK:
                # this mode is used within a flush and the instance's
                # expired state will be checked soon enough, if necessary
                return instance
            try:
                state._load_expired(state, passive)
            except orm_exc.ObjectDeletedError:
                session._remove_newly_deleted([state])
                return None
        return instance
    else:
        return None
d910dc32311fafb5e6461971ea77ea22a02aadba
3,658,859
def sample_ingredient(user, name='Cinnamon'):
    """
    Create and return a sample ingredient
    :param user: User(custom) object
    :param name: name of the ingredient
    :return: Ingredient object
    """
    return Ingredient.objects.create(user=user, name=name)
2828e1f42f6d755ac636d93d72b291cad3ba0061
3,658,860
import sys


def check_if_string(data):
    """
    Takes a data as argument and checks if the provided argument is an instance
    of string or not

    Args:
        data: Data to check for.

    Returns:
        result: Returns a boolean if the data provided is instance or not
    """
    if sys.version_info[0] == 2:
        return isinstance(data, basestring)
    else:
        return isinstance(data, str)
403aabfde1c0d2f3c1ec4610cf83a4abe644acdb
3,658,861
def viterbi(O, S, Y, pi, A, B):
    """Generates a path which is a sequence of most likely states that generates the given observation Y.

    Args:
        O (numpy.ndarray): observation space. Size: 1 X N
        S (numpy.ndarray): state space. Size: 1 X K
        Y (list): observation sequence. Size: 1 X T
        pi (numpy.ndarray): initial probabilities. Size: 1 X K
        A (numpy.ndarray): transition matrix. Size: K X K
        B (numpy.ndarray): emission matrix. Size: K X N

    Returns:
        list: list of most likely sequence of POS tags
    """
    # Reference: https://en.wikipedia.org/wiki/Viterbi_algorithm#Pseudocode
    #**************************************************************************
    ## Example data for trial
    # input
    # O = np.arange(1,7)                                 # observation space # uniq words # Size = 1 X N
    # S = np.asarray([0, 1, 2])                          # State space # uniq POS tags # Size = 1 X K
    # Y = np.array([0, 2, 0, 2, 2, 1]).astype(np.int32)  # Observation sequence # Size = 1 X T
    # pi = np.array([0.6, 0.2, 0.2])                     # Initial probability # Size = 1 X K
    # A = np.array([[0.8, 0.1, 0.1],
    #               [0.2, 0.7, 0.1],
    #               [0.1, 0.3, 0.6]])                    # transition matrix # Size = K X K
    # B = np.array([[0.7, 0.0, 0.3],
    #               [0.1, 0.9, 0.0],
    #               [0.0, 0.2, 0.8]])                    # emission matrix # Size = K X N
    # print("O", O)
    # print("S", S)
    # print("pi", pi)
    # print("Y", Y)
    # print("A", A, '\n')
    # print("B", B)
    # output
    # X = [0, 0, 0, 2, 2, 1]                             # Most likely path/sequence
    #**************************************************************************

    N = len(O)
    K = len(S)
    T = len(Y)
    T1 = np.zeros(shape=(K, T))
    T2 = np.zeros(shape=(K, T))

    for i in range(K):
        T1[i, 0] = pi[i] * B[i, Y[0]]
        T2[i, 0] = 0

    for j in range(1, T):
        for i in range(K):
            if Y[j] == -1:
                # Unknown word handling: set B[i, Y[j]] = 1 for all tags when Y[j] == -1,
                # i.e. the word was not found in the train set.
                next_prob = T1[:, j-1] * A[:, i] * 1
            else:
                next_prob = T1[:, j-1] * A[:, i] * B[i, Y[j]]
            T1[i, j] = np.max(next_prob)
            T2[i, j] = np.argmax(next_prob)

    Z = [None] * T
    X = [None] * T

    # Backpointer pass
    Z[T-1] = np.argmax(T1[:, T-1])
    X[T-1] = S[Z[T-1]]
    for j in reversed(range(1, T)):
        Z[j-1] = T2[int(Z[j]), j]
        X[j-1] = S[int(Z[j-1])]

    return X
db533c584cf2a287cfcc6f4097566cdb493c42cc
3,658,862
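A minimal usage sketch for the viterbi function above, built from the example data already given in its own comment block; it assumes that function and numpy are available in the same module, and the expected path is the one stated in those comments.

import numpy as np

# Example data taken from the comment block inside viterbi() above.
O = np.arange(1, 7)                                   # observation space (unique words)
S = np.asarray([0, 1, 2])                             # state space (unique POS tags)
Y = np.array([0, 2, 0, 2, 2, 1]).astype(np.int32)     # observation sequence
pi = np.array([0.6, 0.2, 0.2])                        # initial probabilities
A = np.array([[0.8, 0.1, 0.1],
              [0.2, 0.7, 0.1],
              [0.1, 0.3, 0.6]])                       # transition matrix (K x K)
B = np.array([[0.7, 0.0, 0.3],
              [0.1, 0.9, 0.0],
              [0.0, 0.2, 0.8]])                       # emission matrix (K x N)

path = viterbi(O, S, Y, pi, A, B)
print(path)  # expected, per the comments above: [0, 0, 0, 2, 2, 1]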
def int_to_bigint(value):
    """Convert integers larger than 64 bits to bytearray

    Smaller integers are left alone
    """
    if value.bit_length() > 63:
        return value.to_bytes((value.bit_length() + 9) // 8, 'little', signed=True)
    return value
0f2d64887dc15d1902b8e10b0257a187ed75187f
3,658,863
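A small illustration of the helper above: values that fit in 63 bits pass through unchanged, while larger ones come back as little-endian signed bytes. The decoding call shown here (plain int.from_bytes) is not part of the original snippet, just the obvious inverse.

small = int_to_bigint(2**40)
big = int_to_bigint(2**70)

print(small)       # 1099511627776 -- returned unchanged
print(type(big))   # <class 'bytes'> -- encoded because it exceeds 63 bits
print(int.from_bytes(big, 'little', signed=True) == 2**70)  # True -- round-trips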
def xcorr(S, dtmax=10):
    """
    Cross correlate each pair of columns in S at offsets up to dtmax
    """
    # import pdb; pdb.set_trace()
    (T, N) = S.shape
    H = np.zeros((N, N, dtmax))

    # Compute cross correlation at each time offset
    for dt in np.arange(dtmax):
        # print "Computing cross correlation at offset %d" % dt
        # Compute correlation in sections to conserve memory
        chunksz = 16
        for n1 in np.arange(N, step=chunksz):
            for n2 in np.arange(N, step=chunksz):
                n1c = min(n1 + chunksz, N)
                n2c = min(n2 + chunksz, N)
                # Corr coef is a bit funky. We want the upper right quadrant
                # of this matrix. The result is ((n1c-n1)+(n2c-n2)) x ((n1c-n1)+(n2c-n2))
                H[n1:n1c, n2:n2c, dt] = np.corrcoef(S[:T-dt, n1:n1c].T, S[dt:, n2:n2c].T)[:(n1c-n1), (n1c-n1):]

        # Set diagonal to zero at zero offset (obviously perfectly correlated)
        if dt == 0:
            H[:, :, 0] = H[:, :, 0] - np.diag(np.diag(H[:, :, 0]))

    return H
7b27b2ce5c574db253554e8d6c2ebf0ac7c354ca
3,658,864
def register_hooks():
    """Exec all the rules files. Gather the hooks from them
    and load them into the hook dict for later use.
    """
    global HOOKS_LOADED

    for name, path in load_rules().items():
        globals = {}
        with open(path) as f:
            exec(compile(f.read(), path, 'exec'), globals)
        DESCRIPTIONS[name] = globals['__doc__']
        for hook_name in HOOKS.keys():
            if hook_name in globals:
                HOOKS[hook_name].append(globals[hook_name])

    HOOKS_LOADED = True
    return HOOKS
c4bfd57fa0a503f4a5be7004fe2145b42c28727a
3,658,865
import time


def jboss_status(jboss_cli_home, server_ip, jboss_admin_port, jboss_admin, jboss_admin_pwd, timeout='60000'):
    """
    | ##@Purpose: JBoss status
    | ##@Parameters:
    | ##@Return value:
    | ##@Function logic:
    | ##@Developer: jhuang
    | ##@Date:
    """
    time_start = time.time()
    jboss_cli = 'jboss-cli.sh'
    if jboss_cli_home[-1] != '/':
        jboss_cli_home = jboss_cli_home + '/'
    ret = exec_shell(
        'sh %sbin/%s --connect --controller=%s:%s --user=%s --password=%s --command="deployment-info" --timeout=%s' % (
            jboss_cli_home, jboss_cli, server_ip, jboss_admin_port, jboss_admin, jboss_admin_pwd, timeout))
    logger.debug('Fetching JBoss status took: %s' % (time.time() - time_start))
    return ret
055b84c0217738c34c8f051264327388103dbef3
3,658,866
def proxy_rotator():
    """Return a cycle object of proxy dict"""
    return Proxy.get_proxy_rotator()
4b988214818599ba19cd45f43aeec03e9cc37e08
3,658,867
def pow(a, b):
    """
    Return an attribute that represents a ^ b.
    """
    return multiplyDivide(a, b, MultiplyDivideOperation.POWER)
17551ad9a872a854c177e43317f1d22242a10cd5
3,658,868
async def send_simple_embed_to_channel(bot: commands.Bot, channel_name: str, message: str, color: str = config["colors"]["default"]) -> discord.Message:
    """Send a simple embed message to the channel with the given name in the given guild, using the given message and an optional colour.

    Args:
        bot (commands.Bot): The bot containing the guild with the channel to send the message to.
        channel_name (str): The name of the channel to send the message to.
        message (str): The contents of the message.
        color (str, optional): The colour that will be used in the embed. Defaults to config["colors"]["default"].

    Returns:
        discord.Message: The embed message that was sent.
    """
    guild: discord.Guild = bot_util.get_guild(bot, config["guild-id"])
    channel: discord.TextChannel = guild_util.get_channel_by_name(guild, channel_name)
    return await channel.send(embed=discord.Embed(description=message, color=int(color, 0)))
577594c5abdb946ac04decac3ef94ea0e8296535
3,658,869
def retry_on_server_errors_timeout_or_quota_issues_filter(exception):
    """Retry on server, timeout and 403 errors.

    403 errors can be accessDenied, billingNotEnabled, and also quotaExceeded,
    rateLimitExceeded."""
    if HttpError is not None and isinstance(exception, HttpError):
        if exception.status_code == 403:
            return True
    return retry_on_server_errors_and_timeout_filter(exception)
18be4224af641b35cfba50d0ec85a1d22908d1e4
3,658,870
import os
from glob import glob


def load_all_data(data_path):
    """Load all mode data."""
    image_list = []
    for cam in os.listdir(data_path):
        image_dir = os.path.join(data_path, cam, 'dets')
        cam_image_list = glob(image_dir + '/*.png')
        cam_image_list = sorted(cam_image_list)
        print(f'{len(cam_image_list)} images for {cam}')
        image_list += cam_image_list
    print(f'{len(image_list)} images in total')
    return image_list
188483de8942ba4014c5152ec1a151636e805bdf
3,658,871
import argparse
import sys


def cmd_line_parser():
    """
    This function parses the command line parameters and arguments
    """
    parser = argparse.ArgumentParser(usage="python " + sys.argv[0] + " [-h] [passive/active] -d [Domain] [Options]",
                                     epilog='\tExample: \r\npython ' + sys.argv[0] + " passive -d baidu.com -o html")
    parser._optionals.title = "OPTIONS"
    parser._positionals.title = "POSITION OPTIONS"
    parser.add_argument("scan_model", type=str, help="active or passive")

    # active part
    active = parser.add_argument_group("active", "active scan configuration options")
    active.add_argument("-x", "--xxxxx", dest="load_config_file", default=False, action="store_true",
                        help="xxxxxxxxxxxx")

    # passive part
    passive = parser.add_argument_group("passive", "passive scan configuration options")
    passive.add_argument("-w", "--word-list", default=False, help="Custom brute force dictionary path")

    # other
    parser.add_argument("-d", "--domain", dest="domain", default=False, help="Target to scan")
    parser.add_argument("-m", "--multi-domain", dest="domains_file", default=False, help="Multi Target to scan")
    parser.add_argument("-o", "--format", default=False, help="The format of the output file")

    if len(sys.argv) == 1:
        sys.argv.append("-h")

    return parser.parse_args()
b00ed099de4f2db3c66367e07278c9c0c67f3633
3,658,872
def CSourceForElfSymbolTable(variable_prefix, names, str_offsets):
    """Generate C source definition for an ELF symbol table.

    Args:
        variable_prefix: variable name prefix
        names: List of symbol names.
        str_offsets: List of symbol name offsets in string table.
    Returns:
        String containing C source fragment.
    """
    out = (
        r'''// NOTE: ELF32_Sym and ELF64_Sym have very different layout.
#if UINTPTR_MAX == UINT32_MAX  // ELF32_Sym
# define DEFINE_ELF_SYMBOL(name, name_offset, address, size) \
  { (name_offset), (address), (size), ELF_ST_INFO(STB_GLOBAL, STT_FUNC), \
    0 /* other */, 1 /* shndx */ },
#else  // ELF64_Sym
# define DEFINE_ELF_SYMBOL(name, name_offset, address, size) \
  { (name_offset), ELF_ST_INFO(STB_GLOBAL, STT_FUNC), \
    0 /* other */, 1 /* shndx */, (address), (size) },
#endif  // !ELF64_Sym
''')
    out += 'static const ELF::Sym k%sSymbolTable[] = {\n' % variable_prefix
    out += ' { 0 }, // ST_UNDEF\n'
    out += ' LIST_ELF_SYMBOLS_%s(DEFINE_ELF_SYMBOL)\n' % variable_prefix
    out += '};\n'
    out += '#undef DEFINE_ELF_SYMBOL\n'
    return out
233c55815cf5b72092d3c60be42caffc95570c22
3,658,873
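A short, assumed usage of the generator above; the prefix and argument lists are made up for illustration, and note that names and str_offsets are not referenced in the body shown, so only the prefix shapes the output.

# Hypothetical call: emits the #if/#else macro block followed by a
# "static const ELF::Sym kCrazySymbolTable[]" definition.
fragment = CSourceForElfSymbolTable('Crazy', ['foo', 'bar'], [1, 5])
print(fragment)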
def _kahan_reduction(x, y):
    """Implements the Kahan summation reduction."""
    (s, c), (s1, c1) = x, y
    for val in -c1, s1:
        u = val - c
        t = s + u
        # TODO(b/173158845): XLA:CPU reassociates-to-zero the correction term.
        c = (t - s) - u
        s = t
    return s, c
808f21403b92ceaf4aa50aa62d58753d62034ea1
3,658,874
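A hedged sketch of how a reduction like the one above can be driven from plain Python with functools.reduce: each value is lifted to an (s, c) pair with a zero correction term and the pairs are folded with _kahan_reduction. Reading the update rule, the compensated estimate is total minus correction; the input values are only illustrative.

from functools import reduce
import math

values = [0.1] * 1000                  # repeated decimal fractions accumulate rounding error
pairs = [(v, 0.0) for v in values]     # lift each value to a (partial_sum, correction) pair
total, correction = reduce(_kahan_reduction, pairs)

print(sum(values))          # naive running sum, drifts slightly away from 100
print(total - correction)   # compensated estimate from the Kahan pairs
print(math.fsum(values))    # high-accuracy reference for comparison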
from typing import Optional


def get_endpoint_access(endpoint_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointAccessResult:
    """
    Resource schema for a Redshift-managed VPC endpoint.

    :param str endpoint_name: The name of the endpoint.
    """
    __args__ = dict()
    __args__['endpointName'] = endpoint_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:redshift:getEndpointAccess', __args__, opts=opts, typ=GetEndpointAccessResult).value

    return AwaitableGetEndpointAccessResult(
        address=__ret__.address,
        endpoint_create_time=__ret__.endpoint_create_time,
        endpoint_status=__ret__.endpoint_status,
        port=__ret__.port,
        vpc_endpoint=__ret__.vpc_endpoint,
        vpc_security_group_ids=__ret__.vpc_security_group_ids,
        vpc_security_groups=__ret__.vpc_security_groups)
6b38fbd0b27d1ce892fc37c37944979907796862
3,658,875
def predictor(
    service: MLFlowDeploymentService,
    data: np.ndarray,
) -> Output(predictions=np.ndarray):
    """Run an inference request against a prediction service"""
    service.start(timeout=10)  # should be a NOP if already started
    prediction = service.predict(data)
    prediction = prediction.argmax(axis=-1)
    return prediction
c3b5f7241aeab0520db535134912431edf467137
3,658,876
def get_domain(ns, domain):
    """
    Return LMIInstance of given LMI_SSSDDomain.

    :type domain: string
    :param domain: Name of the domain to find.
    :rtype: LMIInstance of LMI_SSSDDomain
    """
    keys = {'Name': domain}
    try:
        inst = ns.LMI_SSSDDomain.new_instance_name(keys).to_instance()
    except wbem.CIMError as err:
        if err.args[0] == wbem.CIM_ERR_NOT_FOUND:
            raise LmiFailed("Cannot find the domain: %s" % domain)
        raise
    return inst
beadbd0c172a07c2b55b5bf2b22a05abf562b95b
3,658,877
def mmd_loss(embedding, auxiliary_labels, weights_pos, weights_neg, params):
    """ Computes mmd loss, weighted or unweighted """
    if weights_pos is None:
        return mmd_loss_unweighted(embedding, auxiliary_labels, params)
    return mmd_loss_weighted(embedding, auxiliary_labels,
                             weights_pos, weights_neg, params)
6b592159587ec49fc6fd77ed286f338d11582a4b
3,658,878
def perdidas(n_r, n_inv, n_x, **kwargs):
    """Compute the losses due to equipment (product of the individual efficiency factors)."""
    n_t = n_r * n_inv * n_x
    # Multiply in any additional efficiency values passed as keyword arguments.
    for value in kwargs.values():
        n_t = n_t * value
    return n_t
157825059ad192ba90991bff6206b289755ce0ba
3,658,879
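A tiny, assumed usage of the efficiency-product helper above. The figures and the meaning of the positional parameters (regulator, inverter, wiring) are guesses for illustration only; any extra keyword arguments are simply multiplied into the total.

# Hypothetical efficiencies plus two extra loss factors passed as keywords.
n_total = perdidas(0.9, 0.95, 0.98, soiling=0.97, shading=0.99)
print(n_total)  # product of all five factors, roughly 0.80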
def _symlink_dep_cmd(lib, deps_dir, in_runfiles):
    """
    Helper function to construct a command for symlinking a library into the
    deps directory.
    """
    lib_path = lib.short_path if in_runfiles else lib.path
    return (
        "ln -sf " + relative_path(deps_dir, lib_path) +
        " " + deps_dir + "/" + lib.basename + "\n"
    )
6672decdee61dfc7f5604c6ebe1c07ac99800a91
3,658,880
def boundingBoxEdgeLengths(domain):
    """
    Returns the edge lengths of the bounding box of a domain

    :param domain: a domain
    :type domain: `escript.Domain`
    :rtype: ``list`` of ``float``
    """
    return [v[1] - v[0] for v in boundingBox(domain)]
a98fc867961bbf6a2ab518da6c933f1295d858db
3,658,881
def get_user(isamAppliance, user):
    """
    Get permitted features for user

    NOTE: Getting an unexplained error for this function, URL maybe wrong
    """
    return isamAppliance.invoke_get("Get permitted features for user",
                                    "/authorization/features/users/{0}/v1".format(user))
0b2fd6c58e2623f8400daa942c83bd0757edd21f
3,658,882
from typing import Sequence from typing import List from typing import Optional def autoupdate( config_file: str, store: Store, tags_only: bool, freeze: bool, repos: Sequence[str] = (), add_unused_hooks: bool = False, ) -> int: """Auto-update the pre-commit config to the latest versions of repos.""" migrate_config(config_file, quiet=True) retv = 0 rev_infos: List[Optional[RevInfo]] = [] changed = False config = load_config(config_file) for repo_config in config['repos']: if repo_config['repo'] in {LOCAL, META}: continue info = RevInfo.from_config(repo_config) if repos and info.repo not in repos: rev_infos.append(None) continue output.write(f'Updating {info.repo} ... ') new_info = info.update(tags_only=tags_only, freeze=freeze) try: _check_hooks_still_exist_at_rev(repo_config, new_info, store) except RepositoryCannotBeUpdatedError as error: output.write_line(error.args[0]) rev_infos.append(None) retv = 1 continue if new_info.rev != info.rev: changed = True if new_info.frozen: updated_to = f'{new_info.frozen} (frozen)' else: updated_to = new_info.rev msg = f'updating {info.rev} -> {updated_to}.' output.write_line(msg) rev_infos.append(new_info) else: output.write_line('already up to date.') rev_infos.append(None) if add_unused_hooks: unused_hooks = _get_unused_hooks(repo_config, new_info, store) if unused_hooks: changed = True for unused_hook in unused_hooks: repo_config['hooks'].append({'id': unused_hook}) if changed: _write_new_config(config_file, rev_infos) return retv
f45aeae70d6e33b841791a09d9a1578834246e75
3,658,883
def plot_energy_resolution_cta_performance(cta_site, ax=None, **kwargs):
    """
    Plot the cta performances (June 2018) for the true_energy resolution

    Parameters
    ----------
    cta_site: string
        see `ctaplot.ana.cta_performance`
    ax: `matplotlib.pyplot.axes`
    kwargs: args for `matplotlib.pyplot.plot`

    Returns
    -------
    ax: `matplotlib.pyplot.axes`
    """
    ax = plt.gca() if ax is None else ax

    cta_req = ana.cta_performance(cta_site)
    e_cta, ar_cta = cta_req.get_energy_resolution()
    kwargs.setdefault('label', "CTA performance {}".format(cta_site))

    ax.set_ylabel(r"$(\Delta energy/energy)_{68}$")
    ax.set_xlabel(rf'$E_R$ [{e_cta.unit.to_string("latex")}]')

    with quantity_support():
        ax.plot(e_cta, ar_cta, **kwargs)

    ax.set_xscale('log')
    ax.grid(True, which='both')
    ax.legend()
    return ax
67f76bcaffb85339f45803d32daf3e2d783fb097
3,658,884
from typing import OrderedDict


def _n_nested_blocked_random_indices(sizes, n_iterations):
    """
    Returns indices to randomly resample blocks of an array (with replacement) in
    a nested manner many times. Here, "nested" resampling means to randomly resample
    the first dimension, then for each randomly sampled element along that dimension,
    randomly resample the second dimension, then for each randomly sampled element
    along that dimension, randomly resample the third dimension etc.

    Parameters
    ----------
    sizes : OrderedDict
        Dictionary with {names: (sizes, blocks)} of the dimensions to resample
    n_iterations : int
        The number of times to repeat the random resampling
    """
    shape = [s[0] for s in sizes.values()]

    indices = OrderedDict()
    for ax, (key, (_, block)) in enumerate(sizes.items()):
        indices[key] = _get_blocked_random_indices(
            shape[: ax + 1] + [n_iterations], ax, block
        )
    return indices
730ddba8f0753c29ebcf55c8449f365e6fc0b9ab
3,658,885
def phase_type_from_parallel_erlang2(theta1, theta2, n1, n2): """Returns initial probabilities :math:`\\alpha` and generator matrix :math:`S` for a phase-type representation of two parallel Erlang channels with parametrisation :math:`(\\theta_1, n_1)` and :math:`(\\theta_2, n_2)` (rate and steps of Erlang channels). `Note`: To obtain a phase-type density pass the results of this method into the method `utils.phase_type_pdf`. `Note`: The two Erlang channels split at the first substep into each channel. The parametrisation implies the rate :math:`n\\cdot\\theta` on the individual exponentially-distributed substeps for the respective channel. Parameters ---------- theta1 : float Rate parameter of the first complete Erlang channel (inverse of the mean Erlang waiting time). theta2 : float Rate parameter of the second complete Erlang channel (inverse of the mean Erlang waiting time). n1 : int or float Number of steps of the first Erlang channel (shape parameter). n2 : int or float Number of steps of the second Erlang channel (shape parameter). Returns ------- alpha : 1d numpy.ndarray The initial probability vector of the phase-type distribution (with shape `(1,m)` where :math:`m=n_1+n_2-1`). S : 2d numpy.ndarray The transient generator matrix of the phase-type distribution (with shape `(m,m)` where :math:`m=n_1+n_2-1`). """ ### self-written, copied from env_PHdensity notebook ### butools can then be used to get density and network image with: ### 1) pdf = ph.PdfFromPH(a, A, x) ### 2) ph.ImageFromPH(a, A, 'display') # some checks for theta in (theta1, theta2): if not isinstance(theta, float): raise ValueError('Float expected for theta.') for n in (n1, n2): if isinstance(n, int): pass elif isinstance(n, float) and n.is_integer(): pass else: raise ValueError('Integer number expected for n.') if n<1: raise ValueError('Steps n expected to be 1 or more.') # preallocate initial probs and subgenerator matrix alpha = np.zeros((1, int(n1 + n2)-1)) S = np.zeros((int(n1 + n2)-1, int(n1 + n2)-1)) # first index sets source alpha[0, 0] = 1.0 # substep rates r1 = n1 * theta1 r2 = n2 * theta2 # outflux from source # (from competing channels) S[0, 0] = -(r1+r2) # fill matrix (first channel) l = [0] + list(range(1, int(n1))) for i, inext in zip(l[0:-1], l[1:]): S[i, inext] = r1 S[inext, inext] = -r1 # fill matrix (second channel) l = [0] + list(range(int(n1), int(n1+n2)-1)) for i, inext in zip(l[0:-1], l[1:]): S[i, inext] = r2 S[inext, inext] = -r2 return alpha, S
667fc2abdb38e2e623a5f91f33ffb60f9b9e5ca8
3,658,886
def get_regions(max_time_value):
    """
    Partition R into a finite collection of one-dimensional regions depending on the
    appearing max time value.
    """
    regions = []
    bound = 2 * max_time_value + 1
    for i in range(0, bound + 1):
        if i % 2 == 0:
            temp = i // 2
            r = Constraint('[' + str(temp) + ',' + str(temp) + ']')
            regions.append(r)
        else:
            temp = (i - 1) // 2
            if temp < max_time_value:
                r = Constraint('(' + str(temp) + ',' + str(temp + 1) + ')')
                regions.append(r)
            else:
                r = Constraint('(' + str(temp) + ',' + '+' + ')')
                regions.append(r)
    return regions
1cc825592e07dc0bef30f04896e57df189d28bb3
3,658,887
def label_edges(g: nx.DiGraph) -> nx.DiGraph:
    """Label all the edges automatically.

    Args:
        g: the original directed graph.

    Raises:
        Exception: when some edge already has attribute "label_".

    Returns:
        The original directed graph with all edges labelled.
    """
    g_labelled = nx.DiGraph(g)
    i = 1
    for edge in g_labelled.edges.data():
        if _ATTR_LABEL in edge[2]:
            raise Exception(
                f"The edge {edge[0]}-{edge[1]} already has the {_ATTR_LABEL} attribute."
            )
        else:
            edge[2][_ATTR_LABEL] = f"e{i}"
            i += 1
    return g_labelled
a74559cdce8d75a65913def6c545b86ed45b2ead
3,658,888
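A brief usage sketch for the edge-labelling helper above. It assumes label_edges and its module-level _ATTR_LABEL constant (the docstring suggests "label_") are in scope, and that networkx is installed; the toy graph is made up.

import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("a", "c")])
labelled = label_edges(g)

for u, v, data in labelled.edges.data():
    print(u, v, data)  # each edge now carries an automatic label such as {'label_': 'e1'}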
from datetime import datetime import calendar def report_charts(request, report, casetype='Call'): """Return charts for the last 4 days based on the Call Summary Data""" # The ussual filters. query = request.GET.get('q', '') interval = request.GET.get('interval', 'daily') category = request.GET.get('category', '') if report == 'categorysummary': y_axis = 'category' elif report == 'dailysummary': y_axis = 'daily' else: y_axis = request.GET.get('y_axis', '') datetime_range = request.GET.get("datetime_range") agent = request.GET.get("agent") form = ReportFilterForm(request.GET) # Update the search url to chart based views. search_url = reverse('report_charts', kwargs={'report': report}) # Convert date range string to datetime object if datetime_range: try: a, b = [datetime_range.split(" - ")[0], datetime_range.split(" - ")[1]] from_date = datetime.strptime(a, '%m/%d/%Y %I:%M %p') to_date = datetime.strptime(b, '%m/%d/%Y %I:%M %p') current = from_date delta = to_date - from_date date_list = [] if interval == 'hourly': for i in range(int(delta.total_seconds()//3600)): date_list.append(from_date + timedelta(seconds=i*3600)) elif interval == 'monthly': while current <= to_date: current += relativedelta(months=1) date_list.append(current) elif interval == 'weekly': while current <= to_date: current += relativedelta(weeks=1) date_list.append(current) else: while current <= to_date: current += relativedelta(days=1) date_list.append(current) epoch_list = [date_item.strftime('%m/%d/%Y %I:%M %p') for date_item in date_list] # Add filter to ajax query string. except Exception as e: from_date = None to_date = None else: from_date = None to_date = None # Start date base = datetime.today() date_list = [base - timedelta(days=x) for x in range(0, 3)] epoch_list = [date_item.strftime('%m/%d/%Y %I:%M %p') for date_item in date_list] epoch_list.reverse() e = None datetime_ranges = pairwise(epoch_list) callsummary_data = [] total_calls = 0 for datetime_range in datetime_ranges: # Date time list returns desending. We want assending. datetime_range_string = " - ".join(datetime_range) if y_axis == 'category': categories = [i[0] for i in Category.objects.values_list('hl_category').distinct()] for category in categories: report_data = report_factory(report='chartreport', datetime_range=datetime_range_string, agent=agent, query=query, category=category, casetype=casetype) # Append data to tables list. callsummary_data.append(report_data) total_calls = total_calls + report_data.get('total_offered').get('count') else: report_data = report_factory(report='chartreport', datetime_range=datetime_range_string, agent=agent, query=query, category=category, casetype=casetype) # Append data to tables list. callsummary_data.append(report_data) total_calls = total_calls + report_data.get('total_offered').get('count') # Multibar chart page. 
if y_axis != 'daily': summary_table = CallSummaryTable(callsummary_data) tooltip_date = "%d %b %Y %H:%M:%S %p" extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"}, "date_format": tooltip_date} if y_axis == 'category': categories = [i[0] for i in Category.objects.values_list('hl_category').distinct()] chartdata = { 'x': epoch_list, } for i in range(len(categories)): chartdata['name%s' % str(i+1)] = categories[i] category_related = [] for data in callsummary_data: if data.get('category') == categories[i]: category_related.append(data) chartdata['y%s' % str(i+1)] = [d.get('total_offered').get('count') for d in category_related] chartdata['extra%s' % str(i+1)] = extra_serie elif y_axis == 'daily': daysummary_data = [] month_names = [] day_names = list(calendar.day_name) chartdata = {} day_related = {} for day_name in day_names: day_related[day_name] = [] for i in range(len(day_names)): day_summary = {} chartdata['name%s' % str(i+1)] = day_names[i] day_total_offered = 0 month_name = 'None' for data in callsummary_data: if data.get('day') == day_names[i]: day_related[day_names[i]].append(data) day_total_offered = day_total_offered + data.get('total_offered').get('count') day_related[day_names[i]][-1]['day_total_offered'] = day_total_offered month_name = data.get('month') day_summary['month'] = month_name month_names.append(month_name) day_summary['%s' % (day_names[i].lower())] = day_total_offered chartdata['y%s' % str(i+1)] = [d.get('day_total_offered') for d in day_related[day_names[i]]] chartdata['extra%s' % str(i+1)] = extra_serie chartdata['x'] = month_names daysummary_data.append(day_summary) else: ydata = [d.get('total_offered').get('count') for d in callsummary_data] ydata2 = [d.get('total_answered') for d in callsummary_data] ydata3 = [d.get('total_abandoned') for d in callsummary_data] chartdata = { 'x': epoch_list, 'name1': 'Total Offered', 'y1': ydata, 'extra1': extra_serie, 'name2': 'Total Answered', 'y2': ydata2, 'extra2': extra_serie, 'name3': 'Total Abandoned', 'y3': ydata3, 'extra3': extra_serie, } charttype = "multiBarChart" chartcontainer = 'multibarchart_container' # container name if y_axis == 'daily': summary_table = DaySummaryTable(daysummary_data) export_format = request.GET.get('_export', None) if TableExport.is_valid_format(export_format): exporter = TableExport(export_format, summary_table) return exporter.response('table.{}'.format(export_format)) data = { 'title': 'callsummary', 'form': form, 'summary_table': summary_table, 'datetime_ranges_number': len(datetime_ranges), 'error': e, 'y_axis': y_axis, 'search_url': search_url, 'total_calls': total_calls, 'charttype': charttype, 'casetype': casetype, 'chartdata': chartdata, 'chartcontainer': chartcontainer, 'extra': { 'name': 'Call data', 'x_is_date': False, 'x_axis_format': '', 'tag_script_js': True, 'jquery_on_ready': True, }, } if report == 'ajax': return render(request, 'helpline/report_charts_factory.html', data) else: return render(request, 'helpline/report_charts.html', data)
0e9721446e66ee901732a6b0792075ccee607eaa
3,658,889
def _get_optimizer(learning_rate: float, gradient_clip_norm: float):
    """Gets model optimizer."""
    kwargs = {'clipnorm': gradient_clip_norm} if gradient_clip_norm > 0 else {}
    return tf.keras.optimizers.Adagrad(learning_rate, **kwargs)
92b9b70c533828232872250eca724c2568638f2f
3,658,890
def is_my_message(msg):
    """
    Check which bot a message is addressed to, so that the bot does not react
    to commands intended for other bots.

    :param msg: The message object being checked.
    """
    text = msg.text.split()[0].split("@")
    if len(text) > 1:
        if text[1] != config.bot_name:
            return False
    return True
e99c8587ffbc1e582154785d657212f37358e926
3,658,891
from typing import Any
from typing import Dict


def execute_search_query(client: Client, query: Any, data_range: str) -> Dict[str, Any]:
    """Execute a search job and wait for the results

    :type client: ``Client``
    :param client: Http client

    :type query: ``Any``
    :param query: Search query

    :type data_range: ``str``
    :param data_range: http url query for getting range of data

    :return: Search result
    :rtype: ``Dict[str, Any]``
    """
    response = client.varonis_execute_search(query)
    location = get_search_result_path(response)
    search_result = client.varonis_get_search_result(location, data_range, SEARCH_RESULT_RETRIES)
    return search_result
68a58f9c4bc7c2b4a754cce8bd97022d327d5155
3,658,892
def static(directory: str) -> WSGIApp:
    """Return a WSGI app that serves static files under the given directory.

    Powered by WhiteNoise.
    """
    app = WhiteNoise(empty_wsgi_app())
    if exists(directory):
        app.add_files(directory)
    return app
9eae5f688b50d6c6c523e69ee0e79f667fb1d567
3,658,893
def check_filter(id):
    """ Helper function to determine if the current crime is in the dictionary """
    if id not in important_crime:
        return 30
    else:
        return important_crime[id] * 30
9ca74e57abd32db6176216f31deae193e0cac0d4
3,658,894
def rand_email(domain=None):
    """Generate a random email address on the given (or a random) zone

    :return: a random email address, e.g. example@example.org
    :rtype: string
    """
    domain = domain or rand_zone_name()
    return 'example@%s' % domain.rstrip('.')
3653319c77b7e304ea03b7bb06888d115f45dc1e
3,658,895
def wordcount_for_reddit(data, search_word):
    """Return the number of times a word has been used."""
    count = 0
    for result in data:
        # check each result from the scrape
        for key in result:
            stringed_list = str(result[key])
            text_list = stringed_list.split()
            for word in text_list:
                if search_word == 'Go':
                    if word == search_word:
                        count += 1
                elif word.lower() == search_word.lower():
                    count += 1
    return count
b0967aa896191a69cd1b969589b34522299ff415
3,658,896
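An illustrative call to the counter above with a made-up scrape result (a list of dicts, which is what the loop structure implies); note the special case that keeps the search term 'Go' case-sensitive while other terms match case-insensitively.

scraped = [
    {"title": "Learning Go the hard way", "body": "go is fun, Go is fast"},
    {"title": "Why I went back to Python", "body": "go figure"},
]

print(wordcount_for_reddit(scraped, "Go"))      # counts only capitalised 'Go' -> 2
print(wordcount_for_reddit(scraped, "python"))  # case-insensitive for other words -> 1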
from typing import Union from typing import List from typing import Tuple from typing import Optional def openeo_to_eodatareaders(process_graph_json_in: Union[dict, str], job_data: str, process_defs: Union[dict, list, str], vrt_only: bool = False, existing_node_ids: List[Tuple] = None) \ -> Tuple[List[Tuple[str, List[str], Optional[str], List[str], str]], Graph]: """ This function translates an OpenEO process graph into a sequence of calls to EODataProcessor, one for each node of the process graph. Each openEO process is wrapped into an apply/reduce call using EODataProcessor methods. """ # Translate openEO PG to traversable object if isinstance(process_graph_json_in, dict): process_graph_json = deepcopy(process_graph_json_in) else: process_graph_json = process_graph_json_in graph = translate_process_graph(process_graph_json, process_defs=process_defs).sort(by='dependency') # Get wrapper processes -> TODO: is this really needed? wrapper_processes = get_wrapper_processes() nodes = [] N_nodes = len(graph.ids) last_node = False for k, node_id in enumerate(graph.ids): cur_node = graph[node_id] wrapper_name = None wrapper_dimension = None node_dependencies = None if k + 1 == N_nodes: last_node = True if cur_node.is_reducer: # Current process is classified as "reducer" in its process definition if cur_node.parent_process: # Current process has parent, must be an embedded process graph wrapper_name = cur_node.parent_process.process_id wrapper_dimension = cur_node.parent_process.dimension else: # Current process is of type "reducer" but has no parent, must be one of these processes: # "reduce_dimension", "reduce_dimension_binary" wrapper_name = cur_node.process_id wrapper_dimension = cur_node.dimension else: wrapper_name = cur_node.process_id recuder_dimension = None # for clarity, this will be needed when also 'apply_dimension' is supported by EODataProcessor # Workaround for process "array_element" until it has the category "reducer" set # TODO remove when the process definition is updated if (not cur_node.is_reducer) and (cur_node.parent_process): # Current process has parent, must be an embedded process graph wrapper_name = cur_node.parent_process.process_id wrapper_dimension = cur_node.parent_process.dimension # NB find better solution if wrapper_dimension: wrapper_dimension = check_dim_name(wrapper_dimension) if cur_node.content['process_id'] == 'run_udf': operator = "UdfExec" params = map_udf(cur_node.content, job_data, cur_node.id) else: operator = "EODataProcessor" params = map_process( cur_node.content, cur_node.id, cur_node.is_result, job_data, wrapper_name=wrapper_name, wrapper_dimension=wrapper_dimension, vrt_only=vrt_only, last_node=last_node ) # Get dependencies if cur_node.result_process and (cur_node.process_id in wrapper_processes): # The current process is a wrapper process, which embeds a process graph # Its only dependency is the node in the embedded process graph with 'result' set to True. node_dependencies = [cur_node.result_process.id] else: node_dependencies = list(cur_node.dependencies.ids) # Add to nodes list nodes.append((cur_node.id, params, node_dependencies, operator)) return nodes, graph
519f91466accef2a8f1dbc0dc14f672bcbd763ad
3,658,897
def calc_precision(gnd_assignments, pred_assignments):
    """
    gnd_assignments should be a torch tensor of longs, containing the assignment to each cluster

    assumes that cluster assignments are 0-based, and no 'holes'
    """
    precision_sum = 0
    assert len(gnd_assignments.size()) == 1
    assert len(pred_assignments.size()) == 1
    assert pred_assignments.size(0) == gnd_assignments.size(0)

    N = gnd_assignments.size(0)
    K_gnd = gnd_assignments.max().item() + 1
    K_pred = pred_assignments.max().item() + 1
    for k_pred in range(K_pred):
        mask = pred_assignments == k_pred
        gnd = gnd_assignments[mask.nonzero().long().view(-1)]
        max_intersect = 0
        for k_gnd in range(K_gnd):
            intersect = (gnd == k_gnd).long().sum().item()
            max_intersect = max(max_intersect, intersect)
        precision_sum += max_intersect

    precision = precision_sum / N
    return precision
536e25aa8e3b50e71beedaab3f2058c79d9957e3
3,658,898
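A brief, hedged example of the clustering-precision helper above using small PyTorch tensors; the label values are arbitrary and torch is assumed to be installed alongside the function.

import torch

gnd = torch.tensor([0, 0, 1, 1, 2, 2], dtype=torch.long)   # ground-truth clusters
pred = torch.tensor([1, 1, 0, 0, 0, 2], dtype=torch.long)  # predicted clusters (permuted labels)

print(calc_precision(gnd, pred))  # 5 of 6 points sit in their cluster's majority class -> ~0.833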
import jsonschema
import base64
import binascii
import os
from datetime import datetime


def scheduler_job_output_route():
    """receive output from assigned job"""
    try:
        jsonschema.validate(request.json, schema=sner.agent.protocol.output)
        job_id = request.json['id']
        retval = request.json['retval']
        output = base64.b64decode(request.json['output'])
    except (jsonschema.exceptions.ValidationError, binascii.Error):
        return jsonify({'title': 'Invalid request'}), HTTPStatus.BAD_REQUEST

    job = Job.query.filter(Job.id == job_id).one_or_none()
    if job and (not job.retval):
        # requests for invalid, deleted, repeated or clashing job ids are discarded
        # agent should delete the output on it's side as well
        job.retval = retval
        os.makedirs(os.path.dirname(job.output_abspath), exist_ok=True)
        with open(job.output_abspath, 'wb') as ftmp:
            ftmp.write(output)
        job.time_end = datetime.utcnow()
        db.session.commit()

    return '', HTTPStatus.OK
75c7760426e4be823ba97d1af0816b3261295d49
3,658,899