import numpy as np


def weighted_moments(values, weights):
    """Return weighted mean and weighted standard deviation of a sequence"""
    w_mean = np.average(values, weights=weights)
    sq_err = (values - w_mean)**2
    w_var = np.average(sq_err, weights=weights)
    w_std = np.sqrt(w_var)
    return w_mean, w_std
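# A quick sanity check with hypothetical values (not from the original source):
# the heavy weight on 4.0 pulls the weighted mean up to 3.25.
values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([1, 1, 1, 5])
w_mean, w_std = weighted_moments(values, weights)
print(w_mean)  # 3.25 -> (1 + 2 + 3 + 5*4) / 8
print(w_std)   # ~1.0897, sqrt of the weighted average squared error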
84775550c54f285f9a032641cf27976eebf94322
6,400
import numpy as np
import matplotlib.pyplot as plt


def forestplot(data, kind='forestplot', model_names=None, var_names=None, combined=False,
               credible_interval=0.95, quartiles=True, r_hat=True, n_eff=True, colors='cycle',
               textsize=None, linewidth=None, markersize=None, joyplot_alpha=None,
               joyplot_overlap=2, figsize=None):
    """Forest plot

    Generates a forest plot of 100*(credible_interval)% credible intervals from a trace
    or list of traces.

    Parameters
    ----------
    data : xarray.Dataset or list of compatible
        Samples from a model posterior
    kind : str
        Choose kind of plot for main axis. Supports "forestplot" or "joyplot"
    model_names : list[str], optional
        List with names for the models in the list of data. Useful when plotting more
        than one dataset
    var_names : list[str], optional
        List of variables to plot (defaults to None, which results in all variables plotted)
    combined : bool
        Flag for combining multiple chains into a single chain. If False (default),
        chains will be plotted separately.
    credible_interval : float, optional
        Credible interval to plot. Defaults to 0.95.
    quartiles : bool, optional
        Flag for plotting the interquartile range, in addition to the credible_interval
        intervals. Defaults to True
    r_hat : bool, optional
        Flag for plotting Gelman-Rubin statistics. Requires 2 or more chains. Defaults to True
    n_eff : bool, optional
        Flag for plotting the effective sample size. Requires 2 or more chains. Defaults to True
    colors : list or string, optional
        List with valid matplotlib colors, one color per model. Alternatively, a string
        can be passed. If the string is `cycle`, it will automatically choose a color per
        model from matplotlib's cycle. If a single color is passed, e.g. 'k', 'C2', 'red',
        this color will be used for all models. Defaults to 'cycle'.
    textsize : int
        Text size for labels. If None it will be autoscaled based on figsize.
    linewidth : int
        Line width throughout. If None it will be autoscaled based on figsize.
    markersize : int
        Markersize throughout. If None it will be autoscaled based on figsize.
    joyplot_alpha : float
        Transparency for joyplot fill. If 0, border is colored by model, otherwise
        a black outline is used.
    joyplot_overlap : float
        Overlap height for joyplots.
    figsize : tuple, optional
        Figure size. Defaults to None

    Returns
    -------
    gridspec : matplotlib GridSpec
    """
    ncols, width_ratios = 1, [3]

    if n_eff:
        ncols += 1
        width_ratios.append(1)

    if r_hat:
        ncols += 1
        width_ratios.append(1)

    plot_handler = PlotHandler(data, var_names=var_names, model_names=model_names,
                               combined=combined, colors=colors)

    if figsize is None:
        figsize = (min(12, sum(width_ratios) * 2), plot_handler.fig_height())

    textsize, auto_linewidth, auto_markersize = _scale_text(figsize, textsize=textsize)

    if linewidth is None:
        linewidth = auto_linewidth
    if markersize is None:
        markersize = auto_markersize

    fig, axes = plt.subplots(nrows=1, ncols=ncols, figsize=figsize,
                             gridspec_kw={'width_ratios': width_ratios},
                             sharey=True)
    axes = np.atleast_1d(axes)

    if kind == 'forestplot':
        plot_handler.forestplot(credible_interval, quartiles, textsize, linewidth,
                                markersize, axes[0])
    elif kind == 'joyplot':
        plot_handler.joyplot(joyplot_overlap, textsize, linewidth, joyplot_alpha, axes[0])
    else:
        raise TypeError(f"Argument 'kind' must be one of 'forestplot' or "
                        f"'joyplot' (you provided {kind})")

    idx = 1
    if r_hat:
        plot_handler.plot_rhat(axes[idx], textsize, markersize)
        idx += 1
    if n_eff:
        plot_handler.plot_neff(axes[idx], textsize, markersize)
        idx += 1

    for ax in axes:
        ax.grid(False)
        # Remove ticklines on y-axes
        for ticks in ax.yaxis.get_major_ticks():
            ticks.tick1On = False
            ticks.tick2On = False
        for loc, spine in ax.spines.items():
            if loc in ['left', 'right']:
                spine.set_color('none')  # don't draw spine
        if len(plot_handler.data) > 1:
            plot_handler.make_bands(ax)

    labels, ticks = plot_handler.labels_and_ticks()
    axes[0].set_yticks(ticks)
    axes[0].set_yticklabels(labels)
    all_plotters = list(plot_handler.plotters.values())
    y_max = plot_handler.y_max() - all_plotters[-1].group_offset
    if kind == 'joyplot':  # space at the top
        y_max += joyplot_overlap
    axes[0].set_ylim(-all_plotters[0].group_offset, y_max)

    return fig, axes
e2170c4bdbfc2fbf5c3db24688e8ab09a2ec498c
6,401
import numpy as np
import tensorflow as tf


def tf_dtype(dtype):
    """Translates dtype specifications in configurations to TensorFlow data types.

    Args:
        dtype: String describing a numerical type (e.g. 'float'), numpy data type,
            or numerical type primitive.

    Returns:
        TensorFlow data type
    """
    if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:
        return tf.float32
    elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:
        return tf.int32
    elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:
        return tf.bool
    else:
        raise TensorforceError("Error: Type conversion from type {} not supported.".format(str(dtype)))
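# A minimal usage sketch (assuming TensorFlow and NumPy are installed):
# each alias family maps to the same TensorFlow dtype.
assert tf_dtype('float') == tf.float32
assert tf_dtype(np.int32) == tf.int32
assert tf_dtype(bool) == tf.bool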
0c51804974e7e1bb36fcc32f181cfab713fca263
6,404
from typing import Tuple
import timeit


def decode_frame(raw_frame: bytes, frame_width: int, frame_height: int) -> Tuple[str, np.ndarray]:
    """
    Decode the image bytes into a string compatible with OpenCV

    :param raw_frame: frame data in bytes
    :param frame_width: width of the frame, obtained from Kinesis payload
    :param frame_height: height of the frame, obtained from Kinesis payload
    """
    start_time = timeit.default_timer()
    # frameBuffer = Image.frombytes('RGB', (frame_width, frame_height), raw_frame)
    # frameBuffer.save("./h264decoded.png", "png")
    # frame = np.array(frameBuffer)
    # img_str = cv2.imencode('.jpg', frame)[1].tostring()
    img = imageio.get_reader(raw_frame, ".png")
    frame: np.ndarray = img.get_data(0)
    img_str = cv2.imencode('.png', frame)[1].tostring()
    logger.info(f'Decoded frame after: {timeit.default_timer() - start_time}')
    return img_str, frame
a38d7362997701756767cf466bb3fbb76b77a92e
6,405
def preprocess(picPath):
    """preprocess"""
    # read img
    bgr_img = cv.imread(picPath)
    # get img shape
    orig_shape = bgr_img.shape[:2]
    # resize img
    img = cv.resize(bgr_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.int8)
    # ensure C_CONTIGUOUS memory layout
    if not img.flags['C_CONTIGUOUS']:
        img = np.ascontiguousarray(img)
    return orig_shape, img
ae44fdbf3613e159f28db0d9417470872439f76d
6,406
import requests


def live_fractal_or_skip():
    """
    Ensure Fractal live connection can be made
    First looks for a local staging server, then tries QCArchive.
    """
    try:
        return FractalClient("localhost:7777", verify=False)
    except (requests.exceptions.ConnectionError, ConnectionRefusedError):
        print("Failed to connect to localhost, trying MolSSI QCArchive.")
        try:
            requests.get("https://api.qcarchive.molssi.org:443", json={}, timeout=5)
            return FractalClient()
        except (requests.exceptions.ConnectionError, ConnectionRefusedError):
            return pytest.skip("Could not make a connection to central Fractal server")
b121e33a2294edc80d336abbb07c9a12f3301aea
6,407
def get_legendre(theta, keys):
    """
    Calculate Schmidt semi-normalized associated Legendre functions

    Calculations based on recursive algorithm found in "Spacecraft Attitude
    Determination and Control" by James Richard Wertz

    Parameters
    ----------
    theta : array
        Array of colatitudes in degrees
    keys : iterable
        list of spherical harmonic degree and order, tuple (n, m) for each
        term in the expansion

    Returns
    -------
    P : array
        Array of Legendre functions, with shape (theta.size, len(keys)).
    dP : array
        Array of dP/dtheta, with shape (theta.size, len(keys))
    """
    # get maximum N and maximum M:
    n, m = np.array([k for k in keys]).T
    nmax, mmax = np.max(n), np.max(m)

    theta = theta.flatten()[:, np.newaxis]

    P = {}
    dP = {}
    sinth = np.sin(d2r*theta)
    costh = np.cos(d2r*theta)

    # Initialize Schmidt normalization
    S = {}
    S[0, 0] = 1.

    # initialize the functions:
    for n in range(nmax + 1):
        for m in range(nmax + 1):
            P[n, m] = np.zeros_like(theta, dtype=np.float64)
            dP[n, m] = np.zeros_like(theta, dtype=np.float64)

    P[0, 0] = np.ones_like(theta, dtype=np.float64)

    for n in range(1, nmax + 1):
        for m in range(0, min([n + 1, mmax + 1])):
            # do the legendre polynomials and derivatives
            if n == m:
                P[n, n] = sinth * P[n - 1, m - 1]
                dP[n, n] = sinth * dP[n - 1, m - 1] + costh * P[n - 1, n - 1]
            else:
                if n == 1:
                    Knm = 0.
                    P[n, m] = costh * P[n - 1, m]
                    dP[n, m] = costh * dP[n - 1, m] - sinth * P[n - 1, m]
                elif n > 1:
                    Knm = ((n - 1)**2 - m**2) / ((2*n - 1)*(2*n - 3))
                    P[n, m] = costh * P[n - 1, m] - Knm*P[n - 2, m]
                    dP[n, m] = costh * dP[n - 1, m] - sinth * P[n - 1, m] - Knm * dP[n - 2, m]

            # compute Schmidt normalization
            if m == 0:
                S[n, 0] = S[n - 1, 0] * (2.*n - 1)/n
            else:
                S[n, m] = S[n, m - 1] * np.sqrt((n - m + 1)*(int(m == 1) + 1.)/(n + m))

    # now apply Schmidt normalization
    for n in range(1, nmax + 1):
        for m in range(0, min([n + 1, mmax + 1])):
            P[n, m] *= S[n, m]
            dP[n, m] *= S[n, m]

    Pmat = np.hstack(tuple(P[key] for key in keys))
    dPmat = np.hstack(tuple(dP[key] for key in keys))

    return Pmat, dPmat
8afd34e7e4805fab9393c2efff15c0cffcb9466a
6,408
def table_4_28(x_t, c_):
    """Return the correction factor accounting for the influence of profile thickness.

    arguments: relative position of the laminar-to-turbulent boundary layer
        transition point (x_t), relative profile thickness (c_)
    return: value of the correction factor"""
    nu_t_00 = [1.00, 1.03, 1.05, 1.08, 1.11, 1.13, 1.16, 1.19, 1.22, 1.25, 1.29, 1.33, 1.37]
    nu_t_02 = [1.000, 1.020, 1.040, 1.060, 1.080, 1.104, 1.127, 1.155, 1.180, 1.205, 1.235, 1.260, 1.295]
    nu_t_04 = [1.00, 1.01, 1.03, 1.04, 1.05, 1.07, 1.09, 1.10, 1.12, 1.14, 1.16, 1.17, 1.20]
    c_mas = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12]
    k = int(c_ // 0.01 + 1)
    if x_t == 0:
        nu_t = interpol(nu_t_00[k], nu_t_00[k - 1], procent(c_, c_mas[k - 1], c_mas[k]))
    elif (x_t >= 0) and (x_t <= 0.2):
        nu_t = interpol(interpol(nu_t_02[k], nu_t_02[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
                        interpol(nu_t_00[k], nu_t_00[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
                        procent(x_t, 0, 0.2))
    elif (x_t >= 0.2) and (x_t <= 0.4):
        nu_t = interpol(interpol(nu_t_04[k], nu_t_04[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
                        interpol(nu_t_02[k], nu_t_02[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
                        procent(x_t, 0.2, 0.4))
    else:
        nu_t = interpol(nu_t_04[k], nu_t_04[k - 1], procent(c_, c_mas[k - 1], c_mas[k]))
    return nu_t
954c272bde503363f354b33f6af4f97bee5ad740
6,409
def random_spectra(path_length, coeffs, min_wavelength, max_wavelength, complexity):
    """Generate a spectrum via Beer's law for a randomly generated solution."""
    solution = random_solution(coeffs, complexity)
    return beers_law(solution, path_length, coeffs, min_wavelength, max_wavelength)
a82a0d638473d63cd768bfdc0318f5042a75ae12
6,410
import numpy as np
import pandas as pd


def data_prep(data, unit_identifier, time_identifier, matching_period, treat_unit,
              control_units, outcome_variable, predictor_variables, normalize=False):
    """
    Prepares the data by normalizing X for section 3.3. in order to replicate
    Becker and Klößner (2017)
    """
    X = data.loc[data[time_identifier].isin(matching_period)]
    X.index = X.loc[:, unit_identifier]

    X0 = X.loc[(X.index.isin(control_units)), (predictor_variables)]
    X0 = X0.groupby(X0.index).mean().values.T  # control predictors

    X1 = X.loc[(X.index == treat_unit), (predictor_variables)]
    X1 = X1.groupby(X1.index).mean().values.T  # treated predictors

    # outcome variable realizations in matching period - Z0: control, Z1: treated
    Z0 = np.array(X.loc[(X.index.isin(control_units)), (outcome_variable)]).reshape(
        len(control_units), len(matching_period)).T  # control outcome
    Z1 = np.array(X.loc[(X.index == treat_unit), (outcome_variable)]).reshape(
        len(matching_period), 1)  # treated outcome

    if normalize == True:
        # Scaling
        nvarsV = X0.shape[0]
        big_dataframe = pd.concat([pd.DataFrame(X0), pd.DataFrame(X1)], axis=1)
        divisor = np.sqrt(big_dataframe.apply(np.var, axis=1))
        V = np.zeros(shape=(len(predictor_variables), len(predictor_variables)))
        np.fill_diagonal(V, np.diag(np.repeat(big_dataframe.shape[0], 1)))
        scaled_matrix = ((big_dataframe.T) @ (np.array(1/(divisor)).reshape(
            len(predictor_variables), 1) * V)).T
        X0 = np.array(scaled_matrix.iloc[:, 0:len(control_units)])
        X1 = np.array(scaled_matrix.iloc[:, len(control_units):(len(control_units)+1)])

    Z0 = Z0.astype('float64')
    Z1 = Z1.astype('float64')

    return X0, X1, Z0, Z1
6f13cc083973d5bd7ac7d2ca239741aaf067fede
6,411
import numpy


def kabsch_superpose(P, Q):  # P, Q: vstack'ed matrix
    """
    Usage:
        P = numpy.vstack([a2, b2, c2])
        Q = numpy.vstack([a1, b1, c1])
        m = kabsch_superpose(P, Q)
        newP = numpy.dot(m, P)
    """
    A = numpy.dot(numpy.transpose(P), Q)
    U, s, V = numpy.linalg.svd(A)
    tmp = numpy.identity(3)
    tmp[2, 2] = numpy.sign(numpy.linalg.det(A))
    R = numpy.dot(numpy.dot(numpy.transpose(V), tmp), numpy.transpose(U))
    return R
56b7b9c3168e644ad71bee2146af3e4ae455c648
6,413
def add(a_t, b_t):
    """ add operator a+b """
    return add_op(a_t, b_t)
be4e5bad6deb651af8e8f084cbefd185c1f9781f
6,414
def GetExtensionDescriptor(full_extension_name):
    """Searches for extension descriptor given a full field name."""
    return _pool.FindExtensionByName(full_extension_name)
5e0088f785809e38d306d7416129114ac09a5135
6,416
from re import sub, findall
from argparse import ArgumentParser


def parse_args():
    """
    Parses command-line arguments and returns username, title of specified
    repository and its branch.
    Returns: tuple (username, repo_name, branch).
    Used only once in `main` method.
    """
    DESC = 'Automatic license detection of a Github repository.'
    parser = ArgumentParser(description=DESC)

    # Specify arguments
    parser.add_argument('--branch', default='master', required=False,
                        help='A branch of a repository from which license file should be obtained. Default: `master`.')
    parser.add_argument('--repository_name', required=False,
                        help='A name of a repository, whose license needs to be detected. Required.')
    parser.add_argument('--username', required=False,
                        help='A name of a user who owns a repository. Required.')
    parser.add_argument('--url', required=False,
                        help='An URL to Github repository.')

    # Start parsing sys.argv
    arg_dict = parser.parse_args().__dict__
    branch = arg_dict['branch']  # `master` by default
    user = arg_dict['username']
    repo = arg_dict['repository_name']
    url = arg_dict['url']

    if (user is None) or (repo is None):
        if url is None:
            # No repository information was typed, exiting...
            print('Usage: --user <USERNAME> --repo <REPOSITORY NAME> --branch'
                  '<BRANCH NAME> (optional) or ')
            print('--url <LINK TO REPOSITORY>')
            exit(-1)
        # Cut the `http` header of an URL
        chopped_url = sub(r'https+:\/\/', '', url)
        # Extract user and repository names from URL
        user, repo = findall(r'\/{1}([^\/]+)', chopped_url)
    return user, repo, branch
f7bf4b0cc27a87add8f65f82bebbabb6a4b9ca06
6,418
import itertools
from collections import defaultdict


def collect_inventory_values(dataset, inventory_list, parameter_map):
    """
    Collect inventories from a dataset.
    """
    # Collect raw/unicode/clts for all relevant inventories
    to_collect = []
    for catalog in inventory_list.keys():
        to_collect += list(
            itertools.chain.from_iterable(inventory_list[catalog].values())
        )

    values = defaultdict(list)
    for row in dataset["ValueTable"]:
        if row["Contribution_ID"] in to_collect:
            values[row["Contribution_ID"]].append(
                {
                    "raw": row["Value"],
                    "unicode": parameter_map[row["Parameter_ID"]]["unicode"],
                    "bipa": parameter_map[row["Parameter_ID"]]["bipa"],
                }
            )

    return values
1c59e0784b6fc4db24f994440990e46a0ba2b1f0
6,420
def flatten_list(a_list, parent_list=None):
    """Given a list/tuple as entry point, return a flattened list version.

    EG:
    >>> flatten_list([1, 2, [3, 4]])
    [1, 2, 3, 4]

    NB: The kwargs are only for internal use of the function and should not be
    used by the caller.
    """
    if parent_list is None:
        parent_list = []
    for element in a_list:
        if isinstance(element, list):
            flatten_list(element, parent_list=parent_list)
        elif isinstance(element, tuple):
            flatten_list(element, parent_list=parent_list)
        else:
            parent_list.append(element)
    return parent_list
dd6c9c66a370e65744ede40dfdc295b0ec63379a
6,423
from typing import List, Text


def list_to_csv_str(input_list: List) -> Text:
    """
    Concatenates the elements of the list, joining them by ",".

    Parameters
    ----------
    input_list : list
        List with elements to be joined.

    Returns
    -------
    str
        Returns a string, resulting from concatenation of list elements,
        separated by ",".

    Example
    -------
    >>> from pymove import conversions
    >>> a = [1, 2, 3, 4, 5]
    >>> conversions.list_to_csv_str(a)
    '1,2,3,4,5'
    """
    return list_to_str(input_list)
4c172b0ce3daba01f2d976bc60739846b852c459
6,424
def scheme_listp(x):
    """Return whether x is a well-formed list. Assumes no cycles."""
    while x is not nil:
        if not isinstance(x, Pair):
            return False
        x = x.second
    return True
e5001695035d2d24e2914295e8ae2f86d8ead0b3
6,425
def list_to_dict(config):
    """
    Convert list based beacon configuration into a dictionary.
    """
    _config = {}
    list(map(_config.update, config))
    return _config
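# Usage sketch with a hypothetical beacon configuration (illustrative values):
# each single-key dict in the list is folded into one flat dict.
config = [{'interval': 5}, {'events': ['complete', 'error']}]
print(list_to_dict(config))  # {'interval': 5, 'events': ['complete', 'error']}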
3d7ace7612e67a0c406a2a400ad3147f99dbef0a
6,426
def get_model(model_name, in_channels=3, input_size=224, num_classes=1000):
    """Get model

    Args:
        --model_name: model's name
        --in_channels: default is 3
        --input_size: default is 224
        --num_classes: default is 1000 for ImageNet

    return:
        --model: model instance
    """
    string = model_name
    if model_name == 'cmt_ti':
        model = CMT_Ti(in_channels=in_channels, input_size=input_size, num_classes=num_classes)
    elif model_name == 'cmt_xs':
        model = CMT_XS(in_channels=in_channels, input_size=input_size, num_classes=num_classes)
    elif model_name == 'cmt_s':
        model = CMT_S(in_channels=in_channels, input_size=input_size, num_classes=num_classes)
    elif model_name == 'cmt_b':
        model = CMT_B(in_channels=in_channels, input_size=input_size, num_classes=num_classes)
    else:
        raise Exception('No other models!')

    print(string + ': \n', model)
    total = sum(p.numel() for p in model.parameters())
    print("Total params: %.2fM" % (total / 1e6))
    return model
0380a19e2a063382920b7986d1087aaf70f05eda
6,427
import numpy as np


def check_edge_heights(stack, shifts, height_resistance, shift_lines, height_arr,
                       MIN_H, MAX_H, RESOLUTION):
    """
    Check all edges and output an array indicating which ones are
    0 - okay at minimum pylon height, 2 - forbidden, 1 - to be computed
    NOTE: function not used here! only for test purposes
    """
    # print(len(stack))
    for i in range(len(stack)):
        v_x = stack[-i - 1][0]
        v_y = stack[-i - 1][1]
        # so far height on in edges
        for s in range(len(shifts)):
            neigh_x = v_x + shifts[s][0]
            neigh_y = v_y + shifts[s][1]
            # get minimum heights of v_x, v_y dependent on incoming edge
            bres_line = shift_lines[s] + np.array([v_x, v_y])
            # required heights
            S = int(np.sqrt((v_x - neigh_x)**2 + (v_y - neigh_y)**2)) * RESOLUTION
            # left and right point
            yA = height_resistance[v_x, v_y] + MIN_H
            yB = height_resistance[neigh_x, neigh_y] + MIN_H
            # compute lowest point of sag
            x0 = S / 2 - ((yB - yA) * CAT_H / (CAT_W * S))
            # compute height above x0 at left point
            A_height = (CAT_W * x0**2) / (2 * CAT_H)
            # iterate over points on bres_line
            stepsize = S / (len(bres_line) + 1)
            heights_above = np.zeros(len(bres_line))
            for k, (i, j) in enumerate(bres_line):
                x = x0 - stepsize * (k + 1)
                cat = (CAT_W * x**2) / (2 * CAT_H)
                heights_above[k] = yA - A_height - height_resistance[i, j] + cat
            # analyse heights_above:
            if np.all(heights_above >= 11):
                # whole cable is okay
                fine_60 = 0
            elif np.any(heights_above < -MAX_H - MIN_H):
                # would not work with 80 - 80
                fine_60 = 2
            else:
                # somewhere inbetween
                fine_60 = 1
            height_arr[s, neigh_x, neigh_y] = fine_60
    return height_arr
f73c6fd0396967e2a5ecfa89d52f1468d2005967
6,428
def linear_int_ext(data_pts, p, scale=None, allow_extrap=False):
    """
    Interpolate data points to find remaining unknown values absent from `p`,
    with optionally scaled axes. If `p` is not in the range and
    `allow_extrap` == True, a linear extrapolation is done using the two data
    points at the end corresponding to the `p`.

    Parameters
    ----------
    data_pts : list_like(tuple)
        [(a_1, ... a_n), ...] sorted on the required axis (either direction).
    p : list_like
        Required point to interpolate / extrapolate with at least a single
        known component, i.e. :math:`(..., None, p_i, None, ...)`. If more
        than one is supplied, the first is used.
    scale :
        Same as ``line_pt`` scale.
    allow_extrap : bool, optional
        If True linear extrapolation from the two adjacent endpoints is
        permitted. Default = False.

    Returns
    -------
    list :
        Interpolated / extrapolated point :math:`[q_1, ..., q_n]` where
        :math:`q_i = p_i` from above.
    """
    if len(data_pts) < 2:
        raise ValueError("At least two data points required.")

    if scale is None:
        scale = [None] * len(data_pts[0])

    # Get working axis.
    for ax, x in enumerate(p):
        if x is not None:
            break
    else:
        raise ValueError("Requested point must include at least one known "
                         "value.")

    def on_axis(li):
        # Return value along required axis.
        return li[ax]

    # Get two adjacent points for straight line.
    try:
        # Try interpolation.
        l_idx, r_idx = bracket_list(data_pts, p, key=on_axis)
    except ValueError:
        if not allow_extrap:
            raise ValueError("Point not within data range.")

        if ((on_axis(data_pts[0]) < on_axis(data_pts[-1])) !=
                (on_axis(p) < on_axis(data_pts[0]))):
            l_idx, r_idx = -2, -1  # RHS extrapolation.
        else:
            l_idx, r_idx = 0, 1  # LHS extrapolation.

    return line_pt(data_pts[l_idx], data_pts[r_idx], p, scale)
f69cc25d4610987a5f76f21e23df49efad5c6a7f
6,429
def eval_in_els_and_qp(expression, ig, iels, coors, fields, materials, variables,
                       functions=None, mode='eval', term_mode=None,
                       extra_args=None, verbose=True, kwargs=None):
    """
    Evaluate an expression in given elements and points.

    Parameters
    ----------
    expression : str
        The expression to evaluate.
    fields : dict
        The dictionary of fields used in `variables`.
    materials : Materials instance
        The materials used in the expression.
    variables : Variables instance
        The variables used in the expression.
    functions : Functions instance, optional
        The user functions for materials etc.
    mode : one of 'eval', 'el_avg', 'qp'
        The evaluation mode - 'qp' requests the values in quadrature points,
        'el_avg' element averages and 'eval' means integration over each term
        region.
    term_mode : str
        The term call mode - some terms support different call modes and
        depending on the call mode different values are returned.
    extra_args : dict, optional
        Extra arguments to be passed to terms in the expression.
    verbose : bool
        If False, reduce verbosity.
    kwargs : dict, optional
        The variables (dictionary of (variable name) : (Variable instance))
        to be used in the expression.

    Returns
    -------
    out : array
        The result of the evaluation.
    """
    weights = nm.ones_like(coors[:, 0])
    integral = Integral('ie', coors=coors, weights=weights)

    domain = fields.values()[0].domain

    region = Region('Elements', 'given elements', domain, '')
    region.cells = iels + domain.mesh.el_offsets[ig]
    region.update_shape()
    domain.regions.append(region)

    for field in fields.itervalues():
        field.clear_mappings(clear_all=True)
        for ap in field.aps.itervalues():
            ap.clear_qp_base()

    aux = create_evaluable(expression, fields, materials, variables.itervalues(),
                           Integrals([integral]), functions=functions,
                           mode=mode, extra_args=extra_args, verbose=verbose,
                           kwargs=kwargs)
    equations, variables = aux

    out = eval_equations(equations, variables, preserve_caches=False,
                         mode=mode, term_mode=term_mode)

    domain.regions.pop()

    return out
b71a20f0806ac03f9f995ffa41f317bc2c029d1c
6,430
import pandas as pd


def tracks2Dataframe(tracks):
    """
    Saves list of Track objects to pandas dataframe

    Input:
        tracks: List of Track objects

    Output:
        df: Pandas dataframe
    """
    if len(tracks) == 0:
        print("Error saving to CSV. List of tracks is empty")
        return

    # Collect tracks into single dataframe
    df = pd.DataFrame()
    for t in tracks:
        df = df.append(t.toDataframe())
    df = df.sort_values(by=['frame', 'id'], ascending=[True, True])
    return df
9d25e7f9535cfefc5b6faf791555b382edf12a07
6,431
import numpy as np


def sift_point_to_best(target_point, point, sift_dist):
    """ Move a point to target point given a distance.
    Based on Jensen's inequality formula.

    Args:
        target_point: A ndarray or tensor, the target point of pca,
        point: A ndarray or tensor, point of pca,
        sift_dist: A float, distance where point will sift to new one.

    Returns:
        new_points: A tuple, a couple of new updated points.

    References:
        https://en.wikipedia.org/wiki/Jensen%27s_inequality
    """
    dist = np.sqrt(np.sum((point - target_point) ** 2))
    a = sift_dist / dist
    new_point = np.array([
        point[0] * a + (1 - a) * target_point[0],
        point[1] * a + (1 - a) * target_point[1]
    ])
    new_points = (new_point[0], new_point[1])
    return new_points
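# A numeric check with hypothetical 2-D points (using the numpy import above):
# (3, 4) is distance 5 from the origin, so sift_dist=1 gives a = 1/5 and the
# new point lands exactly 1 unit from the target along the connecting line.
target = np.array([0.0, 0.0])
point = np.array([3.0, 4.0])
print(sift_point_to_best(target, point, sift_dist=1.0))  # (0.6, 0.8)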
a14216998631f22d6c8d4e98112672608b8477e5
6,432
import numpy as np


def jrandom_counts(sample, randoms, j_index, j_index_randoms, N_sub_vol, rp_bins,
                   pi_bins, period, num_threads, do_DR, do_RR):
    """
    Count jackknife random pairs: DR, RR
    """
    if do_DR is True:
        DR = npairs_jackknife_xy_z(sample, randoms, rp_bins, pi_bins, period=period,
                                   jtags1=j_index, jtags2=j_index_randoms,
                                   N_samples=N_sub_vol, num_threads=num_threads)
        DR = np.diff(np.diff(DR, axis=1), axis=2)
    else:
        DR = None
    if do_RR is True:
        RR = npairs_jackknife_xy_z(randoms, randoms, rp_bins, pi_bins, period=period,
                                   jtags1=j_index_randoms, jtags2=j_index_randoms,
                                   N_samples=N_sub_vol, num_threads=num_threads)
        RR = np.diff(np.diff(RR, axis=1), axis=2)
    else:
        RR = None

    return DR, RR
dce697b11f1d66b61aef46982c40b0310a292a92
6,433
from typing import Any

import pyarrow as pa


def process_not_inferred_array(ex: pa.ArrowInvalid, values: Any) -> pa.Array:
    """Infer `pyarrow.array` from PyArrow inference exception."""
    dtype = process_not_inferred_dtype(ex=ex)
    if dtype == pa.string():
        array: pa.Array = pa.array(obj=[str(x) for x in values], type=dtype, safe=True)
    else:
        raise ex  # pragma: no cover
    return array
c2d84f436dbd1123e38e1468101f8910e928e9ba
6,434
import numpy as np


def start_end(tf):
    """Find start and end indices of running streaks of True values"""
    n = len(tf)
    tf = np.insert(tf, [0, len(tf)], [False, False])
    # 01 and 10 masks
    start_mask = (tf[:-1] == 0) & (tf[1:] == 1)
    end_mask = (tf[:-1] == 1) & (tf[1:] == 0)
    # Locations
    start_loc = np.where(start_mask)[0]
    end_loc = np.minimum(np.where(end_mask)[0] - 1, n-1)
    return start_loc, end_loc
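# A worked example on a small boolean mask: the True streaks in
# [F, T, T, F, T] run over indices 1-2 and 4-4.
mask = np.array([False, True, True, False, True])
starts, ends = start_end(mask)
print(starts)  # [1 4]
print(ends)    # [2 4]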
592a55da0d1c02259676444d2dd640f759dfb62d
6,435
def remove_provinces(data, date_range):
    """
    REMOVE PROVINCES
    :param data: The Data received from the API
    :param date_range: the date range of the data
    :return: data after removing provinces
    """
    countries_with_provinces = []
    names_of_countries_with_prov = []

    # get countries with provinces
    for country in data[:]:
        if country['province'] is not None:
            if country['country'] not in names_of_countries_with_prov:
                names_of_countries_with_prov.append(country['country'])
            countries_with_provinces.append(data.pop(data.index(country)))
        else:
            pass

    # deal with countries with provinces
    for country_name in names_of_countries_with_prov[:]:
        # for each country,
        countries = list(
            filter(lambda x: x['country'] == country_name, countries_with_provinces))
        names_of_countries_with_prov.remove(country_name)

        # calculate total cases, deaths & recovered per day
        cases = {}
        recovered = {}
        deaths = {}
        for date in date_range:
            cs = 0
            dt = 0
            rc = 0
            # sum data up per province
            for prov in countries:
                cs += prov['timeline']['cases'][date]
                dt += prov['timeline']['deaths'][date]
                rc += prov['timeline']['recovered'][date]
            cases[date] = cs
            recovered[date] = rc
            deaths[date] = dt

        # return country after removing provinces
        totals = ({'country': country_name, 'province': None, 'timeline': {
            'cases': cases, 'deaths': deaths, 'recovered': recovered}})
        data.append(totals)

    return data
05e973254402fb2c9873fa065d45a6a5dd3da353
6,436
def plot_publish(families, targets=None, identifiers=None, keys=None):
    """Parse and plot all plugins by families and targets

    Args:
        families (list): List of interested instance family names
        targets (list, optional): List of target names
        identifiers (list, optional): List of interested dict names, take
            ["context.data", "instance.data"] if not provided.
        keys (list, optional): List of interested key names, return all dict
            keys if not provided.
    """
    if not targets:
        targets = ["default"] + api.registered_targets()

    plugins = api.discover()
    plugins = logic.plugins_by_families(plugins, families)
    plugins = logic.plugins_by_targets(plugins, targets)

    reports = list()
    for plugin in plugins:
        report = plot_plugin(plugin, identifiers, keys)
        if report:
            reports.append(report)

    return reports
de2dc8cf3184fdd4d883e340256b55153346a3a9
6,437
import torch


def mdetr_resnet101_refcocoplus(pretrained=False, return_postprocessor=False):
    """
    MDETR R101 with 6 encoder and 6 decoder layers.
    Trained on refcoco+, achieves 79.52 val accuracy
    """
    model = _make_detr("resnet101")
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://zenodo.org/record/4721981/files/refcoco%2B_resnet101_checkpoint.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcess()
    return model
30f73654fbabccc35629c9adb3d7ac91c5fe368d
6,439
import re


def readConfigFile(filePath):
    """
    Read the config file and generate a dictionary containing an entry for
    every module of the installation.
    """
    modules_attributes_list = []
    with open(filePath, "r") as confFile:
        for i, line in enumerate(confFile.readlines()):
            # Remove everything that is written after "#" character (comments)
            line = line.split("#")[0]
            line = line.split("//")[0]
            line = line.split("$")[0]
            # Remove special characters
            line = re.sub('[!@#$\0\\n ]', '', line)
            # Get the MAC addresses and the modules number
            words = line.split(",")
            if len(words) == 4:
                modID = int(words[0])
                posY = int(words[1])
                posX = int(words[2])
                macAddr = words[3]
                modules_attributes_list.append((modID, posY, posX, macAddr))
            elif len(words) < 2:
                pass
            else:
                raise AttributeError("Wrong formatting of the MAC file.")
    return modules_attributes_list
fadaec4dd005d6337eb5950b8782d5db944fb4cc
6,441
def unpad_pkcs7(data):
    """
    Strips PKCS#7 padding from data.
    Raises ValueError if padding is invalid.
    """
    if len(data) == 0:
        raise ValueError("Error: Empty input.")

    pad_value = data[-1]
    if pad_value == 0 or pad_value > 16:
        raise ValueError("Error: Invalid padding.")

    for i in range(1, pad_value + 1):
        if data[-i] != pad_value:
            raise ValueError("Error: Invalid padding.")

    unpadded = data[: (len(data) - pad_value)]
    return unpadded
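# A round-trip sketch with a hypothetical message: four bytes of value 4 pad
# it out, and stripping them restores the original; a tampered pad byte raises.
padded = b"YELLOW SUBMARINE" + b"\x04" * 4
print(unpad_pkcs7(padded))  # b'YELLOW SUBMARINE'
try:
    unpad_pkcs7(b"YELLOW SUBMARINE" + b"\x04\x04\x03\x04")
except ValueError as e:
    print(e)  # Error: Invalid padding.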
27e59b8a880c130997f19814135c09cb6e94354d
6,442
def create_output_channel(
    mgr: sl_tag.TagManager, group: str, name: str, data_type: sl_tag.DataType
) -> sl_tag.TagData:
    """Create a FlexLogger output channel."""
    # "Import" the channel into FlexLogger.
    full_name = get_tag_prefix() + ".Import.Setpoint.{}.{}".format(group, name)
    mgr.open(full_name, data_type, create=True)

    # Once FlexLogger creates the channel, we'll interact with it as an "export" channel
    # (for both reads and writes).
    full_name = get_tag_prefix() + ".Export.Setpoint.{}".format(name)
    # Go ahead and pre-create the output channel, for ease-of-use. Otherwise, when
    # trying to read its value, we'd have to be prepared for an ApiException complaining
    # that the tag doesn't exist.
    mgr.open(full_name, data_type, create=True)
    return sl_tag.TagData(full_name, data_type)
40bf2f6f555993deb4433d00768a3241dc8d72f6
6,443
import re


def slugify(value, allow_unicode=False):
    """
    adapted from https://github.com/django/django/blob/master/django/utils/text.py

    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    value = str(value)
    value = value.replace(":", "_")
    value = value.replace("/", "_")
    value = re.sub(r'[^\w\s-]', '', value.lower())
    return re.sub(r'[-\s]+', '-', value).strip('-_')
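# A couple of hypothetical inputs; note that ':' and '/' are mapped to
# underscores before the generic character cleanup runs.
print(slugify("Hello World!"))      # hello-world
print(slugify("models/resnet:v2"))  # models_resnet_v2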
f87a54f124a06fde2163fec39ba41881032db569
6,444
import torch


def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:
    """Converts raw text into a flat Tensor."""
    data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
    return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
1c4bb8cf9997f6a6205c7c4f1122892b528f5c0e
6,445
def rna_view_redirect(request, upi, taxid):
    """Redirect from urs_taxid to urs/taxid."""
    return redirect('unique-rna-sequence', upi=upi, taxid=taxid, permanent=True)
7a45b8b75e2cffb7573a7856c74d8e7b21e70543
6,446
from typing import AbstractSet


def skip_for_variants(meta: MetaData, variant_keys: AbstractSet[str]) -> bool:
    """Check if the recipe uses any given variant keys

    Args:
        meta: Variant MetaData object
        variant_keys: Set of variant key names to test against

    Returns:
        True if any variant key from variant_keys is used
    """
    # This is the same behavior as in
    # conda_build.metadata.Metadata.get_hash_contents but without leaving out
    # "build_string_excludes" (python, r_base, etc.).
    dependencies = set(meta.get_used_vars())
    trim_build_only_deps(meta, dependencies)
    return not dependencies.isdisjoint(variant_keys)
89bc8bf82431043cc4c6b42b6f8385df14c8d7d1
6,447
def _is_safe_url(url, request):
    """Override the Django `is_safe_url()` to pass a configured list of
    allowed hosts and enforce HTTPS."""
    allowed_hosts = (
        settings.DOMAIN,
        urlparse(settings.EXTERNAL_SITE_URL).netloc,
    )
    require_https = request.is_secure() if request else False
    return is_safe_url(url, allowed_hosts=allowed_hosts, require_https=require_https)
e1a8779c72b6d5adfa3fe01b478783d81ef515de
6,448
def _server():
    """
    Reconstitute the name of this Blueprint I/O Server.
    """
    return urlparse.urlunparse((request.environ.get('wsgi.url_scheme', 'https'),
                                request.environ.get('HTTP_HOST', 'devstructure.com'),
                                '', '', '', ''))
122457aa7f2a5e301299ccaaed4bba75cf273f5a
6,450
def get_range_api(spreadsheetToken, sheet_id, range, valueRenderOption=False):
    """
    Read the values of a single range of a spreadsheet, given a spreadsheetToken
    and a range. The returned data is limited to 10 MB.
    :return:
    """
    range_fmt = sheet_id + '!' + range
    get_range_url = cfg.get_range_url.format(spreadsheetToken=spreadsheetToken, range=range_fmt)
    headers = {
        "Authorization": "Bearer " + cfg.access_token,
        "Content-Type": "application/json"
    }
    params = {
        "valueRenderOption": "ToString" if valueRenderOption else None
    }
    result = get_http_request(get_range_url, headers=headers, params=params)
    return result
0c226caaa64e1bab09ac9b3af6a839609d62d5a3
6,451
import tensorflow as tf


def rotate_rboxes90(rboxes: tf.Tensor, image_width: int, image_height: int,
                    rotation_count: int = 1) -> tf.Tensor:
    """Rotate oriented rectangles counter-clockwise by multiples of 90 degrees."""
    image_width = tf.cast(image_width, dtype=tf.float32)
    image_height = tf.cast(image_height, dtype=tf.float32)
    rotation_count = rotation_count % 4
    x, y, w, h, angle = tf.split(rboxes, 5, axis=1)
    if rotation_count == 0:
        return rboxes
    elif rotation_count == 1:
        angle = tf.where(angle < -90.0, angle + 270, angle - 90)
        return tf.concat([y, image_width - x - 1, w, h, angle], axis=1)
    elif rotation_count == 2:
        angle = tf.where(angle < 0.0, angle + 180, angle - 180)
        return tf.concat([image_width - x - 1, image_height - y - 1, w, h, angle], axis=1)
    else:
        angle = tf.where(angle > 90.0, angle - 270, angle + 90)
        return tf.concat([image_height - y - 1, x, w, h, angle], axis=1)
7c21d6ea3bf8454af9aabd0f4408c9de593432ac
6,452
def get_wrong_user_credentials():
    """
    Monkeypatch GithubBackend.get_user_credentials to force the case where
    invalid credentials were provided
    """
    return dict(username='invalid', password='invalid', token='invalid',
                remember=False, remember_token=False)
3598f00b05a53cdb13543642048fc8c333eebe52
6,453
import numpy as np


def get_points(coords, m, b=None, diagonal=False):
    """Returns all discrete points on a line"""
    points = []
    x1, y1, x2, y2 = coords[0], coords[1], coords[2], coords[3]

    # vertical line
    if m is np.nan:
        # bottom to top
        y = min(y1, y2)
        while y <= max(y1, y2):
            points.append((x1, y))
            y += 1
    # horizontal line
    elif m == 0:
        # left to right
        x = min(x1, x2)
        while x <= max(x1, x2):
            points.append((x, y1))
            x += 1
    else:
        # diagonal line
        if diagonal:
            x = x1
            y = y1
            if x1 < x2:
                # left to right
                while x <= x2:
                    points.append((x, y))
                    x += 1
                    y = m * x + b
            else:
                # right to left
                while x >= x2:
                    points.append((x, y))
                    x -= 1
                    y = m * x + b
        else:
            return None
    return points
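# Hypothetical segments: a vertical line's slope is passed as np.nan (the
# function tests identity with `m is np.nan`), a horizontal one as m=0.
print(get_points((2, 1, 2, 3), np.nan))  # [(2, 1), (2, 2), (2, 3)]
print(get_points((0, 5, 2, 5), 0))       # [(0, 5), (1, 5), (2, 5)]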
23d6d4002c5625b8ea6011c26a0419c2a2710b53
6,454
def get_region_geo(region_id):
    """Get Geo/TopoJSON of a region.

    Args:
        region_id (str): Region ID (e.g. LK-1, LK-23)

    Returns:
        Geo-spatial data as GeoPandasFrame
    """
    region_type = get_entity_type(region_id)
    region_to_geo = _get_region_to_geo(region_type)
    return region_to_geo.get(region_id, {})
0493aa9e521c6d27cad6e6be07662449b6768a20
6,455
def load_vocabulary(f):
    """
    Load the vocabulary from file.

    :param f: Filename or file object.
    :type f: str or file
    :return: Vocabulary
    """
    v = Vocabulary()
    if isinstance(f, str):
        file_ = open(f, 'r')
    else:
        file_ = f
    for line in file_:
        wordid, word, wordcount = line.strip().split('\t')
        wordid, wordcount = int(wordid), int(wordcount)
        v.id2word[wordid] = word
        v.word2id[word] = wordid
        if wordcount != 0:
            v.word_count[wordid] = wordcount
    if isinstance(f, str):
        file_.close()
    return v
7a7cdf44016eccd1ceefd4fcc9e19f8f50caece2
6,456
def populate_objects(phylodata_objects, project_name, path_to_species_trees,
                     path_to_gene_trees, path_to_ranger_outputs):
    """
    this function will try and associate each phylodata object with the correct
    species_besttree, gene_bootstrap_trees and rangerDTL output files (if they exist)

    args: list of phylodata objects, name of project, paths (to species trees,
    to bootstrap gene trees, to rangerDTL)

    returns True if everything was associated, False if something has gone
    horribly awry
    """
    # try and populate the species and gene files. should work.
    for obj in phylodata_objects:
        # print("Populating species trees")
        obj.populate_species_tree(path_to_species_trees)
        # print("Populating gene trees")
        obj.populate_gene_boots(path_to_gene_trees)

    # now try and populate ranger output, if not make directory and run run_rangerDTL
    for obj in phylodata_objects:
        # print("Checking for rangerDTL outputs")
        exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)
        if exists is False:
            # run the program.
            print("Running RangerDTL")
            path_to_ranger_outputs, list_of_ranger_outputs = annotate_ranger.run_rangerDTL(obj, project_name)
            # print("Checking for new rangerDTL outputs")
            exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)
            if exists is False:
                print("error in rangerdtl_output assignation")
                raise SystemExit
    return True
5813981113be0513920fed0bc21bd6eedd6890f3
6,457
def xml_ind(content):
    """Translate an individual expression or variable to MathCAD XML.

    :param content: str, math expression or variable name.
    :return: str, formatted MathCAD XML.
    """
    ns = ''' xmlns:ml="http://schemas.mathsoft.com/math30">'''  # name space as initial head
    sub_statement = xml_stat(xml_ex(content))
    return sub_statement.replace('>', ns, 1)
67e82cbfd2796e31eaef7305e240fc9e3d93c08e
6,459
from typing import Any


def read_slug(slug: str, db: Session = Depends(get_db)) -> Any:
    """
    Get a post by slug
    """
    db_slug = get_post(db, slug)
    if db_slug is None:
        raise HTTPException(status_code=404, detail="Post not found")
    return db_slug
347dfd32aa87417cecfbb5b192288fdc0585a071
6,460
from typing import List

import pandas as pd


def left_join_predictions(anno_gold: pd.DataFrame, anno_predicted: pd.DataFrame,
                          columns_keep_gold: List[str],
                          columns_keep_system: List[str]) -> pd.DataFrame:
    """
    Given gold mention annotations and predicted mention annotations, this method
    returns the gold annotations with additional columns from the system prediction
    merged in, based on the optimal 1:1 span matching per sentence. Gold annotation
    spans will not be modified, only enriched (hence: left join). Index and column
    of dataframes must conform to a certain format (see assert in code). Spans in
    the dataframes must be non-overlapping.

    :param anno_gold:
    :param anno_predicted:
    :param columns_keep_gold:
    :param columns_keep_system:
    :return:
    """
    assert anno_gold.index.names == [DOCUMENT_ID, MENTION_ID]
    assert anno_predicted.index.names == [DOCUMENT_ID, MENTION_ID]

    mappings = []
    MENTION_ID_GOLD = "mention-id-gold"
    MENTION_ID_PREDICTED = "mention-id-predicted"

    # perform intersection sentence-wise
    if not anno_predicted.empty:
        for (doc_id, sent_idx), df_gold in anno_gold.reset_index().groupby([DOCUMENT_ID, SENTENCE_IDX]):
            spans_gold = df_gold[[TOKEN_IDX_FROM, TOKEN_IDX_TO]].values.tolist()

            # look up mentions at the same spot in system output
            anno_predicted_wout_index = anno_predicted.reset_index()
            df_predicted = anno_predicted_wout_index.loc[
                (anno_predicted_wout_index[DOCUMENT_ID] == doc_id) &
                (anno_predicted_wout_index[SENTENCE_IDX] == sent_idx)]
            spans_predicted = df_predicted[[TOKEN_IDX_FROM, TOKEN_IDX_TO]].values.tolist()

            # perform span matching (only based on spans! no type information taken into consideration!)
            matched_spans = span_matching(spans_gold, spans_predicted, keep_A=True)

            # keep MENTION_IDs of matched mentions
            for i_gold, i_predicted in matched_spans.items():
                row = {DOCUMENT_ID: doc_id, MENTION_ID_GOLD: df_gold.iloc[i_gold][MENTION_ID]}
                # this index can be None because we set keep_A=True for span_matching,
                # to always keep all gold annotations
                if i_predicted is not None:
                    row[MENTION_ID_PREDICTED] = df_predicted.iloc[i_predicted][MENTION_ID]
                mappings.append(row)
    mappings = pd.DataFrame(mappings, columns=[DOCUMENT_ID, MENTION_ID_GOLD, MENTION_ID_PREDICTED])

    if not mappings.empty:
        # merge in the columns we want to keep from the gold annotations
        mappings = mappings.merge(anno_gold[columns_keep_gold],
                                  left_on=[DOCUMENT_ID, MENTION_ID_GOLD], right_index=True)

        # merge in the columns we want to keep from the predicted annotations - note the use
        # of how="left" to keep gold annotations which have MENTION_ID_PREDICTED == None
        left_joined = mappings.merge(anno_predicted[columns_keep_system],
                                     left_on=[DOCUMENT_ID, MENTION_ID_PREDICTED],
                                     right_index=True, how="left")

        # drop unwanted columns, return to original column names, return to original index
        left_joined = left_joined.drop(columns=[MENTION_ID_PREDICTED])
        left_joined = left_joined.rename(columns={MENTION_ID_GOLD: MENTION_ID})
        left_joined = left_joined.set_index([DOCUMENT_ID, MENTION_ID])
    else:
        # append lots of NaNs if there is nothing to merge
        left_joined = pd.concat([anno_gold[columns_keep_gold],
                                 pd.DataFrame([], columns=columns_keep_system)], axis=1)

    left_joined.sort_index(inplace=True)
    return left_joined
c34785e255940f69375ee64674186b0b7e8bdf1f
6,461
from flask import json


def get_users_data(filter):
    """
    Returns users in db based on submitted filter
    :param filter:
    :return:
    """
    # presets - filter must be in one of the lists
    filter_presets = {"RegistrationStatus": ["Pending", "Verified"],
                      "userTypeName": ["Administrator", "Event Manager", "Alumni"]}

    if filter.title() in filter_presets["RegistrationStatus"]:
        users_data = db.get_users(RegistrationStatus=filter)
    elif filter.title() in filter_presets["userTypeName"]:
        users_data = db.get_users(userTypeName=filter)
    else:
        # filter doesn't exist, return all users
        users_data = db.get_users()

    users_data = list(enumerate(users_data))
    return json.jsonify(users_data)
568dab028a30c9c6f88de83054b6a7b3e95662fc
6,462
import numpy as np
import scipy.signal as sgnl
import scipy.interpolate as interp
import scipy.integrate as integr


def calAdjCCTTFromTrace(nt, dt, tStartIn, tEndIn, dataIn, synthIn):
    """
    calculate the cross correlation traveltime adjoint sources for one seismogram

    IN:
    nt       : number of timesteps in each seismogram
    dt       : timestep of seismograms
    tStartIn : float starting time for trace
    tEndIn   : float end time for trace

    OUT:
    fBar     : array containing the adjoint seismogram for the trace
    t        : ndarray containing the time steps
    """
    isCalculateWeights = False
    if isCalculateWeights:
        dSeism = np.zeros(nt)
        weight = 0

    # -- time vector
    t = np.ogrid[0:(nt-1)*dt:nt*1j]

    # -- the norm
    norm = 0

    # -- numpy arrays initialisation
    velSynth = np.zeros(nt)
    accSynth = np.zeros(nt)
    timeWind = np.zeros(nt)
    fBar = np.zeros(nt)

    # -- calculate time time-window
    tStart = tStartIn
    tEnd = tEndIn
    # -- the starting and ending sample numbers
    iStart = int(np.floor(tStart/dt))
    iEnd = int(np.ceil(tEnd/dt))
    # -- sample length of the window
    iWind = iEnd - iStart
    # print(iStart, iEnd, iWind)
    timeWind[iStart:iEnd] = sgnl.hann(iWind)

    # -- calculate the adjoint
    synth = synthIn
    interpTrc = interp.InterpolatedUnivariateSpline(t, synth)
    velSynth = interpTrc(t, 1)
    accSynth = interpTrc(t, 2)

    integrArgument = timeWind*synth*accSynth
    # -- calculating the norm
    norm = integr.simps(integrArgument, dx=dt, axis=-1, even='last')

    # -- divide every trace (row in matrices) by their norm (row in vector norm)
    fBar = timeWind*velSynth / norm

    if isCalculateWeights:
        # -- read in the data seismograms
        data = dataIn
        # -- calculate the difference between data and synthetics (amplitude) per trace
        dSeism = data - synth
        # -- calculate the weight per trace
        integrArgument = timeWind*velSynth*dSeism
        weight = integr.simps(integrArgument, dx=dt, axis=-1, even='last')
        print("weight", weight/norm)
        # -- multiply weight with every adj trace
        fBar = fBar*weight
        print(weight)

    return [fBar, t]
7524d350c241ae07810b1e5c38bb3db869136804
6,463
def get_par_idx_update_pars_dict(pars_dict, cmd, params=None, rev_pars_dict=None):
    """Get par_idx representing index into pars tuples dict.

    This is used internally in updating the commands H5 and commands PARS_DICT
    pickle files.

    The ``pars_dict`` input is updated in place.

    This code was factored out verbatim from kadi.update_cmds.py.

    :param pars_dict: dict of pars tuples
    :param cmd: dict or CommandRow
        Command for updated par_idx
    :param params: dict, optional
        If provided, this is used instead of cmd['params']
    :param rev_pars_dict: dict, optional
        If provided, also update the reverse dict.
    :returns: int
        Params index (value of corresponding pars tuple dict key)
    """
    # Define a consistently ordered tuple that has all command parameter information
    if params is None:
        params = cmd['params']
    keys = set(params.keys()) - set(('SCS', 'STEP', 'TLMSID'))

    if cmd['tlmsid'] == 'AOSTRCAT':
        pars_tup = encode_starcat_params(params) if params else ()
    else:
        if cmd['tlmsid'] == 'OBS':
            # Re-order parameters to a priority order.
            new_keys = ['obsid', 'simpos', 'obs_stop', 'manvr_start', 'targ_att']
            for key in sorted(cmd['params']):
                if key not in new_keys:
                    new_keys.append(key)
            keys = new_keys
        else:
            # Maintain original order of keys for OBS command but sort the rest.
            # This is done so the OBS command displays more nicely.
            keys = sorted(keys)

        pars_tup = tuple((key.lower(), params[key]) for key in keys)

    try:
        par_idx = pars_dict[pars_tup]
    except KeyError:
        # Along with transition to 32-bit idx in #190, ensure that idx=65535
        # never gets used. Prior to #190 this value was being used by
        # get_cmds_from_backstop() assuming that it will never occur as a
        # key in the pars_dict. Adding 65536 allows older versions to work
        # with the new cmds.pkl pars_dict.
        par_idx = len(pars_dict) + 65536
        pars_dict[pars_tup] = par_idx
        if rev_pars_dict is not None:
            rev_pars_dict[par_idx] = pars_tup

    return par_idx
9be23a37884eb674b3faa67ac17342b830c123fd
6,464
def COSTR(LR, R, W, S):
    """
    COSTR
    one value of cosine transform of two-sided function
    p. 90
    """
    COSNW = 1.
    SINNW = 0.
    COSW = COS(W)
    SINW = SIN(W)
    S = R[0]
    for I in range(1, LR):
        T = COSW * COSNW - SINW * SINNW
        COSNW = T
        S += 2 * R[I] * COSNW
    return S
c4a4ff69ac4bd2d22885fc5103e62b0d861d8ed6
6,467
def CV_SIGN(*args):
    """CV_SIGN(int a)"""
    return _cv.CV_SIGN(*args)
380758a917df6111c27e6072a72a483ac13513c9
6,468
def import_config_data(config_path):
    """
    Parameters
    ----------
    config_path : str
        path to the experimental configuration file

    Returns
    -------
    config data : dict
        dict containing experimental metadata for a given session config file
    """
    data = get_config(config_path)
    return data
c174272dfd56876f7e8dbd306248c81aa1f3bdb2
6,469
def sigmaLabel(ax, xlabel, ylabel, sigma=None):
    """Label the axes on a figure with some uncertainty."""
    confStr = r'$\pm{} \sigma$'.format(sigma) if sigma is not None else ''
    ax.set_xlabel(xlabel + confStr)
    ax.set_ylabel(ylabel + confStr)
    return ax
8ecf5ae2defd0d67c545943ea48992906612282e
6,470
def startswith(x, prefix):
    """Determines if entries of x start with prefix

    Args:
        x: A vector of strings or a string
        prefix: The prefix to test against

    Returns:
        A bool vector for each element in x if element startswith the prefix
    """
    x = regcall(as_character, x)
    return x.str.startswith(prefix)
2d41a61d5a569af1925e8df7e2218fdae2bcb7ec
6,471
def create_estimator(est_cls, const_kwargs, node, child_list):
    """
    Creates an estimator.

    :param est_cls: Function that creates the estimator.
    :param const_kwargs: Keyword arguments which do not change during the evolution.
    :param node: Node carrying the keyword arguments which are set during the
        evolution process (``node.obj_kwargs``).
    :param child_list: List of converted child nodes - should be empty.
    :return: A new estimator.
    """
    if len(child_list) > 0:
        raise ValueError("Estimator cannot have sub-estimators.")

    evolved_kwargs = node.obj_kwargs

    if 'feat_frac' in evolved_kwargs.keys():
        feat_frac = evolved_kwargs['feat_frac']
        evolved_kwargs = {key: val for key, val in evolved_kwargs.items()
                          if key != 'feat_frac'}
        est = est_cls(**const_kwargs, **evolved_kwargs)
        return RelativeTransformer(est, feat_frac)

    return est_cls(**const_kwargs, **evolved_kwargs)
306a948f11bc3f70a3c489d1740d7144bbaa4c5b
6,472
def exists(hub_id):
    """Check for existence of hub in local state.

    Args:
        hub_id(str): Id of hub to query. The id is a string of hexadecimal
            sections used internally to represent a hub.
    """
    if 'Hubs.{0}'.format(hub_id) in config.state:
        return True
    else:
        return False
4b6d333e070e1dea9300db20bcd50b58c1b9b457
6,473
import logging


def query_by_date_after(**kwargs):
    """
    Query records published on or after the given date:
    e.g. after 2020-06-03, i.e. 2020-06-03, 2020-06-04, ...
    :param kwargs: {'date': date}
    :return:
    """
    session = None
    try:
        date = kwargs['date'].strip() + config.BEGIN_DAY_TIME
        session = get_session()
        ret = session.query(Play).filter(Play.DATE_TIME >= date).order_by(
            Play.DATE_TIME.desc()).limit(config.LIMIT_MAX).all()
        # Commit to persist to the database
        session.commit()
        results = parse_object(*ret)
        logging.info('OK : play.py--->query_by_date_after() succeeded')
        return results
    except Exception as e:
        logging.critical('Error : play.py--->query_by_date_after() failed: {}'.format(e))
        return []
    finally:
        # Close the session
        session.close()
b6bbf40441ae8c3f6db861e8bed025986b7373bd
6,474
from skimage.feature import peak_local_max  # Defer slow import
from scipy.stats import iqr
import math

import numpy as np
import pandas as pd


def _step_4_find_peaks(
    aligned_composite_bg_removed_im,
    aligned_roi_rect,
    raw_mask_rects,
    border_size,
    field_df,
    sigproc_params,
):
    """
    Find peaks on the composite image

    TASK: Remove the mask rect checks and replace with the same masking logic
    that is now implemented in the alignment phase. That is, just remove the
    peaks from the source instead of in post-processing.
    """
    n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
    assert (
        aligned_composite_bg_removed_im.shape[0]
        == aligned_composite_bg_removed_im.shape[1]
    )
    aligned_dim, _ = aligned_composite_bg_removed_im.shape
    check.array_t(aligned_composite_bg_removed_im, is_square=True)

    hat_rad = sigproc_params.hat_rad
    brim_rad = sigproc_params.hat_rad + 1
    hat_mask, brim_mask = _hat_masks(hat_rad, brim_rad)

    kernel = imops.generate_gauss_kernel(1.0)
    kernel = kernel - kernel.mean()
    _fiducial_im = imops.convolve(aligned_composite_bg_removed_im, kernel)

    # Black out the convolution artifact around the perimeter of the _fiducial_im
    search_roi_rect = Rect(
        aligned_roi_rect.b + brim_rad,
        aligned_roi_rect.t - brim_rad,
        aligned_roi_rect.l + brim_rad,
        aligned_roi_rect.r - brim_rad,
    )
    search_roi = search_roi_rect.roi()
    composite_fiducial_im = np.zeros_like(aligned_composite_bg_removed_im)

    # Use Inter-Quartile Range for some easy filtering
    _iqr = 0
    if sigproc_params.iqr_rng is not None:
        _iqr = iqr(
            _fiducial_im[search_roi],
            rng=(100 - sigproc_params.iqr_rng, sigproc_params.iqr_rng),
        )

    composite_fiducial_im[search_roi] = (_fiducial_im[search_roi] - _iqr).clip(min=0)

    locs = peak_local_max(
        composite_fiducial_im,
        min_distance=hat_rad,
        threshold_abs=sigproc_params.threshold_abs,
    )

    # Emergency exit to prevent memory overflows
    # check.affirm(len(locs) < 7000, f"Too many peaks {len(locs)}")

    shift = field_df.set_index("cycle_i").sort_index()[["shift_y", "shift_x"]].values
    shift_y = shift[:, 0]
    shift_x = shift[:, 1]

    # Discard any peak in any mask_rect
    # ALIGN the mask rects to the composite coordinate system
    aligned_mask_rects = []
    for channel in range(sigproc_params.n_output_channels):
        channel_rects = safe_list_get(raw_mask_rects, channel, [])
        for cycle in range(n_cycles):
            for rect in safe_list_get(channel_rects, cycle, []):
                yx = XY(rect[0], rect[1])
                hw = WH(rect[2], rect[3])
                yx += XY(border_size, border_size) - XY(shift_x[cycle], shift_y[cycle])
                aligned_mask_rects += [(yx[0], yx[1], yx[0] + hw[0], yx[1] + hw[1])]

    aligned_mask_rects = np.array(aligned_mask_rects)
    if aligned_mask_rects.shape[0] > 0:
        # To compare every loc with every mask rect we use the tricky np.fn.outer()
        y_hits = np.greater_equal.outer(locs[:, 0], aligned_mask_rects[:, 0])
        y_hits &= np.less.outer(locs[:, 0], aligned_mask_rects[:, 2])
        x_hits = np.greater_equal.outer(locs[:, 1], aligned_mask_rects[:, 1])
        x_hits &= np.less.outer(locs[:, 1], aligned_mask_rects[:, 3])
        inside_rect = x_hits & y_hits  # inside a rect if x and y are inside the rect
        locs_to_keep = ~np.any(inside_rect, axis=1)  # Reject if inside of any masked rect
        locs = locs[locs_to_keep]

    circle_im = np.zeros((aligned_dim, aligned_dim))

    center = aligned_dim / 2

    peak_rows = []
    for field_peak_i, loc in enumerate(locs):
        if sigproc_params.radial_filter is not None:
            radius = math.sqrt((loc[0] - center) ** 2 + (loc[1] - center) ** 2)
            radius /= center
            if radius >= sigproc_params.radial_filter:
                continue

        imops.set_with_mask_in_place(circle_im, brim_mask, 1, loc=loc, center=True)

        peak_rows += [
            Munch(
                peak_i=0,
                field_peak_i=field_peak_i,
                aln_y=int(loc[0]),
                aln_x=int(loc[1]),
            )
        ]

    peak_df = pd.DataFrame(peak_rows)

    return peak_df, circle_im, aligned_mask_rects
1439336369681569a85ee3e8a8566e4d02cc2999
6,476
import socket


def get_reverse_host():
    """Return the reverse hostname of the IP address to the calling function."""
    try:
        return socket.gethostbyaddr(get_ipaddress())[0]
    except:
        return "Unable to resolve IP address to reverse hostname"
48911baf6507563470cb2d34af2392b52c58ac9a
6,477
def trans_stop(value) -> TransformerResult:
    """
    A transformer that simply returns TransformerResult.RETURN.
    """
    return TransformerResult.RETURN
c124c62a15bca1000ecdfaaa02433de405338e6c
6,478
def generator(n, mode):
    """
    Returns a data generator object.

    Args:
        mode: One of 'training' or 'validation'
    """
    flip_cams = False
    if FLAGS.regularization == 'GRU':
        flip_cams = True

    gen = ClusterGenerator(FLAGS.train_data_root, FLAGS.view_num, FLAGS.max_w,
                           FLAGS.max_h, FLAGS.max_d, FLAGS.interval_scale,
                           FLAGS.base_image_size, mode=mode, flip_cams=flip_cams)
    logger.info('Initializing generator with mode {}'.format(mode))

    if mode == 'training':
        global training_sample_size
        training_sample_size = len(gen.train_clusters)
        if FLAGS.regularization == 'GRU':
            training_sample_size = training_sample_size * 2

    return iter(gen)
1dc7950a4ae0f7c35a4ba788710327c2e63fae14
6,479
import re


def remove_multispaces(text):
    """
    Replace multiple spaces with only 1 space
    """
    return [re.sub(r' +', " ", word) for word in text]
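# Note the function operates element-wise on a list of strings,
# not on a single string:
print(remove_multispaces(["a   b", "c  d e"]))  # ['a b', 'c d e']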
0b87f6a4b0d49931b3f4bec6f9c313be05d476f0
6,480
import numpy as np


def empirical_ci(arr: np.ndarray, alpha: float = 95.0) -> np.ndarray:
    """Computes percentile range in an array of values.

    Args:
        arr: An array.
        alpha: Percentile confidence interval.

    Returns:
        A triple of the lower bound, median and upper bound of the confidence
        interval with a width of alpha.
    """
    percentiles = 50 - alpha / 2, 50, 50 + alpha / 2
    return np.percentile(arr, percentiles)
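# A quick check: for the integers 0..100, the default 95% interval is
# exactly the 2.5th and 97.5th percentiles around the median.
arr = np.arange(101)
print(empirical_ci(arr))  # [ 2.5  50.  97.5]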
8bb0ff5768c70d4e34174ea4187898513a4f841e
6,481
def euclidean(a, b):
    """Calculate GCD(a,b) with the Euclidean algorithm.

    Args:
        a (Integer): an integer > 0.
        b (Integer): an integer > 0.

    Returns:
        Integer: GCD(a,b) = m ∈ ℕ : (m|a ⋀ m|b) ⋀ (∄ n ∈ ℕ : (n|a ⋀ n|b) ⋀ n>m).
    """
    if a < b:
        a, b = b, a
    a, b = abs(a), abs(b)
    while a != 0:
        a, b = b % a, a
    return b
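# Two worked cases: gcd(252, 105) reduces 252 -> 105 -> 42 -> 21,
# and the abs() in the body also makes negative inputs safe.
print(euclidean(252, 105))  # 21
print(euclidean(-12, 8))    # 4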
8af351e251e52336d7ef946a28bb6d666bff97c3
6,482
from typing import Union


def check_reserved_pulse_id(pulse: OpInfo) -> Union[str, None]:
    """
    Checks whether the function should be evaluated generically or has special
    treatment.

    Parameters
    ----------
    pulse
        The pulse to check.

    Returns
    -------
    :
        A str with a special identifier representing which pulse behavior to use
    """
    reserved_pulse_mapping = {
        "stitched_square_pulse": _check_square_pulse_stitching,
        "staircase": _check_staircase,
    }
    for key, checking_func in reserved_pulse_mapping.items():
        if checking_func(pulse):
            return key
    return None
d4bd89da98612031fbcc7fcde9bcf40bb0843f70
6,483
def figure(*args, **kwargs):
    """
    Returns a new SpectroFigure, a figure extended with features useful for
    analysis of spectrograms. Compare pyplot.figure.
    """
    kw = {
        'FigureClass': SpectroFigure,
    }
    kw.update(kwargs)
    return plt.figure(*args, **kw)
851b02773dc974691ba6e43477244aa8e4ba0760
6,484
import six


def allow_ports(ports, proto="tcp", direction="in"):
    """
    Fully replace the incoming or outgoing ports line in the csf.conf file -
    e.g. TCP_IN, TCP_OUT, UDP_IN, UDP_OUT, etc.

    CLI Example:

    .. code-block:: bash

        salt '*' csf.allow_ports ports="[22,80,443,4505,4506]" proto='tcp' direction='in'
    """
    results = []
    ports = set(ports)
    ports = list(ports)
    proto = proto.upper()
    direction = direction.upper()
    _validate_direction_and_proto(direction, proto)
    ports_csv = ",".join(six.moves.map(six.text_type, ports))
    directions = build_directions(direction)
    for direction in directions:
        result = __salt__["file.replace"](
            "/etc/csf/csf.conf",
            # pylint: disable=anomalous-backslash-in-string
            pattern='^{0}_{1}(\ +)?\=(\ +)?".*"$'.format(proto, direction),
            # pylint: enable=anomalous-backslash-in-string
            repl='{0}_{1} = "{2}"'.format(proto, direction, ports_csv),
        )
        results.append(result)
    return results
3a14a9ea74daf4062e3bd970623284a152ffde08
6,485
def add(n1, n2, base=10):
    """Add two numbers represented as lower-endian digit lists."""
    k = max(len(n1), len(n2)) + 1
    # Pad both operands with zeros so they share a common length k.
    d1 = n1 + [0 for _ in range(k - len(n1))]
    d2 = n2 + [0 for _ in range(k - len(n2))]
    res = []
    carry = 0
    for i in range(k):
        if d1[i] + d2[i] + carry < base:
            res.append(d1[i] + d2[i] + carry)
            carry = 0
        else:
            res.append(d1[i] + d2[i] + carry - base)
            carry = 1
    # Strip zeros at the high-order (tail) end of the list.
    while res and res[-1] == 0:
        res = res[:-1]
    if res:
        return res
    return [0]
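# Usage sketch: digits are lower-endian, so [9, 9] is 99 and the final
# carry propagates into a new high-order digit.
assert add([9, 9], [1]) == [0, 0, 1]  # 99 + 1 = 100
assert add([0], [0]) == [0]           # zero survives the zero-stripping pass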
098bfa9ebedf7f219a6f9910e98c4cf9cbf13aa8
6,486
from datetime import datetime

import pytz


def folder_datetime(foldername, time_infolder_fmt=TIME_INFOLDER_FMT):
    """Parse UTC datetime from foldername.

    Foldername e.g.: hive1_rpi1_day-190801/
    """
    # t_str = folder.name.split("Photos_of_Pi")[-1][2:]  # legacy layout
    t_str = foldername.split("day-")[-1]
    day_naive = datetime.strptime(t_str, time_infolder_fmt)
    # Localize the naive timestamp directly as UTC (no conversion from a
    # local timezone is needed).
    day_utc = pytz.utc.localize(day_naive)
    return day_utc
fbbd9d9749cba6807391009e1720ca35b9dd7c7b
6,487
def get_policy_profile_by_name(name, db_session=None):
    """
    Retrieve policy profile by name.

    :param name: string representing the name of the policy profile
    :param db_session: database session
    :returns: policy profile object
    """
    db_session = db_session or db.get_session()
    vsm_hosts = config.get_vsm_hosts()
    pp = n1kv_models.PolicyProfile
    pprofiles = db_session.query(pp).filter(
        sql.and_(pp.name == name, pp.vsm_ip.in_(vsm_hosts))).all()
    if pprofiles and check_policy_profile_exists_on_all_vsm(pprofiles,
                                                            vsm_hosts):
        return pprofiles[0]
    else:
        raise n1kv_exc.PolicyProfileNotFound(profile=name)
36136be7e618490bcda58799285277982bc41f71
6,488
def ecg_hrv_assessment(hrv, age=None, sex=None, position=None):
    """
    Correct HRV features based on normative data from Voss et al. (2015).

    Parameters
    ----------
    hrv : dict
        HRV features obtained by :function:`neurokit.ecg_hrv`.
    age : float
        Subject's age.
    sex : str
        Subject's gender ("m" or "f").
    position : str
        Recording position. To compare with data from Voss et al. (2015),
        use "supine".

    Returns
    ----------
    hrv_adjusted : dict
        Adjusted HRV features.

    Example
    ----------
    >>> import neurokit as nk
    >>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks)
    >>> ecg_hrv_assessment = nk.bio_ecg.ecg_hrv_assessment(hrv)

    Notes
    ----------
    *Authors*

    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_

    *Details*

    - **Adjusted HRV**: The raw HRV features are normalized
      :math:`(raw - Mcluster) / sd` according to the participant's age and
      gender. In data from Voss et al. (2015), HRV analysis was performed on
      5-min ECG recordings (lead II and lead V2 simultaneously, 500 Hz
      sampling rate) obtained in supine position after a 5-10 minutes resting
      phase. The cohort of healthy subjects consisted of 782 women and 1124
      men between the ages of 25 and 74 years, clustered into 4 groups:
      YF (Female, Age = [25-49], n=571), YM (Male, Age = [25-49], n=744),
      EF (Female, Age = [50-74], n=211) and EM (Male, Age = [50-74], n=571).

    References
    -----------
    - Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015).
      Short-term heart rate variability - influence of gender and age in
      healthy subjects. PloS one, 10(3), e0118308.
    """
    hrv_adjusted = {}

    if position == "supine":
        if sex == "m":
            if age <= 49:
                hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"] - 930) / 133
                hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"] - 45.8) / 18.8
                hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"] - 34.0) / 18.3
                hrv_adjusted["LF_Adjusted"] = (hrv["LF"] - 203) / 262
                hrv_adjusted["HF_Adjusted"] = (hrv["HF"] - 101) / 143
                hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"] - 3.33) / 3.47
            else:
                hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"] - 911) / 128
                hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"] - 33.0) / 14.8
                hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"] - 20.5) / 11.0
                hrv_adjusted["LF_Adjusted"] = (hrv["LF"] - 84) / 115
                hrv_adjusted["HF_Adjusted"] = (hrv["HF"] - 29.5) / 36.6
                hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"] - 4.29) / 4.06
        if sex == "f":
            if age <= 49:
                hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"] - 901) / 117
                hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"] - 44.9) / 19.2
                hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"] - 36.5) / 20.1
                hrv_adjusted["LF_Adjusted"] = (hrv["LF"] - 159) / 181
                hrv_adjusted["HF_Adjusted"] = (hrv["HF"] - 125) / 147
                hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"] - 2.75) / 2.93
            else:
                hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"] - 880) / 115
                hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"] - 31.6) / 13.6
                hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"] - 22.0) / 13.2
                hrv_adjusted["LF_Adjusted"] = (hrv["LF"] - 66) / 83
                hrv_adjusted["HF_Adjusted"] = (hrv["HF"] - 41.4) / 72.1
                hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"] - 2.09) / 2.05
    return hrv_adjusted
fe3ab5e6f97920f44b7a32928785a19a6185e3d9
6,489
import warnings
from functools import wraps


def declared_attr_roles(rw=None, call=None, read=None, write=None):
    """
    Equivalent of :func:`with_roles` for use with ``@declared_attr``::

        @declared_attr
        @declared_attr_roles(read={'all'})
        def my_column(cls):
            return Column(Integer)

    While :func:`with_roles` is always the outermost decorator on properties
    and functions, :func:`declared_attr_roles` must appear below
    ``@declared_attr`` to work correctly.

    .. deprecated:: 0.6.1
        Use :func:`with_roles` instead. It works for
        :class:`~sqlalchemy.ext.declarative.declared_attr` since 0.6.1
    """
    def inner(f):
        @wraps(f)
        def attr(cls):
            # Pass f(cls) as a parameter to with_roles.inner to avoid the
            # test for iterables within with_roles. We have no idea about the
            # use cases for declared_attr in downstream code. There could be
            # a declared_attr that returns a list that should be accessible
            # via the proxy.
            return with_roles(rw=rw, call=call, read=read, write=write)(f(cls))
        return attr

    warnings.warn("declared_attr_roles is deprecated; use with_roles",
                  stacklevel=2)
    return inner
4128754046a18e332d5b6f8ba7e2c60d1d576c6b
6,490
def _in_iterating_context(node):
    """Check if the node is being used as an iterator.

    Definition is taken from lib2to3.fixer_util.in_special_context().
    """
    parent = node.parent
    # Since a call can't be the loop variant we only need to know if the
    # node's parent is a 'for' loop to know it's being used as the iterator
    # for the loop.
    if isinstance(parent, astroid.For):
        return True
    # Need to make sure the use of the node is in the iterator part of the
    # comprehension.
    elif isinstance(parent, astroid.Comprehension):
        if parent.iter == node:
            return True
    # Various built-ins can take in an iterable or list and lead to the same
    # value.
    elif isinstance(parent, astroid.Call):
        if isinstance(parent.func, astroid.Name):
            parent_scope = parent.func.lookup(parent.func.name)[0]
            if _is_builtin(parent_scope) and parent.func.name in _ACCEPTS_ITERATOR:
                return True
        elif isinstance(parent.func, astroid.Attribute):
            if parent.func.attrname == 'join':
                return True
    # If the call is in an unpacking, there's no need to warn,
    # since it can be considered iterating.
    elif (isinstance(parent, astroid.Assign) and
          isinstance(parent.targets[0], (astroid.List, astroid.Tuple))):
        if len(parent.targets[0].elts) > 1:
            return True
    return False
f1109b22842e3e9d6306a266b0654670a9f30ac8
6,491
def to_point(obj):
    """Convert `obj` to an instance of Point."""
    if obj is None or isinstance(obj, Point):
        return obj
    if isinstance(obj, str):
        obj = obj.split(",")
    return Point(*(int(i) for i in obj))
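# Usage sketch, assuming Point behaves like a simple (x, y) value class
# defined elsewhere in this module:
# to_point("3,4")  -> Point(3, 4)
# to_point((5, 6)) -> Point(5, 6)
# to_point(None)   -> None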
340182f054ebac39133edb09c9e1d049f9dde9d4
6,492
def issues(request, project_id):
    """Issues board: list, filter, paginate and create project issues."""
    if request.method == "GET":
        # Filter conditions, built from the GET query parameters.
        allow_filter_name = ['issues_type', 'status', 'priority', 'assign', 'attention']
        condition = {}
        for name in allow_filter_name:
            value_list = request.GET.getlist(name)
            if not value_list:
                continue
            condition['{}__in'.format(name)] = value_list

        # Fetch the data with pagination.
        form = IssuesModelForm(request)
        issues_obj = Issues.objects.filter(project=request.tracer.project).filter(**condition)
        page_object = Pagination(
            current_page=request.GET.get('page'),
            all_count=issues_obj.count(),
            base_url=request.path_info,
            query_params=request.GET,
            per_page=3,
        )
        issues_object_list = issues_obj[page_object.start:page_object.end]

        project_total_user = [(request.tracer.project.create_user_id,
                               request.tracer.project.create_user.username,)]
        join_user = ProjectUser.objects.filter(project_id=project_id).values_list(
            'user_id', 'user__username')
        project_total_user.extend(join_user)

        invite_form = InviteModelForm(data=request.POST)

        context = {
            'form': form,
            'invite_form': invite_form,
            'issues_object_list': issues_object_list,
            'page_html': page_object.page_html(),
            'filter_list': [
                {'title': '问题类型',
                 'filter': CheckFilter(
                     'issues_type',
                     IssuesType.objects.filter(project_id=project_id).values_list('id', 'title'),
                     request)},
                {'title': '状态', 'filter': CheckFilter('status', Issues.STATUS_CHOICES, request)},
                {'title': '优先级', 'filter': CheckFilter('priority', Issues.PRIORITY_CHOICES, request)},
                {'title': '指派者', 'filter': SelectFilter('assign', project_total_user, request)},
                {'title': '关注者', 'filter': SelectFilter('attention', project_total_user, request)},
            ]
        }
        return render(request, 'web/issues.html', context)

    if request.method == "POST":
        form = IssuesModelForm(request, data=request.POST)
        if form.is_valid():
            # Create the new issue under the current project and user.
            form.instance.project = request.tracer.project
            form.instance.create_user = request.tracer.user
            form.save()
            return JsonResponse({'code': 200})
        return JsonResponse({'msg': form.errors, 'code': 416})
099b292e8143f1559f3e78e96ad555eaa7328449
6,493
# operator.and_ is strictly binary, so the original `from operator import
# and_` would fail on the three-clause filter below; SQLAlchemy's variadic
# and_ is the intended import here.
from sqlalchemy import and_, desc


def get_30mhz_rht_data(sensor_id):
    """
    Produces a JSON with the 30MHz RH & T sensor data for a specified sensor.

    Args:
        sensor_id - 30MHz sensor ID

    Returns:
        result - JSON string
    """
    dt_from, dt_to = parse_date_range_argument(request.args.get("range"))

    query = (
        db.session.query(
            ReadingsZensieTRHClass.sensor_id,
            ReadingsZensieTRHClass.timestamp,
            ReadingsZensieTRHClass.temperature,
            ReadingsZensieTRHClass.humidity,
            ReadingsZensieTRHClass.time_created,
            ReadingsZensieTRHClass.time_updated,
        )
        .filter(
            and_(
                ReadingsZensieTRHClass.sensor_id == sensor_id,
                ReadingsZensieTRHClass.timestamp >= dt_from,
                ReadingsZensieTRHClass.timestamp <= dt_to,
            )
        )
        .order_by(desc(ReadingsZensieTRHClass.timestamp))
    )

    execute_result = db.session.execute(query).fetchall()
    result = jasonify_query_result(execute_result)
    return result
d688850720162c33ea9bdd84eaed9ecd83a49902
6,494
import demjson
import pandas as pd
import requests


def stock_em_gpzy_industry_data() -> pd.DataFrame:
    """
    East Money data center - specialty data - equity pledges - pledge ratios
    of listed companies, aggregated by industry.
    http://data.eastmoney.com/gpzy/industryData.aspx
    :return: pandas.DataFrame
    """
    url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
    page_num = _get_page_num_gpzy_industry_data()
    temp_df = pd.DataFrame()
    for page in range(1, page_num + 1):
        print(f"{page_num} pages in total, downloading page {page}")
        params = {
            "type": "ZD_HY_SUM",
            "token": "70f12f2f4f091e459a279469fe49eca5",
            "cmd": "",
            "st": "amtshareratio_pj",
            "sr": "-1",
            "p": str(page),
            "ps": "5000",
            "js": "var SIqThurI={pages:(tp),data:(x),font:(font)}",
            "rt": "52584617",
        }
        res = requests.get(url, params=params)
        data_text = res.text
        data_json = demjson.decode(data_text[data_text.find("={") + 1:])
        # The endpoint obfuscates digits with a custom font; build the
        # code -> value mapping and substitute before decoding again.
        map_dict = dict(
            zip(
                pd.DataFrame(data_json["font"]["FontMapping"])["code"],
                pd.DataFrame(data_json["font"]["FontMapping"])["value"],
            )
        )
        for key, value in map_dict.items():
            data_text = data_text.replace(key, str(value))
        data_json = demjson.decode(data_text[data_text.find("={") + 1:])
        temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
    temp_df.columns = [
        "统计时间",
        "-",
        "行业",
        "平均质押比例(%)",
        "公司家数",
        "质押总笔数",
        "质押总股本",
        "最新质押市值",
    ]
    temp_df = temp_df[["统计时间", "行业", "平均质押比例(%)", "公司家数", "质押总笔数", "质押总股本", "最新质押市值"]]
    temp_df["统计时间"] = pd.to_datetime(temp_df["统计时间"])
    return temp_df
4ac0de7bdbae197c9d89dc663dbef594e2010fc6
6,495
import tensorflow as tf


def to_float32(x: tf.Tensor) -> tf.Tensor:
    """Cast the given tensor to float32.

    Args:
        x: The tensor of any type.

    Returns:
        The tensor cast to float32.
    """
    return tf.cast(x, tf.float32)
2c25ea5450e86139fa1c21041be73e21f01b1bff
6,496
def cli_usage(name=None):
    """Custom usage message to override `cli.py`."""
    return """
    {logo}
    usage: signalyze [-h] [-o OUTPUT] [--show-name]
                     [-b | -w | -all] [--show-graph | --show-extra-info]
    """.format(logo=get_logo())
f512be4404da92aff9a237cdece487a266cbf175
6,497
def unban_chat_member(chat_id, user_id, **kwargs):
    """
    Use this method to unban a previously kicked user in a supergroup. The
    user will not return to the group automatically, but will be able to
    join via link, etc. The bot must be an administrator in the group for
    this to work.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername)
    :param user_id: Unique identifier of the target user
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`

    :type chat_id: int or str
    :type user_id: int

    :returns: Returns True on success.
    :rtype: bool
    """
    # required args
    params = dict(
        chat_id=chat_id,
        user_id=user_id,
    )

    return TelegramBotRPCRequest('unbanChatMember', params=params,
                                 on_result=lambda result: result, **kwargs)
cc1558e1d49841e47ebf4f457e615319e47abae4
6,498
import re
from typing import Optional


def parse_progress_line(prefix: str, line: str) -> Optional[float]:
    """Extract time in seconds from a prefixed string."""
    # Escape the dot so it only matches a literal decimal point.
    regexp = prefix + r"(?P<hours>\d+):(?P<minutes>\d{2}):(?P<seconds>\d{2}\.\d{2})"
    match = re.search(regexp, line)
    if not match:
        return None
    return (
        int(match.group("hours")) * 3600
        + int(match.group("minutes")) * 60
        + float(match.group("seconds"))
    )
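# Sanity check against an ffmpeg-style progress line (hypothetical input):
assert parse_progress_line("time=", "frame=10 time=00:01:23.50") == 83.5
assert parse_progress_line("time=", "no timestamp here") is None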
690b2f0e48a5f584da646f9e4058ed75e654251e
6,499
from functools import reduce

import numpy as np
import pandas as pd


def convert_array_to_df(emission_map):
    """Convert the emission map dict to a DataFrame.

    `emission_map` is a dictionary containing at least 'z_var_ave',
    'count_var', 'std_var', 'q25_var' and 'q75_var'.
    """

    def reform_df(df, nr):
        """Reform the DataFrame so it can be saved in the .map.txt file later on.

        Expected input:
            df: pd.DataFrame
            nr: int

        Returns a DataFrame that contains all data of one map and is ready
        to be written to a .map.txt file.
        """
        df_temp_fin = pd.DataFrame()
        for key, value in df.items():
            df_temp = pd.DataFrame()
            df_temp['Y'] = value.index  # CO2_mF [g/s]
            df_temp['X'] = key  # velocity_filtered [km/h]
            df_temp['Z{}'.format(nr)] = df[key].values  # avgNOx [mg/s]
            df_temp = df_temp[['X', 'Y', 'Z{}'.format(nr)]]
            df_temp_fin = df_temp_fin.append(df_temp)
        return df_temp_fin

    numbering = {'z_var_ave': 1, 'std_var': 2, 'q25_var': 3, 'q75_var': 4, 'count_var': 5}
    map_df = []
    for var in numbering.keys():
        if type(emission_map[var]) == np.ndarray:
            map = emission_map[var]
            x_axis = np.arange(emission_map['binsizes'][0],
                               emission_map['binsizes'][0] * map.shape[1] + 1,
                               emission_map['binsizes'][0])
            y_axis = np.arange(emission_map['binsizes'][1],
                               (emission_map['binsizes'][1] * map.shape[0]) + emission_map['binsizes'][1],
                               emission_map['binsizes'][1])
            # Check that the axes match the map dimensions; trim if needed.
            if map.shape[1] != len(x_axis):
                x_axis = x_axis[:map.shape[1]]
            elif map.shape[0] != len(y_axis):
                y_axis = y_axis[:map.shape[0]]
            # Build the table for the .map.txt output file.
            df = pd.DataFrame(data=map, index=y_axis, columns=x_axis)
            reformed_df = reform_df(df, numbering[var])
            map_df.append(reformed_df)
    final_df = reduce(lambda left, right: pd.merge(left, right, on=['X', 'Y']), map_df)
    return final_df
171e387ca51f543b522946a213e51040463aec74
6,500
def add_missing_flows(data):
    """There are some flows not given in ReCiPe that seem like they should be
    there, given the relatively coarse precision of these CFs."""
    new_cfs = {
        "managed forest": {
            "amount": 0.3,
            "flows": [
                "occupation, forest, unspecified",
                "occupation, field margin/hedgerow",
            ],
        },
        "annual crops": {
            "amount": 1.0,
            "flows": [
                "occupation, annual crop, flooded crop",
                "occupation, annual crop, irrigated, extensive",
            ],
        },
        "pasture": {
            "amount": 0.55,
            "flows": [
                "occupation, arable land, unspecified use",
                "occupation, grassland, natural, for livestock grazing",
                "occupation, heterogeneous, agricultural",
            ],
        },
        "artificial area": {"amount": 0.73, "flows": []},
        "permanent crops": {
            "amount": 0.7,
            "flows": [
                "occupation, permanent crop, irrigated",
                "occupation, permanent crop, irrigated, extensive",
                "occupation, permanent crop, non-irrigated",
                "occupation, permanent crop, non-irrigated, extensive",
            ],
        },
    }
    # The following were included in an earlier version of ReCiPe, but are
    # skipped here, as we don't have enough info to use them consistently:
    #
    # * 'occupation, bare area (non-use)'
    # * 'occupation, cropland fallow (non-use)'
    # * 'occupation, forest, primary (non-use)'
    # * 'occupation, forest, secondary (non-use)'
    # * 'occupation, inland waterbody, unspecified'
    # * 'occupation, lake, natural (non-use)'
    # * 'occupation, river, natural (non-use)'
    # * 'occupation, seabed, natural (non-use)'
    # * 'occupation, seabed, unspecified'
    # * 'occupation, snow and ice (non-use)'
    # * 'occupation, unspecified'
    # * 'occupation, unspecified, natural (non-use)'
    # * 'occupation, wetland, coastal (non-use)'
    # * 'occupation, wetland, inland (non-use)'
    for ds in data:
        ds["exchanges"].extend(
            [
                {"name": flow, "amount": obj["amount"]}
                for obj in new_cfs.values()
                for flow in obj["flows"]
            ]
        )
    return data
e23184bb7363db4777d9f693a3fdc4ace9f8ff14
6,501
from collections import namedtuple
from typing import List

import tensorflow as tf


def cglb_conjugate_gradient(
    K: TensorType,
    b: TensorType,
    initial: TensorType,
    preconditioner: NystromPreconditioner,
    cg_tolerance: float,
    max_steps: int,
    restart_cg_step: int,
) -> tf.Tensor:
    """
    Conjugate gradient algorithm used in CGLB model.

    The method of conjugate gradient (Hestenes and Stiefel, 1952) produces a
    sequence of vectors :math:`v_0, v_1, v_2, ..., v_N` such that
    :math:`v_0` = initial, and (in exact arithmetic) :math:`Kv_N = b`. In
    practice, the :math:`v_i` often converge quickly to approximate
    :math:`K^{-1}b`, and the algorithm can be stopped without running N
    iterations.

    We assume the preconditioner, :math:`Q`, satisfies :math:`Q ≺ K`, and
    stop the algorithm when :math:`r_i = b - Kv_i` satisfies
    :math:`\\|r_i\\|^2_{Q^{-1}} = r_i^T Q^{-1} r_i \\leq \\epsilon`.

    :param K: Matrix we want to backsolve from. Must be PSD. Shape [N, N].
    :param b: Vector we want to backsolve. Shape [B, N].
    :param initial: Initial vector solution. Shape [N].
    :param preconditioner: Preconditioner function.
    :param cg_tolerance: Expected maximum error. This value is used as a
        decision boundary against the stopping criterion.
    :param max_steps: Maximum number of CG iterations.
    :param restart_cg_step: Restart step at which the CG resets the internal
        state to the initial position using the current solution vector
        :math:`v`. Can help avoid build-up of numerical errors.
    :return: `v` where `v` approximately satisfies :math:`Kv = b`.
    """
    CGState = namedtuple("CGState", ["i", "v", "r", "p", "rz"])

    def stopping_criterion(state: CGState) -> bool:
        return (0.5 * state.rz > cg_tolerance) and (state.i < max_steps)

    def cg_step(state: CGState) -> List[CGState]:
        Ap = state.p @ K
        denom = tf.reduce_sum(state.p * Ap, axis=-1)
        gamma = state.rz / denom
        v = state.v + gamma * state.p
        i = state.i + 1
        # Recompute the residual from scratch on restart steps to limit the
        # accumulation of floating-point error.
        r = tf.cond(
            state.i % restart_cg_step == restart_cg_step - 1,
            lambda: b - v @ K,
            lambda: state.r - gamma * Ap,
        )
        z, new_rz = preconditioner(r)
        p = tf.cond(
            state.i % restart_cg_step == restart_cg_step - 1,
            lambda: z,
            lambda: z + state.p * new_rz / state.rz,
        )
        return [CGState(i, v, r, p, new_rz)]

    Kv = initial @ K
    r = b - Kv
    z, rz = preconditioner(r)
    p = z
    i = tf.constant(0, dtype=default_int())
    initial_state = CGState(i, initial, r, p, rz)
    final_state = tf.while_loop(stopping_criterion, cg_step, [initial_state])
    final_state = tf.nest.map_structure(tf.stop_gradient, final_state)
    return final_state[0].v
bc00f2423c4ffdaf0494ab6e6114222cbc694915
6,502
def num_fixed_points(permutation):
    """
    Compute the number of fixed points (elements mapping to themselves) of a
    permutation.

    :param permutation: Permutation in one-line notation (length n tuple of
        the numbers 0, 1, ..., n-1).
    :return: Number of fixed points in the permutation.

    .. rubric:: Examples

    >>> num_fixed_points((0, 2, 1))
    1
    """
    n = 0
    for i in range(len(permutation)):
        if permutation[i] == i:
            n += 1
    return n
124713cd4c90988c43630a74881e7107ff748682
6,505
import cv2
import numpy


def mutate(grid):
    """
    Alters the cycle by breaking it into two separate circuits, and then
    fusing them back together to recreate a (slightly different) cycle.

    This operation is called "sliding" in 'An Algorithm for Finding
    Hamiltonian Cycles in Grid Graphs Without Holes', and it's specifically
    mentioned because it is insufficient if you want to be able to reach all
    possible cycles for a given starting graph. That condition isn't really
    relevant to this project, so I use sliding since it's much easier to
    implement.
    """
    working_grid = grid.copy().astype(numpy.uint8)
    above = shift_down(grid)
    below = shift_up(grid)
    left = shift_right(grid)
    right = shift_left(grid)

    # this mask highlights every grid location that could be turned off
    candidates = numpy.logical_and(
        numpy.logical_and(grid, above != left),
        numpy.logical_and(above == below, left == right)
    )

    # the connected region is split into two
    coord = pick_candidate(candidates)
    flood_y, flood_x = coord
    working_grid[coord] = 0

    # find the right spot to label one of the regions '2'
    if left[coord] == 1:
        flood_x -= 1
    elif right[coord] == 1:
        flood_x += 1
    elif above[coord] == 1:
        flood_y -= 1
    elif below[coord] == 1:
        flood_y += 1

    cv2.floodFill(
        working_grid,
        numpy.zeros([v + 2 for v in grid.shape], dtype=numpy.uint8),
        (flood_x, flood_y),
        2
    )

    above = shift_down(working_grid)
    below = shift_up(working_grid)
    left = shift_right(working_grid)
    right = shift_left(working_grid)
    x_neighbors = left + right
    y_neighbors = above + below

    # this mask highlights every grid location that can fuse the two regions
    # back together while preserving a cycle
    fuse_candidates = numpy.logical_and(
        working_grid == 0,
        numpy.logical_or(
            numpy.logical_and(x_neighbors == 3, y_neighbors == 0),
            numpy.logical_and(x_neighbors == 0, y_neighbors == 3)
        )
    )
    fuse_location = pick_candidate(fuse_candidates)

    grid[coord] = 0
    grid[fuse_location] = 1
    return grid
35cc32385d090fa8091f872858fbeb0c32ecf43d
6,507
def reverse_permute(output_shape: np.array, order: np.array):
    """
    Calculates Transpose op input shape based on output shape and permute order.

    :param output_shape: Transpose output shape
    :param order: permute order
    :return: Transpose input shape corresponding to the specified output shape
    """
    return int64_array(output_shape[PermuteAttrs.get_inverse_permutation(order)])
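# Worked example (hypothetical shapes): a Transpose with order [1, 2, 0]
# maps input shape [2, 3, 4] to output shape [3, 4, 2]; indexing the output
# shape with the inverse permutation [2, 0, 1] recovers [2, 3, 4].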
ada631cc086a1dc0d2dce05f6d97a74a1f3861f4
6,508
def recursive_bisection(block, block_queue, epsilon_cut, depth_max, theta,
                        lamb, delta, verbose=False):
    """Random cut and random converge.

    Args:
        block_queue (multiprocessing.Queue): Shared queue to store blocks to
            be executed

    Returns:
        [{"range": {int: (int,int)}, "mondrian_budget": float, "depth": int}]
    """
    # Random cut
    if verbose:
        print('Before cut', block.domain_dict)
    if block.depth > depth_max:
        axis, index = cut_random(block)
    else:
        axis, index = cut_exp_mech(block, epsilon_cut)
    if verbose:
        print(axis, index)
    left_block, right_block = block.split(axis, index)

    # Random converge
    converged_block_results = []
    if left_block.size() == 1:
        converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
    elif random_converge(left_block, left_block.depth, theta, lamb, delta):
        converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
    else:
        block_queue.put(left_block)

    if right_block.size() == 1:
        converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
    elif random_converge(right_block, right_block.depth, theta, lamb, delta):
        converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
    else:
        block_queue.put(right_block)
    return converged_block_results
d65070c2cf64356277c4af044b97c5eaa8efdd3d
6,509
def _get_global_step_read(graph=None):
    """Gets global step read tensor in graph.

    Args:
        graph: The graph in which to create the global step read tensor. If
            missing, use default graph.

    Returns:
        Global step read tensor.

    Raises:
        RuntimeError: if multiple items found in collection
            GLOBAL_STEP_READ_KEY.
    """
    graph = graph or ops.get_default_graph()
    global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)
    if len(global_step_read_tensors) > 1:
        raise RuntimeError('There are multiple items in collection {}. '
                           'There should be only one.'.format(GLOBAL_STEP_READ_KEY))
    if len(global_step_read_tensors) == 1:
        return global_step_read_tensors[0]
    return None
46bf3b55b36216e4247d6d73226d22b20383321f
6,510
from unittest.mock import Mock


def light_control() -> LightControl:
    """Returns the light_control mock object."""
    mock_request = Mock()
    mock_request.return_value = ""
    return LightControl(mock_request)
cb135ed24d2e992eab64b298e5c9238576a37c5d
6,511
def map_threshold(stat_img=None, mask_img=None, alpha=.001, threshold=3.,
                  height_control='fpr', cluster_threshold=0):
    """Compute the required threshold level and return the thresholded map.

    Parameters
    ----------
    stat_img : Niimg-like object or None, optional
        Statistical image (presumably in z scale). Whenever height_control
        is 'fpr' or None, stat_img=None is acceptable. If it is 'fdr' or
        'bonferroni', an error is raised if stat_img is None.

    mask_img : Niimg-like object, optional
        Mask image.

    alpha : float, optional
        Number controlling the thresholding (either a p-value or q-value).
        Its actual meaning depends on the height_control parameter. This
        function translates alpha to a z-scale threshold.

    threshold : float, optional
        Desired threshold in z-scale. This is used only if height_control
        is None.

    height_control : string or None, optional
        False positive control meaning of cluster forming threshold:
        'fpr', 'fdr', 'bonferroni' or None.

    cluster_threshold : float, optional
        Cluster size threshold. In the returned thresholded map, sets of
        connected voxels (`clusters`) with size smaller than this number
        will be removed.

    Returns
    -------
    thresholded_map : Nifti1Image
        The stat_map thresholded at the prescribed voxel- and cluster-level.

    threshold : float
        The voxel-level threshold actually used.

    Note
    ----
    If the input image is not z-scaled (i.e. some z-transformed statistic),
    the computed threshold is not rigorous and likely meaningless.
    """
    # Check that height_control is correctly specified
    if height_control not in ['fpr', 'fdr', 'bonferroni', None]:
        raise ValueError(
            "height control should be one of "
            "['fpr', 'fdr', 'bonferroni', None]")

    # if height_control is 'fpr' or None, we don't need to look at the data
    # to compute the threshold
    if height_control == 'fpr':
        threshold = norm.isf(alpha)

    # In this case, and if stat_img is None, we return
    if stat_img is None:
        if height_control in ['fpr', None]:
            return None, threshold
        else:
            raise ValueError(
                'map_threshold requires stat_img not to be None '
                'when the height_control procedure is "bonferroni" or "fdr"')

    # Masking
    if mask_img is None:
        masker = NiftiMasker(mask_strategy='background').fit(stat_img)
    else:
        masker = NiftiMasker(mask_img=mask_img).fit()
    stats = np.ravel(masker.transform(stat_img))
    n_voxels = np.size(stats)

    # Thresholding
    if height_control == 'fdr':
        threshold = fdr_threshold(stats, alpha)
    elif height_control == 'bonferroni':
        threshold = norm.isf(alpha / n_voxels)
    stats *= (stats > threshold)

    # embed it back to 3D grid
    stat_map = get_data(masker.inverse_transform(stats))

    # Extract connected components above threshold
    label_map, n_labels = label(stat_map > threshold)
    labels = label_map[get_data(masker.mask_img_) > 0]

    for label_ in range(1, n_labels + 1):
        if np.sum(labels == label_) < cluster_threshold:
            stats[labels == label_] = 0

    return masker.inverse_transform(stats), threshold
ea7c1ca48641ed76eef2f2b0396b93fd522fdbaf
6,512
import random
import time

import pandas as pd


def grab_features(dataframe: pd.DataFrame) -> pd.DataFrame:
    """
    Attempts to assign song features, using the get_features function, to all
    songs in a given dataframe.

    This function creates a column that holds all features returned from
    Spotify in a json format for each track ID. It then explodes this column
    into a separate dataframe and concatenates it with the original.

    Parameters:
        dataframe (pandas dataframe): Dataframe to assign track IDs to. Must
            have a "trackId" column.

    Returns:
        dataframe (pandas dataframe): original pandas dataframe with song
            features included
    """
    start = time.time()
    print("Getting song features..")
    # progress_apply allows for a tqdm progress bar
    dataframe["features_json"] = dataframe["trackId"].progress_apply(get_features)
    # cannot search for tracks that have no ID
    dataframe.dropna(axis=0, subset=["trackId"], inplace=True)
    temp_list = [pd.json_normalize(x) for x in dataframe["features_json"]]
    features_df = pd.concat(x for x in temp_list).reset_index().drop(["index"], axis=1)
    dataframe = dataframe.reset_index().drop(["index"], axis=1)
    dataframe = pd.concat([dataframe, features_df], axis=1)
    dataframe.drop(["features_json"], axis=1, inplace=True)
    # check that the temporary song-feature df matches the original df
    index_check = random.randint(0, len(dataframe))
    assert (
        dataframe["trackId"].iloc[index_check] == dataframe["id"].iloc[index_check]
    ), "track IDs do not match"
    del temp_list, features_df
    end = time.time()
    print(
        f".apply took {round((end - start), 3)} seconds for {len(dataframe)} songs, "
        f"around {round((end - start) / (len(dataframe)), 3)} seconds per song"
    )
    return dataframe
7a2810b68815241a62f2ce753169bd982a17a211
6,513
def _build_geo_shape_query(field, geom, relation):
    """Creates a search condition based on a relation with a geometry in
    GeoJSON format.

    Args:
        field (str): Field for the condition.
        geom (dict): GeoJSON geometry.
        relation (str): Type of geometry search to perform. See the
            Elasticsearch GeoShape Query documentation for more details.

    Returns:
        Query: Condition for Elasticsearch.
    """
    options = {
        'shape': geom,
        'relation': relation
    }
    return GeoShape(**{field: options})
f42fe6e21da30e3d6c8466be92143b215925686c
6,514
from typing import Optional


def map_symptom(symptom_name: str) -> Optional[str]:
    """
    Maps a *symptom_name* to current symptom values in ID3C warehouse.

    There is no official standard for symptoms, we are using the values
    created by Audere from year 1 (2018-2019).
    """
    symptom_map = {
        'feeling feverish': 'feelingFeverish',
        'fever': 'feelingFeverish',
        'headache': 'headaches',
        'headaches': 'headaches',
        'cough': 'cough',
        'chills': 'chillsOrShivering',
        'chills or shivering': 'chillsOrShivering',
        'sweats': 'sweats',
        'throat': 'soreThroat',
        'sore throat or itchy/scratchy throat': 'soreThroat',
        'nausea': 'nauseaOrVomiting',
        'nausea or vomiting': 'nauseaOrVomiting',
        'nose': 'runnyOrStuffyNose',
        'runny or stuffy nose': 'runnyOrStuffyNose',
        'runny / stuffy nose': 'runnyOrStuffyNose',
        'tired': 'fatigue',
        'feeling more tired than usual': 'fatigue',
        'ache': 'muscleOrBodyAches',
        'muscle or body aches': 'muscleOrBodyAches',
        'diarrhea': 'diarrhea',
        'ear': 'earPainOrDischarge',
        'ear pain or ear discharge': 'earPainOrDischarge',
        'rash': 'rash',
        'breathe': 'increasedTroubleBreathing',
        'increased trouble with breathing': 'increasedTroubleBreathing',
        'eye': 'eyePain',
        'smell_taste': 'lossOfSmellOrTaste',
        'other': 'other',
        'none': 'none',
        'none of the above': 'none',
    }

    if symptom_name.lower() not in symptom_map:
        raise UnknownSymptomNameError(f"Unknown symptom name «{symptom_name}»")

    return symptom_map[symptom_name.lower()]
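# Usage sketch: lookups are case-insensitive; unknown names raise
# UnknownSymptomNameError (defined elsewhere in this module).
assert map_symptom("Sore throat or itchy/scratchy throat") == "soreThroat"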
c86f0694715b434b1e3b2dc3f66ddfc3afadeaf0
6,516
def get_project_details(p):
    """Extract from the pickle object detailed information about a given
    project and parse it into a comprehensive dict structure."""
    res = {}
    project = p['projects'][0]
    fields = {'Owner(s)': 'project_owners',
              'Member(s)': 'project_members',
              'Collaborator(s)': 'project_collabs',
              'User(s)': 'project_users',
              'last_accessed': 'project_last_access'}
    for k, v in fields.items():
        res[k] = project[v].strip().split(' <br/> ')
        if res[k][0] == '':
            res[k] = ['None']
    for e in ['insert_user', 'insert_date', 'project_access', 'name',
              'project_last_workflow']:
        res[e] = project[e]
    return res
f8ba3debdd8be7cc7a906851a6a6fb1e3c5f039a
6,518
def get_category(name: str) -> Category:
    """Returns a category with a given name."""
    return Category.objects.get(name=name)
4dc99ed672bbb3d7843692da797d0cd901c2c44c
6,519