Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import pandas def order_by_digestibility(sv_reg, pft_id_set, aoi_path): """Calculate the order of feed types according to their digestibility. During diet selection, animals select among feed types in descending order by feed type digestibility. Because digestibility is linearly related to crude protein content, the order of feed types may be estimated from their nitrogen to carbon ratios. Order feed types by digestibility according to the mean nitrogen to carbon ratio of each feed type across the study area aoi. Parameters: sv_reg (dict): map of key, path pairs giving paths to state variables for the previous month, including C and N in aboveground live and standing dead pft_id_set (set): set of integers identifying plant functional types aoi_path (string): path to vector layer giving the spatial extent of the model Returns: ordered_feed_types, a list of strings where each string designates a feed type by a combination of pft_i and fraction (aboveground live or standing dead), in descending order of digestibility """ def calc_nc_ratio(cstatv_path, nstatv_path, aoi_path): """Calculate the mean nitrogen to carbon ratio of a biomass fraction. Calculate the mean nitrogen to carbon ratio of a biomass fraction falling inside the study area aoi. The ratio is calculated from the state variables representing carbon and nitrogen content of that biomass fraction. If the area of interest vector dataset contains more than one polygon feature, the average ratio is calculated across features. Parameters: cstatv_path (string): path to raster containing carbon in the biomass fraction nstatv_path (string): path to raster containing nitrogen in the biomass fraction aoi_path (string): path to vector layer defining the study area of interest Returns: nc_ratio, the ratio of mean nitrogen to mean carbon for this state variable inside the model area of interest """ carbon_zonal_stat_df = pandas.DataFrame.from_dict( pygeoprocessing.zonal_statistics((cstatv_path, 1), aoi_path), orient='index') if carbon_zonal_stat_df['count'].sum() == 0: return 0 else: mean_carbon = ( carbon_zonal_stat_df['sum'].sum() / carbon_zonal_stat_df['count'].sum()) nitrogen_zonal_stat_df = pandas.DataFrame.from_dict( pygeoprocessing.zonal_statistics((nstatv_path, 1), aoi_path), orient='index') if nitrogen_zonal_stat_df['count'].sum() == 0: mean_nitrogen = 0 else: mean_nitrogen = ( nitrogen_zonal_stat_df['sum'].sum() / nitrogen_zonal_stat_df['count'].sum()) return (mean_nitrogen / mean_carbon) nc_ratio_dict = {} for pft_i in pft_id_set: for statv in ['agliv', 'stded']: cstatv_path = sv_reg['{}c_{}_path'.format(statv, pft_i)] nstatv_path = sv_reg['{}e_1_{}_path'.format(statv, pft_i)] nc_ratio = calc_nc_ratio(cstatv_path, nstatv_path, aoi_path) nc_ratio_dict['{}_{}'.format(statv, pft_i)] = nc_ratio # order the dictionary by descending N/C ratio keys, get list from values sorted_list = sorted( [(ratio, feed_type) for (feed_type, ratio) in nc_ratio_dict.items()], reverse=True) ordered_feed_types = [feed_type for (ratio, feed_type) in sorted_list] return ordered_feed_types
f586cbecea72c1bf5a901908f4f9d1414f3d6b93
5,810
def match(pattern, sexp, known_bindings={}): """ Determine if sexp matches the pattern, with the given known bindings already applied. Returns None if no match, or a (possibly empty) dictionary of bindings if there is a match Patterns look like this: ($ . $) matches the literal "$", no bindings (mostly useless) (: . :) matches the literal ":", no bindings (mostly useless) ($ . A) matches B iff B is an atom; and A is bound to B (: . A) matches B always; and A is bound to B (A . B) matches (C . D) iff A matches C and B matches D and bindings are the unification (as long as unification is possible) """ if not pattern.listp(): if sexp.listp(): return None return known_bindings if pattern.as_atom() == sexp.as_atom() else None left = pattern.first() right = pattern.rest() atom = sexp.as_atom() if left == ATOM_MATCH: if sexp.listp(): return None if right == ATOM_MATCH: if atom == ATOM_MATCH: return {} return None return unify_bindings(known_bindings, right.as_atom(), sexp) if left == SEXP_MATCH: if right == SEXP_MATCH: if atom == SEXP_MATCH: return {} return None return unify_bindings(known_bindings, right.as_atom(), sexp) if not sexp.listp(): return None new_bindings = match(left, sexp.first(), known_bindings) if new_bindings is None: return new_bindings return match(right, sexp.rest(), new_bindings)
5389534e437d9090b29af8137d9d106c6550941d
5,811
import numpy as np


def who_is_it(image_path, database, model):
    """
    Implements face recognition for the happy house by finding who is the person on the image_path image.

    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras

    Returns:
    min_dist -- the minimum distance between image_path encoding and the encodings from the database
    identity -- string, the name prediction for the person on image_path
    """
    ### START CODE HERE ###

    ## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
    encoding = img_to_encoding(image_path, model)

    ## Step 2: Find the closest encoding ##

    # Initialize "min_dist" to a large value, say 100 (≈1 line)
    min_dist = 100

    # Loop over the database dictionary's names and encodings.
    for (name, db_enc) in database.items():

        # Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
        dist = np.linalg.norm(encoding - db_enc)

        # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
        if dist < min_dist:
            min_dist = dist
            identity = name

    ### END CODE HERE ###

    if min_dist > 0.7:
        print("Not in the database.")
    else:
        print("it's " + str(identity) + ", the distance is " + str(min_dist))

    return min_dist, identity
60136acaaf1ef95a06917828eb9d545e9c802d59
5,812
import numpy as np


def generate_map_bin(geo, img_shape):
    """Create a q map and the pixel resolution bins

    Parameters
    ----------
    geo : pyFAI.geometry.Geometry instance
        The calibrated geometry
    img_shape : tuple
        The shape of the image

    Returns
    -------
    q : ndarray
        The q map
    qbin : ndarray
        The pixel resolution bins
    """
    r = geo.rArray(img_shape)
    q = geo.qArray(img_shape) / 10  # type: np.ndarray
    q_dq = geo.deltaQ(img_shape) / 10  # type: np.ndarray

    pixel_size = [getattr(geo, a) for a in ["pixel1", "pixel2"]]
    rres = np.hypot(*pixel_size)
    rbins = np.arange(np.min(r) - rres / 2., np.max(r) + rres / 2., rres / 2.)
    rbinned = BinnedStatistic1D(r.ravel(), statistic=np.max, bins=rbins)

    qbin_sizes = rbinned(q_dq.ravel())
    qbin_sizes = np.nan_to_num(qbin_sizes)
    qbin = np.cumsum(qbin_sizes)
    qbin[0] = np.min(q_dq)
    if np.max(q) > qbin[-1]:
        qbin[-1] = np.max(q)
    return q, qbin
964cbc13eb652acbdf85f656bb9d789c5f1949e5
5,813
def wklobjective_converged(qsum, f0, plansum, epsilon, gamma):
    """Compute final wkl value after convergence."""
    obj = gamma * (plansum + qsum)
    obj += epsilon * f0
    obj += - (epsilon + 2 * gamma) * plansum
    return obj
079841a8ee6d845cdac25a48306c023a1f38b5f7
5,815
def addFavDirections(request):
    """
    Add favourite directions (origin, destination and URL) for the currently
    logged in user. Expects 'origin', 'destination' and 'url' query parameters.
    """
    try:
        user = request.user
        origin = str(request.query_params.get('origin'))
        destination = str(request.query_params.get('destination'))
        url = str(request.query_params.get('url'))
        r = FavouriteDirections(user=user, origin=origin, destination=destination, url=url)
        r.save()
        return HttpResponse(status=status.HTTP_201_CREATED)
    except IntegrityError as e:
        return HttpResponse(
            "Error: Stop is already a favourite for this user.")
    except AssertionError as e:
        return HttpResponse("Error: Stop number does not exist.")
2585006e5ea1f73433671c984dce5ce4e8ed2079
5,816
import types import numpy import pandas def hpat_pandas_dataframe_index(df): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.DataFrame.index Examples -------- .. literalinclude:: ../../../examples/dataframe/dataframe_index.py :language: python :lines: 27- :caption: The index (row labels) of the DataFrame. :name: ex_dataframe_index .. command-output:: python ./dataframe/dataframe_index.py :cwd: ../../../examples Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas DataFrame attribute :attr:`pandas.DataFrame.index` implementation. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_dataframe.TestDataFrame.test_index* """ ty_checker = TypeChecker('Attribute index.') ty_checker.check(df, DataFrameType) if isinstance(df.index, types.NoneType): empty_df = not df.columns def hpat_pandas_df_index_none_impl(df): if empty_df == True: # noqa return numpy.arange(0) else: return pandas.RangeIndex(len(df)) return hpat_pandas_df_index_none_impl else: def hpat_pandas_df_index_impl(df): return df._index return hpat_pandas_df_index_impl
64b512d170ca5734a416688a9728f535248e9395
5,817
def _get_should_cache_fn(conf, group): """Build a function that returns a config group's caching status. For any given object that has caching capabilities, a boolean config option for that object's group should exist and default to ``True``. This function will use that value to tell the caching decorator if caching for that object is enabled. To properly use this with the decorator, pass this function the configuration group and assign the result to a variable. Pass the new variable to the caching decorator as the named argument ``should_cache_fn``. :param conf: config object, must have had :func:`configure` called on it. :type conf: oslo_config.cfg.ConfigOpts :param group: name of the configuration group to examine :type group: string :returns: function reference """ def should_cache(value): if not conf.cache.enabled: return False conf_group = getattr(conf, group) return getattr(conf_group, 'caching', True) return should_cache
7a11124c640bfb3ced28e2d9395593b70dc85a0a
5,818
def set_difference(lst1, lst2):
    """Return the elements and indices of elements in lst1 that are not in lst2."""
    elements = []
    indices = []
    for indx, item in enumerate(lst1):
        if item not in lst2:
            elements.append(item)
            indices.append(indx)
    return elements, indices
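# A minimal usage sketch for the helper above (function assumed in scope;
# the inputs are illustrative only):
example_elements, example_indices = set_difference([3, 1, 4, 1, 5], [1, 5])
print(example_elements)  # [3, 4] -- values not present in the second list
print(example_indices)   # [0, 2] -- their positions in the first list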
75e78de68fb2528341f7246b77f7046da2c9274f
5,819
def _super_check(args, names, op, fmt, msg, val_err): """ A flexible function is used to check whether type or value of variables is valid, which supports in both graph/pynative mode. Args: args(any): 'args' is used as one of argument for operation function and format function. names(any): 'names' is used as one of argument for format function. op(str): 'op' is a string to specify an operation. This operation will be obtained an actual function from a StringDict object, with 'args' as argument. fmt(str): 'fmt' is a string to specify a format. This format will be obtained an actual function from a StringDict object, with 'args' and 'names' as arguments. msg(str, tuple): 'msg' is used the case where format function is not necessary. When 'msg' is not None, we will throw the 'msg' as the error message. val_err(bool): Determine the type of TypeError/ValueError. When 'val_err' is True, raises ValueError, otherwise TypeError. Note: This function does not contain any parameter checks. """ op_fn = _op_dict.get(op) if not op_fn(args): if not msg: fmt_fn = _fmt_dict.get(fmt) msg = fmt_fn(args, names) if val_err: _raise_value_error(*_tuple(msg)) else: _raise_type_error(*_tuple(msg)) return args
07c63f34216e84c10c5ff0c2f886d27aaaf5f245
5,821
import json


def obter_novo_username() -> str:
    """
    -> Asks for a new username.
    :return: Returns the new username.
    """
    username = input('Qual é o seu nome? ')
    arquivo = 'arquivos_json/nome_de_usuario.json'
    with open(arquivo, 'w') as obj_arq:
        json.dump(username, obj_arq)
    return username
b4d4922d68b1fb80e5a9270638d134b5806969fd
5,822
def BDD100K_MOT2020(path: str) -> Dataset: """`BDD100K_MOT2020 <https://bdd-data.berkeley.edu>`_ dataset. The file structure should be like:: <path> bdd100k_box_track_20/ images/ train/ 00a0f008-3c67908e/ 00a0f008-3c67908e-0000001.jpg ... ... val/ b1c9c847-3bda4659/ b1c9c847-3bda4659-0000001.jpg ... ... test/ cabc30fc-e7726578/ cabc30fc-e7726578-0000001.jpg ... ... labels/ train/ 00a0f008-3c67908e.json ... val/ b1c9c847-3bda4659.json ... Arguments: path: The root directory of the dataset. Returns: Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance. """ return _tracking_loader(path, "mot")
a850b874da64d9efaf13ae3f1f5ca79805f5307d
5,823
import logging


def _parse_block_postheader(line):
    """
    (209)**************!*****************!!*************...
    """
    parts = line[1:].split(')', 1)
    qlen = int(parts[0])
    if len(parts[1]) != qlen:
        logging.warning("postheader expected %d-long query, found %d",
                        qlen, len(parts[1]))
    return qlen, parts[1]
5eee6c11160c0f91cb37c025d6d265188488cad9
5,824
import numpy as np
from scipy import stats


def _remove_dimer_outliers(bond_lengths, energies, zscore_cutoff=3.0):
    """Remove energy outliers above the given z-score cutoff."""
    z_score = stats.zscore(energies)
    idx_keep = np.where(z_score < zscore_cutoff)[0]
    return bond_lengths[idx_keep], energies[idx_keep]
409ca918213315cfeb3d279319f42bf6ca5651a5
5,825
def get_auth_token(cmd_args=None):
    """
    :param cmd_args: An optional list of additional arguments to pass on the command line
    :return: The current user's token
    """
    r = Result("whoami")
    # Append any extra command-line arguments after the '-t' flag instead of
    # nesting the list inside the argument list.
    r.add_action(oc_action(cur_context(), "whoami", cmd_args=['-t'] + (cmd_args or [])))
    r.fail_if("Unable to determine current token")
    return r.out().strip()
01edde0d4738a96d25dbea37d3d539e8a8aee7ca
5,826
import re def hash_sid(sid: str) -> str: """ Hash a SID preserving well-known SIDs and the RID. Parameters ---------- sid : str SID string Returns ------- str Hashed SID """ if re.match(WK_SID_PATTERN, sid): return sid usr_sid = re.match(SID_PATTERN, sid) if usr_sid: return ( f"{usr_sid.groups()[0]}{hash_item(usr_sid.groups()[1], delim='-')}" + f"{usr_sid.groups()[2]}" ) return sid
5966d82f1412f7bfdb64e6a9b4904861f43e7c46
5,829
def horizontal_move(t, h_speed=-2/320):
    """Probe moves horizontally at h_speed [cm/s]"""
    return 0.*t, h_speed*t, 2/16 + 0*t
d9cf0e5b968e7d8319b7f63f7d1d7a4666484ad3
5,830
def fix_pdp_post(monkeypatch):
    """monkeyed request /decision/v1 to PDP"""
    def monkeyed_policy_rest_post(uri, json=None, **kwargs):
        """monkeypatch for the POST to policy-engine"""
        return MockHttpResponse("post", uri, json=json, **kwargs)

    _LOGGER.info("setup fix_pdp_post")
    pdp_client.PolicyRest._lazy_inited = False
    pdp_client.PolicyRest._lazy_init()
    monkeypatch.setattr('policyhandler.pdp_client.PolicyRest._requests_session.post',
                        monkeyed_policy_rest_post)
    yield fix_pdp_post
    _LOGGER.info("teardown fix_pdp_post")
18957f2c9f3501ec54962e39fe664a0221133566
5,831
def del_project(): """ @api {post} /v1/interfaceproject/del InterfaceProject_删除项目 @apiName interfaceProDel @apiGroup Interface @apiDescription 删除项目 @apiParam {int} id 子项目id @apiParam {int} all_project_id 总项目id @apiParamExample {json} Request-Example: { "id": 1, "all_project_id": 4 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "data": { "environment_choice": "first", "headers": [], "host": [ "http://sx.api.mengtuiapp.com" ], "host_four": [], "host_three": [], "host_two": [], "principal": null, "pro_name": "mengtui", "user_id": 3, "variables": [] }, "status": 1 } """ data = request.json ids = data.get('id') all_project_id = data.get('all_project_id') jsondata = InterfaceProjectBusiness.del_project(ids, all_project_id) return jsondata
4697360bbdab0ce3b4b10ebbdd1fab66b938fb4b
5,832
from skimage.transform import resize from enum import Enum def draw_pattern_fill(viewport, psd, desc): """ Create a pattern fill. """ pattern_id = desc[Enum.Pattern][Key.ID].value.rstrip('\x00') pattern = psd._get_pattern(pattern_id) if not pattern: logger.error('Pattern not found: %s' % (pattern_id)) return None, None panel = get_pattern(pattern) assert panel.shape[0] > 0 scale = float(desc.get(Key.Scale, 100.)) / 100. if scale != 1.: new_shape = ( max(1, int(panel.shape[0] * scale)), max(1, int(panel.shape[1] * scale)) ) panel = resize(panel, new_shape) height, width = viewport[3] - viewport[1], viewport[2] - viewport[0] reps = ( int(np.ceil(float(height) / panel.shape[0])), int(np.ceil(float(width) / panel.shape[1])), 1, ) channels = EXPECTED_CHANNELS.get(pattern.image_mode) pixels = np.tile(panel, reps)[:height, :width, :] if pixels.shape[2] > channels: return pixels[:, :, :channels], pixels[:, :, -1:] return pixels, None
5d03ae9ebf13b3aa39c9d7f56a68a4a9056331cc
5,833
def register_post():
    """Register a new user."""
    username = bottle.request.forms.username
    password1 = bottle.request.forms.password1
    password2 = bottle.request.forms.password2
    # Does the user already exist?
    c = baza.cursor()
    c.execute("SELECT 1 FROM uporabnik WHERE username=%s", [username])
    if c.fetchone():
        # The user already exists
        return bottle.template("registracija.html",
                               username='',
                               napaka='To uporabniško ime je že zavzeto')
    elif not password1 == password2:
        # The passwords do not match
        return bottle.template("registracija.html",
                               username='',
                               napaka='Gesli se ne ujemata')
    else:
        # Everything is fine, insert the new user into the database
        password = password_md5(password1)
        print('tukaj sem')  # debug trace ("I'm here")
        c.execute("INSERT INTO uporabnik (username, password) VALUES (%s, %s)",
                  (username, password))
        bottle.redirect("/prijava/")
7c6569828f33287b7ea19ab37eb0ac868fd87c0a
5,834
def check_format_input_vector( inp, dims, shape_m1, sig_name, sig_type, reshape=False, allow_None=False, forbid_negative0=False, ): """checks vector input and returns in formatted form - inp must be array_like - convert inp to ndarray with dtype float - inp shape must be given by dims and shape_m1 - print error msg with signature arguments - if reshape=True: returns shape (n,3) - required for position init and setter - if allow_None: return None - if extend_dim_to2: add a dimension if input is only (1,2,3) - required for sensor pixel """ if allow_None: if inp is None: return None is_array_like( inp, f"Input parameter `{sig_name}` must be {sig_type}.\n" f"Instead received type {type(inp)}.", ) inp = make_float_array( inp, f"Input parameter `{sig_name}` must contain only float compatible entries.\n", ) check_array_shape( inp, dims=dims, shape_m1=shape_m1, msg=( f"Input parameter `{sig_name}` must be {sig_type}.\n" f"Instead received array_like with shape {inp.shape}." ), ) if reshape: return np.reshape(inp, (-1, 3)) if forbid_negative0: if np.any(inp <= 0): raise MagpylibBadUserInput( f"Input parameter `{sig_name}` cannot have values <= 0." ) return inp
cd26290058fbf9fba65a5ba005eaa8bd6da23a32
5,835
def get_proyecto_from_short_url(short_url):
    """
    :param short_url:
    :return: item for Proyecto
    """
    item = Proyecto.objects.get(short_url=short_url)
    if item.iniciativas_agrupadas is not None and \
            item.iniciativas_agrupadas != '' and '{' in \
            item.iniciativas_agrupadas:
        iniciativas = item.iniciativas_agrupadas.replace("{", "")
        iniciativas = iniciativas.replace("}", "")
        item.iniciativas_agrupadas = iniciativas.split(",")

    item.congresistas_with_links = hiperlink_congre(item.congresistas)
    item.fecha_presentacion = convert_string_to_time(item.fecha_presentacion)
    item.fecha_presentacion_human = arrow.get(item.fecha_presentacion).format('DD MMMM, YYYY', locale='es_es')
    item.numero_congresistas = len(item.congresistas.split(";"))
    return item
8f48d62db11bb80803ce13e259eed1b826a2450c
5,836
def select_x(data, order=None): """ Helper function that does a best effort of selecting an automatic x axis. Returns None if it cannot find x axis. """ if data is None: return None if len(data) < 1: return None if order is None: order = ['T', 'O', 'N', 'Q'] else: _validate_custom_order(order) d = _classify_data_by_type(data, order) chosen_x = None for typ in order: if len(d[typ]) >= 1: chosen_x = d[typ][0] break return chosen_x
8efe25aea57444093fe19abcf8df07080c2ec0a6
5,837
def map_clonemode(vm_info):
    """
    Convert the virtualbox config file values for clone_mode into the integers the API requires
    """
    mode_map = {"state": 0, "child": 1, "all": 2}

    if not vm_info:
        return DEFAULT_CLONE_MODE

    if "clonemode" not in vm_info:
        return DEFAULT_CLONE_MODE

    if vm_info["clonemode"] in mode_map:
        return mode_map[vm_info["clonemode"]]
    else:
        raise SaltCloudSystemExit(
            "Illegal clonemode for virtualbox profile. Legal values are: {}".format(
                ",".join(mode_map.keys())
            )
        )
39b62c11dbf9f168842a238d23f587aa64a0ff61
5,838
def update_dashboard(dashboard_slug): """Update Dashboard Update an existing Dashboard --- tags: - "Dashboards" parameters: - name: dashboard_slug in: path type: string required: true - name: name in: body schema: type: object required: - name properties: name: type: string description: new name for dashboard responses: 200: description: Success schema: type: object properties: message: type: string dashboard: $ref: '#/definitions/Dashboard' 400: $ref: '#/responses/Error' """ dashboard = Dashboard.query.filter_by( slug=dashboard_slug, owner=current_user ).first() name = request.json.get("name", None) if not name: return jsonify(error="Name is required."), 400 if Dashboard.query.filter_by(slug=slugify(name), owner=current_user).first(): return jsonify(error="A dashboard with that name already exists!"), 400 if dashboard: dashboard.set_name(name) db.session.commit() return jsonify( message="Dashboard updated successfully!", dashboard=dashboard.to_dict() ) else: return jsonify(error="Dashboard doesn't exist!"), 400
90ecfd68f6c64076893248aa7a2de58ed01afe02
5,839
import torch def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, target=100.0, model='checkpoint.pth'): """Deep Q-Learning. Params ====== n_episodes (int): maximum number of training episodes max_t (int): maximum number of timesteps per episode eps_start (float): starting value of epsilon, for epsilon-greedy action selection eps_end (float): minimum value of epsilon eps_decay (float): multiplicative factor (per episode) for decreasing epsilon target (float): desired minimal average per 100 episodes model (str): path to save model """ scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores eps = eps_start # initialize epsilon for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations[0] score = 0 for t in range(max_t): action = agent.act(state, eps) env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) # save most recent score scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") if i_episode % 100 == 0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window) >= target: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), model) break return scores
433484a848f645e4581702934748c428b5d59adf
5,840
def latLon2XY(xr, yr, lat, lon, ieast=1, azimuth=0): """ Calculate the cartesian distance between consecutive lat,lon points. Will bomb at North and South Poles. Assumes geographical coordinates and azimuth in decimal degrees, local Cartesian coordinates in km. :param xr: Reference longitude, normally 0. :param yr: Reference latitude, normally 0. :param lat: Array of latitudes. :param lon: Array of longitudes. :param int ieast: 1 if longitude increases toward the East (normal case), -1 if longitude increases toward the West. :param int azimuth: local coordinate system constructed with origin at latr,lonr, X axis ('North') in direction of azimuth, and Y axis such that X x Y = Z(down) when going from (lat,lon) to (x,y) scalar or array. :returns: Array of northward and eastward distances between consecutive points. use :func:`xy2r` to convert to a distance between consecutive points. """ #if len(lat) != len(lon): # raise ArrayMismatch, "Input array sizes do not match" radius = 6367.0 # Earth radius (km) lat = np.radians(lat) lon = np.radians(lon) # Is azimuth fixed or variable? if np.size(azimuth) == 1: angle = np.radians(azimuth)*np.ones(lat.size - 1) else: angle = np.radians(azimuth) cosazi = np.cos(angle) sinazi = np.sin(angle) xntru = xr + radius * (np.diff(lat)) yetru = yr + ieast * radius * (np.diff(lon)) * np.cos(lat[1:]) xn = xntru * cosazi + yetru * sinazi ye = -xntru * sinazi + yetru * cosazi return xn, ye
4ea265f02e87593d389bcd6839390b51cc024add
5,841
def __virtual__():
    """Only load if grafana4 module is available"""
    return "grafana4.get_org" in __salt__
acbfe3b15dafc45ab36955d0a72b92544f4dd41a
5,844
def categories_report(x):
    """Returns value counts report.

    Parameters
    ----------
    x: pd.Series
        The series with the values

    Returns
    -------
    string
        The value counts report.

        str1 = False 22 | True 20 | nan 34
        str2 = False (22) | True (20) | nan (34)
    """
    # Do counting and sorting
    counts = x.value_counts(dropna=False)
    counts.index = counts.index.map(str)
    counts = counts.sort_index()

    # Create different strings
    str1 = ' | '.join(str(counts).split("\n")[:-1])
    str2 = ' | '.join("%s (%s)" % (i, counts[i]) for i in counts.index)

    # Return
    return str2
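# An illustrative call for the report helper above (pandas assumed available,
# function assumed in scope; the series values are made up and the missing-value
# label may print as 'None' or 'nan' depending on the pandas version):
import pandas as pd

s = pd.Series([False, True, None, True, False, None, None])
print(categories_report(s))
# e.g. "False (2) | None (3) | True (2)"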
695ccd73ee73a13e92edbdf0eb242121d136ddbb
5,846
def train(total_loss, global_step, train_num_examples): """Train model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Variables that affect learning rate. # num_batches_per_epoch = train_num_examples / FLAGS.batch_size # decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) decay_steps = DECAY_STEPS # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.scalar_summary('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad is not None: tf.histogram_summary(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
66189c7fd3ec55d08e6a197f2f821adc6e1b3aad
5,847
def config_module_add(): """Add module for configuration Add an available module to the config file. POST json object structure: {"action": "add", "value": { "module": "modulename", "moduleprop": ... }} On success, an object with this structure is returned: {"_meta": {"id": <new ID>}, "module": "modulename"} Otherwise an error message and code. """ config = read_config() action = request.json["action"] if action == "add": if "value" not in request.json: return _ret_invalid_request("value") if "module" not in request.json["value"]: return _ret_invalid_request("value/module") moduletype = request.json["value"]["module"] if moduletype not in _get_available_modules(): return _ret_unknown_module(moduletype) newid = max([x["_meta"]["id"] for x in config["modules"]]) + 1 newmodule = request.json["value"] if "_meta" not in newmodule: newmodule["_meta"] = {} if "_order" not in newmodule["_meta"]: newmodule["_meta"]["order"] = 0 newmodule["_meta"]["id"] = newid config["modules"].append(newmodule) write_config(config) ret = {"_meta": {"id": newid}, "module": moduletype} return jsonify(ret) else: return _ret_unknown_action(action)
8ae9324e29408614ee324c3ba32ddab169e0b50e
5,848
def ustobj2songobj(
        ust: up.ust.Ust, d_table: dict, key_of_the_note: int = None) -> up.hts.Song:
    """
    Process a Ust object note by note and convert it for HTS.

    Japanese lyrics are assumed, so each note carries one syllable.
    Watch out for the sokuon (geminate consonant).

    ust: Ust object
    d_table: Japanese-to-romaji conversion table
    key_of_the_note: key of the song, which cannot be determined from the UST.
        In Sinsy it is 0 to 11 or 'xx'.
    """
    song = up.hts.Song()
    ust_notes = ust.notes

    # Convert each Note object
    for ust_note in ust_notes:
        hts_note = ustnote2htsnote(ust_note, d_table, key_of_the_note=key_of_the_note)
        song.append(hts_note)

    # Automatically fill in note lengths, positions, etc.
    song.autofill()
    # Set voicing start and end times according to the note lengths
    song.reset_time()
    return song
53c92783d881702aa42b7f24c8a1596248b30108
5,850
def detect_ol(table): """Detect ordered list""" if not len(table): return False for tr in table: if len(tr)!=2: return False td1 = tr[0] # Only keep plausible ordered lists if td1.text is None: return False text = td1.text.strip() if not text or len(text)>3: return False if text[-1] not in ('.', ')'): return False if not text[:-1].isalpha() and not text[:-1].isdigit(): return False if len(td1): return False return True
b7082932fba6ba7f9634e70ea424561c084a2dc1
5,851
def analyze_syntax(text): """Use the NL API to analyze the given text string, and returns the response from the API. Requests an encodingType that matches the encoding used natively by Python. Raises an errors.HTTPError if there is a connection problem. """ credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) service = discovery.build( 'language', 'v1beta1', http=http) body = { 'document': { 'type': 'PLAIN_TEXT', 'content': text, }, 'features': { 'extract_syntax': True, }, 'encodingType': get_native_encoding_type(), } request = service.documents().annotateText(body=body) return request.execute()
84387cf163f9cfab4fcabd0a43e43aab250bd01d
5,853
import numpy as np


def kabsch_numpy(X, Y):
    """ Kabsch alignment of X into Y.
        Assumes X,Y are both (Dims x N_points). See below for wrapper.
    """
    # center X and Y to the origin
    X_ = X - X.mean(axis=-1, keepdims=True)
    Y_ = Y - Y.mean(axis=-1, keepdims=True)
    # calculate covariance matrix (for each prot in the batch)
    C = np.dot(X_, Y_.transpose())
    # Optimal rotation matrix via SVD
    V, S, W = np.linalg.svd(C)
    # determinant sign for direction correction
    d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
    if d:
        S[-1] = S[-1] * (-1)
        V[:, -1] = V[:, -1] * (-1)
    # Create Rotation matrix U
    U = np.dot(V, W)
    # calculate rotations
    X_ = np.dot(X_.T, U).T
    # return centered and aligned
    return X_, Y_
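# A small sanity check for the alignment above (numpy and kabsch_numpy assumed
# in scope; the point cloud and rotation are randomly generated for illustration):
rng = np.random.default_rng(0)
X = rng.normal(size=(3, 10))

# Build a proper rotation and a rotated, translated copy of X
Q, _ = np.linalg.qr(rng.normal(size=(3, 3)))
if np.linalg.det(Q) < 0:
    Q[:, 0] *= -1  # flip one column so det(Q) = +1
Y = Q @ X + 1.5

X_aligned, Y_centered = kabsch_numpy(X, Y)
print(np.allclose(X_aligned, Y_centered, atol=1e-8))  # True: alignment recovers Q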
85e42d58f667c70b3ad0fe0fe888fdfb383d34ee
5,855
import six
import base64


def _decode(value):
    """
    Base64 decode, restoring the missing "=" padding.

    Remember to strip the extra "=" beforehand -- Docker removes it when signing.
    :param value:
    :return:
    """
    length = len(value) % 4
    if length in (2, 3,):
        value += (4 - length) * "="
    elif length != 0:
        raise ValueError("Invalid base64 string")
    if not isinstance(value, six.binary_type):
        value = value.encode()
    return base64.urlsafe_b64decode(value)
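# A quick check of the padding behaviour (helper above and the six package
# assumed importable; the inputs are illustrative only):
print(_decode("aGVsbG8"))            # b'hello'        (padding restored)
print(_decode("aGVsbG8gd29ybGQ="))   # b'hello world'  (already padded)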
c4a28605fb7f8a0d5110fb06738c31b030cae170
5,856
def header_elements(fieldname, fieldvalue):
    """Return a sorted HeaderElement list from a comma-separated header string.
    """
    if not fieldvalue:
        return []

    result = []
    for element in RE_HEADER_SPLIT.split(fieldvalue):
        if fieldname.startswith('Accept') or fieldname == 'TE':
            hv = AcceptElement.from_str(element)
        else:
            hv = HeaderElement.from_str(element)
        result.append(hv)

    return list(reversed(sorted(result)))
8846a0b5e89e0a4d0d3d6192e988dfe78e394338
5,857
def line2dict(st):
    """Convert a line of key=value pairs to a dictionary.

    :param st:
    :returns: a dictionary
    :rtype:
    """
    elems = st.split(',')
    dd = {}
    for elem in elems:
        elem = elem.split('=')
        key, val = elem
        try:
            int_val = int(val)
            dd[key] = int_val
        except ValueError:
            dd[key] = val
    return dd
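# For illustration, a call with a made-up input line (function above assumed in
# scope); integer-looking values are converted, everything else stays a string:
print(line2dict("rate=10,unit=ms,label=latency"))
# {'rate': 10, 'unit': 'ms', 'label': 'latency'}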
86bb6c2e72c8a6b2a027d797de88089067ff7475
5,858
import numpy as np


def transform(walls, spaces):
    """svg coords are in centimeters from the (left, top) corner, while we
    want metres from the (left, bottom) corner"""
    joint = np.concatenate([np.concatenate(walls), np.concatenate(spaces)])
    (left, _), (_, bot) = joint.min(0), joint.max(0)

    def tr(ps):
        x, y = ps[..., 0], ps[..., 1]
        return np.stack([x - left, bot - y], -1)/SCALE + MARGIN

    return tr(walls), [tr(s) for s in spaces]
5ae28593a72567cf3c15f75fd37b44ca7b9468a8
5,859
def isolate(result_file, isolate_file, mode, variables, out_dir, error): """Main function to isolate a target with its dependencies. Arguments: - result_file: File to load or save state from. - isolate_file: File to load data from. Can be None if result_file contains the necessary information. - mode: Action to do. See file level docstring. - variables: Variables to process, if necessary. - out_dir: Output directory where the result is stored. It's use depends on |mode|. Some arguments are optional, dependending on |mode|. See the corresponding MODE<mode> function for the exact behavior. """ # First, load the previous stuff if it was present. Namely, "foo.result" and # "foo.state". complete_state = CompleteState.load_files(result_file, out_dir) isolate_file = isolate_file or complete_state.saved_state.isolate_file if not isolate_file: error('A .isolate file is required.') if (complete_state.saved_state.isolate_file and isolate_file != complete_state.saved_state.isolate_file): error( '%s and %s do not match.' % ( isolate_file, complete_state.saved_state.isolate_file)) try: # Then process options and expands directories. complete_state.load_isolate(isolate_file, variables, error) # Regenerate complete_state.result.files. complete_state.process_inputs(LEVELS[mode]) # Finally run the mode-specific code. result = VALID_MODES[mode](out_dir, complete_state) except run_test_from_archive.MappingError, e: error(str(e)) # Then store the result and state. complete_state.save_files() return result
16201bf1fb11bafc9913fc620f0efea3de887e62
5,861
def check_structure(struct):
    """
    Return True if the monophyly structure represented by struct is
    considered "meaningful", i.e. encodes something other than an
    unstructured polytomy.
    """
    # First, transform e.g. [['foo'], [['bar']], [[[['baz']]]]], into simply
    # ['foo','bar','baz'].
    def denester(l):
        if type(l) != list:
            return l
        if len(l) == 1:
            return denester(l[0])
        return [denester(x) for x in l]

    struct = denester(struct)
    # Now check for internal structure
    if not any([type(x) == list for x in struct]):
        # Struct is just a list of language names, with no internal structure
        return False
    return True
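# Two illustrative inputs for the check above (function assumed in scope):
print(check_structure([['foo'], [['bar']], [[[['baz']]]]]))  # False: flat polytomy
print(check_structure([['foo', 'bar'], ['baz']]))            # True: nested grouping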
e07a2f39c7d3b8f2454b5171119b8698f4f58a99
5,862
import torch def batchify_with_label(input_batch_list, gpu, volatile_flag=False): """ input: list of words, chars and labels, various length. [[words,biwords,chars,gaz, labels],[words,biwords,chars,labels],...] words: word ids for one sentence. (batch_size, sent_len) chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length) output: zero padding for word and char, with their batch length word_seq_tensor: (batch_size, max_sent_len) Variable char_seq_lengths: (batch_size,1) Tensor char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable character_seq_lengths: (batch_size*max_sent_len,1) Tensor char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order label_seq_tensor: (batch_size, max_sent_len) mask: (batch_size, max_sent_len) """ batch_size = len(input_batch_list) chars = [sent[0] for sent in input_batch_list] bichars = [sent[1] for sent in input_batch_list] gazs = [sent[2] for sent in input_batch_list] labels = [sent[3] for sent in input_batch_list] char_seq_lengths = torch.LongTensor(list(map(len, chars))) max_seq_len = char_seq_lengths.max().item() with torch.no_grad(): # torch.zeros(*sizes, out=None) → Tensor # 返回一个全为标量 0 的张量,形状由可变参数sizes 定义 # sizes (int...) – 整数序列,定义了输出形状 # out(Tensor, optional) – 结果张量 char_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long() bichar_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long() label_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long() mask = autograd.Variable(torch.zeros((batch_size, max_seq_len))).byte() for idx, (seq, biseq, label, seqlen) in enumerate(zip(chars, bichars, labels, char_seq_lengths)): # torch.Tensor是一种包含单一数据类型元素的多维矩阵 # 64-bit integer (signed) torch.LongTensor torch.cuda.LongTensor char_seq_tensor[idx, :seqlen] = torch.LongTensor(seq) bichar_seq_tensor[idx, :seqlen] = torch.LongTensor(biseq) label_seq_tensor[idx, :seqlen] = torch.LongTensor(label) mask[idx, :seqlen] = torch.Tensor([1] * seqlen.item()) char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True) char_seq_tensor = char_seq_tensor[char_perm_idx] bichar_seq_tensor = bichar_seq_tensor[char_perm_idx] label_seq_tensor = label_seq_tensor[char_perm_idx] mask = mask[char_perm_idx] _, char_seq_recover = char_perm_idx.sort(0, descending=False) # keep the gaz_list in orignial order gaz_list = [gazs[i] for i in char_perm_idx] gaz_list.append(volatile_flag) if gpu: char_seq_tensor = char_seq_tensor.cuda() bichar_seq_tensor = bichar_seq_tensor.cuda() char_seq_lengths = char_seq_lengths.cuda() char_seq_recover = char_seq_recover.cuda() label_seq_tensor = label_seq_tensor.cuda() mask = mask.cuda() return gaz_list, char_seq_tensor, bichar_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
aea1b271292751740b35fe0d18b133beb7df53c7
5,863
import numpy as np


def symbolicMatrix(robot):
    """
    Denavit-Hartenberg parameters for the n-th rigid body

    theta: rotation on «z» axis
    d: translation on «z» axis
    a: translation on «x» axis
    alpha: rotation on «x» axis
    """
    return np.array([[0, 0, 0, 0],
                     [robot.symbolicJointsPositions[0, 0], robot.symbolicLinksLengths[0], 0, np.pi / 2],
                     [robot.symbolicJointsPositions[1, 0], 0, robot.symbolicLinksLengths[1], 0],
                     [robot.symbolicJointsPositions[2, 0], 0, 0, np.pi / 2],
                     [robot.symbolicJointsPositions[3, 0], robot.symbolicLinksLengths[2], 0, 0]])
3b476527336b15c171bbede76ff5b4ed2d4a6eb6
5,864
from typing import List
import json

import numpy as np
from numpy.linalg import norm


def magnitude_list(data: List) -> List:
    """
    :param data:
    :return:
    """
    if data is None or len(data) == 0:
        return []

    if isinstance(data, str):
        try:
            data = json.loads(data)
        except Exception:
            pass  # keep the string as-is if it is not valid JSON

    try:
        input_data = np.array([i for i in data])
        data = norm(input_data, axis=1).tolist()
    except Exception as e:
        print("Error in calculating magnitude ----> ")
        print("Data: ", data)
        print(e)
        raise Exception
    return data
fc124d9a21b4b08ac50731c9234676860b837acf
5,865
def lookup_axis1(x, indices, fill_value=0): """Return values of x at indices along axis 1, returning fill_value for out-of-range indices. """ # Save shape of x and flatten ind_shape = indices.shape a, b = x.shape x = tf.reshape(x, [-1]) legal_index = indices < b # Convert indices to legal indices in flat array indices = tf.clip_by_value(indices, 0., b - 1.) indices = indices + b * tf.range(a, dtype=float_type())[:, o, o] indices = tf.reshape(indices, shape=(-1,)) indices = tf.dtypes.cast(indices, dtype=int_type()) # Do indexing result = tf.reshape(tf.gather(x, indices), shape=ind_shape) # Replace illegal indices with fill_value, cast to float explicitly return tf.cast(tf.where(legal_index, result, tf.zeros_like(result) + fill_value), dtype=float_type())
6e93475d5c6324a709792903a453ffbb454d2d62
5,867
def delete_compute_job(): """ Deletes the current compute job. --- tags: - operation consumes: - application/json parameters: - name: agreementId in: query description: agreementId type: string - name: jobId in: query description: Id of the job. type: string - name: owner in: query description: owner type: string """ #since op-engine handles this, there is no need for this endpoint. Will just keep it here for backwards compat return jsonify(""), 200
112e79bdfb9c569aa0d49275bf3df14c7eecd7b5
5,868
import io


def load_dict(dict_path):
    """
    Load a dict. The first column is the value and the second column is the key.
    """
    result_dict = {}
    for idx, line in enumerate(io.open(dict_path, "r", encoding='utf8')):
        terms = line.strip("\n")
        result_dict[idx] = terms
    return result_dict
cad2061561c26e247687e7c2ee52fb5cf284352a
5,869
import numpy as np


def project(x, n):
    """ http://www.euclideanspace.com/maths/geometry/elements/plane/lineOnPlane/"""
    l = np.linalg.norm(x)
    a = normalize(x)
    b = normalize(n)
    axb = np.cross(a, b)
    bxaxb = np.cross(b, axb)
    return l * bxaxb
e80d87454457920edfbe9638e6793372000bb3bd
5,870
def topologicalSort(roots, getParents):
    """Return a topological sorting of nodes in a graph.

    roots - list of root nodes to search from
    getParents - function which returns the parents of a given node
    """
    results = []
    visited = set()

    # Use iterative version to avoid stack limits for large datasets
    stack = [(node, 0) for node in roots]
    while stack:
        current, state = stack.pop()
        if state == 0:
            # before recursing
            if current not in visited:
                visited.add(current)
                stack.append((current, 1))
                stack.extend((parent, 0) for parent in getParents(current))
        else:
            # after recursing
            assert current in visited
            results.append(current)
    return results
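# A minimal usage sketch for the sort above (function assumed in scope; the
# graph is made up). getParents is supplied as a lookup into a parent map:
parents = {"a": [], "b": ["a"], "c": ["a", "b"]}
order = topologicalSort(["c"], lambda node: parents[node])
print(order)  # ['a', 'b', 'c'] -- every node appears after its parents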
eec46378dc2282447ff1567945334b6cf18dc180
5,871
def get_headers(metric_resource: MetricResource): """ Get the headers to be used in the REST query for the given metric. """ headers = {} # no headers will be used if metric_resource.spec.headerTemplates is None: return headers, None # initialize headers dictionary for item in metric_resource.spec.headerTemplates: headers[item.name] = item.value # if authType is None, interpolation is not attempted if metric_resource.spec.authType is None: return headers, None # if authType is Basic, interpolation is not attempted if metric_resource.spec.authType == AuthType.BASIC: return headers, None # if there is no secret referenced, interpolation is not attempted if metric_resource.spec.secret is None: return headers, None # args contain decoded secret data for header template interpolation args, err = get_secret_data_for_metric(metric_resource) if err is None: for key in headers: headers[key], err = interpolate(headers[key], args) if err is not None: return None, err return headers, None return None, err
00ab2000ef83f12ebcdc26d834b285ca1ab2da40
5,872
from typing import Iterable

import pandas as pd


def indra_upstream_ora(
    client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
    """
    Calculate a p-value for each entity in the INDRA database based on the
    set of genes that it regulates and how they compare to the query gene set.
    """
    count = count_human_genes(client=client)
    return _do_ora(
        get_entity_to_targets(client=client),
        gene_ids=gene_ids,
        count=count,
        **kwargs
    )
1e95b4dc329d09055e0f441f3ddef3614a693005
5,873
def _take_photo(gopro_instance, interval_secs=600):
    """ Take a photo. This function is still in development. """
    try:
        img = gopro_instance.take_photo()
        return img
    except TypeError:
        tl.send_alert()
        return False
    except:
        tl.send_alert(
            message='🆘*E️rror desconocido*, se requiere soporte técnico urgente!'
        )
        return False
    #time.sleep(interval_secs)
    #time_lapse(gopro_instance, interval_secs)
04b59957c513eee44e487ba7f86bf296a0c19150
5,875
import time def evaluate( forecaster, cv, y, X=None, strategy="refit", scoring=None, return_data=False ): """Evaluate forecaster using cross-validation Parameters ---------- forecaster : sktime.forecaster Any forecaster cv : sktime.SlidingWindowSplitter or sktime.ExpandingWindowSplitter Splitter of how to split the data into test data and train data y : pd.Series Target time series to which to fit the forecaster. X : pd.DataFrame, optional (default=None) Exogenous variables strategy : str, optional Must be "refit" or "update", by default "refit". The strategy defines whether forecaster is only fitted on the first train window data and then updated or always refitted. scoring : object of class MetricFunctionWrapper from sktime.performance_metrics, optional. Example scoring=sMAPE(). Used to get a score function that takes y_pred and y_test as arguments, by default None (if None, uses sMAPE) return_data : bool, optional Returns three additional columns in the DataFrame, by default False. The cells of the columns contain each a pd.Series for y_train, y_pred, y_test. Returns ------- pd.DataFrame DataFrame that contains several columns with information regarding each refit/update and prediction of the forecaster. Examples -------- >>> from sktime.datasets import load_airline >>> from sktime.performance_metrics.forecasting import evaluate >>> from sktime.forecasting.model_selection import ExpandingWindowSplitter >>> from sktime.forecasting.naive import NaiveForecaster >>> y = load_airline() >>> forecaster = NaiveForecaster(strategy="drift", sp=12) >>> cv = ExpandingWindowSplitter( initial_window=24, step_length=12, fh=[1,2,3,4,5,6,7,8,9,10,11,12] ) >>> evaluate(forecaster=forecaster, y=y, cv=cv) """ cv = check_cv(cv) y = check_y(y) _check_strategies(strategy) scoring = check_scoring(scoring) results = pd.DataFrame() cv.start_with_window = True for i, (train, test) in enumerate(cv.split(y)): # get initial window, if required if i == 0 and cv.initial_window and strategy == "update": train, test = cv.split_initial(y) # this might have to be directly handled in split_initial() test = test[: len(cv.fh)] # create train/test data y_train = y.iloc[train] y_test = y.iloc[test] X_train = X.iloc[train] if X else None X_test = X.iloc[test] if X else None # fit/update start_fit = time.time() if strategy == "refit" or i == 0: forecaster.fit( y=y_train, X=X_train, fh=ForecastingHorizon(y_test.index, is_relative=False), ) else: # strategy == "update" and i != 0: forecaster.update(y=y_train, X=X_train) fit_time = time.time() - start_fit # predict start_pred = time.time() y_pred = forecaster.predict( fh=ForecastingHorizon(y_test.index, is_relative=False), X=X_test ) pred_time = time.time() - start_pred # save results results = results.append( { "test_" + scoring.__class__.__name__: scoring(y_pred, y_test), "fit_time": fit_time, "pred_time": pred_time, "len_train_window": len(y_train), "cutoff": forecaster.cutoff, "y_train": y_train if return_data else np.nan, "y_test": y_test if return_data else np.nan, "y_pred": y_pred if return_data else np.nan, }, ignore_index=True, ) # post-processing of results if not return_data: results = results.drop(columns=["y_train", "y_test", "y_pred"]) results["len_train_window"] = results["len_train_window"].astype(int) return results
ea9663c942ee71c40674c64196b3ced5f61a2c2c
5,876
def euler(step, y0):
    """
    Implements Euler's method for the differential equation
    dy/dx = 1/(2(y-1)) on the interval [0,4]
    """
    x = [0]
    index_x = 0
    while x[index_x] < 4:
        x.append(x[index_x] + step)
        index_x += 1

    index_y = 0
    y = [y0]

    def yprime(y):
        yprime = 1 / (2 * (y - 1))
        return yprime

    while index_y < index_x:
        y.append(y[index_y] + step * yprime(y[index_y]))
        index_y += 1

    return x, y
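# A small check with illustrative arguments (function above assumed in scope).
# With y(0) = 2 the exact solution of dy/dx = 1/(2(y-1)) is y = 1 + sqrt(x + 1),
# so the last value should be close to 1 + sqrt(5) ≈ 3.236:
xs, ys = euler(0.05, 2.0)
print(xs[-1], ys[-1])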
89c6e6409a1c43ce4766507fba2f401bb01cfbb8
5,877
import random


def generate_random_solution():
    """generate_random_solution()
    Generates a random solution of random characters from [ ,!,..A..Z..a..z...~]."""
    global answer
    # codes for chars [ ,!..A..Z..a..z..~]
    chars = list(range(32, 127))
    solution = []
    while len(solution) < len(answer):
        # generate random solutions to length of the true answer
        solution.append(random.choice(chars))
    return solution
534a4a249bbbbc9e285b3dc9ccc5010413239b66
5,879
def getTV_Info():
    """
    Get the TeamViewer ID and password.

    Use Spy++ to read the information about the target program's child windows
    and control classes, then use the win32 API to read the contents of the
    text boxes.

    Note:
        # FindWindowEx() can only find direct child windows, so the lookup has
        # to be done level by level.
        # Its second parameter indicates the child window after which to continue
        # searching, used when two child windows share the same class name.

    Reference:
        https://github.com/wuxc/pywin32doc/blob/master/md/win32gui.md#win32guifindwindowex
    """
    # Get the relevant window handles
    id_hwnd, pwd_hwnd = get_Hwnd()
    ID = get_Text(id_hwnd)
    # If the data has not been generated yet, read it again
    while len(ID) < 6:
        # Make sure TeamViewer itself is running normally
        id_hwnd, pwd_hwnd = get_Hwnd()
        ID = get_Text(id_hwnd)
    Password = get_Text(pwd_hwnd)
    print("ID:", ID, "Password:", Password)
    return ID, Password
2d6de5029eda4b447d9fa87c271e18fe94148dc9
5,881
import jieba


def tokenize_words(text):
    """Word segmentation"""
    output = []
    sentences = split_2_short_text(text, include_symbol=True)
    for sentence, idx in sentences:
        if is_chinese_string(sentence):
            output.extend(jieba.lcut(sentence))
        else:
            output.extend(whitespace_tokenize(sentence))
    return output
f50c963316927a8051489a22cae674b19ab7b0d5
5,882
def segmentation_model_func(output_channels, backbone_name, backbone_trainable=True): """ Creates a segmentation model with the tf.keras functional api. Args: output_channels: number of output_channels (classes) backbone_name: name of backbone; either: 'vgg19', 'resnet50', 'resnet50v2', 'mobilenetv2', 'resnet101' Returns: tf.keras functional model """ down_stack = create_backbone(name=backbone_name, set_trainable=backbone_trainable) skips = [down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][0]).output, down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][1]).output, down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][2]).output, down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][3]).output, down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][4]).output] up_stack_filters = [64, 128, 256, 512] x = skips[-1] skips = reversed(skips[:-1]) up_stack_filters = reversed(up_stack_filters) # Upsampling and establishing the skip connections for skip, filters in zip(skips, up_stack_filters): x = simple_upblock(x, filters, 3, 'up_stack' + str(filters)) x = tf.keras.layers.Concatenate()([x, skip]) # x = simple_upblock_func(x, 32, 3, 'up_stack' + str(32)) x = tf.keras.layers.UpSampling2D(2)(x) x = tf.keras.layers.Conv2D(32, 3, activation='relu', padding='same')(x) x = tf.keras.layers.Conv2D(output_channels, 1, activation='softmax', padding='same', name='final_output')(x) return tf.keras.Model(inputs=down_stack.layers[0].input, outputs=x)
31f46e0fcde797c22c07abeabbf4e4879bddf180
5,883
def EventAddKwargs(builder, kwargs):
    """This method is deprecated. Please switch to AddKwargs."""
    return AddKwargs(builder, kwargs)
b19aa256819f3be1b018baf469b72293a08fa4db
5,884
import json def generate_sidecar(events, columns_selected): """ Generate a JSON sidecar template from a BIDS-style events file. Args: events (EventInput): An events input object to generate sidecars from. columns_selected (dict): A dictionary of columns selected. Returns: dict: A dictionary of results in standard format including either the generated sidecar string or errors. """ columns_info = BidsTsvSummary.get_columns_info(events.dataframe) hed_dict = {} for column_name, column_type in columns_selected.items(): if column_name not in columns_info: continue if column_type: column_values = list(columns_info[column_name].keys()) else: column_values = None hed_dict[column_name] = generate_sidecar_entry(column_name, column_values=column_values) display_name = events.name file_name = generate_filename(display_name, name_suffix='_generated', extension='.json') return {base_constants.COMMAND: base_constants.COMMAND_GENERATE_SIDECAR, base_constants.COMMAND_TARGET: 'events', 'data': json.dumps(hed_dict, indent=4), 'output_display_name': file_name, 'msg_category': 'success', 'msg': 'JSON sidecar generation from event file complete'}
4fada8d65eab69384cb1d1f26f888d40fd0cea90
5,885
def _filter_artifacts(artifacts, relationships): """ Remove artifacts from the main list if they are a child package of another package. Package A is a child of Package B if all of Package A's files are managed by Package B per its file manifest. The most common examples are python packages that are installed via dpkg or rpms. :param artifacts: :param relationships: :return: """ def filter_fn(artifact): # some packages are owned by other packages (e.g. a python package that was installed # from an RPM instead of with pip), filter out any packages that are not "root" packages. if _filter_relationships( relationships, child=dig(artifact, "id"), type="ownership-by-file-overlap" ): return False return True return [a for a in artifacts if filter_fn(a)]
642f16fd4b9784288a283a21db8632cc11af6cba
5,886
import numpy as np
from math import cos, sin


def get_augmented_image_palette(img, nclusters, angle):
    """
    Return tuple of (Image, Palette) in LAB space color shifted by the angle parameter
    """
    lab = rgb2lab(img)
    ch_a = lab[..., 1]
    ch_b = lab[..., 2]

    theta = np.deg2rad(angle)
    rot = np.array([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])
    hue_rotate = lambda ab: np.dot(rot, [ab[0], ab[1]])

    ab = np.asarray(list(map(hue_rotate, zip(ch_a, ch_b)))).transpose((0, 2, 1))
    lab = np.dstack((lab[..., 0], ab[..., 0], ab[..., 1]))
    palette = kmeans_get_palette(lab, nclusters)
    return (lab, palette)
89cfc4f50a70be413aa6525b9b462924baaf9907
5,887
def squeeze__default(ctx, g, self, dim=None):
    """Register default symbolic function for `squeeze`.

    squeeze might be exported with an IF node in ONNX, which is not supported
    in lots of backends.
    """
    if dim is None:
        dims = []
        for i, size in enumerate(self.type().sizes()):
            if size == 1:
                dims.append(i)
    else:
        dims = [sym_help._get_const(dim, 'i', 'dim')]
    return g.op('Squeeze', self, axes_i=dims)
7ce7672f187f2d699cc378d00b1415007a2fe04b
5,889
from predefinedentities import BANNED_PREF_BRANCHES, BANNED_PREF_REGEXPS import re def _call_create_pref(a, t, e): """ Handler for pref() and user_pref() calls in defaults/preferences/*.js files to ensure that they don't touch preferences outside of the "extensions." branch. """ if not t.im_self.filename.startswith("defaults/preferences/") or len(a) == 0: return value = str(t(a[0]).get_literal_value()) for banned in BANNED_PREF_BRANCHES: if value.startswith(banned): return ("Extensions should not alter preferences in the '%s' " "preference branch" % banned) for banned in BANNED_PREF_REGEXPS: if re.match(banned, value): return ("Extensions should not alter preferences matching /%s/" % banned) if not value.startswith("extensions.") or value.rindex(".") < len("extensions."): return ("Extensions should not alter preferences outside of the " "'extensions.' preference branch. Please make sure that " "all of your extension's preferences are prefixed with " "'extensions.add-on-name.', where 'add-on-name' is a " "distinct string unique to and indicative of your add-on.")
90ceef343ead469da5fb078b45ee30c87fceb84b
5,890
def pig_action_utility(state, action, utility):
    """The expected value of choosing action in state. Assumes the opponent
    also plays with an optimal strategy.

    An action is one of ["roll", "hold", "accept", "decline", "double"]
    """
    if action == 'roll':
        one = iter([1])
        rest = iter([2, 3, 4, 5, 6])
        return (-utility(do(action, state, one)) +
                sum(utility(do(action, state, rest)) for _ in range(5))) / 6.0
    else:
        return -utility(do(action, state, fair_die_rolls()))
c2e06a074f5fefd62f8a810e338bb7938d1cf6fd
5,891
from urllib.parse import urlparse, urlunparse


def to_canonical_url(url):
    """
    Converts a url into a "canonical" form, suitable for hashing.
    Keeps only scheme, domain and path. Ignores url query, fragment,
    and all other parts of the url.
    :param url: a string
    :return: a string
    """
    parsed_url = urlparse(url)
    return urlunparse([
        parsed_url.scheme,
        parsed_url.netloc,
        parsed_url.path,
        '', '', ''
    ])
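# A short illustration (function above assumed in scope; the URL is made up).
# Query and fragment are dropped, scheme/host/path are kept:
print(to_canonical_url("https://example.com/a/b?x=1#top"))
# https://example.com/a/b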
0991502fcd696308d0fe50a06a7fa5e2e12703af
5,892
from pydash import get from jobflow.utils.find import find_key_value from typing import Any def find_and_get_references(arg: Any) -> tuple[OutputReference, ...]: """ Find and extract output references. This function works on nested inputs. For example, lists or dictionaries (or combinations of list and dictionaries) that contain output references. Parameters ---------- arg The argument to search for references. Returns ------- tuple[OutputReference] The output references as a tuple. """ if isinstance(arg, OutputReference): # if the argument is a reference then stop there return tuple([arg]) elif isinstance(arg, (float, int, str, bool)): # argument is a primitive, we won't find a reference here return tuple() arg = jsanitize(arg, strict=True, enum_values=True) # recursively find any reference classes locations = find_key_value(arg, "@class", "OutputReference") # deserialize references and return return tuple(OutputReference.from_dict(get(arg, loc)) for loc in locations)
a2b1873ecd921afbb3d254c0a0fe4706c0ca5d12
5,893
import numpy as np


def get_fdr_thresh(p_values, alpha=0.05):
    """
    Calculate the false discovery rate (FDR) multiple comparisons correction
    threshold for a list of p-values.

    :param p_values: list of p-values
    :param alpha: the uncorrected significance level being used (default = 0.05)
    :type p_values: numpy array
    :type alpha: float

    :returns: The FDR correction threshold
    :rtype: float
    """
    sn = np.sort(p_values)
    sn = sn[np.isfinite(sn)]
    for i in range(len(sn)):
        p_crit = alpha * float(i + 1) / float(len(sn))
        if sn[i] <= p_crit:
            continue
        else:
            break
    return sn[i]
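# An illustrative call (numpy and the function above assumed in scope; the
# p-values are made up):
pvals = np.array([0.01, 0.02, 0.03, 0.5, 0.8])
print(get_fdr_thresh(pvals, alpha=0.05))
# 0.5 -- the first sorted p-value that exceeds its Benjamini-Hochberg critical
# value (i/n * alpha); p-values strictly below it pass the correction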
5182eef60be397fe9f13ecb4e5440adc1a9ffd00
5,894
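A small worked example for get_fdr_thresh with ten sorted p-values:

p_values = np.array([0.001, 0.008, 0.039, 0.041, 0.042,
                     0.060, 0.074, 0.205, 0.212, 0.216])
print(get_fdr_thresh(p_values, alpha=0.05))
# -> 0.039, the first sorted p-value exceeding its critical value (0.05 * 3 / 10 = 0.015)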
import typing


def _rescue_filter(
    flags: RescueRenderFlags, platform_filter: typing.Optional[Platforms], rescue: Rescue
) -> bool:
    """
    Determine whether the `rescue` object should be filtered out of rendering.

    Args:
        flags: render flags selecting which rescues to keep
        platform_filter: if set, the platform the rescue must be on
        rescue: the rescue being considered

    Returns:
        True if the rescue fails any of the active filters (i.e. should be
        skipped), False if it satisfies them all.
    """
    filters = []
    if flags.filter_unassigned_rescues:
        # return whether any rats are assigned
        # either properly or via unidentified rats
        filters.append(not (bool(rescue.rats) or bool(rescue.unidentified_rats)))

    # use the active bool on rescue if we don't want inactives, otherwise True
    if flags.filter_active_rescues:
        filters.append(rescue.active)
    if flags.filter_inactive_rescues:
        filters.append(not rescue.active)

    if platform_filter:
        # if we are filtering on platform
        filters.append(rescue.platform is platform_filter)
    return not all(filters)
bb192e4fd8eeb811bb681cec6e60956a71a1c15b
5,895
def penalty(precision, alpha, beta, psi): """Penalty for time-varying graphical lasso.""" if isinstance(alpha, np.ndarray): obj = sum(a[0][0] * m for a, m in zip(alpha, map(l1_od_norm, precision))) else: obj = alpha * sum(map(l1_od_norm, precision)) if isinstance(beta, np.ndarray): obj += sum(b[0][0] * m for b, m in zip(beta, map(psi, precision[1:] - precision[:-1]))) else: obj += beta * psi(precision[1:] - precision[:-1]) return obj
e8563c82cb51a5e3efa25fac5647b782abecabdf
5,896
from propy.CTD import CalculateCTD
from typing import Optional
from typing import List

import networkx as nx


def all_ctd_descriptors(
    G: nx.Graph, aggregation_type: Optional[List[str]] = None
) -> nx.Graph:
    """
    Calculate all CTD descriptors based on seven different properties of AADs.

    :param G: Protein Graph to featurise
    :type G: nx.Graph
    :param aggregation_type: Aggregation types to use over chains
    :type aggregation_type: Optional[List[str]]
    :return: Protein Graph with ctd_descriptors feature added.
        G.graph["ctd_descriptors_{chain | aggregation_type}"]
    :rtype: nx.Graph
    """
    func = CalculateCTD
    feature_name = "ctd_descriptors"
    return compute_propy_feature(
        G,
        func=func,
        feature_name=feature_name,
        aggregation_type=aggregation_type,
    )
71dc1559b1d3f3a682e1a2107b0cd9fb49c57b9e
5,897
def get_report_hash(report: Report, hash_type: HashType) -> str: """ Get report hash for the given diagnostic. """ hash_content = None if hash_type == HashType.CONTEXT_FREE: hash_content = __get_report_hash_context_free(report) elif hash_type == HashType.PATH_SENSITIVE: hash_content = __get_report_hash_path_sensitive(report) elif hash_type == HashType.DIAGNOSTIC_MESSAGE: hash_content = __get_report_hash_diagnostic_message(report) else: raise Exception("Invalid report hash type: " + str(hash_type)) return __str_to_hash('|||'.join(hash_content))
6f0ba5edfcc49daa9f700857e8b6ba5cd5f7d1ba
5,898
import json def parse_json_file(json_file_path, allow_non_standard_comments=False): """ Parse a json file into a utf-8 encoded python dictionary :param json_file_path: The json file to parse :param allow_non_standard_comments: Allow non-standard comment ('#') tags in the file :return: Dictionary representation of the json file """ def _decode_list(list_data): rv = [] for item in list_data: if isinstance(item, unicode): item = item.encode('utf-8') elif isinstance(item, list): item = _decode_list(item) elif isinstance(item, dict): item = _decode_dict(item) rv.append(item) return rv def _decode_dict(dict_data): rv = {} for key, value in dict_data.iteritems(): if isinstance(key, unicode): key = key.encode('utf-8') if isinstance(value, unicode): value = value.encode('utf-8') elif isinstance(value, list): value = _decode_list(value) elif isinstance(value, dict): value = _decode_dict(value) rv[key] = value return rv try: if allow_non_standard_comments: # If we are reading non-standard json files where we are accepting '#' as comment tokens, then the # file must have CR/LF characters and will be read in line by line. with open(json_file_path) as json_file: json_lines = json_file.readlines() json_file_content = "" for json_line in json_lines: comment_index = json_line.find('#') literal_pound_index = json_line.find('##') if comment_index>=0 and comment_index != literal_pound_index: processed_line = json_line.split('#')[0].strip() else: if literal_pound_index>=0: processed_line = json_line.replace('##','#').strip() else: processed_line = json_line.strip() json_file_content += processed_line else: with open(json_file_path) as json_file: json_file_content = json_file.read() json_file_data = json.loads(json_file_content, object_hook=_decode_dict) return json_file_data except Exception as e: raise ValueError('Error reading {}: {}'.format(json_file_path, e.message))
0df1108aedb60f0b0e0919c6cc7a66dd736ff8ac
5,899
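A usage sketch for parse_json_file (note the function targets Python 2: `unicode` and `iteritems`); the file name and key below are hypothetical:

# '#' starts a comment, '##' escapes a literal '#'
settings = parse_json_file('_build_settings.json', allow_non_standard_comments=True)
print(settings.get('out_folder'))  # hypothetical key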
import logging def update_softwaretitle_packages(api, jssid, pkgs): """ Update packages of software title :param jssid: Patch Software Title ID :param pkgs: dict of {version: package, ...} :returns: None """ logger = logging.getLogger(__name__) data = api.get(f"patchsoftwaretitles/id/{jssid}") title = data['patch_software_title'] title_name = title['name'] logger.info(f"updating patch software title: {title_name} ({jssid})") # single version (dict), multiple versions (list) version = title['versions']['version'] _modified = False try: # access key of single version and count on TypeError being raised v = version['software_version'] if v in pkgs.keys(): version['package'] = {'name': pkgs[v]} _modified = True except TypeError: # looks like it was actually a list for _version in version: v = _version['software_version'] if v in pkgs.keys(): _version['package'] = {'name': pkgs[v]} _modified = True if _modified: result = api.put(f"patchsoftwaretitles/id/{jssid}", data) logger.info(f"succesfully updated: {title_name}") return result else: logger.info(f"software title was not modified")
0acb3dfbff0e85a2e8a876d5e5d484c4d1e52068
5,900
from typing import List def get_balances(session: Session, redis: Redis, user_ids: List[int]): """Gets user balances. Returns mapping { user_id: balance } Enqueues in Redis user balances requiring refresh. """ # Find user balances query: List[UserBalance] = ( (session.query(UserBalance)).filter(UserBalance.user_id.in_(user_ids)).all() ) # Construct result dict from query result result = { user_balance.user_id: { "owner_wallet_balance": user_balance.balance, "associated_wallets_balance": user_balance.associated_wallets_balance, "associated_sol_wallets_balance": user_balance.associated_sol_wallets_balance, "waudio_balance": user_balance.waudio, "total_balance": str( int(user_balance.balance) + int(user_balance.associated_wallets_balance) + int(user_balance.associated_sol_wallets_balance) * 10 ** WAUDIO_DECIMALS + int(user_balance.waudio) * 10 ** WAUDIO_DECIMALS ), } for user_balance in query } # Find user_ids that don't yet have a balance user_ids_set = set(user_ids) fetched_user_ids_set = {x.user_id for x in query} needs_balance_set = user_ids_set - fetched_user_ids_set # Add new balances to result set no_balance_dict = { user_id: { "owner_wallet_balance": "0", "associated_wallets_balance": "0", "associated_sol_wallets_balance": "0", "total_balance": "0", "waudio_balance": "0", } for user_id in needs_balance_set } result.update(no_balance_dict) # Get old balances that need refresh needs_refresh = [ user_balance.user_id for user_balance in query if does_user_balance_need_refresh(user_balance) ] # Enqueue new balances to Redis refresh queue # 1. All users who need a new balance # 2. All users who need a balance refresh enqueue_lazy_balance_refresh(redis, list(needs_balance_set) + needs_refresh) return result
82f6fdf0fcc8bcd241c97ab50a89ba640793b704
5,901
from typing import Optional
from typing import Tuple

from numpy import ndarray


def kmeans(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
    """Partition observations into k clusters.

    Parameters
    ----------
    observations : ndarray, `shape (N, 2)` or `shape (N, 3)`
        An array of observations (x, y) to be clustered.
        Data should be provided as: `[(x, y), (x, y), (x, y), ...]`
        or `[(x, y, z), (x, y, z), (x, y, z), ...]`
    k : int, optional
        Amount of clusters to partition observations into, by default 5

    Returns
    -------
    center : ndarray, `shape (k, 2)` or `shape (k, 3)`
        An array of positions to center of each cluster.
    count : ndarray, `shape (k, )`
        Array of counts of datapoints closest to the center of its cluster.

    Examples
    --------
    >>> observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
    >>> center, count = kmeans_2d(observations, k=2)
    >>> center
    [[-4, -4], [5, -3]]
    >>> count
    [1, 4]
    """
    if not isinstance(observations, ndarray):
        raise TypeError("Observations must be a ndarray.")
    if observations.shape[-1] == 3:
        return kmeans_3d(observations, k)
    elif observations.shape[-1] == 2:
        return kmeans_2d(observations, k)
    else:
        # neither 2-D nor 3-D points: fail loudly instead of returning None
        raise ValueError("Observations must have shape (N, 2) or (N, 3).")
1a8cb2e61e8d96a45d4165edf1b148fd7c8ab5e3
5,902
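A usage sketch for kmeans, mirroring its docstring; kmeans_2d and kmeans_3d are helpers assumed to be defined alongside it:

import numpy as np

observations = np.array([[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]])
center, count = kmeans(observations, k=2)  # (N, 2) input dispatches to kmeans_2d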
def encloses(coord, points):
    """Return True if ``coord`` lies within the polygon defined by ``points``.

    Both inputs are scaled by CLIPPER_SCALE before calling Clipper's
    point-in-polygon test, which returns non-zero when the point is inside
    the polygon or on its boundary.
    """
    sc = constants.CLIPPER_SCALE
    coord = st(coord.to_list(), sc)
    points = st(points, sc)
    return pyclipper.PointInPolygon(coord, points) != 0
d5d7aeb8f52087653027d57c7a718832dbf32200
5,903
def arpls(y, lam, ratio=1e-6, niter=1000, progressCallback=None): """ Return the baseline computed by asymmetric reweighted penalized least squares smoothing, arPLS. Ref: Baseline correction using asymmetrically reweighted penalized least squares smoothing Sung-June Baek, Aaron Park, Young-Jin Ahn and Jaebum Choo Analyst, 2015, 140, 250-257. DOI: 10.1039/C4AN01061B In this implementation, W is not squared so p carries the same meaning as in AsLS. Parameters: y: one spectrum to correct, or multiple as an array of shape (spectrum, wavenumber) lam: lambda, the smoothness parameter ratio: convergence criterion; target relative change in weights between iterations niter: maximum number of iterations progressCallback(int a, int b): callback function called to indicated that the processing is complete to a fraction a/b. Returns: baseline of the spectrum, measured at the same points """ L = y.shape[-1] D = sparse.csc_matrix(np.diff(np.eye(L), 2)) D = lam * D.dot(D.T) def arpls_one(yy): w = np.ones(L) for i in range(niter): W = sparse.spdiags(w, 0, L, L) z = sparse.linalg.spsolve(W + D, w * yy) d = yy - z dn = d[d < 0] s = dn.std() wt = 1. / (1 + np.exp(2 / s * (d - (2*s-dn.mean())))) if np.linalg.norm(w - wt) / np.linalg.norm(w) < ratio: break w = wt return z return mp_bgcorrection(arpls_one, y, progressCallback=progressCallback)
d149397827d89b8708a09f4ceb7c38c989d99e17
5,904
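A hedged usage sketch for arpls on a synthetic spectrum; mp_bgcorrection is an external helper the function depends on and is assumed available:

import numpy as np

x = np.linspace(0, 1000, 1000)
peak = np.exp(-0.5 * ((x - 400) / 15) ** 2)       # narrow Gaussian band
background = 0.5 + 1e-6 * (x - 500) ** 2          # slowly varying baseline
spectrum = peak + background
baseline = arpls(spectrum, lam=1e5)               # larger lam -> smoother baseline
corrected = spectrum - baseline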
async def test_function_raised_exception(dut): """ Test that exceptions thrown by @function coroutines can be caught """ @cocotb.function async def func(): raise ValueError() @external def ext(): return func() with pytest.raises(ValueError): await ext()
acd31a1142dea0cd300861e75721a4597e2b5bbc
5,905
def dismiss_notification_mailbox(notification_mailbox_instance, username):
    """
    Dismiss a Notification Mailbox entry

    It deletes the Mailbox Entry for the user

    Args:
        notification_mailbox_instance (NotificationMailBox): notification_mailbox_instance
        username (string)

    Return:
        bool: Notification Mailbox Dismissed
    """
    profile_instance = get_self(username)

    NotificationMailBox.objects.filter(target_profile=profile_instance,
                                       pk=notification_mailbox_instance.id).delete()

    return True
9955361ac42c079adefcd8402fb9a1d5e3822a57
5,906
import operator

import numpy as np


def knn(x, y, k, predict_x):
    """
    k-nearest-neighbours classification using Euclidean distance.

    :param x: training samples
    :param y: training labels
    :param k: number of neighbours to consider
    :param predict_x: sample to classify
    :return: (label, vote_count) for the most common label among the k nearest neighbours
    """
    assert isinstance(y, np.ndarray)
    y = y.flatten('F')

    def cal_distance(a, b):
        return np.sqrt(np.sum(np.power(a - b, 2), axis=0))

    dists = {}
    for (index, sample) in enumerate(x):
        dists[index] = cal_distance(sample, predict_x)
    k_sample = sorted(dists.items(), key=operator.itemgetter(1))[:k]
    k_labels = y[[key for (key, value) in k_sample]]
    counters = {}
    for label in k_labels:
        if label not in counters:
            counters[label] = 1
        else:
            counters[label] += 1
    # sort votes in descending order so the most frequent label comes first
    return sorted(counters.items(), key=operator.itemgetter(1), reverse=True)[0]
425095898acce2fc966d00d4ba6bc8716f1062f8
5,907
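A runnable sketch for knn with a tiny two-class training set:

x = np.array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.5, 7.5]])
y = np.array([0, 0, 1, 1])
label, votes = knn(x, y, k=3, predict_x=np.array([1.1, 0.9]))
# -> label 0 with 2 of the 3 nearest-neighbour votes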
def piano(): """A piano instrument.""" return lynames.Instrument('Piano', abbr='Pno.', transposition=None, keyboard=True, midi='acoustic grand', family='percussion', mutopianame='Piano')
792a1dd3655ac038bdde27f9d1ad27451e2b9121
5,908
from typing import Any
from typing import Dict

from django.db.models import Field, ForeignKey
from django.db.models.fields.related import RelatedField


def extract_fields(obj: Any) -> Dict[str, Any]:
    """A recursive function that extracts all fields in a Django model, including related fields (e.g. many-to-many)

    :param obj: A Django model
    :return: A dictionary containing fields and associated values
    """
    sub_content = {}
    if obj is not None:
        # Gets a list of any Django model fields
        fields = type(obj)._meta.get_fields()

        for field in fields:
            if issubclass(field.__class__, ForeignKey):
                sub_content[field.name] = extract_fields(getattr(obj, field.name))
            elif issubclass(field.__class__, RelatedField):
                sub_content[field.name] = [extract_fields(sub_obj)
                                           for sub_obj in list(getattr(obj, field.name).all())]
            elif issubclass(field.__class__, Field):
                sub_content[field.name] = getattr(obj, field.name)

    return sub_content
bc6b45a82ab2a336e116ce528aaed45b2b77ef39
5,909
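A hedged usage sketch for extract_fields inside a configured Django project; Book and its author relation are hypothetical models:

book = Book.objects.select_related("author").first()   # hypothetical model
data = extract_fields(book)
# plain fields map to their values; foreign keys recurse into nested dicts;
# many-to-many relations become lists of nested dicts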
from re import T def decomposeM(modified): """Auxiliary in provenance filtering: split an entry into name and date.""" splits = [m.rsplit(ON, 1) for m in modified] return [(m[0], dtm(m[1].replace(BLANK, T))[1]) for m in splits]
1f613d11d2f8c3ceec4f6c853b9412b5b7eb3e0c
5,910
def get_2d_peaks_coords( data: np.ndarray, size: int = None, threshold: float = 0.5 ) -> np.ndarray: """Detect peaks in image data, return coordinates. If neighborhoods size is None, default value is the highest value between 50 pixels and the 1/40th of the smallest image dimension. Detection threshold is relative to difference between data maximum and minimum. """ if size is None: size = max(min(data.shape) // 40, 50) data_max = spf.maximum_filter(data, size) data_min = spf.minimum_filter(data, size) data_diff = data_max - data_min abs_threshold = (data_diff.max() - data_diff.min()) * threshold diff = (data_max - data_min) > abs_threshold maxima = data == data_max maxima[diff == 0] = 0 labeled, _num_objects = spi.label(maxima) slices = spi.find_objects(labeled) coords = [] for dy, dx in slices: x_center = int(0.5 * (dx.start + dx.stop - 1)) y_center = int(0.5 * (dy.start + dy.stop - 1)) coords.append((x_center, y_center)) if len(coords) > 1: # Eventually removing duplicates dist = distance_matrix(coords) for index in reversed(np.unique(np.where((dist < size) & (dist > 0))[1])): coords.pop(index) return np.array(coords)
815979bd0105acc7bb3fb58db691a8963d9ca2f4
5,912
def border_positions_from_texts(texts, direction, only_attr=None): """ From a list of textboxes in <texts>, get the border positions for the respective direction. For vertical direction, return the text boxes' top and bottom border positions. For horizontal direction, return the text boxes' left and right border positions. <direction> must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL from pdftabextract.common. optional <only_attr> must be either 'low' (only return 'top' or 'left' borders) or 'high' (only return 'bottom' or 'right'). Border positions are returned as sorted NumPy array. """ if direction not in (DIRECTION_HORIZONTAL, DIRECTION_VERTICAL): raise ValueError("direction must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL (see pdftabextract.common)") if only_attr is not None and only_attr not in ('low', 'high'): raise ValueError("only_attr must be either 'low' or 'high' if not set to None (default)") if direction == DIRECTION_VERTICAL: attr_lo = 'top' attr_hi = 'bottom' else: attr_lo = 'left' attr_hi = 'right' positions = [] for t in texts: if only_attr is None or only_attr == 'low': positions.append(t[attr_lo]) if only_attr is None or only_attr == 'high': positions.append(t[attr_hi]) return np.array(sorted(positions))
8b0f57e21b015b6092104454195254861432b610
5,913
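A usage sketch for border_positions_from_texts; the text boxes are hypothetical dicts with the top/bottom/left/right keys the function reads, and numpy is assumed imported as np as the function requires:

import numpy as np
from pdftabextract.common import DIRECTION_VERTICAL

texts = [
    {'top': 10, 'bottom': 22, 'left': 5, 'right': 60},
    {'top': 30, 'bottom': 41, 'left': 5, 'right': 58},
]
print(border_positions_from_texts(texts, DIRECTION_VERTICAL))
# -> [10 22 30 41]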
def progress(self): """Check if foo can send to corge""" return True
89a0c9671645f9fa855db35bf5e383145d6b7616
5,914
def write_sample_sdf(input_file_name, valid_list): """ Function for writing a temporary file with a subset of pre-selected structures :param input_file_name: name of input file :param valid_list: list of indexes of pre-selected structures :return: name of subsampled file """ sample_file_name = '{}_sample.sdf'.format(input_file_name.split('.')[0]) sample_file = open(sample_file_name, 'w') mol = [] i = 0 for line in open(input_file_name): mol.append(line) if line[:4] == '$$$$': i += 1 if i in valid_list: for mol_line in mol: sample_file.write(mol_line) valid_list.remove(i) mol = [] else: mol = [] sample_file.close() return sample_file_name
0b22c14452f6de978e7ea811d761195d92bfe6c4
5,915
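A usage sketch for write_sample_sdf; the input SDF file name is hypothetical:

# keep the 2nd and 5th molecules (indices are 1-based, matching the '$$$$' counter);
# note that the index list is mutated in place as structures are written
subset = write_sample_sdf('ligands.sdf', [2, 5])
print(subset)  # -> ligands_sample.sdf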
import math

import numpy as np


def rotx(theta, unit="rad"):
    """
    ROTX gives rotation about X axis

    :param theta: angle for rotation matrix
    :param unit: unit of input passed. 'rad' or 'deg'
    :return: rotation matrix

    rotx(THETA) is an SO(3) rotation matrix (3x3) representing a rotation
    of THETA radians about the x-axis
    rotx(THETA, "deg") as above but THETA is in degrees
    """
    check_args.unit_check(unit)
    if unit == "deg":
        theta = theta * math.pi / 180
    ct = math.cos(theta)
    st = math.sin(theta)
    mat = np.matrix([[1, 0, 0], [0, ct, -st], [0, st, ct]])
    mat = np.asmatrix(mat.round(15))
    return mat
b05a6116c64837de163ad26dc36ffe1a7166635d
5,916
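A usage sketch for rotx; check_args is an external validator the function calls and is assumed importable:

R = rotx(90, unit="deg")
# R is the 3x3 rotation matrix
# [[ 1.  0.  0.]
#  [ 0.  0. -1.]
#  [ 0.  1.  0.]]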
from typing import Sequence def _table(*rows: Sequence) -> str: """ >>> _table(['a', 1, 'c', 1.23]) '|a|1|c|1.23|' >>> _table(['foo', 0, None]) '|foo|||' >>> print(_table(['multiple', 'rows', 0], ['each', 'a', 'list'])) |multiple|rows|| |each|a|list| """ return '\n'.join([ '|'.join(['', *[str(cell or '') for cell in row], '']) for row in rows ])
d566da2ad9240e73b60af00d3e4b4e25607234b4
5,917
def trunc(s, n): """ Truncate a string to N characters, appending '...' if truncated. trunc('1234567890', 10) -> '1234567890' trunc('12345678901', 10) -> '1234567890...' """ if not s: return s return s[:n] + '...' if len(s) > n else s
0f3c9f03f566f9f50a557f6b5592ec20a12e92bc
5,918
def cs_geo(): """Geographic lat/lon coordinates in WGS84 datum. """ cs = CSGeo() cs.inventory.datumHoriz = "WGS84" cs.inventory.datumVert = "mean sea level" cs.inventory.spaceDim = 2 cs._configure() cs.initialize() return cs
28df90e7b1490d681c9d13f4604dbc3966d896dc
5,920
def make_range(value): """ Given an integer 'value', return the value converted into a range. """ return range(value)
385d23eaebd04249f9384e0d592b7fb3a9bbb457
5,921
def run(actor, observer, content): """ Shortcut to run an Onirim and return the result. Returns: True if win, False if lose, None if other exception thrown. """ return Flow(Core(actor, observer, content)).whole()
03b1dee5bd993d8a88debd558878de5a32e9c318
5,922
def GetPoseBoneFCurveFromArmature(armatureObj, poseBoneName, data_path, parameterIndex): """ In Blender the FCurves are used to define the Key Frames. In general, for a single object, there's one FCurve for each of the following properties. data_path, index 'location', 0 (.x) 'location', 1 (.y) 'location', 2 (.z) 'rotation_quaternion', 0 (.w) 'rotation_quaternion', 1 (.x) 'rotation_quaternion', 2 (.y) 'rotation_quaternion', 3 (.z) 'scale', 0 (.x) 'scale', 1 (.y) 'scale', 2 (.z) For more tips about this, see: https://docs.blender.org/api/blender_python_api_2_75_release/info_quickstart.html#animation Returns a bpy.types.FCurve """ completePath = BuildPoseBoneFCurveDataPath(poseBoneName, data_path) return armatureObj.animation_data.action.fcurves.find(completePath, index=parameterIndex)
450d98306adf43ea171dffa0fe6afa71ebabce57
5,923
def get_document_instance(conf=None): """ Helper function to get a database Document model instance based on CLA configuration. :param conf: Same as get_database_models(). :type conf: dict :return: A Document model instance based on configuration specified. :rtype: cla.models.model_interfaces.Document """ return get_database_models(conf)['Document']()
054f6ff6acc38ed44a9bd2a97e0598ed34b322f8
5,924
from typing import List def get_full_private_keys(gpg: gnupg.GPG) -> List[GPGKey]: """Get a list of private keys with a full private part. GPG supports exporting only the subkeys for a given key, and in this case a stub of the primary private key is also exported (the stub). This stub cannot be used to do anything with the primary key, so it's useful to list only keys that can actually be used. :param gpg: The GPG interface used by the gnupg library :return: The list of fully available private keys in the keyring """ return [key for key in get_private_keys(gpg) if key.key_token == KeyToken.FULL]
d2bbb248613c3be9ed103212e0ca2a433de07e03
5,925
def create_blueprint(): """Creates a Blueprint""" blueprint = Blueprint('Health Check Blueprint', __name__) blueprint.route('/')(healthcheck.healthcheck) return blueprint
348c6ff172bb0d230d83eab73dd451edba0d1b00
5,926
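A usage sketch for create_blueprint; the Flask app and URL prefix are arbitrary:

from flask import Flask

app = Flask(__name__)
app.register_blueprint(create_blueprint(), url_prefix='/health')
# GET /health/ is now served by healthcheck.healthcheck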
def playable_card(card, fireworks, n_colors):
    # if isinstance(card, pyhanabi.HanabiCard):
    #     card = {'color':colors[card.color],'rank':card.rank}
    """A card is playable if it can be placed on the fireworks pile."""
    if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
            and card.rank != pyhanabi.HanabiCard.RankType.kUnknownRank):
        # color unknown, rank known: only playable if that rank is needed on every stack
        for color in range(n_colors):
            if fireworks[color] == card.rank:
                continue
            else:
                return False
        return True
    # elif card['color'] == None or card['rank'] == None:
    if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
            and card.rank == pyhanabi.HanabiCard.RankType.kUnknownRank):
        return False
    else:
        return card.rank == fireworks[card.color]
a96c6935c6b57ead9c639f13d8eccccbaf21aa4b
5,927