Columns: content — string (lengths 22 to 815k); id — int64 (0 to 4.91M)
def generate_run_base_dir(
    result_dir: str, timestamp: int = None, tag: str = None, sub_dirs: List[str] = None
) -> str:
    """
    Generate a base directory for each experiment run.
    Looks like this: result_dir/date_tag/sub_dir_1/.../sub_dir_n

    Args:
        result_dir (str): Experiment output directory.
        timestamp (int): Timestamp which will be included in the form of '%y-%m-%d_%H:%M'.
        tag (str): Tag after timestamp.
        sub_dirs (List[str]): List of subdirectories that should be created.

    Returns:
        str: Directory name.
    """
    if timestamp is None:
        timestamp = time.time()
    if sub_dirs is None:
        sub_dirs = []

    # Convert time
    date = datetime.datetime.fromtimestamp(timestamp)
    date_str = date.strftime("%y-%m-%d_%H:%M")

    # Append tag if given
    if tag is None:
        base_dir = date_str
    else:
        base_dir = date_str + "_" + tag

    # Create directory
    base_dir = os.path.join(result_dir, base_dir, *sub_dirs)
    return base_dir
5,356,300
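A minimal usage sketch for generate_run_base_dir above (added for illustration, not part of the dataset row): the result_dir, tag and sub_dirs values are assumptions, and the imports shown are the ones the function itself relies on.

import datetime
import os
import time
from typing import List

# Build a run directory name such as 'results/24-05-01_13:37_baseline/fold_0/ckpt'
base = generate_run_base_dir(
    result_dir="results",
    timestamp=time.time(),
    tag="baseline",
    sub_dirs=["fold_0", "ckpt"],
)
print(base)
os.makedirs(base, exist_ok=True)  # the helper only builds the path string; creating it is a separate step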
def mulaw_to_value(mudata):
    """Convert a mu-law encoded value to linear."""
    position = ((mudata & 0xF0) >> 4) + 5
    return ((1 << position) | ((mudata & 0xF) << (position - 4)) | (1 << (position - 5))) - 33
5,356,301
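A short worked example for mulaw_to_value (added for illustration, not from the source): for the byte 0xAB the high nibble gives position = 10 + 5 = 15, so the decoded value is (1<<15) | (0xB<<11) | (1<<10) minus the bias of 33.

decoded = mulaw_to_value(0xAB)
assert decoded == ((1 << 15) | (0xB << 11) | (1 << 10)) - 33 == 56287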
def init_exclusion_regexes(paths_ignore: Iterable[str]) -> Set[re.Pattern]:
    """
    Creates a set of compiled regexes for the ignored entries, which can come
    from 3 sources:
    - .gitguardian.yaml
    - files in .git
    - files ignored in .gitignore
    """
    res = set()
    for path in paths_ignore:
        if not is_pattern_valid(path):
            raise click.ClickException(f"{path} is not a valid exclude pattern.")
        res.add(re.compile(translate_user_pattern(path)))
    return res
5,356,302
def make_payments(): """Pay payments based on credits""" # i.e. [ { uid, addr_type, amount, address }, ... ] payments = [] now = database.walltime_to_db_time(time()) users = get_balances_and_thresholds() total_matured = 0 total_pending = 0 log.message('Building list of payments') for user in users: uid, wallet_addr, payment_threshold, credits_pending, credits_matured, debits = user confirmed_balance = credits_matured - debits total_matured += confirmed_balance total_pending += credits_pending if confirmed_balance < payment_threshold: continue # Limit the amount to pay to PAYMENTS_MAX_PAYMENT_AMOUNT because if # it is a really large amount, will get "tx not possible" amount_to_pay = min(confirmed_balance, PAYMENTS_MAX_PAYMENT_AMOUNT) wallet_info = validate(wallet_addr, COIN_ADDRESS_PREFIXES) if not wallet_info['valid']: log.error('User with uid %d has an invalid address %s, skipping...' % (uid, wallet_addr)) continue # Append to payments array payments.append({ 'uid': uid, 'addr_type': wallet_info['type'], 'amount': amount_to_pay, 'address': wallet_addr }) # sort payments by lowest amount first payments = sorted(payments, key=lambda k: k['amount']) log.message('Building list of payments... DONE') if not len(payments): log.message('No payments need to be made now') balance, unlocked_balance = wallet.get_balance() net_difference = balance - int(total_matured+total_pending) log.message('') log.message('Accounting check') log.message('Wallet:') log.message('==========================================================') log.message('| balance | unlocked | locked |') log.message('==========================================================') log.message('|%s|%s|%s|' % (str(balance).rjust(18), str(unlocked_balance).rjust(18), str(int(balance-unlocked_balance)).rjust(18))) log.message('==========================================================') log.message('') log.message('Owed to users:') log.message('==========================================================') log.message('| total | confirmed | unconfirmed |') log.message('==========================================================') log.message('|%s|%s|%s|' % (str(int(total_matured+total_pending)).rjust(18), str(total_matured).rjust(18), str(total_pending).rjust(18))) log.message('==========================================================') log.message('') log.message('Net (balance - owed): %d' % (net_difference,)) log.message('') if net_difference < -1 * PAYMENTS_WARNING_THRESHOLD: log.error('We owe more than we have in the wallet, quitting...') raise CriticalPaymentError() out_of_money = False # Continue building transactions until we run out of money or payees while not out_of_money and len(payments): balance, unlocked_balance = wallet.get_balance() log.message('Building transaction') log.message('Wallet has unlocked balance of: %d' % (unlocked_balance)) # payments that will be made in this transaction recipients = [] running_total = 0 if payments[0]['addr_type'] == 'integrated': log.message('This will be an exchange payment') if payments[0]['amount'] <= unlocked_balance: log.message('We have enough money') running_total = payments[0]['amount'] recipients = payments.pop(0) else: log.message('We do not have enough money') out_of_money = True break else: log.message('This will be a normal payment') i = 0 while len(recipients) < PAYMENTS_MAX_RECIPIENTS and i < len(payments): if payments[i]['addr_type'] == 'integrated': i += 1 continue if running_total + payments[i]['amount'] <= unlocked_balance: running_total += payments[i]['amount'] 
recipients.append(payments.pop(i)) else: out_of_money = True break if not out_of_money: log.message('We have enough money') elif len(recipients): log.message('We have enough money for partial payment') else: log.message('We do not have enough money') break log.message('Attempting transaction to pay %d users a total of %d' % (len(recipients), running_total)) fee_estimated = PAYMENTS_FEE_ADJ_FACTOR * fee.estimate_fee(recipients) fee_per_user = fee.split_fee(fee_estimated, len(recipients)) # this will hold recipient info with only amount and address for RPC recipients_rpc = [] for recipient in recipients: # subtract estimated fee for each user recipient['amount'] = int(recipient['amount'] - fee_per_user) # push this address into the wallet rpc list recipients_rpc.append({ 'amount': recipient['amount'], 'address': recipient['address'] }) # Make the actual transfer try: result = wallet.transfer(recipients_rpc) txid = result['tx_hash'] fee_actual = result['fee'] fee_actual_per_user = fee.split_fee(fee_actual, len(recipients)) log.message('Transaction success with txid %s' % (txid,)) log.message('Estimated fee - actual fee: %s - %s = %s' % (fee_estimated, fee_actual, fee_estimated - fee_actual)) except rpc.RpcError as re: log.error('Error transferring payment, reason: %s' % (re,)) log.error(recipients) # If RPC failed, we will still record debit with estimated fee and empty txid txid = None fee_actual_per_user = fee_per_user for recipient in recipients: uid = recipient['uid'] amount = recipient['amount'] # record payment and fee log.message('Debit user %s (amount, fee): %s %s' % (uid, amount, fee_actual_per_user)) if not record_payment(uid, txid, now, amount, fee_actual_per_user): log.error('Critical: failed to record payment for user %d' % (uid,)) raise CriticalPaymentError()
5,356,303
def validate_watch(value):
    """Validate "watch" parameter."""
    if not value:
        return None
    if isinstance(value, str):
        value = [_ for _ in value.split("\n") if _]
    return value
5,356,304
def diatomic_unitary(a, b, c):
    """
    Unitary decomposed as a diatomic gate of the form
    Ztheta + X90 + Ztheta + X90 + Ztheta
    """
    X90 = expm(-0.25j*np.pi*pX)
    return expm(-0.5j*a*pZ) @ X90 @ expm(-0.5j*b*pZ) @ X90 @ expm(-0.5j*c*pZ)
5,356,305
def is_x_y_in_hidden_zone_all_targets(room_representation, camera_id, x, y):
    """
    :description
        Extends the function is_x_y_in_hidden_zone_one_target by checking every target in the room.

    :param
        1. (RoomRepresentation) room_representation -- room description of the targets and the cameras
        2. (int) camera_id -- camera id, used to find the camera in the given room description
        3. (int) x -- x coordinate of a point in the room frame
        4. (int) y -- y coordinate of a point in the room frame

    :return / modify vector
        1. (bool) -- True if the point is hidden from the camera by at least one target
    """
    camera = find_cam_in_camera_representation(room_representation, camera_id)
    if camera is None:
        return False

    for target in room_representation.target_representation_list:
        xt = target.xc
        yt = target.yc
        radius = target.radius
        if is_x_y_in_hidden_zone_one_target(camera, x, y, xt, yt, radius):
            return True
    return False
5,356,306
def run_RIB_IN_capacity_test(cvg_api,
                             duthost,
                             tgen_ports,
                             multipath,
                             start_value,
                             step_value,
                             route_type,
                             port_speed,):
    """
    Run RIB-IN Capacity test

    Args:
        cvg_api (pytest fixture): snappi API
        duthost (pytest fixture): duthost fixture
        tgen_ports (pytest fixture): Ports mapping info of T0 testbed
        multipath: ecmp value for BGP config
        start_value: start value of number of routes
        step_value: step value of routes to be incremented at every iteration
        route_type: IPv4 or IPv6 routes
        port_speed: speed of the port used for test
    """
    port_count = multipath + 1

    # Create bgp config on dut
    duthost_bgp_config(duthost, tgen_ports, port_count, route_type,)

    # Run the RIB-IN capacity test by increasing the route count step by step
    get_RIB_IN_capacity(cvg_api, multipath, start_value, step_value, route_type, port_speed,)

    # Cleanup the dut configs after getting the convergence numbers
    cleanup_config(duthost)
5,356,307
def _create_pure_mcts_player(
    game: polygames.Game, mcts_option: mcts.MctsOption, num_actor: int
) -> mcts.MctsPlayer:
    """a player that uses only mcts + random rollout, no neural net"""
    player = mcts.MctsPlayer(mcts_option)
    for _ in range(num_actor):
        actor = polygames.Actor(
            None, game.get_feat_size(), game.get_action_size(), False, False, None
        )
        player.add_actor(actor)
    return player
5,356,308
def get_fans_users():
    """
    Get a user's followers
    :return:
    """
    user_id = request.argget.all("user_id")
    page = str_to_num(request.argget.all("page", 1))
    pre = str_to_num(request.argget.all("pre", 20))

    s, r = arg_verify(reqargs=[("user id", user_id)], required=True)
    if not s:
        return r

    data = {"users": []}
    fans = mdbs["user"].db.user_follow.find({"type": "account", "follow": user_id})
    data_cnt = fans.count(True)
    for user in fans.skip(pre * (page - 1)).limit(pre):
        s, r = get_user_public_info(user_id=user["user_id"],
                                    is_basic=False,
                                    current_user_isauth=current_user.is_authenticated)
        if s:
            data["users"].append(r)

    data["users"] = datas_paging(
        pre=pre,
        page_num=page,
        data_cnt=data_cnt,
        datas=data["users"])
    return data
5,356,309
def test_invalid_patterns(list, pattern):
    """
    Function to facilitate the tests in MyRegExTest class
    :param list: list with strings of invalid cases
    :param pattern: a regular expression
    :return: list with the result of all matches which should be a list of None
    """
    newList = []
    for item in list:
        matched = re.match(pattern, item)
        if matched is None:
            newList.append(None)
        else:
            raise ValueError(item + ' matched to ' + pattern + ' while it should not have matched')
    return newList
5,356,310
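A hedged usage sketch for test_invalid_patterns (the pattern and input strings below are assumptions chosen for illustration): every non-matching string yields None, while a matching string raises ValueError.

import re

# Strings that should NOT match a simple integer pattern
result = test_invalid_patterns(["abc", "1.5x", ""], r"^\d+$")
print(result)  # [None, None, None]

# A matching string raises ValueError
try:
    test_invalid_patterns(["123"], r"^\d+$")
except ValueError as err:
    print(err)  # 123 matched to ^\d+$ while it should not have matched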
def args_parse(): """Parse the input args.""" parser = argparse.ArgumentParser(description='Certificate import') parser.add_argument("--cert", default="./kmc/config/crt/sever.cert", type=str, help="The path of certificate file") parser.add_argument("--key", default='./kmc/config/crt/sever.key', type=str, help="The path of private Key file.") parser.add_argument("--key_component_1", default='./kmc/config/ksf/ksmaster.dat', type=str, help="key material 1.") parser.add_argument("--key_component_2", default='./kmc/config/ksf/ksstandby.dat', type=str, help="key material 2.") args = parser.parse_args() return args
5,356,311
def remove_constant_features(sfm):
    """
    Remove features that are constant across all samples
    """
    # boolean matrix of whether each entry differs from the first row (first sample) for that feature
    x_not_equal_to_1st_row = sfm._x != sfm._x[0]
    non_const_f_bool_ind = x_not_equal_to_1st_row.sum(axis=0) >= 1
    return sfm.ind_x(selected_f_inds=non_const_f_bool_ind)
5,356,312
def flatten(x):
    """ Flatten a list of arrays.

    Parameters
    ----------
    x: list of ndarray or ndarray
        the input dataset.

    Returns
    -------
    y: ndarray 1D
        the flattened input list of arrays.
    shape: list of tuple
        the shapes of the input arrays.
    """
    # Check input
    if not isinstance(x, list):
        x = [x]
    elif len(x) == 0:
        return None, None

    # Flatten the dataset
    y = x[0].flatten()
    shape = [x[0].shape]
    for data in x[1:]:
        y = np.concatenate((y, data.flatten()))
        shape.append(data.shape)
    return y, shape
5,356,313
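An illustrative call of flatten (added, not part of the original snippet), assuming numpy is imported as np as the function body already requires.

import numpy as np

y, shapes = flatten([np.zeros((2, 3)), np.ones((4,))])
print(y.shape)   # (10,)
print(shapes)    # [(2, 3), (4,)]

y_single, shapes_single = flatten(np.arange(6).reshape(2, 3))
print(y_single.shape, shapes_single)  # (6,) [(2, 3)]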
def open_window(): """Open the logging window""" log_window = stager.utils.BUILDER.get_object(LOG_VIEW_WINDOW_ID) log_window.show_all() textview = stager.utils.BUILDER.get_object(LOG_VIEW_TEXTVIEW_ID) buffer = textview.get_buffer() try: with open(LOG_FILE, encoding="utf-8") as log_file: text = log_file.read() except PermissionError: logging.critical("Unable to read log file %s", LOG_FILE) text = f"Unable to read log file {LOG_FILE}" GLib.idle_add(buffer.set_text, text) # TODO add log handler
5,356,314
def sample_automaton(): """ Creates a sample automaton and returns it. """ # The states are a python Set. Name them whatever you want. states = {"0","1","2"} # Choose one of the states to be the initial state. You need to give this a Set, but that Set usually only contains one state. init_state = {"0"} # The set of accepted states should also be a subset of the states. accept_states = {"0","1"} # The automaton works based on a set alphabet. alphabet = {"a","b"} #The transition diagram for the automaton is a set of edges. Each edge q1 --x--> q2 is represented by a tuple (not a list!) (q1, x, q2). # The constructor will accept the actual set, like below, or you can pass it a # simplified string that shows the edges. So either of the two lines below works. d = { ("0","a","1"), ("0","b","2"), ("1","a","2"), ("2","b","0") } d = "0a1,0b2,2b0" #create automaton usr_auto = Automaton(states, init_state, accept_states, alphabet, d) return usr_auto
5,356,315
def mae(data, data_truth):
    """Computes mean absolute error (MAE)

    :param data: Predicted time series values (n_timesteps, n_timeseries)
    :type data: numpy array
    :param data_truth: Ground truth time series values
    :type data_truth: numpy array
    """
    return np.mean(np.abs(data - data_truth))
5,356,316
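A quick check of mae (added example, values assumed): with predictions [1, 2, 3] against truth [2, 2, 5] the absolute errors are 1, 0 and 2, so the MAE is 1.

import numpy as np

pred = np.array([1.0, 2.0, 3.0])
truth = np.array([2.0, 2.0, 5.0])
print(mae(pred, truth))  # 1.0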
def _find_ntc_family(guide_id):
    """Return a String of the NTC family
    """
    guide_id_list = guide_id.split('_')
    return '_'.join(guide_id_list[0:2])
5,356,317
def main(unused_argv): """训练入口""" global total_feature_columns, label_feature_columns dense_feature_columns, category_feature_columns, label_feature_columns = create_feature_columns() total_feature_columns = dense_feature_columns + category_feature_columns params = { "category_feature_columns": category_feature_columns, "dense_feature_columns": dense_feature_columns, "hidden_units": FLAGS.hidden_units.split(','), "dropout_rate": FLAGS.dropout_rate, "batch_norm": FLAGS.batch_norm, "learning_rate": FLAGS.learning_rate, } print(params) estimator = tf.estimator.Estimator( model_fn=nfm_model_fn, params=params, config=tf.estimator.RunConfig(model_dir=FLAGS.model_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps) ) train_spec = tf.estimator.TrainSpec( input_fn=lambda: train_input_fn(filepath=FLAGS.train_data, example_parser=example_parser, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs, shuffle_buffer_size=FLAGS.shuffle_buffer_size), max_steps=FLAGS.train_steps ) feature_spec = tf.feature_column.make_parse_example_spec(total_feature_columns) serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec) exporters = [ tf.estimator.BestExporter( name="best_exporter", serving_input_receiver_fn=serving_input_receiver_fn, exports_to_keep=5) ] eval_spec = tf.estimator.EvalSpec( input_fn=lambda: eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size), throttle_secs=600, steps=None, exporters=exporters ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # Evaluate Metrics. metrics = estimator.evaluate(input_fn=lambda: eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)) for key in sorted(metrics): print('%s: %s' % (key, metrics[key])) results = estimator.predict(input_fn=lambda: eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)) predicts_df = pd.DataFrame.from_dict(results) predicts_df['probabilities'] = predicts_df['probabilities'].apply(lambda x: x[0]) test_df = pd.read_csv("../../dataset/wechat_algo_data1/dataframe/test.csv") predicts_df['read_comment'] = test_df['read_comment'] predicts_df.to_csv("predictions.csv") print("after evaluate")
5,356,318
def CreateFilletCurves(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance, multiple=False): """ Creates a tangent arc between two curves and trims or extends the curves to the arc. Args: curve0 (Curve): The first curve to fillet. point0 (Point3d): A point on the first curve that is near the end where the fillet will be created. curve1 (Curve): The second curve to fillet. point1 (Point3d): A point on the second curve that is near the end where the fillet will be created. radius (double): The radius of the fillet. join (bool): Join the output curves. trim (bool): Trim copies of the input curves to the output fillet curve. arcExtension (bool): Applies when arcs are filleted but need to be extended to meet the fillet curve or chamfer line. If true, then the arc is extended maintaining its validity. If false, then the arc is extended with a line segment, which is joined to the arc converting it to a polycurve. tolerance (double): The tolerance, generally the document's absolute tolerance. Returns: Curve[]: The results of the fillet operation. The number of output curves depends on the input curves and the values of the parameters that were used during the fillet operation. In most cases, the output array will contain either one or three curves, although two curves can be returned if the radius is zero and join = false. For example, if both join and trim = true, then the output curve will be a polycurve containing the fillet curve joined with trimmed copies of the input curves. If join = False and trim = true, then three curves, the fillet curve and trimmed copies of the input curves, will be returned. If both join and trim = false, then just the fillet curve is returned. """ url = "rhino/geometry/curve/createfilletcurves-curve_point3d_curve_point3d_double_bool_bool_bool_double_double" if multiple: url += "?multiple=true" args = [curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance] if multiple: args = list(zip(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance)) response = Util.ComputeFetch(url, args) response = Util.DecodeToCommonObject(response) return response
5,356,319
def select_ads() -> jsonify: """ select ads """ try: if INSERTED_FILES_NUM == 0 or INSERTED_FILES_NUM != PROCESSED_FILES_NUM: raise Exception('server is not ready') weights: List[Tuple[str, int]] = json.loads(request.get_json()) selected_ads: List[Dict[str, int or float]] = server_algo.select_ads(weights) return jsonify({'ads': selected_ads}), 200 except Exception as e: return jsonify(str(e)), 400
5,356,320
def test_or() -> None: """Test ``Or`` condition. """ @argcomb(Or("a", "b")) def f(a: Any = None, b: Any = None) -> None: ... # valid f(a=1) f(b=2) f(a=1, b=2) # invalid with pytest.raises(InvalidArgumentCombination): f()
5,356,321
def _xpath_find(data: Dict, xparts: List, create_if_missing: bool = False) -> Any: """ Descend into a data dictionary. :arg data: The dictionary where to look for `xparts`. :arg xparts: Elements of an Xpath split with xpath_split() :arg bool create_if_missing: If elements are missing from `data`, create them. :returns: The element identified by `xparts`. :raises KeyError: If create_if_missing=False and the element is not found in `data`. :raises TypeError: If `data` does not match the expected structure conveyed by `xparts`. """ for _, name, keys in xparts: if not isinstance(data, dict): raise TypeError("expected a dict") if keys: if name not in data and create_if_missing: data[name] = KeyedList(key_name=_xpath_keys_to_key_name(keys)) lst = data[name] # may raise KeyError if isinstance(lst, KeyedList): try: data = lst[_xpath_keys_to_key_val(keys)] except KeyError: if not create_if_missing: raise data = dict(keys) lst.append(data) elif isinstance(lst, list): # regular python list, need to iterate over it try: i = _list_find_key_index(keys, lst) data = lst[i] except ValueError: # not found if not create_if_missing: raise KeyError(keys) from None data = dict(keys) lst.append(data) else: raise TypeError("expected a list") elif create_if_missing: data = data.setdefault(name, {}) else: data = data[name] # may raise KeyError return data
5,356,322
def r_mediate(y, t, m, x, interaction=False): """ This function calls the R function mediate from the package mediation (https://cran.r-project.org/package=mediation) y array-like, shape (n_samples) outcome value for each unit, continuous t array-like, shape (n_samples) treatment value for each unit, binary m array-like, shape (n_samples) mediator value for each unit, here m is necessary binary and uni- dimensional x array-like, shape (n_samples, n_features_covariates) covariates (potential confounders) values interaction boolean, default=False whether to include interaction terms in the model interactions are terms XT, TM, MX """ m = m.ravel() var_names = [[y, 'y'], [t, 't'], [m, 'm'], [x, 'x']] df_list = list() for var, name in var_names: if len(var.shape) > 1: var_dim = var.shape[1] col_names = ['{}_{}'.format(name, i) for i in range(var_dim)] sub_df = pd.DataFrame(var, columns=col_names) else: sub_df = pd.DataFrame(var, columns=[name]) df_list.append(sub_df) df = pd.concat(df_list, axis=1) m_features = [c for c in df.columns if ('y' not in c) and ('m' not in c)] y_features = [c for c in df.columns if ('y' not in c)] if not interaction: m_formula = 'm ~ ' + ' + '.join(m_features) y_formula = 'y ~ ' + ' + '.join(y_features) else: m_formula = 'm ~ ' + ' + '.join(m_features + [':'.join(p) for p in combinations(m_features, 2)]) y_formula = 'y ~ ' + ' + '.join(y_features + [':'.join(p) for p in combinations(y_features, 2)]) robjects.globalenv['df'] = df mediator_model = Rstats.lm(m_formula, data=base.as_symbol('df')) outcome_model = Rstats.lm(y_formula, data=base.as_symbol('df')) res = mediation.mediate(mediator_model, outcome_model, treat='t', mediator='m', boot=True, sims=1) relevant_variables = ['tau.coef', 'z1', 'z0', 'd1', 'd0'] to_return = [np.array(res.rx2(v))[0] for v in relevant_variables] return to_return + [None]
5,356,323
def default_props(reset=False, **kwargs):
    """Return current default properties

    Parameters
    ----------
    reset : bool
        if True, reset properties and return
        default: False
    """
    global _DEFAULT_PROPS
    if _DEFAULT_PROPS is None or reset:
        reset_default_props(**kwargs)
    return _DEFAULT_PROPS
5,356,324
def ReadFlatFileNGA(xlsfile): """ Generate NGA flatfile dictionary for generate usage """ # read in excel flatfile book = xlrd.open_workbook(xlsfile) sh = book.sheet_by_index(0) # 'Flatfile' sheet name keys = sh.row_values(0) for itmp in range( len(keys) ): keys[itmp] = keys[itmp].encode('ascii') # Column names needed ( add more depending on selection criterion ) names_predictors = [ 'Record Sequence Number', 'EQID', # IDs 'Earthquake Magnitude', 'Dip (deg)','Rake Angle (deg)','Dept to Top Of Fault Rupture Model', 'Fault Rupture Width (km)', # source related 'Joyner-Boore Dist. (km)', 'ClstD (km)', 'FW/HW Indicator', 'Source to Site Azimuth (deg)', # source-site pair related "GMX's C1", 'HP-H1 (Hz)', 'HP-H2 (Hz)', 'LP-H1 (Hz)', 'LP-H2 (Hz)','File Name (Horizontal 1)','File Name (Horizontal 2)', # seismogram related 'Preferred Vs30 (m/s)', 'Measured/Inferred Class', 'Z1 (m)', 'Z1.5 (m)', 'Z2.5 (m)' # site related ] keys_predictors = ['RecordID', 'EQID', 'Mw', 'dip', 'rake', 'Ztor', 'W', 'Rjb', 'Rrup', 'Fhw', 'azimuth', 'GMX_C1', 'HP1', 'HP2', 'LP1', 'LP2', 'H1','H2', 'Vs30', 'VsFlag', 'Z1.0','Z1.5','Z2.5' ] Fhwi = {'hw':1,'fw':0,'nu':0,'na':0,'':None} # relate indicators to Fhw flag # IM related names_IMs = ['Record Sequence Number', 'PGA (g)', 'PGV (cm/sec)', 'PGD (cm)' ] keys_IMs = ['RecordID', 'PGA', 'PGV', 'PGD'] periods = [] for ikey, key in enumerate( keys ): if isinstance( key, str ): key.encode( 'ascii' ) # key now is one of the column name if key[0] == 'T' and key[-1] == 'S': names_IMs.append( key ) keys_IMs.append( 'SA'+key[1:-1] ) periods.append( float(key[1:-1]) ) # colname and colindex map icol_dictP = {} icol_dictI = {} for ikey, key in enumerate( keys ): if key in names_predictors: icol_dictP[key] = ikey if key in names_IMs: icol_dictI[key] = ikey nga_flats = {}; nga_IMs = {} for icol, key in enumerate( names_predictors ): col0 = sh.col_values(icol_dictP[key]) col0[0] = col0[0].encode('ascii') if isinstance( col0[1], str ): if key == 'FW/HW Indicator': # Fhw string to flag (int) for irow in range(1, len(col0) ): col0[irow] = col0[irow].encode('ascii') col0[irow] = Fhwi[col0[irow]] else: for irow in range(1, len(col0) ): col0[irow] = col0[irow].encode('ascii') keyP = keys_predictors[icol] nga_flats[keyP] = col0[1:] for icol, key in enumerate( names_IMs ): col0 = sh.col_values(icol_dictI[key]) if isinstance( col0[1], str ): for irow in range(1, len(col0) ): col0[irow] = col0[irow].encode('ascii') keyI = keys_IMs[icol] nga_IMs[keyI] = col0[1:] return nga_flats, nga_IMs
5,356,325
def _chr_ord(x):
    """
    This is a private utility function for getBytesIOString to return
    chr(ord(x))
    """
    return chr(ord(x))
5,356,326
def vkToWchar (m): """ Mapping from virtual key to character """ ret = [] retTbl = ['/* table of virtual key to wchar mapping tables */', 'static VK_TO_WCHAR_TABLE aVkToWcharTable[] = {'] def generate (n, g, defPrefix=''): defname = f'aVkToWch{defPrefix}{n}' ret.extend ([f'/* map virtual key to flags and {n} unicode output characters */', f'static VK_TO_WCHARS{n} {defname}[] = {{']) for vk, flags, chars in g: def toRepr (s): if s is None: return WChar.NONE.cdefName elif len (s) != 1: # everything else belongs to ligature tables, which we # don’t support. raise Exception (f'only single-character strings are supported ({s!r})') else: return f'0x{ord (s):04X}u /*{repr (s)}*/' chars = ', '.join (map (toRepr, chars)) ret.append (f'\t{{{vk.cdefName}, {flags}, {{{chars}}}}},') ret.extend ([f'\t{{0, 0, {{{("0, "*n)}}}}},', '\t};', '']) # add the new table retTbl.append (f'\t{{(PVK_TO_WCHARS1) {defname}, {n}, sizeof({defname}[0])}},') f = lambda x: len (x[2]) m = groupby (sorted (m, key=f), key=f) for n, g in m: generate (n, g) # We are almost always going to need the numpad keys. They also need to be # last, so translation from string to virtual key does not map them. numpad = [ (VirtualKey.NUMPAD0, 0, '0'), (VirtualKey.NUMPAD1, 0, '1'), (VirtualKey.NUMPAD2, 0, '2'), (VirtualKey.NUMPAD3, 0, '3'), (VirtualKey.NUMPAD4, 0, '4'), (VirtualKey.NUMPAD5, 0, '5'), (VirtualKey.NUMPAD6, 0, '6'), (VirtualKey.NUMPAD7, 0, '7'), (VirtualKey.NUMPAD8, 0, '8'), (VirtualKey.NUMPAD9, 0, '9'), ] generate (1, numpad, 'Num') retTbl.extend (['\t{NULL, 0, 0},', '\t};']) return '\n'.join (ret + retTbl)
5,356,327
def load_glove_vectors(glove_file="/home/yaguang/pretrained_models/glove.6B.50d.txt"):
    """Load the glove word vectors"""
    word_vectors = {}
    with open(glove_file) as f:
        for line in f:
            split = line.split()
            word_vectors[split[0]] = [float(x) for x in split[1:]]
    return word_vectors
5,356,328
def _ParseFileVersion(file_version):
    """Convert the string file_version in event.proto into a float.

    Args:
      file_version: String file_version from event.proto

    Returns:
      Version number as a float.
    """
    tokens = file_version.split("brain.Event:")
    try:
        return float(tokens[-1])
    except ValueError:
        ## This should never happen according to the definition of file_version
        ## specified in event.proto.
        logger.warn(
            (
                "Invalid event.proto file_version. Defaulting to use of "
                "out-of-order event.step logic for purging expired events."
            )
        )
        return -1
5,356,329
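An illustrative call of _ParseFileVersion (added, not from the source), assuming the module-level logger used in the snippet is configured; file_version strings follow the 'brain.Event:<number>' convention the parser splits on, and a malformed string falls back to -1.

print(_ParseFileVersion("brain.Event:2"))    # 2.0
print(_ParseFileVersion("brain.Event:abc"))  # -1 (after logging a warning)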
def gen_imgs(samples, batch_size, slide, shuffle=False):
    """This function returns a generator that yields tuples of (
    X: tensor, float - [batch_size, 224, 224, 3]
    y: tensor, int32 - [batch_size, 224, 224, NUM_CLASSES] )

    input: samples: samples dataframe
    input: batch_size: The number of images to return for each pull
    output: yield (X_train, y_train): generator of X, y tensors
    option: base_truth_dir: path, directory of truth slides
    option: shuffle: bool, if True shuffle samples
    """
    num_samples = len(samples)
    print(num_samples)
    crop_size = (224, 224)  # assumed tile size, implied by the 224-pixel grid used below
    if shuffle:
        samples = samples.sample(frac=1)  # shuffle the sample rows
    # iterate over the samples dataframe in chunks of batch_size
    for offset in range(0, num_samples, batch_size):
        batch_samples = samples.iloc[offset:offset + batch_size]
        images = []
        for _, batch_sample in batch_samples.iterrows():
            # tiles = DeepZoomGenerator(slide, tile_size=224, overlap=0, limit_bounds=False)
            # xy = batch_sample.tile_loc[::]
            xy = batch_sample.tile_loc[::-1]
            xylarge = [x * 224 for x in xy]
            print(batch_sample.tile_loc[::-1], xylarge)
            # img = tiles.get_tile(tiles.level_count-1, batch_sample.tile_loc[::-1])
            img = slide.read_region(xylarge, 0, crop_size)
            img = np.array(img)
            img = img[:, :, :3]
            images.append(img)
        X_train = np.array(images)
        yield X_train
5,356,330
def get_file_format(input_files):
    """
    Takes all input files and checks their first character to assess
    the file format. Returns one of the following strings; fasta, fastq,
    other or mixed. fasta and fastq indicates that all input files are
    of the same format, either fasta or fastq. other indicates that all
    files are not fasta nor fastq files. mixed indicates that the input
    files are a mix of different file formats.
    """

    # Open all input files and get the first character
    file_format = []
    invalid_files = []
    for infile in input_files:
        if is_gzipped(infile):  # infile[-3:] == ".gz"
            f = gzip.open(infile, "rb")
            fst_char = f.read(1)
        else:
            f = open(infile, "rb")
            fst_char = f.read(1)
        f.close()
        # Assess the first character
        if fst_char == b"@":
            file_format.append("fastq")
        elif fst_char == b">":
            file_format.append("fasta")
        else:
            invalid_files.append("other")
    if len(set(file_format)) != 1:
        return "mixed"
    return ",".join(set(file_format))
5,356,331
def get_nominal_hour(train_num): """Get the nominal hour for a train num (most frequent)""" res = database.get().query(""" SELECT count(*) as count, substr(date, 12, 5) as hour FROM results WHERE num = '%s' GROUP BY hour ORDER BY count DESC LIMIT 1; """ % train_num) return next(res).hour
5,356,332
def prepare_aggregation_data(group_name: str) -> List[PlotValues]: """Constructs and returns learning rate curves Args: group_name (str): group name for which to construct the curves Returns: A list of `PlotValues`. """ group_dir = os.path.join(FLAGS.results_dir, group_name) # List of tuples (benchmark_name, unitary_accuracy, federated_accuracy) labels_and_lrs = read_all_task_values(group_dir=group_dir) assert labels_and_lrs, "No values for group found" return [ (label, lrs, [i for i in range(1, len(lrs) + 1, 1)]) for label, lrs in labels_and_lrs ]
5,356,333
def compute_cgan_metrics(img_y, img_g, i = 0): """ Computes accuracy, precision, recall, f1, iou_score for passed image, return None in case of div 0 img_y: ground truth building footprint semantic map img_g: generated image i: 0 for entire image, 1 for inner (excluding border) Note: image format is (n,n,1) and each pixel is either -1 (for 'no' building at pixel) or 1 (for 'yes' building at pixel) """ # image size (it is square), and ring step iz, rz = int(img_y.shape[0]), int(img_y.shape[0] / (4 * 2)) # building inner square mask (ring) where we calculate metrics # example of such mask: # 1 1 1 1 # 1 0 0 1 # 1 0 0 1 # 1 1 1 1 ring = np.ones(img_y.shape, dtype=bool) ring[i * rz:iz - i * rz, i * rz:iz - i * rz, 0] = False # now, erasing all areas which are not in ring with 0 img_y[ring] = 0 img_g[ring] = 0 # TP (true positive), TN, FP, FN TP = np.sum(np.logical_and((img_y == 1), (img_g == 1))) TN = np.sum(np.logical_and((img_y == -1), (img_g == -1))) FP = np.sum(np.logical_and((img_y == -1), (img_g == 1))) FN = np.sum(np.logical_and((img_y == 1), (img_g == -1))) # IoU (intersection over union) intersection = np.logical_and((img_y == 1), (img_g == 1)) union = np.logical_or((img_y == 1), (img_g == 1)) if TP + FP == 0 or TP + FN == 0: return None # reporting metrics accuracy = (TP + TN) / (TP + TN + FP + FN) precision = TP / (TP + FP) recall = TP / (TP + FN) if precision == 0 and recall == 0: return None f1 = 2.0 * (precision * recall) / (precision + recall) iou_score = np.sum(intersection) / np.sum(union) return accuracy, precision, recall, f1, iou_score
5,356,334
def learning(spiking_neurons, spike_times, taup, taum, Ap, Am, wmax, w_init): """ Takes a spiking group of neurons, connects the neurons sparsely with each other, and learns the weight 'pattern' via STDP: exponential STDP: f(s) = A_p * exp(-s/tau_p) (if s > 0), where s=tpost_{spike}-tpre_{spike} :param spiking_neurons, spike_times: np.arrays for Brian2's SpikeGeneratorGroup (list of lists created by `generate_spike_train.py`) - spike train used for learning :param taup, taum: time constant of weight change (in ms) :param Ap, Am: max amplitude of weight change :param wmax: maximum weight (in S) :param w_init: initial weights (in S) :return weightmx: learned synaptic weights """ np.random.seed(12345) pyrandom.seed(12345) #plot_STDP_rule(taup/ms, taum/ms, Ap/1e-9, Am/1e-9, "STDP_rule") PC = SpikeGeneratorGroup(nPCs, spiking_neurons, spike_times*second) # mimics Brian1's exponentialSTPD class, with interactions='all', update='additive' # see more on conversion: http://brian2.readthedocs.io/en/stable/introduction/brian1_to_2/synapses.html STDP = Synapses(PC, PC, """ w : 1 dA_presyn/dt = -A_presyn/taup : 1 (event-driven) dA_postsyn/dt = -A_postsyn/taum : 1 (event-driven) """, on_pre=""" A_presyn += Ap w = clip(w + A_postsyn, 0, wmax) """, on_post=""" A_postsyn += Am w = clip(w + A_presyn, 0, wmax) """) STDP.connect(condition="i!=j", p=connection_prob_PC) STDP.w = w_init run(400*second, report="text") weightmx = np.zeros((nPCs, nPCs)) weightmx[STDP.i[:], STDP.j[:]] = STDP.w[:] return weightmx
5,356,335
def deploy(version_tag=None): """deploys a updated version of the site version_tag: a git tag, defaults to HEAD """ supervised_process = SITE_SETTINGS['supervised_process'] #dust() stop(supervised_process) update(commit=version_tag) setup() collectstatic() start(supervised_process) #undust()
5,356,336
def acq2vhdr( # Paths output_file: Path, data_file: str, # Channels channels: list[Channel], ch_names: list = None, ch_scales: list = None, ch_units: list = None, channel_indexes: list[int] = None, # Raw data samples_per_second: float = 2000.0, # Markers marker_file: str = None, # Other settings header_settings: dict = {}, ): """ Writes a '.vhdr' file for BrainVision Analyzer """ # Select channels if channel_indexes is not None: channels = [channels[i] for i in channel_indexes] # Create infos header_infos = HeaderInfos( # Paths data_file, # Channels channels, ch_names, ch_scales, ch_units, # Raw data samples_per_second, # Markers marker_file, # Other settings header_settings, ) # Write file with output_file.open("wt") as header: header.write(header_infos.generate_text())
5,356,337
def test_disequilibrium5(n):
    """
    Test that peaked ScalarDistributions have non-zero disequilibrium.
    """
    d = ScalarDistribution([1] + [0]*(n-1))
    assert disequilibrium(d) >= 0
5,356,338
def bdc_check_build(build_file: str, verbose: bool = False) -> NoReturn: """ :param build_file: :param verbose: :return: """ init_verbosity(verbose) _ = load_and_validate(build_file) if errors == 0: print("\nNo errors.") else: # Error messages already printed. raise BuildError(f'There are problems with "{build_file}".')
5,356,339
def main(): """ NAME aarm_magic.py DESCRIPTION Converts AARM data to best-fit tensor (6 elements plus sigma) Original program ARMcrunch written to accomodate ARM anisotropy data collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the off-axis remanence terms to construct the tensor. A better way to do the anisotropy of ARMs is to use 9,12 or 15 measurements in the Hext rotational scheme. SYNTAX aarm_magic.py [-h][command line options] OPTIONS -h prints help message and quits -f FILE: specify input file, default is aarm_measurements.txt -crd [s,g,t] specify coordinate system, requires samples file -fsa FILE: specify er_samples.txt file, default is er_samples.txt (2.5) or samples.txt (3.0) -Fa FILE: specify anisotropy output file, default is arm_anisotropy.txt (MagIC 2.5 only) -Fr FILE: specify results output file, default is aarm_results.txt (MagIC 2.5 only) -Fsi FILE: specify output file, default is specimens.txt (MagIC 3 only) -DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3 INPUT Input for the present program is a series of baseline, ARM pairs. The baseline should be the AF demagnetized state (3 axis demag is preferable) for the following ARM acquisition. The order of the measurements is: positions 1,2,3, 6,7,8, 11,12,13 (for 9 positions) positions 1,2,3,4, 6,7,8,9, 11,12,13,14 (for 12 positions) positions 1-15 (for 15 positions) """ # initialize some parameters args = sys.argv if "-h" in args: print(main.__doc__) sys.exit() #meas_file = "aarm_measurements.txt" #rmag_anis = "arm_anisotropy.txt" #rmag_res = "aarm_results.txt" # # get name of file from command line # data_model_num = int(pmag.get_named_arg("-DM", 3)) spec_file = pmag.get_named_arg("-Fsi", "specimens.txt") if data_model_num == 3: samp_file = pmag.get_named_arg("-fsa", "samples.txt") else: samp_file = pmag.get_named_arg("-fsa", "er_samples.txt") dir_path = pmag.get_named_arg('-WD', '.') input_dir_path = pmag.get_named_arg('-ID', '') infile = pmag.get_named_arg('-f', reqd=True) coord = pmag.get_named_arg('-crd', '-1') #if "-Fa" in args: # ind = args.index("-Fa") # rmag_anis = args[ind + 1] #if "-Fr" in args: # ind = args.index("-Fr") # rmag_res = args[ind + 1] ipmag.aarm_magic(infile, dir_path, input_dir_path, spec_file, samp_file, data_model_num, coord)
5,356,340
def register_config_callbacks(): """ Registers callback function to fire whenever a Maya file is opened or created. :return: None """ # Make sure there are no callbacks that we've registered and never # de-registered... this could happen if Mimic crashed. de_register_callbacks() callbacks = [str(OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterNew, reload_mimic)), str(OpenMaya.MSceneMessage.addCallback( OpenMaya.MSceneMessage.kAfterOpen, reload_mimic))] pm.optionVar[CALLBACK_KEY] = ' '.join(callbacks)
5,356,341
def build_arg_parser2(): """ Build an argument parser using optparse. Use it when python version is 2.5 or 2.6. """ usage_str = "Smatch table calculator -- arguments" parser = optparse.OptionParser(usage=usage_str) parser.add_option("--fl", dest="fl", type="string", help='AMR ID list file') parser.add_option("-f", dest="f", type="string", action="callback", callback=cb, help="AMR IDs (at least one)") parser.add_option("-p", dest="p", type="string", action="callback", callback=cb, help="User list") parser.add_option("--fd", dest="fd", type="string", help="file directory") parser.add_option("-r", "--restart", dest="r", type="int", help='Restart number (Default: 4)') parser.add_option("-v", "--verbose", action='store_true', dest="v", help='Verbose output (Default:False)') parser.set_defaults(r=4, v=False, ms=False, fd=isi_dir_pre) return parser
5,356,342
def _write_log(path, lines): """ :param path: log file path :param lines: content :return status:Bool """ try: with open(path, 'w') as file: logi('open file {log_path} for writting'.format(log_path=path)) file.writelines(lines) except Exception as e: loge(e) return False return True
5,356,343
def test_branch_and_ring_at_beginning_of_branch(): """Test SELFIES that have a branch and ring immediately at the start of a branch. """ # CC1CCCS((Br)1Cl)F assert is_eq(sf.decoder("[C][C][C][C][C][S][Branch1_2][Branch1_3]" "[Branch1_1][C][Br]" "[Ring1][Branch1_1][Cl][F]"), "CC1CCCS1(Br)(Cl)F") # CC1CCCS(1(Br)Cl)F assert is_eq(sf.decoder("[C][C][C][C][C][S][Branch1_2][Branch1_3]" "[Ring1][Branch1_1]" "[Branch1_1][C][Br][Cl][F]"), "CC1CCCS1(Br)(Cl)F") # CC1CCCS(((1Br)Cl)I)F assert is_eq(sf.decoder("[C][C][C][C][C][S][Branch1_3][Branch2_3]" "[Branch1_2][Branch1_3]" "[Branch1_1][Ring2][Ring1][Branch1_1][Br]" "[Cl][I][F]"), "CC1CCCS1(Br)(Cl)(I)F")
5,356,344
def update_meal_plan(plan_id: str, meal_plan: MealPlan): """ Updates a meal plan based off ID """ meal_plan.process_meals() meal_plan.update(plan_id) # try: # meal_plan.process_meals() # meal_plan.update(plan_id) # except: # raise HTTPException( # status_code=404, # detail=SnackResponse.error("Unable to Update Mealplan"), # ) return SnackResponse.success("Mealplan Updated")
5,356,345
def clean_data(df):
    """
    INPUT:
    df - pandas DataFrame - A data frame that contains the data
    OUTPUT:
    df - pandas DataFrame - A cleaned pandas data frame
    """
    # split categories into a data frame and take the first row
    cat = df.categories.str.split(';', expand=True)
    row = cat.iloc[0]
    rew = row.unique()

    # Fix column names
    f = []
    for x in rew:
        r = x[:-2]
        f.append(r)
    category_colnames = pd.Series(f)
    cat.columns = category_colnames

    for column in cat:
        # keep only the last character of each value (the 0/1 flag)
        cat[column] = cat[column].str.strip().str[-1]
        # convert column from string to numeric
        cat[column] = cat[column].astype('int64')

    # concatenate the categories columns with df and drop unnecessary values
    df = df.drop(['categories'], axis=1)
    df = pd.concat([df, cat], axis=1)
    df = df.drop_duplicates()
    df = df.dropna(how='any')
    return df
5,356,346
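A minimal sketch of how clean_data above might be driven (illustrative only; it assumes a semicolon-separated 'categories' column whose values end in -0/-1 flags, which is what the function's string slicing expects).

import pandas as pd

raw = pd.DataFrame({
    "id": [1, 2],
    "message": ["need water", "roads blocked"],
    "categories": ["related-1;request-1;offer-0", "related-1;request-0;offer-0"],
})
cleaned = clean_data(raw)
print(cleaned.columns.tolist())  # ['id', 'message', 'related', 'request', 'offer']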
def adjust_learning_rate(optimizer, iteration_count, args):
    """Imitating the original implementation"""
    lr = args.lr / (1.0 + args.lr_decay * iteration_count)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
5,356,347
def vec2transform(v): """Convert a pose from 7D vector format ( x y z qx qy qz qw) to transformation matrix form Args: v pose in 7D vector format Returns: T 4x4 transformation matrix $ rosrun tf tf_echo base os_lidar - Translation: [-0.084, -0.025, 0.050] - Rotation: in Quaternion [0.000, 0.000, 0.924, 0.383] in RPY (radian) [0.000, -0.000, 2.356] in RPY (degree) [0.000, -0.000, 135.000] Random quaternion sent by nived in Mattermost -Rotation: q_BL: [ 0.0122965, -0.002454, 0.9226886, 0.385342] """ T_cam_to_os = np.eye(4) T_cam_to_os[:3, -1] = np.array([-0.084, -0.025, 0.050]) T_cam_to_os[:3, :3] = o3d.geometry.Geometry3D.get_rotation_matrix_from_quaternion( np.array([0.383, 0.000, 0.000, 0.924]) ) T_os_to_cam = np.linalg.inv(T_cam_to_os) T = np.eye(4) T[:3, -1] = v[:3] T[:3, :3] = o3d.geometry.Geometry3D.get_rotation_matrix_from_quaternion( np.array([v[6], v[3], v[4], v[5]]) ) return T_os_to_cam @ T @ T_cam_to_os
5,356,348
def cleanup_name_customregex(cname, customregex=None, returnmatches=False):
    """Cleanup the input name given a custom dictionary of regular expressions
    (format of customregex: a dict like {'regex-pattern': 'replacement'})"""
    if customregex is None:
        customregex = {'_': ' ',
                       'repos': '',
                       'ecg': '',
                       '[0-9]+': '',
                       }

    matches = set()
    # For each pattern
    for pattern, replacement in customregex.items():
        # First try to see if there is a match and store it if yes
        if returnmatches:
            m = re.search(pattern, cname, flags=re.I)
            if m:
                matches.add(m.group(0))
        # Then replace the pattern found
        cname = re.sub(pattern, replacement, cname, flags=re.I)

    # Return both the cleaned name and matches
    if returnmatches:
        return (cname, matches)
    # Return just the cleaned name
    else:
        return cname
5,356,349
def test_query_vaults_details_non_premium(rotkehlchen_api_server): """Check querying the vaults details endpoint without premium does not work""" response = requests.get(api_url_for( rotkehlchen_api_server, "makerdaovaultdetailsresource", )) assert_error_response( response=response, contained_in_msg='Currently logged in user testuser does not have a premium subscription', status_code=HTTPStatus.CONFLICT, )
5,356,350
def compute_recommended_batch_size_for_trustworthy_experiments(C: int, H: int, W: int, safety_val: float) -> int:
    """
    Based on the inequality with safety_val=s:
        N' >= s*D'
    the recommended batch size is, assuming N'=B*H*W and D'=C (so considering neurons as filters,
    patches as data):
        B*H*W >= s*C
    leading to any batch size B that satisfies:
        B >= (s*C)/(H*W)
    for the current layer and model. So, C, H, W are for the current model at that layer.

    note:
        - recommended way to compute this is to get the largest B after plugging in the C, H, W for all the
          layers of your model - essentially computing the "worst-case" B needed for the model.
    :return:
    """
    recommended_batch_size: int = int(math.ceil(safety_val * C / (H * W)))
    assert (recommended_batch_size > 0), 'Batch size that was recommended was negative, check the input you are using.'
    return recommended_batch_size
5,356,351
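A worked example for the batch-size bound above (numbers chosen for illustration): with C=512 channels, a 7x7 feature map and safety_val=10, B >= 10*512/49 ≈ 104.5, so the recommended batch size is 105.

import math

print(compute_recommended_batch_size_for_trustworthy_experiments(C=512, H=7, W=7, safety_val=10.0))  # 105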
def filter_gradient(t, h_, n_std=3):
    """Filter outliers by evaluating the derivative.

    Take derivative and evaluate outliers in derivative.
    """
    h = h_.copy()  # NOTE: This must be a separate step

    # dh/dt = 0 -> invalid
    dhdt = np.gradient(h)
    invalid = np.round(dhdt, 6) == 0.0
    dhdt[invalid] = np.nan

    invalid = np.isnan(dhdt) | (
        np.abs(dhdt - np.nanmedian(dhdt)) > mad_std(dhdt) * n_std
    )
    if sum(invalid) == 0:
        return h
    h[invalid] = np.nan
    return h
5,356,352
def network_driver_create_endpoint(): """Creates new Neutron Subnets and a Port with the given EndpointID. This function takes the following JSON data and delegates the actual endpoint creation to the Neutron client mapping it into Subnet and Port. :: { "NetworkID": string, "EndpointID": string, "Options": { ... }, "Interface": { "Address": string, "AddressIPv6": string, "MacAddress": string } } Then the following JSON response is returned. :: { "Interface": { "Address": string, "AddressIPv6": string, "MacAddress": string } } See the following link for more details about the spec: https://github.com/docker/libnetwork/blob/master/docs/remote.md#create-endpoint # noqa """ json_data = flask.request.get_json(force=True) LOG.debug("Received JSON data %s for " "/NetworkDriver.CreateEndpoint", json_data) jsonschema.validate(json_data, schemata.ENDPOINT_CREATE_SCHEMA) endpoint_id = json_data['EndpointID'] neutron_network_identifier = _make_net_identifier(json_data['NetworkID'], tags=app.tag) filtered_networks = _get_networks_by_identifier(neutron_network_identifier) if not filtered_networks: return flask.jsonify({ 'Err': "Neutron net associated with identifier {0} doesn't exist." .format(neutron_network_identifier) }) else: neutron_network_id = filtered_networks[0]['id'] interface = json_data['Interface'] or {} # Workaround for null interface_cidrv4 = interface.get('Address', '') interface_cidrv6 = interface.get('AddressIPv6', '') interface_mac = interface.get('MacAddress', '') if not interface_cidrv4 and not interface_cidrv6: return flask.jsonify({ 'Err': "Interface address v4 or v6 not provided." }) neutron_port, subnets = _create_or_update_port( neutron_network_id, endpoint_id, interface_cidrv4, interface_cidrv6, interface_mac) try: port_driver = get_driver(neutron_port) (stdout, stderr) = port_driver.create_host_iface( endpoint_id, neutron_port, subnets, filtered_networks[0]) LOG.debug(stdout) if stderr: LOG.error(stderr) except (exceptions.VethCreationFailure, exceptions.BindingNotSupportedFailure) as ex: with excutils.save_and_reraise_exception(): LOG.error('Preparing the veth ' 'pair was failed: %s.', ex) except processutils.ProcessExecutionError: with excutils.save_and_reraise_exception(): LOG.error('Could not bind the Neutron port to ' 'the veth endpoint.') except (exceptions.KuryrException, n_exceptions.NeutronClientException) as ex: with excutils.save_and_reraise_exception(): LOG.error('Failed to set up the interface: %s', ex) if app.vif_plug_is_fatal: port_active = utils.wait_for_port_active( app.neutron, neutron_port['id'], app.vif_plug_timeout) if not port_active: neutron_port_name = neutron_port['name'] raise exceptions.InactiveResourceException( "Neutron port {0} did not become active on time." 
.format(neutron_port_name)) response_interface = {} created_fixed_ips = neutron_port['fixed_ips'] subnets_dict_by_id = {subnet['id']: subnet for subnet in subnets} if not interface_mac: response_interface['MacAddress'] = neutron_port['mac_address'] vnic_type = neutron_port.get('binding:vnic_type') if vnic_type in const.VNIC_TYPES_SRIOV: response_interface.pop('MacAddress', None) if not (interface_cidrv4 or interface_cidrv6): if 'ip_address' in neutron_port: _process_interface_address( neutron_port, subnets_dict_by_id, response_interface) for fixed_ip in created_fixed_ips: _process_interface_address( fixed_ip, subnets_dict_by_id, response_interface) LOG.debug("Response JSON data %s for /NetworkDriver.CreateEndpoint", {'Interface': response_interface}) return flask.jsonify({'Interface': response_interface})
5,356,353
def save_pkl(fp, obj):
    """Saves an object to pickle file."""
    with open(fp, "wb") as fh:
        pickle.dump(obj, fh)
5,356,354
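A round-trip sketch for save_pkl (illustrative file name, not from the source); reading back uses pickle.load directly since the entry above only defines the writer.

import pickle

save_pkl("cache.pkl", {"epoch": 3, "acc": 0.91})
with open("cache.pkl", "rb") as fh:
    restored = pickle.load(fh)
print(restored)  # {'epoch': 3, 'acc': 0.91}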
def generate_valve_from_great_vessel( label_great_vessel, label_ventricle, valve_thickness_mm=8, ): """ Generates a geometrically-defined valve. This function is suitable for the pulmonic and aortic valves. Args: label_great_vessel (SimpleITK.Image): The binary mask for the great vessel (pulmonary artery or ascending aorta) label_ventricle (SimpleITK.Image): The binary mask for the ventricle (left or right) valve_thickness_mm (int, optional): Valve thickness, in millimetres. Defaults to 8. Returns: SimpleITK.Image: The geometric valve, as a binary mask. """ # To speed up binary morphology operations we first crop all images template_img = 0 * label_ventricle cb_size, cb_index = label_to_roi( (label_great_vessel + label_ventricle) > 0, expansion_mm=(20, 20, 20) ) label_ventricle = crop_to_roi(label_ventricle, cb_size, cb_index) label_great_vessel = crop_to_roi(label_great_vessel, cb_size, cb_index) # Convert valve thickness to voxels _, _, res_z = label_ventricle.GetSpacing() valve_thickness = int(valve_thickness_mm / res_z) # Dilate the ventricle label_ventricle_dilate = sitk.BinaryDilate(label_ventricle, (valve_thickness,) * 3) # Find the overlap overlap = label_great_vessel & label_ventricle_dilate # Mask to thinner great vessel mask = label_great_vessel | label_ventricle_dilate overlap = sitk.Mask(overlap, mask) label_valve = sitk.BinaryMorphologicalClosing(overlap) # Finally, paste back to the original image space label_valve = sitk.Paste( template_img, label_valve, label_valve.GetSize(), (0, 0, 0), cb_index, ) return label_valve
5,356,355
def current_decay(dataframe,two_components=False): """ Fits 95% peak to: A(t) = A*exp(-t/Taufast) + B*exp(-t/Tauslow) +Iss Parameters ---------- dataframe : A pandas dataframe Should be baselined two_components : True/False When False, a single exponential component is fitted to the current decay (B is zero). When True, the sum of two exponential components is fitted.The default is False. Returns ------- A Graph of the current decay with superimposed fit. Values for fast (and slow, if selected) time constants, or value for single time constant in mS """ # will need to get peak - done # get steady state # need to get amplitude of the fast component (peak) # amplitude of the slower component # Currently going for an unbinned approach, but can always consider a binned peak = get_peak(dataframe,decay=True) # gives component A for both fittig routines peak_to_baseline = dataframe.loc[peak:,:].mean(axis=1) # using get_Iss() and get_component (both should return amplitude and time # Normalising times to time of peak current peak_to_baseline.index = peak_to_baseline.index-(peak_to_baseline.index[0]) #### get 95% current to baseline current_at_t = peak_to_baseline[peak_to_baseline > (peak_to_baseline.iloc[0]*0.95)] # get times t = np.array(current_at_t.index) ##### # reformat current to numpy.array current_at_t = np.array(current_at_t) # get Iss _,Iss = get_Iss(peak_to_baseline) # fast component,A, peak amplitude A = current_at_t[0] ##### # preparing figure if two_components: xdata = np.zeros([np.size(t),4]) xdata[:,0] = t xdata[:,1] = A xdata[:,2] = Iss _,B = get_component(peak_to_baseline,'slow') # amplitude of slow component plt.style.use('ggplot') decayfig,decayaxs = plt.subplots(1) decayaxs.set_xlabel("t(ms)") decayaxs.set_ylabel("I(pA)") xdata[:,3] = B xdata = xdata.transpose() times = t*10**3 # rescaling to mS popt,_ = sp.optimize.curve_fit(double_exp_fit,xdata,current_at_t) # popt = Tfast,Tslow decayaxs.plot(times,double_exp_fit(xdata,popt[0],popt[1]),linestyle="--",color= 'red',label = "fit") decayaxs.plot(times,current_at_t,color = 'black',label = "data") decayaxs.set_title("Decay from 95% Ipeak:baseline. Tauf = {}ms,Taus = {}ms".format((popt[0]*10**3),(popt[1]*10**3))) decayaxs.legend() decayfig.tight_layout() return(popt[0]*10**3,popt[1]*10**3) else: xdata = np.zeros([np.size(t),3]) xdata[:,0] = t xdata[:,1] = A xdata[:,2] = Iss xdata = xdata.transpose() plt.style.use('ggplot') decayfig,decayaxs = plt.subplots(1) decayaxs.set_xlabel("t(ms)") decayaxs.set_ylabel("I(pA)") times = t*10**3 # rescaling to mS popt,_ = sp.optimize.curve_fit(exp_fit,xdata,current_at_t) #popt = Tau of single component decayaxs.plot(times,current_at_t,color = 'black',label = "data") decayaxs.plot(times,exp_fit(xdata,popt),linestyle="--",color= 'red',label = "fit") decayaxs.set_title("Decay from 95% Ipeak:baseline. Tau = {}ms".format((popt[0]*10**3))) decayaxs.legend() decayfig.tight_layout() return(popt[0]*10**3)
5,356,356
def get_n_mode_follow(p_state, idx_image=-1, idx_chain=-1):
    """Returns the index of the mode to follow."""
    return int(_MMF_Get_N_Mode_Follow(p_state, ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))
5,356,357
def get_possible_centroid_nodes_from_partial_preprocessing(nw_name): """ this function returns a list of partially preprocessed nodes to used them as zone systems (for fast routing) """ nw_path = os.path.join(MAIN_DIR, "data", "networks", nw_name) ppf = os.path.join(nw_path, "base", "tt_matrix.npy") if os.path.isfile(ppf): tt_matrx = np.load(ppf) return [i for i in range(tt_matrx.shape[0])] else: raise FileExistsError("file {} not found! not preprocessed?".format(ppf))
5,356,358
def dismiss_recommendation(title): """Dismiss the movie matching the specified criteria from showing up in recommendations. """ yield 'recommendations/movies/{title}'.format(title=slugify(str(title)))
5,356,359
def divide(lhs, rhs): """Division with auto-broadcasting Parameters ---------- lhs : tvm.Tensor or Expr The left operand rhs : tvm.Tensor or Expr The right operand Returns ------- ret : tvm.Tensor or Expr Returns Expr if both operands are Expr. Otherwise returns Tensor. """ return _cpp.divide(lhs, rhs)
5,356,360
def load(filename): """Loads a compressed object from disk """ file = gzip.GzipFile(filename, 'rb') buffer = b'' while True: data = file.read() if data == b'': break buffer += data object = pickle.loads(buffer) file.close() return object
5,356,361
def ordered_ltrunc(): """ Iterates through the left truncatable prime https://youtu.be/azL5ehbw_24 A left truncatable prime is still prime when the left most digit is dropped. For example 317 -> 17 -> 7 are prime so 317 is a left truncatable prime returns an iterator that goes through them in increasing order """ # Prime.value is the value of the prime number # Prime.k is the power of 10 needed to generate the first digit Prime = namedtuple('Prime',['value','k']) # list single digit prime numbers working_list = [Prime(x,0) for x in (2,3,5,7)] # stores the set of left truncatable primes that have one more # digit than the working list expanding_list = [] # the expanding is built with the primes ordered while working_list: # digits 1,...,9 are valid left additions for t in range(1,10): for prime in working_list: n = t*10**(prime.k + 1) + prime.value # the left additions just got constructed in ascending order # if the number is prime it is a left truncatable prime # add it to the expanding list if isprime(n): expanding_list.append(Prime(n,prime.k + 1)) # the expanding list is built so yield the working list one by one while working_list: yield working_list.pop(0).value # working list is exhausted - replace it with the expanding list working_list = expanding_list # start a new expanding list expanding_list = []
5,356,362
def on_receive_best5(best5_entry):
    """Handle a best-five-quotes (top 5 bid/ask) event."""
    print('[%s %s] Best five quotes' % (best5_entry['id'], best5_entry['name']))
    for i in range(0, 5):
        print('%5d %.2f | %.2f %5d' % (
            best5_entry['best'][i]['bidQty'],
            best5_entry['best'][i]['bid'],
            best5_entry['best'][i]['ask'],
            best5_entry['best'][i]['askQty'],
        ))
5,356,363
def connect_message_queue(name, url=None, maxsize=0):
    """
    create connection to message queue

    name: name of message queue

    rabbitmq:
        amqp://username:password@host:5672/%2F
        Refer: https://www.rabbitmq.com/uri-spec.html
    beanstalk:
        beanstalk://host:11300/
    redis:
        redis://host:6379/db
    builtin:
        None
    """

    if not url:
        from multiprocessing import Queue
        return Queue(maxsize=maxsize)

    parsed = urlparse.urlparse(url)
    if parsed.scheme == 'amqp':
        from .rabbitmq import Queue
        return Queue(name, url, maxsize=maxsize)
    elif parsed.scheme == 'beanstalk':
        from .beanstalk import Queue
        return Queue(name, host=parsed.netloc, maxsize=maxsize)
    elif parsed.scheme == 'redis':
        from .redis_queue import Queue
        db = parsed.path.lstrip('/').split('/')
        try:
            db = int(db[0])
        except ValueError:
            db = 0
        return Queue(name, parsed.hostname, parsed.port, db=db, maxsize=maxsize)
    raise Exception('unknown connection url: %s' % url)
5,356,364
def load_document(filepath):
    """
    Description: Opens and loads the file specified by filepath as a raw txt string;
        assumes a valid text file format.
    Input: String -> filepath of file from current directory
    Output: Entire contents of the text file as a string
    """
    #assert(filepath.endswith(".txt")), "Function: Load Document -> File specified by filepath is not of type .txt"
    file = open(filepath, 'r')
    file_string = file.read()
    file.close()
    return file_string
5,356,365
def get_user(request, uid): """ GET /user/1/ """ if uid != 1: return JsonResponse({"code": 10101, "message": "user id null"}) data = {"age": 22, "id": 1, "name": "tom"} return JsonResponse({"code": 10200, "data": data, "message": "success"})
5,356,366
def write_json_test_results(category, # type: ResultType name, # type: str content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]] formatted=True, # type: bool encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]] ): # type: (...) -> None """Write the given json content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
5,356,367
def count_ngrams(lines, min_length=1, max_length=3): """ Iterate through given lines iterator (file object or list of lines) and return n-gram frequencies. The return value is a dict mapping the length of the n-gram to a collections.Counter object of n-gram tuple and number of times that n-gram occurred. Returned dict includes n-grams of length min_length to max_length. """ lengths = range(min_length, max_length + 1) ngrams = {length: collections.Counter() for length in lengths} queue = collections.deque(maxlen=max_length) # Helper function to add n-grams at start of current queue to dict def add_queue(): current = tuple(queue) for length in lengths: if len(current) >= length: ngrams[length][current[:length]] += 1 # Loop through all lines and words and add n-grams to dict for line in lines: for word in tokenize(line): queue.append(word) if len(queue) >= max_length: add_queue() # Make sure we get the n-grams at the tail end of the queue while len(queue) > min_length: queue.popleft() add_queue() return ngrams
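# Usage sketch (illustration only): count_ngrams() relies on a module-level tokenize()
# helper; the whitespace splitter below is a hypothetical stand-in for it, not the
# project's real tokenizer.
def tokenize(line):
    # Lowercase and split on whitespace.
    return line.lower().split()

def _demo_count_ngrams():
    lines = ["the cat sat on the mat", "the cat ran"]
    ngrams = count_ngrams(lines, min_length=1, max_length=2)
    return ngrams[2][('the', 'cat')]  # -> 2 (the bigram appears once in each line)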
5,356,368
def notas(*n, sit=False):
    """
    -> Function to analyse the grades and situation of several students.
    :param n: one or more grades
    :param sit: situation (optional value)
    :return: dictionary with several pieces of information about the student.
    """
    r = {}
    r['total'] = len(n)
    r['maior'] = max(n)
    r['menor'] = min(n)
    r['média'] = sum(n) / len(n)
    if sit:
        if r['média'] >= 7:
            r['situação'] = 'boa'
        elif r['média'] >= 5:
            r['situação'] = 'razoavél'
        else:
            r['situação'] = 'ruim'
    return r
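# Usage sketch (illustration only): notas() accepts any number of grades plus the
# optional sit flag; the grades below are made up.
def _demo_notas():
    r = notas(8.5, 7.0, 9.5, sit=True)
    # r['total'] == 3, r['maior'] == 9.5, r['menor'] == 7.0,
    # r['média'] == 8.333..., r['situação'] == 'boa'
    return r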
5,356,369
def main( filepath: str = "ice_thickness_01-01-2020.csv", rescaling_factor: int = 2, grid_size: float = 0.1, robot_radius: float = 0.01, ): """Loads the ice thickness data and plans a route over safe ice.""" df = pd.read_csv(filepath) df_rescaled = df.iloc[::rescaling_factor, :] gx, gy, sx, sy, ox, oy = process_data(df_rescaled) plt.grid(True) plt.axis("equal") # path generation _, _ = potential_field_planning(sx, sy, gx, gy, ox, oy, grid_size, robot_radius) plt.show()
5,356,370
def validate_metrics(metrics): """ Checks if specified metrics are valid. Returns None if check passes, else raises ValueError. """ if any(m not in METRICS for m in metrics): bad_metrics = [m for m in metrics if m not in METRICS] raise ValueError('Unknown metrics: {}'.format(bad_metrics))
5,356,371
def accuracy(output, labels_test): """How many correct predictions?""" TP, TN, FP, FN = confusionMatrix(labels_test, numpy.sign(output)) return float(TP + TN) / (TP + TN + FP + FN)
5,356,372
def test_tk_import(): """Test `tqdm.tk` import""" importorskip('tqdm.tk')
5,356,373
def metric_pairs(request): """Pairs of (dask-ml, sklearn) accuracy metrics. * accuracy_score """ return ( getattr(dask_ml.metrics, request.param), getattr(sklearn.metrics, request.param) )
5,356,374
def MyDuration(duration, initial_time=None):
    """
    Use case: a duration is provided (e.g. the number of seconds until an access token
    expires); add it to the current time to show a human-readable future time.

    Alternatively specify *initial_time* as a manual "now" value.

    Args
        duration: <type 'int'> OR <type 'str'>
            Duration in seconds. If given as a string, convert to int.
        initial_time: <type 'int'> OR <type 'str'>
            Time to start the difference calculation from. If given as a string,
            convert to int. If not set, use current time.

    Returns
        out_time: the time after `duration` seconds have elapsed. Shows in format
            '2016-12-11 15:40:00' if printed.
    """
    duration = int(duration)

    if initial_time:
        initial_time = int(initial_time)
    else:
        initial_time = time.time()  # use current time

    in_time = initial_time + duration  # add duration to start time
    out_time = datetime.datetime.fromtimestamp(in_time)

    return out_time
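# Usage sketch (illustration only): passing initial_time makes the result deterministic;
# the epoch value below is 2021-01-01 00:00:00 UTC.
def _demo_my_duration():
    expiry = MyDuration(7200, initial_time=1609459200)
    # -> datetime 2 hours after that instant, rendered in the local timezone
    #    (e.g. datetime.datetime(2021, 1, 1, 2, 0) on a machine set to UTC)
    return expiry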
5,356,375
def test_fpn():
    """Tests fpn."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    # `num_outs` is not equal to len(in_channels) - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            num_outs=2)

    # `end_level` is larger than len(in_channels) - 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=4,
            num_outs=2)

    # `num_outs` is not equal to end_level - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=3,
            num_outs=1)

    # Invalid `add_extra_convs` option
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            add_extra_convs='on_xxx',
            num_outs=5)

    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        num_outs=5)

    # FPN expects multiple levels of features per image
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    outs = fpn_model(feats)

    assert fpn_model.add_extra_convs == 'on_input'
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        # start_level=1 shifts the scales by one, so output i has stride 2**(i + 1)
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # Tests for fpn with no extra convs (pooling is used instead)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=False,
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    assert not fpn_model.add_extra_convs
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # Tests for fpn with lateral bns
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        no_norm_on_lateral=False,
        norm_cfg=dict(type='BN', requires_grad=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    assert fpn_model.add_extra_convs == 'on_input'
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
    bn_exist = False
    for m in fpn_model.modules():
        if isinstance(m, _BatchNorm):
            bn_exist = True
    assert bn_exist

    # Bilinear upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(mode='bilinear', align_corners=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    assert fpn_model.add_extra_convs == 'on_input'
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # Scale factor instead of fixed upsample size upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(scale_factor=2),
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # Extra convs source is 'inputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_input',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_input'
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # Extra convs source is 'laterals'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_lateral',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_lateral'
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # Extra convs source is 'outputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_output',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_output'
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
5,356,376
def orthogonal_init(shape, gain=1.0): """Generating orthogonal matrix""" # Check the shape if len(shape) < 2: raise ValueError("The tensor to initialize must be " "at least two-dimensional") # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d num_rows = 1 for dim in shape[:-1]: num_rows *= dim num_cols = shape[-1] flat_shape = (num_rows, num_cols) # Generate a random matrix a = np.random.normal(size=flat_shape).astype(np.float32) # Compute the qr factorization q, r = np.linalg.qr(a, mode='reduced') # Make Q uniform square_len = np.minimum(num_rows, num_cols) d = np.diagonal(r[:square_len, :square_len]) ph = d / np.absolute(d) q *= ph # Pad zeros to Q (if rows smaller than cols) if num_rows < num_cols: padding = np.zeros([num_rows, num_cols - num_rows], dtype=np.float32) q = np.concatenate([q, padding], 1) return gain * np.reshape(q, shape)
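# Usage sketch (illustration only): the columns of the flattened kernel are orthonormal,
# which can be checked by multiplying the flattened matrix's transpose against itself.
import numpy as np

def _demo_orthogonal_init():
    w = orthogonal_init((3, 3, 8, 16))       # conv2d-style kernel shape
    flat = w.reshape(-1, 16)                 # (3*3*8, 16)
    gram = flat.T @ flat                     # ~identity for gain=1.0
    return np.allclose(gram, np.eye(16), atol=1e-5)  # -> True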
5,356,377
def application(env, start_response): """The callable function per the WSGI spec; PEP 333""" headers = {x[5:].replace('_', '-'):y for x, y in env.items() if x.startswith('HTTP_')} if env.get('CONTENT_TYPE', None): headers['Content-Type'] = env['CONTENT_TYPE'] if env.get('CONTENT_LENGTH', None): headers['Content-Length'] = env['CONTENT_LENGTH'] headers.pop('CONNECTION', None) # let RelayQuery choose to use keepalives or not body = env['wsgi.input'] uri = env.get('PATH_INFO', '') if not uri: # Some WSGI servers use RAW_URI instead of PATH_INFO. # Gunicorn uses PATH_INFO, gevent.pywsgi.WSGIServer uses RAW_URI uri = env.get('RAW_URI', '') token = env.get('HTTP_X_AUTH', '').encode() host, tls, port = router.get_host(uri=uri, token=token) if env.get('QUERY_STRING', None): uri += '?{}'.format(env['QUERY_STRING']) resp = RelayQuery(host=host, method=env['REQUEST_METHOD'], uri=uri, headers=headers, body=body, port=port, tls=tls) start_response(resp.status, resp.headers) return resp
5,356,378
def loadModule():
    """ Load the config data, register madmin and import the gate node app. """
    load_config_data()
    registe_madmin()
    from gatenodeapp import *
5,356,379
def angle_normalize(x):
    """
    Normalize angles to the range [-pi, pi)
    """
    return ((x + np.pi) % (2 * np.pi)) - np.pi
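# Usage sketch (illustration only): a few values wrapped into [-pi, pi).
import numpy as np

def _demo_angle_normalize():
    assert np.isclose(angle_normalize(3 * np.pi / 2), -np.pi / 2)
    assert np.isclose(angle_normalize(-3 * np.pi / 2), np.pi / 2)
    assert np.isclose(angle_normalize(0.5), 0.5)  # already in range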
5,356,380
async def hook_factory() -> HookFactory:
    """Factory creation fixture. Cleans up after yield."""
    cur_factory = HookFactory()
    yield cur_factory
    cur_factory.stop_all()
    await asyncio.sleep(0.01)
5,356,381
def load_data(BASE_DIR, DATA_DIR): """ Loads data necessary for project Arguments: BASE_DIR (str) -- path to working dir DATA_DIR (str) -- path to KEGG data Returns: tla_to_mod_to_kos (defaultdict of dicts) -- maps tla to series of dicts, keys are KEGG modules and values are lists of KOs in that module (e.g.: 'eun': {'M00001': ['K00845', etc]}, etc} etc}) mod_sets (defaultdict) -- raw data from KEGG defining which KOs are in each module tla_to_tnum (dict) -- for each genome, converts tla to tnum tnum_to_tla (dict) -- for each genome, converts tnum to tla keepers (list) -- KEGG genomes selected for inclusion in this study tnum_to_kos (dict) -- maps tnums to KOs encoded by that genome, e.g.: 'T00001': [K00001, ... 'K0000N'] n_kos_tot (int) -- total number of KOs in the dataset all_kos (list) -- list of all KOs in the dataset mod_to_ko_clean (dict )-- the functions of many modules can be "completed" by different sets of genes. Here we choose to represent each module by the most common set of genes. Dict maps each module (e.g.: 'K00001') to a list of genes (e.g.: ['K00845', ..., 'K00873']) train_data (numpy.ndarray) -- training data. Rows are genomes, columns are genes/KOs. 1's denote presence of a gene in the genome, 0's denote absence test_data (numpy.ndarray) -- test data. Rows are genomes, columns are genes/KOs. 1's denote presence of a gene in the genome, 0's denote absence train_genomes (list) -- tnums of genomes in the training set test_genomes (list) -- tnums of genomes in the test set """ tla_to_mod_to_kos, mod_sets = load_mods(DATA_DIR) # path to dir with tla_to_mod_to_kos.pkl tla_to_tnum, tnum_to_tla, keepers = genomes2include(DATA_DIR) tnum_to_kos, n_kos_tot, all_kos = load_kos(tla_to_tnum, tnum_to_tla, tla_to_mod_to_kos, DATA_DIR) mod_to_kos = create_mod_to_kos(tla_to_mod_to_kos) mod_to_ko_clean = clean_kos(mod_sets) all_kos = torch.load(BASE_DIR+"all_kos_2020-09-29.pt") tla_to_mod_to_kos = torch.load(BASE_DIR+"tla_to_mod_to_kos_2020-09-29.pt") train_data = torch.load(BASE_DIR+"kegg_v2_train_2020-09-29.pt") test_data = torch.load(BASE_DIR+"kegg_v2_test_2020-09-29.pt") train_genomes = torch.load(BASE_DIR+"kegg_v2_train_genomes_2020-09-29.pt") test_genomes = torch.load(BASE_DIR+"kegg_v2_test_genomes_2020-09-29.pt") return tla_to_mod_to_kos, mod_sets, tla_to_tnum, tnum_to_tla, keepers, tnum_to_kos, n_kos_tot, all_kos, mod_to_ko_clean, all_kos, train_data, test_data, train_genomes, test_genomes
5,356,382
def disable_app(app, base_url=DEFAULT_BASE_URL): """Disable App. Disable an app to effectively remove it from your Cytoscape session without having to uninstall it. Args: app (str): Name of app base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://127.0.0.1:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: dict: {'appName': <name of app>}, and is returned whether or not app exists Raises: requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> disable_app('stringApp') {'appName': 'stringApp'} """ verify_supported_versions(1, 3.7, base_url=base_url) res = commands.commands_post(f'apps disable app="{app}"', base_url=base_url) return narrate(res)
5,356,383
def wrangle_adni(): """This function returns three dataframes. Unpack the dataframes when calling the function. """ # ensure pandas availability for the function if 'pd' not in globals(): import pandas as pd # read in the data to a pandas dataframe adni_full = pd.read_csv('ADNIMERGE.csv', dtype='object') # set the logical orders for the two diagnoses DX = ['CN', 'MCI', 'AD'] DX_bl = ['CN', 'SMC', 'EMCI', 'LMCI', 'AD'] # initialize empty dataframe adni = pd.DataFrame() # convert datatypes to categorical, datetime, and int adni.loc[:, 'PTGENDER'] = pd.Categorical(adni_full.PTGENDER) adni.loc[:, 'DX'] = pd.Categorical(adni_full.DX, ordered=True, categories=DX) adni.loc[:, 'DX_bl'] = pd.Categorical(adni_full.DX_bl, ordered=True, categories=DX_bl) adni.loc[:, 'EXAMDATE'] = pd.to_datetime(adni_full['EXAMDATE']) adni.loc[:, 'EXAMDATE_bl'] = pd.to_datetime(adni_full['EXAMDATE_bl']) adni.loc[:, 'PTEDUCAT'] = adni_full.PTEDUCAT.astype('int') adni.loc[:, 'Month'] = adni_full.Month.astype('int') adni.loc[:, 'RID'] = adni_full.RID.astype('int') # create a list of float data columns, loop and assign float dtypes floats = ['AGE', 'CDRSB', 'ADAS11', 'ADAS13', 'MMSE', 'RAVLT_immediate', 'Hippocampus', 'Ventricles', 'WholeBrain', 'Entorhinal', 'MidTemp', 'FDG', 'AV45'] # loop and assign dtypes for i in floats: adni.loc[:, i] = adni_full[i].astype('float') # age has no baseline '_bl' equivalent if i == 'AGE': continue # every other column has a '_bl' equivalent to convert as well else: y = i + '_bl' adni.loc[:, y] = adni_full[y].astype('float') # drop columns with too much missing data adni.drop(labels=['FDG', 'FDG_bl', 'AV45', 'AV45_bl'], axis='columns', inplace=True) # set the index adni.set_index(adni.RID, inplace=True) # sort the index adni.sort_index(inplace=True) # remove redundant columns adni.drop('RID', axis='columns', inplace=True) # calculate dynamic age adni.loc[:, 'AGE_dynamic'] = adni.AGE + (adni.Month / 12) # create dataframe with only patients that have complete data adni_rmv = adni.dropna(how='any') # filter those results to only patients with multiple visits num_comp_exams = adni_rmv.groupby('RID')['EXAMDATE_bl'].count() adni_comp_filter = num_comp_exams[num_comp_exams > 1] adni_comp = adni_rmv.loc[adni_comp_filter.index] # map baseline diagnosis categories to match subsequent diagnosis categories # map new column for DX_bl to categorize based on subsequent DX categories # 'SMC' -> 'CN' due to medical definitions # combine 'LMCI' and 'EMCI' into 'MCI' mapper = {'SMC': 'CN', 'LMCI': 'MCI', 'EMCI': 'MCI', 'CN': 'CN', 'AD': 'AD'} adni_comp.loc[:, 'DX_bl2'] = adni_comp.DX_bl.map(mapper) # isolate clinical data clin_cols = ['EXAMDATE', 'EXAMDATE_bl', 'Month', 'PTGENDER', 'DX', 'DX_bl', 'PTEDUCAT', 'AGE', 'AGE_dynamic', 'CDRSB', 'CDRSB_bl', 'ADAS11', 'ADAS11_bl', 'ADAS13', 'ADAS13_bl', 'MMSE', 'MMSE_bl', 'RAVLT_immediate', 'RAVLT_immediate_bl', 'DX_bl2'] clin_data = pd.DataFrame() clin_data = adni.reindex(columns=clin_cols) # filter the scan data scan_cols = ['EXAMDATE', 'EXAMDATE_bl', 'Month', 'PTGENDER', 'DX', 'DX_bl', 'PTEDUCAT', 'AGE', 'AGE_dynamic', 'Hippocampus', 'Hippocampus_bl', 'Ventricles', 'Ventricles_bl', 'WholeBrain', 'WholeBrain_bl', 'Entorhinal', 'Entorhinal_bl', 'MidTemp', 'MidTemp_bl', 'DX_bl2'] scan_data = pd.DataFrame() scan_data = adni.reindex(columns=scan_cols) return adni_comp, clin_data, scan_data
5,356,384
def test_remove_state_no_key(): """Test ability of remove_state function to work with input of the state.""" v = get_vapordome() state_3 = State("water", T=500 * units.kelvin, v=1 * units.m ** 3 / units.kg) v.add_state(state_3) # test of repr(state) v.remove_state(state_3) # assert v.states[repr(state_3)] == None
5,356,385
def readByte(file):
    """ Read a byte from file. """
    return ord(file.read(1))
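# Usage sketch (illustration only): works with any binary file-like object.
import io

def _demo_read_byte():
    f = io.BytesIO(b'\x41\x42')
    return readByte(f), readByte(f)  # -> (65, 66)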
5,356,386
def run_main():
    """
    This is the main function
    """
    input_dir = os.path.abspath("./input")
    groundtruth_dir = os.path.join(input_dir, "ground-truth")
    if not os.path.exists(groundtruth_dir):
        os.makedirs(groundtruth_dir)

    # Initialize the test dataset txt file (list of image ids)
    dataset_dir = os.path.abspath(args.dataset_dir)
    test_txt_path = os.path.join(dataset_dir, "ImageSets", "Main", "val.txt")
    image_ids = []
    with open(test_txt_path, 'r') as f:
        for line in f.readlines():
            image_ids.append(line.strip())

    # Generate ground-truth classification and localization txt files for the test set
    annotation_dir = os.path.join(dataset_dir, "Annotations")
    for image_id in image_ids:
        gt_txt_path = os.path.join(groundtruth_dir, image_id + ".txt")
        with open(gt_txt_path, "w") as f:
            xml_path = os.path.join(annotation_dir, image_id + ".xml")
            if is_contain_object(xml_path):
                objects = parse_xml(xml_path)
                for obj in objects:
                    f.write(obj)
    print("Test Dataset GroundTruth Result Conversion Completed!")
5,356,387
def qa_tfserving(data_input, url):
    """ Full TF-Serving round trip: preprocess the input, POST it to the serving URL and postprocess the response. """
    bert_input = covert_text_to_id(data_input)
    data = json.dumps(bert_input)
    r = requests.post(url, data)
    r_text_json = json.loads(r.text)
    r_post = postprocess(r_text_json)
    return r_post
5,356,388
def contract_creation_exceptions(): """ Return create exceptions. These elements depend on the networksegments table which was renamed in the contract branch. """ return { sa.Table: ['segmenthostmappings'], sa.Index: ['segmenthostmappings'] }
5,356,389
def _column_sel_dispatch(columns_to_select, df): # noqa: F811 """ Base function for column selection. Applies only to slices. The start slice value must be a string or None; same goes for the stop slice value. The step slice value should be an integer or None. A slice, if passed correctly in a Multindex column, returns a list of tuples across all levels of the column. A list of column names is returned. """ df_columns = df.columns filtered_columns = None start_check = None stop_check = None step_check = None if not df_columns.is_unique: raise ValueError( """ The column labels are not unique. Kindly ensure the labels are unique to ensure the correct output. """ ) start, stop, step = ( columns_to_select.start, columns_to_select.stop, columns_to_select.step, ) start_check = any((start is None, isinstance(start, str))) stop_check = any((stop is None, isinstance(stop, str))) step_check = any((step is None, isinstance(step, int))) if not start_check: raise ValueError( """ The start value for the slice must either be a string or `None`. """ ) if not stop_check: raise ValueError( """ The stop value for the slice must either be a string or `None`. """ ) if not step_check: raise ValueError( """ The step value for the slice must either be an integer or `None`. """ ) start_check = any((start is None, start in df_columns)) stop_check = any((stop is None, stop in df_columns)) if not start_check: raise ValueError( """ The start value for the slice must either be `None` or exist in the dataframe's columns. """ ) if not stop_check: raise ValueError( """ The stop value for the slice must either be `None` or exist in the dataframe's columns. """ ) if start is None: start = 0 else: start = df_columns.get_loc(start) if stop is None: stop = len(df_columns) + 1 else: stop = df_columns.get_loc(stop) if start > stop: filtered_columns = df_columns[slice(stop, start + 1, step)][::-1] else: filtered_columns = df_columns[slice(start, stop + 1, step)] df_columns = None return [*filtered_columns]
5,356,390
def create_or_update(*, db_session, monitor_in: MonitorCreate) -> Monitor: """Creates or updates a monitor.""" monitor = get_by_weblink(db_session=db_session, weblink=monitor_in.weblink) if monitor: monitor = update(db_session=db_session, monitor=monitor, monitor_in=monitor_in) else: monitor = create(db_session=db_session, monitor_in=monitor_in) return monitor
5,356,391
def get_record_map(index_array, true_false_ratio):
    """Get record map.

    :param index_array: the indexes of the images
    :type index_array: numpy array
    :param true_false_ratio: the number of occurrences of true cases over the number of occurrences of false cases
    :type true_false_ratio: int or float
    :return: record_index_pair_array refers to the indexes of the image pairs,
        while record_index_pair_label_array refers to whether these two images represent the same person.
    :rtype: tuple
    """
    # Generate record_index_pair_array and record_index_pair_label_array
    record_index_pair_list = []
    record_index_pair_label_list = []
    for record_index_1, record_index_2 in itertools.combinations(
            range(index_array.size), 2):
        record_index_pair_list.append((record_index_1, record_index_2))
        record_index_pair_label_list.append(
            index_array[record_index_1] == index_array[record_index_2])
    record_index_pair_array = np.array(record_index_pair_list)
    record_index_pair_label_array = np.array(record_index_pair_label_list)

    # Do not need sampling
    if true_false_ratio is None:
        return (record_index_pair_array, record_index_pair_label_array)

    # Perform sampling based on the true_false_ratio
    pair_label_true_indexes = np.where(record_index_pair_label_array)[0]
    pair_label_false_indexes = np.where(~record_index_pair_label_array)[0]
    # np.random.choice expects an integer sample size, so truncate the ratio explicitly
    selected_pair_label_false_indexes = np.random.choice(
        pair_label_false_indexes,
        int(1.0 * pair_label_true_indexes.size / true_false_ratio),
        replace=False)
    selected_pair_label_indexes = np.hstack(
        (pair_label_true_indexes, selected_pair_label_false_indexes))

    return (record_index_pair_array[selected_pair_label_indexes, :],
            record_index_pair_label_array[selected_pair_label_indexes])
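# Usage sketch (illustration only): with sampling disabled (true_false_ratio=None) every
# pair of records is returned together with a same-person label.
import numpy as np

def _demo_get_record_map():
    pairs, labels = get_record_map(np.array([0, 0, 1]), true_false_ratio=None)
    # pairs  -> [[0, 1], [0, 2], [1, 2]]
    # labels -> [True, False, False]  (records 0 and 1 share identity 0)
    return pairs, labels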
5,356,392
def sdfGetMolBlock(mol): """ sdfGetMolBlock() returns the MOL block of the molecule """ return mol["molblock"]
5,356,393
def changelog(ctx, base, head, jira): """Get changelog between base branch and head branch""" log = git.log(base, head, merges=True) logger.debug(log) # Git changelog click.echo('\nGit changelog:\n') ticket_ids, changelog = git.changelog(log, ticket_ids=True) click.echo(changelog) # JIRA changelog if jira: click.echo('\nJIRA changelog:\n') jira_api = ctx.get('apis').get('jira') jira_changelog = jira_api.get_issue_details_list(ticket_ids) click.echo(jira_changelog) # Audit click.echo('\nAudit:\n') audit = audit_changes(base, head) click.echo(audit)
5,356,394
def get_methods(klass):
    """Get all methods, including regular, static and class methods.
    """
    methods = list()
    attributes = get_attributes(klass)
    for key, value in inspect.getmembers(klass):
        if (not (key.startswith("__") and key.endswith("__"))) and \
                (key not in attributes):
            methods.append(key)
    return methods
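# Usage sketch (illustration only): assumes the module-level get_attributes() helper
# used above returns the class's data attributes (here: 'species').
class _Dog:
    species = 'canine'

    def bark(self):
        return 'woof'

    @staticmethod
    def legs():
        return 4

    @classmethod
    def kind(cls):
        return cls.species

# get_methods(_Dog) -> ['bark', 'kind', 'legs']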
5,356,395
def get_sample_column(table_file_name, sample_name, sex='U'):
    """
    Get a VCF column as a Pandas Series for one sample.

    :param table_file_name: Name of genotyped features table file output by the genotyper after applying the
        genotyping model and annotating the genotype for no-call variants.
    :param sex: "M", "F", or "U" depending on if the sample is male, female, or unknown. For males, chrX is never
        het. For females, chrY is absent. For males or unknown, chrY is never het.
    :param sample_name: Name of the sample. This is saved to the name of the returned Series object.

    :return: Series of a column to add to the VCF for one genotyped sample.
    """
    df_gt = pd.read_csv(
        table_file_name, sep='\t', header=0,
        usecols=('#CHROM', 'CALLABLE', 'BP_REF_COUNT', 'BP_ALT_COUNT', 'HOM_REF', 'HET', 'HOM_ALT')
    )

    df_gt = df_gt.loc[:, ('#CHROM', 'CALLABLE', 'BP_REF_COUNT', 'BP_ALT_COUNT', 'HOM_REF', 'HET', 'HOM_ALT')]

    # Adjust density estimates on sex
    if sex == 'M':
        adjust_chrx_for_males(df_gt)
        adjust_chry(df_gt, False)
    elif sex == 'F':
        adjust_chry(df_gt, True)
    elif sex == 'U':
        adjust_chry(df_gt, False)

    # Set genotype (GT), genotype quality (GQ), and genotype likelihood (GL)
    # idxmax() returns the label of the most likely class ('HOM_REF', 'HET' or 'HOM_ALT'),
    # which is what the GENOTYPE_TO_GT lookup and the row[row['CLASS']] access below expect
    df_gt['CLASS'] = df_gt.apply(
        lambda row: row[['HOM_REF', 'HET', 'HOM_ALT']].idxmax() if row['CALLABLE'] else 'NO_CALL',
        axis=1
    )

    df_gt['GT'] = df_gt['CLASS'].apply(lambda gt_class: GENOTYPE_TO_GT[gt_class])

    df_gt['GQ'] = df_gt.apply(
        lambda row: (
            int(10 * -math.log10(1 - row[row['CLASS']])) if row[row['CLASS']] < 1 else 255
        ) if row['CALLABLE'] else '.',
        axis=1
    )

    df_gt['GL'] = df_gt.apply(lambda row: '{HOM_REF:.4f},{HET:.4f},{HOM_ALT:.4f}'.format(**row), axis=1)

    # Get a series representing the column to be added to the VCF
    sample_column = df_gt.apply(lambda row: '{GT}:{GQ}:{GL}:{BP_REF_COUNT:.1f}:{BP_ALT_COUNT:.1f}'.format(**row), axis=1)
    sample_column.name = sample_name

    # Return
    return sample_column
5,356,396
def find_plugin_models(): """ Find custom models """ # List of plugin objects plugins_dir = find_plugins_dir() # Go through files in plug-in directory if not os.path.isdir(plugins_dir): msg = "SasView couldn't locate Model plugin folder %r." % plugins_dir logger.warning(msg) return {} plugin_log("looking for models in: %s" % plugins_dir) # compile_file(plugins_dir) #always recompile the folder plugin logger.info("plugin model dir: %s", plugins_dir) plugins = {} for filename in os.listdir(plugins_dir): name, ext = os.path.splitext(filename) if ext == '.py' and not name == '__init__': path = os.path.abspath(os.path.join(plugins_dir, filename)) try: model = load_custom_model(path) # TODO: add [plug-in] tag to model name in sasview_model if not model.name.startswith(PLUGIN_NAME_BASE): model.name = PLUGIN_NAME_BASE + model.name plugins[model.name] = model except Exception: msg = traceback.format_exc() msg += "\nwhile accessing model in %r" % path plugin_log(msg) logger.warning("Failed to load plugin %r. See %s for details", path, PLUGIN_LOG) return plugins
5,356,397
def write_to_variable(tensor, fail_if_exists=True): """Saves a tensor for later retrieval on CPU.""" if not isinstance(tensor, tf.Tensor): raise ValueError('Expected tf.Tensor but got {}'.format(type(tensor))) # Only relevant for debugging. debug_name = 'tpu_util__' + tensor.name.split(':')[0] reuse = False if fail_if_exists else tf.compat.v1.AUTO_REUSE with tf.variable_scope(top_level_scope, reuse=reuse): variable = tf.get_variable( name=debug_name, shape=tensor.shape, dtype=tensor.dtype, trainable=False, use_resource=True) var_store[tensor] = variable with tf.control_dependencies([variable.assign(tensor)]): tensor_copy = tf.identity(tensor) var_store[tensor_copy] = variable return tensor_copy
5,356,398
def evaluate_sb_policy_against_gym_env(sb_algorithm, policy_path, gym_env_name, episodes): """CLI command for stable baselines policy evaluation against ("real") gym env.""" sb_cls = get_sb_class_for_algo(sb_algorithm.upper()) policy_path = os.path.abspath(policy_path) model = sb_cls.load(policy_path) env = gym.make(gym_env_name) step_counts = [] for i in range(episodes): step = 0 done = False obs = env.reset() while not done: step += 1 action, _state = model.predict(obs, deterministic=True) obs, reward, done, info = env.step(action) env.render() click.echo(f'Next episode done after {step} steps...') step_counts.append(step) env.reset() step = 0 click.echo(f'Maximum amount of steps was: {max(step_counts)}') click.echo(f'Minimum amount of steps was: {min(step_counts)}') click.echo(f'Mean of amount of steps was: {mean(step_counts)}') click.echo(f'Std of amount of steps was: {std(step_counts)}')
5,356,399