Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def ccf(tdm, tsuid_list_or_dataset, lag_max=None, tsuids_out=False, cut_ts=False):
    """
    This function calculates the maximum of the cross correlation function
    matrix between all ts in tsuid_list_or_dataset in a serial mode.
    The result is normalized (between -1 and 1).

    Cross correlation is a correlation between two timeseries where one is
    delayed by successive lag values. The result of CCF is a timeseries
    (correlation as a function of the lag between timeseries). This function
    keeps the maximum value of the CCF function generated and puts it in the
    matrix for the corresponding timeseries couple.

    :returns: a string matrix (whose size is equal to the number of tsuids in
              tsuid_list_or_dataset plus one line and one column for headers)
    :rtype: np.ndarray

    :param tdm: Temporal Data Manager client
    :param tsuid_list_or_dataset: list of identifiers of the time series or dataset name
    :param lag_max: maximum lag between timeseries (cf. _ccf function for more details)
    :param tsuids_out: True to fill headers with tsuids
                       False to fill headers with functional ids
    :param cut_ts: Cut the TS list to the min-length if set to True

    :type tdm: TemporalDataMgr
    :type tsuid_list_or_dataset: list of str or str
    :type lag_max: positive int
    :type tsuids_out: boolean
    :type cut_ts: boolean

    :raises TypeError: if tsuids_out is not a boolean
    """
    if type(tsuids_out) is not bool:
        raise TypeError("tsuids_out must be a boolean")

    # retrieve data from temporal data manager
    ts_data_list, tsuid_list = __retrieve_data(tdm, tsuid_list_or_dataset)

    if tsuids_out:
        ts_list = tsuid_list
    else:
        ts_list = __retrieve_func_id(tdm, tsuid_list)

    # number and size of time series
    ts_nb = len(ts_data_list)
    ts_size = len(ts_data_list[0])

    if cut_ts:
        for ts in ts_data_list:
            ts_size = min(len(ts), ts_size)
    else:
        # check time series have same length
        for ts in ts_data_list:
            if len(ts) != ts_size:
                raise ValueError('time series do not have same length')

    # matrix initialization
    matrix_corr = np.zeros([ts_nb, ts_nb])

    for index1, _ in enumerate(ts_data_list):
        matrix_corr[index1, index1] = 1

        # Conversion ts1 data from list (keeping only value column) to an array
        ts1 = np.asarray(ts_data_list[index1][:ts_size, 1])

        for index2 in range(index1 + 1, len(ts_data_list)):
            # Conversion ts2 data from list (keeping only value column) to an
            # array
            ts2 = np.asarray(ts_data_list[index2][:ts_size, 1])

            # cross correlation calculation
            # keeping the maximum absolute value between cross correlation with
            # positive and with negative lag
            ccf_fcn = _ccf(ts1, ts2, lag_max)
            max_ccf = __get_max_abs_value(ccf_fcn)

            # fill matrix with result (max of ccf is commutative)
            matrix_corr[index1, index2] = max_ccf
            matrix_corr[index2, index1] = max_ccf

    # fill final matrix with headers
    matrix = __fill_headers_to_final_matrix(matrix_corr, ts_list)

    return matrix
be3b5ccae3686fdef2e71eca87bc8131519d0398
3,651,616
def filter_zoau_installs(zoau_installs, build_info, minimum_zoau_version):
    """Sort and filter potential ZOAU installs based on build date and version.

    Args:
        zoau_installs (list[dict]): A list of found ZOAU installation paths.
        build_info (list[str]): A list of build info strings
        minimum_zoau_version (str): The minimum version of ZOAU to accept.

    Returns:
        list[dict]: A sorted and filtered list of ZOAU installation paths.
    """
    for index, zoau_install in enumerate(zoau_installs):
        zoau_install["build"] = build_info[index]

    for zoau_install in zoau_installs:
        zoau_install["build"] = _get_version_from_build_string(zoau_install.get("build", ""))

    zoau_installs.sort(key=lambda x: _version_to_tuple(x.get("build")), reverse=True)

    min_version = _version_to_tuple(minimum_zoau_version)

    valid_installs = []
    for zoau_install in zoau_installs:
        if min_version <= _version_to_tuple(zoau_install.get("build")):
            valid_installs.append(zoau_install)
            # account for the fact 1.1.0 may or may not require pip install depending on PTF
            if "1.1.0" in zoau_install.get("build", ""):
                backup_install = zoau_install.copy()
                # set build to none so we do not treat it like a pip 1.1.0 install when testing
                backup_install["build"] = ""
                valid_installs.append(backup_install)

    return valid_installs
6e6c2de214c75630091b89e55df2e57fd9be12b9
3,651,617
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0,
               parent_locking_script=None, fee=DEFAULT_FEE):
    """Build a transaction that spends parent_txid.vout[n] and produces one
    output with amount = parent_value with a fee deducted.

    Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
    """
    inputs = [{"txid": parent_txid, "vout": n}]
    my_value = parent_value - fee
    outputs = {address: my_value}
    rawtx = node.createrawtransaction(inputs, outputs)
    prevtxs = [{
        "txid": parent_txid,
        "vout": n,
        "scriptPubKey": parent_locking_script,
        "amount": parent_value,
    }] if parent_locking_script else None
    signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
    assert signedtx["complete"]
    tx = tx_from_hex(signedtx["hex"])
    return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
07f18e227f13c146c6fba0a9487c73337654a2a3
3,651,618
from datetime import datetime


def timestamp(date):
    """Get the timestamp of the `date`, python2/3 compatible

    :param datetime.datetime date: the utc date.
    :return: the timestamp of the date.
    :rtype: float
    """
    return (date - datetime(1970, 1, 1)).total_seconds()
a708448fb8cb504c2d25afa5bff6208abe1159a4
3,651,620
def pratt_arrow_risk_aversion(t, c, theta, **params):
    """Assume constant relative risk aversion"""
    return theta / c
ccbe6e74a150a4cbd3837ca3ab24bf1074d694c9
3,651,621
def parse_content_type(content_type):
    """
    Parse a content-type and its parameters into values.
    RFC 2616 sec 14.17 and 3.7 are pertinent.

    **Examples**::

        'text/plain; charset=UTF-8' -> ('text/plain', [('charset, 'UTF-8')])
        'text/plain; charset=UTF-8; level=1' ->
            ('text/plain', [('charset, 'UTF-8'), ('level', '1')])

    :param content_type: content_type to parse
    :returns: a tuple containing (content type, list of k, v parameter tuples)
    """
    parm_list = []
    if ';' in content_type:
        content_type, parms = content_type.split(';', 1)
        parms = ';' + parms
        for m in _rfc_extension_pattern.findall(parms):
            key = m[0].strip()
            value = m[1].strip()
            parm_list.append((key, value))
    return content_type, parm_list
ba7f93853299dafdd4afc342b5ba2ce7c6fdd3e7
3,651,622
def generate_athena(config):
    """Generate Athena Terraform.

    Args:
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: Athena dict to be marshalled to JSON
    """
    result = infinitedict()

    prefix = config['global']['account']['prefix']
    athena_config = config['lambda']['athena_partitioner_config']

    data_buckets = athena_partition_buckets_tf(config)
    database = athena_config.get('database_name', '{}_streamalert'.format(prefix))

    results_bucket_name = athena_query_results_bucket(config)

    queue_name = athena_config.get(
        'queue_name',
        '{}_streamalert_athena_s3_notifications'.format(prefix)
    ).strip()

    logging_bucket, _ = s3_access_logging_bucket(config)

    # Set variables for the athena partitioner's IAM permissions
    result['module']['athena_partitioner_iam'] = {
        'source': './modules/tf_athena',
        'account_id': config['global']['account']['aws_account_id'],
        'prefix': prefix,
        's3_logging_bucket': logging_bucket,
        'database_name': database,
        'queue_name': queue_name,
        'athena_data_buckets': data_buckets,
        'results_bucket': results_bucket_name,
        'lambda_timeout': athena_config['timeout'],
        'kms_key_id': '${aws_kms_key.server_side_encryption.key_id}',
        'function_role_id': '${module.athena_partitioner_lambda.role_id}',
        'function_name': '${module.athena_partitioner_lambda.function_name}',
        'function_alias_arn': '${module.athena_partitioner_lambda.function_alias_arn}',
    }

    # Set variables for the Lambda module
    result['module']['athena_partitioner_lambda'] = generate_lambda(
        '{}_streamalert_{}'.format(prefix, ATHENA_PARTITIONER_NAME),
        'streamalert.athena_partitioner.main.handler',
        athena_config,
        config,
        tags={
            'Subcomponent': 'AthenaPartitioner'
        }
    )

    return result
4fd3a18e5220e82a04451271f1ea8004978b4c65
3,651,624
def _angular_rate_to_rotvec_dot_matrix(rotvecs):
    """Compute matrices to transform angular rates to rot. vector derivatives.

    The matrices depend on the current attitude represented as a rotation
    vector.

    Parameters
    ----------
    rotvecs : ndarray, shape (n, 3)
        Set of rotation vectors.

    Returns
    -------
    ndarray, shape (n, 3, 3)
    """
    norm = np.linalg.norm(rotvecs, axis=1)
    k = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
    mask = ~mask
    nm = norm[mask]
    k[mask] = 1/12 + 1/720 * nm**2

    skew = _create_skew_matrix(rotvecs)

    result = np.empty((len(rotvecs), 3, 3))
    result[:] = np.identity(3)
    result[:] += 0.5 * skew
    result[:] += k[:, None, None] * np.matmul(skew, skew)

    return result
c0d468901ec7dc4d6da7f5eff7b95ac3fc176901
3,651,625
from typing import Any


def get_all_learners() -> Any:
    """Get all learner configurations which are prepared."""
    return {
        "learner_types": sorted(
            [
                possible_dir.name
                for possible_dir in LEARNERS_DIR.iterdir()
                if possible_dir.is_dir()
            ]
        )
    }
d05fd8d9da820061cea29d25002513e778c2b367
3,651,626
def getdate(targetconnection, ymdstr, default=None):
    """Convert a string of the form 'yyyy-MM-dd' to a Date object.

    The returned Date is in the given targetconnection's format.

    Arguments:

    - targetconnection: a ConnectionWrapper whose underlying module's
      Date format is used
    - ymdstr: the string to convert
    - default: The value to return if the conversion fails
    """
    try:
        (year, month, day) = ymdstr.split('-')
        modref = targetconnection.getunderlyingmodule()
        return modref.Date(int(year), int(month), int(day))
    except Exception:
        return default
21d27c3ef4e99b28b16681072494ce573e592255
3,651,627
def thermal_dm(n, u):
    """
    return the thermal density matrix for a boson

    n: integer
        dimension of the Fock space
    u: float
        reduced temperature, omega/k_B T
    """
    nlist = np.arange(n)
    diags = exp(- nlist * u)
    diags /= np.sum(diags)
    rho = lil_matrix(n)
    rho.setdiag(diags)
    return rho.tocsr()
80631a0575176e16e8832cb6c136030bcd589c58
3,651,628
from zope.configuration import xmlconfig, config


def zcml_strings(dir, domain="zope", site_zcml=None):
    """Retrieve all ZCML messages from `dir` that are in the `domain`."""
    # Load server-independent site config
    context = config.ConfigurationMachine()
    xmlconfig.registerCommonDirectives(context)
    context.provideFeature("devmode")
    context = xmlconfig.file(site_zcml, context=context, execute=False)

    return context.i18n_strings.get(domain, {})
23c62c50b313f53b25ad151ebccd5808bf7bad59
3,651,630
def const_p(a: C) -> Projector[C]:
    """
    Make a projector that always returns the same still frame
    """
    return lambda _: a
d73fb818f0606f9a64cb0076c99ff57c0b3bb042
3,651,631
import json


def get_s3_bucket(bucket_name, s3):
    """
    Takes the s3 and bucket_name and returns s3 bucket.
    If it does not exist, it will create the bucket with permissions.
    """
    bucket_name = bucket_name.lower().replace('/', '-')
    bucket = s3.Bucket(bucket_name)
    exists = True
    try:
        s3.meta.client.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            exists = False

    if exists is False:
        s3.create_bucket(Bucket=bucket_name, ACL='public-read')

        # We need to set an S3 policy for our bucket to
        # allow anyone read access to our bucket and files.
        # If we do not set this policy, people will not be
        # able to view our S3 static web site.
        bucket_policy = s3.BucketPolicy(bucket_name)
        policy_payload = {
            "Version": "2012-10-17",
            "Statement": [{
                "Sid": "Allow Public Access to All Objects",
                "Effect": "Allow",
                "Principal": "*",
                "Action": "s3:GetObject",
                "Resource": "arn:aws:s3:::%s/*" % (bucket_name)
            }]
        }
        # Add the policy to the bucket
        bucket_policy.put(Policy=json.dumps(policy_payload))

        # Make our new S3 bucket a static website
        bucket_website = s3.BucketWebsite(bucket_name)

        # Create the configuration for the website
        website_configuration = {
            'ErrorDocument': {'Key': 'error.html'},
            'IndexDocument': {'Suffix': 'index.html'},
        }
        bucket_website.put(WebsiteConfiguration=website_configuration)

        bucket = s3.Bucket(bucket_name)

    return bucket
67e9ede766989894aa86d2af1c766a57c4ed7116
3,651,632
def rf_render_ascii(tile_col):
    """Render ASCII art of tile"""
    return _apply_column_function('rf_render_ascii', tile_col)
d697014f019b303c3c7de0e874e8d321c5d96f7a
3,651,633
import json


def index():
    """ Display productpage with normal user and test user buttons"""
    global productpage

    table = json2html.convert(json=json.dumps(productpage),
                              table_attributes="class=\"table table-condensed table-bordered table-hover\"")

    return render_template('index.html', serviceTable=table)
e27de5745c9e20f8942ea1ae3b07a4afa932b0f3
3,651,634
def student_classes(id):
    """
    Show students registered to class

    * display list of all students (GET)
    """
    template = "admin/class_students.html"

    if not valid_integer(id):
        return (
            render_template(
                "errors/custom.html", title="400", message="Id must be integer"
            ),
            400,
        )

    school_class = dict_sql_query(
        f"SELECT * FROM school_classes WHERE id={id}", fetchone=True
    )
    if not school_class:
        return (
            render_template(
                "errors/custom.html", title="400", message="Class does not exist."
            ),
            400,
        )

    # show students with class defined as this one
    students = []
    for student in dict_sql_query(
        f"SELECT * FROM students WHERE class_id={school_class['id']}"
    ):
        students.append(
            {
                "student": student,
                "activity_name": dict_sql_query(
                    f"SELECT name FROM activities WHERE id={student['chosen_activity']}",
                    fetchone=True,
                )["name"]
                if student["chosen_activity"]
                else "Ej valt",
            }
        )

    return render_template(template, school_class=school_class, students=students)
b431a21e39c97cbcc21d161a411cc9f3a3746cc8
3,651,635
def f_score(overlap_count, gold_count, guess_count, f=1):
    """Compute the f1 score.

    :param overlap_count: `int` The number of true positives.
    :param gold_count: `int` The number of gold positives (tp + fn)
    :param guess_count: `int` The number of predicted positives (tp + fp)
    :param f: `int` The beta term to weight precision vs recall.

    :returns: `float` The f score
    """
    beta_sq = f * f
    if guess_count == 0:
        return 0.0
    p = precision(overlap_count, guess_count)
    r = recall(overlap_count, gold_count)
    if p == 0.0 or r == 0.0:
        return 0.0
    f = (1. + beta_sq) * (p * r) / (beta_sq * p + r)
    return f
6c7c0e3e58aa7fe4ca74936ce9029b6968ed6ee3
3,651,637
import math


def phi(n):
    """Calculate phi using euler's product formula."""
    assert math.sqrt(n) < primes[-1], "Not enough primes to deal with " + n
    # For details, check:
    # http://en.wikipedia.org/wiki/Euler's_totient_function#Euler.27s_product_formula
    prod = n
    for p in primes:
        if p > n:
            break
        if n % p == 0:
            prod *= 1 - (1 / p)
    return int(prod)
d17f0b5901602a9a530427da2b37d0402ef426ce
3,651,638
import logging


def run_bert_pretrain(strategy, custom_callbacks=None):
    """Runs BERT pre-training."""
    bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
    if not strategy:
        raise ValueError('Distribution strategy is not specified.')

    # Runs customized training loop.
    logging.info('Training using customized training loop TF 2.0 with distributed'
                 'strategy.')

    performance.set_mixed_precision_policy(common_flags.dtype(),
                                           use_experimental_api=False)

    # Only when explicit_allreduce = True, post_allreduce_callbacks and
    # allreduce_bytes_per_pack will take effect. optimizer.apply_gradients() no
    # longer implicitly allreduce gradients, users manually allreduce gradient and
    # pass the allreduced grads_and_vars to apply_gradients().
    # With explicit_allreduce = True, clip_by_global_norm is moved to after
    # allreduce.
    return run_customized_training(
        strategy,
        bert_config,
        FLAGS.init_checkpoint,  # Used to initialize only the BERT submodel.
        FLAGS.max_seq_length,
        FLAGS.max_predictions_per_seq,
        FLAGS.model_dir,
        FLAGS.num_steps_per_epoch,
        FLAGS.steps_per_loop,
        FLAGS.num_train_epochs,
        FLAGS.learning_rate,
        FLAGS.warmup_steps,
        FLAGS.end_lr,
        FLAGS.optimizer_type,
        FLAGS.input_files,
        FLAGS.train_batch_size,
        FLAGS.use_next_sentence_label,
        FLAGS.train_summary_interval,
        custom_callbacks=custom_callbacks,
        explicit_allreduce=FLAGS.explicit_allreduce,
        pre_allreduce_callbacks=[
            model_training_utils.clip_by_global_norm_callback
        ],
        allreduce_bytes_per_pack=FLAGS.allreduce_bytes_per_pack)
16397fb83bb02e2f01c716f97f6f461e4675c319
3,651,639
import json


def add_mutes(guild_id: int, role_id: int, user_id: int, author_id: int, datetime_to_parse: str):
    """
    Add a temporary mute to a user.
    NOTE: datetime_to_parse should be a string like: "1 hour 30 minutes"
    """
    with open("data/unmutes.json", "r+", newline='\n', encoding='utf-8') as temp_file:
        mutes = json.load(temp_file)

    new_mute_data = (user_id, role_id, guild_id)
    str_dt_obj = parse_times(datetime_to_parse)

    # if the script made it this far, this is real we have to store mute data
    if str_dt_obj not in mutes:
        mutes[str_dt_obj] = []
    mutes[str_dt_obj].append(new_mute_data)
    mute_index = len(mutes[str_dt_obj]) - 1

    if str(guild_id) not in mutes:
        mutes[str(guild_id)] = {}
    if str(user_id) in mutes[str(guild_id)]:
        mutes[str(guild_id)].pop(str(user_id))
    if not str(user_id) in mutes[str(guild_id)]:
        mutes[str(guild_id)][str(user_id)] = []
    mutes[str(guild_id)][str(user_id)] = [str_dt_obj, author_id, mute_index]

    json.dump(mutes, open("data/unmutes.json", "w+", newline='\n', encoding='utf-8'))
    return str_dt_obj
    # Don't worry I can't read this mess either.
8c762f56217ee940d8803e069f1b3bce47629a2e
3,651,640
def operation_dict(ts_epoch, request_dict):
    """An operation as a dictionary."""
    return {
        "model": request_dict,
        "model_type": "Request",
        "args": [request_dict["id"]],
        "kwargs": {"extra": "kwargs"},
        "target_garden_name": "child",
        "source_garden_name": "parent",
        "operation_type": "REQUEST_CREATE",
    }
e7b63d79c6de73616b39e2713a0ba2da6f9e2a25
3,651,641
def memory_index(indices, t):
    """Location of an item in the underlying memory."""
    memlen, itemsize, ndim, shape, strides, offset = t
    p = offset
    for i in range(ndim):
        p += strides[i] * indices[i]
    return p
ed97592aa5444cfd6d6894b042b5b103d2de6afc
3,651,643
def createExpData(f, xVals):
    """Assumes f is an exponential function of one argument
       xVals is an array of suitable arguments for f
       Returns array containing results of applying f to the
       elements of xVals"""
    yVals = []
    for i in range(len(xVals)):
        yVals.append(f(xVals[i]))
    return pylab.array(xVals), pylab.array(yVals)
79c6575ec07579e792e77b65960992a48837f2e9
3,651,644
from typing import Tuple

import math


def discrete_one_samp_ks(distribution1: np.array, distribution2: np.array,
                         num_samples: int) -> Tuple[float, bool]:
    """Uses the one-sample Kolmogorov-Smirnov test to determine if the empirical
    results in distribution1 come from the distribution represented in distribution2

    :param distribution1: empirical distribution (numpy array)
    :param distribution2: reference distribution (numpy array)
    :param num_samples: number of samples used to generate distribution1
    :return: a tuple (D, D<D_{alpha})
    """
    cutoff = 1.36 / math.sqrt(num_samples)
    ecdf1 = np.array([sum(distribution1[:i + 1]) for i in range(len(distribution1))])
    ecdf2 = np.array([sum(distribution2[:i + 1]) for i in range(len(distribution2))])
    max_diff = np.absolute(ecdf1 - ecdf2).max()
    return max_diff, max_diff < cutoff
37e85c695f0e33c70566e5462fb55e7882fbcd02
3,651,645
def _get_product_refs(pkgs):
    """Returns a list of product references as declared in the specified packages list.

    Args:
        pkgs: A `list` of package declarations (`struct`) as created by
            `packages.create()`, `packages.pkg_json()` or `spm_pkg()`.

    Returns:
        A `list` of product reference (`string`) values.
    """
    return [
        refs.create(ref_types.product, pkg.name, prd)
        for pkg in pkgs
        for prd in pkg.products
    ]
f545f261e237dfe447533c89a489abb863b994e8
3,651,646
def merge_intervals(interval_best_predictors):
    """
    Merge intervals with the same best predictor
    """
    predictor2intervals = defaultdict(set)

    for interval, best_predictor in interval_best_predictors.items():
        predictor2intervals[best_predictor].update(interval)

    merged_intervals = {best_predictor: max(interval_points) - min(interval_points)
                        for best_predictor, interval_points in predictor2intervals.items()}

    return merged_intervals
6ebd0b5b26193c5d3885e603ab3bae68d395d6b1
3,651,647
def build_pixel_sampler(cfg, **default_args):
    """Build pixel sampler for segmentation map."""
    return build_module_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
f7d687b80c7bb3cfa266b65691574e40291021d2
3,651,648
def solution_to_schedule(solution, events, slots):
    """Convert a schedule from solution to schedule form

    Parameters
    ----------
    solution : list or tuple
        of tuples of event index and slot index for each scheduled item
    events : list or tuple
        of :py:class:`resources.Event` instances
    slots : list or tuple
        of :py:class:`resources.Slot` instances

    Returns
    -------
    list
        A list of instances of :py:class:`resources.ScheduledItem`
    """
    return [
        ScheduledItem(
            event=events[item[0]],
            slot=slots[item[1]]
        )
        for item in solution
    ]
7470849f90e445f8146c561a49646d4bd8bbb886
3,651,649
def flip_tiles(tiles):
    """
    Initially all tiles are white.
    Every time, a tile is visited based on the directions, it is flipped
    (to black, or to white again).
    The directions are represented in (x,y) coordinates starting from
    reference tile at (0,0).
    Based on the given directions to each tile starting from the reference
    tile, the coordinates of the tile is found and added to the set of black
    tiles. If the tile is already a black tile, it is flipped and thus removed
    from the set.
    This function returns the set of black tiles.
    """
    black_tiles = set()
    for directions_to_tile in tiles:
        x, y = (0, 0)
        for direction in directions_to_tile:
            x, y = get_coordinates(x, y, direction)
        found_tile = (x, y)
        if found_tile not in black_tiles:
            black_tiles.add(found_tile)
        else:
            black_tiles.remove(found_tile)
    return black_tiles
91628f0ae4d1713f1fa12dad34d2a3e2f97b663e
3,651,650
def version() -> int:
    """Return the version number of the libpq currently loaded.

    The number is in the same format of `~psycopg.ConnectionInfo.server_version`.

    Certain features might not be available if the libpq library used is too old.
    """
    return impl.PQlibVersion()
cc8360372787d08f3852cb8d908db780fe3c9573
3,651,651
import scipy


def feature_predictors_from_ensemble(features, verbose=False):
    """generates a dictionary of the form
    {"offset":offset_predictor, "sigma":sigma_predictor, ...}
    where the predictors are generated from the center and spread statistics
    of the feature ensemble.

    features: list
        the feature objects
    """
    lparams = np.asarray([f.profile.get_parameters() for f in features])
    cent_wvs = np.asarray([f.wv for f in features])
    rel_norms = np.asarray([f.relative_continuum for f in features])
    delta_wvs = np.asarray([np.mean(scipy.gradient(f.data_sample.wv)) for f in features])
    dwv_over_wv = delta_wvs / cent_wvs
    med_inv_r = np.median(dwv_over_wv)

    sig_over_wv = lparams[:, 1] / cent_wvs
    sig_med = np.median(sig_over_wv)
    sig_mad = np.median(np.abs(sig_over_wv - sig_med))
    if verbose:
        print("sigma median", sig_med, "sigma mad", sig_mad)

    vel_offs = lparams[:, 0] / cent_wvs
    vel_med = np.median(vel_offs)
    vel_mad = np.median(np.abs(vel_offs - vel_med))
    if verbose:
        print("velocity median", vel_med, "velocity mad", vel_mad)

    gam_med = np.median(np.abs(lparams[:, 2]))
    gam_mad = np.median(np.abs(lparams[:, 2] - gam_med))
    if verbose:
        print("gamma median", gam_med, "gamma mad", gam_mad)

    rel_med = np.median(rel_norms)
    rel_mad = np.median(np.abs(rel_norms - rel_med))
    if verbose:
        print("rel_norm median", gam_med, "rel_norm mad", gam_mad)

    predictors = {}
    offset_predictor = WavelengthScaledGaussianPredictor(vel_med, 1.4*vel_mad)
    sigma_predictor = WavelengthScaledGaussianPredictor(sig_med, 1.4*sig_mad + 0.5*med_inv_r)
    gamma_predictor = GaussianPredictor(gam_med, 1.4*gam_mad + 0.1*np.median(delta_wvs))
    rel_norm_predictor = GaussianPredictor(1.0, 0.01)
    predictors["offset"] = offset_predictor
    predictors["sigma"] = sigma_predictor
    predictors["gamma"] = gamma_predictor
    predictors["rel_norm"] = rel_norm_predictor

    return predictors
ea26d0640fa6dd8b948c8620b266519137805979
3,651,652
import requests


def remoteLoggingConfig(host, args, session):
    """
    Called by the logging function. Configures remote logging (rsyslog).

    @param host: string, the hostname or IP address of the bmc
    @param args: contains additional arguments used by the logging sub command
    @param session: the active session to use
    @param args.json: boolean, if this flag is set to true, the output will
           be provided in json format for programmatic consumption
    """
    url = "https://" + host + "/xyz/openbmc_project/logging/config/remote"
    try:
        res = session.put(url + '/attr/Port', headers=jsonHeader,
                          json={"data": args.port}, verify=False, timeout=baseTimeout)
        res = session.put(url + '/attr/Address', headers=jsonHeader,
                          json={"data": args.address}, verify=False, timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        return(connectionErrHandler(args.json, "Timeout", None))
    return res.text
04417970f671f79af0157d82ea048b5c4f8f957d
3,651,653
import numpy as np
from typing import Union
import pathlib


def _merge_3d_t1w(filename: Union[str, PathLike]) -> pathlib.Path:
    """
    Merges T1w images that have been split into two volumes

    Parameters
    ----------
    filename : str or pathlib.Path
        Path to T1w image that needs to be merged

    Returns
    -------
    filename : pathlib.Path
        Path to merged T1w image
    """
    filename = pathlib.Path(filename).resolve()
    img = nib.load(str(filename))
    if not (len(img.shape) == 4 and img.shape[-1] > 1):
        return

    # split data along fourth dimension and then concatenate along third
    imdata = img.get_data()
    cat = [d.squeeze() for d in np.split(imdata, imdata.shape[-1], axis=-1)]
    imdata = np.concatenate(cat, axis=-1)

    new_img = img.__class__(imdata, img.affine, img.header)
    nib.save(new_img, filename)

    return filename
f1eae741c270553ee18c6f4cc2eb2484215617db
3,651,654
def get_partial_results(case_name, list_of_variables):
    """ Get a dictionary with the variable names and the time series for `list_of_variables`
    """
    reader = get_results(case_name)
    d = dict()
    read_time = True
    for v in list_of_variables:
        if read_time:
            d['time'] = reader.values(v)[0]
            read_time = False
        d[v] = reader.values(v)[1]
    return d
43954296a11bea2c8a04f2e65a709c56ea14d00a
3,651,655
def take_with_time(self, duration, scheduler=None):
    """Takes elements for the specified duration from the start of the
    observable source sequence, using the specified scheduler to run timers.

    Example:
    res = source.take_with_time(5000, [optional scheduler])

    Description:
    This operator accumulates a queue with a length enough to store elements
    received during the initial duration window. As more elements are
    received, elements older than the specified duration are taken from the
    queue and produced on the result sequence. This causes elements to be
    delayed with duration.

    Keyword arguments:
    duration -- {Number} Duration for taking elements from the start of the
        sequence.
    scheduler -- {Scheduler} Scheduler to run the timer on. If not specified,
        defaults to rx.Scheduler.timeout.

    Returns {Observable} An observable sequence with the elements taken
    during the specified duration from the start of the source sequence.
    """
    source = self
    scheduler = scheduler or timeout_scheduler

    def subscribe(observer):
        def action(scheduler, state):
            observer.on_completed()

        disposable = scheduler.schedule_relative(duration, action)
        return CompositeDisposable(disposable, source.subscribe(observer))
    return AnonymousObservable(subscribe)
d96ce7ae892fe6700b9f14cccbb01e3aa45b9b76
3,651,656
def add_label(hdf5_filename, key, peak, label):
    """
    Function that adds a label to a peak dataset in the hdf5 file.
    It has to be iterated over every single peak.

    Parameters:
        hdf5_filename (string): filename of experimental file
        key (string): key within `hdf5_filename` of experimental file
        peak (string): string name of 'Peak_0#" associated with the peak list
            containing tuples of the x_data (wavenumber) and y_data (counts)
            values of the peaks.
        label (string): string name of an individual label from internal
            function unknown_peak_assignment that is used in lineidplot.

    Returns:
        df (DataFrame): DataFrame which contains the peak fitted data and peak
            descriptors of each classified peak based on the fed-in known
            spectra.
    """
    # Handling errors in inputs.
    if not isinstance(hdf5_filename, str):
        raise TypeError("""Passed value of `hdf5_filename` is not a string!
        Instead, it is: """ + str(type(hdf5_filename)))
    if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
        raise TypeError("""`hdf5_filename` is not type = .hdf5!
        Instead, it is: """ + hdf5_filename.split('/')[-1].split('.')[-1])
    if not isinstance(key, str):
        raise TypeError("""Passed value of `key` is not a str!
        Instead, it is: """ + str(type(key)))
    if not isinstance(peak, str):
        raise TypeError("""Passed value of `peak` is not a string!
        Instead, it is: """ + str(type(peak)))
    if not isinstance(label, str):
        raise TypeError("""Passed value of `label` is not a string!
        Instead, it is: """ + str(type(label)))

    # open hdf5 file as read/write
    hdf5 = h5py.File(hdf5_filename, 'r+')

    # extract existing data from peak dataset
    peak_data = list(hdf5['{}/{}'.format(key, peak)][0])[:7]
    # print(peak_data)

    # make a new tuple that contains the orginal data as well as the label
    label_tuple = (label,)
    data = tuple(peak_data) + label_tuple

    # delete the old dataset so the new one can be saved
    del hdf5['{}/{}'.format(key, peak)]

    # define a custom datatype that allows for a string as the the last tuple element
    my_datatype = np.dtype([('fraction', np.float),
                            ('center', np.float),
                            ('sigma', np.float),
                            ('amplitude', np.float),
                            ('fwhm', np.float),
                            ('height', np.float),
                            ('area under the curve', np.float),
                            ('label', h5py.special_dtype(vlen=str))])

    # recreate the old dataset in the hdf5 file
    dataset = hdf5.create_dataset('{}/{}'.format(key, peak),
                                  (1,), dtype=my_datatype)

    # apply custom dtype to data tuple
    # print(dataset)
    # print(data)
    # print(my_datatype)
    data_array = np.array(data, dtype=my_datatype)

    # write new values to the blank dataset
    dataset[...] = data_array
    # print(dataset)

    hdf5.close()

    df = pd.DataFrame(data=data)

    return df
b903d59ae8e18adf2942227fc6b4c0e207dbde78
3,651,657
def _infer_color_variable_kind(color_variable, data):
    """Determine whether color_variable is array, pandas dataframe, callable,
    or scikit-learn (fit-)transformer."""
    if hasattr(color_variable, "dtype") or hasattr(color_variable, "dtypes"):
        if len(color_variable) != len(data):
            raise ValueError(
                "color_variable and data must have the same length.")
        color_variable_kind = "scalars"
    elif hasattr(color_variable, "transform"):
        color_variable_kind = "transformer"
    elif hasattr(color_variable, "fit_transform"):
        color_variable_kind = "fit_transformer"
    elif callable(color_variable):
        color_variable_kind = "callable"
    elif color_variable is None:
        color_variable_kind = "none"
    else:  # Assume color_variable is a selection of columns
        color_variable_kind = "else"

    return color_variable_kind
a1a21c6df4328331754f9fb960e64cf8bfe09be7
3,651,658
import random


def Dense(name, out_dim, W_init=stax.glorot(), b_init=stax.randn()):
    """Layer constructor function for a dense (fully-connected) layer."""
    def init_fun(rng, example_input):
        input_shape = example_input.shape
        k1, k2 = random.split(rng)
        W, b = W_init(k1, (out_dim, input_shape[-1])), b_init(k2, (out_dim,))
        return W, b

    def apply_fun(params, inputs):
        W, b = params
        return np.dot(W, inputs) + b

    return core.Layer(name, init_fun, apply_fun).bind
a2b20961ff3fd23e0cd87f200d1c575d9788e076
3,651,660
from datetime import datetime


def datetime_to_epoch(date_time: datetime) -> int:
    """Convert a datetime object to an epoch integer (seconds)."""
    return int(date_time.timestamp())
73767c663d66464420594e90a438687c9363b884
3,651,661
def parse_arguments():
    """
    Merge the scar.conf parameters, the cmd parameters and the yaml
    file parameters in a single dictionary.

    The precedence of parameters is CMD >> YAML >> SCAR.CONF
    That is, the CMD parameter will override any other configuration,
    and the YAML parameters will override the SCAR.CONF settings
    """
    config_args = ConfigFileParser().get_properties()
    func_call, cmd_args = CommandParser().parse_arguments()
    if 'conf_file' in cmd_args['scar'] and cmd_args['scar']['conf_file']:
        yaml_args = FileUtils.load_yaml(cmd_args['scar']['conf_file'])
        # YAML >> SCAR.CONF
        merged_args = fdl.merge_conf(config_args, yaml_args)
        merged_args = fdl.merge_cmd_yaml(cmd_args, merged_args)
    else:
        # CMD >> SCAR.CONF
        merged_args = fdl.merge_conf(config_args, cmd_args)
    # self.cloud_provider.parse_arguments(merged_args)
    FileUtils.create_tmp_config_file(merged_args)
    return func_call
a34525ed55514db2133c5c39d273ea48af8f8c54
3,651,662
from typing import BinaryIO


def check_signature(stream: BinaryIO) -> str:
    """
    Check signature of the model file and return characters used by the model.
    The characters returned are sorted in lexicographical order.
    """
    uzmodel_tag = stream.read(8)
    if uzmodel_tag != b'UZMODEL ':
        raise IOError('invalid uzmodel_tag')
    uzmodel_version = read_int(stream)
    if uzmodel_version == 1:
        ssv = 0
    elif uzmodel_version == 2:
        ssv = read_int(stream)
    else:
        raise IOError('invalid uzmodel_version')
    if ssv == 0:
        chars = ''.join(map(chr, chain(STD, AFT, EXA, EXB, SPC)))
    elif ssv == 1:
        chars = ''.join(map(chr, chain(STD, AFT, EXA, EXB)))
    else:
        raise ValueError('invalid ssv')

    bmarkov_tag = stream.read(8)
    if bmarkov_tag != b'BMARKOV ':
        raise IOError('invalid bmarkov_tag')
    bmarkov_version = read_int(stream)
    if bmarkov_version != 0:
        raise IOError('invalid bmarkov_version')

    return chars
3a8d2e646a2ffe08a471f5447d8e790aefd6fc68
3,651,663
def Validate(expected_schema, datum):
    """Determines if a python datum is an instance of a schema.

    Args:
        expected_schema: Schema to validate against.
        datum: Datum to validate.
    Returns:
        True if the datum is an instance of the schema.
    """
    schema_type = expected_schema.type
    if schema_type == 'null':
        return datum is None
    elif schema_type == 'boolean':
        return isinstance(datum, bool)
    elif schema_type == 'string':
        return isinstance(datum, str)
    elif schema_type == 'bytes':
        return isinstance(datum, bytes)
    elif schema_type == 'int':
        return (isinstance(datum, int)
                and (INT_MIN_VALUE <= datum <= INT_MAX_VALUE))
    elif schema_type == 'long':
        return (isinstance(datum, int)
                and (LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE))
    elif schema_type in ['float', 'double']:
        return (isinstance(datum, int) or isinstance(datum, float))
    elif schema_type == 'fixed':
        return isinstance(datum, bytes) and (len(datum) == expected_schema.size)
    elif schema_type == 'enum':
        return datum in expected_schema.symbols
    elif schema_type == 'array':
        return (isinstance(datum, list)
                and all(Validate(expected_schema.items, item) for item in datum))
    elif schema_type == 'map':
        return (isinstance(datum, dict)
                and all(isinstance(key, str) for key in datum.keys())
                and all(Validate(expected_schema.values, value)
                        for value in datum.values()))
    elif schema_type in ['union', 'error_union']:
        return any(Validate(union_branch, datum)
                   for union_branch in expected_schema.schemas)
    elif schema_type in ['record', 'error', 'request']:
        return (isinstance(datum, dict)
                and all(Validate(field.type, datum.get(field.name))
                        for field in expected_schema.fields))
    else:
        raise AvroTypeException('Unknown Avro schema type: %r' % schema_type)
22ed46f2d82f9c4ea53fdd707553d54958a20814
3,651,664
def get_main_play_action(action: PlayerAction) -> PlayerAction:
    """
    Gets the main play, e.g., FLYOUT or SINGLE
    :param action:
    :return:
    """
    print("Searching for main play")
    # find out if the string contains any of the allowed actions
    for i in PlayerActionEnum:
        if i.value in action.action_text:
            print(f"\tFound {i.value}!")
            action.action_type = i
            action.action_text = action.action_text.replace(i.value, '')
            break
    return action
ec85c305509b5f6f88eb157e7a110dbed7ad0ab4
3,651,665
from functools import reduce


def inet_aton(s):
    """Convert a dotted-quad to an int."""
    try:
        addr = list(map(int, s.split('.')))
        addr = reduce(lambda a, b: a+b, [addr[i] << (3-i)*8 for i in range(4)])
    except (ValueError, IndexError):
        raise ValueError('illegal IP: {0}'.format(s))
    return addr
abc16c14e416f55c9ae469b4b9c1958df265433c
3,651,666
def helper():
    """I'm useful helper"""
    data = {
        "31 Dec 2019": "Wuhan Municipal Health Commission, China, reported a cluster of cases of pneumonia in Wuhan, Hubei Province. A novel coronavirus was eventually identified.",
        "1 January 2020": "WHO had set up the IMST (Incident Management Support Team) across the three levels of the organization: headquarters, regional headquarters and country level, putting the organization on an emergency footing for dealing with the outbreak.",
        "4 January 2020": "WHO reported on social media that there was a cluster of pneumonia cases - with no deaths - in Wuhan, Hubei province."
    }
    return data
1f0f58505ce4179d56b2bf6e4cb29e42cdd7cfc9
3,651,668
def canonicalize_specification(expr, syn_ctx, theory):
    """Performs a bunch of operations:
    1. Checks that the expr is "well-bound" to the syn_ctx object.
    2. Checks that the specification has the single-invocation property.
    3. Gathers the set of synth functions (should be only one).
    4. Gathers the variables used in the specification.
    5. Converts the specification to CNF (as part of the single-invocation test)
    6. Given that the spec is single invocation, rewrites the CNF spec (preserving
       and sat) by introducing new variables that correspond to a uniform way of
       invoking the (single) synth function

    Returns a tuple containing:
    1. A list of 'variable_info' objects corresponding to the variables used in the spec
    2. A list of synth functions (should be a singleton list)
    3. A list of clauses corresponding to the CNF specification
    4. A list of NEGATED clauses
    5. A list containing the set of formal parameters that all appearances of the
       synth functions are invoked with.
    """
    check_expr_binding_to_context(expr, syn_ctx)

    clauses, cnf_expr = to_cnf(expr, theory, syn_ctx)

    synth_function_set = gather_synth_functions(expr)
    synth_function_list = list(synth_function_set)
    num_funs = len(synth_function_list)

    orig_variable_set = gather_variables(expr)
    orig_variable_list = [x.variable_info for x in orig_variable_set]
    orig_variable_list.sort(key=lambda x: x.variable_name)

    # check single invocation/separability properties
    if (not check_single_invocation_property(clauses, syn_ctx)):
        raise basetypes.ArgumentError('Spec:\n%s\nis not single-invocation!' %
                                      exprs.expression_to_string(expr))

    (intro_clauses, intro_vars) = _intro_new_universal_vars(clauses, syn_ctx,
                                                            synth_function_list[0])

    # ensure that the intro_vars at the head of the list
    # Arjun: Why? Most likely not necessary
    variable_list = [x.variable_info for x in intro_vars] + orig_variable_list
    num_vars = len(variable_list)
    for i in range(num_vars):
        variable_list[i].variable_eval_offset = i
    num_funs = len(synth_function_list)
    for i in range(num_funs):
        synth_function_list[i].synth_function_id = i

    if len(intro_clauses) == 1:
        canon_spec = intro_clauses[0]
    else:
        canon_spec = syn_ctx.make_function_expr('and', *intro_clauses)

    canon_clauses = []
    for ic in intro_clauses:
        if exprs.is_application_of(ic, 'or'):
            disjuncts = ic.children
        else:
            disjuncts = [ic]
        canon_clauses.append(disjuncts)

    return (variable_list, synth_function_list, canon_spec,
            canon_clauses, intro_vars)
99613fb5cc78b53ca094ffb46cc927f05d5f74d4
3,651,669
def human_time(seconds, granularity=2):
    """Returns a human readable time string like "1 day, 2 hours"."""
    result = []

    for name, count in _INTERVALS:
        value = seconds // count
        if value:
            seconds -= value * count
            if value == 1:
                name = name.rstrip("s")
            result.append("{} {}".format(int(value), name))
        else:
            # Add a blank if we're in the middle of other values
            if len(result) > 0:
                result.append(None)

    if not result:
        if seconds < 1.0:
            return "%.2f seconds" % seconds
        else:
            if seconds == 1:
                return "1 second"
            else:
                return "%d seconds" % seconds

    return ", ".join([x for x in result[:granularity] if x is not None])
25d184982e5c0c2939814938f09a72ab2d46d270
3,651,670
def cmorlet_wavelet(x, fs, freq_vct, n=6, normalization=True):
    """Perform the continuous wavelet transform (CWT) using the complex Morlet wavelet.

    Parameters
    ----------
    x : 1D array with shape (n_samples) or
        2D array with shape (n_samples, n_channels)
    fs : Sampling frequency in Hz
    freq_vct : 1D array with frequencies to compute the CWT
        (Default = [1 : 1 : fs/2] )
    n : Number of cycles inside the Gaussian curve (Default 6)
    normalization : Scale each wavelet to have energy equal to 1 (Default True)

    Returns
    -------
    wcoef : Complex wavelet coefficients
        2D array with shape [n_samples, n_freqs] if `x` is 1D array
        3D array with shape [n_samples, n_freqs, n_channels] if `x` is 2D array
    wfam : 2D array with shape [n_wavelet_samples, n_freqs] where each column
        corresponds to a member of the wavelet family
    """
    # input 'x' as 2D matrix [samples, columns]
    try:
        x.shape[1]
    except IndexError:
        x = x[:, np.newaxis]

    # number of samples and number of channels
    n_samples, n_channels = x.shape

    # number of wavelets
    n_freqs = len(freq_vct)

    # number of samples for Wavelet family
    # This is equal to the number of samples needed to represent 2*n cycles
    # of a sine with frequency = fres(1)[Hz], sampled at fs [Hz].
    # This is done to ensure that every wavelet in the wavelet family will be
    # close to 0 in the negative and positive edges
    n_samples_wav = np.round((2*n/freq_vct[0])*fs)

    # The wavelet will be symmetrical around 0
    if np.mod(n_samples_wav, 2) == 0:  # even samples
        n_samples_wav = n_samples_wav + 1

    # create time vector for Wavelet family
    half = np.floor(n_samples_wav/2)
    time = np.arange(-half, half+1)/fs

    # initialize Wavelet family matrix
    wfam = np.zeros([len(time), n_freqs], dtype=complex)

    # for each frequency defined in FREQ, create its respective Wavelet
    for iwav in range(n_freqs):
        s = n/(2*np.pi*freq_vct[iwav])
        gaussian_win = np.exp((-time**2)/(2*s**2))
        sinwave = np.exp(2*np.pi*1j*freq_vct[iwav]*time)
        if normalization:
            # each wavelet has unit energy sum(abs(wavelet).^2)) = 1
            A = 1. / ((s**2) * np.pi) ** (1./4)
        else:
            A = 1.
        # Complex Morlet wavelet
        wfam[:, iwav] = A * sinwave * gaussian_win

    wcoef = np.zeros((n_samples, n_freqs, n_channels), dtype=complex)

    if n_channels == 1:  # one channel
        tmp = conv_m(x, wfam, 'same')
        wcoef[:, :, 0] = tmp
    else:  # convolution between signal X and each Wavelet in the Wavelet family
        for i_channel in range(n_channels):
            x_tmp = x[:, i_channel]
            tmp = conv_m(x_tmp, wfam, 'same')
            wcoef[:, :, i_channel] = tmp

    return wcoef, wfam
13a5e2b16c2641b8fabf997679f4d8f6724d32a9
3,651,671
from typing import Tuple
from typing import Union
from typing import List


def add_fake_planet(
    stack: np.ndarray,
    parang: np.ndarray,
    psf_template: np.ndarray,
    polar_position: Tuple[Quantity, Quantity],
    magnitude: float,
    extra_scaling: float,
    dit_stack: float,
    dit_psf_template: float,
    return_planet_positions: bool = False,
    interpolation: str = 'bilinear',
) -> Union[np.ndarray, Tuple[np.ndarray, List[Tuple[float, float]]]]:
    """
    Add a fake planet to the given ``stack`` which, when derotating and
    merging the stack, will show up at the given ``position``.

    This function can also be used to *remove* planets from a stack by
    setting the ``psf_scaling`` to a negative number.

    If you simply want to use this function to generate a fake signal
    stack, set ``stack`` to all zeros, the ``magnitude`` to zero, both
    the ``dit_stack`` and ``dit_psf_template`` to 1 (or any other
    non-zero number), and use the `extra_scaling` factor to linearly
    control the "brightness" of the injected planet.

    This function is essentially a simplified port of the corresponding
    PynPoint function :py:func:`pynpoint.util.analysis.fake_planet()`.

    Args:
        stack: A 3D numpy array of shape `(n_frames, width, height)`
            which contains the stack of images / frames into which we
            want to inject a fake planet.
        parang: A 1D numpy array of shape `(n_frames,)` that contains
            the respective parallactic angle for every frame in `stack`.
        psf_template: A 2D numpy array that contains the (centered) PSF
            template which will be used for the fake planet. This should
            *not* be normalized to `(0, 1]` if we want to work with
            actual astrophysical magnitudes for the contrast.
        polar_position: A tuple `(separation, angle)` which specifies
            the position at which the planet will show up after
            de-rotating with ``parang``. ``separation`` needs to be a
            ``Quantity`` that can be converted to pixel; `angle` needs
            to be a ``Quantity`` that can be converted to radian.
            Additionally, ``angle`` should be using *astronomical* polar
            coordinates, that is, 0 degrees will be "up" (= North), not
            "right". This function will internally add 90° to the angles
            to convert them to mathematical polar coordinates.
        magnitude: The magnitude difference used to scale the PSF.
            Note: This is the contrast ratio in *magnitudes*, meaning
            that increasing this value by a factor of 5 will result in a
            planet that is 100 times brighter. In case you want to keep
            things linear, set this value to 0 and only use the
            ``psf_scaling`` parameter.
        extra_scaling: An additional scaling factor that is used for
            the PSF template. This number is simply multiplied with the
            PSF template, meaning that it changes the brightness
            linearly, not on a logarithmic scale. For example, you could
            use `-1` to add a *negative* planet to remove an actual
            planet in the data. This can also be used to incorporate an
            additional dimming factor due to a neutral density (ND)
            filter.
        dit_stack: The detector integration time of the frames in the
            ``stack`` (in seconds). Necessary to compute the correct
            scaling factor for the planet that we inject.
        dit_psf_template: The detector integration time of the
            ``psf_template`` (in seconds). Necessary to compute the
            correct scaling factor for the planet that we inject.
        return_planet_positions: Whether to return the (Cartesian)
            positions at which the fake planet was injected, as a 2D
            numpy array of shape `(n_frames, 2)`.
        interpolation: ``interpolation`` argument that is passed to
            :py:func:`scipy.ndimage.shift` that is used internally.

    Returns:
        A 3D numpy array of shape `(n_frames, width, height)` which
        contains the original ``stack`` into which a fake planet has
        been injected, as well as a list of tuples `(x, y)` that, for
        each frame, contain the position at which the fake planet has
        been added. If desired (i.e., if ``return_planet_positions`` is
        ``True``), the function also returns a 2D numpy array of shape
        `(n_frames, 2)` containing the Cartesian positions at which the
        fake planet has been injected.
    """
    # Make sure that the stack and the parallactic angles are compatible
    check_consistent_size(stack, parang)

    # Define shortcut for the number of frames and the frame_size
    n_frames, frame_size = stack.shape[0], (stack.shape[1], stack.shape[2])

    # Split the target planet position into separation and angles, convert
    # the quantities to pixels / convert to mathematical polar coordinates
    rho = polar_position[0].to('pixel').value
    phi = np.radians(polar_position[1].to('degree').value + 90 - parang)

    # Convert `magnitude` from logarithmic contrast to linear flux ratio
    flux_ratio = 10.0 ** (-magnitude / 2.5)

    # Compute scaling factor that is due to the different integration times
    # for the science images and the PSF template
    dit_scaling = dit_stack / dit_psf_template

    # Combine all scaling factors and scale the PSF template
    scaling_factor = flux_ratio * dit_scaling * extra_scaling
    psf_scaled = scaling_factor * np.copy(psf_template)

    # Make sure that the PSF has a compatible shape, that is, either crop or
    # pad the PSF template to the same spatial shape as the `stack`.
    psf_scaled = crop_or_pad(psf_scaled, frame_size)

    # Compute the shift for each frame
    x_shift = rho * np.cos(phi)
    y_shift = rho * np.sin(phi)

    # Initialize the "pure signal" stack (can use empty() here, because all
    # values will be overwritten and allocation should be slightly faster)
    signal_stack = np.empty_like(stack)

    # For each frame, move the scaled PSF template to the correct position
    # Note: We use mode='constant' instead of 'reflect' here (unlike PynPoint)
    # because the latter just does not seem to make a lot of sense?
    for i in range(n_frames):
        signal_stack[i] = shift_image(
            image=psf_scaled,
            offset=(float(x_shift[i]), float(y_shift[i])),
            interpolation=interpolation,
            mode='constant',
        )

    # Add the planet stack to the original input stack
    output_stack = stack + signal_stack

    # Either return only the output stack, or the output stack and
    # the planet positions
    if return_planet_positions:
        center = get_center(frame_size)
        planet_positions = np.column_stack(
            (x_shift + center[0], y_shift + center[1])
        )
        return output_stack, planet_positions
    return np.array(output_stack)
f5897585934fe9609a4d6cc0f032285194a59f19
3,651,673
def _BD_from_Av_for_dereddening(line_lambdas, line_fluxes, A_v):
    """
    Find the de-reddened Balmer decrement (BD) that would arise from "removing"
    an extinction of A_v (magnitudes) from the line_fluxes.

    line_lambdas, line_fluxes: As in the function "deredden".
    A_v: The extinction (magnitudes), as a scalar or array of extinction values.

    Returns the Balmer decrement dereddened_BD (F_Halpha / F_Hbeta), as a float
    or array of floats with the same shape as A_v.
    """
    assert np.all(np.asarray(A_v) >= 0)
    initial_BD = _find_BD(line_lambdas, line_fluxes)

    # Calculate the Balmer decrement (BD) that would result from "removing" an
    # extinction of A_v, using an inverted form of Equation A14 in Vogt13.
    dereddened_BD = initial_BD / 10**(A_v / 8.55)

    return dereddened_BD
280255db3669b8ee585afbcb685dc97dfbedc5c0
3,651,675
def otherEnd(contours, top, limit):
    """
    top and end are too close; find another point at the top, the one farthest from top
    """
    tt = (0, 9999)
    for li in contours:
        for pp in li:
            p = pp[0]
            if limit(p[0]) and top[1] - p[1] < 15 and abs(top[0] - p[0]) > 50 and p[1] < tt[1]:
                tt = p
    return tt
4f938d33ba28c1999603cd60381ed6d9aec23815
3,651,676
from matador.workflows.castep.common import castep_prerelax


def castep_phonon_prerelax(computer, calc_doc, seed):
    """ Run a singleshot geometry optimisation before an SCF-style calculation.
    This is typically used to ensure phonon calculations start successfully.
    The phonon calculation will then be restarted from the .check file produced here.

    Parameters:
        computer (:obj:`ComputeTask`): the object that will be calling CASTEP.
        calc_doc (dict): the structure to run on.
        seed (str): root filename of structure.

    """
    LOG.info('Performing CASTEP phonon pre-relax...')
    required = ["write_checkpoint"]
    forbidden = ['phonon_fine_kpoint_list',
                 'phonon_fine_kpoint_path',
                 'phonon_fine_kpoint_mp_spacing',
                 'phonon_fine_kpoint_path_spacing']

    return castep_prerelax(
        computer,
        calc_doc,
        seed,
        required_keys=required,
        forbidden_keys=forbidden
    )
4687e6cdf7150c8721329c7ea1b007e47ee3cd7e
3,651,678
def get_external_links(soup):
    """Retrieve the different links from a `Lyric Wiki` page.

    The links returned can be found in the `External Links` page section,
    and usually references to other platforms (like Last.fm, Amazon, iTunes etc.).

    Args:
        soup (bs4.element.Tag): connection to the `Lyric Wiki` page.

    Returns:
        dict

    Examples::

        >>> # Import packages
        >>> import bs4  # for web scrapping
        >>> import urllib.request  # to connect

        >>> # Set Up: connect to a lyric wiki page
        >>> USER = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
        >>> HEADERS = {'User-Agent': USER}
        >>> URL = 'https://lyrics.fandom.com/wiki/London_Grammar:Who_Am_I'
        >>> req = urllib.request.Request(URL, headers=HEADERS)
        >>> page = urllib.request.urlopen(req)
        >>> soup = bs4.BeautifulSoup(page, 'lxml')

        >>> # Retrieve links from the page
        >>> get_external_links(soup)
        {'Amazon': ['https://www.amazon.com/exec/obidos/redirect?link_code=ur2&tag=wikia-20&camp=1789&creative=9325&path=https%3A%2F%2Fwww.amazon.com%2Fdp%2FB00J0QJ84E'],
         'Last.fm': ['https://www.last.fm/music/London+Grammar', 'https://www.last.fm/music/London+Grammar/If+You+Wait'],
         'iTunes': ['https://itunes.apple.com/us/album/695805771'],
         'AllMusic': ['https://www.allmusic.com/album/mw0002559862'],
         'Discogs': ['http://www.discogs.com/master/595953'],
         'MusicBrainz': ['https://musicbrainz.org/release-group/dbf36a9a-df02-41c4-8fa9-5afe599960b0'],
         'Spotify': ['https://open.spotify.com/album/0YTj3vyjZmlfp16S2XGo50']}
    """
    # Only add links from this set. Other are not relevant.
    links_keys = ['Amazon', 'Last.fm', 'iTunes', 'AllMusic', 'Discogs',
                  'MusicBrainz', 'Spotify', 'Bandcamp', 'Wikipedia',
                  'Pandora', 'Hype Machine']
    links = {}
    # Scrape links from a page
    for external_tag in scrape_external_links(soup):
        # Get the respective link / href
        for link_a in external_tag.findAll('a', attrs={'class', 'external text'}):
            # Add it to a dict
            key = external_tag.text.split(':')[0].strip()
            if key in links_keys:
                links.setdefault(key, [])
                links[key].append(link_a.get('href'))

    return links
9d1f654176cfe5ccdc849448b5cf1720dba4e6c5
3,651,679
def gcc():
    """Return the current container, that is the widget holding the
    figure and all the control widgets, buttons etc."""
    gcf()  # make sure we have something..
    return current.container
d32b9c53694ad258976757b15cc0982431b06e8e
3,651,680
def preprocessing(string):
    """helper function to remove punctuation from string"""
    string = string.replace(',', ' ').replace('.', ' ')
    string = string.replace('(', '').replace(')', '')
    words = string.split(' ')
    return words
17f41a566c3661ab6ffb842ac6d610425fc779d1
3,651,681
def add_input_arguments(argument_parser_object):
    """Adds input args for this script to `argparse.ArgumentParser` object.

    :param argument_parser_object: `argparse.ArgumentParser` object, which may
        or may not already contain input args.
    :return: argument_parser_object: Same as input object, but with new input
        args added.
    """
    argument_parser_object.add_argument(
        '--' + TRACKING_DIR_INPUT_ARG, type=str, required=False,
        default=DEFAULT_TRACKING_DIR_NAME, help=TRACKING_DIR_HELP_STRING)

    argument_parser_object.add_argument(
        '--' + TRACKING_SCALE_INPUT_ARG, type=int, required=False,
        default=echo_top_tracking.DUMMY_TRACKING_SCALE_METRES2,
        help=TRACKING_SCALE_HELP_STRING)

    argument_parser_object.add_argument(
        '--' + GRIDRAD_DIR_INPUT_ARG, type=str, required=False,
        default=DEFAULT_GRIDRAD_DIR_NAME, help=GRIDRAD_DIR_HELP_STRING)

    argument_parser_object.add_argument(
        '--' + OUTPUT_DIR_INPUT_ARG, type=str, required=False,
        default=DEFAULT_OUTPUT_DIR_NAME, help=OUTPUT_DIR_HELP_STRING)

    return argument_parser_object
7e4b407aff10148c9843ba33410c233a32acc36d
3,651,682
def fullUnitSphere(res):
    """Generates a unit sphere in the same way as :func:`unitSphere`, but
    returns all vertices, instead of the unique vertices and an index array.

    :arg res: Resolution - the number of angles to sample.

    :returns: A ``numpy.float32`` array of size ``(4 * (res - 1)**2, 3)``
              containing the ``(x, y, z)`` vertices which can be used to draw
              a unit sphere (using the ``GL_QUADS`` primitive type).
    """
    u = np.linspace(-np.pi / 2, np.pi / 2, res, dtype=np.float32)
    v = np.linspace(-np.pi, np.pi, res, dtype=np.float32)

    cosu = np.cos(u)
    cosv = np.cos(v)
    sinu = np.sin(u)
    sinv = np.sin(v)

    vertices = np.zeros(((res - 1) * (res - 1) * 4, 3), dtype=np.float32)

    cucv = np.outer(cosu[:-1], cosv[:-1]).flatten()
    cusv = np.outer(cosu[:-1], sinv[:-1]).flatten()
    cu1cv = np.outer(cosu[1:], cosv[:-1]).flatten()
    cu1sv = np.outer(cosu[1:], sinv[:-1]).flatten()
    cu1cv1 = np.outer(cosu[1:], cosv[1:]).flatten()
    cu1sv1 = np.outer(cosu[1:], sinv[1:]).flatten()
    cucv1 = np.outer(cosu[:-1], cosv[1:]).flatten()
    cusv1 = np.outer(cosu[:-1], sinv[1:]).flatten()

    su = np.repeat(sinu[:-1], res - 1)
    s1u = np.repeat(sinu[1:], res - 1)

    vertices.T[:, ::4] = [cucv, cusv, su]
    vertices.T[:, 1::4] = [cu1cv, cu1sv, s1u]
    vertices.T[:, 2::4] = [cu1cv1, cu1sv1, s1u]
    vertices.T[:, 3::4] = [cucv1, cusv1, su]

    return vertices
65d83a83b17087934847ab7db8200a67c79294d4
3,651,683
def prompt_for_word_removal(words_to_ignore=None):
    """
    Prompts the user for words that should be ignored in keyword extraction.

    Parameters
    ----------
    words_to_ignore : str or list
        Words that should not be included in the output.

    Returns
    -------
    words_to_ignore, words_added : list, bool
        A new list of words to ignore and a boolean indicating if words have been added.
    """
    if isinstance(words_to_ignore, str):
        words_to_ignore = [words_to_ignore]
    words_to_ignore = [w.replace("'", "") for w in words_to_ignore]

    words_added = False  # whether to run the models again
    more_words = True
    while more_words:
        more_words = input("\nShould words be removed [y/n]? ")
        if more_words == "y":
            new_words_to_ignore = input("Type or copy word(s) to be removed: ")
            # Remove commas if the user has used them to separate words,
            # as well as apostrophes.
            new_words_to_ignore = [
                char for char in new_words_to_ignore if char not in [",", "'"]
            ]
            new_words_to_ignore = "".join(new_words_to_ignore)

            if " " in new_words_to_ignore:
                new_words_to_ignore = new_words_to_ignore.split(" ")
            elif isinstance(new_words_to_ignore, str):
                new_words_to_ignore = [new_words_to_ignore]

            words_to_ignore += new_words_to_ignore
            words_added = True  # we need to run the models again

            more_words = False

        elif more_words == "n":
            more_words = False

        else:
            print("Invalid input")

    return words_to_ignore, words_added
65615f3fe5f0391f44d60e7e9a2990d8fea35bc0
3,651,684
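# Typical loop around prompt_for_word_removal above (hedged sketch; the
# keyword-extraction step is a hypothetical placeholder, not part of the source).
ignore_words = ["data"]
ignore_words, words_added = prompt_for_word_removal(words_to_ignore=ignore_words)

if words_added:
    # e.g. keywords = extract_keywords(corpus, words_to_ignore=ignore_words)
    pass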
import time


def wait_for_image_property(identifier, property, cmp_func, wait=20,
                            maxtries=10):
    """Wait for an image to have a given property value.

    Raises TimeoutError on failure.

    :param identifier: the image identifier
    :param property: the name of the property
    :param cmp_func: predicate function accepting current value of the property
    :param wait: time (in seconds) between polls
    :param maxtries: maximum number of attempts
    :returns: True
    """
    logger.info('Waiting for {identifier} to be {property} using {cmp_func}'
                .format(**locals()))
    for _ in range(maxtries):
        output = image_show(identifier)
        current = openstack_parse_show(output, property)
        if cmp_func(current):
            return True
        time.sleep(wait)
    msg = 'Timeout while waiting for image {identifier} {property} using {fn}'\
        .format(identifier=identifier, property=property, fn=cmp_func)
    logger.info(msg)
    raise TimeoutError(msg)
27ad96fceb931a73deddb49fb40975dd295ebd36
3,651,685
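# Example call to wait_for_image_property above (assumes the helper and its
# image_show/openstack_parse_show dependencies are in scope; the image ID is made up).
wait_for_image_property(
    'c0ffee00-1234-5678-9abc-def012345678',   # hypothetical image ID
    'status',
    lambda value: value == 'active',
    wait=10,
    maxtries=30,
)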
from unittest.mock import MagicMock

from pytest_mock import MockerFixture


def mock_requests_get_json_twice(mocker: MockerFixture) -> MagicMock:
    """Mock two pages of results returned from the parliament open data API."""
    mock: MagicMock = mocker.patch("requests.get")
    mock.return_value.__enter__.return_value.json.side_effect = [
        {
            "columnNames": ["column1", "column2"],
            "rowData": [["Lorem ipsum", "dolor sit amet"]],
            "hasMore": True,
        },
        {
            "columnNames": ["column1", "column2"],
            "rowData": [["eripuit principes intellegam", "eos id"]],
            "hasMore": False,
        },
    ]
    return mock
1c546963b5a2503c8d65d87ee373c2d2c5981b2a
3,651,687
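# Sketch of a test consuming the fixture above (assumes it is registered as a
# pytest fixture); fetch_all_rows is a hypothetical helper defined here only to
# drain the two mocked pages.
import requests


def fetch_all_rows(url):
    """Collect rowData from every page until hasMore is False."""
    rows = []
    while True:
        with requests.get(url) as response:
            payload = response.json()
        rows.extend(payload["rowData"])
        if not payload["hasMore"]:
            return rows


def test_fetch_all_rows(mock_requests_get_json_twice):
    rows = fetch_all_rows("https://example.invalid/api")   # URL is irrelevant here
    assert len(rows) == 2
    assert mock_requests_get_json_twice.call_count == 2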
def _get_rating_accuracy_stats(population, ratings):
    """
    Calculate how accurate our ratings were.

    :param population: iterable of the true values, one per employee
    :param ratings: iterable of estimated ratings, aligned with ``population``
    :return: tuple of (num_underestimates, num_correct, num_overestimates)
    """
    num_overestimates = 0
    num_underestimates = 0
    num_correct = 0
    for employee, rating in zip(population, ratings):
        if rating < employee:
            num_underestimates += 1
        elif rating > employee:
            num_overestimates += 1
        else:
            num_correct += 1
    return num_underestimates, num_correct, num_overestimates
6fefd6faf465a304acc692b465f575cc4c3a62e3
3,651,688
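# Worked example for _get_rating_accuracy_stats above (made-up values; assumes
# the function is in scope).
population = [3, 5, 2, 4]   # "true" employee values
ratings = [3, 4, 3, 4]      # our estimates

under, correct, over = _get_rating_accuracy_stats(population, ratings)
print(under, correct, over)   # 1 2 1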
import hashlib

import ecdsa


def genb58seed(entropy=None):
    """
    Generate a random Family Seed for Ripple. (Private Key)

    entropy = String of any random data. Please ensure high entropy.

    ## Note: ecdsa library's randrange() uses os.urandom() to get its entropy.
    ## This should be secure enough... but just in case, I added the ability
    ## to include your own entropy in addition.
    """
    if entropy is None:
        entropy = int2data(ecdsa.util.randrange(2 ** 128), 16)
    else:
        entropy = hashlib.sha256(entropy + int2data(ecdsa.util.randrange(2 ** 128), 16)).digest()[:16]

    b58seed = data_to_address(entropy, 33)

    return b58seed
1bfbbbff5abffa2bac0fd2accf9480387ff2e8bb
3,651,689
def convert_nhwc_to_nchw(data: np.array) -> np.array: """Convert data to NCHW.""" return np.transpose(data, [0, 3, 1, 2])
5ca229d9dfcb388d3f3a487b51719eaa0dd8fdb6
3,651,690
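# Shape check for convert_nhwc_to_nchw above (assumes the function is in scope).
import numpy as np

batch = np.zeros((8, 224, 224, 3), dtype=np.float32)   # NHWC
print(convert_nhwc_to_nchw(batch).shape)                # (8, 3, 224, 224)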
def get_mfcc_features(wave_data: pd.Series, n_mfcc):
    """
    Compute MFCC, delta and delta-delta features for each standardized,
    length-padded waveform and return one flattened feature row per sample.
    """
    x = wave_data.apply(lambda d: (d - np.mean(d)) / (np.std(d)))
    # x = wave_data
    x, max_length = utils.padding_to_max(x)
    features = []
    for i in range(x.shape[0]):
        t1 = mfcc(x[i], sr=16000, n_mfcc=n_mfcc)
        t2 = utils.diff(t1, axis=0)
        t3 = utils.diff(t1, axis=0, delta=2)
        t = np.concatenate([t1.T, t2.T, t3.T], axis=1).flatten()
        features.append(t)
    return np.array(features)
2f5fa5a4f752c4d5af963bd390868f98e886c0d9
3,651,691
def download_instance_func(instance_id): """Download a DICOM Instance as DCM""" file_bytes = client.orthanc.download_instance_dicom(instance_id) return flask.send_file(BytesIO(file_bytes), mimetype='application/dicom', as_attachment=True, attachment_filename=f'{instance_id}.dcm')
bbd506904096da9d73f3c0f33dd30ba869551025
3,651,692
def generate_random_initial_params(n_qubits, n_layers=1, topology='all', min_val=0., max_val=1., n_par=0, seed=None): """Generate random parameters for the QCBM circuit (iontrap ansatz). Args: n_qubits (int): number of qubits in the circuit. n_layers (int): number of entangling layers in the circuit. If n_layers=-1, you can specify a custom number of parameters (see below). topology (str): describes topology of qubits connectivity. min_val (float): minimum parameter value. max_val (float): maximum parameter value. n_par (int): specifies number of parameters to be generated in case of incomplete layers (i.e. n_layers=-1). seed (int): initialize random generator Returns: numpy.array: the generated parameters, stored in a 1D array. """ gen = np.random.RandomState(seed) assert(topology == 'all') n_params_layer_zero = 2*n_qubits n_params_per_layer = int((n_qubits*(n_qubits-1))/2) if n_layers==-1: n_params=n_par else: assert(n_layers>0) if n_par!=0: raise ValueError("If n_layers is specified, n_par is automatically computed.") n_params = n_params_layer_zero+n_layers*n_params_per_layer params = gen.uniform(min_val, max_val, n_params) return(params)
f3beaa9b36b704d8289c91c46895247275a69ef1
3,651,693
def number_of_friends(user): """How many friends does this user have?""" user_id = user["id"] friend_ids = friendships[user_id] return len(friend_ids)
3f17dfb1e2c3829c650727d36a34a24885d4d77d
3,651,694
def get_serializer_class(format=None): """Convenience function returns serializer or raises SerializerNotFound.""" if not format: serializer = BaseSerializer() elif format == 'json-ld': serializer = JsonLDSerializer() elif format == 'json': serializer = JsonSerializer() else: raise SerializerNotFound(format) return serializer
7660ba2f7861773d6a4e8d5796facbbe96259503
3,651,695
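# Usage sketch for get_serializer_class above (assumes the serializer classes
# and SerializerNotFound come from the same module).
try:
    serializer = get_serializer_class('json-ld')
except SerializerNotFound:
    serializer = get_serializer_class()   # falls back to BaseSerializer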
from typing import Optional from typing import Any def get_or_create_mpc_section( mp_controls: "MpConfigControls", section: str, subkey: Optional[str] = None # type: ignore ) -> Any: """ Return (and create if it doesn't exist) a settings section. Parameters ---------- mp_controls : MpConfigControls The MP Config database. section : str The section name (top level settings item) subkey : Optional[str], optional Optional subkey to create, by default None Returns ------- Any The settings at that section[subkey] location. """ curr_section = mp_controls.get_value(section) if curr_section is None: mp_controls.set_value(section, {}) curr_section = mp_controls.get_value(section) if subkey and subkey not in curr_section: mp_controls.set_value(f"{section}.{subkey}", {}) return mp_controls.get_value(f"{section}.{subkey}") return mp_controls.get_value(section)
60b741f35e0a1c9fe924b472217e0e3b62a1d31e
3,651,696
import csv def get_sql_table_headers(csv_dict_reader: csv.DictReader) -> str: """ This takes in a csv dictionary reader type, and returns a list of the headings needed to make a table """ column_names = [] for row in csv_dict_reader: for column in row: column_names.append('{} {} '.format(column, get_sql_type(row[column]))) return column_names
b874ca3992eac45ed1708434a5adfd28fd96c1cd
3,651,697
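# Sketch: build a CREATE TABLE statement from a one-row CSV sample (one data row
# is used because get_sql_table_headers above emits one entry per column of every
# row it reads; get_sql_type is assumed to be in scope, so the types shown are
# only illustrative).
import csv
import io

sample = io.StringIO("name,age\nalice,31\n")
reader = csv.DictReader(sample)

columns = get_sql_table_headers(reader)          # e.g. ['name TEXT ', 'age INTEGER ']
statement = "CREATE TABLE people ({})".format(", ".join(columns))
print(statement)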
from unittest.mock import call def greater_than(val1, val2): """Perform inequality check on two unsigned 32-bit numbers (val1 > val2)""" myStr = flip_string(val1) + flip_string(val2) call(MATH_32BIT_GREATER_THAN,myStr) return ord(myStr[0]) == 1
b9bba2aa776dc71320df736c654a5c0163827dff
3,651,698
import paddle.fluid.initializer as I  # Paddle weight initializers


def upsampling_2x_blocks(n_speakers, speaker_dim, target_channels, dropout):
    """Return a list of Layers that upsample the input by 2x in the time dimension.

    Args:
        n_speakers (int): number of speakers of the Conv1DGLU layers used.
        speaker_dim (int): speaker embedding size of the Conv1DGLU layers used.
        target_channels (int): channels of the input and the output (the list
            of layers does not change the number of channels).
        dropout (float): dropout probability.

    Returns:
        List[Layer]: upsampling layers.
    """
    upsampling_convolutions = [
        Conv1DTranspose(
            target_channels,
            target_channels,
            2,
            stride=2,
            param_attr=I.Normal(scale=np.sqrt(1. / (2 * target_channels)))),
        Conv1DGLU(
            n_speakers,
            speaker_dim,
            target_channels,
            target_channels,
            3,
            dilation=1,
            std_mul=1.,
            dropout=dropout),
        Conv1DGLU(
            n_speakers,
            speaker_dim,
            target_channels,
            target_channels,
            3,
            dilation=3,
            std_mul=4.,
            dropout=dropout)
    ]
    return upsampling_convolutions
e2a31c4ef7c392d86e5cf6ac96891b1a57a3692e
3,651,699
def actor_path(data, actor_id_1, goal_test_function):
    """
    Creates the shortest possible path from the given actor ID to any actor
    that satisfies the goal test function. Returns a list containing actor IDs.
    If no actors satisfy the goal condition, returns None.
    """
    agenda = {actor_id_1,}
    seen = {actor_id_1,}
    relations = {}
    map_of_actors = mapped_actors(data)

    while agenda:
        # Get the children of the parent
        next_agenda = set()
        for i in agenda:
            for j in map_of_actors[i]:
                if j not in seen and j not in agenda:
                    next_agenda.add(j)
                    # Map child to parent
                    relations[j] = i
        # If actor satisfies function condition, return constructed path
        for id_ in agenda:
            if goal_test_function(id_):
                final_path = construct_path(relations, id_, actor_id_1)
                return final_path

        for next_ in agenda:
            if next_ not in seen:
                seen.add(next_)
        # Update agenda to next bacon number/layer
        agenda = next_agenda
    # No path exists
    return None
8e41d7075b3ade8f75481959f9aa376a096aaa1c
3,651,700
def M_to_E(M, ecc): """Eccentric anomaly from mean anomaly. .. versionadded:: 0.4.0 Parameters ---------- M : float Mean anomaly (rad). ecc : float Eccentricity. Returns ------- E : float Eccentric anomaly. """ with u.set_enabled_equivalencies(u.dimensionless_angles()): E = optimize.newton(_kepler_equation, M, _kepler_equation_prime, args=(M, ecc)) return E
071f33a294edf6627ad77caa256de48e94afad76
3,651,702
def encrypt(plaintext, a, b):
    """
    Affine cipher encryption: E(x) = (a*x + b) mod m, where m is the number of
    letters in the alphabet (26 for English). Non-alphabetic characters are
    passed through unchanged.

    :param plaintext: text to encrypt
    :param a: multiplicative key (must be coprime with 26 for the cipher to be invertible)
    :param b: additive key
    :return: the ciphertext string
    """
    cipher = ""
    for i in plaintext:
        if not i.isalpha():
            cipher += i
        else:
            n = "A" if i.isupper() else "a"
            cipher += chr((a * (ord(i) - ord(n)) + b) % 26 + ord(n))
    return cipher
0cbb57250d8d7a18740e19875f79127b8057ab06
3,651,704
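# Worked examples for the affine cipher above; a must be coprime with 26 for the
# cipher to be invertible.
print(encrypt("Attack at dawn!", 5, 8))   # 'Izzisg iz xiov!'
print(encrypt("abc xyz", 1, 3))           # 'def abc' (a plain Caesar shift of 3)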
def _url_as_filename(url: str) -> str:
    """Return a version of the url optimized for local development.

    If the url is a `file://` url, it will return the remaining part
    of the url so it can be used as a local file path. For
    example, 'file:///logs/example.txt' will be converted to
    '/logs/example.txt'.

    Parameters
    ----------
    url: str
        The url to check and optimize.

    Returns
    -------
    str:
        The url converted to a filename.
    """
    return url.replace('file://', '')
d1aef7a08221c7788f8a7f77351ccb6e6af9416b
3,651,707
from typing import Dict def hard_max(node: NodeWrapper, params: Dict[str, np.ndarray], xmap: Dict[str, XLayer]): """ ONNX Hardmax to XLayer AnyOp conversion function Input tensor shape: N dims Output tensor shape: 2D """ logger.info("ONNX Hardmax -> XLayer AnyOp") assert len(node.get_outputs()) == 1 name = node.get_outputs()[0] bottoms = node.get_inputs() node_attrs = node.get_attributes() iX = xmap[bottoms[0]] d = len(iX.shapes) axis = int(node_attrs['axis']) if 'axis' in node_attrs else 1 if axis < 0: axis = d + axis in_shape = iX.shapes.tolist() dim_0 = int(np.prod(in_shape[:axis])) dim_1 = int(np.prod(in_shape[axis:])) X = px.ops.any_op( op_name=px.stringify(name), in_xlayers=[iX], any_shape=[dim_0, dim_1], onnx_id=name ) return [X]
5f412e98836cd377d40a759ab0487aa81cc4f3dc
3,651,708
from typing import AnyStr from typing import List def sol_files_by_directory(target_path: AnyStr) -> List: """Gathers all the .sol files inside the target path including sub-directories and returns them as a List. Non .sol files are ignored. :param target_path: The directory to look for .sol files :return: """ return files_by_directory(target_path, ".sol")
e41ad3da26ffa1d3c528f34362ac1aeeadeb2b3c
3,651,709
def _call(sig, *inputs, **kwargs):
  """Adds a node calling a function.

  This adds a `call` op to the default graph that calls the function
  of signature `sig`, passing the tensors in `inputs` as arguments.
  It returns the outputs of the call, which are one or more tensors.

  `sig` is the `OpDef` signature of a `_DefinedFunction` object.

  You can pass an optional keyword parameter `name=string` to name the
  added operation.

  You can pass an optional keyword parameter `noinline=True|False` to
  instruct the runtime not to inline the function body into the call
  site.

  Args:
    sig: OpDef. The signature of the function.
    *inputs: arguments to the function.
    **kwargs: Optional keyword arguments.  Can only contain 'name' or
        'noinline'.

  Returns:
     A 2-element tuple. First element: a Tensor if the function returns a single
     value; a list of Tensors if the function returns multiple values; the
     Operation if the function returns no values.

     Second element: the Operation.

  Raises:
    ValueError: if the arguments are invalid.
  """
  if len(inputs) != len(sig.input_arg):
    raise ValueError("Expected number of arguments: %d, received: %d" % (len(
        sig.input_arg), len(inputs)))
  name = kwargs.pop("name", None)
  g = ops.get_default_graph()
  func_name = sig.name
  if name is None:
    name = func_name
  attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
  output_types = [dtypes.DType(x.type) for x in sig.output_arg]
  op = g._create_op_internal(  # pylint: disable=protected-access
      func_name,
      list(inputs),
      output_types,
      name=name,
      attrs=attrs,
      op_def=sig)
  if op.outputs:
    if len(op.outputs) == 1:
      ret = op.outputs[0]
    else:
      ret = tuple(op.outputs)
  else:
    ret = op
  return ret, op
6fd65281118e33bbcd9d567a7c528d85976e75e7
3,651,710
import torch def cov(x, rowvar=False, bias=False, ddof=None, aweights=None): """Estimates covariance matrix like numpy.cov""" # ensure at least 2D if x.dim() == 1: x = x.view(-1, 1) # treat each column as a data point, each row as a variable if rowvar and x.shape[0] != 1: x = x.t() if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 w = aweights if w is not None: if not torch.is_tensor(w): w = torch.tensor(w, dtype=torch.float) w_sum = torch.sum(w) avg = torch.sum(x * (w/w_sum)[:,None], 0) else: avg = torch.mean(x, 0) # Determine the normalization if w is None: fact = x.shape[0] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof * torch.sum(w * w) / w_sum xm = x.sub(avg.expand_as(x)) if w is None: X_T = xm.t() else: X_T = torch.mm(torch.diag(w), xm).t() c = torch.mm(X_T, xm) c = c / fact return c.squeeze()
6b5666a3e7fa6fe0c0e115286e10d2e756ba8ee9
3,651,712
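# Cross-check of cov above against numpy.cov (note this helper defaults to
# rowvar=False while NumPy defaults to rowvar=True); assumes cov is in scope.
import numpy as np
import torch

x = torch.randn(200, 3)                      # 200 observations of 3 variables
c_torch = cov(x)                             # unbiased (ddof=1), rowvar=False
c_numpy = np.cov(x.numpy(), rowvar=False)

print(np.abs(c_torch.numpy() - c_numpy).max())   # tiny, float32 round-off only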
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.
    Args:
        f(function): Generator function

    Returns:
        function: A wrapped generator function whose iterator is thread-safe.
    """
    def g(*args, **kwargs):
        """
        Args:
            *args(list): Non-keyword, variable-length arguments.
            **kwargs(dict): Keyword, variable-length arguments.
        Returns:
            iterator: A thread-safe iterator over the wrapped generator.
        """
        return threadsafe_iter_3(f(*args, **kwargs))

    return g
6a3e53984c85c951e5ffefa2ed238af86d8fc3e3
3,651,713
def load_many_problems(file, collection): """Given a ZIP file containing several ZIP files (each one a problem), insert the problems into collection""" problems = list() try: with ZipFile(file) as zfile: for filename in zfile.infolist(): with zfile.open(filename) as curr_file: problem = load_problem_from_file(curr_file) problem.collection = collection problem.author = collection.author problems.append(problem) except ZipFileParsingException as excp: raise ZipFileParsingException('{}: {}'.format(filename.filename, excp)) from excp except Exception as excp: raise ZipFileParsingException("{}: {}".format(type(excp), excp)) from excp return problems
08d60f5c7905397254715f80e74019f3496d84e5
3,651,714
def CheckStructuralModelsValid(rootGroup, xyzGridSize=None, verbose=False):
    """
    **CheckStructuralModelsValid** - Checks for valid structural model group data
    given a netCDF root node

    Parameters
    ----------
    rootGroup: netCDF4.Group
        The root group node of a Loop Project File
    xyzGridSize: [int,int,int] or None
        The 3D grid shape to test data in this node to adhere to
    verbose: bool
        A flag to indicate a higher level of console logging (more if True)

    Returns
    -------
    bool
        True if valid structural model data in project file, False otherwise.

    """
    valid = True
    if "StructuralModels" in rootGroup.groups:
        if verbose: print("  Structural Models Group Present")
        smGroup = rootGroup.groups.get("StructuralModels")
#        if verbose: print(smGroup)
        if "easting" in smGroup.ncattrs() and "northing" in smGroup.ncattrs() and "depth" in smGroup.ncattrs():
            if xyzGridSize is not None:
                # Check gridSize from extents matches models sizes
                smGridSize = [smGroup.dimensions["easting"].size, smGroup.dimensions["northing"].size, smGroup.dimensions["depth"].size]
                if smGridSize != xyzGridSize:
                    print("(INVALID) Extents grid size and Structural Models Grid Size do NOT match")
                    print("(INVALID) Extents Grid Size :           ", xyzGridSize)
                    print("(INVALID) Structural Models Grid Size : ", smGridSize)
                    valid = False
                else:
                    if verbose: print("  Structural Models grid size adheres to extents")
        else:
            if verbose: print("No structural models extents in project file")
    else:
        if verbose: print("No Structural Models Group Present")
    return valid
d11ce42b041b8be7516f827883a37b40f6f98477
3,651,715
def get_load_balancers(): """ Return all load balancers. :return: List of load balancers. :rtype: list """ return elbv2_client.describe_load_balancers()["LoadBalancers"]
b535f47ce94106a4c7ebe3d84ccfba7c57f22ba9
3,651,716
def file_preview(request): """ Live preview of restructuredtext payload - currently not wired up """ f = File( heading=request.POST['heading'], content=request.POST['content'], ) rendered_base = render_to_string('projects/doc_file.rst.html', {'file': f}) rendered = restructuredtext(rendered_base) json_response = simplejson.dumps({'payload': rendered}) return HttpResponse(json_response, mimetype='text/javascript')
e83570b7b31b4a2d526f1699f8b65c5623d6f7ee
3,651,718
def makeMask(n):
    """
    return a mask of n bits as an integer
    """
    return (2 << n - 1) - 1
c0fe084ec9d6be1519115563cce3c0d3649947c6
3,651,719
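# Quick check of makeMask above: 2 << (n - 1) equals 2**n, so subtracting one
# sets the lowest n bits.
print(makeMask(4))          # 15
print(bin(makeMask(8)))     # 0b11111111
print(makeMask(32))         # 4294967295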
def link_name_to_index(model): """ Generate a dictionary for link names and their indicies in the model. """ return { link.name : index for index, link in enumerate(model.links) }
ba0e768b1160218908b6ecf3b186a73c75a69894
3,651,720
import json def photos_page(): """ Example view demonstrating rendering a simple HTML page. """ context = make_context() with open('data/featured.json') as f: context['featured'] = json.load(f) return make_response(render_template('photos.html', **context))
dfb172e01f659be163c7dffdb13cc5cbaa28ab10
3,651,722
import json


def get_user_by_id(current_user, uid):
    """Retrieve a single user by id and return it as JSON."""
    try:
        user_schema = CmsUsersSchema(exclude=['password'])
        user = CmsUsers.query.get(uid)
        udata = user_schema.dump(user)
        response = Response(
            response=json.dumps(udata.data),
            status=200,
            mimetype='application/json'
        )
    except Exception:
        response = server_error(request.args.get("dbg"))
    return response
9f91319020fb0b386d506b4365c2912af3ed5874
3,651,723
def update_bond_lists_mpi(bond_matrix, comm, size, rank):
    """
    update_bond_lists_mpi(bond_matrix, comm, size, rank)

    Return atom indices of angular terms
    """
    N = bond_matrix.shape[0]

    # Get indices of bonded beads
    bond_index_full = np.argwhere(bond_matrix)

    # Create index lists for referring to in 2D arrays
    indices_full = create_index(bond_index_full)

    angle_indices = []
    angle_bond_indices = []

    # Count number of unique bonds
    count = np.unique(bond_index_full.T[0]).shape[0]

    """
    "Find indicies of ends of fibrils"
    fib_end_check = np.argwhere(np.sum(bond_matrix, axis=1) <= 1)
    n_fib_end = fib_end_check.shape[0]
    fib_end_check_ind = np.tile(fib_end_check, n_fib_end)
    fib_end_check_ind = np.stack((fib_end_check_ind, fib_end_check_ind.T), axis=2)
    fib_end_check_ind = create_index(fib_end_check_ind[np.where(~np.eye(n_fib_end,dtype=bool))])
    fib_end = np.zeros(bond_matrix.shape)
    fib_end[fib_end_check_ind] += 1
    """

    for n in range(N):
        slice_full = np.argwhere(bond_index_full.T[0] == n)

        if slice_full.shape[0] > 1:
            angle_indices.append(np.unique(bond_index_full[slice_full].flatten()))
            angle_bond_indices.append(bond_index_full[slice_full][::-1])

    bond_indices = np.nonzero(np.array_split(bond_matrix, size)[rank])
    angle_indices = np.array_split(angle_indices, size)[rank]
    angle_bond_indices = create_index(np.array_split(angle_bond_indices, size)[rank].reshape((2 * len(angle_indices), 2)))

    return bond_indices, angle_indices, angle_bond_indices
60fd4e5ee7418d182f0c29b0d69e0f148a5a40ee
3,651,724
from ibis.omniscidb.compiler import to_sql def compile(expr: ibis.Expr, params=None): """Compile a given expression. Note you can also call expr.compile(). Parameters ---------- expr : ibis.Expr params : dict Returns ------- compiled : string """ return to_sql(expr, dialect.make_context(params=params))
01bfe1be13b9a78adba04ca37a08aadbf551c827
3,651,726
def get_border(border, size): """ Get border """ i = 1 while size - border // i <= border // i: # size > 2 * (border // i) i *= 2 return border // i
45233f53cdf6f0edb5b4a9262b61f2a70ac42661
3,651,727
def load_normalized_data(file_path, log1p=True): """load normalized data 1. Load filtered data for both FACS and droplet 2. Size factor normalization to counts per 10 thousand 3. log(x+1) transform 4. Combine the data Args: file_path (str): file path. Returns: adata_combine (AnnData): Combined data for FACS and droplet """ # Load filtered data # adata_facs = read_h5ad(f'{file_path}/facs_filtered.h5ad') adata_facs = read_h5ad(f'{file_path}/facs_filtered_reannotated-except-for-marrow-lung-kidney.h5ad') adata_droplet = read_h5ad(f'{file_path}/droplet_filtered.h5ad') # Size factor normalization sc.pp.normalize_per_cell(adata_facs, counts_per_cell_after=1e4) sc.pp.normalize_per_cell(adata_droplet, counts_per_cell_after=1e4) # log(x+1) transform if log1p: sc.pp.log1p(adata_facs) sc.pp.log1p(adata_droplet) # Combine the data ind_select = adata_facs.obs['age'].isin(['3m', '18m', '24m']) adata_facs = adata_facs[ind_select,] adata_combine = AnnData.concatenate(adata_facs, adata_droplet, batch_key='b_method', batch_categories = ['facs','droplet']) return adata_combine
3c180c1f2ba1e118678331795eb42b7132686ed6
3,651,728
def from_copy_number( model: cobra.Model, index: pd.Series, cell_copies: pd.Series, stdev: pd.Series, vol: float, dens: float, water: float, ) -> cobra.Model: """Convert `cell_copies` to mmol/gDW and apply them to `model`. Parameters ---------- model: cobra.Model cobra or geckopy Model (will be converted to geckopy.Model). It is NOT modified inplace. index: pd.Series uniprot IDs cell_copies: pd.Series cell copies/ cell per proteins stdev: pd.Series standard deviation of the cell copies vol: float cell volume dens: float cell density water: float water content fraction (0-1) Returns ------- geckopy.Model with the proteomics constraints applied """ df = pd.DataFrame({"cell_copies": cell_copies, "CV": stdev}) # from molecules/cell to mmol/gDW df["copies_upper"] = df["cell_copies"] + 0.5 * df["CV"] / 100 * df["cell_copies"] df["mmol_per_cell"] = df["copies_upper"] / 6.022e21 proteomics = df["mmol_per_cell"] / (vol * dens * water) proteomics.index = index return from_mmol_gDW(model, proteomics)
858d563ad0f4ae16e83b36db3908895671809431
3,651,729
import re


def _get_values(attribute, text):
    """Match attribute in text and return all matches.

    :returns: List of matches.
    """
    regex = r'{}\s+=\s+"(.*)";'.format(attribute)
    regex = re.compile(regex)
    values = regex.findall(text)
    return values
59a0fdb7a39221e5f728f512ba0aa814506bbc37
3,651,731
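# Example for _get_values above against a small config-style blob (made-up content).
text = '''
name = "alpha";
version = "1.2.3";
name = "beta";
'''

print(_get_values("name", text))      # ['alpha', 'beta']
print(_get_values("version", text))   # ['1.2.3']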
def time_axis(tpp=20e-9, length=20_000) -> np.ndarray: """Return the time axis used in experiments. """ ts = tpp * np.arange(length) ten_percent_point = np.floor(length / 10) * tpp ts -= ten_percent_point ts *= 1e6 # convert from seconds to microseconds return ts
6cd18bcbfa6949fe98e720312b07cfa20fde940a
3,651,732
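# With the defaults (20 ns per point, 20 000 points) the axis runs from -40 us
# to roughly +360 us, with t = 0 at the ten-percent point.
ts = time_axis()
print(ts[0], ts[2000], ts[-1])   # -40.0  0.0  ~359.98 (microseconds)
print(len(ts))                   # 20000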
from cyder.core.ctnr.models import CtnrUser def _has_perm(user, ctnr, action, obj=None, obj_class=None): """ Checks whether a user (``request.user``) has permission to act on a given object (``obj``) within the current session CTNR. Permissions will depend on whether the object is within the user's current CTNR and the user's permissions level within that CTNR. Plebs are people that don't have any permissions except for dynamic registrations. Guests of a CTNR have view access to all objects within the current CTNR. Users have full access to objects within the current CTNR, except for exceptional types of objects (domains, SOAs) and the CTNR itself. CTNR admins are like users except they can modify the CTNR itself and assign permissions to other users. Cyder admins are CTNR admins to every CTNR. Though the object has to be within the CURRENT CTNR for permissions to be granted, for purposes of encapsulation. Superusers (Uber-admins/Elders) have complete access to everything including the ability to create top-level domains, SOAs, and global DHCP objects. Plebs are not assigned to any CTNR. CTNR Guests have level 0 to a CTNR. CTNR Users have level 1 to a CTNR. CTNR Admins have level 2 to a CTNR. Cyder Admins have level 2 to the 'global' CTNR (``pk=1``). Superusers are Django superusers. :param request: A django request object. :type request: :class:`request` :param obj: The object being tested for permission. :type obj: :class:`object` :param action: ``0`` (view), ``1`` (create), ``2`` (update), ``3`` (delete) :type action: :class: `int` An example of checking whether a user has 'create' permission on a :class:`Domain` object. >>> perm = request.user.get_profile().has_perm(request, \'create\', ... obj_class=Domain) >>> perm = request.user.get_profile().has_perm(request, \'update\', ... obj=domain) """ user_level = None if user.is_superuser: return True ctnr_level = -1 assert LEVEL_ADMIN > LEVEL_USER > LEVEL_GUEST > ctnr_level if obj: ctnr = None ctnrs = None if hasattr(obj, "get_ctnrs"): try: ctnrs = obj.get_ctnrs() except TypeError: pass if ctnrs is not None: for c in ctnrs: try: level = CtnrUser.objects.get(ctnr=c, user=user).level except CtnrUser.DoesNotExist: continue if level > ctnr_level: ctnr_level = level ctnr = c if ctnr_level == LEVEL_ADMIN: break elif ctnr and user and not obj: try: ctnr_level = CtnrUser.objects.get(ctnr=ctnr, user=user).level except CtnrUser.DoesNotExist: pass if obj and ctnr and not ctnr.check_contains_obj(obj): return False # Get user level. is_ctnr_admin = ctnr_level == LEVEL_ADMIN is_ctnr_user = ctnr_level == LEVEL_USER is_ctnr_guest = ctnr_level == LEVEL_GUEST try: cyder_level = CtnrUser.objects.get(ctnr=1, user=user).level except CtnrUser.DoesNotExist: cyder_level = -1 is_cyder_admin = cyder_level == LEVEL_ADMIN is_cyder_guest = CtnrUser.objects.filter(user=user).exists() if is_cyder_admin: user_level = 'cyder_admin' elif is_ctnr_admin: user_level = 'ctnr_admin' elif is_ctnr_user: user_level = 'ctnr_user' elif is_ctnr_guest: user_level = 'ctnr_guest' elif is_cyder_guest: user_level = 'cyder_guest' else: user_level = 'pleb' # Dispatch to appropriate permissions handler. if obj: obj_type = obj.__class__.__name__ elif obj_class: if isinstance(obj_class, basestring): obj_type = str(obj_class) else: obj_type = obj_class.__name__ else: return False if (obj_type and obj_type.endswith('AV') and obj_type != 'WorkgroupAV'): obj_type = obj_type[:-len('AV')] handling_functions = { # Administrative. 
        'Ctnr': has_administrative_perm,
        'User': has_administrative_perm,
        'UserProfile': has_administrative_perm,
        'CtnrUser': has_ctnr_user_perm,
        'CtnrObject': has_ctnr_object_perm,
        'SOA': has_soa_perm,
        'Domain': has_domain_perm,

        # Domain records.
        'AddressRecord': has_domain_record_perm,
        'CNAME': has_domain_record_perm,
        'MX': has_domain_record_perm,
        'Nameserver': has_name_server_perm,
        'SRV': has_domain_record_perm,
        'SSHFP': has_domain_record_perm,
        'TXT': has_domain_record_perm,
        'PTR': has_reverse_domain_record_perm,

        # DHCP.
        'Network': has_network_perm,
        'Range': has_range_perm,
        'Site': has_site_perm,
        'System': has_system_perm,
        'Vlan': has_vlan_perm,
        'Vrf': has_vrf_perm,
        'Workgroup': has_workgroup_perm,
        'StaticInterface': has_static_registration_perm,
        'DynamicInterface': has_dynamic_registration_perm,
        'Supernet': has_supernet_perm,
        'WorkgroupAV': has_workgroupav_perm,
        'Token': has_token_perm
    }

    handling_function = handling_functions.get(obj_type, None)

    if not handling_function:
        if '_' in obj_type:
            obj_type = obj_type.replace('_', '')
        if 'Intr' in obj_type:
            obj_type = obj_type.replace('Intr', 'interface')

        for key in handling_functions.keys():
            if obj_type.lower() == key.lower():
                handling_function = handling_functions[key]

    if handling_function:
        return handling_function(user_level, obj, ctnr, action)
    else:
        raise Exception('No handling function for {0}'.format(obj_type))
998119c3aa9b50fcdd9fdec1f734374f04fe51c6
3,651,733
def read_chunk(file: File, size: int = 400) -> bytes:
    """
    Reads the first ``size`` bytes from ``file``; ``size`` defaults to 400.
    """
    path = _path.join(file.root, file.name)  # get full path of file
    with open(path, 'rb') as handle:
        # read a chunk of the requested size
        chunk = handle.read(size)
    return chunk
dfa1fd576fe14c5551470fb76a674dccd136e200
3,651,734
def parse_input(file_path):
    """
    Turn an input file of newline-separated bitrate samples into input and label arrays.

    An input file line should look like this:
        4983 1008073 1591538 704983 1008073 1008073 704983
    Adjacent duplicate entries will be removed and lines with less than two samples
    will be filtered out.

    @return a tuple of the x, x sequence length, and y arrays parsed from the input file
    """
    bitrate_inputs = []
    inputs_length = []
    bitrate_labels = []
    with open(file_path, 'r') as file:
        for line in file:
            samples = list(map(lambda x: [float(x) * bps_to_MBps],
                               line.strip().split(' ')))[0:MAX_SAMPLES + 1]
            if (len(samples) < 2):
                # skip lines without enough samples
                continue

            bitrate_labels.append(samples.pop())
            inputs_length.append(len(samples))
            samples += [[-1] for i in range(MAX_SAMPLES - len(samples))]
            bitrate_inputs += [samples]
    return bitrate_inputs, inputs_length, bitrate_labels
1e1aada5b8da01d362f7deb0b2145209bb55bcc0
3,651,735