content (stringlengths: 22 - 815k) · id (int64: 0 - 4.91M)
def get_env_loader(package, context):
    """This function returns a function object which extends a base environment
    based on a set of environments to load."""
    def load_env(base_env):
        # Copy the base environment to extend
        job_env = dict(base_env)
        # Get the paths to the env loaders
        env_loader_paths = get_env_loaders(package, context)
        # If DESTDIR is set, set _CATKIN_SETUP_DIR as well
        if context.destdir is not None:
            job_env['_CATKIN_SETUP_DIR'] = context.package_dest_path(package)

        for env_loader_path in env_loader_paths:
            # print(' - Loading resultspace env from: {}'.format(env_loader_path))
            resultspace_env = get_resultspace_environment(
                os.path.split(env_loader_path)[0],
                base_env=job_env,
                quiet=True,
                cached=context.use_env_cache,
                strict=False)
            job_env.update(resultspace_env)

        return job_env
    return load_env
5,353,100
def sampen(L, m):
    """Compute the sample entropy of time series L for embedding dimension m,
    using a tolerance r of 0.2 times the standard deviation of L."""
    N = len(L)
    r = (np.std(L) * .2)
    B = 0.0
    A = 0.0

    # Split time series and save all templates of length m
    xmi = np.array([L[i: i + m] for i in range(N - m)])
    xmj = np.array([L[i: i + m] for i in range(N - m + 1)])

    # Save all matches minus the self-match, compute B
    B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])

    # Similar for computing A
    m += 1
    xm = np.array([L[i: i + m] for i in range(N - m + 1)])

    A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])

    # Return SampEn
    return -np.log(A / B)
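A minimal usage sketch for the sampen snippet above; the synthetic sine series and m=2 are illustrative only:

import numpy as np

signal = np.sin(np.linspace(0, 8 * np.pi, 200)) + 0.1 * np.random.randn(200)
# Lower values indicate a more regular (self-similar) series.
print(sampen(signal, m=2))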
5,353,101
def close_socket():
    """ This function is used to close client's socket.
    Returns:
    """
    # send a close signal to cpp server and close its socket
    global client_socket
    signal = struct.pack("!I", 0)
    client_socket.send(signal)
    respond = client_socket.recv(4)
    respond = struct.unpack("!I", respond)
    assert (respond[0] == 0)
    reply = struct.pack("!I", 1)
    client_socket.send(reply)
    client_socket.close()
5,353,102
def total(score: Union[int, RevisedResult]) -> int:
    """
    Return the total number of successes (negative for a botch).

    If `score` is an integer (from a 1st/2nd ed. die from :func:`standard` or
    :func:`special`) then it is returned unmodified. If `score` is a
    :class:`RevisedResult` (from :func:`revised_standard` or
    :func:`revised_special`) then the value returned is the net successes,
    except in the special case where there were successes but they were all
    cancelled out by botches. In that case return 0 even if the net successes
    is negative.
    """
    return int(score)
5,353,103
def test_register_user_with_password(api_client):
    """
    Test if a new user can register himself providing his own new password.
    """
    from testshop.models import Customer

    register_user_url = reverse('shop:register-user')
    data = {
        'form_data': {
            'email': '[email protected]',
            'password1': 'secret',
            'password2': 'secret',
            'preset_password': False,
        }
    }
    response = api_client.post(register_user_url, data, format='json')
    assert response.status_code == 200
    assert response.json() == {'register_user_form': {'success_message': 'Successfully registered yourself.'}}
    customer = Customer.objects.get(user__email='[email protected]')
    assert customer is not None
5,353,104
def load_vgg16(model_dir, gpu_ids):
    """ Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """
    # if not os.path.exists(model_dir):
    #     os.mkdir(model_dir)
    # if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
    #     if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
    #         os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
    #     vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
    #     vgg = Vgg16()
    #     for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
    #         dst.data[:] = src
    #     torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
    vgg = Vgg16()
    # vgg.cuda()
    vgg.cuda(device=gpu_ids[0])
    vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
    vgg = torch.nn.DataParallel(vgg, gpu_ids)
    return vgg
5,353,105
def findExchange(username, password, region, orgId, envId, name):
    """This command will try to find an exchange in a given region, org id and environment id"""
    #### Anypoint login ####
    token = login(username, password)

    #### Request destinations to AMQ Rest API ####
    destinations_request_url = 'https://anypoint.mulesoft.com/mq/admin/api/v1/organizations/' + \
        orgId + '/environments/' + envId + '/regions/' + region + '/destinations/exchanges/' + name
    payload = {}
    headers = {'X-ANYPNT-ENV-ID': envId, 'Authorization': 'bearer ' + token}

    try:
        destinations = requests.request(
            "GET", destinations_request_url, headers=headers, data=payload)
        destinations.raise_for_status()
        print(json.dumps({
            "exists": True,
            "message": name + " already exists"
        }))
    except HTTPError as http_err:
        if http_err.response.status_code == 404:
            print(json.dumps({
                "exists": False,
                "message": name + " does not exist"
            }))
        else:
            raise Exception('HTTP error occurred: ' + str(http_err))
    except Exception as err:
        raise Exception('Other error occurred: ' + str(err))
5,353,106
def rbbox_overlaps_v3(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Calculate overlap between two set of bboxes.

    Args:
        bboxes1 (torch.Tensor): shape (B, m, 5) in <cx, cy, w, h, a> format or empty.
        bboxes2 (torch.Tensor): shape (B, n, 5) in <cx, cy, w, h, a> format or empty.
        mode (str): "iou" (intersection over union), "iof" (intersection over
            foreground) or "giou" (generalized intersection over union).
            Default "iou".
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.

    Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
    """
    assert mode in ['iou', 'iof']
    # Either the boxes are empty or the length of boxes's last dimension is 5
    assert (bboxes1.size(-1) == 5 or bboxes1.size(0) == 0)
    assert (bboxes2.size(-1) == 5 or bboxes2.size(0) == 0)

    rows = bboxes1.size(0)
    cols = bboxes2.size(0)
    if is_aligned:
        assert rows == cols

    if rows * cols == 0:
        return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)

    return obb_overlaps(bboxes1, bboxes2, mode, is_aligned)
5,353,107
def write_comparison(report_fh, result: Tuple[Dict, List[BadResult]]) -> None:
    """Write comparison output."""
    (config, bad_results) = result
    logging.debug("writing report for %s", config['paper_id'])
    if bad_results:
        # data = json.dumps([config, bad_results], sort_keys=True, default=_serialize)
        # report_fh.write(data + "\n")
        report_fh.write(f"* paper {config['paper_id']}\n")
        for br in bad_results:
            if 'GOOD' not in br.message:
                report_fh.write(format_bad_result(br))
5,353,108
def get_target_compute_version(target=None):
    """Utility function to get compute capability of compilation target.

    Looks for the arch in three different places, first in the target attributes, then
    the global scope, and finally the GPU device (if it exists).

    Parameters
    ----------
    target : tvm.target.Target, optional
        The compilation target

    Returns
    -------
    compute_version : str
        compute capability of a GPU (e.g. "8.0")
    """
    # 1. Target
    if target:
        if "arch" in target.attrs:
            compute_version = target.attrs["arch"]
            major, minor = compute_version.split("_")[1]
            return major + "." + minor

    # 2. Global scope
    from tvm.autotvm.env import AutotvmGlobalScope  # pylint: disable=import-outside-toplevel

    if AutotvmGlobalScope.current.cuda_target_arch:
        major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
        return major + "." + minor

    # 3. GPU
    if tvm.gpu(0).exist:
        return tvm.gpu(0).compute_version

    warnings.warn(
        "No CUDA architecture was specified or GPU detected."
        "Try specifying it by adding '-arch=sm_xx' to your target."
    )
    return None
5,353,109
def get_poet_intro_by_id(uid):
    """
    get poet intro by id
    :param uid:
    :return:
    """
    return Poet.get_poet_by_id(uid)
5,353,110
def create_post():
    """Create a post"""
    user = get_user_from_request()

    post = Post(
        created_date=datetime.datetime.now(),
        updated_date=datetime.datetime.now(),
        creator=user,
    )

    json = request.get_json()

    url = json["url"]
    if Post.get_or_none(Post.url == url) is not None:
        return errors.post_url_already_taken()

    error = set_blog(post, json, user)
    if error is not None:
        error_response = {
            BlogError.NoBlog: errors.blog_not_found(),
            BlogError.NoAccess: errors.blog_no_access(),
        }[error]
        return error_response

    fill_post_from_json(post, json)
    post.save()

    set_tags_for_post(post, json)
    manage_jam_entries(post, json)

    return jsonify({"success": 1, "post": post.to_json()})
5,353,111
def plot_absorption_spectrum(pairlist):
    """Plot line pairs along with transmission spectrum
    """
    import subprocess
    for pair in tqdm(pairlist):
        args = ['/Users/dberke/code/plotSpec.py',
                'HD45184/ADP.2014-09-26T16:54:56.573.fits',
                'HD45184/ADP.2015-09-30T02:00:51.583.fits',
                '-o', 'Trans_{}_{}.png'.format(pair[0], pair[1]),
                '-r', '-3.9', '-i', '0', '-j', '1.05', '-vtz',
                '-n', '{}'.format(float(pair[0]) - ((float(pair[1]) - float(pair[0])) * 0.75)),
                '-m', '{}'.format(float(pair[1]) + ((float(pair[1]) - float(pair[0])) * 0.75)),
                '-l', pair[0], pair[1]]
        subprocess.run(args)
5,353,112
async def fetch(session, url):
    """Method to fetch data from a url asynchronously
    """
    async with async_timeout.timeout(30):
        async with session.get(url) as response:
            return await response.json()
5,353,113
def recurse_while(predicate, f, *args):
    """
    Accumulate value by executing recursively function `f`.

    The function `f` is executed with starting arguments. While the predicate
    for the result is true, the result is fed into function `f`. If predicate
    is never true then starting arguments are returned.

    :param predicate: Predicate function guarding execution.
    :param f: Function to execute.
    :param *args: Starting arguments.
    """
    result = f(*args)
    result = result if type(result) == tuple else (result, )

    while predicate(*result):
        args = result  # predicate(args) is always true
        result = f(*args)
        result = result if type(result) == tuple else (result, )

    return args if len(args) > 1 else args[0]
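A small usage sketch for recurse_while above; the doubling function and the limit of 100 are illustrative only:

# Keep doubling while the value stays below 100; returns the last value
# that still satisfied the predicate (64 when starting from 1).
print(recurse_while(lambda x: x < 100, lambda x: x * 2, 1))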
5,353,114
def unparse(node: Optional[ast.AST]) -> Optional[str]: """Unparse an AST to string.""" if node is None: return None elif isinstance(node, str): return node elif node.__class__ in OPERATORS: return OPERATORS[node.__class__] elif isinstance(node, ast.arg): if node.annotation: return "%s: %s" % (node.arg, unparse(node.annotation)) else: return node.arg elif isinstance(node, ast.arguments): return unparse_arguments(node) elif isinstance(node, ast.Attribute): return "%s.%s" % (unparse(node.value), node.attr) elif isinstance(node, ast.BinOp): return " ".join(unparse(e) for e in [node.left, node.op, node.right]) elif isinstance(node, ast.BoolOp): op = " %s " % unparse(node.op) return op.join(unparse(e) for e in node.values) elif isinstance(node, ast.Bytes): return repr(node.s) elif isinstance(node, ast.Call): args = ([unparse(e) for e in node.args] + ["%s=%s" % (k.arg, unparse(k.value)) for k in node.keywords]) return "%s(%s)" % (unparse(node.func), ", ".join(args)) elif isinstance(node, ast.Dict): keys = (unparse(k) for k in node.keys) values = (unparse(v) for v in node.values) items = (k + ": " + v for k, v in zip(keys, values)) return "{" + ", ".join(items) + "}" elif isinstance(node, ast.Ellipsis): return "..." elif isinstance(node, ast.Index): return unparse(node.value) elif isinstance(node, ast.Lambda): return "lambda %s: ..." % unparse(node.args) elif isinstance(node, ast.List): return "[" + ", ".join(unparse(e) for e in node.elts) + "]" elif isinstance(node, ast.Name): return node.id elif isinstance(node, ast.NameConstant): return repr(node.value) elif isinstance(node, ast.Num): return repr(node.n) elif isinstance(node, ast.Set): return "{" + ", ".join(unparse(e) for e in node.elts) + "}" elif isinstance(node, ast.Str): return repr(node.s) elif isinstance(node, ast.Subscript): return "%s[%s]" % (unparse(node.value), unparse(node.slice)) elif isinstance(node, ast.UnaryOp): return "%s %s" % (unparse(node.op), unparse(node.operand)) elif isinstance(node, ast.Tuple): if node.elts: return ", ".join(unparse(e) for e in node.elts) else: return "()" elif sys.version_info > (3, 6) and isinstance(node, ast.Constant): # this branch should be placed at last return repr(node.value) else: raise NotImplementedError('Unable to parse %s object' % type(node).__name__)
5,353,115
def test_convert_nonnumeric_value():
    """Test exception is thrown for nonnumeric type."""
    with pytest.raises(TypeError):
        speed_util.convert("a", SPEED_KILOMETERS_PER_HOUR, SPEED_MILES_PER_HOUR)
5,353,116
def construct_lookup_variables(train_pos_users, train_pos_items, num_users):
    """Lookup variables"""
    index_bounds = None
    sorted_train_pos_items = None

    def index_segment(user):
        lower, upper = index_bounds[user:user + 2]
        items = sorted_train_pos_items[lower:upper]

        negatives_since_last_positive = np.concatenate(
            [items[0][np.newaxis], items[1:] - items[:-1] - 1])

        return np.cumsum(negatives_since_last_positive)

    start_time = timeit.default_timer()
    inner_bounds = np.argwhere(train_pos_users[1:] - train_pos_users[:-1])[:, 0] + 1
    (upper_bound,) = train_pos_users.shape
    index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])

    # Later logic will assume that the users are in sequential ascending order.
    assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))

    sorted_train_pos_items = train_pos_items.copy()

    for i in range(num_users):
        lower, upper = index_bounds[i:i + 2]
        sorted_train_pos_items[lower:upper].sort()

    total_negatives = np.concatenate([
        index_segment(i) for i in range(num_users)])

    logging.info("Negative total vector built. Time: {:.1f} seconds".format(
        timeit.default_timer() - start_time))

    return total_negatives, index_bounds, sorted_train_pos_items
5,353,117
def total_allocation_constraint(weight, allocation: float, upper_bound: bool = True):
    """
    Used for inequality constraint for the total allocation.
    :param weight: np.array
    :param allocation: float
    :param upper_bound: bool if true the constraint is from above (sum of weights <= allocation)
                        else from below (sum of weights >= allocation)
    :return: np.array
    """
    if upper_bound:
        return allocation - weight.sum()
    else:
        return weight.sum() - allocation
5,353,118
def sigmoid(x):
    """ computes sigmoid of x """
    return 1.0/(1.0 + np.exp(-x))
5,353,119
def handle_error(err):
    """Catches errors with processing client requests and returns message"""
    code = 500
    error = 'Error processing the request'
    if isinstance(err, HTTPError):
        code = err.code
        error = str(err.message)
    return jsonify(error=error, code=code), code
5,353,120
def dimensionality(quantity_or_unit: str) -> Dict[str, int]:
    """ Returns the dimensionality of the quantity or unit.

    Parameters
    ----------
    quantity_or_unit : str
        A quantity or a unit

    Returns
    -------
    dimensionality_dict : dict
        Dictionary whose keys are fundamental units and values are the exponent
        of each unit in the quantity.
    """
    from pyunitwizard.kernel import default_form, default_parser
    from pyunitwizard import convert as _convert, get_dimensionality as _get_dimensionality

    tmp_quantity_or_unit = _convert(quantity_or_unit, to_form=default_form, parser=default_parser)

    return _get_dimensionality(tmp_quantity_or_unit)
5,353,121
def split_prec_rows(df):
    """Split precincts into two rows.

    NOTE: Because this creates a copy of the row values, don't rely on
    total vote counts, just look at percentage.
    """
    for idx in df.index:
        # look for rows with precincts that need to be split
        if re.search('\d{4}/\d{4}', idx):
            row_values = df.loc[idx]
            split = idx.split('/')
            for p in split:
                df.loc[p] = row_values
            # delete original row
            df = df.drop(idx, axis=0)
    return(df)
5,353,122
def socfaker_elasticecsfields_host():
    """
    Returns an ECS host dictionary

    Returns:
        dict: Returns a dictionary of ECS host fields/properties
    """
    if validate_request(request):
        return jsonify(str(socfaker.products.elastic.document.fields.host))
5,353,123
def _moog_writer(photosphere, filename, **kwargs): """ Writes an :class:`photospheres.photosphere` to file in a MOOG-friendly format. :param photosphere: The photosphere. :path filename: The filename to write the photosphere to. """ def _get_xi(): xi = photosphere.meta["stellar_parameters"].get("microturbulence", 0.0) if 0 >= xi: logger.warn("Invalid microturbulence value: {:.3f} km/s".format(xi)) return xi if photosphere.meta["kind"] == "marcs": xi = _get_xi() output = dedent(""" WEBMARCS MARCS (2011) TEFF/LOGG/[M/H]/XI {1:.0f}/{2:.3f}/{3:.3f}/{4:.3f} NTAU {0:.0f} 5000.0 """.format(len(photosphere), photosphere.meta["stellar_parameters"]["effective_temperature"], photosphere.meta["stellar_parameters"]["surface_gravity"], photosphere.meta["stellar_parameters"]["metallicity"], xi)).lstrip() for i, line in enumerate(photosphere): output += " {0:>3.0f} {0:>3.0f} {1:10.3e} {0:>3.0f} {2:10.3e} "\ "{3:10.3e} {4:10.3e}\n".format(i + 1, line["lgTau5"], line["T"], line["Pe"], line["Pg"]) output += " {0:.3f}\n".format(xi) output += "NATOMS 0 {0:.3f}\n".format( photosphere.meta["stellar_parameters"]["metallicity"]) output += "NMOL 0\n" elif photosphere.meta["kind"] == "castelli/kurucz": xi = _get_xi() output = dedent(""" KURUCZ CASTELLI/KURUCZ (2004) {1:.0f}/{2:.3f}/{3:.3f}/{4:.3f}/{5:.3f} NTAU {0:.0f} """.format(len(photosphere), photosphere.meta["stellar_parameters"]["effective_temperature"], photosphere.meta["stellar_parameters"]["surface_gravity"], photosphere.meta["stellar_parameters"]["metallicity"], photosphere.meta["stellar_parameters"]["alpha_enhancement"], xi)).lstrip() for line in photosphere: output += " {0:.8e} {1:10.3e}{2:10.3e}{3:10.3e}{4:10.3e}\n".format( line["RHOX"], line["T"], line["P"], line["XNE"], line["ABROSS"]) output += " {0:.3f}\n".format(xi) output += "NATOMS 0 {0:.3f}\n".format( photosphere.meta["stellar_parameters"]["metallicity"]) output += "NMOL 0\n" # MOOG11 fails to read if you don't add an extra line output += "\n" else: raise ValueError("photosphere kind '{}' cannot be written to a MOOG-"\ "compatible format".format(photosphere.meta["kind"])) with open(filename, "w") as fp: fp.write(output) return None
5,353,124
def upcomingSplits(
    symbol="",
    exactDate="",
    token="",
    version="stable",
    filter="",
    format="json",
):
    """This will return all upcoming estimates, dividends, splits for a given symbol or the market.
    If market is passed for the symbol, IPOs will also be included.

    https://iexcloud.io/docs/api/#upcoming-events

    Args:
        symbol (str): Symbol to look up
        exactDate (str): exactDate Optional. Exact date for which to get data
        token (str): Access token
        version (str): API version
        filter (str): filters: https://iexcloud.io/docs/api/#filter-results
        format (str): return format, defaults to json

    Returns:
        dict or DataFrame: result
    """
    return _baseEvent(
        "splits",
        symbol=symbol,
        exactDate=exactDate,
        token=token,
        version=version,
        filter=filter,
        format=format,
    )
5,353,125
async def async_media_pause(hass, entity_id=None):
    """Send the media player the command for pause."""
    data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
    await hass.services.async_call(DOMAIN, SERVICE_MEDIA_PAUSE, data)
5,353,126
def mu_model(u, X, U, k):
    """
    Returns the utility of the kth player

    Parameters
    ----------
    u
    X
    U
    k

    Returns
    -------
    """
    M = X.T @ X
    rewards = M @ u
    penalties = u.T @ M @ U[:, :k] * U[:, :k]
    return rewards - penalties.sum(axis=1)
5,353,127
def tokenizer_init(model_name):
    """simple wrapper for auto tokenizer"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return tokenizer
5,353,128
def insert_message(nick, message, textDirection):
    """ Insert record """
    ins = STATE['messages_table'].insert().values(
        nick=nick, message=message, textDirection=textDirection)
    res = STATE['conn'].execute(ins)

    ltr = 1 if textDirection == 'ltr' else 0
    rtl = 1 if textDirection == 'rtl' else 0
    STATE['conn'].execute(
        'update message_stats set ltr = ltr + ?, rtl = rtl + ?', ltr, rtl)

    return {
        'id': res.lastrowid
    }
5,353,129
def process_metadata(metadata) -> Tuple[Dict[str, str], Dict[str, str]]:
    """
    Returns a tuple of valid and invalid metadata values.
    """
    if not metadata:
        return {}, {}

    valid_values = {}
    invalid_values = {}
    for m in metadata:
        key, value = m.split("=", 1)
        if key in supported_metadata_keys:
            valid_values[key] = value
        else:
            invalid_values[key] = value

    return valid_values, invalid_values
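A quick usage sketch for process_metadata above; the supported_metadata_keys set shown here is an assumed stand-in for the module-level value the function expects:

supported_metadata_keys = {"owner", "team"}  # assumption for illustration

valid, invalid = process_metadata(["owner=alice", "ticket=1234"])
print(valid)    # {'owner': 'alice'}
print(invalid)  # {'ticket': '1234'}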
5,353,130
def rightOfDeciSeperatorToDeci(a):
    """This function only converts the value at the right side of the decimal separator to decimal"""
    deciNum = 0
    for i in range(len(a)):
        deciNum += (int(a[i]))*2**-(i+1)
    return deciNum
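A short usage sketch for the converter above, treating the argument as the string of bits that follow the binary point:

print(rightOfDeciSeperatorToDeci("101"))  # 0.625, i.e. 1/2 + 0/4 + 1/8
print(rightOfDeciSeperatorToDeci("01"))   # 0.25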
5,353,131
def test_single_download_from_requirements_file(script):
    """
    It should support download (in the scratch path) from PyPi from a
    requirements file
    """
    script.scratch_path.join("test-req.txt").write(textwrap.dedent("""
        INITools==0.1
        """))
    result = script.pip(
        'install', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
        expect_error=True,
    )
    assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
    assert script.site_packages / 'initools' not in result.files_created
5,353,132
def init_logging():
    """
    Initialize logging and write info both into logfile and console

    Usage example: land.logger.logging.info('Your message here')
    Logger levels: critical, error, warning, info, debug, notset
    """
    this_dir = os.path.dirname(os.path.realpath(__file__))  # path to this directory
    log_dir = os.path.join(this_dir, '..', 'temp')

    # Create logging directory if not exist
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    # specify logging configuration
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s, %(asctime)s, %(filename)s, %(funcName)s, %(message)s',
                        filename=os.path.join(log_dir, 'logfile.log'),
                        filemode='a')

    # define a handler which writes to the sys.stderr
    console = logging.StreamHandler()
    # set a format which is simpler for console usage
    formatter = logging.Formatter('[%(levelname)s] %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # Override sys.excepthook to log uncaught exceptions
    sys.excepthook = handle_uncaught_exception
5,353,133
def conv(input, weight):
    """
    Returns the convolution of input and weight tensors,
    where input contains sequential data.
    The convolution is along the sequence axis.
    input is of size [batchSize, inputDim, seqLength]
    """
    output = torch.nn.functional.conv1d(input=input, weight=weight)
    return output
5,353,134
def irr_repr(order, alpha, beta, gamma, dtype = None):
    """
    irreducible representation of SO3
    - compatible with compose and spherical_harmonics
    """
    cast_ = cast_torch_tensor(lambda t: t)
    dtype = default(dtype, torch.get_default_dtype())
    alpha, beta, gamma = map(cast_, (alpha, beta, gamma))
    return wigner_d_matrix(order, alpha, beta, gamma, dtype = dtype)
5,353,135
def label_to_span(labels: List[str],
                  scheme: Optional[str] = 'BIO') -> dict:
    """
    convert labels to spans
    :param labels: a list of labels
    :param scheme: labeling scheme, in ['BIO', 'BILOU'].
    :return: labeled spans, a list of tuples (start_idx, end_idx, label)
    """
    assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")

    labeled_spans = dict()
    i = 0
    while i < len(labels):
        if labels[i] == 'O' or labels[i] == 'ABS':
            i += 1
            continue
        else:
            if scheme == 'BIO':
                if labels[i][0] == 'B':
                    start = i
                    lb = labels[i][2:]
                    i += 1
                    try:
                        while labels[i][0] == 'I':
                            i += 1
                        end = i
                        labeled_spans[(start, end)] = lb
                    except IndexError:
                        end = i
                        labeled_spans[(start, end)] = lb
                        i += 1
                # this should not happen
                elif labels[i][0] == 'I':
                    i += 1
            elif scheme == 'BILOU':
                if labels[i][0] == 'U':
                    start = i
                    end = i + 1
                    lb = labels[i][2:]
                    labeled_spans[(start, end)] = lb
                    i += 1
                elif labels[i][0] == 'B':
                    start = i
                    lb = labels[i][2:]
                    i += 1
                    try:
                        while labels[i][0] != 'L':
                            i += 1
                        end = i
                        labeled_spans[(start, end)] = lb
                    except IndexError:
                        end = i
                        labeled_spans[(start, end)] = lb
                        break
                    i += 1
                else:
                    i += 1

    return labeled_spans
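A brief usage sketch for label_to_span above with BIO-tagged tokens; the label sequence is illustrative only:

labels = ['B-PER', 'I-PER', 'O', 'B-LOC']
print(label_to_span(labels))  # {(0, 2): 'PER', (3, 4): 'LOC'}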
5,353,136
def marker_genes_text( marker_res, groups: Union[str, Sequence[str]] = 'all', markers_num: int = 20, sort_key: str = 'scores', ascend: bool = False, fontsize: int = 8, ncols: int = 4, sharey: bool = True, ax: Optional[Axes] = None, **kwargs, ): # scatter plot, 差异基因显著性图,类碎石图 """ marker gene scatter visualization. :param marker_res: the StereoResult of FindMarkers tool. :param groups: list of cluster ids or 'all' clusters, a cluster equal a group. :param markers_num: top N genes to show in each cluster. :param sort_key: the sort key for getting top n marker genes, default `scores`. :param ascend: asc or dec. :param fontsize: font size. :param ncols: number of plot columns. :param sharey: share scale or not :param ax: axes object :param kwargs: other args for plot. """ # 调整图像 panel/grid 相关参数 if 'n_panels_per_row' in kwargs: n_panels_per_row = kwargs['n_panels_per_row'] else: n_panels_per_row = ncols if groups == 'all': group_names = list(marker_res.keys()) else: group_names = [groups] if isinstance(groups, str) else groups # one panel for each group # set up the figure n_panels_x = min(n_panels_per_row, len(group_names)) n_panels_y = np.ceil(len(group_names) / n_panels_x).astype(int) # 初始化图像 width = 10 height = 10 fig = plt.figure( figsize=( n_panels_x * width, # rcParams['figure.figsize'][0], n_panels_y * height, # rcParams['figure.figsize'][1], ) ) gs = gridspec.GridSpec(nrows=n_panels_y, ncols=n_panels_x, wspace=0.22, hspace=0.3) ax0 = None ymin = np.Inf ymax = -np.Inf for count, group_name in enumerate(group_names): result = data_helper.get_top_marker(g_name=group_name, marker_res=marker_res, sort_key=sort_key, ascend=ascend, top_n=markers_num) gene_names = result.genes.values scores = result.scores.values # Setting up axis, calculating y bounds if sharey: ymin = min(ymin, np.min(scores)) ymax = max(ymax, np.max(scores)) if ax0 is None: ax = fig.add_subplot(gs[count]) ax0 = ax else: ax = fig.add_subplot(gs[count], sharey=ax0) else: ymin = np.min(scores) ymax = np.max(scores) ymax += 0.3 * (ymax - ymin) ax = fig.add_subplot(gs[count]) ax.set_ylim(ymin, ymax) ax.set_xlim(-0.9, markers_num - 0.1) # Making labels for ig, gene_name in enumerate(gene_names): ax.text( ig, scores[ig], gene_name, rotation='vertical', verticalalignment='bottom', horizontalalignment='center', fontsize=fontsize, ) ax.set_title(group_name) if count >= n_panels_x * (n_panels_y - 1): ax.set_xlabel('ranking') # print the 'score' label only on the first panel per row. if count % n_panels_x == 0: ax.set_ylabel('score') if sharey is True: ymax += 0.3 * (ymax - ymin) ax.set_ylim(ymin, ymax)
5,353,137
def get_storage_account(account_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStorageAccountResult:
    """
    The storage account.

    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:storage/v20160501:getStorageAccount', __args__, opts=opts, typ=GetStorageAccountResult).value

    return AwaitableGetStorageAccountResult(
        access_tier=__ret__.access_tier,
        creation_time=__ret__.creation_time,
        custom_domain=__ret__.custom_domain,
        encryption=__ret__.encryption,
        id=__ret__.id,
        kind=__ret__.kind,
        last_geo_failover_time=__ret__.last_geo_failover_time,
        location=__ret__.location,
        name=__ret__.name,
        primary_endpoints=__ret__.primary_endpoints,
        primary_location=__ret__.primary_location,
        provisioning_state=__ret__.provisioning_state,
        secondary_endpoints=__ret__.secondary_endpoints,
        secondary_location=__ret__.secondary_location,
        sku=__ret__.sku,
        status_of_primary=__ret__.status_of_primary,
        status_of_secondary=__ret__.status_of_secondary,
        tags=__ret__.tags,
        type=__ret__.type)
5,353,138
def format_and_add(graph, info, relation, name):
    """
    input: graph and three strings
    function formats the strings and adds to the graph
    """
    info = info.replace(" ", "_")
    name = name.replace(" ", "_")
    inf = rdflib.URIRef(project_prefix + info)
    rel = rdflib.URIRef(project_prefix + relation)
    nm = rdflib.URIRef(project_prefix + name)
    graph.add((inf, rel, nm))
    return None
5,353,139
def get_dataset(dir, batch_size, num_epochs, reshape_size, padding='SAME'):
    """Reads input data num_epochs times. AND Return the dataset

    Args:
        train: Selects between the training (True) and validation (False) data.
        batch_size: Number of examples per returned batch.
        num_epochs: Number of times to read the input data, or 0/None to train forever.
        padding: if 'SAME', have ceil(#samples / batch_size) * epoch_nums batches
                 if 'VALID', have floor(#samples / batch_size) * epoch_nums batches

    Returns:
        A tuple (images, labels), where:
        * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
          in the range [-0.5, 0.5].
        * labels is an int32 tensor with shape [batch_size] with the true label,
          a number in the range [0, mnist.NUM_CLASSES).

        This function creates a one_shot_iterator, meaning that it will only iterate
        over the dataset once. On the other hand there is no special initialization
        required.
    """
    if not num_epochs:
        num_epochs = None
    filenames = [os.path.join(dir, i) for i in os.listdir(dir)]

    with tf.name_scope('input'):
        # TFRecordDataset opens a protobuf and reads entries line by line
        # could also be [list, of, filenames]
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.repeat(num_epochs)

        # map takes a python function and applies it to every sample
        dataset = dataset.map(decode)
        dataset = dataset.map(extract)
        dataset = dataset.map(cast_type)
        dataset = dataset.map(augment)
        dataset = dataset.map(normalize)
        dataset = dataset.map(set_parameter(reshape, reshape_size=reshape_size))

        # the parameter is the queue size
        dataset = dataset.shuffle(1000 + 3 * batch_size)
        dataset = dataset.batch(batch_size)

    return dataset
5,353,140
def to_log_space(p: float, bounds: BOUNDS_TYPE):
    """ Interprets p as a point in a rectangle in R^2 or R^3 using Morton space-filling curve

    :param bounds   [ (low,high), (low,high), (low,high) ] defaults to unit cube
    :param dim      Dimension. Only used if bounds are not supplied.

    Very similar to "to_space" but assumes speed varies with logarithm
    """
    assert 0 <= p <= 1
    dim = len(bounds)
    us = list(reversed(ZCurveConventions().to_cube(zpercentile=p, dim=dim)))  # 0 < us[i] < 1
    return [to_log_space_1d(u, low=b[0], high=b[1]) for u, b in zip(us, bounds)]
5,353,141
def advection2D(iplot=False,use_petsc=False,htmlplot=False,outdir='./_output',solver_type='classic'): """ Example python script for solving the 2d advection equation. """ #=========================================================================== # Import libraries #=========================================================================== if use_petsc: import petclaw as pyclaw else: import pyclaw #=========================================================================== # Setup solver and solver parameters #=========================================================================== if solver_type=='classic': solver = pyclaw.ClawSolver2D() elif solver_type=='sharpclaw': solver = pyclaw.SharpClawSolver2D() solver.bc_lower[0] = pyclaw.BC.periodic solver.bc_upper[0] = pyclaw.BC.periodic solver.bc_lower[1] = pyclaw.BC.periodic solver.bc_upper[1] = pyclaw.BC.periodic solver.mwaves = 1 solver.dim_split = 0 solver.cfl_max=1.0 solver.cfl_desired = 0.9 solver.mthlim = pyclaw.limiters.tvd.vanleer #=========================================================================== # Initialize grids, then initialize the solution associated to the grid and # finally initialize aux array #=========================================================================== # Grid: mx=50; my=50 x = pyclaw.Dimension('x',0.0,1.0,mx) y = pyclaw.Dimension('y',0.0,1.0,my) grid = pyclaw.Grid([x,y]) meqn = 1 state = pyclaw.State(grid,meqn) state.aux_global['u'] = 0.5 # Parameters (global auxiliary variables) state.aux_global['v'] = 1.0 # Initial solution # ================ qinit(state) # This function is defined above #=========================================================================== # Set up controller and controller parameters #=========================================================================== claw = pyclaw.Controller() claw.tfinal = 2.0 claw.solution = pyclaw.Solution(state) claw.solver = solver claw.outdir = outdir #=========================================================================== # Solve the problem #=========================================================================== status = claw.run() #=========================================================================== # Plot results #=========================================================================== if htmlplot: pyclaw.plot.html_plot(outdir=outdir) if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
5,353,142
def rsi_tradingview(ohlc: pd.DataFrame, period: int = 14, round_rsi: bool = True):
    """ Implements the RSI indicator as defined by TradingView on March 15, 2021.
        The TradingView code is as follows:
            //@version=4
            study(title="Relative Strength Index", shorttitle="RSI", format=format.price, precision=2, resolution="")
            len = input(14, minval=1, title="Length")
            src = input(close, "Source", type = input.source)
            up = rma(max(change(src), 0), len)
            down = rma(-min(change(src), 0), len)
            rsi = down == 0 ? 100 : up == 0 ? 0 : 100 - (100 / (1 + up / down))
            plot(rsi, "RSI", color=#8E1599)
            band1 = hline(70, "Upper Band", color=#C0C0C0)
            band0 = hline(30, "Lower Band", color=#C0C0C0)
            fill(band1, band0, color=#9915FF, transp=90, title="Background")

    :param ohlc:
    :param period:
    :param round_rsi:
    :return: an array with the RSI indicator values
    """
    delta = ohlc["close"].diff()

    up = delta.copy()
    up[up < 0] = 0
    up = pd.Series.ewm(up, alpha=1/period).mean()

    down = delta.copy()
    down[down > 0] = 0
    down *= -1
    down = pd.Series.ewm(down, alpha=1/period).mean()

    rsi = np.where(up == 0, 0, np.where(down == 0, 100, 100 - (100 / (1 + up / down))))

    return np.round(rsi, 2) if round_rsi else rsi
5,353,143
def bin_by(x, y, nbins=30):
    """Bin x by y, given paired observations of x & y.
    Returns the binned "x" values and the left edges of the bins."""
    bins = np.linspace(y.min(), y.max(), nbins+1)
    # To avoid extra bin for the max value
    bins[-1] += 1

    indicies = np.digitize(y, bins)

    output = []
    for i in range(1, len(bins)):
        output.append(x[indicies == i])

    # Just return the left edges of the bins
    bins = bins[:-1]

    return output, bins
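A small usage sketch for bin_by above, assuming numpy is imported as np; the random data is illustrative only:

import numpy as np

x = np.random.randn(1000)
y = np.random.rand(1000)
binned_x, left_edges = bin_by(x, y, nbins=10)
print(len(binned_x), left_edges[:3])  # 10 groups of x values, one per y bin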
5,353,144
def calc_stats_with_cumsum(df_tgt, list_tgt_status, dict_diff, calc_type=0): """ Calculate statistics with cumulative sum of target status types. \n "dict_diff" is dictionaly of name key and difference value. ex) {"perweek": 7, "per2week": 14} \n calc_type=0: calculate for each simulation result. \n calc_type=1: calculate for each daycount result. """ # Prepare front side of dataframe. if calc_type == 0: sim_num = len(df_tgt[list_tgt_status[0]].columns) output_df = pd.DataFrame([i for i in range(sim_num)], columns=["sim_num"]) else: output_df = df_tgt.iloc[:, :2].copy() # Calculate statistics with cumulative sum. for one_status in list_tgt_status: # Extract target status data. one_tgt_df = df_tgt[one_status] # Calculate the days difference in dict_diff. dict_df_diff = {} for one_key, one_diff in dict_diff.items(): temp_df_diff = one_tgt_df.cumsum().diff(one_diff) temp_df_diff.iloc[one_diff-1, :] = one_tgt_df.cumsum().iloc[one_diff-1, :] dict_df_diff[one_key] = temp_df_diff if calc_type == 0: # Each simulation. output_df.loc[:, "{}_perday_mean".format(one_status)] = one_tgt_df.T.mean(axis=1).values output_df.loc[:, "{}_perday_std".format(one_status)] = one_tgt_df.T.std(axis=1).values output_df.loc[:, "{}_perday_min".format(one_status)] = one_tgt_df.T.min(axis=1).values output_df.loc[:, "{}_perday_quartile1".format(one_status)] = one_tgt_df.T.quantile(q=0.25, axis=1).values output_df.loc[:, "{}_perday_median".format(one_status)] = one_tgt_df.T.median(axis=1).values output_df.loc[:, "{}_perday_quartile3".format(one_status)] = one_tgt_df.T.quantile(q=0.75, axis=1).values output_df.loc[:, "{}_perday_max".format(one_status)] = one_tgt_df.T.max(axis=1).values for one_key, one_diff in dict_diff.items(): output_df.loc[:, "{}_{}_mean".format(one_status, one_key)] = dict_df_diff[one_key].T.mean(axis=1).values output_df.loc[:, "{}_{}_std".format(one_status, one_key)] = dict_df_diff[one_key].T.std(axis=1).values output_df.loc[:, "{}_{}_min".format(one_status, one_key)] = dict_df_diff[one_key].T.min(axis=1).values output_df.loc[:, "{}_{}_quartile1".format(one_status, one_key)] = dict_df_diff[one_key].T.quantile(q=0.25, axis=1).values output_df.loc[:, "{}_{}_median".format(one_status, one_key)] = dict_df_diff[one_key].T.median(axis=1).values output_df.loc[:, "{}_{}_quartile3".format(one_status, one_key)] = dict_df_diff[one_key].T.quantile(q=0.75, axis=1).values output_df.loc[:, "{}_{}_max".format(one_status, one_key)] = dict_df_diff[one_key].T.max(axis=1).values else: # Each day. output_df.loc[:, "{}_perday_mean".format(one_status)] = one_tgt_df.mean(axis=1) output_df.loc[:, "{}_perday_std".format(one_status)] = one_tgt_df.std(axis=1) output_df.loc[:, "{}_perday_min".format(one_status)] = one_tgt_df.min(axis=1) output_df.loc[:, "{}_perday_quartile1".format(one_status)] = one_tgt_df.quantile(q=0.25, axis=1) output_df.loc[:, "{}_perday_median".format(one_status)] = one_tgt_df.median(axis=1) output_df.loc[:, "{}_perday_quartile3".format(one_status)] = one_tgt_df.quantile(q=0.75, axis=1) output_df.loc[:, "{}_perday_max".format(one_status)] = one_tgt_df.max(axis=1) for one_key, one_diff in dict_diff.items(): # Note: Processing is well done, but numpy warning occurs. # Note: Because all the data of first few days in "perweek" and "per2week" become np.NaN. 
output_df.loc[:, "{}_{}_mean".format(one_status, one_key)] = dict_df_diff[one_key].mean(axis=1) output_df.loc[:, "{}_{}_std".format(one_status, one_key)] = dict_df_diff[one_key].std(axis=1) output_df.loc[:, "{}_{}_min".format(one_status, one_key)] = dict_df_diff[one_key].min(axis=1) output_df.loc[:, "{}_{}_quartile1".format(one_status, one_key)] = dict_df_diff[one_key].quantile(q=0.25, axis=1) output_df.loc[:, "{}_{}_median".format(one_status, one_key)] = dict_df_diff[one_key].median(axis=1) output_df.loc[:, "{}_{}_quartile3".format(one_status, one_key)] = dict_df_diff[one_key].quantile(q=0.75, axis=1) output_df.loc[:, "{}_{}_max".format(one_status, one_key)] = dict_df_diff[one_key].max(axis=1) return output_df
5,353,145
def savestates(plate, filename, n, newpath, number=NUMBER): """ Create a JPEG and a PNG of the snowflake. :param plate: (list of list of dict) The plate which contain the cristal. :param filename: (str) Name of the file. :param n: (int) The n-th iteration of the snowflake. 0 by default, if the param doesn't change you will only get the last image. :param newpath: (str) the path of the folder where the pictures are saved :param number: (int) [DEFAULT:NUMBER] the total number of iterations """ pixels_snowflake = [] index_number = str(n).zfill(len(str(number))) # Adds leading zeros in front of the index (instead of 50 we would get 050) # Creating the pixel image for y in range(DIMENSION[0]): for x in range(DIMENSION[1]): d = plate[y][x] if d["is_in_crystal"] == False: pixels_snowflake.append((0,0,255 - int((d["d"] / RHO)*255))) else: pixels_snowflake.append((0,255,(int(d["i"]/NUMBER*255)))) snowflake = Image.new("RGB", DIMENSION, color=0) snowflake.putdata(pixels_snowflake) snowflake.save(newpath + "Pixels/" + filename + index_number + ".png", format="PNG") # Creating the Hexagon Image # Half the height of the hexagon x = 12*DIMENSION[1]+6 y = DIMENSION[0]*11+3 snowflake = Image.new("RGB", (x, y), color=0) for y in range(DIMENSION[0]): for x in range(DIMENSION[1]): # Add the horizontal offset on every other row x_ = 0 if (y % 2 == 0) else 6 shape = [ (12*x +6 +x_, y*10 ), (12*x +12 +x_, y*10 +3 ), (12*x +12 +x_, y*10 +11), (12*x +6 +x_, y*10 +14), (12*x +x_, y*10 +11), (12*x +x_, y*10 +3 ) ] d = plate[y][x] if d["is_in_crystal"] == False: ImageDraw.Draw(snowflake).polygon(xy=shape, fill=(0,0,255 - int((d["d"] / RHO)*255)), outline=(0,0,255 - int((d["d"] / RHO)*255)), ) else: ImageDraw.Draw(snowflake).polygon(xy=shape, fill=(0,255,(int(d["i"]/NUMBER*255))), outline=(0,255,(int(d["i"]/NUMBER*255)))) snowflake.save(newpath + "Hexagons/" + filename + index_number + ".jpeg", format="JPEG") return
5,353,146
def _normalize_handler_method(method):
    """Transforms an HTTP method into a valid Python identifier."""
    return method.lower().replace("-", "_")
5,353,147
def getContentType(the_type):
    """ Get the content type based on the type name which is in settings
    :param the_type:
    :return:
    """
    if the_type not in settings.XGDS_MAP_SERVER_JS_MAP:
        return None
    the_model_name = settings.XGDS_MAP_SERVER_JS_MAP[the_type]['model']
    splits = the_model_name.split('.')
    content_type = ContentType.objects.get(app_label=splits[0], model=splits[1])
    return content_type
5,353,148
def calculateGravityAcceleration(stateVec, epoch, useGeoid): """ Calculate the acceleration due to gravtiy acting on the satellite at a given state (3 positions and 3 velocities). Ignore satellite's mass, i.e. use a restricted two-body problem. Arguments ---------- numpy.ndarray of shape (1,6) with three Cartesian positions and three velocities in an inertial reference frame in metres and metres per second, respectively. epoch - datetime corresponding to the UTC epoch at which the rate of change is to be computed. useGeoid - bool, whether to compute the gravity by using EGM geopotential expansion (True) or a restricted two-body problem (False). Returns ---------- numpy.ndarray of shape (1,3) with three Cartesian components of the acceleration in m/s2 given in an inertial reference frame. """ if useGeoid: " Compute geocentric co-latitude, longitude & radius. " colatitude,longitude,r = calculateGeocentricLatLon(stateVec, epoch) " Find the gravitational potential at the desired point. " # See Eq. 1 in Cunningham (1996) for the general form of the geopotential expansion. gravitationalPotential = 0.0 # Potential of the gravitational field at the stateVec location. for degree in range(0, MAX_DEGREE+1): # Go through all the desired orders and compute the geoid corrections to the sphere. temp = 0. # Contribution to the potential from the current degree and all corresponding orders. legendreCoeffs = scipy.special.legendre(degree) # Legendre polynomial coefficients corresponding to the current degree. for order in range(degree+1): # Go through all the orders corresponding to the currently evaluated degree. if (abs(colatitude-math.pi/2. <= 1E-16)) or (abs(colatitude-3*math.pi/2. <= 1E-16)): # We're at the equator, cos(colatitude) will be zero and things will break. temp += legendreCoeffs[order] * 1.0 * (Ccoeffs[degree][order]*math.cos( order*longitude ) + Scoeffs[degree][order]*math.sin( order*longitude )) else: temp += legendreCoeffs[order] * math.cos(colatitude) * (Ccoeffs[degree][order]*math.cos( order*longitude ) + Scoeffs[degree][order]*math.sin( order*longitude )) gravitationalPotential += math.pow(EarthRadius/r, degree) * temp # Add the contribution from the current degree. gravitationalPotential *= GM/r # Final correction (*GM for acceleration, /r to get r^(n+1) in the denominator). " Compute the acceleration due to the gravity potential at the given point. " # stateVec is defined w.r.t. Earth's centre of mass, so no need to account # for the geoid shape here. gravityAcceleration = gravitationalPotential/r* (-1.*stateVec[:3]/r) # First divide by the radius to get the acceleration value, then get the direction (towards centre of the Earth). else: r = numpy.linalg.norm(stateVec[:3]) # Earth-centred radius. gravityAcceleration = GM/(r*r) * (-1.*stateVec[:3]/r) # First compute the magnitude, then get the direction (towards centre of the Earth). return gravityAcceleration
5,353,149
def points_on_line(r0, r1, spacing):
    """ Coordinates of points spaced `spacing` apart between points `r0` and `r1`.

    The dimensionality is inferred from the length of the tuples `r0` and `r1`,
    while the specified `spacing` will be an upper bound to the actual spacing.
    """
    dim = len(r0)
    v = np.array(r1) - np.array(r0)
    length = np.linalg.norm(v)
    steps = math.ceil(1.0 * length / spacing) + 1
    points = np.zeros((steps, dim))
    for i in range(dim):
        points[:, i] = np.linspace(r0[i], r1[i], steps)
    return points
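A quick usage sketch for points_on_line above, assuming numpy (as np) and math are imported; the endpoints and spacing are illustrative:

pts = points_on_line((0.0, 0.0), (1.0, 1.0), spacing=0.5)
print(pts.shape)        # (4, 2): four points at most ~0.47 apart along the diagonal
print(pts[0], pts[-1])  # [0. 0.] [1. 1.]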
5,353,150
def index() -> Response:
    """ Return application index. """
    return APP.send_static_file("index.html")
5,353,151
def compare_files(og_maxima,new_maxima, compare_file, until=100, divisor=1000): """ given input of the maxima of a graph, compare it to the maxima from data100.txt maxima will be a series of x,y coordinates corresponding to the x,y values of a maximum from a file. First see if there is a maxima with the same x value as data100.txt, if there is not expand the x value ranges until a maximum is found. Find out what this dx is for the new file. Note do it for all the peaks of data100.txt at once, so that if it finds a peak for the 2nd peak of data100.txt, it doesn't also assign this to the first peak as well. kewyword arguments until and divisor: for the dx loop the loop will increase dx from 0 until until/divisor in steps of 1/divisor eg for default values until=100 and divisor=1000, it will increase dx from 0 until 100/1000 (=0.1) in steps of 1/1000 (=0.001) changing these arguments will lead to more or less peak matching, which could affect the results of the calculation significantly. """ if compare_file == 'data100.txt': return None # Whenever there is a match we will iterate this, so that we can compare #this at the end? number_of_matches = 0 # Initiate two lists to contain all the dx and dy values for each peak that # is matched by the code. dx_values = [] dy_values = [] # Loop through the original maxima list (supplied as an argument) # and also loop through the maxima from the file being compared. for og_idx,og_val in enumerate(og_maxima.T[0]): for idx,val in enumerate(new_maxima.T[0]): #this will loop dx from 0 to (until)/divisor in steps of 1/divisor for x in range(until+1): dx = x/divisor # For the current value of dx see if there is a matching # peak between the data100.txt file and the file being compared. # There is a match if the val from the compare_file is within the range # of the original peak x value +/- the dx value. if og_val - dx <= val <= og_val + dx: #if there is a match print some logging information to the console. print(f"Peak Match : index {og_idx} from data100.txt and {idx} from {compare_file}") print(f"values are {og_val} and {val} respectively") # iterate the number of peak matches between the two files being compared. number_of_matches+=1 # append the current dx value to our running list which will keep track # of the dx values for all the matched peaks dx_values.append(dx) # Get the absolute value of the difference in y values (dy) dy = abs(og_maxima.T[1][og_idx] - new_maxima.T[1][idx]) dy_values.append(dy) #breaks us out of the "for x in range" loop break # If the for loop (for x in range ...) isn't terminated by a break statement # I.E. we didn't get a match else: "move onto next peak in new_maxima" continue # If the for loop does get terminated by the break statement # I.E. we get a match """compare next peak in og_maxima, IE break the new_maxima loop and move onto next in the original maxima list""" break # Calculate the absolute value of the difference in number of peaks # between the two data files different_no_peaks = abs(len(new_maxima) - len(og_maxima)) return [dx_values, dy_values, number_of_matches, different_no_peaks]
5,353,152
def parseDatetimetz(string, local=True):
    """Parse the given string using :func:`parse`.

    Return a :class:`datetime.datetime` instance.
    """
    y, mo, d, h, m, s, tz = parse(string, local)
    s, micro = divmod(s, 1.0)
    micro = round(micro * 1000000)
    if tz:
        offset = _tzoffset(tz, None) / 60
        _tzinfo = tzinfo(offset)
    else:
        _tzinfo = None
    return _datetime(y, mo, d, int(h), int(m), int(s), int(micro), _tzinfo)
5,353,153
def median_ratio_flux(spec, smask, ispec, iref, nsig=3., niter=5, **kwargs):
    """ Calculate the median ratio between two spectra

    Parameters
    ----------
    spec
    smask: True = Good, False = Bad
    ispec
    iref
    nsig
    niter
    kwargs

    Returns
    -------
    med_scale : float
        Median of reference spectrum to input spectrum
    """
    # Setup
    fluxes, sigs, wave = unpack_spec(spec)
    # Mask
    okm = smask[iref,:] & smask[ispec,:]
    # Insist on positive values
    okf = (fluxes[iref,:] > 0.) & (fluxes[ispec,:] > 0)
    allok = okm & okf
    # Ratio
    med_flux = fluxes[iref,allok] / fluxes[ispec,allok]
    # Clip
    mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(med_flux, sigma=nsig, maxiters=niter, **kwargs)
    # Return
    return med_scale
5,353,154
def merge_chars(lgr, script, merged_lgr, ref_mapping, previous_scripts): """ Merge chars from LGR set. :param lgr: A LGR from the set :param script: The LGR script :param merged_lgr: The merged LGR :param ref_mapping: The reference mapping from base LGR to new LGR :param previous_scripts: The scripts that has already been processed """ new_variants = [] merged_chars = {char: char for char in merged_lgr.repertoire} for char in lgr.repertoire: if len(char.cp) == 1 and lgr.unicode_database is not None: script_extensions = lgr.unicode_database.get_script_extensions(char.cp[0]) else: script_extensions = [] new_tags = set(script + '-' + x if ':' not in x else x for x in char.tags) | set(script_extensions) existing_char = None if char in merged_chars: existing_char = merged_chars[char] if existing_char: # same cp already in LGR existing_char.comment = let_user_choose(existing_char.comment, char.comment) existing_char.tags = list(set.union(set(existing_char.tags), set(new_tags))) existing_char.references = set.union(set(existing_char.references), set(char.references)) # if 2 scripts have different variants on a character, we need to add the variants for script 1 as # variant on script 2 to keep transitivity (e.g. b is variant of a in script 1, c is variant of a in # script 2, we need to set b as c variant and conversely). We do that after processing all code points # as the code point for the new variant may not be in the merged LGR in the current iteration. new_v1 = set.difference(set(char.get_variants()), set(existing_char.get_variants())) new_v2 = set.difference(set(existing_char.get_variants()), set(char.get_variants())) # remove cp itself to avoid error with reflexive variants for v in new_v1: if v.cp == existing_char.cp: new_v1.remove(v) break for v in new_v2: if v.cp == existing_char.cp: new_v2.remove(v) break if new_v1 and new_v2: new_variants.append((new_v1, new_v2)) # add new variants to current code point # do not keep new_v1 as reflexive variant may have been removed for v in set.difference(set(char.get_variants()), set(existing_char.get_variants())): new_ref = [ref_mapping[script].get(x, x) for x in v.references] new_when = None new_not_when = None if v.when: new_when = script + '-' + v.when if v.not_when: new_not_when = script + '-' + v.not_when merged_lgr.add_variant(existing_char.cp, v.cp, variant_type='blocked', when=new_when, not_when=new_not_when, comment=v.comment, ref=new_ref) # existing variants comment or references are not updated as it is not really important # if when or not-when: # - if existing cp has no concurrent rule or conversely, keep the rule as is (i.e. 
if existing cp has # no rule but cp has one, keep the cp rule with prefixed with the current script) # - if existing cp has the same when/not-when rules (same name, content is not checked), update cp WLE with # the prefix from this script # - if existing cp has a different rule (not the same name), raise an exception existing_when = existing_char.when existing_not_when = existing_char.not_when # retrieve WLE names for other_script in reversed(previous_scripts): if existing_char.when: existing_when = re.sub(r'^{}-'.format(other_script), '', existing_when) if existing_char.not_when: existing_not_when = re.sub(r'^{}-'.format(other_script), '', existing_not_when) if char.when: if not existing_when: existing_char.when = script + '-' + char.when elif existing_when == char.when: existing_char.when = script + '-' + existing_char.when # add a merged rule matching_script = re.sub(r'-{}$'.format(existing_when), '', existing_char.when) merge_rules(lgr, matching_script, merged_lgr, ref_mapping, specific=existing_when) else: raise CharInvalidContextRule(char.cp, char.when) if char.not_when: if not existing_not_when: existing_char.not_when = script + '-' + char.not_when elif existing_not_when == char.not_when: existing_char.not_when = script + '-' + existing_char.not_when # add a merged rule matching_script = re.sub(r'-{}$'.format(existing_not_when), '', existing_char.not_when) merge_rules(lgr, matching_script, merged_lgr, ref_mapping, specific=existing_not_when) else: raise CharInvalidContextRule(char.cp, char.not_when) continue # add new cp in LGR when = None not_when = None if char.when: when = script + '-' + char.when if char.not_when: not_when = script + '-' + char.not_when new_ref = [ref_mapping.get(script, {}).get(x, x) for x in char.references] merged_lgr.add_cp(char.cp, comment=char.comment, ref=new_ref, tag=list(new_tags), when=when, not_when=not_when) for v in char.get_variants(): when = None not_when = None if v.when: when = script + '-' + v.when if v.not_when: not_when = script + '-' + v.not_when new_ref = [ref_mapping[script].get(r, r) for r in v.references] merged_lgr.add_variant(char.cp, v.cp, variant_type='blocked', comment=v.comment, ref=new_ref, when=when, not_when=not_when) # handle transitivity for variants that differ between scripts for var1_list, var2_list in new_variants: for v1 in var1_list: for v2 in var2_list: merged_lgr.add_variant(v1.cp, v2.cp, variant_type='blocked', comment='New variant for merge to keep transitivity') merged_lgr.add_variant(v2.cp, v1.cp, variant_type='blocked', comment='New variant for merge to keep transitivity')
5,353,155
def configure_parser(parser):
    """ Configure parser for this action """
    qisys.parsers.worktree_parser(parser)
5,353,156
def get_tags_from_event():
    """List of tags

    Arguments:
        event {dict} -- Lambda event payload

    Returns:
        list -- List of AWS tags for use in a CFT
    """
    return [
        {
            "Key": "OwnerContact",
            "Value": request_event['OwnerContact']
        }
    ]
5,353,157
def _verify(symbol_table: SymbolTable, ontology: _hierarchy.Ontology) -> List[Error]:
    """Perform a battery of checks on the consistency of ``symbol_table``."""
    errors = _verify_there_are_no_duplicate_symbol_names(symbol_table=symbol_table)

    if len(errors) > 0:
        return errors

    errors.extend(
        _verify_with_model_type_for_classes_with_at_least_one_concrete_descendant(
            symbol_table=symbol_table))
    errors.extend(
        _verify_all_the_function_calls_in_the_contracts_are_valid(
            symbol_table=symbol_table))
    errors.extend(
        _verify_all_non_optional_properties_are_initialized_in_the_constructor(
            symbol_table=symbol_table))
    errors.extend(
        _verify_orders_of_constructors_arguments_and_properties_match(
            symbol_table=symbol_table))
    errors.extend(
        _verify_all_argument_references_occur_in_valid_context(
            symbol_table=symbol_table))
    errors.extend(_verify_constraints_and_constraintrefs(symbol_table=symbol_table))
    errors.extend(_verify_description_rendering_with_smoke(symbol_table=symbol_table))
    errors.extend(_verify_only_simple_type_patterns(symbol_table=symbol_table))

    if len(errors) > 0:
        return errors

    _assert_interfaces_defined_correctly(symbol_table=symbol_table, ontology=ontology)
    _assert_all_class_inheritances_defined_an_interface(symbol_table=symbol_table)
    _assert_self_not_in_concrete_descendants(symbol_table=symbol_table)

    return errors
5,353,158
def gsl_eigen_symmv_alloc(*args, **kwargs):
    """gsl_eigen_symmv_alloc(size_t n) -> gsl_eigen_symmv_workspace"""
    return _gslwrap.gsl_eigen_symmv_alloc(*args, **kwargs)
5,353,159
def add_poll_answers(owner, option):
    """
    Add poll answer object. Matching user and option is considered same.
    :param owner: User object.
    :param option: Chosen poll option.
    :return: Poll answer object, Boolean (true, if created).
    """
    '''
    owner = models.ForeignKey(User, related_name='poll_answers', on_delete=models.CASCADE)
    answer = models.ForeignKey(PollOption, related_name='answers', on_delete=models.CASCADE)
    '''
    created = False
    try:
        a = PollAnswer.objects.get(owner=owner, answer=option)
    except PollAnswer.DoesNotExist:
        a = PollAnswer(owner=owner, answer=option)
        a.save()
        created = True  # a new answer object was created, as promised by the docstring
    return a, created
5,353,160
def calculate_rrfdi ( red_filename, nir_filename ):
    """
    A function to calculate the Normalised Difference Vegetation Index from
    red and near infrared reflectances. The reflectance data ought to be
    present on two different files, specified by the variables `red_filename`
    and `nir_filename`. The file format ought to be recognised by GDAL.
    """
    g_red = gdal.Open ( red_filename )
    red = g_red.ReadAsArray()
    g_nir = gdal.Open ( nir_filename )
    nir = g_nir.ReadAsArray()
    if ( g_red.RasterXSize != g_nir.RasterXSize ) or \
            ( g_red.RasterYSize != g_nir.RasterYSize ):
        print("ERROR: Input datasets don't match!")
        print("\t Red data shape is %dx%d" % ( red.shape ))
        print("\t NIR data shape is %dx%d" % ( nir.shape ))
        sys.exit ( -1 )
    passer = True
    rrfdi = np.where ( passer, (1.*red - 1.*nir ) / ( 1.*nir + 1.*red ), -999 )
    return rrfdi*(-1)
5,353,161
def retry_on_failure(retries=NO_RETRIES): """Decorator which runs a test function and retries N times before actually failing. """ def logfun(exc): print("%r, retrying" % exc, file=sys.stderr) # NOQA return retry(exception=AssertionError, timeout=None, retries=retries, logfun=logfun)
5,353,162
def command_line(): """Generate an Argument Parser object to control the command line options """ parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("-w", "--webdir", dest="webdir", help="make page and plots in DIR", metavar="DIR", default=None) parser.add_argument("-s", "--samples", dest="samples", help="Posterior samples hdf5 file", nargs='+', default=None) parser.add_argument("--labels", dest="labels", help="labels used to distinguish runs", nargs='+', default=None) parser.add_argument("--prior", dest="prior", choices=["population", "default", "both"], default="both", help=("Prior to use when calculating source " "classification probabilities")) parser.add_argument("--plot", dest="plot", help="name of the plot you wish to make", default="bar", choices=["bar", "mass_1_mass_2"]) return parser
5,353,163
def test_xonfg_help(capsys, xonsh_builtins): """verify can invoke it, and usage knows about all the options""" with pytest.raises(SystemExit): xonfig_main(["-h"]) capout = capsys.readouterr().out pat = re.compile(r"^usage:\s*xonfig[^\n]*{([\w,-]+)}", re.MULTILINE) m = pat.match(capout) assert m[1] verbs = set(v.strip().lower() for v in m[1].split(",")) exp = set(v.lower() for v in XONFIG_MAIN_ACTIONS) assert verbs == exp
5,353,164
def strip_price(header_list):
    """Take a list of tag-type values and return a list of the price strings
    found in each one, with the surrounding HTML and leading '$' removed."""
    import re

    regex = r'\$(((\d+).\d+)|(\d+))'
    match_obs = []
    string_list = []
    for item in range(len(header_list)):
        match_obs.append(re.search(regex, str(header_list[item])))
    for i in range(len(match_obs)):
        string_list.append(match_obs[i].group(1))
    return string_list
5,353,165
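A quick, self-contained usage sketch for the `strip_price` helper above; the HTML-ish strings are invented samples, not taken from any real page:

# strip_price() only relies on str() of each item, so plain strings work for a check.
sample_headers = ['<span class="price">$12.99</span>', '<b>$5</b>']
print(strip_price(sample_headers))   # ['12.99', '5']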
def test_set(): """Test raw_to_bids conversion for EEGLAB data.""" # standalone .set file output_path = _TempDir() data_path = op.join(testing.data_path(), 'EEGLAB') raw_fname = op.join(data_path, 'test_raw_onefile.set') raw_to_bids(subject_id=subject_id, session_id=session_id, run=run, task=task, acquisition=acq, raw_file=raw_fname, output_path=output_path, overwrite=False, kind='eeg') cmd = ['bids-validator', '--bep006', output_path] run_subprocess(cmd, shell=shell) with pytest.raises(OSError, match="already exists"): raw_to_bids(subject_id=subject_id, session_id=session_id, run=run, task=task, acquisition=acq, raw_file=raw_fname, output_path=output_path, overwrite=False, kind='eeg') # .set with associated .fdt output_path = _TempDir() data_path = op.join(testing.data_path(), 'EEGLAB') raw_fname = op.join(data_path, 'test_raw.set') raw_to_bids(subject_id=subject_id, session_id=session_id, run=run, task=task, acquisition=acq, raw_file=raw_fname, output_path=output_path, overwrite=False, kind='eeg') cmd = ['bids-validator', '--bep006', output_path] run_subprocess(cmd, shell=shell)
5,353,166
def module(input, output, version): """A decorator which turn a function into a module""" def decorator(f): class Wrapper(Module): def __init__(self): super().__init__(input, output, version) @property def name(self): """The module's name""" return f.__name__ def execute(self, *args, **kwargs): return f(*args, **kwargs) wrapper = Wrapper() return functools.wraps(f)(wrapper) return decorator
5,353,167
def compile_modules_to_ir( result: BuildResult, mapper: genops.Mapper, compiler_options: CompilerOptions, errors: Errors, ) -> ModuleIRs: """Compile a collection of modules into ModuleIRs. The modules to compile are specified as part of mapper's group_map. Returns the IR of the modules. """ deser_ctx = DeserMaps({}, {}) modules = {} # Process the graph by SCC in topological order, like we do in mypy.build for scc in sorted_components(result.graph): scc_states = [result.graph[id] for id in scc] trees = [st.tree for st in scc_states if st.id in mapper.group_map and st.tree] if not trees: continue fresh = all(id not in result.manager.rechecked_modules for id in scc) if fresh: load_scc_from_cache(trees, result, mapper, deser_ctx) else: scc_ir = compile_scc_to_ir(trees, result, mapper, compiler_options, errors) modules.update(scc_ir) return modules
5,353,168
def minutes_to_restarttime(minutes) : """ converts an int meaning Minutes after midnight into a restartTime string understood by the bos command """ if minutes == -1 : return "never" pod = "am" if minutes > 12*60 : pod = "pm" minutes -= 12*60 time = "%d:%02d %s" % (minutes / 60, minutes % 60, pod) return time
5,353,169
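A minimal check of the `minutes_to_restarttime` helper above; it is a pure function, so no AFS/bos setup is needed:

print(minutes_to_restarttime(-1))           # "never"
print(minutes_to_restarttime(390))          # "6:30 am"  (390 minutes after midnight)
print(minutes_to_restarttime(14 * 60 + 5))  # "2:05 pm"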
def create_checkpoint(conn):
    """Retrieve the device checkpoint file and write it to disk (NX-OS only)."""
    if "nxos" in conn.platform:
        filename = f"{conn.hostname}-checkpoint.txt"
        backup = conn._get_checkpoint_file()
        with open(filename, "w") as f:
            f.write(backup)
    else:
        raise ValueError("Checkpoint requires NX-OS")
5,353,170
def clean_storage_exceeding_containers():
    """Remove containers which exceed the max container size defined by
    $MAX_CONTAINER_SIZE
    """
    if max_container_size == -1:
        logging.info("The environment variable MAX_CONTAINER_SIZE was not set.")
        return

    container_size_field = "SizeRw"
    containers = docker_api_client.containers(all=True, size=True, filters={"label": label_filter})

    for container in containers:
        if container_size_field in container:
            container_size_in_gb = container[container_size_field]/1000/1000/1000
            container_id = container["Id"]
            try:
                if max_container_size < container_size_in_gb:
                    logging.info("Delete storage exceeding container " + container["Names"][0])
                    docker_api_client.remove_container(container_id, force=True)
            except docker.errors.APIError as e:
                logging.error("Could not remove / re-create the container: %s", e)
5,353,171
def main(args): """ Parses command line arguments and do the work of the program. "args" specifies the program arguments, with args[0] being the executable name. The return value should be used as the program's exit code. """ print(random.choice(FACTS), file = sys.stderr) options = parse_args(args) # This holds the nicely-parsed options object # Make the output directory if it doesn't exist os.makedirs(options.outdir, exist_ok=True) # Make a place to total up all the stats stats_total = None # Count all the reads read_count = 0 # Record mapping parameters from at least one read params = None for read in read_line_oriented_json(options.input): if params is None: # Go get the mapping parameters params = sniff_params(read) # For the stats dict for each read stats = make_stats(read) if stats_total is None: stats_total = stats else: # Sum up all the stats add_in_stats(stats_total, stats) # Count the read read_count += 1 # After processing all the reads # Print the table now in case plotting fails print_table(read_count, stats_total, params) # Make filter statistic histograms plot_filter_statistic_histograms(options.outdir, stats_total)
5,353,172
def ccm_test(x, y, emb_dim="auto", l_0="auto", l_1="auto", tau=1, n=10, mean_num=10, max_dim=10):
    """
    Convergent cross mapping test: estimate x from y's shadow manifold to test
    whether x causally drives y (x -> y).

    :param x: time series of the putative cause
    :param y: time series of the putative effect
    :param emb_dim: embedding dimension ("auto" to estimate it)
    :param l_0: short library length ("auto" = 10% of the available points)
    :param l_1: long library length ("auto" = 90% of the available points)
    :param tau: embedding delay
    :param n: number of twin surrogates
    :param mean_num: number of bootstrap repetitions averaged per surrogate
    :param max_dim: maximum embedding dimension (unused in this implementation)
    :return: dict with "p_value", "rhos" and "raw_rhos"
    """
    if emb_dim == "auto":
        emb_dim = decide_dim(x, y)
    if l_0 == "auto":
        l_0 = int(np.ceil((len(x) - emb_dim + 1) * 0.1))
    if l_1 == "auto":
        l_1 = int(np.ceil((len(x) - emb_dim + 1) * 0.9))

    ys = twin_surrogate(y, emb_dim, num=n)
    raw_rhos = []
    rhos = []
    max_length = len(ys[0])
    for i in tqdm(range(n)):
        mean = 0
        for j in range(mean_num):
            rho_0, _ = estimate_using_bootstrap(x, y, length=l_0, emb_dim=emb_dim, tau=tau)
            rho_1, _ = estimate_using_bootstrap(x, y, length=l_1, emb_dim=emb_dim, tau=tau)
            rho_s_0, _ = estimate_from_emb_random(x, ys[i], length=l_0, emb_dim=emb_dim, tau=tau,
                                                  max_length=max_length)
            rho_s_1, _ = estimate_from_emb_random(x, ys[i], length=l_1, emb_dim=emb_dim, tau=tau,
                                                  max_length=max_length)
            raw_rhos.append([rho_0, rho_1, rho_s_0, rho_s_1])
            mean += rho_1 - rho_0 - (rho_s_1 - rho_s_0)
        rhos.append(mean/mean_num)
    rhos = np.array(rhos)
    p = 1 - (len(rhos[rhos > 0]) / n)
    return {
        "p_value": p,
        "rhos": rhos,
        "raw_rhos": raw_rhos
    }
5,353,173
def load_carla_env( env_name='why_carla-v0', discount=1.0, number_of_vehicles=100, number_of_walkers=0, display_size=256, max_past_step=1, dt=0.1, discrete=False, discrete_acc=[-3.0, 0.0, 3.0], discrete_steer=[-0.2, 0.0, 0.2], continuous_accel_range=[-3.0, 3.0], continuous_steer_range=[-0.3, 0.3], ego_vehicle_filter='vehicle.lincoln*', port=2000, town='Town03', task_mode='random', max_time_episode=500, max_waypt=12, obs_range=32, lidar_bin=0.5, d_behind=12, out_lane_thres=2.0, desired_speed=8, max_ego_spawn_times=200, display_route=True, pixor_size=64, pixor=False, obs_channels=None, action_repeat=1,): """Loads train and eval environments.""" env_params = { 'number_of_vehicles': number_of_vehicles, 'number_of_walkers': number_of_walkers, 'display_size': display_size, # screen size of bird-eye render 'max_past_step': max_past_step, # the number of past steps to draw 'dt': dt, # time interval between two frames 'discrete': discrete, # whether to use discrete control space 'discrete_acc': discrete_acc, # discrete value of accelerations 'discrete_steer': discrete_steer, # discrete value of steering angles 'continuous_accel_range': continuous_accel_range, # continuous acceleration range 'continuous_steer_range': continuous_steer_range, # continuous steering angle range 'ego_vehicle_filter': ego_vehicle_filter, # filter for defining ego vehicle 'port': port, # connection port 'town': town, # which town to simulate 'task_mode': task_mode, # mode of the task, [random, roundabout (only for Town03)] 'max_time_episode': max_time_episode, # maximum timesteps per episode 'max_waypt': max_waypt, # maximum number of waypoints 'obs_range': obs_range, # observation range (meter) 'lidar_bin': lidar_bin, # bin size of lidar sensor (meter) 'd_behind': d_behind, # distance behind the ego vehicle (meter) 'out_lane_thres': out_lane_thres, # threshold for out of lane 'desired_speed': desired_speed, # desired speed (m/s) 'max_ego_spawn_times': max_ego_spawn_times, # maximum times to spawn ego vehicle 'display_route': display_route, # whether to render the desired route 'pixor_size': pixor_size, # size of the pixor labels 'pixor': pixor, # whether to output PIXOR observation } gym_spec = gym.spec(env_name) env = gym_spec.make(params=env_params) env.reset() while True: action = [2.0, 0.0] obs,r,done,info = env.step(action) if done: obs = env.reset() # if done: # obs = env.reset()
5,353,174
def make_indiv_spacing(subject, ave_subject, template_spacing, subjects_dir):
    """
    Determine the grid spacing for a subject's volume source space that
    corresponds to a given template grid spacing, before a planned morphing
    takes place.

    Parameters:
    -----------
    subject : str
        Subject ID.
    ave_subject : str
        Name or ID of the template brain, e.g., fsaverage.
    template_spacing : float
        Grid spacing used for the template brain.
    subjects_dir : str
        Path to the subjects directory.

    Returns:
    --------
    indiv_spacing : float
        Grid spacing (in mm) for the individual subject that matches the
        template spacing after morphing.
    """
    fname_surf = op.join(subjects_dir, subject, 'bem', 'watershed', '%s_inner_skull_surface' % subject)
    fname_surf_temp = op.join(subjects_dir, ave_subject, 'bem', 'watershed', '%s_inner_skull_surface' % ave_subject)
    surf = mne.read_surface(fname_surf, return_dict=True, verbose='ERROR')[-1]
    surf_temp = mne.read_surface(fname_surf_temp, return_dict=True, verbose='ERROR')[-1]
    mins = np.min(surf['rr'], axis=0)
    maxs = np.max(surf['rr'], axis=0)
    mins_temp = np.min(surf_temp['rr'], axis=0)
    maxs_temp = np.max(surf_temp['rr'], axis=0)

    # Check which dimension (x, y, z) has the greatest difference
    diff = (maxs - mins)
    diff_temp = (maxs_temp - mins_temp)

    # print additional information
    # for c, mi, ma, md in zip('xyz', mins, maxs, diff):
    #     logger.info('    %s = %6.1f ... %6.1f mm --> Difference:  %6.1f mm'
    #                 % (c, mi, ma, md))
    # for c, mi, ma, md in zip('xyz', mins_temp, maxs_temp, diff_temp):
    #     logger.info('    %s = %6.1f ... %6.1f mm --> Difference:  %6.1f mm'
    #                 % (c, mi, ma, md))

    prop = (diff / diff_temp).mean()
    indiv_spacing = (prop * template_spacing)
    print("    '%s' individual-spacing to '%s'[%.2f] is: %.4fmm" % (
        subject, ave_subject, template_spacing, indiv_spacing))

    return indiv_spacing
5,353,175
def sqlpool_blob_auditing_policy_update( cmd, instance, state=None, storage_account=None, storage_endpoint=None, storage_account_access_key=None, storage_account_subscription_id=None, is_storage_secondary_key_in_use=None, retention_days=None, audit_actions_and_groups=None, is_azure_monitor_target_enabled=None): """ Updates a sql pool blob auditing policy. Custom update function to apply parameters to instance. """ _audit_policy_update(cmd, instance, state, storage_account, storage_endpoint, storage_account_access_key, storage_account_subscription_id, is_storage_secondary_key_in_use, retention_days, audit_actions_and_groups, is_azure_monitor_target_enabled) return instance
5,353,176
def check_arguments_for_rescoring(usage_key): """ Do simple checks on the descriptor to confirm that it supports rescoring. Confirms first that the usage_key is defined (since that's currently typed in). An ItemNotFoundException is raised if the corresponding module descriptor doesn't exist. NotImplementedError is raised if the corresponding module doesn't support rescoring calls. Note: the string returned here is surfaced as the error message on the instructor dashboard when a rescore is submitted for a non-rescorable block. """ descriptor = modulestore().get_item(usage_key) if not _supports_rescore(descriptor): msg = _("This component cannot be rescored.") raise NotImplementedError(msg)
5,353,177
def from_system() -> Optional[Config]: """ Config-factory; producing a Config based on environment variables and when environment variables aren't set, fall back to the ``cij_root`` helper. """ conf = Config() # Setup configuration using environment variable definitions paths_from_evars = cij.paths_from_env( "CIJ", [f.upper() for f in CFG_FIELDS] ) missing = False for key, value in paths_from_evars.items(): if value is None: missing = True break setattr(conf, key.lower(), value) if not missing: return conf # Setup configuration using 'cij_root' with Popen(["cij_root"], stdout=PIPE) as proc: out, _ = proc.communicate() if proc.returncode: return None cij_root = out.decode("utf-8").strip() if not os.path.exists(cij_root): return None for field in CFG_FIELDS: setattr(conf, field, os.path.join(cij_root, field)) return conf
5,353,178
def format_search_filter(model_fields):
    """
    Creates an LDAP search filter for the given set of model fields.
    """
    ldap_fields = convert_model_fields_to_ldap_fields(model_fields)
    ldap_fields["objectClass"] = settings.LDAP_AUTH_OBJECT_CLASS
    search_filters = import_func(settings.LDAP_AUTH_FORMAT_SEARCH_FILTERS)(ldap_fields)
    return "(&{})".format("".join(search_filters))
5,353,179
def test_postagging_with_kytea(): """Test KyTea tokenizer.""" try: tokenizer = WordTokenizer(tokenizer="kytea", with_postag=True) except ImportError: pytest.skip("MyKyTea is not installed.") expect = [Token(**kwargs) for kwargs in kytea_tokens_list] result = tokenizer.tokenize("吾輩は猫である") assert expect == result
5,353,180
def multiprocess(func=None, pycsp_host='', pycsp_port=None): """ @multiprocess(pycsp_host='', pycsp_port=None) @multiprocess decorator for making a function into a CSP MultiProcess factory. Each generated CSP process is implemented as a single OS process. All objects and variables provided to multiprocesses through the parameter list must support pickling. Usage: >>> @multiprocess >>> def filter(dataIn, dataOut, tag, debug=False): >>> pass # perform filtering >>> >>> P = filter(A.reader(), B.writer(), "42", debug=True) or >>> @multiprocess(pycsp_host="localhost", pycsp_port=9998) >>> def filter(dataIn, dataOut, tag, debug=False): >>> pass # perform filtering >>> >>> P = filter(A.reader(), B.writer(), "42", debug=True) The CSP MultiProcess factory returned by the @multiprocess decorator: func(*args, **kwargs) """ if func: def _call(*args, **kwargs): return MultiProcess(func, *args, **kwargs) _call.__name__ = func.__name__ return _call else: def wrap_process(func): def _call(*args, **kwargs): kwargs['pycsp_host']= pycsp_host kwargs['pycsp_port']= pycsp_port return MultiProcess(func, *args, **kwargs) _call.__name__ = func.__name__ return _call return wrap_process
5,353,181
def only_percentage_ticks(plot): """ Only show ticks from 0.0 to 1.0. """ hide_ticks(plot, min_tick_value=0, max_tick_value=1.0)
5,353,182
def normalize_img(img): """ normalize image (caffe model definition compatible) input: opencv numpy array image (h, w, c) output: dnn input array (c, h, w) """ scale = 1.0 mean = [104,117,123] img = img.astype(np.float32) img = img * scale img -= mean img = np.transpose(img, (2, 0, 1)) return img
5,353,183
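A minimal sketch of calling the `normalize_img` helper above, using a synthetic 2x2 mid-gray image instead of a real OpenCV frame:

import numpy as np

img = np.full((2, 2, 3), 128, dtype=np.uint8)   # made-up BGR input
blob = normalize_img(img)
print(blob.shape)      # (3, 2, 2) -- channels first
print(blob[:, 0, 0])   # 128 - [104, 117, 123] -> [24., 11., 5.]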
def pages(lst: List[Any], n: int, title: str, *, fmt: str = "```%s```", sep: str = "\n") -> List[discord.Embed]: # noinspection GrazieInspection """ Paginates a list into embeds to use with :class:disputils.BotEmbedPaginator :param lst: the list to paginate :param n: the number of elements per page :param title: the title of the embed :param fmt: a % string used to format the resulting page :param sep: the string to join the list elements with :return: a list of embeds """ l: List[List[str]] = group_list([str(i) for i in lst], n) pgs = [sep.join(page) for page in l] return [ discord.Embed( title=f"{title} - {i + 1}/{len(pgs)}", description=fmt % pg ) for i, pg in enumerate(pgs) ]
5,353,184
def Substitute_Percent(sentence): """ Substitutes percents with special token """ sentence = re.sub(r'''(?<![^\s"'[(])[+-]?[.,;]?(\d+[.,;']?)+%(?![^\s.,;!?'")\]])''', ' @percent@ ', sentence) return sentence
5,353,185
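A small usage sketch for `Substitute_Percent` above; it assumes `re` is imported at module level (the function uses it unqualified), and the sentence is an invented sample:

import re

text = "Revenue grew 12.5% in Q3, margins fell -3%."
print(Substitute_Percent(text))
# both "12.5%" and "-3%" are replaced with " @percent@ "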
def ready_df1(df): """ This function prepares the dataframe for EDA. """ df = remove_columns(df, columns=[ 'nitrogen_dioxide', 'nitrogen_dioxide_aqi', 'sulfur_dioxide', 'sulfur_dioxide_aqi', 'trioxygen', 'trioxygen_aqi', 'volatile', 'volatile_aqi', ]) df['fahrenheit'] = 9.0/5.0 * df['temperature'] + 32 df = df.drop(columns=['temperature']) df = df.rename(index=str, columns={'fahrenheit':'temperature'}) df['carbon_monoxide'] = df['carbon_monoxide'].fillna(0).astype(int) df['timestamp'] = pd.to_datetime(df['timestamp']) return df
5,353,186
def save_reward_history(reward_history, file_name): """ Saves the reward history of the agent teams to create plots for learning performance :param reward_history: :param file_name: :return: """ dir_name = 'Output_Data/' # Intended directory for output files save_file_name = os.path.join(dir_name, file_name) with open(save_file_name, 'a+', newline='') as csvfile: # Record reward history for each stat run writer = csv.writer(csvfile) writer.writerow(['Performance'] + reward_history)
5,353,187
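A quick check of `save_reward_history` above; note the helper appends into a fixed 'Output_Data/' directory in the current working directory, so the sketch creates it first:

import csv
import os

os.makedirs('Output_Data', exist_ok=True)
save_reward_history([0.1, 0.4, 0.9], 'rewards_run0.csv')
with open(os.path.join('Output_Data', 'rewards_run0.csv')) as f:
    print(f.read())   # Performance,0.1,0.4,0.9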
def _wrap(func, args, flip=True): """Return partial function with flipped args if flip=True :param function func: Any function :param args args: Function arguments :param bool flip: If true reverse order of arguments. :return: Returns function :rtype: function """ @wraps(func) def flippedfunc(*args): return func(*args[::-1]) return partial(flippedfunc if flip else func, args)
5,353,188
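A usage sketch for `_wrap` above, assuming `functools.partial` and `functools.wraps` are imported in the surrounding module (the snippet uses them unqualified):

from functools import partial, wraps
import operator

sub_from_10 = _wrap(operator.sub, 10)              # flip=True (default)
print(sub_from_10(3))    # operator.sub(3, 10) -> -7
plain_sub_10 = _wrap(operator.sub, 10, flip=False)
print(plain_sub_10(3))   # operator.sub(10, 3) -> 7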
def compute_kkt_optimality(g, on_bound): """Compute the maximum violation of KKT conditions.""" g_kkt = g * on_bound free_set = on_bound == 0 g_kkt[free_set] = np.abs(g[free_set]) return np.max(g_kkt)
5,353,189
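A minimal example of `compute_kkt_optimality` above with a made-up gradient and bound mask (-1/1 = variable on a bound, 0 = free):

import numpy as np

g = np.array([1.0, -2.0, 3.0])
on_bound = np.array([-1, 0, 1])
print(compute_kkt_optimality(g, on_bound))   # 3.0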
def replace_cipd_revision(file_path, old_revision, new_revision): """Replaces cipd revision strings in file. Args: file_path: Path to file. old_revision: Old cipd revision to be replaced. new_revision: New cipd revision to use as replacement. Returns: Number of replaced occurrences. Raises: IOError: If no occurrences were found. """ with open(file_path) as f: contents = f.read() num = contents.count(old_revision) if not num: raise IOError('Did not find old CIPD revision {} in {}'.format( old_revision, file_path)) newcontents = contents.replace(old_revision, new_revision) with open(file_path, 'w') as f: f.write(newcontents) return num
5,353,190
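A self-contained check of `replace_cipd_revision` above, using a throwaway file and invented revision strings:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "DEPS")
    with open(path, "w") as f:
        f.write("cipd_revision = 'aaaa1111'\n")
    n = replace_cipd_revision(path, "aaaa1111", "bbbb2222")
    print(n)                       # 1
    with open(path) as f:
        print(f.read())            # cipd_revision = 'bbbb2222'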
def spectral_derivs_plot(spec_der, contrast=0.1, ax=None, freq_range=None,
                         fft_step=None, fft_size=None):
    """
    Plot the spectral derivatives of a song in a grey scale.

    spec_der - The spectral derivatives of the song (computed with
               `spectral_derivs`) or the song itself
    contrast - The contrast of the plot
    ax - The matplotlib axis where the plot must be drawn; if None, a new
         axis is created
    freq_range - The amount of frequency to plot, useful only if `spec_der`
                 is a song. Passed to `spectral_derivs`
    fft_step, fft_size - STFT parameters passed to `spectral_derivs`, useful
                         only if `spec_der` is a song
    """
    if spec_der.ndim == 1:
        spec_der = spectral_derivs(spec_der, freq_range, fft_step, fft_size)
    ax = sns.heatmap(spec_der.T, yticklabels=50, xticklabels=50,
                     vmin=-contrast, vmax=contrast, ax=ax, cmap='Greys',
                     cbar=False)
    ax.invert_yaxis()
    return ax
5,353,191
def scale_center(pnt, fac, center): """scale point in relation to a center""" return add(scale(sub(pnt, center), fac), center)
5,353,192
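The `scale_center` helper above relies on `add`, `scale`, and `sub` vector helpers that are not shown; the sketch below supplies hypothetical tuple-based stand-ins purely for illustration, assuming they live in the same module as the helper:

# Hypothetical stand-ins for the vector helpers the snippet expects.
def sub(a, b):   return tuple(x - y for x, y in zip(a, b))
def add(a, b):   return tuple(x + y for x, y in zip(a, b))
def scale(v, f): return tuple(x * f for x in v)

print(scale_center((4, 4), 2, (2, 2)))   # (6, 6): (4, 4) scaled x2 about (2, 2)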
def constrain_uptakes(model, xi):
    """When the cells are not competing (resource excess, high ξ) uptakes are
    restricted by the limits of metabolite importers. When competition occurs,
    it is modeled by limiting uptakes to metabolite_concentration/ξ

    Parameters
    ----------
    model : cobra.Model
    xi : float = D/X
    """
    v_glc = .5  # mmol/gDW/h
    v_aa = 0.05
    IMDM = pandas.read_table('IMDM.txt', comment='#', sep='\s+')
    conc = {}
    for index, row in IMDM.iterrows():
        conc[row['id']] = row['mM']
    for met in conc:
        if met == 'glc_D_e':
            if xi == 0:
                model.reactions.get_by_id('EX_glc_e_').lower_bound = -v_glc
            else:
                model.reactions.get_by_id('EX_glc_e_').lower_bound = \
                    -min(v_glc, conc[met]/xi)
        else:
            try:
                name = met.split('_')[0]
                if xi == 0:
                    model.reactions.get_by_id(
                        'EX_' + name + '_e_').lower_bound = -v_aa
                else:
                    model.reactions.get_by_id(
                        'EX_' + name + '_e_').lower_bound = \
                        -min(v_aa, conc[met]/xi)
            except KeyError:
                if xi == 0:
                    model.reactions.get_by_id(
                        'EX_' + name + '_L_e_').lower_bound = -v_aa
                else:
                    model.reactions.get_by_id(
                        'EX_' + name + '_L_e_').lower_bound \
                        = -min(v_aa, conc[met]/xi)
5,353,193
def gopherize_feed(feed_url, timestamp=False, plug=True): """Return a gophermap string for the feed at feed_url.""" return gopherize_feed_object(feedparser.parse(feed_url), timestamp, plug)
5,353,194
def first_sunday_of_month(datetime: pendulum.DateTime) -> pendulum.DateTime: """Get the first Sunday of the month based on a given datetime. :param datetime: the datetime. :return: the first Sunday of the month. """ return datetime.start_of("month").first_of("month", day_of_week=7)
5,353,195
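A usage sketch for `first_sunday_of_month` above; it assumes a pendulum release where `day_of_week=7` means Sunday (as the helper itself does, e.g. pendulum 3.x):

import pendulum

dt = pendulum.datetime(2023, 6, 15)
print(first_sunday_of_month(dt))   # first Sunday of June 2023, i.e. the 4th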
def evaluate_single_model( model_path, model_index, save_preds_to_db, save_prefix, metrics, k_values, X, y, labeled_indices): """ Evaluate a single model with provided model specifications and data. Arguments: - model_path: path to load the model - model_index: index for the model - save_preds_to_db: whether or not to save predictions to database - save_prefix: string prefix for any tables created - metrics: a list of metrics to use - k_values: k-values used for computing the metrics - X: feature array - y: label array - labeled_indices: indices of rows that have labels Returns: - model_index: index for the model - model_results: an (M x K) array of model results, for each metric, at each k-value """ # Load saved model with open(model_path, 'rb') as file: model = pickle.load(file) # Get predictions pred_table_name = f'{save_prefix}_model_{model_index}' if save_preds_to_db else None y_preds, probs = get_predictions(model, X, k_values=k_values, pred_table_name=pred_table_name) # Filter labels y_preds_filtered = y_preds[labeled_indices] y_filtered = y.to_numpy(copy=True)[labeled_indices] # Calculate metrics for each k value model_results = np.zeros((len(metrics), len(k_values))) for i, metric in enumerate(metrics): for j in range(len(k_values)): model_results[i, j] = metric(y_filtered, y_preds_filtered[:, j]) return model_index, model_results
5,353,196
def slack_notify_update_user_queue(username: str):
    """ Notify that a user has been added to the update queue """
    channel = settings.SLACK_CHANNEL_CRONTAB
    server = 'PROD' if settings.IS_PROD else 'LOCAL'

    attachments = [
        {
            "color": "#ff0000",
            "title": 'Update failed because of the RATE LIMIT',
            "pretext": f'[{server}] {username} has been added to the queue (DB).',
        }
    ]
    if channel:
        slack = slackweb.Slack(url=channel)
        slack.notify(attachments=attachments)
5,353,197
async def test_update_raceplan( http_service: Any, token: MockFixture, context: dict ) -> None: """Should return No Content.""" url = f"{http_service}/raceplans" headers = { hdrs.CONTENT_TYPE: "application/json", hdrs.AUTHORIZATION: f"Bearer {token}", } id = context["id"] url = f"{url}/{id}" update_raceplan = deepcopy(context) update_raceplan["event_id"] = "new_event_id" request_body = json.dumps(update_raceplan, indent=4, sort_keys=True, default=str) async with ClientSession() as session: async with session.put(url, headers=headers, data=request_body) as response: pass assert response.status == 204
5,353,198
def parse_args ( ) -> argparse.Namespace: """ Parser for cli arguments. Returns: A Namespace containing all parsed data """ # The parser itself parser = argparse.ArgumentParser(add_help=False) parser.description = "Evaluates single choice sheets" # Groups for ordering arguments in help command grp_req_excl = parser.add_argument_group("required arguments, mutually exclusive") grp_req = parser.add_argument_group("required arguments") grp_opt = parser.add_argument_group("optional arguments") ######################### ##### Required Args ##### ######################### # Input path - either an url or a path to a local file io_grp = grp_req_excl.add_mutually_exclusive_group(required=True) io_grp.add_argument("-u", "--url", dest="url", help="URL to the image or pdf to be evaluated.") io_grp.add_argument("-f", "--file", dest="file", help="path to the image or pdf to be evaluated.") # required arg for number of answers each question grp_req.add_argument("-n", "--num", dest="num", required=True, type=_arg_int_pos, help="number of answers per question") ######################### ##### Optional Args ##### ######################### # help message. Added manually so it is shown under optional grp_opt.add_argument("-h", "--help", action="help", help="show this help message and exit") # path to store the result picture to grp_opt.add_argument("-i", "--iout", dest="iout", help="path for the output picture to be stored.") # path to store the result list to grp_opt.add_argument("-d", "--dout", dest="dout", help="path for the output data to be stored.") # path to compare results generated by the program with data stored in a file grp_opt.add_argument("-c", "--compare", dest="comp", help="compares the calculated result to a given result") # plotting all steps grp_opt.add_argument("-p", "--plot", dest="plot", action="store_true", help="plots every single step") return parser.parse_args()
5,353,199