content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def chown(
    path: Pathable, owner: str, flags: t.Optional[str] = None, sudo: bool = False
) -> ChangeList:
    """Change a path's owner."""
    path = _to_path(path)
    needs_sudo_w = need_sudo_to_write(path)
    needs_sudo_r = need_sudo_to_read(path)
    if needs_sudo_r and not sudo:
        raise NeedsSudoException(f"chown {path}")

    curr_owner = _run(
        f"stat -c '%U:%G' {path}", check=True, sudo=needs_sudo_r
    ).stdout.decode().strip()
    # Compare against the user only when the requested owner has no group part.
    if ":" not in owner:
        curr_owner = curr_owner.split(":", 1)[0]

    if curr_owner != owner:
        if needs_sudo_w and not sudo:
            raise NeedsSudoException(f"chown {owner} {path}")
        _run(f"chown {flags or ''} {owner} {path}", sudo=needs_sudo_w, check=True)
        return [cl(ChownModify, path, owner, curr_owner, flags)]
    return []
124ba2877dc2ff4396d84c3b8825846c9d057cf5
3,653,000
def metric_dist(endclasses, metrics='all', cols=2, comp_groups={}, bins=10, metric_bins={}, legend_loc=-1, xlabels={}, ylabel='count', title='', indiv_kwargs={}, figsize='default', v_padding=0.4, h_padding=0.05, title_padding=0.1, **kwargs): """ Plots the histogram of given metric(s) separated by comparison groups over a set of scenarios Parameters ---------- endclasses : dict Dictionary of metrics with structure {'scen':{'metric':value}} metrics : list, optional list of metrics in the dictionary to plot cols : int, optional columns to use in the figure. The default is 2. comp_groups : dict, optional Dictionary for comparison groups (if more than one) with structure: {'group1':('scen1', 'scen2'), 'group2':('scen3', 'scen4')} Default is {} If a legend is shown, group names are used as labels. bins : int Number of bins to use (for all plots). Default is None metric_bins : dict, Dictionary of number of bins to use for each metric with structure {'metric':num} Default is {} legend_loc : int, optional Specifies the plot to place the legend on, if runs are being compared. Default is -1 (the last plot) To remove the legend, give a value of False xlabels : dict, optional Label for the x-axes with structure {'metric':'label'} ylabel : str, optional Label for the y-axes. Default is 'time' title : str, optional overall title for the plot. Default is '' indiv_kwargs : dict, optional dict of kwargs with structure {comp1:kwargs1, comp2:kwargs2}, where where kwargs is an individual dict of keyword arguments for the comparison group comp (or scenario, if not aggregated) which overrides the global kwargs (or default behavior). figsize : tuple (float,float) x-y size for the figure. The default is 'default', which dymanically gives 3 for each column and 2 for each row v_padding : float vertical padding between subplots as a fraction of axis height h_padding : float horizontal padding between subplots as a fraction of axis width title_padding : float padding for title as a fraction of figure height **kwargs : kwargs keyword arguments to mpl.hist e.g. bins, etc """ #Sort into comparison groups if not comp_groups: groupmetrics = {'default':endclasses} else: groupmetrics = {group:{ec:cl for ec,cl in endclasses.items() if ec in groupscens} for group, groupscens in comp_groups.items()} template = [*endclasses.values()][0] if metrics=='all': plot_values = [i for i in template.keys()] else: plot_values = [i for i in template.keys() if i in metrics] num_plots = len(plot_values) if num_plots==1: cols=1 rows = int(np.ceil(num_plots/cols)) if figsize=='default': figsize=(cols*3, 2*rows) fig, axs = plt.subplots(rows,cols, sharey=True, sharex=False, figsize=figsize) if type(axs)==np.ndarray: axs = axs.flatten() else: axs=[axs] num_bins = bins for i, plot_value in enumerate(plot_values): ax = axs[i] xlabel = xlabels.get(plot_value, plot_value) if type(xlabel)==str: ax.set_xlabel(xlabel) else: ax.set_xlabel(' '.join(xlabel)) ax.grid(axis='y') fulldata = [ec[plot_value] for endc in groupmetrics.values() for ec in endc.values()] bins = np.histogram(fulldata, metric_bins.get(plot_value, num_bins))[1] if not i%cols: ax.set_ylabel(ylabel) for group, endclasses in groupmetrics.items(): local_kwargs = {**kwargs, **indiv_kwargs.get(group,{})} x = [ec[plot_value] for ec in endclasses.values()] ax.hist(x, bins, label=group, **local_kwargs) multiplot_legend_title(groupmetrics, axs, ax, legend_loc, title,v_padding, h_padding, title_padding) return fig, axs
95bbc645abad812585de58d4724787e310424f4a
3,653,001
def get_colors(df, colormap=None, vmin=None, vmax=None, axis=1):
    """
    Automatically get a colormap entry for every value passed in, with the
    option to normalise the colormap.

    :param df: DataFrame whose index (axis=0) or columns (axis=1) supply the
        values to map to colors. For non-numeric values, the fallback maps
        range(len(values)) to colors instead.
    :param colormap: colormap to use. All options can be found here:
        https://matplotlib.org/examples/color/colormaps_reference.html
    :param vmin, vmax: numbers used to normalise the colormap, if a normalised
        colormap is needed.
    :return: an array of RGBA values.

    Original version found on Stack Overflow (without the try/except), but the
    source can no longer be located.
    """
    if colormap is None:
        colormap = plt.cm.RdBu
    if axis == 0:
        values = df.index
    elif axis == 1:
        values = df.columns
    norm = plt.Normalize(vmin, vmax)
    try:
        return colormap(norm(values))
    except (AttributeError, TypeError):
        # May happen when given a list of categorical values
        return colormap(norm(range(len(values))))
7da0c0a8f8542c9a8137121c4664da91485d8cca
3,653,002
def proxy_channels(subreddits): """ Helper function to proxy submissions and posts. Args: subreddits (list of praw.models.Subreddit): A list of subreddits Returns: list of ChannelProxy: list of proxied channels """ channels = { channel.name: channel for channel in Channel.objects.filter( name__in=[subreddit.display_name for subreddit in subreddits] ) } return [ ChannelProxy(subreddit, channels[subreddit.display_name]) for subreddit in subreddits if subreddit.display_name in channels ]
caab3ecfa5a85b06d94192fe77308724f67b0e96
3,653,003
def anno2map(anno): """ anno: { 'file' ==> file index 'instances': [ { 'class_name': 'class_idx': 'silhouette': 'part': [(name, mask), ...] }, ... ] } """ height, width = anno.instances[0].silhouette.shape cls_mask = np.zeros((height, width), dtype=np.uint8) inst_mask = np.zeros((height, width), dtype=np.uint8) part_mask = np.zeros((height, width), dtype=np.uint8) for i, inst in enumerate(anno.instances): assert height == inst.silhouette.shape[0] and width == inst.silhouette.shape[1] cls_mask[inst.silhouette.astype(np.bool)] = inst.class_idx inst_mask[inst.silhouette.astype(np.bool)] = i for pname, pmask in inst.part: assert pname in PASCAL_PART2ID_[inst.class_idx-1], f'The part {pname} is undefined in {inst.class_name}' assert inst.silhouette[pmask.astype(np.bool)].all(), 'The part region is not a complete subregion of the object' # if not inst.silhouette[pmask].all(): # print(f'Warning: [{anno.file}: {pname}] The part region is not a complete subregion of the object') pid = PASCAL_PART2ID_[inst.class_idx-1][pname] part_mask[pmask.astype(np.bool)] = pid return cls_mask, inst_mask, part_mask
18841b323d4368c5f1681dd34586e82aa8a9d97c
3,653,004
def string_to_bool(val: str):
    """Convert a homie string bool to a python bool"""
    return val == STATE_ON
f7fc9768762256fc5c2cf818949793f72948db98
3,653,005
def profile(request): """ Update a User profile using built in Django Users Model if the user is logged in otherwise redirect them to registration version """ if request.user.is_authenticated(): obj = get_object_or_404(TolaUser, user=request.user) form = RegistrationForm(request.POST or None, instance=obj,initial={'username': request.user}) if request.method == 'POST': if form.is_valid(): form.save() messages.error(request, 'Your profile has been updated.', fail_silently=False) return render(request, "registration/profile.html", { 'form': form, 'helper': RegistrationForm.helper }) else: return HttpResponseRedirect("/accounts/register")
225ac41ec6565e30f54ece3cad76b2a0770a319d
3,653,006
def conj(Q):
    """Returns the conjugate of a dual quaternion."""
    res = cs.SX.zeros(8)
    res[0] = -Q[0]
    res[1] = -Q[1]
    res[2] = -Q[2]
    res[3] = Q[3]
    res[4] = -Q[4]
    res[5] = -Q[5]
    res[6] = -Q[6]
    res[7] = Q[7]
    return res
e0a6d67d322f2c939e2d8249983789222c96363d
3,653,007
def benchmark_summary(benchmark_snapshot_df):
    """Creates summary table for a benchmark snapshot with columns:
    |fuzzer|time||count|mean|std|min|25%|median|75%|max|
    """
    groups = benchmark_snapshot_df.groupby(['fuzzer', 'time'])
    summary = groups['edges_covered'].describe()
    summary.rename(columns={'50%': 'median'}, inplace=True)
    return summary.sort_values(('median'), ascending=False)
5cdaa888adb47906659a249076c8a4acb27c6d1d
3,653,008
def is_pio_job_running(*target_jobs: str) -> bool:
    """
    pass in jobs to check if they are running
    ex:
    > result = is_pio_job_running("od_reading")
    > result = is_pio_job_running("od_reading", "stirring")
    """
    with local_intermittent_storage("pio_jobs_running") as cache:
        for job in target_jobs:
            if cache.get(job, b"0") == b"1":
                return True
    return False
0ed9daf39372ead913ad52d5c93426eeb06f74ed
3,653,009
def encode(text, encoding='utf-8'):
    """
    Returns a unicode representation of the string
    """
    if isinstance(text, basestring):
        if not isinstance(text, unicode):
            text = unicode(text, encoding, 'ignore')
    return text
81d9d2d5cf920c0f15ffc5e50fb670b079ae1f90
3,653,010
def calculate_sparsity(df: pd.DataFrame) -> tuple:
    """Calculate the data sparsity based on ratings and reviews.

    Args:
        df ([pd.DataFrame]): DataFrame with counts of `overall` and `reviewText`
            measured against total `reviewerID` * `asin`.

    Returns:
        [tuple]: Tuple of data sparsity wrt. ratings (`overall`) and reviews (`reviewText`).
    """
    # no. of ratings and reviews
    rating_numerator = df["overall"].count()
    review_numerator = df["reviewText"].count()

    # number of users and items
    num_users = df["reviewerID"].nunique()
    num_items = df["asin"].nunique()
    denominator = num_users * num_items

    rating_sparsity = (1.0 - (rating_numerator * 1.0) / denominator) * 100
    review_sparsity = (1.0 - (review_numerator * 1.0) / denominator) * 100
    return rating_sparsity, review_sparsity
53e6b2682b67ceb8bbb4f5a6857cdbd565321421
3,653,011
def char_fun_est( train_data, paras=[3, 20], n_trees = 200, uv = 0, J = 1, include_reward = 0, fixed_state_comp = None): """ For each cross-fitting-task, use QRF to do prediction paras == "CV_once": use CV_once to fit get_CV_paras == True: just to get paras by using CV Returns ------- a list of four estimated fun, and a list of four true y vectors """ char_funs = [] X1, y1 = get_pairs(train_data, is_forward = 1, J = J, include_reward = include_reward, fixed_state_comp = fixed_state_comp) X2, y2 = get_pairs(train_data, is_forward = 0, J = J, include_reward = include_reward, fixed_state_comp = fixed_state_comp) X, y = [X1, X2], [y1, y2] if paras in ["CV", "CV_once"]: for i in range(2): rfqr = RandomForestQuantileRegressor(random_state=0, n_estimators = n_trees) gd = GridSearchCV(estimator = rfqr, param_grid = param_grid, cv = 5, n_jobs = n_jobs, verbose=0) gd.fit(X[i], y[i]) best_paras = gd.best_params_ if paras == "CV_once": # only return forward return [best_paras['max_depth'], best_paras['min_samples_leaf']] elif paras == "CV": print("best_paras:", best_paras) # use the optimal paras and the whole dataset rfqr1 = RandomForestQuantileRegressor( random_state=0, n_estimators = n_trees, max_depth=best_paras['max_depth'], min_samples_leaf=best_paras['min_samples_leaf'], n_jobs = n_jobs) char_funs.append(rfqr1.fit(X[i], y[i])) else: # pre-specified paras max_depth, min_samples_leaf = paras for i in range(2): char_funs.append( RandomForestQuantileRegressor( random_state=0, n_estimators = n_trees, max_depth = max_depth, min_samples_leaf = min_samples_leaf, n_jobs = n_jobs).fit( X[i], y[i])) return char_funs
51012cc870d9bcd1f86fe69534e26d7d365ad271
3,653,012
def create_tables_for_import(volume_id, namespace): """Create the import or permanent obs_ tables and all the mult tables they reference. This does NOT create the target-specific obs_surface_geometry tables because we don't yet know what target names we have.""" volume_id_prefix = volume_id[:volume_id.find('_')] instrument_name = VOLUME_ID_PREFIX_TO_INSTRUMENT_NAME[volume_id_prefix] if instrument_name is None: instrument_name = 'GB' mission_abbrev = VOLUME_ID_PREFIX_TO_MISSION_ABBREV[volume_id_prefix] mission_name = MISSION_ABBREV_TO_MISSION_TABLE_SFX[mission_abbrev] mult_table_schema = import_util.read_schema_for_table('mult_template') # This is an awful hack because this one mult table has an extra field # in it. Yuck! XXX mult_target_name_table_schema = ( import_util.read_schema_for_table( 'mult_target_name_template')) table_schemas = {} table_names_in_order = [] for table_name in TABLES_TO_POPULATE: table_name = table_name.replace('<INST>', instrument_name.lower()) table_name = table_name.replace('<MISSION>', mission_name.lower()) if table_name.startswith('obs_surface_geometry__'): # Note that we aren't replacing <TARGET> here because we don't know # the target name! We're only using this schema to get field names, # data source, source order, etc. The real use of the schema will be # later when we finally create and insert into the correct table for # each target. table_schema = import_util.read_schema_for_table( 'obs_surface_geometry_target') else: table_schema = import_util.read_schema_for_table(table_name) if table_schema is None: continue table_schemas[table_name] = table_schema table_names_in_order.append(table_name) if table_name.startswith('obs_surface_geometry__'): # Skip surface geo tables until they are needed continue # Create the referenced mult_ tables for table_column in table_schema: if table_column.get('put_mults_here', False): continue field_name = table_column['field_name'] pi_form_type = table_column.get('pi_form_type', None) if pi_form_type is not None and pi_form_type.find(':') != -1: pi_form_type = pi_form_type[:pi_form_type.find(':')] if pi_form_type in GROUP_FORM_TYPES: mult_name = import_util.table_name_mult(table_name, field_name) if mult_name in MULT_TABLES_WITH_TARGET_GROUPING: schema = mult_target_name_table_schema else: schema = mult_table_schema if (impglobals.DATABASE.create_table(namespace, mult_name, schema) and namespace == 'import'): _CREATED_IMP_MULT_TABLES.add(mult_name) impglobals.DATABASE.create_table(namespace, table_name, table_schema) return table_schemas, table_names_in_order
8e02e98031e4242e2e0d559750258d74180593db
3,653,013
def org_repos(info):
    """
    Process an organization's repositories.
    :param info: dict
    :return: two lists: the first contains dicts (id, full_name, url),
        the second contains the languages used
    """
    repo_info = []
    languages = []
    if info:
        for repo in info:
            temp = {"id": repo["id"], "full_name": repo["full_name"],
                    "url": repo["url"], "language": repo["language"]}
            repo_info.append(temp)
            languages.append(repo["language"])
    return repo_info, languages
9d5633bf834845e1301e0fd383a57c42f2bd530c
3,653,014
import datetime
from typing import Union


def year(yyyy_mm_dd: Union[str, datetime.date]) -> int:
    """
    Extracts the year of a given date, similar to the yyyy function but returns an int

    >>> year('2020-05-14')
    2020
    """
    date, _ = _parse(yyyy_mm_dd, at_least="%Y")
    return date.year
eb34fb578d5ec7130d5670332aa4bbb9aca186ac
3,653,015
def getTypeLevel(Type):
    """Checks whether a spectral data type is available in the endmember library.

    Args:
        Type: the type of spectra to select.

    Returns:
        level: the metadata "level" of the group for subsetting. Returns 0 if not found.
    """
    for i in range(4):
        level = i + 1
        available_types = listTypes(level=level)
        if Type in available_types:
            return level
    return 0
6c26f7dc570b5a7f0cacdc1171ae733b005e7992
3,653,016
from typing import Union def construct_creator(creator: Union[dict, str], ignore_email): """Parse input and return an instance of Person.""" if not creator: return None, None if isinstance(creator, str): person = Person.from_string(creator) elif isinstance(creator, dict): person = Person.from_dict(creator) else: raise errors.ParameterError("Invalid creator type") message = 'A valid format is "Name <email> [affiliation]"' if not person.name: # pragma: no cover raise errors.ParameterError(f'Name is invalid: "{creator}".\n{message}') if not person.email: if not ignore_email: # pragma: no cover raise errors.ParameterError(f'Email is invalid: "{creator}".\n{message}') else: no_email_warning = creator else: no_email_warning = None return person, no_email_warning
5306f288874f4d15d5823c34268321121909a3ad
3,653,017
def _encode_query(items: dict, *, mask=False) -> str: """Encode a dict to query string per CLI specifications.""" pairs = [] for key in sorted(items.keys()): value = _MASK if mask and key in _MASKED_PARAMS else items[key] item = "{}={}".format(key, _quote(value)) # Ensure 'url' goes last per CLI spec if key == "url": pairs.append(item) else: pairs.insert(0, item) return "&".join(pairs)
918f0aa4198367fb3889eb67bba622d272082af7
3,653,018
def spatial_shape_after_conv(input_spatial_shape, kernel_size, strides, dilation, padding):
    """ This function calculates the spatial shape after a conv layer.

    The formula is obtained from:
    https://www.tensorflow.org/api_docs/python/tf/nn/convolution

    It should be noted that the current function assumes PS is done before conv.

    :param input_spatial_shape:
    :param kernel_size:
    :param strides:
    :param dilation:
    :param padding:
    :return:
    """
    if isinstance(input_spatial_shape, (list, tuple)):
        return [spatial_shape_after_conv(
            one_shape, kernel_size, strides, dilation, padding)
            for one_shape in input_spatial_shape]
    else:
        if padding in ['same', 'SAME']:
            return np.int(np.ceil(input_spatial_shape / strides))
        else:
            return np.int(np.ceil(
                (input_spatial_shape - (kernel_size - 1) * dilation) / strides))
a7d924260feb478e44a9ec166fe4248b51632270
3,653,019
def sample_partition(dependency_tensor, null_distribution, updates=100, initial_partition=None ): """ Sample partition for a multilayer network with specified interlayer dependencies :param dependency_tensor: dependency tensor :param null_distribution: null distribution (function that takes a state-node as input and returns a random mesoset assignment :param updates: expected number of (pseudo-)Gibbs-sampling updates per state-node (has no effect for fully ordered dependency tensor. (optional, default=100) :param initial_partition: mapping of state-nodes to initial meso-set assignment. (optional, default=sampled from null distribution) :return: sampled partition as a mapping (dict) from state-nodes to meso-set assignments. """ if initial_partition is None: partition = {node: null_distribution(node) for node in dependency_tensor.state_nodes()} else: partition = {node: initial_partition[node] for node in dependency_tensor.state_nodes()} random_layers = list(dependency_tensor.random_aspect_layers()) if len(random_layers) <= 1: n_updates = 1 else: n_updates = updates * len(random_layers) for ordered_layer in dependency_tensor.ordered_aspect_layers(): for it in range(n_updates): random_layer = _rand.choice(random_layers) layer = tuple(o+r for o, r in zip(ordered_layer, random_layer)) for node in dependency_tensor.state_nodes(layer): update_node = dependency_tensor.getrandneighbour(node) if update_node == node: partition[node] = null_distribution(node) else: partition[node] = partition[update_node] return partition
d6c469054057f18ad1e7ab3abd96103e81931649
3,653,020
import re


def normalize_spaces(s: str) -> str:
    """
    Return a new string with runs of consecutive whitespace replaced by a single
    space and leading/trailing whitespace removed.
    """
    return re.sub(r'\s+', ' ', s).strip()
aac95ed5b77b5c65f9ce16cfa685d80c56f0e66f
3,653,021
def power_iter(mat_g, error_tolerance=1e-6, num_iters=100): """Power iteration. Args: mat_g: the symmetric PSD matrix. error_tolerance: Iterative exit condition. num_iters: Number of iterations. Returns: eigen vector, eigen value, num_iters """ mat_g_size = mat_g.shape[-1] def _iter_condition(state): i, unused_v, unused_s, unused_s_v, run_step = state return jnp.logical_and(i < num_iters, run_step) def _iter_body(state): """One step of power iteration.""" i, new_v, s, s_v, unused_run_step = state new_v = new_v / jnp.linalg.norm(new_v) s_v = jnp.einsum( 'ij,j->i', mat_g, new_v, precision=_INVERSE_PTH_ROOT_PRECISION) s_new = jnp.einsum( 'i,i->', new_v, s_v, precision=_INVERSE_PTH_ROOT_PRECISION) return (i + 1, s_v, s_new, s_v, jnp.greater(jnp.abs(s_new - s), error_tolerance)) # Figure out how to use step as seed for random. v_0 = onp.random.uniform(-1.0, 1.0, mat_g_size).astype(mat_g.dtype) init_state = tuple([0, v_0, jnp.zeros([], dtype=mat_g.dtype), v_0, True]) num_iters, v_out, s_out, _, _ = lax.while_loop( _iter_condition, _iter_body, init_state) v_out = v_out / jnp.linalg.norm(v_out) return v_out, s_out, num_iters
11717fda8b3dedce94e9be3157a78d8d95e0e989
3,653,022
from typing import Union from typing import Any def _get_values_target_representation( val: Union[str, Any], target_representation: str, conversion_type: str, conversion_rate: float, n_round: int, split: bool, input_symbol: str, target_symbol: str, ) -> Any: """ Returns the value of the converted currency in the specified format. The two formats specified are "abbr", "decimal". """ val_new = 0.0 val = float(val) # 1. for fiat-to-fiat and crypto-to-fiat we multiply # 2. for fiat-to-crypto we divide if conversion_type in ("fiat_to_fiat", "crypto_to_fiat"): val_new = val * conversion_rate else: val_new = val / conversion_rate if target_representation == "abbr": val = "{:,.{a}f}".format(val, a=n_round) target_val = "{:,.{a}f}".format(val_new, a=n_round) if split: return val, target_val else: return input_symbol.upper() + str(val), target_symbol.upper() + str(target_val) else: return np.round(val, n_round), np.round(val_new, n_round)
188fe2da51a177fc743ee30c67807b46730a3a34
3,653,023
from collections import OrderedDict


def GetResidues(mol, atom_list=None):
    """Create a dictionary that maps residues to atom IDs:
    (res number, res name, chain id) --> [atom1 idx, atom2 idx, ...]
    """
    residues = OrderedDict()
    if atom_list is None:
        atom_list = range(mol.GetNumAtoms())
    for aid in atom_list:
        res_id = GetAtomResidueId(mol.GetAtomWithIdx(aid))
        if res_id not in residues:
            residues[res_id] = []
        residues[res_id].append(aid)
    return residues
51f66cf9c3203573df5660205581fd0571826876
3,653,024
def BIC(y_pred, y, k, llf=None):
    """Bayesian Information Criterion

    Args:
        y_pred (array-like)
        y (array-like)
        k (int): number of features
        llf (float): result of log-likelihood function
    """
    n = len(y)
    if llf is None:
        llf = np.log(SSE(y_pred, y))
    return np.log(n) * k - 2 * llf
f070400f045b1e8f98b453b8d5f8661271b1969e
3,653,025
def create_abstract_insert(table_name, row_json, return_field=None): """Create an abstracted raw insert psql statement for inserting a single row of data :param table_name: String of a table_name :param row_json: dictionary of ingestion data :param return_field: String of the column name to RETURNING in statement :return: String of an insert statement """ columns = [] for key, value in row_json.items(): if key in columns: continue else: columns.append(key) values = [':' + item for item in columns] values = ', '.join(map(str, values)) list_columns = ', '.join(map(str, columns)) if return_field is not None: statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \ + ' VALUES (' + values + ') RETURNING ' + str(return_field) else: statement = 'INSERT INTO ' + str(table_name) + '(' + list_columns + ')' \ + ' VALUES (' + values + ')' return statement
8b0a960178a0162b7a0c339682541f0f13520d85
3,653,026
import os def get_childs(root_dir, is_dir=False, extension='.jpg', max_depth=0): """ get files or directories related root dir :param root_dir: :param is_dir: :param extension: :param max_depth: :return: """ if os.path.exists(root_dir) is False: raise FileNotFoundError("not exist dir : {}".format(root_dir)) target_items = [] childs, next_dirs = _get_sub_childs(root_dir, is_dir, extension) target_items.extend(childs) while max_depth > 0: next_sub_dirs = [] for sub in next_dirs: if not os.path.isdir(sub): continue sub_child_items, sub_dirs = _get_sub_childs(sub, is_dir, extension) next_sub_dirs.extend(sub_dirs) target_items.extend(sub_child_items) max_depth -= 1 next_dirs = next_sub_dirs return target_items
6e705d796447dd968aa9a362093a68ec56516b85
3,653,027
def descsum_create(s):
    """Add a checksum to a descriptor without one."""
    symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
    checksum = descsum_polymod(symbols) ^ 1
    return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
52ce47b470dada282318cd23c61665adfb7554c3
3,653,028
def _get_header(key):
    """Return message header"""
    try:
        return request.headers[key]
    except KeyError:
        abort(400, "Missing header: " + key)
cbdf9928f6ce4c41145529c68039761eab65c3d0
3,653,029
def compute_solution(primes_list, triangle_sequence): """ Auxiliary function to compute the solution to the problem. """ factorise_w_primes = partial(factorise, primes=primes_list) all_factors = vmap(factorise_w_primes)(triangle_sequence) # number of divisors = number of possible combinations of prime factors # = inner product(number of states for each prime in a number) # e.g. 1024 has 11 states for prime=2, and 1 state for the others # 3072 has 11 states for prime=2 and 2 states for prime=3 -> 22 divisors all_factors = all_factors + 1 n_combinations = jnp.prod(all_factors, axis=1).astype(jnp.int32) return n_combinations
5df3444b10a4ae316fab1c21c87e3187d4792f14
3,653,030
import platform def key_description(character): """ Return the readable description for a key. :param character: An ASCII character. :return: Readable description for key. """ if "Windows" in platform.system(): for key, value in hex_keycodes.items(): if value == character: return key else: return "" else: ascii_code = ord(chr(character)) if ascii_code < 32: return 'Ctrl+{:c}'.format(ord('@') + ascii_code) else: return repr(character)
9ed5bd198898c2f5cf234cb0c46924286fa18e51
3,653,031
import os def find_paste_config(): """Find freezer's paste.deploy configuration file. freezer's paste.deploy configuration file is specified in the ``[paste_deploy]`` section of the main freezer-api configuration file, ``freezer-api.conf``. For example:: [paste_deploy] config_file = freezer-paste.ini :returns: The selected configuration filename :raises: exception.ConfigFileNotFound """ if CONF.paste_deploy.config_file: paste_config = CONF.paste_deploy.config_file if not os.path.isabs(paste_config): paste_config = CONF.find_file(paste_config) elif CONF.config_file: paste_config = CONF.config_file[0] else: # this provides backwards compatibility for keystone.conf files that # still have the entire paste configuration included, rather than just # a [paste_deploy] configuration section referring to an external file paste_config = CONF.find_file('freezer-api.conf') if not paste_config or not os.path.exists(paste_config): raise Exception('paste configuration file {0} not found !'. format(paste_config)) return paste_config
db37e317b4684536ce24c5d12ebcfd176c98d34d
3,653,032
def combine_index(df, n1, n2):
    """Combine the stock ticker and stock name columns of dataframe df into its index.

    Args:
        df (pandas.DataFrame): dataframe containing columns n1 and n2
        n1 (str): stock ticker column
        n2 (str): stock name column

    Returns:
        df (pandas.DataFrame): dataframe whose index is "ticker + name"
    """
    return df.set_index(df[n1].str.replace(' ', '') + ' ' +
                        df[n2].str.replace(' ', '')).drop([n1, n2], axis=1)
645c62fdc7d8e541c9b55b5f1621d6c442ca683a
3,653,033
def safe_get_stopwords(stopwords_language): """ :type stopwords_language: basestring :rtype: list """ try: return get_stopwords(stopwords_language) except LanguageNotAvailable: return []
f6a32da469e59341aa9a928cac362b0075e5d792
3,653,034
import os def fetch_dataloader(types, data_dir, params): """ Fetches the DataLoader object for each type in types from data_dir. Args: types: (list) has one or more of 'train', 'val', 'test' depending on which data is required data_dir: (string) directory containing the dataset params: (Params) hyperparameters Returns: data: (dict) contains the DataLoader object for each type in types """ img_dimension = params.img_dimension dataloaders = {} train_transformer, eval_transformer = get_transformer(img_dimension) for split in ['train', 'val', 'test']: if split in types: path = os.path.join(data_dir, split) # use the train_transformer if training data, else use eval_transformer without random flip if split == 'train': dl = DataLoader(FaceMaskDataset(path, train_transformer), num_workers=params.num_workers, batch_size=params.batch_size, shuffle=True, pin_memory=params.cuda) else: dl = DataLoader(FaceMaskDataset(path, eval_transformer), batch_size=params.batch_size, shuffle=False, num_workers=params.num_workers, pin_memory=params.cuda) dataloaders[split] = dl return dataloaders
098747931adb678bcc1513414b9f447a016a6f81
3,653,035
def setup_mock_device(mock_device): """Prepare mock ONVIFDevice.""" mock_device.async_setup = AsyncMock(return_value=True) mock_device.available = True mock_device.name = NAME mock_device.info = DeviceInfo( MANUFACTURER, MODEL, FIRMWARE_VERSION, SERIAL_NUMBER, MAC, ) mock_device.capabilities = Capabilities() mock_device.profiles = [] def mock_constructor(hass, config): """Fake the controller constructor.""" return mock_device mock_device.side_effect = mock_constructor
f39951f9109e5646d1e4cdd4782907cce5ee3a1c
3,653,036
import os def join(*args): """Join multiple path - join('c:', 'pp', 'c.txt') -> 'c:\pp\c.txt'""" assert len(args) >= 2 ret_arg = args[0] for arg in args[1:]: ret_arg = os.path.join(ret_arg, arg) return ret_arg
f628b0fb47898ad9d98714d4329d2ded183242a3
3,653,037
def renyientropy(px,alpha=1,logbase=2,measure='R'): """ Renyi's generalized entropy Parameters ---------- px : array-like Discrete probability distribution of random variable X. Note that px is assumed to be a proper probability distribution. logbase : int or np.e, optional Default is 2 (bits) alpha : float or inf The order of the entropy. The default is 1, which in the limit is just Shannon's entropy. 2 is Renyi (Collision) entropy. If the string "inf" or numpy.inf is specified the min-entropy is returned. measure : str, optional The type of entropy measure desired. 'R' returns Renyi entropy measure. 'T' returns the Tsallis entropy measure. Returns ------- 1/(1-alpha)*np.log(sum(px**alpha)) In the limit as alpha -> 1, Shannon's entropy is returned. In the limit as alpha -> inf, min-entropy is returned. """ #TODO:finish returns #TODO:add checks for measure if not _isproperdist(px): print("px is not a proper probability distribution") alpha = float(alpha) # gets here if alpha != (1 or inf) px = px**alpha genent = np.log(px.sum()) if logbase == 2: return 1/(1-alpha) * genent else: return 1/(1-alpha) * logbasechange(2, logbase) * genent
d0b41285f34c79e27b9fab392433d75ea0aa7894
3,653,038
def convert(digits, base1, base2): """Convert given digits in base1 to digits in base2. digits: str -- string representation of number (in base1) base1: int -- base of given number base2: int -- base to convert to return: str -- string representation of number (in base2)""" # Handle up to base 36 [0-9a-z] assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1) assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2) return encode(decode(digits, base1), base2)
482e7207a27c6acfdaf088ef8195792f29d14452
3,653,039
def h(b, W, X):
    """
    This function implements the softmax regression hypothesis function

    Argument:
    b -- bias
    W -- predictive weight matrix
    X -- data matrix of size (number_examples, number_predictors)

    Returns:
    softmax(XW + b)
    """
    return softmax((X @ W) + b)
abca116d09993b310a70ddf80fcf0eea73b6d542
3,653,040
def get_random_points(N):
    """
    - Takes the number of points N
    - Returns tuple (x1, x2), where x1 and x2 are vectors
    """
    x1 = np.random.uniform(-1, 1, N)
    x2 = np.random.uniform(-1, 1, N)
    return (x1, x2)
e118d2dbbc472bfa31fa30ffe1fabbf625a9c924
3,653,041
def n_bit_color(clk, din, vga_signals, vga_ports): """ Maps n bit input, din, to n bit vga color ports Ex: din=10010101, r=100, g=101, b=01 """ blu = len(vga_ports.blu) grn = len(vga_ports.grn) + blu red = len(vga_ports.red) + grn assert len(din) == red @always(clk.posedge) def colors(): vga_ports.h_sync.next = vga_signals.h_sync vga_ports.v_sync.next = vga_signals.v_sync vga_ports.red.next = 0 vga_ports.grn.next = 0 vga_ports.blu.next = 0 if vga_signals.video_on == 1: vga_ports.red.next = din[red:grn] vga_ports.grn.next = din[grn:blu] vga_ports.blu.next = din[blu:0] return colors
e760c21c0b87c54d5977e6ba29bed0f5dc20b6ab
3,653,042
from typing import Tuple from typing import Dict def point_cloud_transform_net(point_cloud: nn.Variable, train: bool) -> Tuple[nn.Variable, Dict[str, nn.Variable]]: """T net, create transformation matrix for point cloud Args: point_cloud (nn.Variable): point cloud, shape(batch, number of points, 3) train (bool): training flag Returns: Tuple[nn.Variable, Dict[str, nn.Variable]]: transformation matrix and internal variables """ batch_size, num_points, _ = point_cloud.shape # expand dim to B*C(=K)*H(=num_points)*W(=dim) point_cloud = F.reshape(point_cloud, shape=(batch_size, 1, num_points, 3)) with nn.parameter_scope("conv1"): conv_h1 = PF.convolution( point_cloud, 64, (1, 3), stride=(1, 1), with_bias=False) conv_h1 = PF.batch_normalization(conv_h1, batch_stat=train) conv_h1 = F.relu(conv_h1) with nn.parameter_scope("conv2"): conv_h2 = PF.convolution(conv_h1, 128, (1, 1), stride=(1, 1), with_bias=False) conv_h2 = PF.batch_normalization(conv_h2, batch_stat=train) conv_h2 = F.relu(conv_h2) with nn.parameter_scope("conv3"): conv_h3 = PF.convolution( conv_h2, 1024, (1, 1), stride=(1, 1), with_bias=False) conv_h3 = PF.batch_normalization(conv_h3, batch_stat=train) conv_h3 = F.relu(conv_h3) pool_h = F.max_pooling(conv_h3, (num_points, 1)) pool_h = F.reshape(pool_h, (batch_size, -1)) with nn.parameter_scope("affine1"): affine_h1 = PF.affine(pool_h, 512, with_bias=False) affine_h1 = PF.batch_normalization(affine_h1, batch_stat=train) affine_h1 = F.relu(affine_h1) with nn.parameter_scope("affine2"): affine_h2 = PF.affine(affine_h1, 256, with_bias=False) affine_h2 = PF.batch_normalization(affine_h2, batch_stat=train) affine_h2 = F.relu(affine_h2) with nn.parameter_scope("affine3"): # transform points (3 dim) so the matrix size is (3*3) transform_h = PF.affine(affine_h2, 3 * 3) eye_mat = nn.Variable.from_numpy_array( np.array([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=np.float32)) eye_mat = F.reshape(eye_mat, (1, 9)) transform_h = transform_h + eye_mat transform_h = F.reshape(transform_h, (batch_size, 3, 3)) return transform_h, { "conv_h1": conv_h1, "conv_h2": conv_h2, "conv_h3": conv_h3, "pool_h": pool_h, "affine_h1": affine_h1, "affine_h2": affine_h2, "transform_h": transform_h, }
59a3a30ef874dd1ce47a0d9a369c9170b30ac4ea
3,653,043
def diffie_hellman_server(p, g, public_key_pem): """ Function used to apply the Diffie Hellman algorithm in the server. It calculates the private and public components of server. :param p: Shared parameter :param g: Shared parameter :param public_key_pem: Public component of client :return: The private component and the public component """ pn = dh.DHParameterNumbers(p, g) parameters = pn.parameters(default_backend()) private_key = parameters.generate_private_key() public_key = private_key.public_key() p = parameters.parameter_numbers().p g = parameters.parameter_numbers().g public_key_pem = public_key.public_bytes( serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo ) logger.debug(f"My Public Key: {public_key}") logger.debug(f"My Public Key in Bytes: {public_key_pem}") return private_key, public_key_pem
6c5eb11a4e0775e881b33bc15cf99ff423b0bee6
3,653,044
def posix_getpgid(space, pid): """ posix_getpgid - Get process group id for job control """ try: return space.newint(os.getpgid(pid)) except OSError, e: space.set_errno(e.errno) return space.newbool(False) except OverflowError: return space.newbool(False)
b01f0d363ce7f0937a52c824aefc0a262f739757
3,653,045
def on_display_disconnected(): """Shortcut for registering handlers for ACTION_DISPLAY_DISCONNECTED events. Functions decorated with this decorator will be called when push2-python loses connection with the Push2 display. It will have the following positional arguments: * Push2 object instance Examples: @push2_python.on_display_disconnected() def function(push): print('Connection with Push2 display was just lost!') """ return action_handler(ACTION_DISPLAY_DISCONNECTED)
bfb4314e6da432193a0b1e9691ad60d0eb7de039
3,653,046
from typing import Union from typing import Dict from typing import Any def parse_received_data(blockpage_matcher: BlockpageMatcher, received: Union[str, Dict[str, Any]], anomaly: bool) -> Row: """Parse a received field into a section of a row to write to bigquery. Args: blockpage_matcher: Matcher object received: a dict parsed from json data, or a str anomaly: whether data may indicate blocking Returns: a dict containing the 'received_' keys/values in SCAN_BIGQUERY_SCHEMA """ if isinstance(received, str): row: Row = {'received_status': received} _add_blockpage_match(blockpage_matcher, received, anomaly, row) return row row = { 'received_status': received['status_line'], 'received_body': received['body'], 'received_headers': parse_received_headers(received.get('headers', {})), } full_http_response = _reconstruct_http_response(row) _add_blockpage_match(blockpage_matcher, full_http_response, anomaly, row) # hyperquack v1 TLS format tls = received.get('tls', None) if tls: tls_row = { 'received_tls_version': tls['version'], 'received_tls_cipher_suite': tls['cipher_suite'], 'received_tls_cert': tls['cert'] } row.update(tls_row) # hyperquack v2 TLS format if 'TlsVersion' in received: tls_row = { 'received_tls_version': received['TlsVersion'], 'received_tls_cipher_suite': received['CipherSuite'], 'received_tls_cert': received['Certificate'] } row.update(tls_row) return row
30a01d1b045d0f67e279cca34ade90aaf46b9c62
3,653,047
def get_filters(): """ Returns sidebar filters """ filters = { 'organisations': Organisation.objects.all(), 'topics': Topic.objects.all(), 'licenses': License.objects.all(), 'formats': Format.objects.all() } return filters
da137bbcd37d6504358e9e94a6722495bbc81d65
3,653,048
from xdsl.dialects.builtin import DenseIntOrFPElementsAttr, i32 import typing from typing import List from typing import Tuple def irdl_op_builder(cls: typing.Type[OpT], operands: List, operand_defs: List[Tuple[str, OperandDef]], res_types: List, res_defs: List[Tuple[str, ResultDef]], attributes: typing.Dict[str, typing.Any], attr_defs: typing.Dict[str, AttributeDef], successors, regions, options) -> OpT: """Builder for an irdl operation.""" # We need irdl to define DenseIntOrFPElementsAttr, but here we need # DenseIntOrFPElementsAttr. # So we have a circular dependency that we solve by importing in this function. # Build operands by forwarding the values to SSAValue.get if len(operand_defs) != len(operands): raise ValueError( f"Expected {len(operand_defs)} operands, got {len(operands)}") built_operands = [] for ((_, operand_def), operand) in zip(operand_defs, operands): if isinstance(operand_def, VarOperandDef): if not isinstance(operand, list): raise ValueError( f"Expected list for variadic operand builder, got {operand}" ) built_operands.extend([SSAValue.get(arg) for arg in operand]) else: built_operands.append(SSAValue.get(operand)) # Build results by forwarding the values to the attribute builders if len(res_defs) != len(res_types): raise ValueError( f"Expected {len(res_defs)} results, got {len(res_types)}") built_res_types = [] for ((_, res_def), res_type) in zip(res_defs, res_types): if isinstance(res_def, VarResultDef): if not isinstance(res_type, list): raise ValueError( f"Expected list for variadic result builder, got {res_type}" ) built_res_types.extend([ irdl_build_attribute(res_def.constr, res) for res in res_type ]) else: built_res_types.append( irdl_build_attribute(res_def.constr, res_type)) # Build attributes by forwarding the values to the attribute builders attr_defs = {name: def_ for (name, def_) in attr_defs} built_attributes = dict() for attr_name, attr in attributes.items(): if attr_name not in attr_defs: if isinstance(attr, Attribute): built_attributes[attr_name] = attr continue raise ValueError( f"Unexpected attribute name {attr_name} for operation {cls.name}" ) built_attributes[attr_name] = irdl_build_attribute( attr_defs[attr_name].constr, attr) # Take care of variadic operand and result segment sizes. if AttrSizedOperandSegments() in options: sizes = [ (len(operand) if isinstance(operand_def, VarOperandDef) else 1) for operand, (_, operand_def) in zip(operands, operand_defs) ] built_attributes[AttrSizedOperandSegments.attribute_name] =\ DenseIntOrFPElementsAttr.vector_from_list(sizes, i32) if AttrSizedResultSegments() in options: sizes = [(len(result) if isinstance(result_def, VarResultDef) else 1) for result, (_, result_def) in zip(res_types, res_defs)] built_attributes[AttrSizedResultSegments.attribute_name] =\ DenseIntOrFPElementsAttr.vector_from_list(sizes, i32) # Build regions using `Region.get`. regions = [Region.get(region) for region in regions] return cls.create(operands=built_operands, result_types=built_res_types, attributes=built_attributes, successors=successors, regions=regions)
24c103897b040f2b4959b3d3c1642bc6eca6fda2
3,653,049
def _listify(single: st.SearchStrategy) -> st.SearchStrategy: """ Put the result of `single` strategy into a list (all strategies should return lists) """ @st.composite def listify_(draw): return [draw(single)] strategy = listify_() strategy.function.__name__ = f"listified<{repr(single)}>" return strategy
eb4efb742e2c465754e79ba979b69b412f6c066e
3,653,050
def get_text(selector):
    """
    Return the text of the element matched by the given selector.
    """
    if not get_instance():
        raise Exception("You need to start a browser first with open_browser()")
    return get_text_g(get_instance(), selector)
b2866c93b80dcf3c61b8330fb09e4af054937e0b
3,653,051
from functools import partial import time import textwrap def _eps(code, version, file_or_path, scale=1, module_color=(0, 0, 0), background=None, quiet_zone=4): """This function writes the QR code out as an EPS document. The code is drawn by drawing only the modules corresponding to a 1. They are drawn using a line, such that contiguous modules in a row are drawn with a single line. The file parameter is used to specify where to write the document to. It can either be a writable (text) stream or a file path. The scale parameter is sets how large to draw a single module. By default one point (1/72 inch) is used to draw a single module. This may make the code to small to be read efficiently. Increasing the scale will make the code larger. This function will accept fractional scales (e.g. 2.5). :param module_color: Color of the QR code (default: ``(0, 0, 0)`` (black)) The color can be specified as triple of floats (range: 0 .. 1) or triple of integers (range: 0 .. 255) or as hexadecimal value (i.e. ``#36c`` or ``#33B200``). :param background: Optional background color. (default: ``None`` (no background)). See `module_color` for the supported values. :param quiet_zone: Border around the QR code (also known as quiet zone) (default: ``4``). Set to zero (``0``) if the code shouldn't have a border. """ def write_line(writemeth, content): """\ Writes `content` and ``LF``. """ # Postscript: Max. 255 characters per line for line in textwrap.wrap(content, 255): writemeth(line) writemeth('\n') def line(offset, length): """\ Returns coordinates to draw a line with the provided length. """ res = '' if offset > 0: res = ' {0} 0 m'.format(offset) res += ' {0} 0 l'.format(length) return res def rgb_to_floats(color): """\ Converts the provided color into an acceptable format for Postscript's ``setrgbcolor`` """ def to_float(clr): if isinstance(clr, float): if not 0.0 <= clr <= 1.0: raise ValueError('Invalid color "{0}". Not in range 0 .. 1' .format(clr)) return clr if not 0 <= clr <= 255: raise ValueError('Invalid color "{0}". Not in range 0 .. 
255' .format(clr)) return 1/255.0 * clr if clr != 1 else clr if not isinstance(color, (tuple, list)): color = _hex_to_rgb(color) return tuple([to_float(i) for i in color]) f, autoclose = _get_writable(file_or_path, 'w') writeline = partial(write_line, f.write) size = tables.version_size[version] * scale + (2 * quiet_zone * scale) # Write common header writeline('%!PS-Adobe-3.0 EPSF-3.0') writeline('%%Creator: PyQRCode <https://pypi.python.org/pypi/PyQRCode/>') writeline('%%CreationDate: {0}'.format(time.strftime("%Y-%m-%d %H:%M:%S"))) writeline('%%DocumentData: Clean7Bit') writeline('%%BoundingBox: 0 0 {0} {0}'.format(size)) # Write the shortcuts writeline('/M { moveto } bind def') writeline('/m { rmoveto } bind def') writeline('/l { rlineto } bind def') mod_color = module_color if module_color == (0, 0, 0) else rgb_to_floats(module_color) if background is not None: writeline('{0:f} {1:f} {2:f} setrgbcolor clippath fill' .format(*rgb_to_floats(background))) if mod_color == (0, 0, 0): # Reset RGB color back to black iff module color is black # In case module color != black set the module RGB color later writeline('0 0 0 setrgbcolor') if mod_color != (0, 0, 0): writeline('{0:f} {1:f} {2:f} setrgbcolor'.format(*mod_color)) if scale != 1: writeline('{0} {0} scale'.format(scale)) writeline('newpath') # Current pen position y-axis # Note: 0, 0 = lower left corner in PS coordinate system y = tables.version_size[version] + quiet_zone + .5 # .5 = linewidth / 2 last_bit = 1 # Loop through each row of the code for row in code: offset = 0 # Set x-offset of the pen length = 0 y -= 1 # Move pen along y-axis coord = '{0} {1} M'.format(quiet_zone, y) # Move pen to initial pos for bit in row: if bit != last_bit: if length: coord += line(offset, length) offset = 0 length = 0 last_bit = bit if bit == 1: length += 1 else: offset += 1 if length: coord += line(offset, length) writeline(coord) writeline('stroke') writeline('%%EOF') if autoclose: f.close()
65e3f4d69eea5aa0385c1b53693d164aa0f5db6d
3,653,052
from typing import OrderedDict def _group_energy_terms(ener_xvg): """Parse energy.xvg file to extract and group the energy terms in a dict. """ with open(ener_xvg) as f: all_lines = f.readlines() energy_types = [line.split('"')[1] for line in all_lines if line[:3] == '@ s'] energy_values = [float(x) * units.kilojoule_per_mole for x in all_lines[-1].split()[1:]] e_out = OrderedDict(zip(energy_types, energy_values)) # Discard non-energy terms. for group in unwanted: if group in e_out: del e_out[group] # Dispersive energies. # TODO: Do buckingham energies also get dumped here? dispersive = ['LJ (SR)', 'LJ-14', 'Disper. corr.'] e_out['Dispersive'] = 0 * units.kilojoules_per_mole for group in dispersive: if group in e_out: e_out['Dispersive'] += e_out[group] # Electrostatic energies. electrostatic = ['Coulomb (SR)', 'Coulomb-14', 'Coul. recip.'] e_out['Electrostatic'] = 0 * units.kilojoules_per_mole for group in electrostatic: if group in e_out: e_out['Electrostatic'] += e_out[group] e_out['Non-bonded'] = e_out['Electrostatic'] + e_out['Dispersive'] all_angles = ['Angle', 'U-B', 'G96Angle', 'Restricted Angles', 'Bond-Cross', 'BA-Cross', 'Quartic Angles'] e_out['All angles'] = 0 * units.kilojoules_per_mole for group in all_angles: if group in e_out: e_out['All angles'] += e_out[group] all_dihedrals = ['Ryckaert-Bell.', 'Proper Dih.', 'Improper Dih.'] e_out['All dihedrals'] = 0 * units.kilojoules_per_mole for group in all_dihedrals: if group in e_out: e_out['All dihedrals'] += e_out[group] return e_out, ener_xvg
ac1be9871a01323e52611fb3182060db57b5b813
3,653,053
from typing import List from typing import Dict import click def get_packager_targets( targets: List[Target], connections: Dict[str, Connection], remote_api: ConnectionClient ) -> List[PackagerTarget]: """ Build targets for calling packager. Fetch and base64 decode connections by names using local manifest and ODAHU connections API :param targets: Targets from packaging manifest :param connections: Connections found in local manifest files :param remote_api: ConnectionClient to fetch missing Connections """ packager_targets: List[PackagerTarget] = [] for t in targets: conn = connections.get(t.connection_name) if not conn: click.echo( f'The "{t.connection_name}" connection of "{t.name}" target is not found in the manifest files. ' f'Trying to retrieve it from API server' ) conn = remote_api.get_decrypted(t.connection_name) _decode_connection(conn) packager_targets.append( PackagerTarget(conn, t.name) ) return packager_targets
1aac3748b2176f5f11ed8ed137f78d64bf01c112
3,653,054
def elina_texpr0_permute_dimensions(texpr2, dimperm): """ Permute dimensions of an ElinaTexpr0 following the semantics of an ElinaDimperm. Parameters ---------- texpr2 : ElinaTexpr0Ptr Pointer to the ElinaTexpr0 which dimensions we want to permute. dimperm : ElinaDimpermPtr Pointer to the ElinaDimperm which semantics we want to follow. Returns ------- texpr1 : ElinaTexpr0Ptr Pointer to the newly created ElinaTexpr0 with permuted dimensions. """ texpr1 = None try: elina_texpr0_permute_dimensions_c = elina_auxiliary_api.elina_texpr0_permute_dimensions elina_texpr0_permute_dimensions_c.restype = ElinaTexpr0Ptr elina_texpr0_permute_dimensions_c.argtypes = [ElinaTexpr0Ptr, ElinaDimpermPtr] texpr1 = elina_texpr0_permute_dimensions_c(texpr2, dimperm) except: print('Problem with loading/calling "elina_texpr0_permute_dimensions" from "libelinaux.so"') print('Make sure you are passing ElinaTexpr0Ptr, ElinaDimpermPtr to the function') return texpr1
f9c60e6285bc4e934eddf0a0be0511f08a57f45d
3,653,055
def rgb(red: int, green: int, blue: int, background: bool = False) -> Chalk: """Generate a new truecolor chalk from an RGB tuple. Args: red (int): The intensity of red (0-255). green (int): The intensity of green (0-255). blue (int): The intensity of blue (0-255). background (bool, optional): If ``True`` will generate the new chalk to be applied as a background color. Defaults to False. Returns: :class:`~.chalk.Chalk`: The new chalk instance. """ color = TrueColor(red, green, blue) return Chalk(background=color) if background else Chalk(foreground=color)
d5c1e79cc1bf7ee37f1e1df17e9518ac0e11f02b
3,653,056
def first(x: pd.Series) -> pd.Series:
    """
    First value of series

    :param x: time series
    :return: time series of first value

    **Usage**

    Return series with first value of `X` for all dates:

    :math:`R_t = X_0`

    where :math:`X_0` is the first value in the series

    **Examples**

    First value of series:

    >>> series = generate_series(100)
    >>> returns = first(series)

    **See also**

    :func:`last`
    """
    return pd.Series(x[0], x.index)
1a2c856bdff7158ecd7512e43158427530cbc8e4
3,653,057
def simulate(iterations, graph_generator, graph_params, n_nodes, beta, rho, steps, n_infected_init, vacc=None): """Perform `iterations` simulations and compute averages. If vacc is not None, run the simulation using the SIRV model, otherwise use SIR.""" # Initialize arrays for computing averages over simulations s = np.zeros((iterations, steps + 1), dtype=int) i = np.zeros((iterations, steps + 1), dtype=int) r = np.zeros((iterations, steps + 1), dtype=int) ni = np.zeros((iterations, steps + 1), dtype=int) if vacc is not None: v = np.zeros((iterations, steps + 1), dtype=int) nv = np.zeros((iterations, steps + 1), dtype=int) for sim_id in range(iterations): graph = graph_generator(**{'n': n_nodes, **graph_params}) if vacc is not None: epidemic = Epidemic('sirv', graph, steps, beta=beta, rho=rho, n_infected_init=n_infected_init, vacc=vacc) else: epidemic = Epidemic('sir', graph, steps, beta=beta, rho=rho, n_infected_init=n_infected_init) sim = epidemic.simulate() # Compute four (steps, ) array containing the total number, at each # step, of susceptible (S), infected (I), recovered (R) and vaccinated # (V) respectively. s[sim_id] = np.ma.masked_not_equal(sim, 0).count(axis=1) i[sim_id] = np.ma.masked_not_equal(sim, 1).count(axis=1) r[sim_id] = np.ma.masked_not_equal(sim, 2).count(axis=1) if vacc is not None: v[sim_id] = np.ma.masked_not_equal(sim, 3).count(axis=1) # Compute a (steps, ) array containing the number of newly infected # individuals at each step. The number of newly infected at time t is # defined as the sum of nodes that went from state 0 (S) at time t-1 # to state 1 (I) at time t. ni[sim_id] = np.array( [n_infected_init] + [((sim[t - 1] == 0) & (sim[t] == 1)).sum() for t in range(1, steps + 1)], dtype=int) # Compute the same kind of array for newly vaccinated individuals. if vacc is not None: nv[sim_id] = np.array( [v[sim_id, 0]] + [((sim[t - 1] != 3) & (sim[t] == 3)).sum() for t in range(1, steps + 1)], dtype=int) # Compute the average total number of susceptible, infected, recovered and # vaccinated nodes at each week. s = s.mean(axis=0) i = i.mean(axis=0) r = r.mean(axis=0) if vacc is not None: v = v.mean(axis=0) # Compute the average number of newly infected and vaccinated individuals # each week. ni = ni.mean(axis=0) if vacc is not None: nv = nv.mean(axis=0) if vacc is not None: return s, i, r, v, ni, nv else: return s, i, r, ni
96eeb8be72ceb336d62f858337d983cc8f8d5a9d
3,653,058
import urllib import json def lookup_location(): """ Geolocation lookup of current position. Determines latitude and longitude coordinates of the system's position using the ipinfo.io service. Returns: Tuple (lat, lon) containing the latitude and longitude coordinates associated with the IP from which the request is performed. """ response = urllib.request.urlopen("https://ipinfo.io/json") data = json.loads(response.read()) coordinates = data["loc"] lat, lon = coordinates.split(",") return float(lat), float(lon)
5d654314aa8d53cbca2b488bb7c9eb3f1f9cf81a
3,653,059
def _str_or_none(value):
    """Helper: serialize value to JSON string."""
    if value is not None:
        return str(value)
7aa1550f71accaa4111386153b2c331e2ff076bc
3,653,060
def create_song_graph_from_songs(songs: list[Song], parent_graph: song_graph.SongGraph = None, year_separation: int = 10) -> song_graph.SongGraph: """Create and return a song graph from a list of songs. (Optional) Add a parent graph from a larger dataset to the new song graph. (Optional) year_separation defines the way year attribute vertices are to be created. I.e. the intervals in year attribute vertices. For example, a year_separation of 10 will create year attribute vertices for each decade spanned by the playlist. Preconditions: - parent_graph is None or parent_graph.are_attributes_created() # parent_graph is not None implies parent_graph.are_attributes_created() """ graph = song_graph.SongGraph(parent_graph) for song in songs: graph.add_song(song) if parent_graph is None: graph.generate_attribute_vertices(year_separation) else: graph.generate_attribute_vertices(use_parent=True, year_separation=year_separation) return graph
647eb1ce77cf0c596c2fabd41aa32062636ca8a4
3,653,061
def convert(secs):
    """Takes a time in seconds and converts to min:sec:msec"""
    mins = int(secs // 60)
    secs %= 60
    msecs = int(round(((secs - int(secs)) * 1000)))
    secs = int(secs)
    return f'{mins} mins, {secs} secs, {msecs} msecs'
70752f190f94d3bdb4cb3b562b6bf9d1c7d28479
3,653,062
def from_data(symbols, key_matrix, name_matrix, one_indexed=False):
    """ z-matrix constructor

    :param symbols: atomic symbols
    :type symbols: tuple[str]
    :param key_matrix: key/index columns of the z-matrix, zero-indexed
    :type key_matrix: tuple[tuple[float, float or None, float or None]]
    :param name_matrix: coordinate name columns of the z-matrix
    :type name_matrix: tuple[tuple[str, str or None, str or None]]
    """
    syms = list(map(pt.to_E, symbols))
    natms = len(syms)
    key_mat = _key_matrix(key_matrix, natms, one_indexed)
    name_mat = _name_matrix(name_matrix, natms)
    vma = tuple(zip(syms, key_mat, name_mat))
    return vma
5b3f98b98dca797223a95af967e9aaff311d24f8
3,653,063
def should_drop_from_right_deck(n_left: int, n_right:int, seed: int=None) -> bool: """ Determine whether we drop a card from the right or left sub-deck. Either `n_left` or `n_right` (or both) must be greater than zero. :param n_left: the number of cards in the left sub-deck. :param n_right: the number of cards in the right sub-deck. :param seed: optional seed for the random number generator to enable deterministic behavior. :return: True if we should drop a card from the right sub-deck, False otherwise. Examples: >>> should_drop_from_right_deck(n_left=32, n_right=5, seed=0, ) True >>> should_drop_from_right_deck(n_left=0, n_right=5, ) True >>> should_drop_from_right_deck(n_left=7, n_right=0, ) False >>> should_drop_from_right_deck(n_left=0, n_right=0, ) Traceback (most recent call last): ... ValueError: Either `n_left` or `n_right` (or both) must be greater than zero. """ if n_left > 0 and n_right > 0: # There are cards left in both sub-decks, so pick a # sub-deck at random. random = sklearn.utils.check_random_state(seed=seed) num = random.randint(low=0, high=2) boolean = (num == 0) return boolean elif n_left == 0 and n_right > 0: # There are no more cards in the left sub-deck, only # the right sub-deck, so we drop from the right sub-deck. return True elif n_left > 0 and n_right == 0: # There are no more cards in the right sub-deck, only # the left sub-deck, so we drop from the left sub-deck. return False else: # There are no more cards in either sub-deck. raise ValueError ('Either `n_left` or `n_right` ' '(or both) must be greater than zero.')
42bbfc3c8a129f090b50c0979d95e53fd6d6a13f
3,653,064
import os def which(program): """ Locate an executable binary's full path by its name. :param str program: The executable's name. :return: The full path to the executable. :rtype: str """ is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK)) for path in os.environ['PATH'].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file if is_exe(program): return os.path.abspath(program) return None
19d77431baf3489288dbca863f1231d8f5c76efe
3,653,065
def EXPOSED(mu=1.0): """ matrix of exposed sites Parameters ---------- mu: rate """ pis = np.array([0.088367,0.078147,0.047163,0.087976,0.004517,0.058526,0.128039,0.056993,0.024856,0.025277,0.045202,0.094639,0.012338,0.016158,0.060124,0.055346,0.051290,0.006771,0.021554,0.036718]) W = np.array([ [0.0,0.526738,0.48315,0.658902,2.051872,1.280002,1.306565,1.370782,0.540809,0.171986,0.430511,0.697731,1.043937,0.265209,1.270693,4.826665,2.131819,0.143081,0.208643,2.544463], [0.526738,0.0,0.505837,0.051052,2.214326,2.039552,0.137928,0.363365,2.288922,0.237023,0.670514,3.881079,0.656943,0.097443,0.166534,0.751947,0.584329,0.47559,0.196271,0.313443], [0.48315,0.505837,0.0,3.902456,0.961103,1.301786,0.285806,1.8201,4.949307,0.337226,0.158937,1.677194,0.539827,0.182522,0.068692,4.412265,2.133604,0.061094,0.599369,0.172264], [0.658902,0.051052,3.902456,0.0,0.129989,0.399061,3.100403,0.885317,0.70089,0.018315,0.021949,0.10545,0.066925,0.026918,0.228829,0.975564,0.368887,0.042618,0.121313,0.073705], [2.051872,2.214326,0.961103,0.129989,0.0,0.456521,0.033946,0.886564,2.172284,1.037046,1.702066,0.146263,1.846562,3.002586,0.156216,5.294149,2.067387,1.603125,3.842632,4.207648], [1.280002,2.039552,1.301786,0.399061,0.456521,0.0,2.514377,0.320746,3.755421,0.212032,1.261113,2.570254,1.973592,0.080193,0.362501,1.033459,1.013613,0.210329,0.15847,0.497398], [1.306565,0.137928,0.285806,3.100403,0.033946,2.514377,0.0,0.303966,0.270957,0.084442,0.110508,0.730337,0.18816,0.023999,0.214847,0.382235,0.51139,0.048276,0.064648,0.48462], [1.370782,0.363365,1.8201,0.885317,0.886564,0.320746,0.303966,0.0,0.401311,0.012279,0.052946,0.279865,0.158136,0.084663,0.1489,1.970857,0.174527,0.186382,0.03928,0.132496], [0.540809,2.288922,4.949307,0.70089,2.172284,3.755421,0.270957,0.401311,0.0,0.317239,0.869247,0.598289,0.519993,2.047163,0.323141,0.99331,0.58096,0.961546,8.230282,0.329895], [0.171986,0.237023,0.337226,0.018315,1.037046,0.212032,0.084442,0.012279,0.317239,0.0,8.675343,0.338782,9.483497,2.193062,0.071992,0.190509,2.56363,0.208313,0.517123,23.711178], [0.430511,0.670514,0.158937,0.021949,1.702066,1.261113,0.110508,0.052946,0.869247,8.675343,0.0,0.313102,14.176858,4.802817,0.343919,0.389101,0.522334,1.130724,0.713426,3.466991], [0.697731,3.881079,1.677194,0.10545,0.146263,2.570254,0.730337,0.279865,0.598289,0.338782,0.313102,0.0,1.013268,0.044792,0.19547,0.592156,1.147459,0.052858,0.084962,0.348362], [1.043937,0.656943,0.539827,0.066925,1.846562,1.973592,0.18816,0.158136,0.519993,9.483497,14.176858,1.013268,0.0,3.261401,0.099252,0.557254,2.960091,1.328785,0.812142,4.136445], [0.265209,0.097443,0.182522,0.026918,3.002586,0.080193,0.023999,0.084663,2.047163,2.193062,4.802817,0.044792,3.261401,0.0,0.08702,0.668834,0.24442,5.210001,23.228875,1.199764], [1.270693,0.166534,0.068692,0.228829,0.156216,0.362501,0.214847,0.1489,0.323141,0.071992,0.343919,0.19547,0.099252,0.08702,0.0,1.223981,0.413148,0.045945,0.043249,0.368231], [4.826665,0.751947,4.412265,0.975564,5.294149,1.033459,0.382235,1.970857,0.99331,0.190509,0.389101,0.592156,0.557254,0.668834,1.223981,0.0,7.384701,0.316078,0.40531,0.266531], [2.131819,0.584329,2.133604,0.368887,2.067387,1.013613,0.51139,0.174527,0.58096,2.56363,0.522334,1.147459,2.960091,0.24442,0.413148,7.384701,0.0,0.144393,0.234217,3.184874], [0.143081,0.47559,0.061094,0.042618,1.603125,0.210329,0.048276,0.186382,0.961546,0.208313,1.130724,0.052858,1.328785,5.210001,0.045945,0.316078,0.144393,0.0,4.903887,0.252132], 
[0.208643,0.196271,0.599369,0.121313,3.842632,0.15847,0.064648,0.03928,8.230282,0.517123,0.713426,0.084962,0.812142,23.228875,0.043249,0.40531,0.234217,4.903887,0.0,0.459187], [2.544463,0.313443,0.172264,0.073705,4.207648,0.497398,0.48462,0.132496,0.329895,23.711178,3.466991,0.348362,4.136445,1.199764,0.368231,0.266531,3.184874,0.252132,0.459187,0.0] ]) gtr = GTR(alphabet=alphabets['aa_nogap']) gtr.assign_rates(mu=mu, pi=pis, W=W) return gtr
c203eb8affeddd4b2620836759acb35ae6f9114c
3,653,066
async def rename_conflicting_targets( ptgts: PutativeTargets, all_existing_tgts: AllUnexpandedTargets ) -> UniquelyNamedPutativeTargets: """Ensure that no target addresses collide.""" existing_addrs: set[str] = {tgt.address.spec for tgt in all_existing_tgts} uniquely_named_putative_targets: list[PutativeTarget] = [] for ptgt in ptgts: if not ptgt.addressable: # Non-addressable PutativeTargets never have collision issues. uniquely_named_putative_targets.append(ptgt) continue idx = 0 possibly_renamed_ptgt = ptgt # Targets in root-level BUILD files must be named explicitly. if possibly_renamed_ptgt.path == "" and possibly_renamed_ptgt.kwargs.get("name") is None: possibly_renamed_ptgt = possibly_renamed_ptgt.rename("root") # Eliminate any address collisions. while possibly_renamed_ptgt.address.spec in existing_addrs: possibly_renamed_ptgt = ptgt.rename(f"{ptgt.name}{idx}") idx += 1 uniquely_named_putative_targets.append(possibly_renamed_ptgt) existing_addrs.add(possibly_renamed_ptgt.address.spec) return UniquelyNamedPutativeTargets(PutativeTargets(uniquely_named_putative_targets))
9ac25549de1fca5912104135b87a1a17f1cd43fe
3,653,067
def outer2D(v1, v2): """Calculates the magnitude of the outer product of two 2D vectors, v1 and v2""" return v1[0]*v2[1] - v1[1]*v2[0]
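# Hedged usage sketch for outer2D() above; illustrative values only.
# The sign tells you on which side v2 lies relative to v1.
assert outer2D((1, 0), (0, 1)) == 1    # v2 is counter-clockwise from v1
assert outer2D((0, 1), (1, 0)) == -1   # v2 is clockwise from v1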
b1f80afa3b8537eb11d79b17d0f12903bec9387c
3,653,068
from typing import Union


def get_item_indent(item: Union[int, str]) -> Union[int, None]:
    """Gets the item's indent.

    Returns:
        indent as an int or None
    """
    return internal_dpg.get_item_configuration(item)["indent"]
a80997e8c2cfa76a76ff8d09c7308196f0572f86
3,653,069
import numpy as np


def V_RSJ_asym(i, ic_pos, ic_neg, rn, io, vo):
    """Return voltage with asymmetric Ic's in RSJ model"""
    if ic_pos < 0 or ic_neg > 0 or rn < 0:  # or abs(ic_neg/ic_pos) > 1.2 or abs(ic_pos/ic_neg) > 1.2 :
        # set boundaries for fitting
        # pass
        v = 1e10
    else:
        v = np.zeros(len(i))
        n = i > io + ic_pos; v[n] = rn*np.sqrt((i[n]-io)**2 - ic_pos**2) + vo
        n = i < io + ic_neg; v[n] = -rn*np.sqrt((i[n]-io)**2 - ic_neg**2) + vo
        n = np.logical_and(i >= io + ic_neg, i <= io + ic_pos); v[n] = vo
    return v
5005beec6a90bf1a5054836f6f22dbe42dcda6f1
3,653,070
import numpy as np


def h2_gas_costs(pipe_dist=-102.75, truck_dist=-106.0, pipeline=True, max_pipeline_dist=2000):
    """Calculates the transport cost of H2 gas. Requires as input the distance that H2
    will be piped and trucked."""
    if max_pipeline_dist > pipe_dist > 400:
        pipe = 0.0004 * pipe_dist + 0.0424
    elif pipe_dist < 400:
        pipe = 0.0004 * 400 + 0.0424
    else:
        pipe = np.nan
    if not pipeline:
        pipe = np.nan

    truck = 0.003 * truck_dist + 0.3319

    return pipe + truck
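# Hedged usage sketch for h2_gas_costs() above; the distances are illustrative
# values, not taken from the original snippet.
cost = h2_gas_costs(pipe_dist=500, truck_dist=50)
# pipe  = 0.0004 * 500 + 0.0424 = 0.2424
# truck = 0.003  * 50  + 0.3319 = 0.4819
# cost  = 0.2424 + 0.4819       = 0.7243
print(round(cost, 4))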
5b3623d33862038a9629349e8052e8214ddba51c
3,653,071
import inflect
from typing import Union


def number_to_words(input_: Union[int, str], capitalize: bool = False) -> str:
    """Converts integer version of a number into words.

    Args:
        input_: Takes the integer version of a number as an argument.
        capitalize: Boolean flag to capitalize the first letter.

    Returns:
        str:
        String version of the number.
    """
    result = inflect.engine().number_to_words(num=input_)
    return result[0].upper() + result[1:] if capitalize else result
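# Hedged usage sketch for number_to_words() above (requires the `inflect` package).
print(number_to_words(42))                   # forty-two
print(number_to_words(42, capitalize=True))  # Forty-two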
22c5f7c64354a76404150cdf888e3bc3582659f1
3,653,072
from collections import OrderedDict
from typing import Dict


def get_size_reduction_by_cropping(analyzer: DatasetAnalyzer) -> Dict[str, Dict]:
    """
    Compute all size reductions of each case

    Args:
        analyzer: analyzer which calls this property

    Returns:
        Dict: computed size reductions
            `size_reductions`: dictionary with each case id and reduction
    """
    size_reduction = OrderedDict()
    for case_id in analyzer.case_ids:
        props = load_properties_of_cropped(analyzer.cropped_data_dir / case_id)
        shape_before_crop = props["original_size_of_raw_data"]
        shape_after_crop = props['size_after_cropping']
        size_red = np.prod(shape_after_crop) / np.prod(shape_before_crop)
        size_reduction[case_id] = size_red
    return {"size_reductions": size_reduction}
ce1aad85d8f971cccc8cb0547b14be8074228261
3,653,073
import re


def getProxyFile(path):
    """
    Opens a text file and returns the contents with any setting of a certificate
    file replaced with the mitmproxy certificate.
    """
    with open(path, "r") as fd:
        contents = fd.read()
    certReferences = re.findall(r"setcertificatesfile\(.*\)", contents, re.IGNORECASE)
    for certReference in certReferences:
        msg = "using mitmproxy certificate: %s (%s)" % (certReference, path)
        print(bcolors.OKBLUE + msg + bcolors.ENDC)
        contents = contents.replace(certReference, 'setCertificatesFile("pkg:/source/mitmproxy.crt")')
    return contents
c5c0d562e2b430b79b91b2a9ffd23f6d18320b6f
3,653,074
def bytes_filesize_to_readable_str(bytes_filesize: int) -> str:
    """Convert bytes integer to kilobyte/megabyte/gigabyte/terabyte equivalent string"""
    if bytes_filesize < 1024:
        return "{} B".format(bytes_filesize)
    num = float(bytes_filesize)
    for unit in ["B", "KB", "MB", "GB"]:
        if abs(num) < 1024.0:
            return "{:.1f} {}".format(num, unit)
        num /= 1024.0
    return "{:.1f} {}".format(num, "TB")
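# Hedged usage sketch for bytes_filesize_to_readable_str() above.
for size in (500, 2048, 5_368_709_120):
    print(bytes_filesize_to_readable_str(size))  # 500 B, 2.0 KB, 5.0 GB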
cdeb228de80422f541c5fa682422d77a44d19ca2
3,653,075
def braf_mane_data(): """Create test fixture for BRAF MANE data.""" return { "#NCBI_GeneID": "GeneID:673", "Ensembl_Gene": "ENSG00000157764.14", "HGNC_ID": "HGNC:1097", "symbol": "BRAF", "name": "B-Raf proto-oncogene, serine/threonine kinase", "RefSeq_nuc": "NM_004333.6", "RefSeq_prot": "NP_004324.2", "Ensembl_nuc": "ENST00000646891.2", "Ensembl_prot": "ENSP00000493543.1", "MANE_status": "MANE Select", "GRCh38_chr": "7", "chr_start": 140730665, "chr_end": 140924929, "chr_strand": "-" }
7e62545147ef1a6f81c75e56d85f5ab8df3895e8
3,653,076
import os
from collections import defaultdict


def get_decopath_genesets(decopath_ontology, gmt_dir: str):
    """Generate DecoPath gene sets with super pathways."""
    concatenated_genesets_dict = {}
    dc_mapping = defaultdict(list)

    if not os.path.isdir(gmt_dir):
        make_geneset_dir()

    super_pathway_mappings, ontology_df = get_equivalent_pathway_dc_ids(decopath_ontology)

    # Get gene sets from individual databases
    gmt_files = [
        os.path.join(GMT_FILES_DIR, filename)
        for filename in os.listdir(gmt_dir)
        if filename.endswith('.gmt')
    ]

    genesets = [_get_gmt_dict(file) for file in gmt_files]

    # Concatenate gene sets from individual databases
    for geneset in genesets:
        concatenated_genesets_dict.update(geneset)

    # Get super pathway gene sets with DecoPath IDs
    for pathway_id, dc_id, dc_name in super_pathway_mappings.values:
        if pathway_id in concatenated_genesets_dict:
            dc_mapping[dc_id].append(concatenated_genesets_dict[pathway_id])

    # Return DecoPath gene sets
    return {
        pathway_id: {gene for sublist in geneset for gene in sublist}
        for pathway_id, geneset in dc_mapping.items()
    }
63701b37d88eadfef4df55ee31a1a1eb00a4985c
3,653,077
def import_class(path):
    """
    Import a class from a dot-delimited module path. Accepts both dot and
    colon separators for the class portion of the path.

    ex::
        import_class('package.module.ClassName')
        or
        import_class('package.module:ClassName')
    """
    if ':' in path:
        module_path, class_name = path.split(':')
    else:
        module_path, class_name = path.rsplit('.', 1)

    module = __import__(module_path, fromlist=[class_name], level=0)
    return getattr(module, class_name)
dcdf71a3bb665dae1fe5913e19be3a4c0aa3c5d3
3,653,078
import torch
import torch.nn.functional as F


def elastic(X, kernel, padding, alpha=34.0):
    # type: (Tensor, Tensor, int, float) -> Tensor
    """
    X: [(N,) C, H, W]
    """
    H, W = X.shape[-2:]

    dx = torch.rand(X.shape[-2:], device=kernel.device) * 2 - 1
    dy = torch.rand(X.shape[-2:], device=kernel.device) * 2 - 1
    xgrid = torch.arange(W, device=dx.device).repeat(H, 1)
    ygrid = torch.arange(H, device=dy.device).repeat(W, 1).T

    dx = alpha * F.conv2d(unsqueeze_as(dx, X, 0), kernel, bias=None, padding=padding)
    dy = alpha * F.conv2d(unsqueeze_as(dy, X, 0), kernel, bias=None, padding=padding)

    H /= 2
    W /= 2
    dx = (dx + xgrid - W) / W
    dy = (dy + ygrid - H) / H
    grid = torch.stack((dx.squeeze(1), dy.squeeze(1)), dim=-1)
    return F.grid_sample(X, grid, padding_mode="reflection", align_corners=False)
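# Hedged usage sketch for elastic() above. It assumes PyTorch is installed and that
# the project-local helper `unsqueeze_as` used inside elastic() is in scope, so it
# only runs in the module where elastic() is defined. The uniform 5x5 kernel is a
# crude stand-in for the Gaussian smoothing kernel normally passed in.
import torch

X = torch.rand(1, 1, 64, 64)            # a single-channel image batch
kernel = torch.ones(1, 1, 5, 5) / 25.0  # stand-in smoothing kernel
warped = elastic(X, kernel, padding=2)  # padding=2 keeps the spatial size
print(warped.shape)                     # torch.Size([1, 1, 64, 64])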
580c9600cb4ddd77d114ae94303fc2c2a416cf17
3,653,079
def fixture_make_bucket(request): """ Return a factory function that can be used to make a bucket for testing. :param request: The Pytest request object that contains configuration data. :return: The factory function to make a test bucket. """ def _make_bucket(s3_stub, wrapper, bucket_name, region_name=None): """ Make a bucket that can be used for testing. When stubbing is used, a stubbed bucket is created. When AWS services are used, the bucket is deleted after the test completes. :param s3_stub: The S3Stubber object, configured for stubbing or AWS. :param wrapper: The bucket wrapper object, used to create the bucket. :param bucket_name: The unique name for the bucket. :param region_name: The Region in which to create the bucket. :return: The test bucket. """ if not region_name: region_name = s3_stub.region_name s3_stub.stub_create_bucket(bucket_name, region_name) # Bucket.wait_until_exists calls head_bucket on a timer until it returns 200. s3_stub.stub_head_bucket(bucket_name) bucket = wrapper.create_bucket(bucket_name, region_name) def fin(): if not s3_stub.use_stubs and wrapper.bucket_exists(bucket_name): bucket.delete() request.addfinalizer(fin) return bucket return _make_bucket
bdfbbad1b80f43a1b81f5bf8f69db350128e3304
3,653,080
def get_member_struc(*args): """ get_member_struc(fullname) -> struc_t Get containing structure of member by its full name "struct.field". @param fullname (C++: const char *) """ return _ida_struct.get_member_struc(*args)
ac2c226725af8bde1510a6f7fd2fdb64a8c52d01
3,653,081
from datetime import datetime


def pop():
    """Check the first task in redis (which is the task with the smallest score).
    If the score (timestamp) is smaller than or equal to the current timestamp,
    the task is taken out and processed.
    :return: a (bool, message) tuple: True and the message if a task was taken out,
     False and a reason string if there is nothing queued or it is not yet time.
    """
    task = connection.zrange(QUEUE_KEY, 0, 0)
    if not task:
        return False, 'No emails now!'
    msg_id = task[0]
    timestamp = connection.zscore(QUEUE_KEY, msg_id)
    now = datetime.now().timestamp()
    if timestamp < now or abs(timestamp - now) <= 1e-6:
        message = connection.get(msg_id)
        pipeline = connection.pipeline()
        pipeline.zrem(QUEUE_KEY, msg_id)
        pipeline.delete(msg_id)
        pipeline.execute()
        return True, message
    return False, "It's too early now!"
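# Hedged companion sketch (not part of the original snippet): one plausible way a
# message could be scheduled so that pop() later finds it. `connection` and
# QUEUE_KEY are assumed to be the same module-level redis objects used by pop().
import uuid


def push(message, send_at_timestamp):
    """Store the message body and enqueue its id with the send time as score."""
    msg_id = 'email:{}'.format(uuid.uuid4())
    pipeline = connection.pipeline()
    pipeline.set(msg_id, message)
    pipeline.zadd(QUEUE_KEY, {msg_id: send_at_timestamp})
    pipeline.execute()
    return msg_id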
0472d0bcffee84547d7ee400d547ecbb86e50e87
3,653,082
def xml_to_dictform(node):
    """ Converts a minidom node to "dict" form.  See parse_xml_to_dictform. """
    if node.nodeType != node.ELEMENT_NODE:
        raise Exception("Expected element node")

    result = (node.nodeName, {}, [])  # name, attrs, items
    if node.attributes is not None:
        attrs = node.attributes
        # hard to imagine a more contrived way of accessing attributes...
        for key, value in ((attrs.item(i).name, attrs.item(i).value)
                           for i in range(attrs.length)):
            result[1][key] = value

    for child in node.childNodes:
        if child.nodeType == child.ELEMENT_NODE:
            result[2].append(xml_to_dictform(child))

    return result
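# Hedged usage sketch for xml_to_dictform() above, using the standard-library
# minidom parser on a tiny document.
from xml.dom import minidom

doc = minidom.parseString('<book id="1"><title>Dune</title></book>')
name, attrs, children = xml_to_dictform(doc.documentElement)
# name == 'book', attrs == {'id': '1'}, children == [('title', {}, [])]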
8fdc07070a32eb34c38e46cb12d23d367d71c606
3,653,083
def TranslateCoord(data, res, mode):
    """
    Translates position of point to unified coordinate system
    Max value in each direction is 1.0 and the min is 0.0

    :param data: (tuple(float, float)) Position to be translated
    :param res: (tuple(float, float)) Target resolution
    :param mode: (TranslationMode) Work mode. Available modes are: Encode, Decode.
    :returns: tuple(float, float) of unified coordinates when encoding,
              tuple(int, int) of pixel coordinates when decoding
    """
    x, y = data
    resX, resY = res

    # encode
    if mode == TranslationMode.Encode:
        uX = x / resX
        uY = y / resY

        return (uX, uY)

    # decode
    elif mode == TranslationMode.Decode:
        x = Clamp(x, 0, 1)
        y = Clamp(y, 0, 1)

        tX = x * resX
        tY = y * resY

        return (int(tX), int(tY))
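# Hedged usage sketch for TranslateCoord() above. TranslationMode and Clamp are
# assumed to come from the same module as TranslateCoord; they are not defined in
# this snippet.
res = (1920, 1080)
unified = TranslateCoord((960, 540), res, TranslationMode.Encode)  # (0.5, 0.5)
pixels = TranslateCoord(unified, res, TranslationMode.Decode)      # (960, 540)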
c89515692330ce02c0f6f371c16a9028c51e9bbe
3,653,084
def _get_mutator_plugins_bucket_url(): """Returns the url of the mutator plugin's cloud storage bucket.""" mutator_plugins_bucket = environment.get_value('MUTATOR_PLUGINS_BUCKET') if not mutator_plugins_bucket: logs.log_warn('MUTATOR_PLUGINS_BUCKET is not set in project config, ' 'skipping custom mutator strategy.') return None return 'gs://%s' % mutator_plugins_bucket
31073e1fbaf817d63d02de93a2fc224bd2904dec
3,653,085
from io import StringIO


def objectify_json_lines(path_buf_stream, from_string=False, fatal_errors=True,
                         encoding=_DEFAULT_ENCODING, ensure_ascii=False,
                         encode_html_chars=False, avoid_memory_pressure=True):
    """Generator returning an object for each line of JSON in a file, stream or string

    in: path_buf_stream: (str) A string file path containing JSON
                         (stream) An open readable stream from a file containing JSON
                         (stream) A string of JSON content (also requires `from_string=True`)

    This function intentionally operates as a generator, to avoid using huge amounts
    of memory when loading a very large file- after all, this is the primary benefit
    of the JSON lines format. It is meant to be called many times in succession,
    sometimes up to millions of times, so it is important that it is relatively
    quick/simple.

    There are three ways to invoke this function. Each of them returns a native
    Python object

    for obj in objectify_json_lines('file.json'):
        print(obj.items())

    json_fd = open('file.json', 'r', encoding='utf-8')
    for obj in objectify_json_lines(json_fd):
        print(obj.items())

    json_str = '{"A": "B"}\n{"C": "D"}'
    for obj in objectify_json_lines(json_str, from_string=True):
        print(obj.items())
    """
    if from_string is True:
        # If caller specifies path_buf_stream is a string, turn it into
        # a stream to avoid an extra set of logic below
        assert isinstance(path_buf_stream, str)
        path_buf_stream = StringIO(path_buf_stream)

    # If path_buf_stream has a read method, it is effectively a stream
    reader = getattr(path_buf_stream, 'read', None)

    # If the user doesn't care about memory pressure, don't bother with a generator, just
    # give them a regular list of objects from the JSON lines file. I guess most of the time
    # nobody cares, and having to work with a generator in Python3 can be annoying for the caller
    if avoid_memory_pressure is False:
        with (path_buf_stream if reader else open(path_buf_stream, 'r', encoding=encoding)) as infd:
            if fatal_errors is True:
                try:
                    return [loads(line) for line in infd.read().splitlines() if line]
                except JSONDecodeError:
                    return None

            obj_list = list()
            for line in infd.read().splitlines():
                try:
                    obj_list.append(loads(line))
                except JSONDecodeError:
                    # Silently ignore bad lines ..
                    continue
            return obj_list

    def _object_generator():
        with (path_buf_stream if reader else open(path_buf_stream, 'r', encoding=encoding)) as infd:
            for line in infd.readlines():
                line = line.strip()
                if not line:
                    continue
                # Exception handlers are expensive to set up and even more expensive
                # when they fire. If errors should be fatal, don't bother setting one
                # up at all
                if fatal_errors is True:
                    yield loads(line)
                else:
                    # The more expensive path, preparing to catch an exception and
                    # continue gracefully if fatal_errors is False
                    try:
                        yield loads(line)
                    except Exception as err:
                        error('bad JSON-line line: {}'.format(repr(err)))
                        continue

    # Default, memory-friendly path: hand back a generator so callers iterate lazily
    return _object_generator()
a3de3cd8f13c7a245573bd34944e67908dfd4786
3,653,086
def gll_int(f, a, b): """Integrate f from a to b using its values at gll points.""" n = f.size x, w = gll(n) return 0.5*(b-a)*np.sum(f*w)
d405e4c3951f9764077508fcdb73e000c107e4d4
3,653,087
import os
import random


def error404(request, exception):
    """View for the 404 page."""
    with open(os.path.join(BASE_DIR, 'CollaboDev/404_responses.txt')) as fd:
        responses = fd.read().split('\n')
    message = random.choice(responses)
    context = {
        'message': message,
        'error': exception
    }
    return HttpResponseNotFound(render(request, '404.html', context))
cf0b201b23dc06905f260ade10e126f38f169699
3,653,088
def _get_remote_user(): """ Get the remote username. Returns ------- str: the username. """ return input('\nRemote User Name: ')
5f2bb67b5f55ec053a755c015755f488ab6d8c71
3,653,089
import argparse def parse_args(): """Parse the args.""" parser = argparse.ArgumentParser( description='example code to play with InfluxDB') parser.add_argument('--host', type=str, required=False, default='localhost', help='hostname influxdb http API') parser.add_argument('--port', type=int, required=False, default=8086, help='port influxdb http API') parser.add_argument('--nb_day', type=int, required=False, default=15, help='number of days to generate time series data') return parser.parse_args()
83d8f6e036ceb065492feb6031b16198e57c3d89
3,653,090
def generate_warm_starts(vehicle, world: TrafficWorld, x0: np.array, other_veh_info, params: dict,
                         u_mpc_previous=None, u_ibr_previous=None):
    """ Generate a dictionary of warm starts for the solver.

        Returns:  Dictionary with warm_start_name: (control, state, desired_state)
    """
    other_x0 = [veh_info.x0 for veh_info in other_veh_info]

    u_warm_profiles, ux_warm_profiles = generate_warm_u(params["N"], vehicle, x0)

    if len(other_x0) > 0:
        warm_velocity = np.median([x[4] for x in other_x0])
    else:
        warm_velocity = x0[4]

    _, x_ux_warm_profiles = generate_warm_x(vehicle, world, x0, warm_velocity)

    ux_warm_profiles.update(x_ux_warm_profiles)

    if (u_mpc_previous is not None):
        # TODO: Try out the controls that were previously executed
        u_warm_profiles["previous_mpc"] = np.concatenate(
            (
                u_mpc_previous[:, params["number_ctrl_pts_executed"]:],
                np.tile(u_mpc_previous[:, -1:], (1, params["number_ctrl_pts_executed"])),
            ),
            axis=1,
        )
        x_warm, x_des_warm = vehicle.forward_simulate_all(x0.reshape(6, 1), u_warm_profiles["previous_mpc"])
        ux_warm_profiles["previous_mpc"] = [
            u_warm_profiles["previous_mpc"],
            x_warm,
            x_des_warm,
        ]

    if (u_ibr_previous is not None):
        # Try out the controller from the previous round of IBR
        u_warm_profiles["previous_ibr"] = u_ibr_previous
        x_warm, x_des_warm = vehicle.forward_simulate_all(x0.reshape(6, 1), u_warm_profiles["previous_ibr"])
        ux_warm_profiles["previous_ibr"] = [
            u_warm_profiles["previous_ibr"],
            x_warm,
            x_des_warm,
        ]

    return ux_warm_profiles
021266450f54ee59ae451dc2c9cad544f129f278
3,653,091
def nf_regnet_b4(pretrained=False, **kwargs): """ Normalization-Free RegNet-B4 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs)
642ba43a132128a16273bb6cc76178b71be6beaf
3,653,092
from typing import Tuple def load_data_binary_labels(path: str) -> Tuple[pd.DataFrame, pd.DataFrame]: """Loads data from CSV file and returns features (X) and only binary labels meaning (any kind of) toxic or not""" df = pd.read_csv(path) X = df.comment_text.to_frame() y = df[config.LIST_CLASSES].max(axis=1).to_frame(name="toxic") return X, y
e73b99b2d00d388298f9f6e2cdfca15f121a0238
3,653,093
from bs4 import BeautifulSoup


def parse(html_url):
    """Parse an article page into a title, subtitle and list of paragraphs."""
    html = www.read(html_url)
    soup = BeautifulSoup(html, 'html.parser')

    data = {'paragraphs': []}
    content = soup.find('div', class_=CLASS_NAME_CONTENT)
    for child in content.find_all():
        text = _clean(child.text)
        if child.name == 'h3':
            data['title'] = text
        elif child.name == 'h4':
            data['subtitle'] = text
        elif child.name == 'p':
            data['paragraphs'].append(text)

    return data
226245618d220db00eb2f298aaf462c1c861c32b
3,653,094
import logging def get_tp_algorithm(name: str) -> GenericTopologyProgramming: """ returns the requested topology programming instance """ name = name.lower() if name == "uniform_tp": return UniformTP() if name == "joint_tp": return JointTP() if name == "ssp_oblivious_tp": return SSPObliviousTP() err_msg = f"wan tp name not found: {name}" logging.error(err_msg) raise Exception(err_msg)
6f98613c13becf1ed85cb8a667fc35cfac86973f
3,653,095
def get_first_job_queue_with_capacity(): """Returns the first job queue that has capacity for more jobs. If there are no job queues with capacity, returns None. """ job_queue_depths = get_job_queue_depths()["all_jobs"] for job_queue in settings.AWS_BATCH_QUEUE_WORKERS_NAMES: if job_queue_depths[job_queue] <= settings.MAX_JOBS_PER_NODE: return job_queue return None
a23bf9dcef39d1377a1d7cb2a37abbe1186fac0a
3,653,096
import cv2


def rotations(images, n_rot, ccw_limit, cw_limit):
    """
    Rotates every image in the list "images" n_rot times, between 0 and cw_limit
    (clockwise limit) n_rot times and between 0 and ccw_limit (counterclockwise
    limit) n_rot times more. The limits are there to make sense of the data
    augmentation. E.g: Rotating an mnist digit 180 degrees turns a 6 into a 9,
    which makes no sense at all. cw_limit and ccw_limit are in degrees!

    Returns a list with all the rotated samples. Size will be 2*n_rot+1, because
    we also want the original sample to be included.

    Example: images=[img], n_rot=3, ccw_limit=90, cw_limit=90
    Returns: [img1: original,
              img2: 30 degrees rot ccw,
              img3: 30 degrees rot cw,
              img4: 60 degrees rot ccw,
              img5: 60 degrees rot cw,
              img6: 90 degrees rot ccw,
              img7: 90 degrees rot cw]
    """
    # if we only have 1 image, transform into a list to work with same script
    if type(images) is not list:
        images = [images]

    # calculate the initial angle and the step
    cw_step_angle = float(cw_limit) / float(n_rot)
    ccw_step_angle = float(ccw_limit) / float(n_rot)

    # container for rotated images
    rotated_images = []

    # get every image and apply the number of desired rotations
    for img in images:
        # get rows and cols to rotate
        rows, cols, depth = img.shape
        # append the original one too
        rotated_images.append(img)
        # rotate the amount of times we want them rotated
        for i in range(1, n_rot + 1):
            # create rotation matrix with center in the center of the image,
            # scale 1, and the desired angle (we travel counterclockwise first,
            # and then clockwise)
            M_ccw = cv2.getRotationMatrix2D(
                (cols / 2, rows / 2), i * ccw_step_angle, 1)
            # rotate using the matrix (using bicubic interpolation)
            rot_img = cv2.warpAffine(img, M_ccw, (cols, rows), flags=cv2.INTER_CUBIC)
            # append to rotated images container
            rotated_images.append(rot_img)
            M_cw = cv2.getRotationMatrix2D(
                (cols / 2, rows / 2), -i * cw_step_angle, 1)
            # rotate using the matrix (using bicubic interpolation)
            rot_img = cv2.warpAffine(img, M_cw, (cols, rows), flags=cv2.INTER_CUBIC)
            # append to rotated images container
            rotated_images.append(rot_img)

    return rotated_images
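# Hedged usage sketch for rotations() above (requires OpenCV and NumPy); the
# blank test image is illustrative only.
import numpy as np

img = np.zeros((28, 28, 3), dtype=np.uint8)
augmented = rotations(img, n_rot=3, ccw_limit=90, cw_limit=90)
print(len(augmented))  # 7: the original plus 3 ccw and 3 cw rotations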
b1ca7a609faa6ed8903424976b94b477a4798096
3,653,097
def num_range(num):
    """ Use in template language to loop through a numeric range """
    return range(num)
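# Hedged usage sketch for num_range() above. Registering it as a template filter is
# assumed to happen elsewhere (e.g. via a template library); in a Django-style
# template it could then be used as:
#   {% for i in 5|num_range %} ... {% endfor %}
assert list(num_range(3)) == [0, 1, 2]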
7b66e4ffd264ea7b49850a9300c3a6c80282fce1
3,653,098
def datamodel_flights_column_names(): """ Get FLIGHTS_CSV_SCHEMA column names (keys) :return: list """ return list(FLIGHTS_CSV_SCHEMA.keys())
6e5edfa181e02955976602a289576eca307a13bc
3,653,099