Dataset columns: content (string, 35–762k chars) · sha1 (string, 40 chars) · id (int64, 0–3.66M)
def theoritical_spectrum(peptide_sequence):
    """Returns the theoretical spectrum of a given amino acid sequence.

    INPUT:
        peptide_sequence: string. The peptide sequence to get its theoretical spectrum.
    OUTPUT:
        List. The theoretical spectrum of the given peptide sequence.
    """
    linear_kmers = []
    cyclic_kmers = []
    for i in range(len(peptide_sequence)):
        for j in range(i, len(peptide_sequence)):
            linear_kmers.append(peptide_sequence[i:j + 1])
    for i in range(2, len(peptide_sequence)):
        for j in range(i - 1):
            cyclic_kmers.append(peptide_sequence[i:len(peptide_sequence)] + peptide_sequence[0:j + 1])
    kmers = linear_kmers + cyclic_kmers
    # get_molecular_weight is assumed to be defined elsewhere in the module
    return sorted(list(map(get_molecular_weight, kmers)))
1808daed80b553fe3a5a2b38e178956e4a0d7de0
3,656,160
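A quick way to exercise the snippet above is to stub out the undefined get_molecular_weight helper. The mass table below is a hypothetical stand-in (integer residue masses for a few amino acids), not the module's real helper:

# Hypothetical stand-in for the undefined get_molecular_weight helper.
MASS = {'G': 57, 'A': 71, 'S': 87}

def get_molecular_weight(peptide):
    return sum(MASS[aa] for aa in peptide)

# Enumerates all linear substrings plus the wrap-around ("cyclic") ones:
# the kmers for "GAS" are G, GA, GAS, A, AS, S, SG.
print(theoritical_spectrum("GAS"))  # [57, 71, 87, 128, 144, 158, 215]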
def is_amazon(source_code):
    """
    Checks whether a given book is a physical book or an ebook giveaway
    for a linked Amazon account.

    :param source_code: iterable of lines of the page's source code
    :return: True if the giveaway requires a linked Amazon account
    """
    for line in source_code:
        if "Your Amazon Account" in line:
            return True
    return False
31c50622b4bb97a05d8cabb94c58f6e0a8f58971
3,656,161
def data_dim(p):
    """Return the dimensionality of the dataset."""
    dataset_class = DATASETS[p.dataset]
    return dataset_class(p).get_in_dim()
25e32039733e8599c22d696f28bfffbf8b97cf02
3,656,163
import torch
# Engine and _prepare_batch are assumed to come from pytorch-ignite (ignite.engine)
from ignite.engine import Engine, _prepare_batch


def create_supervised_evaluator(model, metrics=None, device=None, non_blocking=False,
                                prepare_batch=_prepare_batch,
                                output_transform=lambda x, y, y_pred: (y_pred, y,)):
    """
    Factory function for creating an evaluator for supervised models.

    Args:
        model (`torch.nn.Module`): the model to train.
        metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
        non_blocking (bool, optional): if True and this copy is between CPU and GPU,
            the copy may occur asynchronously with respect to the host. For other cases,
            this argument has no effect.
        prepare_batch (callable, optional): function that receives `batch`, `device`,
            `non_blocking` and outputs tuple of tensors `(batch_x, batch_y)`.
        output_transform (callable, optional): function that receives 'x', 'y', 'y_pred'
            and returns value to be assigned to engine's state.output after each iteration.
            Default is returning `(y_pred, y,)` which fits output expected by metrics.
            If you change it you should use `output_transform` in metrics.

    Note:
        `engine.state.output` for this engine is defined by `output_transform` parameter
        and is a tuple of `(batch_pred, batch_y)` by default.

    Returns:
        Engine: an evaluator engine with supervised inference function.
    """
    metrics = metrics or {}

    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            # z is optional (e.g. task ids)
            x, y, *z = prepare_batch(batch, device=device, non_blocking=non_blocking)
            y_pred = model(*(x, *z))
            # if hasattr(model, 'arch_sampler'):
            #     ent = model.arch_sampler.entropy().mean()
            return output_transform(x, y, y_pred)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
2af4cc7b12a76c3c12940353a072d8b715fec8c1
3,656,164
import typing


def historical_earning_calendar(
    apikey: str, symbol: str, limit: int = DEFAULT_LIMIT
) -> typing.Optional[typing.List[typing.Dict]]:
    """
    Query FMP /historical/earning_calendar/ API.

    Note: Between the "from" and "to" parameters the maximum time interval can be 3 months.

    :param apikey: Your API key.
    :param symbol: Company ticker.
    :param limit: Number of rows to return.
    :return: A list of dictionaries.
    """
    path = f"historical/earning_calendar/{symbol}"
    query_vars = {
        "apikey": apikey,
        "symbol": symbol,
        "limit": limit,
    }
    # DEFAULT_LIMIT and __return_json_v3 are module-level helpers of the FMP SDK
    return __return_json_v3(path=path, query_vars=query_vars)
7f231b253ef4f462ab89826d58546a3259bdd3d2
3,656,165
def config_section_data():
    """Produce the default configuration section for app.config,
    when called by `resilient-circuits config [-c|-u]`
    """
    config_data = u"""[fn_query_tor_network]
base_url = https://onionoo.torproject.org/details
# The flag can be 'Running' or 'Exit'; for more information on flag settings see https://metrics.torproject.org/onionoo.html
flag = Exit
# The data fields should be comma separated with no spaces between fields
data_fields = exit_addresses,or_addresses,host_name"""
    return config_data
239436c9b2141e17f6158aab20d7951d79359fcd
3,656,166
def show_object_id_by_date(
    move_data,
    create_features=True,
    kind=None,
    figsize=(21, 9),
    return_fig=True,
    save_fig=True,
    name='shot_points_by_date.png',
):
    """
    Generates four visualizations based on datetime features:

    - Bar chart of trajectories by day period
    - Bar chart of trajectories by day of the week
    - Line chart of trajectories by date
    - Line chart of trajectories by hour of the day

    Parameters
    ----------
    move_data : pymove.core.MoveDataFrameAbstract subclass.
        Input trajectory data.
    create_features : bool, optional, default True.
        Whether or not to keep the datetime features created for plotting;
        if False, they are removed after the figure is drawn.
    kind : list or None
        Determines the kind of each of the four plots.
    figsize : tuple, optional, default (21, 9).
        Dimensions of the figure.
    return_fig : bool, optional, default True.
        Whether or not to return the generated figure.
    save_fig : bool, optional, default True.
        Whether or not to save the generated figure.
    name : str, optional, default 'shot_points_by_date.png'.
        Name of the output file.

    Returns
    -------
    matplotlib.pyplot.figure or None
        The generated figure.

    References
    ----------
    https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html
    """
    if kind is None:
        kind = ['bar', 'bar', 'line', 'line']

    fig, ax = plt.subplots(2, 2, figsize=figsize)

    move_data.generate_date_features()
    move_data.generate_hour_features()
    move_data.generate_time_of_day_features()
    move_data.generate_day_of_the_week_features()

    move_data.groupby([PERIOD])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[0], rot=0, ax=ax[0][0], fontsize=12
    )
    move_data.groupby([DAY])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[1], ax=ax[0][1], rot=0, fontsize=12
    )
    move_data.groupby([DATE])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[2], grid=True, ax=ax[1][0], rot=90, fontsize=12,
    )
    move_data.groupby([HOUR])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[3], grid=True, ax=ax[1][1], fontsize=12
    )

    if not create_features:
        move_data.drop(columns=[DATE, HOUR, PERIOD, DAY], inplace=True)
    if save_fig:
        # plt.savefig takes no `fig` keyword; save through the figure itself
        fig.savefig(fname=name)
    if return_fig:
        return fig
18bbd54adfba6ecfd0959904d99698cfaac4b198
3,656,167
def raw_escape(pattern, unix=None, raw_chars=True):
    """Apply raw character transform before applying escape."""
    return _wcparse.escape(
        util.norm_pattern(pattern, False, raw_chars, True),
        unix=unix, pathname=True, raw=True
    )
e4df84b21b737f199a7314818cc7f892f93be1b8
3,656,168
import numpy as np
import astropy.units as u
from scipy.interpolate import griddata


def interpolate_effective_area_per_energy_and_fov(
    effective_area,
    grid_points,
    target_point,
    min_effective_area=1. * u.Unit('m2'),
    method='linear',
):
    """
    Takes a grid of effective areas for a bunch of different parameters
    and interpolates (log) effective areas to given value of those parameters

    Parameters
    ----------
    effective_area: np.array of astropy.units.Quantity[area]
        grid of effective area, of shape (n_grid_points, n_fov_offset_bins, n_energy_bins)
    grid_points: np.array
        list of parameters corresponding to effective_area, of shape (n_grid_points, n_interp_dim)
    target_point: np.array
        values of parameters for which the interpolation is performed, of shape (n_interp_dim)
    min_effective_area: astropy.units.Quantity[area]
        Minimum value of effective area to be considered for interpolation
    method: 'linear', 'nearest', 'cubic'
        Interpolation method

    Returns
    -------
    aeff_interp: astropy.units.Quantity[area]
        Interpolated effective area array with shape (n_energy_bins, n_fov_offset_bins)
    """
    # get rid of units
    effective_area = effective_area.to_value(u.m**2)
    min_effective_area = min_effective_area.to_value(u.m**2)

    # remove zeros and log it
    effective_area[effective_area < min_effective_area] = min_effective_area
    effective_area = np.log(effective_area)

    # interpolation
    aeff_interp = griddata(grid_points, effective_area, target_point, method=method).T

    # exp it and set too low values to zero
    aeff_interp = np.exp(aeff_interp)
    # 1.1 to correct for numerical uncertainty and interpolation
    aeff_interp[aeff_interp < min_effective_area * 1.1] = 0
    return u.Quantity(aeff_interp, u.m**2, copy=False)
58c32f49c96ed7ceb14e734f1386ef0015920204
3,656,169
def extract_edge(stats: np.ndarray,
                 idxs_upper: np.ndarray,
                 runner: int,
                 max_index: int,
                 maximum_offset: float,
                 iso_charge_min: int = 1,
                 iso_charge_max: int = 6,
                 iso_mass_range: int = 5) -> list:
    """Extract edges.

    Args:
        stats (np.ndarray): Stats array that contains summary statistics of hills.
        idxs_upper (np.ndarray): Upper index for comparing.
        runner (int): Index.
        max_index (int): Unused.
        maximum_offset (float): Maximum offset when comparing edges.
        iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
        iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
        iso_mass_range (int, optional): Mass search range. Defaults to 5.

    Returns:
        list: List of edges.
    """
    edges = []

    mass1 = stats[runner, 0]
    delta_mass1 = stats[runner, 1]

    for j in range(runner + 1, idxs_upper[runner]):
        mass2 = stats[j, 0]
        if np.abs(mass2 - mass1) <= maximum_offset:
            delta_mass2 = stats[j, 1]
            for charge in range(iso_charge_min, iso_charge_max + 1):
                if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
                    edges.append((runner, j))
                    break

    return edges
8101a024c20d169f470d4e6632272e0ad00c484b
3,656,170
def _neq_attr(node, attr, gens, container):
    """
    Calculates fitness based on the fact that the node's target shall not
    have an attr with a certain value.
    """
    trg_nd = container.nodes[gens[node]]
    if attr[0] in trg_nd and attr[1] == trg_nd[attr[0]]:
        return 10.1
    return 0.0
adfa39aa60d0777b2b05f174a9cf61a847e55b1d
3,656,171
def getItem(user, list, itempk):
    """
    Get a single item from a list.

    :param user: user who owns the list
    :param list: list containing the item
    :param itempk: primary key of the item
    :return: item or None
    """
    itemType = list.itemType
    item = None
    if itemType == 'Item':
        item = Item.objects.get(pk=itempk, list=list, user=user)
    elif itemType == 'Link':
        item = Link.objects.get(pk=itempk, list=list, user=user)
    elif itemType == 'Book':
        item = Book.objects.get(pk=itempk, list=list, user=user)
    elif itemType == 'Show' or itemType == 'Movie':
        item = Video.objects.get(pk=itempk, list=list, user=user)
    return item
f0d2c3a6d1881e0e1288aae451a556ebe856242e
3,656,172
def metric_divergence(neighborhood_vectors: np.ndarray, dL: float, polarity: int) -> float:
    """
    Calculates the divergence of a sampling volume neighborhood.

    Note: For JIT to work, this must be declared at the top level.

    @param neighborhood_vectors: Sampling volume neighborhood vectors (six 3D vectors)
    @param dL: Length element
    @param polarity: Polarity filter (-1: Keep values <= 0; 0: Keep all values; +1: Keep values >= 0)
    """
    dxp = neighborhood_vectors[0][0]
    dxn = neighborhood_vectors[3][0]
    dyp = neighborhood_vectors[1][1]
    dyn = neighborhood_vectors[4][1]
    dzp = neighborhood_vectors[2][2]
    dzn = neighborhood_vectors[5][2]
    value = (dxp - dxn + dyp - dyn + dzp - dzn) / 2 / dL
    if polarity == -1:
        if value > 0:
            return np.NaN
        else:
            return -value  # Keep divergence positive, especially for use as alpha metric
    elif polarity == +1:
        if value < 0:
            return np.NaN
        else:
            return value
    else:
        return value
87dd2b19c654143ed54f3783059ece50eb32ec71
3,656,173
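A sanity check for metric_divergence, assuming the six neighborhood vectors are ordered +x, +y, +z, -x, -y, -z as the indexing in the function implies:

import numpy as np

vecs = np.array([
    [2.0, 0.0, 0.0],   # field at the +x neighbor
    [0.0, 1.0, 0.0],   # +y
    [0.0, 0.0, 1.0],   # +z
    [-2.0, 0.0, 0.0],  # -x
    [0.0, -1.0, 0.0],  # -y
    [0.0, 0.0, -1.0],  # -z
])
# (2-(-2) + 1-(-1) + 1-(-1)) / 2 / 0.5 = 8.0
print(metric_divergence(vecs, dL=0.5, polarity=0))  # 8.0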
def tag(request):
    """Add/Remove tag to email"""
    if request.is_ajax():
        mail = request.POST.get("mail")
        tag = request.POST.get("tag")
        op = request.POST.get("op")
        mail = get_object_or_404(Mail, pk=mail)
        if op == "ADD":
            mail.tags.add(tag)
        elif op == "REMOVE":
            mail.tags.remove(tag)
        else:
            raise Http404("404")
        return JsonResponse({"ok": True})
    raise Http404("404")
b1f5c2e65393be1d68a03b01c522214413e5b321
3,656,175
def sid_to_smiles(sid):
    """Takes a PubChem SID and returns the associated isomeric SMILES string."""
    substance = pc.Substance.from_sid(sid)
    cid = substance.standardized_cid
    compound = pc.get_compounds(cid)[0]
    return compound.isomeric_smiles
e243e201a8ac4e4ee63332454a8b8c64f0f43692
3,656,176
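The snippet refers to pubchempy by the alias pc; a usage sketch (the SID below is an arbitrary placeholder, and the call needs network access to PubChem):

import pubchempy as pc

smiles = sid_to_smiles(46505803)  # arbitrary example SID
print(smiles)  # isomeric SMILES of the standardized compound behind the SID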
def view_static(request, **kwargs):
    """Outputs a static page."""
    template = kwargs.get('template', None)
    if not template:
        raise Http404
    template = '.'.join([template, 'html'])
    title = kwargs.get('title', 'static page')
    img = kwargs.get('img', 'bgag.jpg')
    return render_to_response(template, {
        'is_mobile': request.user_agent.is_mobile,
        'page_title': title,
        'menu': MenuItem.active().order_by('order_id'),
        'page_img': img,
    })
b6997e86175688f9b1293b0888faeb337bb5f3b6
3,656,177
def start_call(called_ident, skicall):
    """When a call is initially received this function is called.

    Unless you want to divert to another page, this function should return
    called_ident which would typically be the ident of a Responder or Template
    page dealing with the call.

    If a ServeFile exception is raised, which contains a pathlib.Path object
    of a local server file, then that server file will be sent to the client.
    In this case, the end_call function will not be called.
    """
    # To serve a directory of static files, you can map a url to a server directory with the
    # skicall.map_url_to_server method, which returns pathlib.Path objects, and then
    # raise a ServeFile exception, which causes the file to be served. For example:

    # servedfile = skicall.map_url_to_server("images", "/home/user/thisproject/imagefiles")
    # if servedfile:
    #     raise ServeFile(servedfile)

    # Of particular interest at this point are the attributes:
    # skicall.received_cookies is a dictionary of cookie name:values received from the client
    # skicall.call_data is a dictionary which you can set with your own data and, as skicall is
    # passed on to the submit_data and end_call functions defined below, can be used to pass
    # data to these functions.

    # Normally you would return called_ident, which is the page being called, or None to cause a
    # page not found error, or another ident (project, pagenumber) to divert the call to another page.
    return called_ident
0353d81273ea6638858bf18271f4480895ca1db1
3,656,178
def getmemory():
    """Returns the memory limit for data arrays (in MB)."""
    return NX_MEMORY
f6850ac2ad5854f9798ef480e9ca105bf31644ed
3,656,179
# Note: in the source module, `this` refers to the module itself
# (the common `this = sys.modules[__name__]` idiom), not the stdlib easter egg.
import this


def get_object_syncing_state():
    """
    Get a dictionary mapping which object trackers are active.

    The dictionary contains name:bool pairs that can be fed back into
    the :func:`set_object_syncing_state()` function.
    """
    states = {
        "selection": bool(this._on_selection_changed_cb_id),
        "duplicate": bool(this._on_before_duplicate_cb_id),
        "name": bool(this._on_name_changed_cb_id),
        "existence": bool(this._on_object_deleted_cb_id),
        "relationship": bool(this._on_parent_changed_cb_id),
    }
    return states
c6fa40e7945b8186db06cc00b461fc2fe6a16c36
3,656,180
def determine_nohit_score(cons, invert):
    """
    Determine the value in the matrix assigned to nohit given SeqFindr options

    :param cons: whether the SeqFindr run is using mapping consensus data or not
    :param invert: whether the SeqFindr run is inverting (missing hits to be
                   shown as black bars)

    :type cons: None or boolean
    :type invert: boolean

    :returns: the value defined as no hit in the results matrix
    """
    if cons is None:
        nohit = 0.5
    else:
        nohit = 1.0
    if invert:
        nohit = nohit * -1.0
    return nohit
d0539b5ac4dda8b4a15c6800fb4a821cb305b319
3,656,181
def library_get_monomer_desc(res_name):
    """Loads/caches/returns the monomer description object MonomerDesc
    for the given monomer residue name.
    """
    assert isinstance(res_name, str)

    try:
        return MONOMER_RES_NAME_CACHE[res_name]
    except KeyError:
        pass

    mon_desc = library_construct_monomer_desc(res_name)
    if mon_desc is None:
        return None

    MONOMER_RES_NAME_CACHE[res_name] = mon_desc
    return mon_desc
98b4790995bd1d2eba96775e99826fae7b7cfc8a
3,656,183
def _seqfix(ref_seq, seq, comp_len, rev):
    """
    Fill or trim a portion of the beginning of a sequence relative to a
    reference sequence

    Args:
        ref_seq (str): reference sequence e.g. germline gene
        seq (str): sequence to compare to reference
        comp_len (int): length of subsequence to compare
            e.g. necessary to exclude the CDR3
        rev (bool): whether to reverse the sequences for J gene filling / trimming

    Returns:
        seq_fixed (str): sequence filled / trimmed as necessary
    """
    if rev:
        ref_comp = ref_seq[::-1][:comp_len]
        seq_comp = seq[::-1][:comp_len]
    else:
        ref_comp = ref_seq[:comp_len]
        seq_comp = seq[:comp_len]

    ref_aligned, seq_aligned = global_pw_align(ref_comp, seq_comp)

    # replace N's in seq if present
    seq_aligned = _replace_Ns_with_ref(ref_aligned, seq_aligned)

    if ref_aligned.startswith('-'):
        # need to trim sequence
        fixed = _trim_extra_nt(ref_aligned, seq_aligned)
    elif seq_aligned.startswith('-'):
        # need to fill sequence
        fixed = _fill_missing_nt(ref_aligned, seq_aligned)
    else:
        fixed = seq_aligned

    if rev:
        seq_fixed = seq[:-comp_len] + fixed[::-1]
    else:
        seq_fixed = fixed + seq[comp_len:]

    return seq_fixed.replace('-', '')
222ba3a8e2c4bced8ebcde6662890c10a0b41cf8
3,656,187
import torch
from torch.nn.utils.rnn import pad_sequence
from typing import Tuple


def get_dedup_tokens(logits_batch: torch.Tensor) \
        -> Tuple[torch.Tensor, torch.Tensor]:
    """Converts a batch of logits into the batch's most probable tokens and their probabilities.

    Args:
        logits_batch (Tensor): Batch of logits (N x T x V).

    Returns:
        Tuple: Deduplicated tokens. The first element is a tensor (token indices)
        and the second element is a tensor (token probabilities).
    """
    logits_batch = logits_batch.softmax(-1)
    out_tokens, out_probs = [], []
    for i in range(logits_batch.size(0)):
        logits = logits_batch[i]
        max_logits, max_indices = torch.max(logits, dim=-1)
        max_logits = max_logits[max_indices != 0]
        max_indices = max_indices[max_indices != 0]
        cons_tokens, counts = torch.unique_consecutive(
            max_indices, return_counts=True)
        out_probs_i = torch.zeros(len(counts), device=logits.device)
        ind = 0
        for j, c in enumerate(counts):  # renamed from `i` to avoid shadowing the batch index
            max_logit = max_logits[ind:ind + c].max()
            out_probs_i[j] = max_logit
            ind = ind + c
        out_tokens.append(cons_tokens)
        out_probs.append(out_probs_i)

    out_tokens = pad_sequence(out_tokens, batch_first=True, padding_value=0.).long()
    out_probs = pad_sequence(out_probs, batch_first=True, padding_value=0.)
    return out_tokens, out_probs
885048842e6d1b50cd5b98c5b455aeb71e49c191
3,656,188
def com(im):
    """
    Compute the center of mass of im.

    Expects that im is leveled (ie zero-centered). Ie, a pure noise image
    should have zero mean. Sometimes this is improved if you square the im
    first: com(im**2)

    Returns:
        y, x in array form.
    """
    im = np.nan_to_num(im)
    mass = np.sum(im)
    ry = (
        np.arange(im.shape[0]) + 0.5
    )  # 0.5 because we want the mass of a pixel at the center of the pixel
    rx = np.arange(im.shape[1]) + 0.5
    y = np.sum(ry * np.sum(im, axis=1))
    x = np.sum(rx * np.sum(im, axis=0))
    return utils.np_safe_divide(np.array([y, x]), mass)
5e1a7c20075df3fe5804213e5fdddd4f46d276c6
3,656,189
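A small check of the center-of-mass formula above, done inline with plain numpy (utils.np_safe_divide is assumed to be an ordinary divide guarded against zero mass):

import numpy as np

# All mass in the bottom-right pixel of a 3x3 image: the center of mass
# should be that pixel's center, (2.5, 2.5).
im = np.zeros((3, 3))
im[2, 2] = 4.0
ry = np.arange(im.shape[0]) + 0.5
rx = np.arange(im.shape[1]) + 0.5
y = np.sum(ry * im.sum(axis=1))
x = np.sum(rx * im.sum(axis=0))
print(np.array([y, x]) / im.sum())  # [2.5 2.5]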
def get_fitting_custom_pipeline():
    """
    Pipeline looking like this
    lagged -> custom -> lasso
    """
    lagged_node = PrimaryNode('lagged')
    lagged_node.custom_params = {'window_size': 50}

    # For the custom model, params as an initial approximation and the model
    # as a function are necessary
    custom_node = SecondaryNode('custom', nodes_from=[lagged_node])
    custom_node.custom_params = {'alpha': 5,
                                 'model_predict': custom_ml_model_imitation_predict,
                                 'model_fit': custom_ml_model_imitation_fit}

    node_final = SecondaryNode('lasso', nodes_from=[custom_node])
    pipeline = Pipeline(node_final)
    return pipeline
3ed78dc2f83110b0ac7dd4622a76511d0316404f
3,656,190
def get_regularizable_variables(scope):
    """
    Get *all* regularizable variables in the scope.

    :param scope: scope to filter variables by
    :return:
    """
    return tf.get_collection(REGULARIZABLE_VARS, scope)
67a6673be12af47128a453e413778f18f4344eaa
3,656,191
import glob


def load(midi_path: str, config: dict):
    """
    Returns a 3-tuple of `tf.Dataset`, each yielding `(input_seq, target_seq)`,
    representing train, validation, and test portions of the overall dataset.
    `input_seq` represents the `inp_split` portion of each midi sequence
    in `midi_path`.
    """
    batch_size = config.get('batch_size', None)

    # get midi files
    filenames = tf.random.shuffle(glob.glob(f'{midi_path}/**/*.midi', recursive=True))

    # get train, validation, and test sizes
    train_split, test_split = config.get('train_size', None), config.get('test_size', None)
    train_split = int(train_split * len(filenames))
    test_split = int(test_split * len(filenames))
    # validation gets whatever remains after train and test
    val_split = len(filenames) - (train_split + test_split)

    midi_ds, midi_tokenizer = _create_dataset(
        filenames=filenames,
        inp_len=config.get('inp_len', None),
        tar_len=config.get('tar_len', None),
        velocity_bins=config.get('velocity_bins', None),
        rest_resolution=config.get('rest_resolution', None))

    # split the dataset into train, validation, test
    # (the original take/skip sequence produced empty validation/test sets)
    train_ds = midi_ds.take(train_split)
    remaining = midi_ds.skip(train_split)
    val_ds = remaining.take(val_split)
    test_ds = remaining.skip(val_split)

    return (_optimize_dataset(train_ds.padded_batch(batch_size)),
            _optimize_dataset(val_ds.padded_batch(batch_size)),
            _optimize_dataset(test_ds.padded_batch(batch_size)),
            midi_tokenizer)
38d890a78cf85ce43cbdb783246ef1a5e7e2cd06
3,656,192
import re


def _parse_docstring(doc):
    """Extract documentation from a function's docstring."""
    if doc is None:
        return _Doc('', '', {}, [])

    # Convert Google- or Numpy-style docstrings to RST.
    # (Should do nothing if not in either style.)
    # use_ivar avoids generating an unhandled .. attribute:: directive for
    # Attribute blocks, preferring a benign :ivar: field.
    cfg = Config(napoleon_use_ivar=True)
    doc = str(GoogleDocstring(doc, cfg))
    doc = str(NumpyDocstring(doc, cfg))
    with _sphinx_common_roles():
        tree = docutils.core.publish_doctree(
            # Disable syntax highlighting, as 1) pygments is not a dependency
            # 2) we don't render with colors and 3) SH breaks the assumption
            # that literal blocks contain a single text element.
            doc, settings_overrides={'syntax_highlight': 'none'})

    class Visitor(NodeVisitor):
        optional = [
            'document', 'docinfo',
            'field_list', 'field_body',
            'literal', 'problematic',
            # Introduced by our custom passthrough handlers, but the Visitor
            # will recurse into the inner text node by itself.
            'TextElement',
        ]

        def __init__(self, document):
            super().__init__(document)
            self.paragraphs = []
            self.start_lines = []
            self.params = defaultdict(dict)
            self.raises = []
            self._current_paragraph = None
            self._indent_iterator_stack = []
            self._indent_stack = []

        def _do_nothing(self, node):
            pass

        def visit_paragraph(self, node):
            self.start_lines.append(node.line)
            self._current_paragraph = []

        def depart_paragraph(self, node):
            text = ''.join(self._current_paragraph)
            text = ''.join(self._indent_stack) + text
            self._indent_stack = [
                ' ' * len(item) for item in self._indent_stack]
            text = text.replace('\n', '\n' + ''.join(self._indent_stack))
            self.paragraphs.append(text)
            self._current_paragraph = None

        visit_block_quote = visit_doctest_block = visit_paragraph
        depart_block_quote = depart_doctest_block = depart_paragraph

        def visit_Text(self, node):
            self._current_paragraph.append(node)

        depart_Text = _do_nothing

        def visit_emphasis(self, node):
            self._current_paragraph.append('\033[3m')  # *foo*: italic

        def visit_strong(self, node):
            self._current_paragraph.append('\033[1m')  # **foo**: bold

        def visit_title_reference(self, node):
            self._current_paragraph.append('\033[4m')  # `foo`: underlined

        def _depart_markup(self, node):
            self._current_paragraph.append('\033[0m')

        depart_emphasis = depart_strong = depart_title_reference = \
            _depart_markup

        def visit_rubric(self, node):
            self.visit_paragraph(node)

        def depart_rubric(self, node):
            # Style consistent with "usage:", "positional arguments:", etc.
            self._current_paragraph[:] = [
                (t.lower() if t == t.title() else t) + ':'
                for t in self._current_paragraph]
            self.depart_paragraph(node)

        def visit_literal_block(self, node):
            text, = node
            self.start_lines.append(node.line)
            self.paragraphs.append(
                re.sub('^|\n', r'\g<0>    ', text))  # indent
            raise SkipNode

        def visit_bullet_list(self, node):
            self._indent_iterator_stack.append(
                (node['bullet'] + ' ' for _ in range(len(node))))

        def depart_bullet_list(self, node):
            self._indent_iterator_stack.pop()

        def visit_enumerated_list(self, node):
            enumtype = node['enumtype']
            fmt = {('(', ')'): 'parens',
                   ('', ')'): 'rparen',
                   ('', '.'): 'period'}[node['prefix'], node['suffix']]
            start = node.get('start', 1)
            enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]
                           for i in range(start, start + len(node))]
            width = max(map(len, enumerators))
            enumerators = [enum.ljust(width) for enum in enumerators]
            self._indent_iterator_stack.append(iter(enumerators))

        def depart_enumerated_list(self, node):
            self._indent_iterator_stack.pop()

        def visit_list_item(self, node):
            self._indent_stack.append(next(self._indent_iterator_stack[-1]))

        def depart_list_item(self, node):
            self._indent_stack.pop()

        def visit_field(self, node):
            field_name_node, field_body_node = node
            field_name, = field_name_node
            parts = field_name.split()
            if len(parts) == 2:
                doctype, name = parts
                # docutils>=0.16 represents \* as \0* in the doctree.
                name = name.lstrip('*\0')
            elif len(parts) == 3:
                doctype, type_, name = parts
                name = name.lstrip('*\0')
                if doctype not in _PARAM_TYPES:
                    raise SkipNode
                if 'type' in self.params[name]:
                    raise ValueError('type defined twice for {}'.format(name))
                self.params[name]['type'] = type_
            else:
                raise SkipNode
            if doctype in _PARAM_TYPES:
                doctype = 'param'
            if doctype in _TYPE_NAMES:
                doctype = 'type'
            if doctype in ['param', 'type'] and doctype in self.params[name]:
                raise ValueError(
                    '{} defined twice for {}'.format(doctype, name))
            visitor = Visitor(self.document)
            field_body_node.walkabout(visitor)
            if doctype in ['param', 'type']:
                self.params[name][doctype] = ''.join(visitor.paragraphs)
            elif doctype in ['raises']:
                self.raises.append(name)
            raise SkipNode

        def visit_comment(self, node):
            self.paragraphs.append(comment_token)
            # Comments report their line as the *end* line of the comment.
            self.start_lines.append(
                node.line - node.children[0].count('\n') - 1)
            raise SkipNode

        def visit_system_message(self, node):
            raise SkipNode

    comment_token = object()
    visitor = Visitor(tree)
    tree.walkabout(visitor)

    tuples = {name: _Param(values.get('param'), values.get('type'))
              for name, values in visitor.params.items()}
    if visitor.paragraphs:
        text = []
        for start, paragraph, next_start in zip(
                visitor.start_lines,
                visitor.paragraphs,
                visitor.start_lines[1:] + [0]):
            if paragraph is comment_token:
                continue
            text.append(paragraph)
            # Insert two newlines to separate paragraphs by a blank line.
            # Actually, paragraphs may or may not already have a trailing
            # newline (e.g. text paragraphs do but literal blocks don't) but
            # argparse will strip extra newlines anyways. This means that
            # extra blank lines in the original docstring will be stripped, but
            # this is less ugly than having a large number of extra blank lines
            # arising e.g. from skipped info fields (which are not rendered).
            # This means that list items are always separated by blank lines,
            # which is an acceptable tradeoff for now.
            text.append('\n\n')
        parsed = _Doc(text[0], ''.join(text), tuples, visitor.raises)
    else:
        parsed = _Doc('', '', tuples, visitor.raises)
    return parsed
ff4e3ce300748c32c2e65129c381f1e74912f4a1
3,656,193
def extract_remove_outward_edges_filter(exceptions_from_removal):
    """
    This creates a closure that goes through the list of tuples to explicitly
    state which edges are leaving from the first argument of each tuple.

    Each tuple that is passed in has two members. The first member is a string
    representing a single node from which the children will be explicitly
    stated. The second member is the list of nodes that are in its child set.
    This covers both barren_nodes and explicit_parent_offspring.
    """
    def remove_outward_edges_filter(G):
        graph = G.copy()
        list_of_parents = [x[0] for x in exceptions_from_removal if len(x[1]) > 0]
        list_of_barrens = [x[0] for x in exceptions_from_removal if len(x[1]) == 0]
        for barren in list_of_barrens:
            graph.remove_edges_from([edge for edge in graph.edges() if edge[0] == barren])
        for parent in list_of_parents:
            current_edges = graph.out_edges(parent)
            valid_edges = [(x[0], y) for x in exceptions_from_removal
                           if x[0] == parent for y in x[1]]
            graph.remove_edges_from([edge for edge in current_edges
                                     if edge not in valid_edges])
        return graph
    return remove_outward_edges_filter
543e5823b8375cbdec200988ea5dd0c4f2d23d05
3,656,194
import torch


def ln_addTH(x: torch.Tensor, beta: torch.Tensor) -> torch.Tensor:
    """
    out = x + beta[None, :, None]
    """
    return x + beta[None, :, None]
77e556c41a33a8c941826604b4b595ea7d456f9a
3,656,195
def drude2(tags, e, p):
    """dielectric function according to Drude theory for fitting"""
    return drude(e, p[0], p[1], p[2], p[3])
8032c61df099f6c1ac671f2b81c3bb93d1f81317
3,656,196
def ParseFile(path):
    """Parse function names and comments from a .h path.

    Returns mapping from function name to comment.
    """
    result = {}
    with open(path, 'r') as fp:
        lines = fp.readlines()
    i = 0
    n = len(lines)
    while i < n:
        line = lines[i]
        m = MCRE.match(line)
        if m and not m.group('rest') and not m.group('params'):
            # Looks like a function definition. Consume all adjacent following
            # comment lines.
            name = m.group('name')
            tmpl = m.group('tmpl')
            params = m.group('params')
            if tmpl is not None:
                name += '<%s>' % tmpl
            if params is not None:
                name += '(%s)' % params
            # print '%3d: %s' % (i+1, m.groupdict())
            comments = []
            i += 1
            while i < n:
                m = CRE.match(lines[i])
                if not m:
                    break
                comments.append(m.group('line'))
                i += 1
            result[name] = comments
        else:
            i += 1
    return result
6319137de084aaf366b28e76af52cc1911298d8b
3,656,197
from typing import Dict


def get_records(data: Dict[_Expr, Dict], column_order):
    """Output data as a list of records"""
    def cell_callback(expr, i, val, spreadsheet_data):
        spreadsheet_data[-1].append(val)
        return spreadsheet_data

    def row_callback(spreadsheet_data):
        spreadsheet_data[-1] = tuple(spreadsheet_data[-1])
        spreadsheet_data.append([])
        return spreadsheet_data

    out = [[]]
    out = print_analyses_v2(data, column_order, cell_callback, row_callback, out)
    return out[:-1]
8a8eb0e69c9dabe6dfc59c9b5637fdf4ee2d2dd1
3,656,198
import torch


def support_mask_to_label(support_masks, n_way, k_shot, num_points):
    """
    Args:
        support_masks: binary (foreground/background) masks with shape (n_way, k_shot, num_points)
    """
    support_masks = support_masks.view(n_way, k_shot * num_points)
    support_labels = []
    for n in range(support_masks.shape[0]):
        support_mask = support_masks[n, :]  # (k_shot*num_points,)
        support_label = torch.zeros_like(support_mask)
        mask_index = torch.nonzero(support_mask).squeeze(1)
        support_label = support_label.scatter_(0, mask_index, n + 1)
        support_labels.append(support_label)
    support_labels = torch.stack(support_labels, dim=0)
    support_labels = support_labels.view(n_way, k_shot, num_points)
    return support_labels.long()
e6d73dc93e1e0b54d805d9c8b69785168dd2621e
3,656,199
def cal_covered_users(positions, heat_map, radius):
    """
    :param positions: $k$ positions array of !!!(y, x)!!!
    :param heat_map: grid data with count
    :param radius: 0 (1 grid), 1 (8 grids), 2 (25 grids)
    :return: coverage score
    """
    row_num, col_num = heat_map.shape
    mask = np.zeros(heat_map.shape, dtype=int)
    for position in positions:
        center_x = position[1]
        center_y = position[0]
        max_x = center_x + radius if center_x + radius < col_num else col_num - 1
        min_x = center_x - radius if center_x - radius >= 0 else 0
        max_y = center_y + radius if center_y + radius < row_num else row_num - 1
        min_y = center_y - radius if center_y - radius >= 0 else 0
        for x in range(min_x, max_x + 1):
            for y in range(min_y, max_y + 1):
                mask[y, x] = 1
    return np.sum(np.multiply(mask, heat_map))
52e3fec6b7aa01c9882c15ca3331b3199fa554a2
3,656,200
from typing import Union
import pathlib


def certificate_from_file(
    filename: Union[str, pathlib.Path],
    format=OpenSSL.crypto.FILETYPE_PEM,
) -> TS.X509:
    """Load an X509 certificate from ``filename``.

    :param filename: The path to the certificate on disk.
    :param format: The format of the certificate, from :doc:`OpenSSL:api/crypto`.
    """
    with open(filename, 'r') as handle:
        return certificate_from_string(handle.read(), format)
1cc3cb514454118ed6af9257b35aa39586bce31b
3,656,201
def get_welcome_response(session):
    """Welcome the user to my python skill"""
    card_title = "Welcome"
    speech_output = "Welcome to my python skill. You can search for GitHub repositories. "
    # If the user either does not reply to the welcome message or says something
    # that is not understood, they will be prompted again with this text.
    reprompt_text = "Ask me to search GitHub for a repository. "
    session_attributes = session.get('attributes', {})
    speechlet_response = build_speechlet_response(
        card_title, speech_output, reprompt_text
    )
    return build_response(session_attributes, speechlet_response)
d90bbd14bef29f1d7400042bbc593e4bb63b8713
3,656,202
import numpy as np


def rotate_quaternion(angle, axis, old):
    """Returns a quaternion rotated by angle about axis relative to old quaternion."""
    # Note that the axis vector should be normalized and we test for this
    # In general, the old quaternion need not be normalized, and the same goes for the result
    # although in our applications we only ever use unit quaternions (to represent orientations)
    assert old.size == 4, 'Error in old quaternion dimension'
    assert axis.size == 3, 'Error in axis dimension'
    assert np.isclose(np.sum(axis**2), 1.0), 'axis normalization error {} {} {}'.format(*axis)

    # Standard formula for rotation quaternion, using half angles
    rot = np.sin(0.5 * angle) * axis
    rot = np.array([np.cos(0.5 * angle), rot[0], rot[1], rot[2]], dtype=np.float_)

    e = quatmul(rot, old)  # Apply rotation to old quaternion (quatmul defined elsewhere in the module)
    return e
ccc67dbcd2153b40a4e4c560d423d4c495912d8e
3,656,203
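quatmul is not shown in this row; a standard Hamilton-product sketch consistent with the (w, x, y, z) ordering used above:

import numpy as np

def quatmul(a, b):
    # Hamilton product of two quaternions in (w, x, y, z) order.
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([
        w1*w2 - x1*x2 - y1*y2 - z1*z2,
        w1*x2 + x1*w2 + y1*z2 - z1*y2,
        w1*y2 - x1*z2 + y1*w2 + z1*x2,
        w1*z2 + x1*y2 - y1*x2 + z1*w2,
    ])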
def rochepot_dl(x, y, z, q):
    """
    Dimensionless Roche potential (:math:`\\Phi_n`, synchronous rotation)

    More massive component (:math:`m_1`) is centered at (x,y,z) = (0,0,0).
    Less massive component (:math:`m_2`) is at (1,0,0). The unit of length is
    the distance between the objects. Both objects are in the x,y plane
    (x-axis along the connecting line and z perpendicular to the orbital plane).

    Parameters
    ----------
    x, y, z : float or array
        Location(s) at which to calculate the potential. Unit of length is
        the distance between the masses m1 and m2.
    q : float
        Mass ratio (0 <= m2/m1 <= 1)

    Returns
    -------
    Potential : float or array
        The potential at the specified location(s)
    """
    _checkq(q)
    r1, r2 = _r1r2_dl(x, y, z)
    p = 2 / ((1 + q) * r1) + 2 * q / ((1 + q) * r2) + (x - q / (1 + q)) ** 2 + y ** 2
    return p
f3d15ea27e6b4c476d345fa8af254b2a14cbdfbc
3,656,205
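In LaTeX, the potential computed above, with r_1 and r_2 the distances to m_1 at the origin and m_2 at (1, 0, 0) (presumably what the helper _r1r2_dl returns):

\Phi_n(x, y, z) = \frac{2}{(1+q)\,r_1} + \frac{2q}{(1+q)\,r_2}
                + \left(x - \frac{q}{1+q}\right)^{2} + y^{2},
\qquad
r_1 = \sqrt{x^2 + y^2 + z^2}, \quad
r_2 = \sqrt{(x-1)^2 + y^2 + z^2}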
def health_check(config):
    """Tests the API to ensure it is working."""
    itglue = ITGlue(config['api_key'], config['itglue_host'])
    try:
        itglue._make_request('organizations', {})
        return True
    except Exception:
        return False
02b9a582b506f590adcdcdbd661abbc7aec52d26
3,656,206
import io
import time
# picamera and PIL are assumed available on the Raspberry Pi
import picamera
from PIL import Image


def capture_image(resolution=(1024, 768), size=(320, 240), sleep=2):
    """
    Captures an image from the Raspberry Pi camera

    resolution -- resolution of capture
    size       -- size of output
    sleep      -- sleep time in seconds
    """
    stream = io.BytesIO()
    with picamera.PiCamera() as camera:
        # camera.led = False
        camera.resolution = resolution
        camera.start_preview()
        time.sleep(sleep)
        camera.capture(stream, format='jpeg', resize=size)

    # "Rewind" the stream to the beginning so we can read its content
    stream.seek(0)
    image = Image.open(stream)
    return image
c8967d6bce5f953d11878fb31fa02dbffbe4e283
3,656,207
import numpy


def MLVR(XDATA, YDATA, xreference=0, residual=1, xlabel='', ylabel='', title='',
         alpha=0.01, iters=1000, plot=1):
    """Does Multivariate Linear Regression.

    Properties:
        XDATA = The feature DataFrame
        YDATA = The target DataFrame
        xreference = 1/0 -> The column index in XDATA for plotting the graph
        xlabel = Label for X in graph
        ylabel = Label for Y in graph
        title = Title for graph
        alpha = Learning rate for the model
        iters = The number of iterations to train the model
    """
    XDATA.conv_type('float', change_self=True)
    xpure = XDATA[xreference]
    XDATA.normalize(change_self=True)
    YDATA.conv_type('float', change_self=True)
    ypure = YDATA.tolist[0]
    YDATA.normalize(change_self=True)
    X = XDATA
    y = YDATA
    df = DataFrame()
    ones = df.new(X.shape[0], 1, elm=1.)
    X = df.concat(ones, X, axis=1)
    theta = DataFrame().new(1, length(X.columns), elm=0.)

    def computeCost(X, y, theta):
        dot_product = DataFrame().dot(X, theta.T)
        return float(((dot_product - y) ** 2).sum(axis=0)) / (2 * X.shape[0])

    def gradientDescent(X, y, theta, iters, alpha):
        # cost = np.zeros(iters)
        cost = []
        for i in range(iters):
            dot_product = DataFrame().dot(X, theta.T)
            derivative = DataFrame(dataframe=[[(alpha / X.shape[0])]]) * (X * (dot_product - y)).sum(axis=0)
            theta = theta - derivative
            cost.append(computeCost(X, y, theta))
            # cost[i] = computeCost(X, y, theta)
        return theta, cost

    def print_equation(g):
        stra = "Estimated equation, y = %s" % g[0]
        g0 = g[0]
        del g[0]
        for c in range(length(g)):
            stra += " + %s*x%s" % (g[c], c + 1)
        print(stra)

    def predict_li(XDATA, g):
        g0 = g[0]
        del g[0]
        y_pred = []
        for row in range(XDATA.shape[0]):
            suma = 0
            suma += sum(list_multiplication(g, XDATA.row(row)))
            yres = g0 + suma
            y_pred.append(yres)
        return y_pred

    g, cost = gradientDescent(X, y, theta, iters, alpha)
    finalCost = computeCost(X, y, g)
    # g = g.T
    g = g.two2oneD()
    print("Thetas = %s" % g)
    # print("cost = ", cost)
    print("finalCost = %s" % finalCost)
    gN = g[:]
    print_equation(gN)
    gN = g[:]
    y_pred = predict_li(XDATA, gN)
    y_PRED = reference_reverse_normalize(ypure, y_pred)
    emin, emean, emax = minResidual(ypure, y_PRED), meanResidual(ypure, y_PRED), maxResidual(ypure, y_PRED)
    print("Min,Mean,Max residual = %s, %s, %s" % (emin, emean, emax))
    print("Residual Min - Max Range = %s" % (emax - emin))
    print("Residual range percentage = %s" % ((emax - emin) / (max(ypure) - min(ypure))))
    print("Residual mean percentage = %s" % (emean / ArithmeticMean(ypure)))
    # If finalCost is lowest, the mean residual (mean error distance) will also be lowest
    # y_pred = [g[0] + g[1]*my_data[0][c] + g[2]*my_data[1][c] for c in range(my_data.shape[0])]
    y_actual = YDATA.tolist[0]
    x = XDATA[xreference]
    if plot == 1:
        fig, ax = plt.subplots()
        ax.plot(numpy.arange(iters), cost, 'r')
        ax.set_xlabel('Iterations')
        ax.set_ylabel('Cost')
        ax.set_title('Error vs. Training Epoch')
        plt.show()
        x_a, y_a = give_time_series(xpure, y_PRED)  # give_time_series(x, y_pred)
        plt.plot(x_a, y_a, color='r', marker='.', label='Prediction')
        x_a, y_a = give_time_series(xpure, ypure)  # give_time_series(x, y_actual)
        plt.plot(x_a, y_a, color='g', marker='.', label='Real')
        if residual == 1:
            plot_error_distance(xpure, y_PRED, ypure)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.title(title)
        plt.legend()
        plt.show()
    else:
        print('plot off')
    return finalCost
84509c2e8ccc9b4f52b5d90432e74a18da226b0a
3,656,208
def FilterAndTagWrapper(target, dontRemoveTag=False):
    """\
    Returns a component that wraps a target component, tagging all traffic
    going into its inbox; and filtering out any traffic coming out of its
    outbox with the same unique id.
    """
    if dontRemoveTag:
        Filter = FilterButKeepTag
    else:
        Filter = FilterTag

    return Graphline(
        TAGGER=UidTagger(),
        FILTER=Filter(),
        TARGET=target,
        linkages={
            ("TARGET", "outbox"): ("FILTER", "inbox"),   # filter data coming from target
            ("FILTER", "outbox"): ("self", "outbox"),
            ("TAGGER", "uid"): ("FILTER", "uid"),        # ensure filter uses right uid
            ("self", "inbox"): ("TAGGER", "inbox"),      # tag data going to target
            ("TAGGER", "outbox"): ("TARGET", "inbox"),
            ("self", "control"): ("TARGET", "control"),  # shutdown signalling path
            ("TARGET", "signal"): ("TAGGER", "control"),
            ("TAGGER", "signal"): ("FILTER", "control"),
            ("FILTER", "signal"): ("self", "signal"),
        },
    )
045cdd4f0716ba187211fbb1a4536f1f4c863bc1
3,656,209
import numpy as np  # np.rint is used below
from typing import Union


def format_time(seconds: Union[int, float]) -> str:
    """Convert the seconds to a human readable string with days, hours, minutes and seconds."""
    s = int(np.rint(seconds))

    if s < 60:
        return "{0}s".format(s)
    elif s < 60 * 60:
        return "{0}m {1:02}s".format(s // 60, s % 60)
    elif s < 24 * 60 * 60:
        return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
    else:
        return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
f50b7d96a91e6e261169f0f0c9d71186e3c208fe
3,656,210
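Expected behavior of format_time, for illustration:

print(format_time(3661))   # 1h 01m 01s
print(format_time(59.6))   # 1m 00s  (np.rint rounds 59.6 up to 60)
print(format_time(90061))  # 1d 01h 01m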
def run_model(df, i, name, gscv, calibrate=True):
    """Given customercode values in dict_folds,
    1. create balanced dataset
    2. split into train, test sets
    3. run grid search
    4. get probability scores
    5. calibrate as directed
    6. find optimal cutoff from precision-recall
    7. return predictions, data, scores
    """
    df_undersampled = pd.concat([
        df.query("target==0").sample(frac=0.3, random_state=0),  # query() takes a string expression
        df.query("target==1")
    ])
    X = df_undersampled.drop("target", axis=1).copy()
    y = df_undersampled.loc[:, "target"].copy()

    X_tr, X_te, y_tr, y_te = train_test_split(X, y, train_size=0.7, stratify=y)
    model = gscv.fit(X_tr, y_tr)

    # Probabilities
    y_scores = model.predict_proba(X_te)[:, 1]
    if calibrate:
        sigmoid = CalibratedClassifierCV(model, cv=2, method="sigmoid")
        sigmoid.fit(X_tr, y_tr)
        y_probs = sigmoid.predict_proba(X_te)[:, 1]
    else:
        y_probs = np.array(y_scores)

    # Cutoff
    p, r, t = precision_recall_curve(y_te, y_probs, pos_label=1)
    df_pr = (pd.DataFrame(data=zip(p, r, t),
                          columns=["precision", "recall", "threshold"])
             .set_index("threshold"))
    cutoff = (pd.Series(data=np.abs(df_pr["precision"] - df_pr["recall"]),
                        index=df_pr.index)
              .idxmin()
              .round(2))

    # Predictions
    y_pred = (y_probs >= cutoff).astype(int)

    dict_data = {
        "X_tr": X_tr, "X_te": X_te,
        "y_tr": y_tr, "y_te": y_te,
        "y_scores": y_scores,
        "y_probs": y_probs,
        "y_pred": y_pred,
    }
    dict_scores = {
        "precision": precision_score(y_te, y_pred),
        "recall": recall_score(y_te, y_pred),
    }
    payload = {
        "name": name,
        "model": model,
        "data": dict_data,
        "scores": dict_scores,
    }
    return payload
0f5513b7e4117580dd297ee5e9b7a88afc691b3a
3,656,211
import re


def extract_date(db):
    """Extract Release Date from metadata and convert it into 'YYYY Mon' format (e.g. '2020 Jan')"""
    date_pattern = r'releaseDate\":(\d{9,10})'

    def format_date(x):
        """Takes epoch time as argument and returns date in 'YYYY Mon' format"""
        date = re.search(date_pattern, x)
        if date:
            val = pd.to_datetime(date.group(1), unit='s')
            val = val.strftime('%Y %b')
            return val
        else:
            return 'No Date'

    db['date'] = db['meta'].apply(format_date)
    db = db.drop('meta', axis=1)
    return db
9d4d8c19846a49967f9e3deb3be8808df9d69812
3,656,213
def split_test_image(aa):
    """
    Separate image created by mk_test_image into x, y components
    """
    if aa.dtype.kind == 'f':
        y = np.round((aa % 1) * 1024)
        x = np.floor(aa)
    else:
        nshift = (aa.dtype.itemsize * 8) // 2
        mask = (1 << nshift) - 1
        y = aa & mask
        x = aa >> nshift
    return x, y
4a06a0c0fb80dfcb8a58d9509971bfdc0b026d27
3,656,214
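For integer inputs, split_test_image assumes mk_test_image packed x into the high half-word and y into the low half-word of each value; a round-trip illustration:

import numpy as np

aa = np.array([(3 << 16) | 7], dtype=np.uint32)  # x=3, y=7 packed
x, y = split_test_image(aa)
print(x, y)  # [3] [7]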
def sphdist(ra1, dec1, ra2, dec2):
    """measures the spherical distance between 2 points

    Inputs:
        (ra1, dec1) in degrees
        (ra2, dec2) in degrees

    Outputs:
        returns a distance in degrees
    """
    dec1_r = deg2rad(dec1)
    dec2_r = deg2rad(dec2)
    return 2. * rad2deg(
        arcsin(
            sqrt(
                (sin((dec1_r - dec2_r) / 2)) ** 2
                + cos(dec1_r) * cos(dec2_r) * (sin((deg2rad(ra1 - ra2)) / 2)) ** 2)))
517f7c67370c6e065c8860b2be59470a2801567d
3,656,215
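The code above is the haversine formula for angular separation; in LaTeX, with (\alpha, \delta) the right ascension and declination converted to radians:

d = 2 \arcsin \sqrt{ \sin^{2}\frac{\delta_1 - \delta_2}{2}
    + \cos\delta_1 \cos\delta_2 \, \sin^{2}\frac{\alpha_1 - \alpha_2}{2} }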
def parse_kwargs(kwargs, a_list):
    """extract values from kwargs or set default"""
    if a_list is not None:
        num_colors = len(a_list)
        default_colors = generate_colors(num_colors)
    else:
        num_colors = 1
        default_colors = 'k'

    logscale = kwargs.get('logscale', [False, False])
    Range = kwargs.get('Range', [[], []])
    colors = kwargs.get('colors', default_colors)
    figure_name = kwargs.get('figure_name', None)
    show = kwargs.get('show', True)
    dist = kwargs.get('dist', None)

    values = [logscale, Range, colors, figure_name, show, dist]
    return values
3f1006a8f638b3304ec6aa975346be1a4b6e8189
3,656,216
def talib_WCLPRICE(DataFrame):
    """WCLPRICE - Weighted Close Price (加权收盘价)"""
    res = talib.WCLPRICE(DataFrame.high.values, DataFrame.low.values, DataFrame.close.values)
    return pd.DataFrame({'WCLPRICE': res}, index=DataFrame.index)
6e2d4530fcb33d64b9fbe8a3f0a8a5d64c8f8107
3,656,217
def is_pi_parallel(ring1_center: np.ndarray,
                   ring1_normal: np.ndarray,
                   ring2_center: np.ndarray,
                   ring2_normal: np.ndarray,
                   dist_cutoff: float = 8.0,
                   angle_cutoff: float = 30.0) -> bool:
    """Check if two aromatic rings form a parallel pi-pi contact.

    Parameters
    ----------
    ring1_center, ring2_center: np.ndarray
        Positions of centers of the two rings. Can be computed with the
        compute_ring_center function.
    ring1_normal, ring2_normal: np.ndarray
        Normals of the two rings. Can be computed with the
        compute_ring_normal function.
    dist_cutoff: float
        Distance cutoff. Max allowed distance between the ring centers (Angstroms).
    angle_cutoff: float
        Angle cutoff. Max allowed deviation from the ideal (0 deg) angle
        between the rings (in degrees).

    Returns
    -------
    bool
        True if the two aromatic rings form a parallel pi-pi contact.
    """
    dist = np.linalg.norm(ring1_center - ring2_center)
    angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
    if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff)
            and dist < dist_cutoff):
        return True
    return False
def4eaba9e25e9034fce7559041e5142f82fc3c8
3,656,218
def _fetch_alleninf_coords(*args, **kwargs):
    """
    Gets updated MNI coordinates for AHBA samples, as shipped with `alleninf`

    Returns
    -------
    coords : :class:`pandas.DataFrame`
        Updated MNI coordinates for all AHBA samples

    References
    ----------
    Updated MNI coordinates taken from https://github.com/chrisfilo/alleninf,
    which is licensed under the BSD-3 (reproduced here):

    Copyright (c) 2018, Krzysztof Gorgolewski
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    * Neither the name of the copyright holder nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
    """
    coords = resource_filename('abagen', 'data/corrected_mni_coordinates.csv')
    coords = pd.read_csv(coords).rename(dict(corrected_mni_x='mni_x',
                                             corrected_mni_y='mni_y',
                                             corrected_mni_z='mni_z'), axis=1)
    return coords.set_index('well_id')
dae30a0f5404151a3e7d82f129ff36cfec14caa0
3,656,219
from typing import List
from typing import Union
from typing import DefaultDict
from typing import Dict
from typing import Tuple


def get_classes_for_mol_network(can: canopus.Canopus,
                                hierarchy: List[str],
                                npc_hierarchy: List[str],
                                class_p_cutoff: float,
                                max_class_depth: Union[int, None]) -> \
        DefaultDict[str, List[Union[str, Dict[str, List[Tuple[Union[str, float]]]]]]]:
    """Loop through mol network and gather CF and NPC classes

    :param can: Canopus object of canopus results with gnps mol network data
    :param hierarchy: the CF class level names to be included in output in
        order of hierarchy
    :param npc_hierarchy: the NPC class level names to be included in output
        in order of hierarchy
    :param class_p_cutoff: probability cutoff for including a class
    :param max_class_depth: max class depth for finding CF class
    :return: classes output - dict of lists of {componentindex: [cluster index,
        formula, {CF_level: [(class, prob)]}, {NPC_level: [(class, prob)]}]}

    CF classes are found by looking for the class at deepest depth (or
    max_class_depth) and then ordering these deepest classes based on priority.
    Then, the classes are traced back to higher hierarchy and sorted in output,
    again based on priority of deepest classes.
    """
    results = defaultdict(list)
    for node_id, node in can.gnps.nodes.items():
        # get canopus compound obj
        compound = can.sirius.compounds.get(node_id)
        if compound:
            cf_classes_dict = get_cf_classes(can, compound, hierarchy,
                                             class_p_cutoff, max_class_depth)
            npc_classes_dict = get_npc_classes(can, compound, npc_hierarchy)
            formula = compound.formula
            comp_id = node.componentId
            if comp_id == '-1':  # handling of singleton -1 components
                comp_id += f"_{node_id}"
            results[comp_id].append(
                [node_id, formula, cf_classes_dict, npc_classes_dict])
    return results
6808a751ed1873b7fb573bb3ecc55586d94590b1
3,656,220
def list_books(books):
    """Creates a string that, on each line, informs about a book."""
    return '\n'.join([f'+ {book.name}: {book.renew_count}: {book.return_date}'
                      for book in books])
fce770a39def7f40ed12820a578b4e327df7da43
3,656,221
def getHSPLNamespace():
    """
    Retrieve the namespace of the HSPL XML.

    @return: The namespace of the HSPL XML.
    """
    return HSPL_NAMESPACE
481db5781ff9d0b4a4e4702cccafb088379e38a4
3,656,222
def add_lead_zero(num, digit, IgnoreDataManipulation=False, RaiseDataManipulationError=False,
                  DigitMustAtLeastTwo=False):
    """Add leading '0' characters to the inputted integer 'num' according to the
    defined 'digit' and return the result as a string.

    Required keyword arguments:
    - num (int) : Integer (can be positive, zero, or negative)
    - digit (int) : How many digits of number should be in the returned string.

    Optional keyword arguments:
    - IgnoreDataManipulation (bool) : Avoid raising acceptable data manipulation warnings.
    - RaiseDataManipulationError (bool) : Raise every data manipulation warning as an
      error exception. (IgnoreDataManipulation must be False.)
    - DigitMustAtLeastTwo (bool) : Raise a warning or error if the defined digit is less than 2.

    Data manipulation errors:
    - Digit should be at least 2. (Ignored by default)
    - Amount of defined digits is less than digits of number in the inputted integer.
    """
    if type(num) is not int or type(digit) is not int:
        raise TypeError('parameters \'num\', \'digit\' should be integer.')
    if type(IgnoreDataManipulation) is not bool or type(RaiseDataManipulationError) is not bool \
            or type(DigitMustAtLeastTwo) is not bool:
        raise TypeError('parameters \'IgnoreDataManipulation\', \'RaiseDataManipulationError\', '
                        'and \'DigitMustAtLeastTwo\' should be boolean.')
    if IgnoreDataManipulation:
        RaiseDataManipulationError = False
    if digit < 1:
        raise ValueError('Digit should be at least one.')
    if digit < 2 and DigitMustAtLeastTwo:
        msg = 'Amount of digits should be at least 2.'
        if not IgnoreDataManipulation and not RaiseDataManipulationError:
            alternative_warn(msg, ValueWarning, 'add_lead_zero')
        if RaiseDataManipulationError:
            raise ValueError(msg)
    # Reuse variable 'digit'
    if num >= 0:
        num = str(num)
        IsNegative = False
    else:
        num = str(abs(num))
        IsNegative = True
    digit = digit - len(num)
    if digit > 0:
        for x in range(0, digit):
            # Reuse variable 'num'
            num = '0' + num
        if not IsNegative:
            return num
        else:
            return '-' + num
    elif digit == 0:
        if not IsNegative:
            return num
        else:
            return '-' + num
    else:
        msg = ('Defined digits amount is less than digits of number in inputted integer. '
               'It possibly means that some of the used data has been manipulated incorrectly.')
        if not IgnoreDataManipulation and not RaiseDataManipulationError:
            alternative_warn(msg, ValueWarning, 'add_lead_zero')
        if RaiseDataManipulationError:
            raise ValueError(msg)
        if not IsNegative:
            return num
        else:
            return '-' + num
ae3cffa2470a2acf5900a41b342366fb7c6e92da
3,656,223
def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
    """
    Attaches servers to a monitoring policy.
    """
    try:
        attach_servers = []
        for _server_id in servers:
            server_id = get_server(oneandone_conn, _server_id)
            attach_server = oneandone.client.AttachServer(
                server_id=server_id
            )
            attach_servers.append(attach_server)

        if module.check_mode:
            if attach_servers:
                return True
            return False

        monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
            monitoring_policy_id=monitoring_policy_id,
            servers=attach_servers)
        return monitoring_policy
    except Exception as ex:
        module.fail_json(msg=str(ex))
bf096804ec6be47fa4e41c9f4e50d51313f8ef3f
3,656,224
from typing import Union


def get_generator_contingency_fcas_availability_term_2(data, trader_id, trade_type, intervention) -> Union[float, None]:
    """Get generator contingency FCAS term 2"""
    # Parameters
    lower_slope_coefficient = get_lower_slope_coefficient(data, trader_id, trade_type)
    if lower_slope_coefficient == 0:
        return None

    enablement_min = lookup.get_trader_quantity_band_attribute(data, trader_id, trade_type, '@EnablementMin', float)
    reg_target = lookup.get_trader_solution_attribute(data, trader_id, '@L5RegTarget', float, intervention)
    energy_target = lookup.get_trader_solution_attribute(data, trader_id, '@EnergyTarget', float, intervention)

    return (energy_target - reg_target - enablement_min) / lower_slope_coefficient
8ec9c76c1941713511f8b472c4649954fd725d58
3,656,225
def format_pvalue(p_value, alpha=0.05, include_equal=True):
    """
    If the p-value is lower than alpha, change it to "<0.05"; otherwise,
    round it to three decimals.

    :param p_value: input p-value as a float
    :param alpha: significance level
    :param include_equal: include equal sign ('=') to p-value (e.g., '=0.06') or not (e.g., '0.06')
    :return: p_value: processed p-value (replaced by "<0.05" or rounded to three decimals) as a str
    """
    if p_value < alpha:
        p_value = "<" + str(alpha)
    else:
        if include_equal:
            p_value = '=' + str(round(p_value, 3))
        else:
            p_value = str(round(p_value, 3))
    return p_value
aa6506b14b68746f4fa58d951f246321e8b5a627
3,656,226
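Expected behavior of format_pvalue, for illustration:

print(format_pvalue(0.032))                        # <0.05
print(format_pvalue(0.0612))                       # =0.061
print(format_pvalue(0.0612, include_equal=False))  # 0.061
print(format_pvalue(0.032, alpha=0.01))            # =0.032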
def _compute_y(x, ll):
    """Computes y."""
    return np.sqrt(1 - ll ** 2 * (1 - x ** 2))
773a0695676e43984bb0ca8c1d8af2e0bc3bb4fd
3,656,227
def create_axis(length=1.0, use_neg=True):
    """
    Create axis.

    :param length: axis length
    :param use_neg: If False, only defined in positive planes
    :return: Axis object
    """
    # Defining the location and colors of each vertex of the shape
    vertices = [
        # positions                    colors
        -length * use_neg, 0.0, 0.0,   1.0, 0.0, 0.0,
        length, 0.0, 0.0,              1.0, 0.0, 0.0,
        0.0, -length * use_neg, 0.0,   0.0, 1.0, 0.0,
        0.0, length, 0.0,              0.0, 1.0, 0.0,
        0.0, 0.0, -length * use_neg,   0.0, 0.0, 1.0,
        0.0, 0.0, length,              0.0, 0.0, 1.0]

    # Defining connections among vertices
    # We have a triangle every 3 indices specified
    indices = [
        0, 1,
        2, 3,
        4, 5]

    return Shape(vertices, indices)
fe9c9d49de786147a382e1fda1e6ab92d26a1fe9
3,656,228
def genmatrix(list, combinfunc, symmetric=False, diagonal=None):
    """
    Takes a list and generates a 2D-matrix using the supplied combination
    function to calculate the values.

    PARAMETERS
        list       - the list of items
        combinfunc - the function that is used to calculate the value in a
                     cell. It has to cope with two arguments.
        symmetric  - Whether it will be a symmetric matrix along the diagonal.
                     For example, if the list contains integers, and the
                     combination function is abs(x-y), then the matrix will
                     be symmetric. Default: False
        diagonal   - The value to be put into the diagonal. For some
                     functions, the diagonal will stay constant. An example
                     could be the function "x-y". Then each diagonal cell
                     will be "0". If this value is set to None, then the
                     diagonal will be calculated. Default: None
    """
    matrix = []
    row_index = 0
    for item in list:
        row = []
        col_index = 0
        for item2 in list:
            if diagonal is not None and col_index == row_index:
                # if this is a cell on the diagonal
                row.append(diagonal)
            elif symmetric and col_index < row_index:
                # if the matrix is symmetric and we are "in the lower left triangle"
                row.append(matrix[col_index][row_index])
            else:
                # if this cell is not on the diagonal
                row.append(combinfunc(item, item2))
            col_index += 1
        matrix.append(row)
        row_index += 1
    return matrix
b7d8ebc916f57621a20c371139162cb0504470cd
3,656,229
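Example use of genmatrix with the absolute-difference function from the docstring; the diagonal of |x - y| is constantly 0, so it can be pinned instead of computed:

m = genmatrix([1, 2, 4], lambda x, y: abs(x - y), symmetric=True, diagonal=0)
print(m)  # [[0, 1, 3], [1, 0, 2], [3, 2, 0]]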
def get_all_raw_codes_by_area(area: EmisPermArea) -> list:
    """
    Returns a list of code names for all permissions within a logical area,
    for all possible modes.
    """
    return get_raw_codes_by_area(
        area, EmisPermMode.CREATE | EmisPermMode.UPDATE | EmisPermMode.VIEW
    )
d5887af92ba5fb7c373078dca84a8f9e74a089dc
3,656,230
def cartesian_pair(df1, df2, **kwargs):
    """
    Make a cross join (cartesian product) between two dataframes by using a
    constant temporary key. Also sets a MultiIndex which is the cartesian
    product of the indices of the input dataframes.

    See: https://github.com/pydata/pandas/issues/5401

    :param df1: dataframe 1
    :param df2: dataframe 2
    :param kwargs: keyword arguments that will be passed to pd.merge()
    :return: cross join of df1 and df2
    """
    df1['_tmpkey'] = 1
    df2['_tmpkey'] = 1

    res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)

    df1.drop('_tmpkey', axis=1, inplace=True)
    df2.drop('_tmpkey', axis=1, inplace=True)

    return res
e4ec1526f7a7906c5349bff20f5d4f83244c8878
3,656,231
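Example cross join with cartesian_pair:

import pandas as pd

left = pd.DataFrame({'a': [1, 2]})
right = pd.DataFrame({'b': ['x', 'y']})
print(cartesian_pair(left, right))
#    a  b
# 0  1  x
# 1  1  y
# 2  2  x
# 3  2  y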
def get_cases_by_landkreise_3daysbefore():
    """
    Return aggregated cases per Landkreis from 3 days before
    """
    hospitals_aggregated = db.session.query(CasesPerLandkreis3DaysBefore).all()
    return jsonify(__as_feature_collection(hospitals_aggregated)), 200
0442f66ff78549617dd582bc0d1529c0041e7edb
3,656,232
def shape_list(x, out_type=tf.int32): """Deal with dynamic shape in tensorflow cleanly.""" static = x.shape.as_list() dynamic = tf.shape(x, out_type=out_type) return [dynamic[i] if s is None else s for i, s in enumerate(static)]
80eea7ccdd4ebfa5a3318fb6070ec996df5b4972
3,656,233
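A sketch of shape_list: on an eager tensor every dimension is statically known, so plain Python ints come back; inside a tf.function with an unknown batch dimension, that entry would instead be a scalar Tensor taken from tf.shape.

import tensorflow as tf

x = tf.zeros([4, 16])
print(shape_list(x))  # [4, 16] -- all static, returned as plain ints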
import json
from typing import cast


def load_config(
    config_file: str, print_warnings: bool = False
) -> InfestorConfiguration:
    """
    Loads an infestor configuration from file and validates it.
    """
    try:
        with open(config_file, "r") as ifp:
            raw_config = json.load(ifp)
    except Exception as err:
        raise ConfigurationError(f"Could not read configuration: {config_file}") from err

    configuration, warnings, errors = parse_config(raw_config)

    if print_warnings and warnings:
        warning_items = "\n".join([f"- {warning}" for warning in warnings])
        print(
            f"Warnings when loading configuration file ({config_file}):\n{warning_items}"
        )
    if errors:
        error_items = "\n".join([f"- {error}" for error in errors])
        error_message = (
            f"Errors loading configuration file ({config_file}):\n{error_items}"
        )
        raise ConfigurationError(error_message)

    return cast(InfestorConfiguration, configuration)
b1d4a1385bb8855530f7043ddff5cc8d2f48be79
3,656,235
def what_do_you_mean_response(ctx: Context) -> REPLY_TYPE: """Generate response when we are asked about subject of the dialog Returns: template phrase based on previous skill or intent or topic confidence (can be 0.0, DONTKNOW_CONF, UNIVERSAL_RESPONSE_CONF, SUPER_CONF) human attributes (empty), bot attributes (empty), attributes (empty or MUST_CONTINUE) """ dialog = ctx.misc["agent"]["dialog"] attr = {} try: what_do_you_mean_intent = get_what_do_you_mean_intent(dialog["human_utterances"][-1]) if not (what_we_talk_about(dialog["human_utterances"][-1]) or what_do_you_mean_intent): reply, confidence = "", 0 elif len(dialog.get("human_utterances", [])) < 2: reply, confidence = DONTKNOW_PHRASE, DONTKNOW_CONF else: reply = get_bot_based_on_skill_reply(dialog.get("bot_utterances", [])) if reply is None: reply = get_bot_based_on_topic_or_intent_reply( dialog["human_utterances"][-2] if len(dialog["human_utterances"]) > 1 else [] ) if reply is None: reply, confidence = DONTKNOW_PHRASE, DONTKNOW_CONF else: if what_we_talk_about(dialog["human_utterances"][-1]): confidence = SUPER_CONF attr = {"can_continue": MUST_CONTINUE} else: # what_do_you_mean_intent but not regexp confidence = UNIVERSAL_RESPONSE_CONF except Exception as e: logger.exception("exception in grounding skill") logger.info(str(e)) sentry_sdk.capture_exception(e) reply = "" confidence = 0 return reply, confidence, {}, {}, attr
694b693d5ed1595781fdfe975f716cca4ff2dcd2
3,656,236
import warnings def get_market_tops(symbols=None, **kwargs): """ MOVED to iexfinance.iexdata.get_tops """ warnings.warn(WNG_MSG % ("get_market_tops", "iexdata.get_tops")) return TOPS(symbols, **kwargs).fetch()
4c94e35f447762a3d3ed9c076708450f1d1f200b
3,656,238
from pathlib import Path


def reduce_output_path(path=None, pdb_name=None):
    """Defines location of Reduce output files relative to input files."""
    if not path:
        if not pdb_name:
            raise NameError(
                "Cannot save an output for a temporary file without a PDB "
                "code specified")
        pdb_name = pdb_name.lower()
        output_path = Path(global_settings['structural_database']['path'],
                           pdb_name[1:3].lower(), pdb_name[:4].lower(),
                           'reduce', pdb_name + '_reduced.mmol')
    else:
        input_path = Path(path)
        if len(input_path.parents) > 1:
            output_path = input_path.parents[1] / 'reduce' / \
                (input_path.stem + '_reduced' + input_path.suffix)
        else:
            output_path = input_path.parent / \
                (input_path.stem + '_reduced' + input_path.suffix)
    return output_path
0add37e0d5b71998112045af34aba4c0a17310f9
3,656,240
def link_discord(request: HttpRequest): """Page to prompt user to link their discord account to their user account.""" skip_confirmation = request.GET.get("skip-confirm") if skip_confirmation and skip_confirmation == "true": return redirect("discord_register") return render(request, "link_discord.html")
05aba45b508e5a23cf62f5791a04e2525bbbbae0
3,656,242
import six import functools def rpc(f=None, **kwargs): """Marks a method as RPC.""" if f is not None: if isinstance(f, six.string_types): if 'name' in kwargs: raise ValueError('name option duplicated') kwargs['name'] = f else: return rpc(**kwargs)(f) return functools.partial(_rpc, **kwargs)
37ac21bd800bb202a78542636e9249ac3519c54e
3,656,243
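A sketch of the three call styles the rpc decorator supports; the private _rpc helper it ultimately delegates to is assumed to exist in the surrounding module:

@rpc
def ping(): ...            # bare decorator

@rpc("ping2")
def ping_positional(): ... # positional name

@rpc(name="ping3")
def ping_keyword(): ...    # keyword name

# Passing both at once raises ValueError('name option duplicated'):
# @rpc("a", name="b")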
def fig_fits_h(fig, y):
    """Determine whether the figure *fig* fits vertically on the screen
    when placed at vertical position *y*.
    """
    _, h = _get_max_width()
    win_h = fig.window.winfo_height()
    result = (y + win_h) < h
    return result
4e3254d7a4fad2d8de816b36aacbfd069378c1fc
3,656,244
def index(): """ Handler for the root url. Loads all movies and renders the first page. """ if path_set(): load_movies() return flask.render_template('main.html')
8f5c3295175cfd45b3604d523ac6b7de086702e9
3,656,246
from math import sqrt


def __hitScore__(srcMZ, targetMZ, srcRT, targetRT, parameters):
    # type: (float, float, float, float, LFParameters) -> float
    """Return the hit score of the target frame for the given source frame.

    Keyword Arguments:
        srcMZ      -- source m/z
        targetMZ   -- target m/z
        srcRT      -- source retention time
        targetRT   -- target retention time
        parameters -- LipidFinder's Amalgamator parameters instance
    """
    mzDelta = mz_delta(srcMZ, parameters['mzFixedError'],
                       parameters['mzPPMError'])
    mzDiff = abs(srcMZ - targetMZ)
    rtDelta = rt_delta(parameters['maxRTDiffAdjFrame'])
    rtDiff = abs(srcRT - targetRT)
    return sqrt(min(mzDiff / mzDelta, 1.0) ** 2
                + min(rtDiff / rtDelta, 1.0) ** 2)
1b35cfbb2f1e028ccbb53d4fed16459d5a469ac1
3,656,247
import numpy as np
import scipy as sp
import scipy.interpolate


def compute_propeller_nonuniform_freestream(prop, upstream_wake, conditions):
    """ Computes the inflow velocities in the frame of the rotating propeller

    Inputs:
       prop.                               SUAVE propeller
            tip_radius                     - propeller radius                         [m]
            rotation                       - propeller rotation direction            [-]
            thrust_angle                   - thrust angle of prop                    [rad]
            number_radial_stations         - number of propeller radial stations     [-]
            number_azimuthal_stations      - number of propeller azimuthal stations  [-]

       upstream_wake.
          u_velocities                     - Streamwise velocities from upstream wake
          v_velocities                     - Spanwise velocities from upstream wake
          w_velocities                     - Downwash velocities from upstream wake
          VD                               - Vortex distribution from upstream wake

       conditions.
          frames

    Outputs:
       Va                                  Axial velocities at propeller             [m/s]
       Vt                                  Tangential velocities at propeller        [m/s]
       Vr                                  Radial velocities at propeller            [m/s]
    """
    # unpack propeller parameters
    Vv       = conditions.frames.inertial.velocity_vector
    R        = prop.tip_radius
    rotation = prop.rotation
    c        = prop.chord_distribution
    Na       = prop.number_azimuthal_stations
    Nr       = len(c)

    ua_wing  = upstream_wake.u_velocities
    uv_wing  = upstream_wake.v_velocities
    uw_wing  = upstream_wake.w_velocities
    VD       = upstream_wake.VD

    # Velocity in the body frame
    T_body2inertial = conditions.frames.body.transform_to_inertial
    T_inertial2body = orientation_transpose(T_body2inertial)
    V_body          = orientation_product(T_inertial2body, Vv)
    body2thrust     = prop.body_to_prop_vel()
    T_body2thrust   = orientation_transpose(np.ones_like(T_body2inertial[:]) * body2thrust)
    V_thrust        = orientation_product(T_body2thrust, V_body)

    # azimuthal distribution
    psi    = np.linspace(0, 2 * np.pi, Na + 1)[:-1]
    psi_2d = np.tile(np.atleast_2d(psi), (Nr, 1))

    # 2-dimensional radial distribution, non-dimensionalized
    chi = prop.radius_distribution / R

    # Reframe the wing induced velocities:
    y_center = prop.origin[0][1]

    # New points to interpolate data: (corresponding to r,phi locations on propeller disc)
    points  = np.array([[VD.YC[i], VD.ZC[i]] for i in range(len(VD.YC))])
    ycoords = np.reshape((R * chi * np.cos(psi_2d).T).T, (Nr * Na,))
    zcoords = prop.origin[0][2] + np.reshape((R * chi * np.sin(psi_2d).T).T, (Nr * Na,))
    xi      = np.array([[y_center + ycoords[i], zcoords[i]] for i in range(len(ycoords))])

    ua_w = sp.interpolate.griddata(points, ua_wing, xi, method='linear')
    uv_w = sp.interpolate.griddata(points, uv_wing, xi, method='linear')
    uw_w = sp.interpolate.griddata(points, uw_wing, xi, method='linear')

    ua_wing = np.reshape(ua_w, (Nr, Na))
    uw_wing = np.reshape(uw_w, (Nr, Na))
    uv_wing = np.reshape(uv_w, (Nr, Na))

    if rotation == [1]:
        Vt_2d = V_thrust[:, 0] * (-np.array(uw_wing) * np.cos(psi_2d) + np.array(uv_wing) * np.sin(psi_2d))  # velocity tangential to the disk plane, positive toward the trailing edge; eqn 6.34, pg 165
        Vr_2d = V_thrust[:, 0] * (-np.array(uw_wing) * np.sin(psi_2d) - np.array(uv_wing) * np.cos(psi_2d))  # radial velocity, positive outward
        Va_2d = V_thrust[:, 0] * np.array(ua_wing)                                                           # velocity perpendicular to the disk plane, positive downward; eqn 6.36, pg 166
    else:
        Vt_2d = V_thrust[:, 0] * (np.array(uw_wing) * np.cos(psi_2d) - np.array(uv_wing) * np.sin(psi_2d))   # velocity tangential to the disk plane, positive toward the trailing edge
        Vr_2d = V_thrust[:, 0] * (-np.array(uw_wing) * np.sin(psi_2d) - np.array(uv_wing) * np.cos(psi_2d))  # radial velocity, positive outward
        Va_2d = V_thrust[:, 0] * np.array(ua_wing)                                                           # velocity perpendicular to the disk plane, positive downward

    # Append velocities to propeller
    prop.tangential_velocities_2d = Vt_2d
    prop.radial_velocities_2d     = Vr_2d
    prop.axial_velocities_2d      = Va_2d

    return prop
c7dc48066356e4d79e512812976a3e1a80b16749
3,656,248
def _expect_const(obj): """Return a Constant, or raise TypeError.""" if obj in (0, "0"): return ZERO if obj in (1, "1"): return ONE if obj in ("x", "X"): return LOGICAL if obj == "?": return ILLOGICAL if isinstance(obj, Constant): return obj raise TypeError("Expected obj to be a Constant")
33aff48ff285b89f36d28a99148afeea97302a05
3,656,249
def _eval_input_receiver_fn(tf_transform_output, schema, label_key): """Build everything needed for the tf-model-analysis to run the model. Args: tf_transform_output: A TFTransformOutput. schema: the schema of the input data. label_key: the name of the transformed label Returns: EvalInputReceiver function, which contains: - Tensorflow graph which parses raw untransformed features, applies the tf-transform preprocessing operators. - Set of raw, untransformed features. - Label against which predictions will be compared. """ # Notice that the inputs are raw features, not transformed features here. raw_feature_spec = _get_raw_feature_spec(schema) raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn( raw_feature_spec, default_batch_size=None) serving_input_receiver = raw_input_fn() features = serving_input_receiver.features.copy() transformed_features = tf_transform_output.transform_raw_features(features) # NOTE: Model is driven by transformed features (since training works on the # materialized output of TFT, but slicing will happen on raw features. features.update(transformed_features) return tfma.export.EvalInputReceiver( features=features, receiver_tensors=serving_input_receiver.receiver_tensors, labels=transformed_features[label_key])
60f0a6cf9a87894f7e37495b8b4e9f7bd9e85e22
3,656,250
def get_lpar_names(adp): """Get a list of the LPAR names. :param adp: A pypowervm.adapter.Adapter instance for the PowerVM API. :return: A list of string names of the PowerVM Logical Partitions. """ return [x.name for x in pvm_lpar.LPAR.search(adp, is_mgmt_partition=False)]
4009ed95b23ba6a35cbe38e1354f109e29fb7fc7
3,656,251
def init_mlp(in_dim, out_dim, hidden_dim, num_layers, non_linearity=None, bias=True): """Initializes a MultilayerPerceptron. Args: in_dim: int out_dim: int hidden_dim: int num_layers: int non_linearity: differentiable function (tanh by default) bias (bool) Returns: a MultilayerPerceptron with the architecture x -> Linear(in_dim, hidden_dim) -> non_linearity -> ... Linear(hidden_dim, hidden_dim) -> non_linearity -> Linear(hidden_dim, out_dim) -> y where num_layers = 0 corresponds to x -> Linear(in_dim, out_dim) -> y """ if non_linearity is None: non_linearity = nn.Tanh() dims = [in_dim] + [hidden_dim for _ in range(num_layers)] + [out_dim] return MultilayerPerceptron(dims, non_linearity, bias)
a2d5b8535af5d363df459cf0d2138b29b2356f30
3,656,252
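A usage sketch for init_mlp, assuming the MultilayerPerceptron class referenced above behaves like a standard torch.nn.Module:

import torch

# dims become [3, 8, 8, 2]: two hidden layers of width 8
mlp = init_mlp(in_dim=3, out_dim=2, hidden_dim=8, num_layers=2)
y = mlp(torch.randn(5, 3))  # expected output shape: (5, 2)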
def c_grad_curry_regularized(data, target):
    """Build a loss closure over the layer weight, adding an L2
    regularization term, for use with functional-style gradients."""
    def loss(layerweight):
        model = (lambda x: layerweight @ x.t())
        reg = 1e-3 * (layerweight ** 2).sum() / 2
        return criterion(model(data).t(), target) + reg
    return loss
4571c8849bb1643b4d27bad7d2d0ed88ed23c2fa
3,656,253
from typing import Counter def convert_examples_to_features_yake(examples, label_list, max_seq_length, tokenizer, output_mode, cls_token_at_end=False, pad_on_left=False, cls_token='[CLS]', sep_token='[SEP]', noi_token='[NOI]', pad_token=0, sequence_a_segment_id=0, cls_token_segment_id=1, pad_token_segment_id=0, mask_padding_with_zero=True, args=None): """ Loads a data file into a list of `InputBatch`s `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ noi_token_id = tokenizer.convert_tokens_to_ids(noi_token) num_exm = len(examples) idf_dict = {} for (ex_index, example) in enumerate(examples): if ex_index % 100000 == 0: logger.info("Writing idf example %d of %d" % (ex_index, len(examples))) if args.model_name_or_path == 'bert-base-uncased' or args.model_name_or_path == 'bert-large-uncased': tokens_a = tokenizer.tokenize(example.text_a) elif args.model_name_or_path == 'bert-base-cased': tokens_a = example.text_a.split() tokens = ["[CLS]"] + tokens_a + ["[SEP]"] for t in tokens: idf_dict[t] = idf_dict.get(t, 0) + 1 for t in idf_dict.keys(): idf_dict[t] = idf_dict[t] / num_exm stop_words = set(stopwords.words('english') ) for t in stop_words: if t in idf_dict: idf_dict[t] *= 0.001 inp = " ".join(idf_dict.keys()) spacy_nlp = spacy.load('en_core_web_sm') inp_results = [(token.text, token.tag_) for token in spacy_nlp(inp)] allowed_tags = ['VB','NN','JJ','RB'] # UH for "yes", "no", etc. ignored_words = ['was','were','be','is','are','am',"'s","'re"] + ['do','did','done','does'] # verb of no info for word, tag in inp_results: if word in idf_dict.keys(): if len(tag)>=2 and tag[:2] in allowed_tags and (word not in ignored_words): if tag[:2] in ['VB','NN']: idf_dict[word] *= 4 else: idf_dict[word] *= 2 features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.tokenize(example.text_a) # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
        tokens = tokens_a + [sep_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if cls_token_at_end:
            tokens = tokens + [cls_token]
            segment_ids = segment_ids + [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        tf = Counter(tokens)
        tokens_len = float(len(tokens))
        # score: a higher score means the token is more likely to be kept
        prob_list = np.array([idf_dict[t] * tf[t] / tokens_len for t in tokens])

        # add yake keywords; extract_keywords returns (keyword, score) pairs
        # (pair ordering may vary across yake versions), so keep only the
        # keyword strings for the membership test below
        kw_extractor = yake.KeywordExtractor()
        keywords = [pair[0] for pair in kw_extractor.extract_keywords(" ".join(tokens))]
        key_word_len = len(keywords)
        for i, t in enumerate(tokens):
            if t in keywords:
                prob_list[i] *= 100

        # penalize repeated words
        for i, t in enumerate(tokens):
            if t in tokens[:i]:
                prob_list[i] /= 10

        prob_list = max(prob_list) - prob_list
        N = len(tokens)
        lm_label_ids = [noi_token_id] * max_seq_length

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("lm_label_ids: %s" % " ".join([str(x) for x in lm_label_ids]))

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          lm_label_ids=lm_label_ids))

        while N > 1:
            mask_pos = np.array(house_robber(prob_list))
            unmask_pos = np.setdiff1d(np.arange(N), mask_pos)
            tokens = [t for i, t in enumerate(tokens) if i in unmask_pos]
            N = len(tokens)

            lm_label_ids = [pad_token] * max_seq_length
            j = 0
            i = 1
            while i < len(prob_list):
                if i in mask_pos:
                    lm_label_ids[j] = input_ids[i]
                    i += 2
                else:
                    lm_label_ids[j] = noi_token_id
                    i += 1
                j += 1
            while j < len(unmask_pos):
                lm_label_ids[j] = noi_token_id  # no input for last token of new sequence
                j += 1

            prob_list = prob_list[unmask_pos]

            # Zero-pad up to the sequence length.
padding_length = max_seq_length - len(unmask_pos) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) segment_ids = [sequence_a_segment_id] * len(tokens) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids else: input_ids = input_ids + ([pad_token] * padding_length) input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("lm_label_ids: %s" % " ".join([str(x) for x in lm_label_ids])) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids)) return features
4af89357339a2a63ff765f9da8660ca3895ba8b5
3,656,254
def sq_to_hr(bins, rho, S_k, k, axis=1): """ Takes the structure factor s(q) and computes the real space total correlation function h(r) """ # setup scales dr = np.pi / (k[0] * bins) radius = dr * np.arange(1, bins + 1, dtype=np.float) # Rearrange to find total correlation function from structure factor H_k = (S_k - 1.) / rho # # Transform back to real space iFT = idst(H_k * k[:bins], type=1, axis=axis) normalisation = bins * k[0] / (4 * np.pi**2 * radius) / (bins + 1) h_r = normalisation * iFT return h_r, radius
870e535ee3cdec3b138da1c205b000292eaee8ba
3,656,255
def scale17(data, factor):
    """Solution to exercise C-1.17.

    Had we implemented the scale function (page 25) as follows, does it work
    properly?

        def scale(data, factor):
            for val in data:
                val *= factor

    Explain why or why not.

    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    No, it doesn't work. Per the text, page 21:

    "It is worth noting that val is treated as a standard identifier. If the
    element of the original data happens to be mutable, the val identifier can
    be used to invoke its methods. But a reassignment of identifier val to a
    new value has no effect on the original data, nor on the next iteration of
    the loop."

    The code above fails because it tries to assign a new value to the "val"
    identifier. This merely breaks the alias without changing the list.
    """
    for val in data:
        val *= factor
    return data
84ac4012e0c839b78cb8617b6b9b7c2e8c54caa2
3,656,256
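A quick demonstration of the pitfall, plus one way to scale in place (index assignment rebinds the list slot rather than the loop variable):

data = [1, 2, 3]
scale17(data, 10)
print(data)  # [1, 2, 3] -- unchanged: `val *= factor` only rebinds the loop variable

def scale_inplace(data, factor):
    for i in range(len(data)):
        data[i] *= factor

scale_inplace(data, 10)
print(data)  # [10, 20, 30]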
import sqlite3


def initialize_database() -> sqlite3.Connection:
    """Create a sqlite3 database stored in the file bank_buds.db with three
    tables to hold users, records and challenge history. Returns the
    connection to the created database."""
    with sqlite3.connect("bank_buds.db") as conn:
        conn.execute("""CREATE TABLE IF NOT EXISTS user(
            customer_id TEXT NOT NULL,
            firstName TEXT NOT NULL,
            lastName TEXT NOT NULL,
            userName TEXT NOT NULL,
            userPass TEXT NOT NULL,
            balance INTEGER NOT NULL)""")
        conn.execute("""CREATE TABLE IF NOT EXISTS user_record(
            rec_id TEXT REFERENCES user NOT NULL,
            wins INTEGER NOT NULL,
            losses INTEGER NOT NULL)""")
        conn.execute("""CREATE TABLE IF NOT EXISTS challenge_history(
            challenge_id INTEGER NOT NULL,
            challenge_starter TEXT REFERENCES user NOT NULL,
            challenge_opponent TEXT REFERENCES user NOT NULL,
            challenge_winner TEXT REFERENCES user NOT NULL,
            challenge_loser TEXT REFERENCES user NOT NULL,
            is_active INTEGER NOT NULL,
            goal INTEGER NOT NULL)""")
    return conn
c3e32534de39a53686672c5c537a2c277fa2d06d
3,656,257
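A usage sketch; the inserted row is illustrative, not from the original source (sqlite3's context manager commits but does not close, so the returned connection stays usable):

conn = initialize_database()
conn.execute(
    "INSERT INTO user VALUES (?, ?, ?, ?, ?, ?)",
    ("c-001", "Ada", "Lovelace", "ada", "hunter2", 100),
)
conn.commit()
print(conn.execute("SELECT userName, balance FROM user").fetchall())  # [('ada', 100)]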
def stateless_multinomial(logits, num_samples, seed, output_dtype=dtypes.int64, name=None): """Draws deterministic pseudorandom samples from a multinomial distribution. This is a stateless version of `tf.random.categorical`: if run twice with the same seeds, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.stateless_categorical( tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17]) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] integer Tensor of seeds to the random number generator. output_dtype: integer type to use for the output. Defaults to int64. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`. """ with ops.name_scope(name, "stateless_multinomial", [logits, seed]): return stateless_multinomial_categorical_impl(logits, num_samples, output_dtype, seed)
da750b8a33348b4f6ff0b47897b4421a8099f12e
3,656,258
def calc_kss(tag,vj): """ calculate Kolmogorov-Smirnov statistics as in CMap; Lamb J, Science, 2006 Parameters ---------- tag: tuple tuple of up-/down-gene lists; (up,down) sorted with the values in the descending order vj: dict dictionary corresponding to V(j) in CMap; Lamb J, Science, 2006 key, gene; val, rank """ a_up,b_up = _ab(tag[0],vj) a_dn,b_dn = _ab(tag[1],vj) if a_up > b_up: ks_up = a_up else: ks_up = -1*b_up if a_dn > b_dn: ks_dn = a_dn else: ks_dn = -1*b_dn if ks_up*ks_dn > 0: ks = 0 else: ks = ks_up - ks_dn n = len(vj) tu = len(tag[0]) td = len(tag[1]) kssmax = _kss_max(n,tu,td) return ks/kssmax
8dbb6233fb82a65a3ffad347f8444d3c16f8f4a9
3,656,259
def encode(elem): """This is the general function to call when you wish to encode an element and all its children and sub-children. Encode in this context means to convert from pymm elements to xml.etree.ElementTree elements. Typically this is called by pymm.write() """ converter = ConversionHandler() return converter.convert_element_hierarchy(elem, 'encode')
13578267efb0a6e21b61d86a6c60f5ecd9235b05
3,656,260
def register_blueprints(app: "Flask") -> "Flask":
    """Register Flask blueprints on the application.

    To register additional blueprints, add them like the example:

        from app.blueprints import blueprint
        app.register_blueprint(blueprint)

    Args:
        app (Flask): Flask Application instance

    Returns:
        Flask: Flask Application instance
    """
    app.register_blueprint(api_v1_bp)
    return app
13564aa6f95d995362a56e9be02a51e50475e446
3,656,261
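A sketch of the usual app-factory wiring; create_app is a hypothetical factory name, and api_v1_bp is assumed to be defined elsewhere as in the snippet above:

from flask import Flask

def create_app() -> Flask:
    app = Flask(__name__)
    return register_blueprints(app)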
def build_history_class( cls: declarative.DeclarativeMeta, prop: T_PROPS, schema: str = None) -> nine.Type[TemporalProperty]: """build a sqlalchemy model for given prop""" class_name = "%s%s_%s" % (cls.__name__, 'History', prop.key) table = build_history_table(cls, prop, schema) base_classes = ( TemporalProperty, declarative.declarative_base(metadata=table.metadata), ) class_attrs = { '__table__': table, 'entity': orm.relationship( lambda: cls, backref=orm.backref('%s_history' % prop.key, lazy='dynamic'), ), } if isinstance(prop, orm.RelationshipProperty): class_attrs[prop.key] = orm.relationship(prop.argument, lazy='noload') model = type(class_name, base_classes, class_attrs) return model
696b379172c57145c215b64e3e3dc4648b42e535
3,656,262
def geo_distance(left, right):
    """
    Compute the distance between two geospatial values

    Parameters
    ----------
    left : geometry or geography
    right : geometry or geography

    Returns
    -------
    distance : double scalar
    """
    op = ops.GeoDistance(left, right)
    return op.to_expr()
8a7f1bc14eacf38cecda874d8b16d6c38d9d2049
3,656,263
def svn_dirent_local_style(*args): """svn_dirent_local_style(char dirent, apr_pool_t pool) -> char""" return _core.svn_dirent_local_style(*args)
afe170a321713c9d0f671303fa71d86bc93d8167
3,656,264
import tensorflow as tf


def make_generator_model():
    """
    The Generator

    The generator uses `tf.keras.layers.Conv2DTranspose` (upsampling) layers
    to produce an image from a seed (random noise). Start with a `Dense` layer
    that takes this seed as input, then upsample several times until you reach
    the desired image size of 28x28x1. Notice the `tf.keras.layers.LeakyReLU`
    activation for each layer, except the output layer which uses tanh.
    :return: the generator model
    """
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())

    model.add(tf.keras.layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size

    model.add(
        tf.keras.layers.Conv2DTranspose(
            128, (5, 5), strides=(1, 1), padding="same", use_bias=False
        )
    )
    assert model.output_shape == (None, 7, 7, 128)
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())

    model.add(
        tf.keras.layers.Conv2DTranspose(
            64, (5, 5), strides=(2, 2), padding="same", use_bias=False
        )
    )
    assert model.output_shape == (None, 14, 14, 64)
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())

    model.add(
        tf.keras.layers.Conv2DTranspose(
            1, (5, 5), strides=(2, 2), padding="same", use_bias=False, activation="tanh"
        )
    )
    assert model.output_shape == (None, 28, 28, 1)

    return model
e02fa5487805a85aaa5830ce90a6cc26cb2f27a4
3,656,265
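A sketch that draws one 28x28 image from random noise with the (untrained) generator:

import tensorflow as tf

generator = make_generator_model()
noise = tf.random.normal([1, 100])
image = generator(noise, training=False)
print(image.shape)  # (1, 28, 28, 1)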
import networkx as nx


def find_simple_cycles(dg):
    """ Find all simple cycles given a networkx graph.

    Args:
        dg (obj): a networkx directed graph

    Returns:
        simple_cycles (list of lists): all simple cycles with more than two
            nodes (2-cycles are filtered out).
    """
    simple_cycles = [c for c in nx.simple_cycles(dg) if len(c) > 2]
    # simple_cycles.sort(key=lambda cycle: len(cycle), reverse=True)  # optionally sort by number of segments
    return simple_cycles
4ed18ec26df80631c415086b99e470567e2641ae
3,656,266
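A small sketch: the 2-cycle a <-> b is filtered out by len(c) > 2, while the 3-cycle survives (node order within the returned cycle may vary):

import networkx as nx

dg = nx.DiGraph([('a', 'b'), ('b', 'a'), ('x', 'y'), ('y', 'z'), ('z', 'x')])
print(find_simple_cycles(dg))  # e.g. [['x', 'y', 'z']]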
from typing import Optional def augment_edge(edge_index: np.ndarray, nodes: np.ndarray, edge_weight: np.ndarray = None, *, nbrs_to_link: Optional[np.ndarray] = None, common_nbrs: Optional[np.ndarray] = None, fill_weight: float = 1.0) -> tuple: """Augment a set of edges by connecting nodes to element in ``nbrs_to_link``. Parameters ---------- edge_index: shape [M, 2] or [2, M] edge indices of a Scipy sparse adjacency matrix. nodes: the nodes that will be linked to the graph. list or np.array: the nodes connected to `nbrs_to_link` int: new added nodes connected to ``nbrs_to_link``, node ids [num_nodes, ..., num_nodes+nodes-1]. edge_weight: shape [M,] edge weights of a Scipy sparse adjacency matrix. nbrs_to_link: a list of N elements, where N is the length of 'nodes'. the specified neighbor(s) for each added node. if `None`, it will be set to `[0, ..., N-1]`. common_nbrs: shape [None,]. specified common neighbors for each added node. fill_weight: edge weight for the augmented edges. NOTE: ----- Both ``nbrs_to_link`` and ``common_nbrs`` should NOT be specified together. See Also -------- graphgallery.functional.augment_adj """ if nbrs_to_link is not None and common_nbrs is not None: raise RuntimeError("Only one of them should be specified.") edge_index = asedge(edge_index, shape="col_wise") if edge_weight is None: edge_weight = np.ones(edge_index.shape[1], dtype=gg.floatx()) num_nodes = edge_index.max() + 1 if gg.is_intscalar(nodes): # int, add nodes to the graph nodes = np.arange(num_nodes, num_nodes + nodes, dtype=edge_index.dtype) else: # array-like, link nodes to the graph nodes = np.asarray(nodes, dtype=edge_index.dtype) if common_nbrs is None and nbrs_to_link is None: nbrs_to_link = np.arange(nodes.size, dtype=edge_index.dtype) if not nodes.size == len(nbrs_to_link): raise ValueError("The length of 'nbrs_to_link' should equal to 'nodes'.") if nbrs_to_link is not None: edges_to_link = np.hstack([np.vstack([np.tile(node, get_length(nbr)), nbr]) for node, nbr in zip(nodes, nbrs_to_link)]) else: num_repeat = len(common_nbrs) edges_to_link = np.hstack([np.vstack([np.tile(node, num_repeat), common_nbrs]) for node in nodes]) edges_to_link = np.hstack([edges_to_link, edges_to_link[[1, 0]]]) added_edge_weight = np.zeros(edges_to_link.shape[1], dtype=edge_weight.dtype) + fill_weight augmented_edge_index = np.hstack([edge_index, edges_to_link]) augmented_edge_weight = np.hstack([edge_weight, added_edge_weight]) return augmented_edge_index, augmented_edge_weight
9b128dfd4bcefa7912af857de6998183ef4da3c2
3,656,267
def status(proc):
    """Check a process's status: 'alive' if it is still running, else 'dead'."""
    # is_alive is a method and must be called, not compared to True/False:
    # the original compared the bound method object itself, which is never
    # equal to either boolean.
    if proc.is_alive():
        return 'alive'
    return 'dead'
e257385f06979643e19fd9facc2118f4ae07c909
3,656,269
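A sketch using the fixed version above with a real multiprocessing.Process:

from multiprocessing import Process
import time

p = Process(target=time.sleep, args=(5,))
p.start()
print(status(p))  # 'alive'
p.terminate()
p.join()
print(status(p))  # 'dead'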
import pandas as pd


def is_plumed_file(filename):
    """
    Check if given file is in PLUMED format.

    Parameters
    ----------
    filename : string
        PLUMED output file

    Returns
    -------
    bool
        whether the file is a PLUMED output file
    """
    headers = pd.read_csv(filename, sep=" ", skipinitialspace=True, nrows=0)
    is_plumed = " ".join(headers.columns[:2]) == "#! FIELDS"
    return is_plumed
b6fca7c82efb2b07779406f06c15bf195bb4b5e9
3,656,270
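A sketch: a PLUMED COLVAR file opens with a "#! FIELDS ..." header, which is exactly what the first-two-columns check looks for (the file written here is illustrative):

with open("COLVAR", "w") as f:
    f.write("#! FIELDS time d1\n0.0 1.23\n")
print(is_plumed_file("COLVAR"))  # True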
def detect_llj_xarray(da, inverse=False): """ Identify local maxima in wind profiles. args: - da : xarray.DataArray with wind profile data - inverse : to flip the array if the data is stored upside down returns: : xarray.Dataset with vertical dimension removed containing: - falloff : 0 or largest difference between local max and subseq min - strength : 0 or wind speed at jet height - index : -1 or index along <axis> Note: vertical dimension should be labeled 'level' and axis=1 """ # Move <axis> to first dimension, to easily index and iterate over it. xv = np.rollaxis(da.values, 1) if inverse: xv = xv[::-1, ...] # Set initial arrays min_elem = xv[-1].copy() max_elem = np.zeros(min_elem.shape) max_diff = np.zeros(min_elem.shape) max_idx = np.ones(min_elem.shape, dtype=int) * (-1) # Start at end of array and search backwards for larger differences. for i, elem in reversed(list(enumerate(xv))): min_elem = np.minimum(elem, min_elem) new_max_identified = elem - min_elem > max_diff max_diff = np.where(new_max_identified, elem - min_elem, max_diff) max_elem = np.where(new_max_identified, elem, max_elem) max_idx = np.where(new_max_identified, i, max_idx) # Combine the results in a dataframe get_height = lambda i: np.where(i > 0, da.level.values[i], da.level.values[ -1]) dims = da.isel(level=0).drop('level').dims coords = da.isel(level=0).drop('level').coords lljs = xr.Dataset( { 'falloff': (dims, max_diff), 'strength': (dims, max_elem), 'level': (dims, get_height(max_idx)), }, coords=coords) print( 'Beware! Level is also filled if no jet is detected! ' 'Use ds.sel(level=lljs.level).where(lljs.falloff>0) to get rid of them' ) return lljs
3fbe444e5eed6ff1ec4f525145276e2bc974050c
3,656,271
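A sketch on one synthetic profile with a jet nose at 200 m: the strongest max-minus-subsequent-min difference is 9 - 5 = 4:

import numpy as np
import xarray as xr

speeds = np.array([[2., 4., 7., 9., 6., 5.]])  # one time step, six levels
da = xr.DataArray(speeds, dims=('time', 'level'),
                  coords={'level': [10, 50, 100, 200, 400, 800]})
lljs = detect_llj_xarray(da)
print(float(lljs.falloff), float(lljs.strength), int(lljs.level))  # 4.0 9.0 200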
def gen_blinds(depth, width, height, spacing, angle, curve, movedown): """Generate genblinds command for genBSDF.""" nslats = int(round(height / spacing, 0)) slat_cmd = "!genblinds blindmaterial blinds " slat_cmd += "{} {} {} {} {} {}".format( depth, width, height, nslats, angle, curve) slat_cmd += "| xform -rz -90 -rx -90 -t " slat_cmd += f"{-width/2} {-height/2} {-movedown}\n" return slat_cmd
2e8a2751f2bb2be0c2ffdff8218961b0b1c0191b
3,656,272
def dev_Sonic(Mach, gamma=defg._gamma):
    """computes the deviation angle for a downstream SONIC Mach number

    Args:
        Mach: upstream Mach number
        gamma: specific heat ratio (Default value = defg._gamma)

    Returns:
        deviation angle yielding a sonic downstream Mach number
    """
    return deflection_Mach_sigma(Mach, sigma_Sonic(Mach, gamma=gamma), gamma=gamma)
a29f90ec1de25a3b86c2dcc1a1a6becedbfbf696
3,656,273