Dataset columns: content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
def dict_compare(d1, d2): """ Compares all differences between two dicts. Returns four collections: the sets "added" (keys only in d1), "removed" (keys only in d2), "same" (shared keys with equal values) and the dict "modified" (shared keys mapped to their (d1, d2) value pairs). """ d1_keys = set(d1.keys()) d2_keys = set(d2.keys()) intersect_keys = d1_keys.intersection(d2_keys) added = d1_keys - d2_keys removed = d2_keys - d1_keys modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]} same = set(o for o in intersect_keys if d1[o] == d2[o]) return added, removed, modified, same
284368eade7de1e1abfd629ea903f6dff113e279
3,655,200
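A minimal usage sketch for the dict_compare entry above; the inputs are hypothetical and the expected values follow directly from the set arithmetic in the function:

d1 = {"a": 1, "b": 2, "c": 3}
d2 = {"b": 2, "c": 4, "d": 5}
added, removed, modified, same = dict_compare(d1, d2)
print(added)     # {'a'}  -> keys present only in d1
print(removed)   # {'d'}  -> keys present only in d2
print(modified)  # {'c': (3, 4)}  -> shared keys with differing values
print(same)      # {'b'}  -> shared keys with equal values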
from datetime import datetime, timedelta def toLocalTime(seconds, microseconds=0): """toLocalTime(seconds, microseconds=0) -> datetime Converts the given number of seconds since the GPS Epoch (midnight on January 6th, 1980) to this computer's local time. Returns a Python datetime object. Examples: >>> toLocalTime(0) datetime.datetime(1980, 1, 6, 0, 0) >>> toLocalTime(25 * 86400) datetime.datetime(1980, 1, 31, 0, 0) """ delta = timedelta(seconds=seconds, microseconds=microseconds) return GPS_Epoch + delta
9dd9352003b19b5e785766c7fb6e11716284c3ed
3,655,201
def get_part_01_answer(): """ Static method that will return the answer to Day01.01 :return: The product result :rtype: float """ return prod(summation_equals(puzzle_inputs, 2020, 2))
4fcc108bef3d0e5117caff4f02ff7797021a0efd
3,655,202
import numpy as np def eig_of_series(matrices): """Returns the eigenvalues and eigenvectors for a series of matrices. Parameters ---------- matrices : array_like, shape(n,m,m) A series of square matrices. Returns ------- eigenvalues : ndarray, shape(n,m) The eigenvalues of the matrices. eigenvectors : ndarray, shape(n,m,m) The eigenvectors of the matrices. """ s = matrices.shape eigenvalues = np.zeros((s[0], s[1]), dtype=complex) eigenvectors = np.zeros(s, dtype=complex) for i, A in enumerate(matrices): eVal, eVec = np.linalg.eig(A) eigenvalues[i] = eVal eigenvectors[i] = eVec return eigenvalues, eigenvectors
fd34bdb1dc1458d0d495259a07572e0b0a2e685a
3,655,203
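A small usage sketch for eig_of_series above, assuming NumPy is available and the function is importable; the second matrix is a 90-degree rotation whose eigenvalues are +1j and -1j (NumPy may return them in either order):

import numpy as np

matrices = np.array([
    [[2.0, 0.0], [0.0, 3.0]],   # diagonal matrix, eigenvalues 2 and 3
    [[0.0, -1.0], [1.0, 0.0]],  # rotation matrix, eigenvalues +1j and -1j
])
eigenvalues, eigenvectors = eig_of_series(matrices)
print(eigenvalues.shape)   # (2, 2)
print(eigenvectors.shape)  # (2, 2, 2)
print(eigenvalues[0])      # [2.+0.j 3.+0.j]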
from re import T def injectable( cls: T = None, *, qualifier: str = None, primary: bool = False, namespace: str = None, group: str = None, singleton: bool = False, ) -> T: """ Class decorator to mark it as an injectable dependency. This decorator accepts customization parameters but can be invoked without the parenthesis when no parameter will be specified. .. note:: All files using this decorator will be executed when :meth:`load_injection_container <injectable.load_injection_container>` is invoked. :param cls: (cannot be explicitly passed) the decorated class. This will be automatically passed to the decorator by Python magic. :param qualifier: (optional) string qualifier for the injectable to be registered with. Defaults to None. :param primary: (optional) marks the injectable as primary for resolution in ambiguous cases. Defaults to False. :param namespace: (optional) namespace in which the injectable will be registered. Defaults to :const:`injectable.constants.DEFAULT_NAMESPACE`. :param group: (optional) group to be assigned to the injectable. Defaults to None. :param singleton: (optional) when True the injectable will be a singleton, i.e. only one instance of it will be created and shared globally. Defaults to False. Usage:: >>> from injectable import injectable >>> >>> @injectable ... class Foo: ... ... """ def decorator(klass: T, direct_call: bool = False) -> T: steps_back = 3 if direct_call else 2 caller_filepath = get_caller_filepath(steps_back) if caller_filepath == InjectionContainer.LOADING_FILEPATH: InjectionContainer._register_injectable( klass, caller_filepath, qualifier, primary, namespace, group, singleton ) return klass return decorator(cls, True) if cls is not None else decorator
06f1b6d5b3747d92e91cf751e62d90222e06d9a8
3,655,204
def build_graph(defined_routes): """ build the graph form route definitions """ G = {} for row in defined_routes: t_fk_oid = int(row["t_fk_oid"]) t_pk_oid = int(row["t_pk_oid"]) if not t_fk_oid in G: G[t_fk_oid] = {} if not t_pk_oid in G: G[t_pk_oid] = {} G[t_fk_oid][t_pk_oid] = row["routing_cost"] G[t_pk_oid][t_fk_oid] = row["routing_cost"] return G
16962ee1f4e336a9a1edc7cc05712113461f9a1a
3,655,205
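A usage sketch for build_graph above with two hypothetical route rows; note that the node keys are cast to int while the routing cost is stored as-is, and every edge is inserted in both directions:

defined_routes = [
    {"t_fk_oid": "1", "t_pk_oid": "2", "routing_cost": 4.0},
    {"t_fk_oid": "2", "t_pk_oid": "3", "routing_cost": 1.5},
]
G = build_graph(defined_routes)
print(G)  # {1: {2: 4.0}, 2: {1: 4.0, 3: 1.5}, 3: {2: 1.5}}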
import numpy as np def grando_transform_gauss_batch(batch_of_images, mean, variance): """Input: batch of images; type: ndarray; size: (batch, 784) Output: batch of images with Gaussian noise; we use the clip function to ensure that the matrix values stay in the interval (0,1); type: ndarray; size: (batch, 784); """ x = batch_of_images + np.random.normal(mean, variance, batch_of_images.shape) return x
4def857b315c425337d3edb6273af16f052cb1ba
3,655,206
import subprocess def get_poetry_project_version() -> VersionInfo: """Run poetry version and get the project version""" command = ["poetry", "version"] poetry_version_output = subprocess.check_output(command, text=True) return version_string_to_version_info(poetry_version_output.split(" ")[1])
a4dc920239aef09b269a6f75568701788db2f708
3,655,207
def LF_DG_DISTANCE_SHORT(c): """ This LF is designed to make sure that the disease mention and the gene mention aren't right next to each other. """ return -1 if len(list(get_between_tokens(c))) <= 2 else 0
23b5450b844f91d6cae0993f2b7cda5c80460be1
3,655,208
def populate_user_flags(conf, args): """Populate a dictionary of configuration flag parameters, "conf", from values supplied on the command line in the structure, "args".""" if args.cflags: conf['cflags'] = args.cflags.split(sep=' ') if args.ldflags: conf['ldflags'] = args.ldflags.split(sep=' ') return conf
3f3fe64e2e352e0685a048747c9c8351575e40fb
3,655,209
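A usage sketch for populate_user_flags above, using a hypothetical argparse namespace; ldflags is left unset, so only cflags ends up in the dictionary:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--cflags")
parser.add_argument("--ldflags")
args = parser.parse_args(["--cflags", "-O2 -Wall"])

conf = populate_user_flags({}, args)
print(conf)  # {'cflags': ['-O2', '-Wall']}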
def combine_raytrace(input_list): """ Produce a combined output from a list of raytrace outputs. """ profiler.start('combine_raytrace') output = dict() output['config'] = input_list[0]['config'] output['total'] = dict() output['total']['meta'] = dict() output['total']['image'] = dict() output['found'] = dict() output['found']['meta'] = dict() output['found']['history'] = dict() output['lost'] = dict() output['lost']['meta'] = dict() output['lost']['history'] = dict() num_iter = len(input_list) key_opt_list = list(input_list[0]['total']['meta'].keys()) key_opt_last = key_opt_list[-1] # Combine the meta data. for key_opt in key_opt_list: output['total']['meta'][key_opt] = dict() key_meta_list = list(input_list[0]['total']['meta'][key_opt].keys()) for key_meta in key_meta_list: output['total']['meta'][key_opt][key_meta] = 0 for ii_iter in range(num_iter): output['total']['meta'][key_opt][key_meta] += input_list[ii_iter]['total']['meta'][key_opt][key_meta] # Combine the images. for key_opt in key_opt_list: if key_opt in input_list[0]['total']['image']: if input_list[0]['total']['image'][key_opt] is not None: output['total']['image'][key_opt] = np.zeros(input_list[0]['total']['image'][key_opt].shape) for ii_iter in range(num_iter): output['total']['image'][key_opt] += input_list[ii_iter]['total']['image'][key_opt] else: output['total']['image'][key_opt] = None # Combine all the histories. if len(input_list[0]['found']['history']) > 0: final_num_found = 0 final_num_lost = 0 for ii_run in range(num_iter): final_num_found += len(input_list[ii_run]['found']['history'][key_opt_last]['mask']) final_num_lost += len(input_list[ii_run]['lost']['history'][key_opt_last]['mask']) rays_found_temp = RayArray() rays_found_temp.zeros(final_num_found) rays_lost_temp = RayArray() rays_lost_temp.zeros(final_num_lost) for key_opt in key_opt_list: output['found']['history'][key_opt] = rays_found_temp.copy() output['lost']['history'][key_opt] = rays_lost_temp.copy() index_found = 0 index_lost = 0 for ii_run in range(num_iter): num_found = len(input_list[ii_run]['found']['history'][key_opt_last]['mask']) num_lost = len(input_list[ii_run]['lost']['history'][key_opt_last]['mask']) for key_opt in key_opt_list: for key_ray in output['found']['history'][key_opt]: output['found']['history'][key_opt][key_ray][index_found:index_found + num_found] = ( input_list[ii_run]['found']['history'][key_opt][key_ray][:]) output['lost']['history'][key_opt][key_ray][index_lost:index_lost + num_lost] = ( input_list[ii_run]['lost']['history'][key_opt][key_ray][:]) index_found += num_found index_lost += num_lost profiler.stop('combine_raytrace') return output
2ed47dc60585e793bdc40fa39e04057c26db347d
3,655,210
def is_dict(): """Expects any dictionary""" return TypeMatcher(dict)
81bed05f5c8dae6ba3b8e77caa4d2d7777fb7ea9
3,655,211
from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.bids import BIDSFreeSurferDir import os from pathlib import Path def init_dmriprep_wf(): """ Build *dMRIPrep*'s pipeline. This workflow organizes the execution of *dMRIPrep*, with a sub-workflow for each subject. If FreeSurfer's recon-all is to be run, a FreeSurfer derivatives folder is created and populated with any needed template subjects. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from dmriprep.config.testing import mock_config from dmriprep.workflows.base import init_dmriprep_wf with mock_config(): wf = init_dmriprep_wf() """ ver = Version(config.environment.version) dmriprep_wf = Workflow(name=f"dmriprep_{ver.major}_{ver.minor}_wf") dmriprep_wf.base_dir = config.execution.work_dir freesurfer = config.workflow.run_reconall if freesurfer: fsdir = pe.Node( BIDSFreeSurferDir( derivatives=config.execution.output_dir, freesurfer_home=os.getenv("FREESURFER_HOME"), spaces=config.workflow.spaces.get_fs_spaces(), ), name=f"fsdir_run_{config.execution.run_uuid.replace('-', '_')}", run_without_submitting=True, ) if config.execution.fs_subjects_dir is not None: fsdir.inputs.subjects_dir = str( config.execution.fs_subjects_dir.absolute() ) for subject_id in config.execution.participant_label: single_subject_wf = init_single_subject_wf(subject_id) single_subject_wf.config["execution"]["crashdump_dir"] = str( Path(config.execution.dmriprep_dir) / f"sub-{subject_id}" / "log" / config.execution.run_uuid ) for node in single_subject_wf._get_all_nodes(): node.config = deepcopy(single_subject_wf.config) if freesurfer: dmriprep_wf.connect( fsdir, "subjects_dir", single_subject_wf, "fsinputnode.subjects_dir", ) else: dmriprep_wf.add_nodes([single_subject_wf]) # Dump a copy of the config file into the log directory log_dir = ( Path(config.execution.dmriprep_dir) / f"sub-{subject_id}" / "log" / config.execution.run_uuid ) log_dir.mkdir(exist_ok=True, parents=True) config.to_filename(log_dir / "dmriprep.toml") return dmriprep_wf
3cc4825cf5e1f376c19610c9289e89d2813e7125
3,655,212
import re def get_list_from_comma_separated_string(comma_separated_list): """ get a python list of resource names from comma separated list :param str comma_separated_list: :return: """ # remove all extra whitespace after commas and before/after string but NOT in between resource names removed_whitespace_str = re.sub(r"(,\s+)", ",", comma_separated_list).strip() resource_names = removed_whitespace_str.split(",") return resource_names
73df5fe431aceec0fec42d6019269a247b5587a5
3,655,213
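A usage sketch for get_list_from_comma_separated_string above; whitespace after commas and around the whole string is stripped, while spaces inside a resource name are preserved:

print(get_list_from_comma_separated_string("my bucket,  my queue,   my table  "))
# ['my bucket', 'my queue', 'my table']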
def cci(series, window=14): """ compute commodity channel index """ price = typical_price(series) typical_mean = rolling_mean(price, window) res = (price - typical_mean) / (.015 * np.std(typical_mean)) return pd.Series(index=series.index, data=res)
b3eeabd4369fc1a041c3a714edc193deced804dc
3,655,214
def load_definition_from_string(qualified_module, cache=True): """Load a definition based on a fully qualified string. Returns: None or the loaded object Example: .. code-block:: python definition = load_definition_from_string('watson.http.messages.Request') request = definition() """ if qualified_module in definition_lookup and cache: return definition_lookup[qualified_module] parts = qualified_module.split('.') try: module = import_module('.'.join(parts[:-1])) obj = getattr(module, parts[-1:][0]) definition_lookup[qualified_module] = obj return obj except ImportError: raise
c435b4945878c5b5c2f5c7e252259da2be2345d0
3,655,215
import requests import logging def get_session(auth_mechanism, username, password, host): """Takes a username, password and authentication mechanism, logs into ICAT and returns a session ID""" # The ICAT Rest API does not accept json in the body of the HTTP request. # Instead it takes the form parameter 'json' with a string value - which is # the json-encoded data - eurrgh! The json-encoded data is sensitive to # order so we cannot pass a Python dictionary to the requests.post call as # Python dictionaries do not preserve order - eurrgh! So we construct a # string with the json data in the correct order - an OrderedDict may work # here - untested. (Also, dictionaries preserve order in Python 3.something) form_data = {'json': '{"plugin": "' + auth_mechanism + '", "credentials":[{"username":"' + username + '"}, {"password":"' + password + '"}]}'} session_url = host + "/icat/session" response = requests.post(session_url, data=form_data) if response.ok: return response.json()['sessionId'] else: logging.critical("Failed to get a session ID. Exiting.") log_response(response) raise RuntimeError()
2395751bc64300ae32c052e5a9aba04e50f7941f
3,655,216
from typing import List def _create_all_aux_operators(num_modals: List[int]) -> List[VibrationalOp]: """Generates the common auxiliary operators out of the given WatsonHamiltonian. Args: num_modals: the number of modals per mode. Returns: A list of VibrationalOps. For each mode the number of occupied modals will be evaluated. """ aux_second_quantized_ops_list = [] for mode in range(len(num_modals)): aux_second_quantized_ops_list.append(_create_occ_modals_per_mode(num_modals, mode)) return aux_second_quantized_ops_list
58a30221757bfcaa5c8b8790fd5556b060ecc8d1
3,655,217
import tensorflow as tf from typing import List from tensorflow.keras.layers import BatchNormalization, Concatenate, Conv1D def add_conv(X: tf.Tensor, filters: List[int], kernel_sizes: List[int], output_n_filters: int) -> tf.Tensor: """ Builds a block of parallel 1D convolutions over a batch-normalized input, concatenated and reduced to output_n_filters channels. :param X: input layer. :param filters: number of output filters in the convolution. :param kernel_sizes: list of lengths of the 1D convolution window. :param output_n_filters: number of 1D output filters. :return: output layer. """ # normalize the input X = BatchNormalization()(X) # add convolutions convs = [] for n_filters, kernel_size in zip(filters, kernel_sizes): conv = Conv1D(filters=n_filters, kernel_size=kernel_size, padding="same", activation="relu") convs.append(conv(X)) # concatenate all convolutions conc = Concatenate(axis=-1)(convs) conc = BatchNormalization()(conc) # dimensionality reduction conv = Conv1D(filters=output_n_filters, kernel_size=1, padding="same", activation="relu") return conv(conc)
69e907fc87780ce754c69500108dc00866fef716
3,655,218
def show_toast(view, message, timeout=DEFAULT_TIMEOUT, style=DEFAULT_STYLE): # type: (sublime.View, str, int, Dict[str, str]) -> Callable[[], None] """Show a toast popup at the bottom of the view. A timeout of -1 makes a "sticky" toast. """ messages_by_line = escape_text(message).splitlines() content = style_message("<br />".join(messages_by_line), style) # Order can matter here. If we calc width *after* visible_region we get # different results! width, _ = view.viewport_extent() visible_region = view.visible_region() last_row, _ = view.rowcol(visible_region.end()) line_start = view.text_point(last_row - 4 - len(messages_by_line), 0) vid = view.id() key = IDS() def on_hide(vid, key): if HIDE_POPUP_TIMERS.get(vid) == key: HIDE_POPUP_TIMERS.pop(vid, None) def __hide_popup(vid, key, sink): if HIDE_POPUP_TIMERS.get(vid) == key: HIDE_POPUP_TIMERS.pop(vid, None) sink() inner_hide_popup = show_popup( view, content, max_width=width * 2 / 3, location=line_start, on_hide=partial(on_hide, vid, key) ) HIDE_POPUP_TIMERS[vid] = key hide_popup = partial(__hide_popup, vid, key, inner_hide_popup) if timeout > 0: sublime.set_timeout(hide_popup, timeout) return hide_popup
910809b3efca6c1256af3540acbe42449080bebc
3,655,219
def flip_metropolised_gibbs_numba_classic(p, z): """ Given the *probability* of z=1 flip z according to metropolised Gibbs """ if z == 1: if p <= .5: return -z # alpha = 1 # TODO, can return -x here else: alpha = (1 - p) / p else: if p >= .5: return -z # alpha = 1 else: alpha = p / (1 - p) if np.random.rand() < alpha: return -z else: return z
c2399ae8923fd8a533e1b9020a2462d8109c0855
3,655,220
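A usage sketch for flip_metropolised_gibbs_numba_classic above, assuming NumPy; p is the probability that the spin should be +1, so a +1 spin with p <= 0.5 is flipped deterministically, while the reverse move is accepted with probability p / (1 - p):

import numpy as np

np.random.seed(0)
print(flip_metropolised_gibbs_numba_classic(0.2, 1))   # -1 (deterministic flip, since p <= 0.5)
print(flip_metropolised_gibbs_numba_classic(0.2, -1))  # -1 with this seed (the 0.25-probability flip is rejected)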
def get_default_interpreter(): """Returns an instance of the default interpreter class.""" return __default_interpreter.get()
8807e2480787d26e81ab1be3377f8e3a11daa1de
3,655,221
def fx_ugoira_frames(): """frames data.""" return { '000000.jpg': 1000, '000001.jpg': 2000, '000002.jpg': 3000, }
e3517b37bb4c9cd1dfb70b13128d16ef80a9801a
3,655,222
import array def coherent_tmm(pol, n_list, d_list, th_0, lam_vac): """ This is my slightly modified version of byrnes's "coh_tmm" I've rearranged the calculations in a way that is more intuitive to me Example inputs: For angle dependence, be careful to include air first, otherwise the angle will be wrong layers = [ 'Air','SiO2', 'ITO' ,'PEDOT' ,'TCTA' , 'TCTA-tpbi-Irppy' ,'tpbi', 'Al', 'Air'] doping = [ 1, 1 , 1 , 1 , 1 ,[0.475,0.475,0.05] , 1, 1, 1] d_list = np.array([0,0, 100 , 70 , 20 , 60 , 20 , 100, 0]) n_list = load_nk(layers,doping,wavelength_nm,df_nk) Assign a thickness of 0 to incoherent layers (air, substrate) Notes from byrnes: Main "coherent transfer matrix method" calc. Given parameters of a stack, calculates everything you could ever want to know about how light propagates in it. (If performance is an issue, you can delete some of the calculations without affecting the rest.) pol is light polarization, "s" or "p". n_list is the list of refractive indices, in the order that the light would pass through them. The 0'th element of the list should be the semi-infinite medium from which the light enters, the last element should be the semi- infinite medium to which the light exits (if any exits). th_0 is the angle of incidence: 0 for normal, pi/2 for glancing. Remember, for a dissipative incoming medium (n_list[0] is not real), th_0 should be complex so that n0 sin(th0) is real (intensity is constant as a function of lateral position). d_list is the list of layer thicknesses (front to back). Should correspond one-to-one with elements of n_list. First and last elements should be "inf". lam_vac is vacuum wavelength of the light. Outputs the following as a dictionary (see manual for details) * r--reflection amplitude * t--transmission amplitude * R--reflected wave power (as fraction of incident) * T--transmitted wave power (as fraction of incident) * power_entering--Power entering the first layer, usually (but not always) equal to 1-R (see manual). * vw_list-- n'th element is [v_n,w_n], the forward- and backward-traveling amplitudes, respectively, in the n'th medium just after interface with (n-1)st medium. * kz_list--normal component of complex angular wavenumber for forward-traveling wave in each layer. * th_list--(complex) propagation angle (in radians) in each layer * pol, n_list, d_list, th_0, lam_vac--same as input """ # Convert to numpy arrays if not already n_list = np.array(n_list) d_list = np.array(d_list) # Input tests if ((hasattr(lam_vac, 'size') and lam_vac.size > 1) or (hasattr(th_0, 'size') and th_0.size > 1)): raise ValueError('This function is not vectorized; you need to run one ' 'calculation at a time (1 wavelength, 1 angle, etc.)') if (n_list.ndim != 1) or (d_list.ndim != 1) or (n_list.size != d_list.size): raise ValueError("Problem with n_list or d_list!") #assert d_list[0] == d_list[-1] == inf, 'd_list must start and end with inf!' assert abs((n_list[0]*np.sin(th_0)).imag) < 100*EPSILON, 'Error in n0 or th0!' assert is_forward_angle(n_list[0], th_0), 'Error in n0 or th0!' 
# using a mix of notation from byrnes and pettersson # because pettersson's notation is often garbage num_layers = n_list.size n0 = n_list[0] cosTheta_list = sqrt(1 - (n0/n_list)**2 * sin(th_0)**2) th_list = list_snell(n_list, th_0) sinTheta_list = (n0/n_list)**2 * sin(th_0)**2 kz_list = 2 * pi / lam_vac * n_list * cosTheta_list delta = kz_list * d_list t_list = zeros((num_layers, num_layers), dtype=complex) r_list = zeros((num_layers, num_layers), dtype=complex) I_list = zeros((num_layers, 2, 2), dtype=complex) L_list = zeros((num_layers, 2, 2), dtype=complex) M_list = zeros((num_layers, 2, 2), dtype=complex) Mtilde = make_2x2_array(1, 0, 0, 1, dtype=complex) for j in range(0,num_layers-1): # t and r are shared notation for pettersson and byrnes t_list[j,j+1] = interface_t_cos(pol, n_list[j], n_list[j+1], cosTheta_list[j], cosTheta_list[j+1]) r_list[j,j+1] = interface_r_cos(pol, n_list[j], n_list[j+1], cosTheta_list[j], cosTheta_list[j+1]) # interface matrix, eqn. 1 pettersson I_list[j] = 1/t_list[j,j+1] * make_2x2_array(1,r_list[j,j+1], r_list[j,j+1],1, dtype=complex) # M and L are not defined for the 0th layer # i.e. the substrate or ambient is incoherent if j==0: # Pre-factor in byrnes eqn 13 Mtilde = np.dot(I_list[j],Mtilde) if j>0: # Layer matrix (phase matrix), eqn. 5 pettersson L_list[j] = make_2x2_array(exp(-1j*delta[j]),0, 0,exp(1j*delta[j]),dtype=complex) # M matrix (byrnes eqn. 11) M_list[j] = np.dot(L_list[j],I_list[j]) # Mtilde byrnes eqn. 13 Mtilde = np.dot(Mtilde,M_list[j]) # Net complex transmission and reflection amplitudes # byrnes eqn 15, petterson eqns 9-10 r = Mtilde[1,0]/Mtilde[0,0] t = 1/Mtilde[0,0] # Construct list of forward and backward amplitudes (byrnes eqn 10) # vw_list[n] = [v_n, w_n]. v_0 and w_0 are undefined because the 0th medium # has no left interface. vw_list = zeros((num_layers, 2), dtype=complex) v_list = zeros((num_layers,1), dtype=complex) w_list = zeros((num_layers,1), dtype=complex) # Final layer v and w, Transmitted amplitude, assuming no back illumination vw = array([[t],[0]]) vw_list[-1,:] = np.transpose(vw) for i in range(num_layers-2, 0, -1): vw = np.dot(M_list[i], vw) v_list[i] = vw[0] w_list[i] = vw[1] vw_list[i,:] = np.transpose(vw) # Assuming incident intensity is 1 vw = array([[1],[r]]) vw_list[0,:] = np.transpose(vw) # Net transmitted and reflected power, as a proportion of the incoming light # power. R = R_from_r(r) T = T_from_t(pol, t, n_list[0], n_list[-1], th_0, th_list[-1]) power_entering = power_entering_from_r(pol, r, n_list[0], th_0) th_list=0 return {'r': r, 't': t, 'R': R, 'T': T, 'power_entering': power_entering, 'vw_list': vw_list, 'kz_list': kz_list, 'th_list': th_list, 'pol': pol, 'n_list': n_list, 'd_list': d_list, 'th_0': th_0, 'lam_vac':lam_vac, 'M_list':M_list, 't_list':t_list, 'r_list':r_list, 'Mtilde':Mtilde, 'I_list':I_list, 'L_list':L_list}
3e10041325ee211d684c9ad960b445df8e6de2db
3,655,223
def base_info(): """ Display and update the user's basic profile. 1. Try to fetch the user info. 2. For a GET request, return the user info to the template. For a POST request: 1. Get the parameters nick_name, signature, gender [MAN, WOMAN]. 2. Check that all parameters are present. 3. Check that gender is within the allowed range. 4. Save the user info. 5. Commit the data. 6. Update nick_name in the redis cache. Registration: session['nick_name'] = mobile Login: session['nick_name'] = user.nick_name Update: session['nick_name'] = nick_name 7. Return the result. :return: """ user = g.user if request.method == 'GET': data = { 'user': user.to_dict() } return render_template('blogs/user_base_info.html', data=data) # Get the parameters nick_name = request.json.get('nick_name') signature = request.json.get('signature') gender = request.json.get('gender') # Check the parameters if not all([nick_name, signature, gender]): return jsonify(errno=RET.PARAMERR, errmsg='参数缺失') # Validate that gender is within the allowed range if gender not in ['MAN', 'WOMAN']: return jsonify(errno=RET.PARAMERR, errmsg='参数范围错误') # Save the user info user.nick_name = nick_name user.signature = signature user.gender = gender # Commit the data try: db.session.add(user) db.session.commit() except Exception as e: current_app.logger.error(e) db.session.rollback() return jsonify(errno=RET.DBERR, errmsg='保存数据失败') # Update the user info cached in redis session['nick_name'] = nick_name # Return the result return jsonify(errno=RET.OK, errmsg='OK')
87d5595171e2cecc469ea933b210783e15c477d2
3,655,224
import numpy as np def to_list(obj): """Convert a numpy array to a plain Python list (e.g. as a JSON serialization fallback); raise TypeError for unsupported types.""" if isinstance(obj, np.ndarray): return obj.tolist() raise TypeError('Not serializable')
92e4851bb117ab908dc256f8b42ef03c85d70e28
3,655,225
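A usage sketch for to_list above; it is shaped like a json.dumps default hook, which is how it is exercised here (assuming NumPy):

import json
import numpy as np

arr = np.arange(3)
print(json.dumps({"values": arr}, default=to_list))  # {"values": [0, 1, 2]}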
from sage.symbolic.expression import Expression from sage.symbolic.ring import SR from inspect import signature, Parameter def symbolic_expression(x): """ Create a symbolic expression or vector of symbolic expressions from x. INPUT: - ``x`` - an object OUTPUT: - a symbolic expression. EXAMPLES:: sage: a = symbolic_expression(3/2); a 3/2 sage: type(a) <type 'sage.symbolic.expression.Expression'> sage: R.<x> = QQ[]; type(x) <type 'sage.rings.polynomial.polynomial_rational_flint.Polynomial_rational_flint'> sage: a = symbolic_expression(2*x^2 + 3); a 2*x^2 + 3 sage: type(a) <type 'sage.symbolic.expression.Expression'> sage: from sage.symbolic.expression import is_Expression sage: is_Expression(a) True sage: a in SR True sage: a.parent() Symbolic Ring Note that equations exist in the symbolic ring:: sage: E = EllipticCurve('15a'); E Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field sage: symbolic_expression(E) x*y + y^2 + y == x^3 + x^2 - 10*x - 10 sage: symbolic_expression(E) in SR True If ``x`` is a list or tuple, create a vector of symbolic expressions:: sage: v=symbolic_expression([x,1]); v (x, 1) sage: v.base_ring() Symbolic Ring sage: v=symbolic_expression((x,1)); v (x, 1) sage: v.base_ring() Symbolic Ring sage: v=symbolic_expression((3,1)); v (3, 1) sage: v.base_ring() Symbolic Ring sage: E = EllipticCurve('15a'); E Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field sage: v=symbolic_expression([E,E]); v (x*y + y^2 + y == x^3 + x^2 - 10*x - 10, x*y + y^2 + y == x^3 + x^2 - 10*x - 10) sage: v.base_ring() Symbolic Ring If ``x`` is a function, for example defined by a ``lambda`` expression, create a symbolic function:: sage: f = symbolic_expression(lambda z: z^2 + 1); f z |--> z^2 + 1 sage: f.parent() Callable function ring with argument z sage: f(7) 50 If ``x`` is a list or tuple of functions, or if ``x`` is a function that returns a list or tuple, create a callable symbolic vector:: sage: symbolic_expression([lambda mu, nu: mu^2 + nu^2, lambda mu, nu: mu^2 - nu^2]) (mu, nu) |--> (mu^2 + nu^2, mu^2 - nu^2) sage: f = symbolic_expression(lambda uwu: [1, uwu, uwu^2]); f uwu |--> (1, uwu, uwu^2) sage: f.parent() Vector space of dimension 3 over Callable function ring with argument uwu sage: f(5) (1, 5, 25) sage: f(5).parent() Vector space of dimension 3 over Symbolic Ring TESTS: Also functions defined using ``def`` can be used, but we do not advertise it as a use case:: sage: def sos(x, y): ....: return x^2 + y^2 sage: symbolic_expression(sos) (x, y) |--> x^2 + y^2 Functions that take a varying number of arguments or keyword-only arguments are not accepted:: sage: def variadic(x, *y): ....: return x sage: symbolic_expression(variadic) Traceback (most recent call last): ... TypeError: unable to convert <function variadic at 0x...> to a symbolic expression sage: def function_with_keyword_only_arg(x, *, sign=1): ....: return sign * x sage: symbolic_expression(function_with_keyword_only_arg) Traceback (most recent call last): ... 
TypeError: unable to convert <function function_with_keyword_only_arg at 0x...> to a symbolic expression """ if isinstance(x, Expression): return x elif hasattr(x, '_symbolic_'): return x._symbolic_(SR) elif isinstance(x, (tuple, list)): return vector([symbolic_expression(item) for item in x]) elif callable(x): try: s = signature(x) except ValueError: pass else: if all(param.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD) for param in s.parameters.values()): vars = [SR.var(name) for name in s.parameters.keys()] result = x(*vars) if isinstance(result, (tuple, list)): return vector(SR, result).function(*vars) else: return SR(result).function(*vars) return SR(x)
648c85a8fd3f4ffefec44e5720f8c9ac68c10388
3,655,226
def seq_hyphentation(words): """ Converts words in a list of strings into lists of syllables :param words: a list of words (strings) :return: a list of lists containing word syllables """ return [hyphenation(w) for w in words]
dd1ab65f64926e724718edac316a98bac99991da
3,655,227
def angle(A, B, dim=1): """ Computes the angle in radians between the inputs along the specified dimension Parameters ---------- A : Tensor first input tensor B : Tensor second input tensor dim : int (optional) dimension along the angle is computed (default is 1) Returns ------- Tensor the tensor containing the angle between the inputs """ return acos(clamp(dot(A, B, dim=dim), -1, 1))
f64950b8004a32e2ab274efee3a9bedf6441439a
3,655,228
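The angle entry above relies on acos, clamp and dot helpers from its own library; below is a self-contained PyTorch sketch of the same computation, under the assumption that dot is an unnormalized sum-product along the given dimension and that the inputs are unit vectors:

import torch

A = torch.tensor([[1.0, 0.0, 0.0]])
B = torch.tensor([[0.0, 1.0, 0.0]])
cos_theta = torch.clamp((A * B).sum(dim=1), -1.0, 1.0)
print(torch.acos(cos_theta))  # tensor([1.5708]) -> pi/2 radians between orthogonal unit vectors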
import functools def _run_lint_helper( *, fail_on_missing_sub_src, exclude_lint, warn_lint, site_name=None): """Helper for executing lint on specific site or all sites in repo.""" if site_name: func = functools.partial(engine.lint.site, site_name=site_name) else: func = engine.lint.full warns = func( fail_on_missing_sub_src=fail_on_missing_sub_src, exclude_lint=exclude_lint, warn_lint=warn_lint) return warns
a73e2e9a4bb968376622308cf7af2f97f6533595
3,655,229
def simulate_from_orders_nb(target_shape: tp.Shape, group_lens: tp.Array1d, init_cash: tp.Array1d, call_seq: tp.Array2d, size: tp.ArrayLike = np.asarray(np.inf), price: tp.ArrayLike = np.asarray(np.inf), size_type: tp.ArrayLike = np.asarray(SizeType.Amount), direction: tp.ArrayLike = np.asarray(Direction.Both), fees: tp.ArrayLike = np.asarray(0.), fixed_fees: tp.ArrayLike = np.asarray(0.), slippage: tp.ArrayLike = np.asarray(0.), min_size: tp.ArrayLike = np.asarray(0.), max_size: tp.ArrayLike = np.asarray(np.inf), size_granularity: tp.ArrayLike = np.asarray(np.nan), reject_prob: tp.ArrayLike = np.asarray(0.), lock_cash: tp.ArrayLike = np.asarray(False), allow_partial: tp.ArrayLike = np.asarray(True), raise_reject: tp.ArrayLike = np.asarray(False), log: tp.ArrayLike = np.asarray(False), val_price: tp.ArrayLike = np.asarray(np.inf), close: tp.ArrayLike = np.asarray(np.nan), auto_call_seq: bool = False, ffill_val_price: bool = True, update_value: bool = False, max_orders: tp.Optional[int] = None, max_logs: int = 0, flex_2d: bool = True) -> tp.Tuple[tp.RecordArray, tp.RecordArray]: """Creates on order out of each element. Iterates in the column-major order. Utilizes flexible broadcasting. !!! note Should be only grouped if cash sharing is enabled. If `auto_call_seq` is True, make sure that `call_seq` follows `CallSeqType.Default`. Single value should be passed as a 0-dim array (for example, by using `np.asarray(value)`). Usage: * Buy and hold using all cash and closing price (default): ```pycon >>> import numpy as np >>> from vectorbt.records.nb import col_map_nb >>> from vectorbt.portfolio.nb import simulate_from_orders_nb, asset_flow_nb >>> from vectorbt.portfolio.enums import Direction >>> close = np.array([1, 2, 3, 4, 5])[:, None] >>> order_records, _ = simulate_from_orders_nb( ... target_shape=close.shape, ... close=close, ... group_lens=np.array([1]), ... init_cash=np.array([100]), ... call_seq=np.full(close.shape, 0) ... 
) >>> col_map = col_map_nb(order_records['col'], close.shape[1]) >>> asset_flow = asset_flow_nb(close.shape, order_records, col_map, Direction.Both) >>> asset_flow array([[100.], [ 0.], [ 0.], [ 0.], [ 0.]]) ``` """ check_group_lens_nb(group_lens, target_shape[1]) cash_sharing = is_grouped_nb(group_lens) check_group_init_cash_nb(group_lens, target_shape[1], init_cash, cash_sharing) order_records, log_records = init_records_nb(target_shape, max_orders, max_logs) init_cash = init_cash.astype(np.float_) last_position = np.full(target_shape[1], 0., dtype=np.float_) last_debt = np.full(target_shape[1], 0., dtype=np.float_) last_val_price = np.full(target_shape[1], np.nan, dtype=np.float_) order_price = np.full(target_shape[1], np.nan, dtype=np.float_) temp_order_value = np.empty(target_shape[1], dtype=np.float_) oidx = 0 lidx = 0 from_col = 0 for group in range(len(group_lens)): to_col = from_col + group_lens[group] group_len = to_col - from_col cash_now = init_cash[group] free_cash_now = init_cash[group] for i in range(target_shape[0]): for k in range(group_len): col = from_col + k # Resolve order price _price = flex_select_auto_nb(price, i, col, flex_2d) if np.isinf(_price): if _price > 0: _price = flex_select_auto_nb(close, i, col, flex_2d) # upper bound is close elif i > 0: _price = flex_select_auto_nb(close, i - 1, col, flex_2d) # lower bound is prev close else: _price = np.nan # first timestamp has no prev close order_price[col] = _price # Resolve valuation price _val_price = flex_select_auto_nb(val_price, i, col, flex_2d) if np.isinf(_val_price): if _val_price > 0: _val_price = _price # upper bound is order price elif i > 0: _val_price = flex_select_auto_nb(close, i - 1, col, flex_2d) # lower bound is prev close else: _val_price = np.nan # first timestamp has no prev close if not np.isnan(_val_price) or not ffill_val_price: last_val_price[col] = _val_price # Calculate group value and rearrange if cash sharing is enabled if cash_sharing: # Same as get_group_value_ctx_nb but with flexible indexing value_now = cash_now for k in range(group_len): col = from_col + k if last_position[col] != 0: value_now += last_position[col] * last_val_price[col] # Dynamically sort by order value -> selling comes first to release funds early if auto_call_seq: # Same as sort_by_order_value_ctx_nb but with flexible indexing for k in range(group_len): col = from_col + k temp_order_value[k] = approx_order_value_nb( flex_select_auto_nb(size, i, col, flex_2d), flex_select_auto_nb(size_type, i, col, flex_2d), flex_select_auto_nb(direction, i, col, flex_2d), cash_now, last_position[col], free_cash_now, last_val_price[col], value_now ) # Sort by order value insert_argsort_nb(temp_order_value[:group_len], call_seq[i, from_col:to_col]) for k in range(group_len): col = from_col + k if cash_sharing: col_i = call_seq[i, col] if col_i >= group_len: raise ValueError("Call index exceeds bounds of the group") col = from_col + col_i # Get current values per column position_now = last_position[col] debt_now = last_debt[col] val_price_now = last_val_price[col] if not cash_sharing: value_now = cash_now if position_now != 0: value_now += position_now * val_price_now # Generate the next order order = order_nb( size=flex_select_auto_nb(size, i, col, flex_2d), price=order_price[col], size_type=flex_select_auto_nb(size_type, i, col, flex_2d), direction=flex_select_auto_nb(direction, i, col, flex_2d), fees=flex_select_auto_nb(fees, i, col, flex_2d), fixed_fees=flex_select_auto_nb(fixed_fees, i, col, flex_2d), 
slippage=flex_select_auto_nb(slippage, i, col, flex_2d), min_size=flex_select_auto_nb(min_size, i, col, flex_2d), max_size=flex_select_auto_nb(max_size, i, col, flex_2d), size_granularity=flex_select_auto_nb(size_granularity, i, col, flex_2d), reject_prob=flex_select_auto_nb(reject_prob, i, col, flex_2d), lock_cash=flex_select_auto_nb(lock_cash, i, col, flex_2d), allow_partial=flex_select_auto_nb(allow_partial, i, col, flex_2d), raise_reject=flex_select_auto_nb(raise_reject, i, col, flex_2d), log=flex_select_auto_nb(log, i, col, flex_2d) ) # Process the order state = ProcessOrderState( cash=cash_now, position=position_now, debt=debt_now, free_cash=free_cash_now, val_price=val_price_now, value=value_now, oidx=oidx, lidx=lidx ) order_result, new_state = process_order_nb( i, col, group, state, update_value, order, order_records, log_records ) # Update state cash_now = new_state.cash position_now = new_state.position debt_now = new_state.debt free_cash_now = new_state.free_cash val_price_now = new_state.val_price value_now = new_state.value oidx = new_state.oidx lidx = new_state.lidx # Now becomes last last_position[col] = position_now last_debt[col] = debt_now if not np.isnan(val_price_now) or not ffill_val_price: last_val_price[col] = val_price_now from_col = to_col return order_records[:oidx], log_records[:lidx]
32898fa1a1aadf50d6d07553da8e7bed94f3de0e
3,655,230
def get_data_value(k: int, data: bytes) -> bytes: """Extracts the kth value from data. data should be in the format value0:value1:value2:...:valueN. This last representation is merely for understanding the logic. In practice, data will be a sequence of bytes, with each value preceded by the length of such value. # TODO allow values larger than 255 bytes (some logic similar to OP_PUSHDATA1?) :param k: index of item to retrieve :type k: int :param data: data to get value from :type data: bytes :raises OutOfData: if data length to read is larger than what's available :raises DataIndexError: index requested from data is not available """ data_len = len(data) position = 0 iteration = 0 while position < data_len: length = data[position] if length == 0: # TODO throw error pass position += 1 if (position + length) > len(data): raise OutOfData('trying to read {} bytes starting at {}, available {}'.format(length, position, len(data))) value = data[position:position + length] if iteration == k: return value iteration += 1 position += length raise DataIndexError
9ee1c2370ff8935df2f26680c73655b89dfcc7aa
3,655,231
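A usage sketch for get_data_value above with a hypothetical length-prefixed payload; each value is preceded by a single length byte, and the OutOfData / DataIndexError exceptions are assumed to be defined alongside the function:

data = bytes([3]) + b"foo" + bytes([4]) + b"spam"
print(get_data_value(0, data))  # b'foo'
print(get_data_value(1, data))  # b'spam'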
def exp_map_individual(network, variable, max_degree): """Summary measure calculate for the non-parametric mapping approach described in Sofrygin & van der Laan (2017). This approach works best for networks with uniform degree distributions. This summary measure generates a number of columns (a total of ``max_degree``). Each column is then an indicator variable for each observation. To keep all columns the same number of dimensions, zeroes are filled in for all degrees above unit i's observed degree. Parameters ---------- network : networkx.Graph The NetworkX graph object to calculate the summary measure for. variable : str Variable to calculate the summary measure for (this will always be the exposure variable internally). max_degree : int Maximum degree in the network (defines the number of columns to generate). Returns ------- dataframe Data set containing all generated columns """ attrs = [] for i in network.nodes: j_attrs = [] for j in network.neighbors(i): j_attrs.append(network.nodes[j][variable]) attrs.append(j_attrs[:max_degree]) return pd.DataFrame(attrs, columns=[variable+'_map'+str(x+1) for x in range(max_degree)])
cb4424ad10dae3df4a3d60ec5d7b143b2130a9bb
3,655,232
def bridge_meshes(Xs, Ys, Zs, Cs): """ Concatenate multiple meshes, with hidden transparent bridges, to a single mesh, so that plt.plot_surface uses correct drawing order between meshes (as it really should) :param list Xs: list of x-coordinates for each mesh :param list Ys: list of y-coordinates for each mesh :param list Zs: list of z-coordinates for each mesh :param list Cs: list of colors for each mesh :return: Concatenated meshes X_full, Y_full, Z_full, C_full """ assert len(Xs) == len(Ys) == len(Zs) == len(Cs) if len(Xs) > 2: X1, Y1, Z1, C1 = bridge_meshes(Xs[1:], Ys[1:], Zs[1:], Cs[1:]) elif len(Xs) == 2: X1, Y1, Z1, C1 = Xs[1], Ys[1], Zs[1], Cs[1] else: raise Exception X0, Y0, Z0, C0 = Xs[0], Ys[0], Zs[0], Cs[0] X_bridge = np.vstack(np.linspace(X0[-1, :], X1[-1, :], 1)) Y_bridge = np.vstack(np.linspace(Y0[-1, :], Y1[-1, :], 1)) Z_bridge = np.vstack(np.linspace(Z0[-1, :], Z1[-1, :], 1)) color_bridge = np.empty_like(Z_bridge, dtype=object) color_bridge.fill((1, 1, 1, 0)) # Make the bridge transparant # Join surfaces X_full = np.vstack([X0, X_bridge, X1]) Y_full = np.vstack([Y0, Y_bridge, Y1]) Z_full = np.vstack([Z0, Z_bridge, Z1]) color_full = np.vstack([C0, color_bridge, C1]) return X_full, Y_full, Z_full, color_full
389948e3d357cb7a87e844eee8417f2466c41cab
3,655,233
def get_groups(): """ Get the list of label groups. @return: the list of label groups. """ labels_dict = load_yaml_from_file("labels") groups = [] for group_info in labels_dict["groups"]: label_infos = group_info.pop("labels", []) group = Group(**group_info) groups.append(group) for label_info in label_infos: Label(**label_info, group=group) return groups
03822287ab1a2525560f6fdf2a55a3c2461c6bea
3,655,234
def diffractometer_rotation(phi=0, chi=0, eta=0, mu=0): """ Generate the 6-axis diffractometer rotation matrix R = M * E * X * P Also called Z in H. You, J. Appl. Cryst 32 (1999), 614-623 :param phi: float angle in degrees :param chi: float angle in degrees :param eta: float angle in degrees :param mu: float angle in degrees :return: [3*3] array """ P = rotmatrixz(phi) X = rotmatrixy(chi) E = rotmatrixz(eta) M = rotmatrixx(mu) return np.dot(M, np.dot(E, np.dot(X, P)))
7f56caf6585f74406b8f681614c6a6f32592ad91
3,655,235
def supports_build_in_container(config): """ Given a workflow config, this method provides a boolean on whether the workflow can run within a container or not. Parameters ---------- config namedtuple(Capability) Config specifying the particular build workflow Returns ------- tuple(bool, str) True, if this workflow can be built inside a container. False, along with a reason message if it cannot be. """ def _key(c): return str(c.language) + str(c.dependency_manager) + str(c.application_framework) # This information could have been bundled inside the Workflow Config object. But we do it this way because # ultimately the workflow's implementation dictates whether it can run within a container or not. # A "workflow config" is like a primary key to identify the workflow. So we use the config as a key in the # map to identify which workflows can support building within a container. unsupported = { _key(DOTNET_CLIPACKAGE_CONFIG): "We do not support building .NET Core Lambda functions within a container. " "Try building without the container. Most .NET Core functions will build " "successfully.", _key(GO_MOD_CONFIG): "We do not support building Go Lambda functions within a container. " "Try building without the container. Most Go functions will build " "successfully.", } thiskey = _key(config) if thiskey in unsupported: return False, unsupported[thiskey] return True, None
278bde73252d13784298d01d954a56fcecd986dc
3,655,236
import SimpleITK as sitk def get_img_array_mhd(img_file): """Image array in zyx convention with dtype = int16.""" itk_img = sitk.ReadImage(img_file) img_array_zyx = sitk.GetArrayFromImage(itk_img) # indices are z, y, x origin = itk_img.GetOrigin() # x, y, z world coordinates (mm) origin_zyx = [origin[2], origin[1], origin[0]] # z, y, x spacing = itk_img.GetSpacing() # x, y, z world coordinates (mm) spacing_zyx = [spacing[2], spacing[1], spacing[0]] # z, y, x acquisition_exception = None # no acquisition number found in object return img_array_zyx, spacing_zyx, origin_zyx, acquisition_exception
6c6bafedf34aaf0c03367c9058b29401bf133fd0
3,655,237
def registration(request): """Render the registration page.""" if request.user.is_authenticated: return redirect(reverse('index')) if request.method == 'POST': registration_form = UserRegistrationForm(request.POST) if registration_form.is_valid(): registration_form.save() user = auth.authenticate(username=request.POST['username'], password=request.POST['password1']) if user: auth.login(user=user, request=request) messages.success(request, "You have registered successfully.") return redirect(reverse('index')) else: messages.error(request, "Unable to register your account at this time.") else: registration_form = UserRegistrationForm() return render(request, 'registration.html', {"registration_form": registration_form})
dae59392e290291d9d81ca427ee35b07c6ed554b
3,655,238
def _get_arc2height(arcs): """ Parameters ---------- arcs: list[(int, int)] Returns ------- dict[(int, int), int] """ # arc2height = {(b,e): np.abs(b - e) for b, e in arcs} n_arcs = len(arcs) arcs_sorted = sorted(arcs, key=lambda x: np.abs(x[0] - x[1])) arc2height = {arc: 1 for arc in arcs} for arc_i in range(n_arcs): bi, ei = sorted(arcs_sorted[arc_i]) for arc_j in range(n_arcs): if arc_i == arc_j: continue bj, ej = sorted(arcs_sorted[arc_j]) if bi <= bj <= ej <= ei: arc2height[arcs_sorted[arc_i]] = max(arc2height[arcs_sorted[arc_j]] + 1, arc2height[arcs_sorted[arc_i]]) return arc2height
feb929e9f2e23e1c154423930ae33944b95af699
3,655,239
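A usage sketch for _get_arc2height above (assuming NumPy is imported as np alongside the entry); the outer arc covers both short arcs, so it is assigned a height one level above them:

import numpy as np

arcs = [(0, 5), (1, 2), (3, 4)]
print(_get_arc2height(arcs))  # {(0, 5): 2, (1, 2): 1, (3, 4): 1}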
from os.path import dirname, exists, join from shutil import copyfile def init_ycm(path): """ Generate a ycm_extra_conf.py file in the given path dir to specify compilation flags for a project. This is necessary to get semantic analysis for c-family languages. Check ycmd docs for more details. """ conf = join(path, '.ycm_extra_conf.py') if exists(conf): root.status.set_msg('File overwritten: %s' % conf) copyfile(join(dirname(__file__), 'ycm_extra_conf.py'), conf) return conf
361d744982c2a8c4fd1e787408150381a3b111d3
3,655,240
def get_aggregate_stats_flows_single_appliance( self, ne_pk: str, start_time: int, end_time: int, granularity: str, traffic_class: int = None, flow: str = None, ip: str = None, data_format: str = None ) -> dict: """Get aggregate flow stats data for a single appliance filter by query parameters .. list-table:: :header-rows: 1 * - Swagger Section - Method - Endpoint * - aggregateStats - GET - /stats/aggregate/flow/{nePk} :param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE`` :type ne_pk: str :param start_time: Long(Signed 64 bits) value of seconds since EPOCH time indicating the starting time boundary of data time range :type start_time: int :param end_time: Long(Signed 64 bits) value of seconds since EPOCH time indicating the ending time boundary of data time range :type end_time: int :param granularity: Data granularity filtering whether data is minutely data, hourly data or daily data. Accepted values are ``minute``, ``hour``, and ``day`` :type granularity: str :param traffic_class: Filter for data which belongs to particular traffic class, accepted values between 1-10, defaults to None :type traffic_class: int, optional :param flow: Filter for data of a particular flow type. Accepted values are "TCP_ACCELERATED" "TCP_NOT_ACCELERATED" "NON_TCP", defaults to None :type flow: str, optional :param ip: ``True`` to use IP address as key to sort results or ``False`` or ``None`` for default sorting by appliance ID, defaults to None :type ip: bool, optional :param data_format: The only format other than JSON currently supported is CSV, accepted value is ``csv``, defaults to None :type data_format: str, optional :return: Returns dictionary of aggregate stats filtered by query parameters :rtype: dict """ path = ( "/stats/aggregate/flow/" + "{}?startTime={}&endTime={}&granularity={}".format( ne_pk, start_time, end_time, granularity ) ) if traffic_class is not None: path = path + "&trafficClass={}".format(traffic_class) if flow is not None: path = path + "&flow={}".format(flow) if ip is not None: path = path + "&ip={}".format(ip) if data_format is not None: path = path + "&format={}".format(data_format) return self._get(path)
5ca6e2b5ce1b176aea603a254b0ca655e0f43c0c
3,655,241
def load_user(userid): """Callback to load user from db, called by Flask-Login""" db = get_db() user = db.execute("SELECT id FROM users WHERE id = ?", [userid]).fetchone() if user is not None: return User(user[0]) return None
0dd9516af3670794c107bd6633c74a033f0a4983
3,655,242
import torch def get_partial_outputs_with_prophecies(prophecies, loader, model, my_device, corpus, seq2seq): """ Parameters ---------- prophecies : dict Dictionary mapping from sequence index to a list of prophecies, one for each prefix in the sequence. loader : torch.utils.data.dataloader.DataLoader Data loader, batch must have size 1. model : models.<model> NN model not BERT my_device : torch.device PyTorch device. label_pad : int Index of padding label. seq2seq : bool True if sequence tagging, else False for sequence classification. Returns ------- results : dict of dicts A dictionary storing partial outputs, accuracy w.r.t. the gold labels and an np matrix that indicates editions. """ # see comments in function above model.eval() results = {'partial_outputs':{}, 'log_changes':{}, 'accuracy':{}} with torch.no_grad(): for x, lens, y, idx in loader: #if idx.item() not in prophecies: # continue x = x.to(my_device) y = y.to(my_device) lens = lens.to(my_device) if seq2seq: predictions = np.zeros((lens, lens)) changes = np.zeros((lens, lens)) else: predictions = np.zeros((lens, 1)) changes = np.zeros((lens, 1)) pad = corpus.word2id['<pad>'] for l in range(1,lens.item()+1): if l != lens.item(): part_x = x[:,:l] # add prophecy prophecy = nltk.word_tokenize( prophecies[idx.item()][l-1][0]) prophecy_ids = torch.tensor([[corpus.word2id.get(w, pad) for w in prophecy[l:]]], dtype=torch.long, device=x.device) part_x = torch.cat((part_x, prophecy_ids),dim=1) part_len = torch.tensor([l+prophecy_ids.shape[1]], device=x.device) # create any y to append will not be used (but cannot be the same idx as # label of predicate in SRL), we use zero and check if 'srl' in corpus.task: assert corpus.label2id['B-V'] != 0 if seq2seq: extra_pad = torch.tensor([[0]*(part_x.shape[1]-l)], device=x.device, dtype=torch.long) part_y = torch.cat((y[:,:l], extra_pad), dim=1) #part_y = torch.zeros((1, part_len.item()), dtype=torch.long, # device=y.device) else: part_y = y else: # complete sentence does not need prophecy part_x = x part_y = y part_len = lens #unpacked, mask = model(x, lens) # _ = (hidden, context) _, predicted = model(part_x, part_len, part_y, seq2seq) if seq2seq: predictions[l-1] = np.array((predicted[:l].tolist() + (lens.item() - l)*[np.inf])) else: predictions[l-1] = np.array((predicted.tolist())) if l == 1: changes[l-1][0] = 1 else: changes[l-1] = predictions[l-1] != predictions[l-2] y = y.reshape(-1) y = torch.tensor([i for i in y if i!=corpus.label2id['<pad>']]) if seq2seq: acc = (predictions[-1] == y.cpu().numpy()).sum() / lens.item() else: acc = (predictions[-1] == y.cpu().numpy()).sum() results['partial_outputs'][idx.item()] = predictions results['log_changes'][idx.item()] = changes results['accuracy'][idx.item()] = acc return results
cae0ed8643f677a5d2a2f3e75858b68f473acc50
3,655,243
from typing import Tuple from typing import Optional from typing import List import io import textwrap from re import I def _generate_deserialize_impl( symbol_table: intermediate.SymbolTable, spec_impls: specific_implementations.SpecificImplementations, ) -> Tuple[Optional[Stripped], Optional[List[Error]]]: """Generate the implementation for deserialization functions.""" blocks = [ _generate_skip_whitespace_and_comments(), _generate_read_whole_content_as_base_64(), ] # type: List[Stripped] errors = [] # type: List[Error] for symbol in symbol_table.symbols: if isinstance(symbol, intermediate.Enumeration): # NOTE (mristin, 2022-04-13): # Enumerations are going to be directly deserialized using # ``Stringification``. continue elif isinstance(symbol, intermediate.ConstrainedPrimitive): # NOTE (mristin, 2022-04-13): # Constrained primitives are only verified, but do not represent a C# type. continue elif isinstance( symbol, (intermediate.AbstractClass, intermediate.ConcreteClass) ): if symbol.is_implementation_specific: implementation_keys = [ specific_implementations.ImplementationKey( f"Xmlization/DeserializeImplementation/" f"{symbol.name}_from_element.cs" ), specific_implementations.ImplementationKey( f"Xmlization/DeserializeImplementation/" f"{symbol.name}_from_sequence.cs" ), ] for implementation_key in implementation_keys: implementation = spec_impls.get(implementation_key, None) if implementation is None: errors.append( Error( symbol.parsed.node, f"The xmlization snippet is missing " f"for the implementation-specific " f"class {symbol.name}: {implementation_key}", ) ) continue else: blocks.append(spec_impls[implementation_key]) else: if isinstance(symbol, intermediate.ConcreteClass): ( block, generation_errors, ) = _generate_deserialize_impl_cls_from_sequence(cls=symbol) if generation_errors is not None: errors.append( Error( symbol.parsed.node, f"Failed to generate the XML deserialization code " f"for the class {symbol.name}", generation_errors, ) ) else: assert block is not None blocks.append(block) if symbol.interface is not None: blocks.append( _generate_deserialize_impl_interface_from_element( interface=symbol.interface ) ) if isinstance(symbol, intermediate.ConcreteClass): blocks.append( _generate_deserialize_impl_concrete_cls_from_element(cls=symbol) ) else: assert_never(symbol) if len(errors) > 0: return None, errors writer = io.StringIO() writer.write( """\ /// <summary> /// Implement the deserialization of meta-model classes from XML. /// </summary> /// <remarks> /// The implementation propagates an <see cref="Reporting.Error" /> instead of /// relying on exceptions. Under the assumption that incorrect data is much less /// frequent than correct data, this makes the deserialization more /// efficient. /// /// However, we do not want to force the client to deal with /// the <see cref="Reporting.Error" /> class as this is not intuitive. /// Therefore we distinguish the implementation, realized in /// <see cref="DeserializeImplementation" />, and the facade given in /// <see cref="Deserialize" /> class. /// </remarks> internal static class DeserializeImplementation { """ ) for i, block in enumerate(blocks): if i > 0: writer.write("\n\n") writer.write(textwrap.indent(block, I)) writer.write("\n} // internal static class DeserializeImplementation") return Stripped(writer.getvalue()), None
3e2e3c78709b75a8b650d775d4b0f8b6c8287ca0
3,655,244
def timestep_to_transition_idx(snapshot_years, transitions, timestep): """Convert timestep to transition index. Args: snapshot_years (list): a list of years corresponding to the provided rasters transitions (int): the number of transitions in the scenario timestep (int): the current timestep Returns: transition_idx (int): the current transition """ for i in range(0, transitions): if timestep < (snapshot_years[i+1] - snapshot_years[0]): return i
96bcda2493fcd51f9c7b335ea75fd612384207e3
3,655,245
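A usage sketch for timestep_to_transition_idx above with hypothetical snapshot years (assuming Python 3); a timestep is mapped to the first transition whose end year, relative to the first snapshot, has not yet been reached:

snapshot_years = [2000, 2005, 2010, 2020]
transitions = 3
print(timestep_to_transition_idx(snapshot_years, transitions, 3))   # 0
print(timestep_to_transition_idx(snapshot_years, transitions, 7))   # 1
print(timestep_to_transition_idx(snapshot_years, transitions, 12))  # 2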
import os def _make_abs_path(path, cwd=None, default=None): """convert 'path' to absolute if necessary (could be already absolute) if not defined (empty, or None), will return 'default' one or 'cwd' """ cwd = cwd or get_cwd() if not path: abs_path = default or cwd elif os.path.isabs(path): abs_path = path else: abs_path = os.path.normpath(os.path.join(cwd, path)) return abs_path
7fbe6187d544935cf3dd933b0796a119c7cf36d0
3,655,246
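A usage sketch for _make_abs_path above with POSIX-style paths; get_cwd is assumed to be a small wrapper around os.getcwd defined elsewhere in the module, and is not hit here because cwd is always passed explicitly:

print(_make_abs_path("logs/run.txt", cwd="/srv/app"))      # /srv/app/logs/run.txt
print(_make_abs_path("/etc/hosts", cwd="/srv/app"))        # /etc/hosts
print(_make_abs_path("", cwd="/srv/app", default="/tmp"))  # /tmp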
def resolve_checks(names, all_checks): """Returns a set of resolved check names. Resolving a check name expands tag references (e.g., "@tag") to all the checks that contain the given tag. OpenShiftCheckException is raised if names contains an unknown check or tag name. names should be a sequence of strings. all_checks should be a sequence of check classes/instances. """ known_check_names = set(check.name for check in all_checks) known_tag_names = set(name for check in all_checks for name in check.tags) check_names = set(name for name in names if not name.startswith('@')) tag_names = set(name[1:] for name in names if name.startswith('@')) unknown_check_names = check_names - known_check_names unknown_tag_names = tag_names - known_tag_names if unknown_check_names or unknown_tag_names: msg = [] if unknown_check_names: msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names)))) if unknown_tag_names: msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names)))) msg.append('Make sure there is no typo in the playbook and no files are missing.') raise OpenShiftCheckException('\n'.join(msg)) tag_to_checks = defaultdict(set) for check in all_checks: for tag in check.tags: tag_to_checks[tag].add(check.name) resolved = check_names.copy() for tag in tag_names: resolved.update(tag_to_checks[tag]) return resolved
d86dcd9a5539aeaa31fb3c86304c62f8d86bbb11
3,655,247
from typing import Optional def swish( data: NodeInput, beta: Optional[NodeInput] = None, name: Optional[str] = None, ) -> Node: """Return a node which performs the Swish activation function: Swish(x, beta=1.0) = x * sigmoid(x * beta). :param data: Tensor with input data of floating point type. :param beta: Scaling factor for the sigmoid; defaults to a constant 1.0 node. :param name: Optional name for the output node. :return: The new node which performs Swish """ if beta is None: beta = make_constant_node(1.0, np.float32) return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {})
d17562d0e63aa1610d9bc641faabec27264a2919
3,655,248
from datetime import datetime def cut_out_interval(data, interval, with_gaps=False): """ Cuts out data from input array. Interval is the start-stop time pair. If with_gaps flag is True, then one NaN value will be added between the remaining two pieces of data. Returns modified data array. :param data: 2-dimensional array with data :param interval: list or array with two time points :type data: np.ndarray :type interval: list or tuple or np.ndarray :return: modified data array, start and stop point of deleted interval :rtype: tuple """ supported_arr_types = "np.ndarray" supported_interval_types = "list or tuple or np.ndarray" assert isinstance(data, np.ndarray), \ "Arr value is of an unsupported type. " \ "Expected {}, got {} instead.".format(supported_arr_types, type(data)) assert data.ndim == 2, \ "Data must be 2-dimensional array. Got {} ndims instead.".format(data.ndim) assert isinstance(interval, list) or \ isinstance(interval, tuple) or \ isinstance(interval, np.ndarray), \ "Interval value is of an unsupported type. " \ "Expected {}, got {} instead." \ "".format(supported_interval_types, type(interval)) assert len(interval) == 2, \ "Unsupported interval length. " \ "Expected 2, got {} instead.".format(len(interval)) assert interval[0] <= interval[1], \ "Left interval border ({}) is greater than the right ({})." \ "".format(interval[0], interval[1]) idx_start, idx_stop = _get_interval_idx(data, interval) if idx_start is None or idx_stop is None: return data, None, None # 1-dimensional mask mask = np.ones(shape=data.shape[1], dtype=bool) # right border value is included mask[idx_start:idx_stop + 1] = False start_str = datetime.fromtimestamp(data[0, idx_start]).strftime("%Y.%m.%d %H:%M:%S") stop_str = datetime.fromtimestamp(data[0, idx_stop]).strftime("%Y.%m.%d %H:%M:%S") # add nan if cutting inner interval if with_gaps and idx_start > 0 and idx_stop < data.shape[1] - 1: # leave one element and replace it with nan mask[idx_stop] = True data[:, idx_stop] = np.nan # masking (cutting out) all columns data = data[:, mask] else: # masking (cutting out) all columns data = data[:, mask] return data, start_str, stop_str
753be7e45102a7e0adc1b19365d10e009c8f6b89
3,655,249
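A standalone numpy sketch of the masking step at the heart of `cut_out_interval` (illustrative only; the real function first maps the two time points to column indices via its `_get_interval_idx` helper and also formats the boundary timestamps):

import numpy as np

# Row 0: unix timestamps, row 1: values.
data = np.array([[0., 1., 2., 3., 4., 5.],
                 [10., 11., 12., 13., 14., 15.]])
idx_start, idx_stop = 2, 4            # columns to cut; right border is included

mask = np.ones(data.shape[1], dtype=bool)
mask[idx_start:idx_stop + 1] = False
# mirror the with_gaps=True branch: keep one column as a NaN gap marker
mask[idx_stop] = True
data[:, idx_stop] = np.nan

print(data[:, mask])
# [[ 0.  1. nan  5.]
#  [10. 11. nan 15.]]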
import re


def _abbreviations_to_word(text: str):
    """
    Expand abbreviations in the sentence into full words.

    :param text: a single sentence of text
    :return: the converted sentence text
    """
    abbreviations = [
        (re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
            ('mrs', 'misess'),
            ('mr', 'mister'),
            ('dr', 'doctor'),
            ('st', 'saint'),
            ('co', 'company'),
            ('jr', 'junior'),
            ('maj', 'major'),
            ('gen', 'general'),
            ('drs', 'doctors'),
            ('rev', 'reverend'),
            ('lt', 'lieutenant'),
            ('hon', 'honorable'),
            ('sgt', 'sergeant'),
            ('capt', 'captain'),
            ('esq', 'esquire'),
            ('ltd', 'limited'),
            ('col', 'colonel'),
            ('ft', 'fort')
        ]
    ]
    for regex, replacement in abbreviations:
        text = re.sub(regex, replacement, text)
    return text
576eb1588c40ab4b9ffa7d368249e520ecf887ba
3,655,250
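A quick usage example for the expansion helper above (each pattern strips the trailing period as well):

print(_abbreviations_to_word("Dr. Smith met Mr. Jones at the St. Clair office."))
# -> doctor Smith met mister Jones at the saint Clair office.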
def resnet56(num_classes=100):
    """Constructs a ResNet-56 model for CIFAR (CIFAR-100 by default; pass num_classes=10 for CIFAR-10).

    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 56, num_classes)
    return model
98070a6a1b6f69b2d253537b604c616ae52de9b2
3,655,251
import logging def _fetch_latest_from_memcache(app_version): """Get the latest configuration data for this app-version from memcache. Args: app_version: the major version you want configuration data for. Returns: A Config class instance for most recently set options or None if none could be found in memcache. """ proto_string = memcache.get(app_version, namespace=NAMESPACE) if proto_string: logging.debug('Loaded most recent conf data from memcache.') return db.model_from_protobuf(proto_string) logging.debug('Tried to load conf data from memcache, but found nothing.') return None
d9d4fc89c492b58e56b847dee7a2d69f98715a9e
3,655,252
def pad_set_room(request):
    """
    Pad: update the associated meeting room.
    :param request:
    :return:
    """
    dbs = request.dbsession
    user_id = request.POST.get('user_id', '')
    room_id = request.POST.get('room_id', '')
    pad_code = request.POST.get('pad_code', '')
    if not user_id:
        error_msg = 'User ID cannot be empty!'
    elif not pad_code:
        error_msg = 'Terminal code cannot be empty!'
    elif not room_id:
        error_msg = 'Meeting room ID cannot be empty!'
    else:
        room, error_msg = set_room(dbs, user_id, pad_code, room_id)
        update_last_time(dbs, pad_code, 'setRoom')
        logger.info('setRoom--user_id:' + user_id + ',pad_code:' + pad_code + ',room_id:' + room_id)
    if error_msg:
        json = {
            'success': 'false',
            'error_msg': error_msg,
        }
    else:
        json = {
            'success': 'true',
            'room': room
        }
    return json
1646204a666e68021c649b6d322b74cbcd515fd2
3,655,253
def airffromrh_wmo(rh_wmo,temp,pres,asat=None,dhsat=None,chkvals=False, chktol=_CHKTOL,asat0=None,dhsat0=None,chkbnd=False,mathargs=None): """Calculate dry fraction from WMO RH. Calculate the dry air mass fraction from the relative humidity. The relative humidity used here is defined by the WMO as: rh_wmo = [(1-airf)/airf] / [(1-asat)/asat] where asat is the dry air fraction at saturation. :arg float rh_wmo: Relative humidity, unitless. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :arg asat: Saturation dry air mass fraction in kg/kg. If unknown, pass None (default) and it will be calculated. :type asat: float or None :arg dhsat: Saturation humid air density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dhsat: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg asat0: Initial guess for the saturation dry air mass fraction in kg/kg. If None (default) then `_approx_tp` is used. :type asat0: float or None :arg dhsat0: Initial guess for the saturation humid air density in kg/m3. If None (default) then `_approx_tp` is used. :type dhsat0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: In-situ dry air mass fraction in kg/kg. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> airffromrh_wmo(0.8,270.,1e5) 0.997645698908 """ asat = massfractionair(temp=temp,pres=pres,airf=asat,dhum=dhsat, chkvals=chkvals,chktol=chktol,airf0=asat0,dhum0=dhsat0,chkbnd=chkbnd, mathargs=mathargs) airf = asat / (rh_wmo*(1-asat) + asat) return airf
1e4418591087a4bd26b48c470239df58087cdb6e
3,655,254
import ast def resolve_If(node: ast.If, tree: ast.Module, context: Context) -> WorkflowStep: """ Make the resolved condition string and body into a workflow step. TODO: support assignments, not just calls TODO: support multi-statement bodies """ if len(node.body) > 1: raise NotImplementedError("Can't yet handle multi-statement bodies. Only single function-calls are allowed.") body = node.body[0] if isinstance(body, ast.Expr) and isinstance(body.value, ast.Call): condition = resolve_cond(node, context.locals) return make_workflow_step( call_node=body.value, tree=tree, context=context, condition=condition, ) else: raise NotImplementedError("Can only transpile function call inside of conditional statements.")
386b154a48ca36a28b6c91c4e355178fadf74eb7
3,655,255
import base64
import zlib
from urllib.parse import unquote


def inflate(data: str) -> str:
    """
    reverses the compression used by draw.io
    see: https://drawio-app.com/extracting-the-xml-from-mxfiles/
    see: https://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations
    :param data: base64 encoded string
    :return: "plain text" version of the deflated data
    """
    data = base64.b64decode(data)
    decompress = zlib.decompressobj(-zlib.MAX_WBITS)
    inflated = decompress.decompress(data)
    inflated += decompress.flush()
    return unquote(inflated.decode('utf-8'))
e4456c7482591611436a92a71464754871461fd5
3,655,256
import operator def get_tree(data_path,sep,root,cutoff,layer_max,up=True): """ This function takes the path of a data file of edge list with numeric weights and returns a tree (DiGraph object). The parameters include: data_path: The path of a data file of edge list with numeric weights. sep: The delimiter of the data file. root: A root node to start with. cutoff: The edge weight threshold. layer_max: The number of layers to explore. up: The direction (upstream or downstream) of the tree. The default is upstream. """ # Read in the network data. F = nx.read_weighted_edgelist(data_path,delimiter=sep,create_using=nx.DiGraph()) # create_using is to specify a directed network, otherwise, an # undirected network is returned. # Filter the edges with the cutoff value. G = nx.DiGraph( [ (u,v,d) for u,v,d in F.edges(data=True) if d['weight']>=cutoff] ) reachset = set() unreachset = set() for n in G.nodes(): if(n != root): unreachset.add(n) else: reachset.add(n) H = nx.DiGraph() # Initiate a tree. oldreach = len(reachset) newreach = oldreach +1 rndcount = 0 if(up==True): # When an upstream tree is requested. while(newreach>oldreach and rndcount<layer_max): oldreach = len(reachset) candidatesIn = {} for ee in G.edges(data = True): e1 = ee[0] e2 = ee[1] w = ee[2]['weight'] if(e2 in reachset and e1 in unreachset): # e2 in reachset because the direction is upstream. candidatesIn[(e1,e2)] = w sorted_edges_in = sorted(candidatesIn.iteritems(), key=operator.itemgetter(1), reverse = True) # reverse = True is to pick the edge with the largest weight # first. Otherwise, the edge with the smallest weight will be # picked first. if(len(sorted_edges_in) > 0): for se in sorted_edges_in: if (se[0][0] in unreachset): # The same candidate node may appear more than once # connecting with different existing nodes. So # se[0][0] needs to be checked if still in # unreachset before being added. This is to ensure # that all the nodes in the tree are unique. For # each round/layer of search, the edge with a # larger weight is preferred. reachset.add(se[0][0]) unreachset.remove(se[0][0]) H.add_edge(se[0][0],se[0][1],weight=se[1],layer=rndcount+1) # The edge attribute layer is added. H.node[se[0][0]]['dist']=rndcount+1 # The node attribute dist (distance from the root) is added. newreach=len(reachset) else: newreach=oldreach rndcount=rndcount+1 if(H.number_of_nodes()>0): # Error if empty tree. H.node[root]['dist']=0 # Add the attribute dist for the root. else: # When a downstream tree is requested. while(newreach>oldreach and rndcount<layer_max): oldreach = len(reachset) candidatesOut = {} for ee in G.edges(data = True): e1 = ee[0] e2 = ee[1] w = ee[2]['weight'] if(e1 in reachset and e2 in unreachset): # e1 in reachset because the direction is downstream. candidatesOut[(e1,e2)] = w sorted_edges_out = sorted(candidatesOut.iteritems(), key=operator.itemgetter(1), reverse = True) # reverse = True is to pick the edge with the largest weight # first. Otherwise, the edge with the smallest weight will be # picked first. if(len(sorted_edges_out) > 0): for se in sorted_edges_out: if (se[0][1] in unreachset): # The same candidate node may appear more than once # connecting with different existing nodes. So # se[0][1] needs to be checked if still in # unreachset before being added. This is to ensure # that all the nodes in the tree are unique. For # each round/layer of search, the edge with a # larger weight is preferred. 
reachset.add(se[0][1]) unreachset.remove(se[0][1]) H.add_edge(se[0][0],se[0][1],weight=se[1],layer=rndcount+1) # The edge attribute layer is added. H.node[se[0][1]]['dist']=rndcount+1 # The node attribute dist (distance from the root) is added. newreach=len(reachset) else: newreach=oldreach rndcount=rndcount+1 if(H.number_of_nodes()>0): # Error if empty tree. H.node[root]['dist']=0 # Add the attribute dist for the root. return H
6f2d151aac39786311c61da4f38140e6c0159562
3,655,257
def delete_functions(lambda_client, function_list) -> list: """Deletes all instances in the instances parameter. Args: lambda_client: A lambda boto3 client function_list: A list of instances you want deleted. Returns: A count of deleted instances """ terminated_functions = [] for lambda_function in function_list: function_name = lambda_function["FunctionName"] if helpers.check_in_whitelist(function_name, WHITELIST_NAME): continue try: lambda_client.delete_function( FunctionName=function_name ) except ClientError as error: error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME, function_name) print(error_string) terminated_functions.append(error_string) continue terminated_functions.append(lambda_function["FunctionName"]) return terminated_functions
f0ca59647f6813d04bf2bbd6ec33ed7744acdd04
3,655,258
def make_random_shares(seed, minimum, n_shares, share_strength=256): """ Generates a random shamir pool for a given seed phrase. Returns share points as seeds phrases (word list). """ if minimum > n_shares: raise ValueError( "More shares needed (%d) to recover the seed phrase than created " "(%d). Seed phrase would be irrecoverable." % (minimum, n_shares) ) seed_length = len(seed.split(" ")) if seed_length not in LENGTH_ALLOWED: raise ValueError( "Seed phrase should have %s words, but not %d words." % (LENGTH_STR, seed_length) ) seed_strength = seed_length // 3 * 32 if share_strength not in STRENGTH_ALLOWED: raise ValueError( "Share strength should be one of the following %s. " "But it is not (%d)." % (STRENGTH_STR, share_strength) ) if share_strength < seed_strength: raise ValueError( "Share strength (%d) is lower that seed strength (%d). Seed phrase " "would be irrecoverable." % (share_strength, seed_strength) ) prime = PRIMES[share_strength] secret = seed_to_int(seed) poly = [secret] + [random_int(prime - 1) for i in range(minimum - 1)] points = [(i, _eval_at(poly, i, prime)) for i in range(1, n_shares + 1)] shares = [(i, int_to_seed(point, strength=share_strength)) for i, point in points] return shares
a8496909cc06f3663d07036e54af744ac7e26b18
3,655,259
from typing import Optional from typing import Sequence def confusion_matrix( probs: Optional[Sequence[Sequence]] = None, y_true: Optional[Sequence] = None, preds: Optional[Sequence] = None, class_names: Optional[Sequence[str]] = None, title: Optional[str] = None, ): """ Computes a multi-run confusion matrix. Arguments: probs (2-d arr): Shape [n_examples, n_classes] y_true (arr): Array of label indices. preds (arr): Array of predicted label indices. class_names (arr): Array of class names. Returns: Nothing. To see plots, go to your W&B run page then expand the 'media' tab under 'auto visualizations'. Example: ``` vals = np.random.uniform(size=(10, 5)) probs = np.exp(vals)/np.sum(np.exp(vals), keepdims=True, axis=1) y_true = np.random.randint(0, 5, size=(10)) labels = ["Cat", "Dog", "Bird", "Fish", "Horse"] wandb.log({'confusion_matrix': wandb.plot.confusion_matrix(probs, y_true=y_true, class_names=labels)}) ``` """ np = util.get_module( "numpy", required="confusion matrix requires the numpy library, install with `pip install numpy`", ) # change warning assert probs is None or len(probs.shape) == 2, ( "confusion_matrix has been updated to accept" " probabilities as the default first argument. Use preds=..." ) assert (probs is None or preds is None) and not ( probs is None and preds is None ), "Must provide probabilties or predictions but not both to confusion matrix" if probs is not None: preds = np.argmax(probs, axis=1).tolist() assert len(preds) == len( y_true ), "Number of predictions and label indices must match" if class_names is not None: n_classes = len(class_names) class_inds = [i for i in range(n_classes)] assert max(preds) <= len( class_names ), "Higher predicted index than number of classes" assert max(y_true) <= len( class_names ), "Higher label class index than number of classes" else: class_inds = set(preds).union(set(y_true)) n_classes = len(class_inds) class_names = [f"Class_{i}" for i in range(1, n_classes + 1)] # get mapping of inds to class index in case user has weird prediction indices class_mapping = {} for i, val in enumerate(sorted(list(class_inds))): class_mapping[val] = i counts = np.zeros((n_classes, n_classes)) for i in range(len(preds)): counts[class_mapping[y_true[i]], class_mapping[preds[i]]] += 1 data = [] for i in range(n_classes): for j in range(n_classes): data.append([class_names[i], class_names[j], counts[i, j]]) fields = { "Actual": "Actual", "Predicted": "Predicted", "nPredictions": "nPredictions", } title = title or "" return wandb.plot_table( "wandb/confusion_matrix/v1", wandb.Table(columns=["Actual", "Predicted", "nPredictions"], data=data), fields, {"title": title}, )
c2b63ccf7e3f226b6bfbea4656bc816eaa6e336a
3,655,260
import html def get_monitor_details(): """Render the index page.""" monitor_id = paranoid_clean(request.args.get('id')) monitors = mongo.db[app.config['MONITORS_COLLECTION']] monitor = monitors.find_one({'hashed': monitor_id}, {'_id': 0}) if not monitor: return jsonify({'success': False, 'error': 'Monitor was not found.'}) articles = mongo.db[app.config['ARTICLES_COLLECTION']] link = monitor['metadata']['rss_link'] articles = list(articles.find({'feed_source': link}, {'_id': 0})) for idx, item in enumerate(articles): articles[idx]['title'] = html.unescape(item['title']) articles[idx]['date'] = item['collected'][:10] articles.sort(key=lambda x: x['collected'], reverse=True) return jsonify({'success': True, 'monitor': monitor, 'articles': articles})
6a45ed67ff79216c9048ce9e3ed80be4e43b9bd9
3,655,261
def _simplify(obj: object) -> object:
    """
    This function takes an object as input and returns a simple
    Python object which is supported by the chosen serialization
    method (such as JSON or msgpack). The reason we have this function
    is that some objects are either NOT supported by high level (fast)
    serializers OR the high level serializers don't support the fastest
    form of serialization. For example, PyTorch tensors have custom pickle
    functionality, thus it's better to pre-serialize PyTorch tensors using
    pickle and then serialize the binary in with the rest of the message
    being sent.

    Args:
        obj: an object which may need to be simplified

    Returns:
        obj: a simple Python object which msgpack can serialize
    """
    try:
        # check to see if there is a simplifier for this type.
        # If there is, run it and return the simplified object
        current_type = type(obj)
        result = (simplifiers[current_type][0], simplifiers[current_type][1](obj))
        return result

    except KeyError:
        # if there is not a simplifier for this object, then the
        # object is already a simple python object and we can just
        # return it
        return obj
fc17b64e3701faa70ea5bfb36a8e2b9195dcbab1
3,655,262
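An illustrative sketch of how the module-level `simplifiers` registry consulted above might look; the keys and ID numbers here are hypothetical, not the actual library's:

# Hypothetical registry: type -> (type_id, simplify_function)
simplifiers = {
    bytes: (0, lambda obj: obj.hex()),
    complex: (1, lambda obj: (obj.real, obj.imag)),
}

print(_simplify(complex(1, 2)))   # -> (1, (1.0, 2.0)) via the registered simplifier
print(_simplify(42))              # -> 42, already a simple object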
import copy def match_v2v3(aperture_1, aperture_2, verbose=False): """Use the V2V3 from aperture_1 in aperture_2 modifying X[Y]DetRef,X[Y]SciRef to match. Also shift the polynomial coefficients to reflect the new reference point origin and for NIRCam recalculate angles. Parameters ---------- aperture_1 : `pysiaf.Aperture object` Aperture whose V2,V3 reference position is to be used aperture_2 : `pysiaf.Aperture object` The V2,V3 reference position is to be altered to match that of aperture_1 verbose : bool verbosity Returns ------- new_aperture_2: `pysiaf.Aperture object` An aperture object derived from aperture_2 but with some parameters changed to match altered V2V3. """ instrument = aperture_1.InstrName assert instrument != 'NIRSPEC', 'Program not working for NIRSpec' assert (aperture_2.AperType in ['FULLSCA', 'SUBARRAY', 'ROI']), \ "2nd aperture must be pixel-based" order = aperture_1.Sci2IdlDeg V2Ref1 = aperture_1.V2Ref V3Ref1 = aperture_1.V3Ref newV2Ref = V2Ref1 newV3Ref = V3Ref1 if verbose: print('Current Vref', aperture_2.V2Ref, aperture_2.V3Ref) print('Shift to ', V2Ref1, V3Ref1) # Need to work in aperture 2 coordinate systems aperName_1 = aperture_1.AperName aperName_2 = aperture_2.AperName detector_1 = aperName_1.split('_')[0] detector_2 = aperName_2.split('_')[0] if verbose: print('Detector 1', detector_1, ' Detector 2', detector_2) V2Ref2 = aperture_2.V2Ref V3Ref2 = aperture_2.V3Ref theta0 = aperture_2.V3IdlYAngle if verbose: print('Initial VRef', V2Ref2, V3Ref2) print('Initial theta', theta0) theta = radians(theta0) coefficients = aperture_2.get_polynomial_coefficients() A = coefficients['Sci2IdlX'] B = coefficients['Sci2IdlY'] C = coefficients['Idl2SciX'] D = coefficients['Idl2SciY'] if verbose: print('\nA') print_triangle(A) print('B') print_triangle(B) print('C') print_triangle(C) print('D') print_triangle(D) (stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(A, B, C, D, verbose=verbose, instrument=instrument) print('Round trip X Y') print(' Means%8.4F %8.4f' % (xmean, ymean)) print(' STDs%8.4f %8.4f' % (xstd, ystd)) # Use convert (newXSci, newYSci) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'sci') (newXDet, newYDet) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'det') (newXIdl, newYIdl) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'idl') dXSciRef = newXSci - aperture_2.XSciRef dYSciRef = newYSci - aperture_2.YSciRef AS = shift_coefficients(A, dXSciRef, dYSciRef) BS = shift_coefficients(B, dXSciRef, dYSciRef) if verbose: print('VRef1', V2Ref1, V3Ref1) print('Idl', newXIdl, newYIdl) print('Shift pixel origin by', dXSciRef, dYSciRef) print('New Ideal origin', newXIdl, newYIdl) CS = shift_coefficients(C, AS[0], BS[0]) DS = shift_coefficients(D, AS[0], BS[0]) AS[0] = 0.0 BS[0] = 0.0 CS[0] = 0.0 DS[0] = 0.0 if verbose: print('\nShifted Polynomials') print('AS') print_triangle(AS) print('BS') print_triangle(BS) print('CS') print_triangle(CS) print('DS') print_triangle(DS) print('\nABCDS') (stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(AS, BS, CS, DS, verbose=verbose, instrument=instrument) if verbose: print('Round trip X Y') print(' Means%8.4F %8.4f' % (xmean, ymean)) print(' STDs%8.4f %8.4f' % (xstd, ystd)) newA = AS newB = BS newC = CS newD = DS new_aperture_2 = copy.deepcopy(aperture_2) # For NIRCam only, adjust angles if instrument == 'NIRCAM': newV3IdlYAngle = degrees(atan2(-AS[2], BS[2])) # Everything rotates by this amount if abs(newV3IdlYAngle) > 90.0: newV3IdlYAngle = newV3IdlYAngle - copysign(180, newV3IdlYAngle) newA = 
AS*cos(radians(newV3IdlYAngle)) + BS*sin(radians(newV3IdlYAngle)) newB = -AS*sin(radians(newV3IdlYAngle)) + BS*cos(radians(newV3IdlYAngle)) if verbose: print('New angle', newV3IdlYAngle) print('\nnewA') print_triangle(newA) print('newB') print_triangle(newB) newC = prepend_rotation_to_polynomial(CS, -newV3IdlYAngle) newD = prepend_rotation_to_polynomial(DS, -newV3IdlYAngle) if verbose: print('newC') print_triangle(newC) print('newD') print_triangle(newD) (stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(newA, newB, newC, newD, verbose=verbose, instrument=instrument) print('\nFinal coefficients') print('Round trip X Y') print(' Means%8.4F %8.4f' % (xmean, ymean)) print(' STDs%8.4f %8.4f' % (xstd, ystd)) newV3SciXAngle = aperture_2.V3SciXAngle + newV3IdlYAngle newV3SciYAngle = aperture_2.V3SciXAngle + newV3IdlYAngle newV3IdlYAngle = aperture_2.V3IdlYAngle + newV3IdlYAngle new_aperture_2.V3SciXAngle = newV3SciXAngle new_aperture_2.V3SciYAngle = newV3SciYAngle new_aperture_2.V3IdlYAngle = newV3IdlYAngle # Set new values in new_aperture_2 new_aperture_2.V2Ref = newV2Ref new_aperture_2.V3Ref = newV3Ref new_aperture_2.XDetRef = newXDet new_aperture_2.YDetRef = newYDet new_aperture_2.XSciRef = newXSci new_aperture_2.YSciRef = newYSci if verbose: print('Initial', aperture_2.V2Ref, aperture_2.V3Ref, aperture_2.XDetRef, aperture_2.YDetRef) print('Changes', newV2Ref, newV3Ref, newXDet, newYDet) print('Modified', new_aperture_2.V2Ref, new_aperture_2.V3Ref, new_aperture_2.XDetRef, new_aperture_2.YDetRef) new_aperture_2.set_polynomial_coefficients(newA, newB, newC, newD) (xcorners, ycorners) = new_aperture_2.corners('idl', rederive=True) for c in range(4): suffix = "{}".format(c+1) setattr(new_aperture_2, 'XIdlVert' + suffix, xcorners[c]) setattr(new_aperture_2, 'YIdlVert' + suffix, ycorners[c]) return new_aperture_2
295eb72c43f073f71b1cedaf8a94d6b1cc61dbf7
3,655,263
import sys def PlatformPager() -> PagerCommand: """ Return the default pager command for the current platform. """ if sys.platform.startswith('aix'): return More() if sys.platform.startswith('win32'): return More() return Less()
23aaaf8a14b4ed83ea3b2e92dfbf8e58c18817a7
3,655,264
import time


def get_offset(sample_time):
    """
    Find simple offset values.
    During the sample time of this function the BBB with the
    magnetometer on should be rotated along all axes.
    sample_time is in seconds
    """
    # time.clock() was removed in Python 3.8; perf_counter() serves the same purpose here
    start = time.perf_counter()
    mag_samples = []
    mag_max = [0, 0, 0]
    mag_min = [0, 0, 0]
    offset = [0, 0, 0]

    while (time.perf_counter() - start) < sample_time:
        raw_data = get_raw_mag()
        mag_samples.append(transform_readable(raw_data))
        # blink leds to signify timespan

    while mag_samples != []:
        a = mag_samples.pop()
        # find maximum, minimum values
        for i in range(3):
            if a[i] > mag_max[i]:
                mag_max[i] = a[i]
            if a[i] < mag_min[i]:
                mag_min[i] = a[i]
    # print(mag_max)
    # print(mag_min)

    # calculate offset from extrema
    for i in range(3):
        offset[i] = (mag_max[i] + mag_min[i]) / 2

    return offset
712fe82dbdc50e198baf93b752f572331ce33f63
3,655,265
import os def get_zips(directory: str) -> list: """ Return a the ZIP from a specified directory after running some sanity checks """ zips = {} for file in [os.path.join(dp, file) for dp, dn, fn in os.walk(directory) for file in fn]: if file.split('.')[-1] != 'zip': continue zip_name = file.split('/')[-1] try: version, buildtype, device, builddate = get_metadata_from_zip(zip_name) except IndexError: continue if buildtype.lower() not in ALLOWED_BUILDTYPES: continue if version not in ALLOWED_VERSIONS: continue if device in zips: if get_date_from_zip(zips[device]) > builddate: continue zips[device] = zip_name data = list(zips.values()) data.sort() return data
7bee4d1acc1fa51fc1499ca47a8759a9aa61ec67
3,655,266
def get_multimode_2d_dist(num_modes: int = 1, scale: float = 1.0): """Get a multimodal distribution of Gaussians.""" angles = jnp.linspace(0, jnp.pi * 2, num_modes + 1) angles = angles[:-1] x, y = jnp.cos(angles) * scale / 2., jnp.sin(angles) * scale / 2. loc = jnp.array([x, y]).T scale = jnp.ones((num_modes, 2)) * scale / 10. return tfd.MixtureSameFamily( mixture_distribution=tfd.Categorical( probs=jnp.ones((num_modes,)) / num_modes), components_distribution=tfd.MultivariateNormalDiag( loc=loc, scale_diag=scale))
dab5400e545feb7cde2804af151f3c20c600b0ce
3,655,267
import numpy as np


def residual_squared_error(data_1, data_2):
    """
    Calculate the residual squared error between two arrays.

    Parameters
    ----------
    data_1: numpy array
        Observed data
    data_2: numpy array
        Calculated values

    Return
    ------
    rse: float
        residual squared error
    """
    RSS = np.sum(np.square(data_1 - data_2))
    rse = np.sqrt(RSS / (len(data_1) - 2))

    return rse
771c365fc38d6eda07989a1a6eb34c0f96347c3c
3,655,268
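A short worked example (note the n - 2 = 3 denominator):

import numpy as np

observed = np.array([2.0, 4.1, 6.2, 7.9, 10.1])
predicted = np.array([2.0, 4.0, 6.0, 8.0, 10.0])

print(residual_squared_error(observed, predicted))
# RSS = 0 + 0.01 + 0.04 + 0.01 + 0.01 = 0.07, so sqrt(0.07 / 3) ≈ 0.153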
def by_index(pot): """ Build a new potential where the keys of the potential dictionary correspond to the indices along values of n-dimensional grids, rather than, possibly, the coordinate values of the grids themselves. Key Transformation: ((grid_val_i, grid_val_j, ...)_i,) -> ((i, j, ...)_i,) :param pot: potential along a coordinate :type pot: dict[tuple(float)] = float :rtype: dict[tuple(int)] = float """ pot_keys = list(pot.keys()) dim = dimension(pot) remap_dcts = [] for i in range(dim): _coords = sorted(list(set(lst[i] for lst in pot_keys))) _idxs = list(range(len(_coords))) remap_dcts.append(dict(zip(_coords, _idxs))) new_dct = {} for keys in pot_keys: new_tup = () for i, val in enumerate(keys): new_tup += (remap_dcts[i][val],) new_dct[new_tup] = pot[keys] return new_dct
7235322f606cf972c8bf4ad46a614001f235b3e9
3,655,269
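A small worked example of the key remapping, assuming the module's `dimension(pot)` helper returns the length of the coordinate tuples:

pot = {(0.0, 0.0): 1.5, (0.0, 30.0): 2.5, (60.0, 0.0): 3.5, (60.0, 30.0): 4.5}

print(by_index(pot))
# {(0, 0): 1.5, (0, 1): 2.5, (1, 0): 3.5, (1, 1): 4.5}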
def current_user():
    """Get the current user or edit the user's profile"""
    user = get_user_from_request()

    if request.method == "POST":
        json = request.get_json()

        user.email = json.get("email", user.email)
        user.name = json.get("name", user.name)
        user.about = sanitize(json.get("about", user.about))
        user.birthday = json.get("birthday", user.birthday)
        if "avatar" in json:
            content = Content.get_or_none(Content.id == json["avatar"])
            if content:
                if not content.is_image:
                    return errors.user_avatar_is_not_image()
                elif content.size > 1024 * 500:  # 500kb
                    return errors.user_avatar_too_large()
                else:
                    user.avatar = content

        user.save()
        user = User.get(User.id == user.id)

    return jsonify({"success": 1, "user": user.to_json_with_email()})
e7e3db1744e21c64732217e1609a113b938c677c
3,655,270
from datetime import datetime


async def async_union_polygons(bal_name, geom_list):
    """union a set of polygons & return the resulting multipolygon"""
    start_time = datetime.now()
    big_poly = unary_union(geom_list)
    print(f"\t - {bal_name} : set of polygons unioned: {datetime.now() - start_time}")

    return big_poly
2432818d6bb38232e08a4439e7a69007a7c24334
3,655,271
def _error_text(because: str, text: str, backend: usertypes.Backend) -> str: """Get an error text for the given information.""" other_backend, other_setting = _other_backend(backend) if other_backend == usertypes.Backend.QtWebKit: warning = ("<i>Note that QtWebKit hasn't been updated since " "July 2017 (including security updates).</i>") suffix = " (not recommended)" else: warning = "" suffix = "" return ("<b>Failed to start with the {backend} backend!</b>" "<p>qutebrowser tried to start with the {backend} backend but " "failed because {because}.</p>{text}" "<p><b>Forcing the {other_backend.name} backend{suffix}</b></p>" "<p>This forces usage of the {other_backend.name} backend by " "setting the <i>backend = '{other_setting}'</i> option " "(if you have a <i>config.py</i> file, you'll need to set " "this manually). {warning}</p>".format( backend=backend.name, because=because, text=text, other_backend=other_backend, other_setting=other_setting, warning=warning, suffix=suffix))
cb4fda8ab6c06d01ae01e6226d435d30cd0bd971
3,655,272
import pandas as pd


def COUNT(condition: pd.DataFrame, n: int):
    """the number of days that satisfy the 'condition' over the past n days

    Args:
        condition (pd.DataFrame): dataframe indexed by date time (level 0) and asset (level 1), containing bool values
        n (int): the number of past days
    """
    return condition.rolling(n, center=False, min_periods=n).sum()
ed380061249803e9c378950a88dc5162543cfee0
3,655,273
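A small usage sketch; the docstring describes a (date, asset) MultiIndex frame, but the rolling count behaves the same on any date-indexed boolean DataFrame:

import pandas as pd

idx = pd.date_range('2024-01-01', periods=5)
condition = pd.DataFrame({'AAPL': [True, False, True, True, False]}, index=idx)

# cast to float in case the installed pandas rejects bool dtype in rolling ops
print(COUNT(condition.astype(float), 3))
#             AAPL
# 2024-01-01   NaN
# 2024-01-02   NaN
# 2024-01-03   2.0
# 2024-01-04   2.0
# 2024-01-05   2.0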
def Mat33_nrow():
    """Mat33_nrow() -> int"""
    return _simbody.Mat33_nrow()
7f22177bcf150458e6545ed204e47b3326ce6193
3,655,274
def isstruct(ob):
    """ isstruct(ob)

    Returns whether the given object is an SSDF struct.
    """
    if hasattr(ob, '__is_ssdf_struct__'):
        return bool(ob.__is_ssdf_struct__)
    else:
        return False
465196af79c9de1f7685e0004e92b68a7f524149
3,655,275
def where_between(field_name, start_date, end_date): """ Return the bit of query for the dates interval. """ str = """ {0} between date_format('{1}', '%%Y-%%c-%%d %%H:%%i:%%S') and date_format('{2}', '%%Y-%%c-%%d 23:%%i:%%S') """ .format( field_name, start_date.strftime("%Y-%m-%d %H:%M:%S"), end_date.strftime("%Y-%m-%d %H:%M:%S")) return str
4801d01ac8743f138e7c558da40518b75ca6daed
3,655,276
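A usage sketch for the clause builder; the doubled %% survive str.format and are presumably consumed later by the DB driver's %-style interpolation:

from datetime import datetime

clause = where_between('created_at',
                       datetime(2024, 1, 1),
                       datetime(2024, 1, 31, 12, 30))
print(clause)
# created_at between date_format('2024-01-01 00:00:00', '%%Y-%%c-%%d %%H:%%i:%%S')
# and date_format('2024-01-31 12:30:00', '%%Y-%%c-%%d 23:%%i:%%S')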
def to_console_formatted_string(data: dict) -> str: """...""" def make_line(key: str) -> str: if key.startswith('__cauldron_'): return '' data_class = getattr(data[key], '__class__', data[key]) data_type = getattr(data_class, '__name__', type(data[key])) value = '{}'.format(data[key])[:250].replace('\n', '\n ') if value.find('\n') != -1: value = '\n{}'.format(value) return '+ {name} ({type}): {value}'.format( name=key, type=data_type, value=value ) keys = list(data.keys()) keys.sort() lines = list(filter( lambda line: len(line) > 0, [make_line(key) for key in keys] )) return '\n'.join(lines)
05cec50b3eee8199b19024aae32dda2a8ba33115
3,655,277
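A small usage example; keys with the internal '__cauldron_' prefix are filtered out and the rest are listed in sorted order:

data = {
    'count': 3,
    'label': 'spectra',
    '__cauldron_internal__': 'hidden',
}

print(to_console_formatted_string(data))
# + count (int): 3
# + label (str): spectra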
def cluster_instance_get_info_ajax(request, c_id): """ get cluster instance status """ dic = {"res": True, "info":None, "err":None} instance_id = request.GET.get("instance_id") require_vnc = request.GET.get("require_vnc") if require_vnc == "true": require_vnc = True else: require_vnc = False if instance_id.isdecimal(): instance_id = int(instance_id) instance_info = get_cluster_instance_info(request.user, instance_id,require_vnc=require_vnc) if not instance_info: raise Http404 dic["info"] = {"status":instance_info["status"], "status_name":instance_info["status_name"], "vnc_url":instance_info["vnc_url"]} else: dic["res"] = False dic["err"] = "Invalid ID" return JsonResponse(dic)
1c000a659893b375a2e89faadedccde7ca8dcab6
3,655,278
import time def timeit(verbose=False): """ Time functions via decoration. Optionally output time to stdout. Parameters: ----------- verbose : bool Example Usage: >>> @timeit(verbose=True) >>> def foo(*args, **kwargs): pass """ def _timeit(f): @wraps(f) def wrapper(*args, **kwargs): if verbose: start = time.time() res = f(*args, **kwargs) runtime = time.time() - start print(f'{f.__name__!r} in {runtime:.4f} s') else: res = f(*args, **kwargs) return res return wrapper return _timeit
5e8e0441914b5d26db99fc378374bebde2d39376
3,655,279
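Typical usage of the decorator (the original module is assumed to also import `wraps` from functools):

@timeit(verbose=True)
def slow_sum(n):
    return sum(range(n))

slow_sum(1_000_000)   # prints something like: 'slow_sum' in 0.0281 s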
def signal_period(peaks, sampling_rate=1000, desired_length=None, interpolation_order="cubic"): """Calculate signal period from a series of peaks. Parameters ---------- peaks : list, array, DataFrame, Series or dict The samples at which the peaks occur. If an array is passed in, it is assumed that it was obtained with `signal_findpeaks()`. If a DataFrame is passed in, it is assumed it is of the same length as the input signal in which occurrences of R-peaks are marked as "1", with such containers obtained with e.g., ecg_findpeaks() or rsp_findpeaks(). sampling_rate : int The sampling frequency of the signal that contains peaks (in Hz, i.e., samples/second). Defaults to 1000. desired_length : int By default, the returned signal rate has the same number of elements as the raw signal. If set to an integer, the returned signal rate will be interpolated between peaks over `desired_length` samples. Has no effect if a DataFrame is passed in as the `signal` argument. Defaults to None. interpolation_order : str Order used to interpolate the rate between peaks. See `signal_interpolate()`. Returns ------- array A vector containing the period. See Also -------- signal_findpeaks, signal_fixpeaks, signal_plot Examples -------- >>> import neurokit2 as nk >>> >>> signal = nk.signal_simulate(duration=10, sampling_rate=1000, >>> frequency=1) >>> info = nk.signal_findpeaks(signal) >>> >>> rate = nk.signal_rate(peaks=info["Peaks"]) >>> nk.signal_plot(rate) """ peaks, desired_length = _signal_formatpeaks_sanitize(peaks, desired_length) # Sanity checks. if len(peaks) <= 3: print("NeuroKit warning: _signal_formatpeaks(): too few peaks detected" " to compute the rate. Returning empty vector.") return np.full(desired_length, np.nan) # Calculate period in sec, based on peak to peak difference and make sure # that rate has the same number of elements as peaks (important for # interpolation later) by prepending the mean of all periods. period = np.ediff1d(peaks, to_begin=0) / sampling_rate period[0] = np.mean(period[1:]) # Interpolate all statistics to desired length. if desired_length != np.size(peaks): period = signal_interpolate(peaks, period, desired_length=desired_length, method=interpolation_order) return period
dae9a7af6d23fdaa1f742cbc3b18649a525c4041
3,655,280
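The core of the computation is simply the peak-to-peak difference divided by the sampling rate; a standalone numpy sketch of that step:

import numpy as np

peaks = np.array([0, 1000, 2010, 2990, 4005])   # sample indices of detected peaks
sampling_rate = 1000

period = np.ediff1d(peaks, to_begin=0) / sampling_rate
period[0] = np.mean(period[1:])                 # backfill the undefined first period
print(period)   # [1.00125 1.      1.01    0.98    1.015  ]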
import google.cloud.dataflow as df from google.cloud.dataflow.utils.options import PipelineOptions def model_co_group_by_key_tuple(email_list, phone_list, output_path): """Applying a CoGroupByKey Transform to a tuple. URL: https://cloud.google.com/dataflow/model/group-by-key """ p = df.Pipeline(options=PipelineOptions()) # [START model_group_by_key_cogroupbykey_tuple] # Each data set is represented by key-value pairs in separate PCollections. # Both data sets share a common key type (in this example str). # The email_list contains values such as: ('joe', '[email protected]') with # multiple possible values for each key. # The phone_list contains values such as: ('mary': '111-222-3333') with # multiple possible values for each key. emails = p | df.Create('email', email_list) phones = p | df.Create('phone', phone_list) # The result PCollection contains one key-value element for each key in the # input PCollections. The key of the pair will be the key from the input and # the value will be a dictionary with two entries: 'emails' - an iterable of # all values for the current key in the emails PCollection and 'phones': an # iterable of all values for the current key in the phones PCollection. # For instance, if 'emails' contained ('joe', '[email protected]') and # ('joe', '[email protected]'), then 'result' will contain the element # ('joe', {'emails': ['[email protected]', '[email protected]'], 'phones': ...}) result = {'emails': emails, 'phones': phones} | df.CoGroupByKey() def join_info((name, info)): return '; '.join(['%s' % name, '%s' % ','.join(info['emails']), '%s' % ','.join(info['phones'])]) contact_lines = result | df.Map(join_info) # [END model_group_by_key_cogroupbykey_tuple] contact_lines | df.io.Write(df.io.TextFileSink(output_path)) p.run()
7256b9dac30fe731011729ea463e37b39d8c4dde
3,655,281
def get_recommendation(anime_name, cosine_sim, clean_anime, anime_index): """ Getting pairwise similarity scores for all anime in the data frame. The function returns the top 10 most similar anime to the given query. """ idx = anime_index[anime_name] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[0:11] anime_indices = [i[0] for i in sim_scores] result = clean_anime[['name']].iloc[anime_indices].drop(idx) return result
93bc3e53071200810b34e31674fcaa0a98cdaebb
3,655,282
def get_nwb_metadata(experiment_id): """ Collects metadata based on the experiment id and converts the weight to a float. This is needed for further export to nwb_converter. This function also validates, that all metadata is nwb compatible. :param experiment_id: The experiment id given by the user. :return: Final nwb metadata to be passed on. :rtype: dict """ metadata = get_raw_nwb_metadata(experiment_id) # nwb_converter unfortunately needs the weight to be a float in kg. metadata["Subject"]["weight"] = convert_weight(metadata["Subject"]["weight"]) if validate_pynwb_data(metadata): return metadata else: raise Exception("Could not validate nwb file.")
9882e71cb869e1ebf762fd851074d316b9fda462
3,655,283
from typing import Tuple from typing import Union def string_to_value_error_mark(string: str) -> Tuple[float, Union[float, None], str]: """ Convert string to float and error. Parameters ---------- string : str DESCRIPTION. Returns ------- value : float Value. error : float Error. """ value, error, mark = None, None, "" ind_1 = string.find("(") s_sigma = "" if value == ".": pass elif ind_1 != -1: ind_2 = string.find(")") if ind_2 > ind_1: s_sigma = string[(ind_1+1):ind_2] if not(s_sigma.isdigit()): s_sigma = "" str_1 = string.split("(")[0] value = float(str_1) mark = string[ind_2+1:].strip() if s_sigma != "": s_h = "".join(["0" if _h.isdigit() else _h for _h in str_1[:-len(s_sigma)]]) error = abs(float(s_h+s_sigma)) else: error = 0. else: try: value = float(string) except ValueError: value = None return value, error, mark
c2c69c419d44e8342376ee24f6a4ced6ee2090e7
3,655,284
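A few worked examples of the parsing convention handled above (value, parenthesised uncertainty in the last digits, optional trailing mark):

print(string_to_value_error_mark("1.23(4)"))       # (1.23, 0.04, '')
print(string_to_value_error_mark("0.5617(23) u"))  # (0.5617, 0.0023, 'u')
print(string_to_value_error_mark("7.5"))           # (7.5, None, '')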
import itertools def _children_with_tags(element, tags): """Returns child elements of the given element whose tag is in a given list. Args: element: an ElementTree.Element. tags: a list of strings that are the tags to look for in child elements. Returns: an iterable of ElementTree.Element instances, which are the children of the input element whose tags matched one of the elements of the list. """ return itertools.chain(*(_children_with_tag(element, tag) for tag in tags))
522151e7e9ad355e5c6850cef62093e1bd4ed0a0
3,655,285
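A runnable sketch with a plausible stand-in for the module's `_children_with_tag` helper (assumed here to yield direct children with a matching tag):

import xml.etree.ElementTree as ET

def _children_with_tag(element, tag):
    # Stand-in for the module's helper: direct children whose tag matches.
    return (child for child in element if child.tag == tag)

root = ET.fromstring('<body><joint/><geom/><joint/><site/></body>')
print([child.tag for child in _children_with_tags(root, ['joint', 'geom'])])
# -> ['joint', 'joint', 'geom']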
import logging import collections import functools def train_and_eval(): """Train and evaluate StackOver NWP task.""" logging.info('Show FLAGS for debugging:') for f in HPARAM_FLAGS: logging.info('%s=%s', f, FLAGS[f].value) hparam_dict = collections.OrderedDict([ (name, FLAGS[name].value) for name in HPARAM_FLAGS ]) if FLAGS.experiment_type == 'private': # Evaluate on StackOverflow train_dataset_computation, train_set, validation_set, test_set = ( _preprocess_data('stackoverflow_private', FLAGS.vocab_size, FLAGS.num_oov_buckets, FLAGS.sequence_length, FLAGS.num_validation_examples, FLAGS.client_batch_size, FLAGS.client_epochs_per_round, FLAGS.max_elements_per_user)) elif FLAGS.experiment_type == 'public_SO' or FLAGS.experiment_type == 'stackoverflow_SGD': # Evaluate on StackOverflow _, train_set_private, _, _ = ( _preprocess_data('stackoverflow_private', FLAGS.vocab_size, FLAGS.num_oov_buckets, FLAGS.sequence_length, FLAGS.num_validation_examples, FLAGS.client_batch_size, FLAGS.client_epochs_per_round, FLAGS.max_elements_per_user)) train_dataset_computation, train_set, validation_set, test_set = ( _preprocess_data('stackoverflow_public', FLAGS.vocab_size, FLAGS.num_oov_buckets, FLAGS.sequence_length, FLAGS.num_validation_examples, FLAGS.client_batch_size, FLAGS.client_epochs_per_round, FLAGS.max_elements_per_user)) client_ids_size = int(100) training_set_client_ids = train_set.client_ids[:client_ids_size] elif FLAGS.experiment_type == 'warmstart': #Evaluate on StackOverflow train_dataset_computation, train_set, validation_set, test_set = ( _preprocess_data('stackoverflow_private', FLAGS.vocab_size, FLAGS.num_oov_buckets, FLAGS.sequence_length, FLAGS.num_validation_examples, FLAGS.client_batch_size, FLAGS.client_epochs_per_round, FLAGS.max_elements_per_user)) input_spec = train_dataset_computation.type_signature.result.element metrics = _get_metrics(FLAGS.vocab_size, FLAGS.num_oov_buckets) if FLAGS.use_tff_learning: iterative_process, evaluate_fn, server_state_update_fn = _build_tff_learning_model_and_process( input_spec, metrics) else: iterative_process, evaluate_fn, server_state_update_fn = _build_custom_model_and_process( input_spec, metrics) iterative_process = tff.simulation.compose_dataset_computation_with_iterative_process( dataset_computation=train_dataset_computation, process=iterative_process) if FLAGS.total_epochs is None: # def client_dataset_ids_fn(round_num: int, epoch: int): # return _sample_client_ids(FLAGS.clients_per_round, train_set, round_num, # epoch) def client_dataset_ids_fn(round_num: int, epoch: int): logging.info("Sampling from subset of public") return _sample_public_client_ids(FLAGS.clients_per_round, training_set_client_ids, round_num, epoch) logging.info('Sample clients for max %d rounds', FLAGS.total_rounds) total_epochs = 0 else: client_shuffer = training_loop.ClientIDShuffler(FLAGS.clients_per_round, train_set) client_dataset_ids_fn = client_shuffer.sample_client_ids logging.info('Shuffle clients for max %d epochs and %d rounds', FLAGS.total_epochs, FLAGS.total_rounds) total_epochs = FLAGS.total_epochs warmstart_loop.run( iterative_process, client_dataset_ids_fn, warmstart_file=FLAGS.warmstart_file, validation_fn=functools.partial(evaluate_fn, dataset=validation_set), total_epochs=total_epochs, total_rounds=FLAGS.total_rounds, experiment_name=FLAGS.experiment_name, train_eval_fn=None, test_fn=functools.partial(evaluate_fn, dataset=test_set), root_output_dir=FLAGS.root_output_dir, hparam_dict=hparam_dict, rounds_per_eval=FLAGS.rounds_per_eval, 
rounds_per_checkpoint=FLAGS.rounds_per_checkpoint, rounds_per_train_eval=2000, server_state_epoch_update_fn=server_state_update_fn) return else: raise ValueError('Experiment type is not supported %s'.format( FLAGS.experiment_type)) input_spec = train_dataset_computation.type_signature.result.element metrics = _get_metrics(FLAGS.vocab_size, FLAGS.num_oov_buckets) if FLAGS.use_tff_learning: iterative_process, evaluate_fn, server_state_update_fn = _build_tff_learning_model_and_process( input_spec, metrics) else: iterative_process, evaluate_fn, server_state_update_fn = _build_custom_model_and_process( input_spec, metrics) iterative_process = tff.simulation.compose_dataset_computation_with_iterative_process( dataset_computation=train_dataset_computation, process=iterative_process) if FLAGS.total_epochs is None: def client_dataset_ids_fn(round_num: int, epoch: int): if FLAGS.experiment_type == 'public_SO' or FLAGS.experiment_type == 'stackoverflow_SGD': logging.info("Sampling from subset of public") return _sample_public_client_ids(FLAGS.clients_per_round, training_set_client_ids, round_num, epoch) else: return _sample_client_ids(FLAGS.clients_per_round, train_set, round_num, epoch) logging.info('Sample clients for max %d rounds', FLAGS.total_rounds) total_epochs = 0 else: client_shuffer = training_loop.ClientIDShuffler(FLAGS.clients_per_round, train_set) client_dataset_ids_fn = client_shuffer.sample_client_ids logging.info('Shuffle clients for max %d epochs and %d rounds', FLAGS.total_epochs, FLAGS.total_rounds) total_epochs = FLAGS.total_epochs if FLAGS.experiment_type != 'stackoverflow_SGD': training_loop.run( iterative_process, client_dataset_ids_fn, validation_fn=functools.partial(evaluate_fn, dataset=validation_set), total_epochs=total_epochs, total_rounds=FLAGS.total_rounds, experiment_name=FLAGS.experiment_name, train_eval_fn=None, test_fn=functools.partial(evaluate_fn, dataset=test_set), root_output_dir=FLAGS.root_output_dir, hparam_dict=hparam_dict, rounds_per_eval=FLAGS.rounds_per_eval, rounds_per_checkpoint=FLAGS.rounds_per_checkpoint, rounds_per_train_eval=2000, server_state_epoch_update_fn=server_state_update_fn)
807284651327ef29260ac2ce8ab753d40349a786
3,655,286
def align_with_known_width(val, width: int, lowerBitCntToAlign: int):
    """
    Does the same as :func:`~.align`, just with the known width of val
    """
    return val & (mask(width - lowerBitCntToAlign) << lowerBitCntToAlign)
8c9b7ffd8fced07f2ca78db7665ea5425417e45a
3,655,287
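A small bit-level example, assuming the module's `mask(n)` helper builds a bitmask of n ones:

def mask(bit_cnt):
    # assumed helper: bit_cnt consecutive 1 bits
    return (1 << bit_cnt) - 1

# keep only the bits above the two lowest ones of a 5-bit value
print(bin(align_with_known_width(0b10111, 5, 2)))   # 0b10100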
def get_email_from_request(request): """Use cpg-utils to extract user from already-authenticated request headers.""" user = get_user_from_headers(request.headers) if not user: raise web.HTTPForbidden(reason='Invalid authorization header') return user
60872f86bb69de6b1b339f715a2561dafd231489
3,655,288
from typing import List from typing import Tuple def get_kernels(params: List[Tuple[str, int, int, int, int]]) -> List[np.ndarray]: """ Create list of kernels :param params: list of tuples with following format ("kernel name", angle, multiplier, rotation angle) :return: list of kernels """ kernels = [] # type: List[np.ndarray] for param in params: if len(param) < 5: print('Number of parameters given must be 4, got', param, 'len(', len(param), ') instead') return None if param[0] == 'gauss': kernels.append(rotate_matrix(get_gauss(param[1], param[2]) * param[3], param[4])) elif param[0] == 'log': kernels.append(rotate_matrix(get_log(param[1], param[2]) * param[3], param[4])) elif param[0] == 'sobel': kernels.append(rotate_matrix(get_sobel(param[1], param[2]) * param[3], param[4])) elif param[0] == 'ft0': kernels.append(rotate_matrix(get_ft0(param[1], param[2]) * param[3], param[4])) elif param[0] == 'ft1': kernels.append(rotate_matrix(get_ft1(param[1], param[2]) * param[3], param[4])) elif param[0] == 'ft2c': kernels.append(rotate_matrix(get_ft2c(param[1], param[2]) * param[3], param[4])) if len(kernels) == 1: return kernels[0] else: return kernels
b39fd152fe94f4c52398ae4984414d2cefbf401f
3,655,289
def forward_propagation(propagation_start_node, func, x):
    """A forward propagation starting at the `propagation_start_node` and
    wrapping all the composition operations along the way.

    Parameters
    ----------
    propagation_start_node : Node
        The node where the gradient function (or anything similar) is requested.
    func : function
        The function to apply at the node (most likely a composition of functions).
    x : narray
        A set of parameters for the function.

    Returns
    -------
    Wrapper
        The ending wrapper wrapping the propagation end node.
    """
    trace_marker = marker_stack.get_marker()
    propagation_start_wrapper = new_wrapper(
        x, trace_marker, propagation_start_node)

    propagation_end_wrapper = func(propagation_start_wrapper)
    marker_stack.release_marker(trace_marker)

    if isinstance(propagation_end_wrapper, Wrapper) \
            and propagation_end_wrapper._trace_marker == propagation_start_wrapper.trace_marker:
        return propagation_end_wrapper._value, propagation_end_wrapper._node
    else:
        return propagation_end_wrapper, None
12fbbb53fd329aacdf5f5fffbfa2a81342663fb8
3,655,290
import requests def main(): """ Main function of the script. """ args = parse_args() if args.version: print("{v}".format(v=__version__)) return 0 config = ConfigFile(args.config_file, CONFIG_FILE_SCHEMA) if args.help_config: print(config.help()) return 0 if args.config_file is None: print("Error: Config file must be specified.") return 1 print("Using plexmediafixup config file: {file}". format(file=args.config_file)) try: config.load() except ConfigFileError as exc: print("Error: {}".format(exc)) return 1 plexapi_config_path = config.data['plexapi_config_path'] # required item direct_connection = config.data['direct_connection'] # required item server_name = config.data['server_name'] # optional but defaulted item fixups = config.data['fixups'] # optional but defaulted item fixup_mgr = FixupManager() if not plexapi_config_path: plexapi_config_path = plexapi.CONFIG_PATH print("Using PlexAPI config file: {file}". format(file=plexapi_config_path)) plexapi_config = plexapi.config.PlexConfig(plexapi_config_path) # Verify that the fixups can be loaded for fixup in fixups: name = fixup['name'] # required item enabled = fixup['enabled'] # required item if enabled: print("Loading fixup: {name}".format(name=name)) fixup_mgr.get_fixup(name) if direct_connection: server_baseurl = plexapi_config.get('auth.server_baseurl', None) if server_baseurl is None: print("Error: Parameter auth.server_baseurl is required for " "direct connection but is not set in PlexAPI config file " "{file}". format(file=plexapi_config_path)) return 1 server_token = plexapi_config.get('auth.server_token', None) if server_token is None: print("Error: Parameter auth.server_token is required for " "direct connection but is not set in PlexAPI config file " "{file}". format(file=plexapi_config_path)) return 1 print("Connecting directly to Plex Media Server at {url}". format(url=server_baseurl)) try: with Watcher() as w: # If the PMS is not reachable on the network, this raises # requests.exceptions.ConnectionError (using max_retries=0 and # the connect and read timeout configured in the plexapi config # file as plexapi.timeout). plex = plexapi.server.PlexServer() except (plexapi.exceptions.PlexApiException, requests.exceptions.RequestException) as exc: print("Error: Cannot connect to Plex server at {url}: {msg} " "({w.debug_str})". format(url=server_baseurl, msg=exc, w=w)) return 1 print("Connected directly to Plex Media Server at {url}". format(url=server_baseurl)) else: myplex_username = plexapi_config.get('auth.myplex_username', None) if not myplex_username: print("Error: Parameter auth.myplex_username is required for " "indirect connection but is not set in PlexAPI config file " "{file}". format(file=plexapi_config_path)) return 1 myplex_password = plexapi_config.get('auth.myplex_password', None) if not myplex_username: print("Error: Parameter auth.myplex_password is required for " "indirect connection but is not set in PlexAPI config file " "{file}". format(file=plexapi_config_path)) return 1 if not server_name: print("Error: Parameter server_name is required for " "indirect connection but is not set in plexmediafixup " "config file {file}". format(file=config.filepath)) return 1 print("Connecting indirectly to server {srv} of Plex account {user}". 
format(srv=server_name, user=myplex_username)) try: with Watcher() as w: account = plexapi.myplex.MyPlexAccount( myplex_username, myplex_password) except (plexapi.exceptions.PlexApiException, requests.exceptions.RequestException) as exc: print("Error: Cannot login to Plex account {user}: {msg} " "({w.debug_str})". format(user=myplex_username, msg=exc, w=w)) return 1 try: with Watcher() as w: plex = account.resource(server_name).connect() except (plexapi.exceptions.PlexApiException, requests.exceptions.RequestException) as exc: print("Error: Cannot connect to server {srv} of Plex account " "{user}: {msg} ({w.debug_str})". format(srv=server_name, user=myplex_username, msg=exc, w=w)) return 1 print("Connected indirectly to server {srv} of Plex account {user}". format(srv=server_name, user=myplex_username)) for fixup in fixups: name = fixup['name'] # required item enabled = fixup['enabled'] # required item dryrun = args.dryrun fixup_kwargs = fixup.get('kwargs', dict()) if enabled: fixup = fixup_mgr.get_fixup(name) print("Executing fixup: {name} (dryrun={dryrun})". format(name=name, dryrun=dryrun)) rc = fixup.run(plex=plex, dryrun=dryrun, verbose=args.verbose, config=config, fixup_kwargs=fixup_kwargs) if rc: print("Error: Fixup {name} has encountered errors - aborting". format(name=name)) return 1 print("Fixup succeeded: {name} (dryrun={dryrun})". format(name=name, dryrun=dryrun)) return 0
bdbbb2d695a69a253aee2454128d9c098392bd57
3,655,291
def read_entities():
    """
    Return the list of entities (name and id only) as a JSON response.
    :return:
    """
    entities = Entity.objects.only('name', 'id')
    return build_response.sent_json(entities.to_json())
842ec7506b49abd6557219e2c9682bdd48df86fb
3,655,292
def available(unit, item) -> bool: """ If any hook reports false, then it is false """ for skill in unit.skills: for component in skill.components: if component.defines('available'): if component.ignore_conditional or condition(skill, unit): if not component.available(unit, item): return False return True
7550a197e2d877ef4ff622d08a056be434f1f06e
3,655,293
def cleanArray(arr):
    """Clean an array or list from unsupported objects for plotting.

    Objects are replaced by None, which is then converted to NaN.
    """
    try:
        return np.asarray(arr, float)
    except ValueError:
        return np.array([x if isinstance(x, number_types) else None for x in arr], float)
7ab7d645209ad0815a3eb831a1345cdad0ae4aba
3,655,294
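A usage example, with a stand-in for the module-level `number_types` alias the function relies on:

import numpy as np

number_types = (int, float, np.number)   # assumed module-level constant

print(cleanArray([1, 2.5, 'bad', 4]))
# -> [1.  2.5 nan 4. ]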
import argparse


def parse_args():
    """Main function for parsing args. Utilizes the 'check_config' function
    from the config module to ensure an API key has been passed. If not, user
    is prompted to conduct initial configuration for pwea. If a valid
    configuration is found (there is currently no validity check for the API
    key), argparser will look for location and optional arguments. Location is
    required. Default weather report is current, forecast can be specified
    using -t.
    """
    if check_config():
        parser = argparse.ArgumentParser(
            usage='pwea [location] <optional args>',
            description="description: pwea is a simple tool used to retrieve "
                        "current and forecasted weather information")
        parser.add_argument('location', nargs='+',
                            help="Input a city name or US/UK/Canadian postal code")
        parser.add_argument("-t", "--type", dest="report_type", default="current",
                            help="Acceptable report types are 'current' or 'forecast'. Default is 'current'")
        parser.add_argument('--config', dest='config', default=None,
                            help="Pass your API key for https://weatherapi.com")

        args = parser.parse_args()
        args.location = ' '.join(args.location)
        args.report_type = args.report_type.lower()
    else:
        parser = argparse.ArgumentParser(
            usage='No API key found in ~/.config/pwearc. Please set your API key using pwea --config <API_KEY>',
            description="description: pwea is a simple tool used to retrieve "
                        "current and forecasted weather information")
        parser.add_argument('--config', dest='config', required=True,
                            help="Pass your API key for https://weatherapi.com")
        args = parser.parse_args()

    return args
ebac086d48fad3ec8a8c715c9d75acb1ac1e5e24
3,655,295
def _ensure_args(G, source, method, directed, return_predecessors, unweighted, overwrite, indices): """ Ensures the args passed in are usable for the API api_name and returns the args with proper defaults if not specified, or raises TypeError or ValueError if incorrectly specified. """ # checks common to all input types if (method is not None) and (method != "auto"): raise ValueError("only 'auto' is currently accepted for method") if (indices is not None) and (type(indices) == list): raise ValueError("indices currently cannot be a list-like type") if (indices is not None) and (source is not None): raise TypeError("cannot specify both 'source' and 'indices'") if (indices is None) and (source is None): raise TypeError("must specify 'source' or 'indices', but not both") G_type = type(G) # Check for Graph-type inputs if (G_type in [Graph, DiGraph]) or is_nx_graph_type(G_type): exc_value = "'%s' cannot be specified for a Graph-type input" if directed is not None: raise TypeError(exc_value % "directed") if return_predecessors is not None: raise TypeError(exc_value % "return_predecessors") if unweighted is not None: raise TypeError(exc_value % "unweighted") if overwrite is not None: raise TypeError(exc_value % "overwrite") directed = False # Check for non-Graph-type inputs else: if (directed is not None) and (type(directed) != bool): raise ValueError("'directed' must be a bool") if (return_predecessors is not None) and \ (type(return_predecessors) != bool): raise ValueError("'return_predecessors' must be a bool") if (unweighted is not None) and (unweighted is not True): raise ValueError("'unweighted' currently must be True if " "specified") if (overwrite is not None) and (overwrite is not False): raise ValueError("'overwrite' currently must be False if " "specified") source = source if source is not None else indices if return_predecessors is None: return_predecessors = True return (source, directed, return_predecessors)
6d9168de0d25f5ee4d720347182763ad744600a6
3,655,296
def read_siemens_scil_b0(): """ Load Siemens 1.5T b0 image form the scil b0 dataset. Returns ------- img : obj, Nifti1Image """ file = pjoin(dipy_home, 'datasets_multi-site_all_companies', '1.5T', 'Siemens', 'b0.nii.gz') return nib.load(file)
edf700fc6e14a35b5741e4419ba96cb753188da8
3,655,297
def gdpcleaner(gdpdata: pd.DataFrame): """ Author: Gabe Fairbrother Remove spurious columns, Rename relevant columns, Remove NaNs Parameters ---------- gdpdata: DataFrame a loaded dataframe based on a downloaded Open Government GDP at basic prices dataset (https://open.canada.ca/en/open-data) Returns ------- DataFrame: A cleaned and simplified DataFrame of the relevant columns for summary and visualization. Possible columns (dataset dependent) include: Date: Date of data Location: Province or Jurisdiction Scale: Scale of the Value column (Percent, Millions, etc) Unit: Unit of Measure Value: Portion of the GDP for the Location and Date NAICS_Class: North American Industry Classification System ID Industry: Industry of Record Sub-sector: Non-profit sub-sector Special_Industry: Special Industry Aggregate Examples -------- >>> result = gdpcleaner(example_data) """ #Check for DataFrame input argument if (isinstance(gdpdata, pd.core.frame.DataFrame)): pass else: raise TypeError("Argument must be a Pandas DataFrame") cleaned_frame = gdpdata #Remove spurious columns spurious = ['DGUID', 'UOM_ID', 'SCALAR_ID', 'VECTOR', 'COORDINATE', 'STATUS', 'SYMBOL', 'TERMINATED', 'DECIMALS', 'Value', 'Seasonal adjustment'] for column in cleaned_frame.columns : if column in spurious: cleaned_frame = cleaned_frame.drop(columns=column) #Drop any rows with null value cleaned_frame = cleaned_frame.dropna() #Rename relevant columns cleaned_frame = cleaned_frame.rename(columns={'REF_DATE': 'Date', 'GEO': 'Location', 'SCALAR_FACTOR': 'Scale', 'VALUE': 'Value', 'UOM': 'Unit'}) for column in cleaned_frame.columns: if 'NAICS' in column: cleaned_frame = cleaned_frame.rename(columns={column: 'NAICS_Class'}) if 'aggregat' in column: #Not a spelling mistake, there are multiple similar column headers in different datasets cleaned_frame = cleaned_frame.rename(columns={column: 'Special_Industry'}) return cleaned_frame
4c685a244a746f05fbef5216518e23a956ae8da7
3,655,298
import re


def sort_with_num(path):
    """Extract leading numbers in a file name for numerical sorting."""
    fname = path.name
    nums = re.match(r'^\d+', fname)
    if nums:
        return int(nums[0])
    else:
        return 0
2209384720c33b8201c06f7a14b431972712814a
3,655,299
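A quick sort example; files without a leading number get key 0 and therefore sort first, and numeric prefixes sort numerically rather than lexically:

from pathlib import Path

paths = [Path('12_summary.txt'), Path('2_intro.txt'), Path('notes.txt'), Path('1_title.txt')]
print([p.name for p in sorted(paths, key=sort_with_num)])
# -> ['notes.txt', '1_title.txt', '2_intro.txt', '12_summary.txt']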