def GetHomeFunctorViaPose():
    """
    Deprecated. Returns a function that will move the robot to the home
    position when called.
    """
    js_home = GetPlanToHomeService()
    req = ServoToPoseRequest()
    pose_home = GetHomePoseKDL()
    req.target = pm.toMsg(pose_home)
    open_gripper = GetOpenGripperService()
    move = GetPlanToPoseService()
    servo_mode = GetServoModeService()

    def home():
        rospy.loginfo("HOME: set servo mode")
        servo_mode("servo")
        rospy.loginfo("HOME: open gripper to drop anything")
        open_gripper()
        rospy.loginfo("HOME: move to config home")
        max_tries = 10
        tries = 0
        res1 = None
        while tries < max_tries and (res1 is None or "failure" in res1.ack.lower()):
            res1 = js_home(ServoToPoseRequest())
            tries += 1
        if res1 is None or "failure" in res1.ack.lower():
            rospy.logerr(res1.ack)
            raise RuntimeError("HOME(): error moving to home1: " + str(res1.ack))
        rospy.loginfo("HOME: move to pose over objects")
        res2 = None
        tries = 0
        while tries < max_tries and (res2 is None or "failure" in res2.ack.lower()):
            res2 = move(req)
            tries += 1
        if res2 is None or "failure" in res2.ack.lower():
            rospy.logerr("move failed: " + str(res2.ack))
            raise RuntimeError("HOME(): error moving to pose over workspace: " + str(res2.ack))
        rospy.loginfo("HOME: done")

    return home
5,352,600
def getAveragePlatPrice(item_name):
    """
    Get the current average price of the item on the Warframe marketplace.

    Args:
        item_name (str): The name of the item.

    Returns:
        float: the average platinum market price of the item.
    """
    avg_price = -1
    item_name = clean(item_name)
    item_info = requests.get(API + item_name.replace(" ", "_") + "/statistics").json()
    try:
        avg_price = item_info["payload"]["statistics_closed"]["48hours"][0]['avg_price']
    except KeyError:
        print(item_name + " is not listed on warframe.market.")
    return avg_price
5,352,601
def mode_strength(n, kr, sphere_type='rigid'):
    """Mode strength b_n(kr) for an incident plane wave on sphere.

    Parameters
    ----------
    n : int
        Degree.
    kr : array_like
        kr vector, product of wavenumber k and radius r_0.
    sphere_type : 'rigid' or 'open'

    Returns
    -------
    b_n : array_like
        Mode strength b_n(kr).

    References
    ----------
    Rafaely, B. (2015). Fundamentals of Spherical Array Processing. Springer.
    eq. (4.4) and (4.5).
    """
    if sphere_type == 'open':
        b_n = 4*np.pi*1j**n * scyspecial.spherical_jn(n, kr)
    elif sphere_type == 'rigid':
        b_n = 4*np.pi*1j**n * (scyspecial.spherical_jn(n, kr) -
                               (scyspecial.spherical_jn(n, kr, True) /
                                spherical_hn2(n, kr, True)) *
                               spherical_hn2(n, kr))
    else:
        raise ValueError('sphere_type Not implemented.')
    return b_n
5,352,602
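A minimal usage sketch for mode_strength above (added for illustration, not part of the original row; it assumes scyspecial is the scipy.special alias the function uses):

import numpy as np
import scipy.special as scyspecial

kr = np.linspace(0.1, 2.0, 5)
b0 = mode_strength(0, kr, sphere_type='open')  # 'open' avoids the external spherical_hn2 helper
print(np.abs(b0))  # |b_0(kr)| tends to 4*pi as kr -> 0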
def data_dir(test_dir: Path) -> Path:
    """
    Create a directory for storing the mock data set.
    """
    _data_dir = test_dir / 'data'
    _data_dir.mkdir(exist_ok=True)
    return _data_dir
5,352,603
def get_runtime_brief():
    """A digest version of get_runtime to be used more frequently"""
    return {"cpu_count": multiprocessing.cpu_count()}
5,352,604
def test_get_tasks_n():
    """Test getting a list of tasks per cluster and instance."""
    cluster_name = "test-cluster"
    cluster = initialize_ecs_cluster(cluster_name)
    num_services = 5
    num_instances = 10
    num_tasks = 3
    infrastructure = initialize_ecs_infrastructure(
        cluster,
        num_services=num_services,
        num_instances=num_instances,
        num_tasks=num_tasks,
    )
    services = ecs_data.get_services(cluster_name)
    assert len(services) == num_services
    instances = ecs_data.get_containers_instances(cluster_name)
    assert len(instances) == num_instances
    tasks_cluster = ecs_data.get_tasks_cluster(cluster_name)
    assert len(tasks_cluster) == num_tasks * num_instances
    for instance in infrastructure["instances"]:
        tasks_ins = ecs_data.get_tasks_container_instance(
            cluster_name, instance["containerInstanceArn"]
        )
        assert len(tasks_ins) == num_tasks
5,352,605
def dict_compare(d1, d2):
    """
    Compares all differences between two dicts.
    Returns a tuple of: "added" (keys only in d1), "removed" (keys only in d2),
    "modified" (shared keys with differing values, mapped to a (d1 value,
    d2 value) tuple) and "same" (shared keys with equal values).
    """
    d1_keys = set(d1.keys())
    d2_keys = set(d2.keys())
    intersect_keys = d1_keys.intersection(d2_keys)
    added = d1_keys - d2_keys
    removed = d2_keys - d1_keys
    modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
    same = set(o for o in intersect_keys if d1[o] == d2[o])
    return added, removed, modified, same
5,352,606
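A minimal usage sketch for dict_compare above (added for illustration, not part of the original row):

old = {'a': 1, 'b': 2, 'c': 3}
new = {'b': 2, 'c': 30, 'd': 4}
added, removed, modified, same = dict_compare(old, new)
print(added)     # {'a'}  -- keys only in the first dict
print(removed)   # {'d'}  -- keys only in the second dict
print(modified)  # {'c': (3, 30)}
print(same)      # {'b'}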
def toLocalTime(seconds, microseconds=0):
    """toLocalTime(seconds, microseconds=0) -> datetime

    Converts the given number of seconds since the GPS Epoch (midnight on
    January 6th, 1980) to this computer's local time.  Returns a Python
    datetime object.

    Examples:

    >>> toLocalTime(0)
    datetime.datetime(1980, 1, 6, 0, 0)

    >>> toLocalTime(25 * 86400)
    datetime.datetime(1980, 1, 31, 0, 0)
    """
    delta = datetime.timedelta(seconds=seconds, microseconds=microseconds)
    return GPS_Epoch + delta
5,352,607
def get_part_01_answer():
    """
    Return the answer to Day01.01
    :return: The product result
    :rtype: float
    """
    return prod(summation_equals(puzzle_inputs, 2020, 2))
5,352,608
def eig_of_series(matrices):
    """Returns the eigenvalues and eigenvectors for a series of matrices.

    Parameters
    ----------
    matrices : array_like, shape(n,m,m)
        A series of square matrices.

    Returns
    -------
    eigenvalues : ndarray, shape(n,m)
        The eigenvalues of the matrices.
    eigenvectors : ndarray, shape(n,m,m)
        The eigenvectors of the matrices.
    """
    s = matrices.shape
    # np.complex was removed from NumPy; the builtin complex is equivalent here
    eigenvalues = np.zeros((s[0], s[1]), dtype=complex)
    eigenvectors = np.zeros(s, dtype=complex)
    for i, A in enumerate(matrices):
        eVal, eVec = np.linalg.eig(A)
        eigenvalues[i] = eVal
        eigenvectors[i] = eVec
    return eigenvalues, eigenvectors
5,352,609
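A minimal usage sketch for eig_of_series above (added for illustration, not part of the original row):

import numpy as np

mats = np.array([[[0., -1.], [1., 0.]],
                 [[0.,  1.], [1., 0.]]])
vals, vecs = eig_of_series(mats)
print(vals[0])  # eigenvalues of the first matrix: [0.+1.j, 0.-1.j]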
def config():
    """List and modify configuration parameters"""
    pass
5,352,610
def injectable(
    cls: T = None,
    *,
    qualifier: str = None,
    primary: bool = False,
    namespace: str = None,
    group: str = None,
    singleton: bool = False,
) -> T:
    """
    Class decorator to mark it as an injectable dependency.

    This decorator accepts customization parameters but can be invoked without
    the parenthesis when no parameter will be specified.

    .. note::

        All files using this decorator will be executed when
        :meth:`load_injection_container <injectable.load_injection_container>`
        is invoked.

    :param cls: (cannot be explicitly passed) the decorated class. This will be
        automatically passed to the decorator by Python magic.
    :param qualifier: (optional) string qualifier for the injectable to be
        registered with. Defaults to None.
    :param primary: (optional) marks the injectable as primary for resolution
        in ambiguous cases. Defaults to False.
    :param namespace: (optional) namespace in which the injectable will be
        registered. Defaults to
        :const:`injectable.constants.DEFAULT_NAMESPACE`.
    :param group: (optional) group to be assigned to the injectable. Defaults
        to None.
    :param singleton: (optional) when True the injectable will be a singleton,
        i.e. only one instance of it will be created and shared globally.
        Defaults to False.

    Usage::

      >>> from injectable import injectable
      >>>
      >>> @injectable
      ... class Foo:
      ...     ...
    """

    def decorator(klass: T, direct_call: bool = False) -> T:
        steps_back = 3 if direct_call else 2
        caller_filepath = get_caller_filepath(steps_back)
        if caller_filepath == InjectionContainer.LOADING_FILEPATH:
            InjectionContainer._register_injectable(
                klass, caller_filepath, qualifier, primary, namespace, group, singleton
            )
        return klass

    return decorator(cls, True) if cls is not None else decorator
5,352,611
def build_graph(defined_routes):
    """
    build the graph from route definitions
    """
    G = {}
    for row in defined_routes:
        t_fk_oid = int(row["t_fk_oid"])
        t_pk_oid = int(row["t_pk_oid"])
        if t_fk_oid not in G:
            G[t_fk_oid] = {}
        if t_pk_oid not in G:
            G[t_pk_oid] = {}
        G[t_fk_oid][t_pk_oid] = row["routing_cost"]
        G[t_pk_oid][t_fk_oid] = row["routing_cost"]
    return G
5,352,612
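A minimal usage sketch for build_graph above (added for illustration, not part of the original row):

routes = [
    {"t_fk_oid": 1, "t_pk_oid": 2, "routing_cost": 5.0},
    {"t_fk_oid": 2, "t_pk_oid": 3, "routing_cost": 1.5},
]
G = build_graph(routes)
print(G)  # {1: {2: 5.0}, 2: {1: 5.0, 3: 1.5}, 3: {2: 1.5}}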
def grando_transform_gauss_batch(batch_of_images, mean, variance):
    """Input: batch of images; type: ndarray; size: (batch, 784)
       Output: batch of images with gaussian noise; we use the clip function
       to ensure that the numbers in the matrices belong to the interval (0,1);
       type: ndarray; size: (batch, 784)
    """
    # note: np.random.normal takes the standard deviation as its second argument
    x = batch_of_images + np.random.normal(mean, variance, batch_of_images.shape)
    # clip to (0, 1) as promised by the docstring
    return np.clip(x, 0, 1)
5,352,613
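A minimal usage sketch for grando_transform_gauss_batch above (added for illustration, not part of the original row):

import numpy as np

batch = np.zeros((4, 784))
noisy = grando_transform_gauss_batch(batch, mean=0.0, variance=0.1)
print(noisy.shape, float(noisy.min()) >= 0.0, float(noisy.max()) <= 1.0)  # (4, 784) True True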
def get_poetry_project_version() -> VersionInfo:
    """Run poetry version and get the project version"""
    command = ["poetry", "version"]
    poetry_version_output = subprocess.check_output(command, text=True)
    return version_string_to_version_info(poetry_version_output.split(" ")[1])
5,352,614
def LF_DG_DISTANCE_SHORT(c):
    """
    This LF is designed to make sure that the disease mention
    and the gene mention aren't right next to each other.
    """
    return -1 if len(list(get_between_tokens(c))) <= 2 else 0
5,352,615
def populate_user_flags(conf, args):
    """Populate a dictionary of configuration flag parameters, "conf",
    from values supplied on the command line in the structure, "args"."""
    if args.cflags:
        conf['cflags'] = args.cflags.split(sep=' ')
    if args.ldflags:
        conf['ldflags'] = args.ldflags.split(sep=' ')
    return conf
5,352,616
def set_up():
    """Reset config and state, and prepare a sandbox backtest environment."""
    reset_config()
    config['env']['exchanges'][exchanges.SANDBOX]['fee'] = 0
    config['env']['exchanges'][exchanges.SANDBOX]['starting_balance'] = 1000
    config['app']['trading_mode'] = 'backtest'
    config['app']['considering_exchanges'] = ['Sandbox']
    router.set_routes([(exchanges.SANDBOX, 'BTCUSD', '5m', 'Test19')])
    store.reset(True)
    global position
    global exchange
    global broker
    position = selectors.get_position(exchanges.SANDBOX, 'BTCUSD')
    position.current_price = 50
    exchange = selectors.get_exchange(exchanges.SANDBOX)
    broker = Broker(position, exchanges.SANDBOX, 'BTCUSD', timeframes.MINUTE_5)
5,352,617
def test_base_dense_head_get_bboxes__ncnn():
    """Test get_bboxes rewrite of base dense head."""
    backend_type = Backend.NCNN
    check_backend(backend_type)
    anchor_head = get_anchor_head_model()
    anchor_head.cpu().eval()
    s = 128
    img_metas = [{
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 3)
    }]

    output_names = ['output']
    deploy_cfg = mmcv.Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmdet',
                task='ObjectDetection',
                model_type='ncnn_end2end',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.5,
                    max_output_boxes_per_class=200,
                    pre_top_k=5000,
                    keep_top_k=100,
                    background_label_id=-1,
                ))))

    # the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
    # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
    # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
    # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
    seed_everything(1234)
    cls_score = [
        torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
    ]
    seed_everything(5678)
    bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]

    # to get outputs of onnx model after rewrite
    img_metas[0]['img_shape'] = torch.Tensor([s, s])
    wrapped_model = WrapModel(
        anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
    rewrite_inputs = {
        'cls_scores': cls_score,
        'bbox_preds': bboxes,
    }
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg)

    # output should be of shape [1, N, 6]
    if is_backend_output:
        rewrite_outputs = rewrite_outputs[0]
    assert rewrite_outputs.shape[-1] == 6
5,352,618
def combine_raytrace(input_list):
    """
    Produce a combined output from a list of raytrace outputs.
    """
    profiler.start('combine_raytrace')

    output = dict()
    output['config'] = input_list[0]['config']
    output['total'] = dict()
    output['total']['meta'] = dict()
    output['total']['image'] = dict()
    output['found'] = dict()
    output['found']['meta'] = dict()
    output['found']['history'] = dict()
    output['lost'] = dict()
    output['lost']['meta'] = dict()
    output['lost']['history'] = dict()

    num_iter = len(input_list)
    key_opt_list = list(input_list[0]['total']['meta'].keys())
    key_opt_last = key_opt_list[-1]

    # Combine the meta data.
    for key_opt in key_opt_list:
        output['total']['meta'][key_opt] = dict()
        key_meta_list = list(input_list[0]['total']['meta'][key_opt].keys())
        for key_meta in key_meta_list:
            output['total']['meta'][key_opt][key_meta] = 0
            for ii_iter in range(num_iter):
                output['total']['meta'][key_opt][key_meta] += \
                    input_list[ii_iter]['total']['meta'][key_opt][key_meta]

    # Combine the images.
    for key_opt in key_opt_list:
        if key_opt in input_list[0]['total']['image']:
            if input_list[0]['total']['image'][key_opt] is not None:
                output['total']['image'][key_opt] = np.zeros(
                    input_list[0]['total']['image'][key_opt].shape)
                for ii_iter in range(num_iter):
                    output['total']['image'][key_opt] += \
                        input_list[ii_iter]['total']['image'][key_opt]
            else:
                output['total']['image'][key_opt] = None

    # Combine all the histories.
    if len(input_list[0]['found']['history']) > 0:
        final_num_found = 0
        final_num_lost = 0
        for ii_run in range(num_iter):
            final_num_found += len(
                input_list[ii_run]['found']['history'][key_opt_last]['mask'])
            final_num_lost += len(
                input_list[ii_run]['lost']['history'][key_opt_last]['mask'])

        rays_found_temp = RayArray()
        rays_found_temp.zeros(final_num_found)
        rays_lost_temp = RayArray()
        rays_lost_temp.zeros(final_num_lost)

        for key_opt in key_opt_list:
            output['found']['history'][key_opt] = rays_found_temp.copy()
            output['lost']['history'][key_opt] = rays_lost_temp.copy()

        index_found = 0
        index_lost = 0
        for ii_run in range(num_iter):
            num_found = len(
                input_list[ii_run]['found']['history'][key_opt_last]['mask'])
            num_lost = len(
                input_list[ii_run]['lost']['history'][key_opt_last]['mask'])
            for key_opt in key_opt_list:
                for key_ray in output['found']['history'][key_opt]:
                    output['found']['history'][key_opt][key_ray][
                        index_found:index_found + num_found] = (
                        input_list[ii_run]['found']['history'][key_opt][key_ray][:])
                    output['lost']['history'][key_opt][key_ray][
                        index_lost:index_lost + num_lost] = (
                        input_list[ii_run]['lost']['history'][key_opt][key_ray][:])
            index_found += num_found
            index_lost += num_lost

    profiler.stop('combine_raytrace')
    return output
5,352,619
def blank_line_split(seq):
    """
    Group subsequences of the given string sequence, split by blank lines.
    """
    group = []
    for line in seq:
        if len(line) == 0:
            # Ignore consecutive blank lines (empty group)
            if len(group):
                yield group
                group = []
        else:
            group.append(line)
    # Yield the final group
    if len(group):
        yield group
5,352,620
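A minimal usage sketch for blank_line_split above (added for illustration, not part of the original row):

lines = ["a", "b", "", "", "c", "d", "", "e"]
print(list(blank_line_split(lines)))  # [['a', 'b'], ['c', 'd'], ['e']]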
def is_dict():
    """Expects any dictionary"""
    return TypeMatcher(dict)
5,352,621
def init_dmriprep_wf():
    """
    Build *dMRIPrep*'s pipeline.

    This workflow organizes the execution of *dMRIPrep*, with a sub-workflow
    for each subject. If FreeSurfer's recon-all is to be run, a FreeSurfer
    derivatives folder is created and populated with any needed template
    subjects.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from dmriprep.config.testing import mock_config
            from dmriprep.workflows.base import init_dmriprep_wf
            with mock_config():
                wf = init_dmriprep_wf()

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSFreeSurferDir

    ver = Version(config.environment.version)

    dmriprep_wf = Workflow(name=f"dmriprep_{ver.major}_{ver.minor}_wf")
    dmriprep_wf.base_dir = config.execution.work_dir

    freesurfer = config.workflow.run_reconall
    if freesurfer:
        fsdir = pe.Node(
            BIDSFreeSurferDir(
                derivatives=config.execution.output_dir,
                freesurfer_home=os.getenv("FREESURFER_HOME"),
                spaces=config.workflow.spaces.get_fs_spaces(),
            ),
            name=f"fsdir_run_{config.execution.run_uuid.replace('-', '_')}",
            run_without_submitting=True,
        )
        if config.execution.fs_subjects_dir is not None:
            fsdir.inputs.subjects_dir = str(
                config.execution.fs_subjects_dir.absolute()
            )

    for subject_id in config.execution.participant_label:
        single_subject_wf = init_single_subject_wf(subject_id)

        single_subject_wf.config["execution"]["crashdump_dir"] = str(
            Path(config.execution.dmriprep_dir)
            / f"sub-{subject_id}"
            / "log"
            / config.execution.run_uuid
        )
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            dmriprep_wf.connect(
                fsdir,
                "subjects_dir",
                single_subject_wf,
                "fsinputnode.subjects_dir",
            )
        else:
            dmriprep_wf.add_nodes([single_subject_wf])

        # Dump a copy of the config file into the log directory
        log_dir = (
            Path(config.execution.dmriprep_dir)
            / f"sub-{subject_id}"
            / "log"
            / config.execution.run_uuid
        )
        log_dir.mkdir(exist_ok=True, parents=True)
        config.to_filename(log_dir / "dmriprep.toml")

    return dmriprep_wf
5,352,622
def test_external_query_pure(bq, gcs_data, gcs_external_config, dest_dataset,
                             dest_table, mock_env):
    """tests the basic external query ingestion mechanics
    with bq_transform.sql and external.json
    """
    if not gcs_data.exists():
        raise google.cloud.exceptions.NotFound("test data objects must exist")
    if not all((blob.exists() for blob in gcs_external_config)):
        raise google.cloud.exceptions.NotFound("config objects must exist")
    test_event = {
        "attributes": {
            "bucketId": gcs_data.bucket.name,
            "objectId": gcs_data.name
        }
    }
    gcs_ocn_bq_ingest.main.main(test_event, None)
    test_data_file = os.path.join(TEST_DIR, "resources", "test-data", "nation",
                                  "part-m-00001")
    expected_num_rows = sum(1 for _ in open(test_data_file))
    bq_wait_for_rows(bq, dest_table, expected_num_rows)
5,352,623
def get_list_from_comma_separated_string(comma_separated_list):
    """
    get a python list of resource names from comma separated list
    :param str comma_separated_list:
    :return:
    """
    import re
    # remove all extra whitespace after commas and before/after string
    # but NOT in between resource names
    removed_whitespace_str = re.sub(r"(,\s+)", ",", comma_separated_list).strip()
    resource_names = removed_whitespace_str.split(",")
    return resource_names
5,352,624
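A minimal usage sketch for get_list_from_comma_separated_string above (added for illustration, not part of the original row):

names = get_list_from_comma_separated_string("queue one,  topic two , bucket")
print(names)  # ['queue one', 'topic two ', 'bucket'] -- spaces *before* a comma are kept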
def _set_eeg_montage(cfg, raw, subject, session, run) -> None:
    """Set an EEG template montage if requested.

    Modifies ``raw`` in-place.
    """
    montage = cfg.eeg_template_montage
    is_mne_montage = isinstance(montage, mne.channels.montage.DigMontage)
    montage_name = 'custom_montage' if is_mne_montage else montage
    if cfg.datatype == 'eeg' and montage:
        msg = (f'Setting EEG channel locations to template montage: '
               f'{montage}.')
        logger.info(**gen_log_kwargs(
            message=msg, subject=subject, session=session, run=run))
        if not is_mne_montage:
            montage = mne.channels.make_standard_montage(montage_name)
        raw.set_montage(montage, match_case=False, on_missing='warn')
5,352,625
def cci(series, window=14):
    """
    compute commodity channel index
    """
    price = typical_price(series)
    typical_mean = rolling_mean(price, window)
    res = (price - typical_mean) / (.015 * np.std(typical_mean))
    return pd.Series(index=series.index, data=res)
5,352,626
def load_definition_from_string(qualified_module, cache=True):
    """Load a definition based on a fully qualified string.

    Returns:
        None or the loaded object

    Example:

    .. code-block:: python

        definition = load_definition_from_string('watson.http.messages.Request')
        request = definition()
    """
    if qualified_module in definition_lookup and cache:
        return definition_lookup[qualified_module]
    parts = qualified_module.split('.')
    try:
        module = import_module('.'.join(parts[:-1]))
        obj = getattr(module, parts[-1])
        definition_lookup[qualified_module] = obj
        return obj
    except ImportError:
        raise
5,352,627
def get_session(auth_mechanism, username, password, host):
    """Takes a username, password and authentication mechanism, logs into
    ICAT and returns a session ID"""
    # The ICAT Rest API does not accept json in the body of the HTTP request.
    # Instead it takes the form parameter 'json' with a string value - which is
    # the json-encoded data - eurrgh! The json-encoded data is sensitive to
    # order so we cannot pass a Python dictionary to the requests.post call as
    # Python dictionaries do not preserve order - eurrgh! So we construct a
    # string with the json data in the correct order - an OrderedDict may work
    # here - untested. (Also, dictionaries preserve order in Python 3.something)
    form_data = {'json': '{"plugin": "' + auth_mechanism +
                 '", "credentials":[{"username":"' + username +
                 '"}, {"password":"' + password + '"}]}'}
    session_url = host + "/icat/session"
    response = requests.post(session_url, data=form_data)
    if response.ok:
        return response.json()['sessionId']
    else:
        logging.critical("Failed to get a session ID. Exiting.")
        log_response(response)
        raise RuntimeError()
5,352,628
def _create_all_aux_operators(num_modals: List[int]) -> List[VibrationalOp]:
    """Generates the common auxiliary operators out of the given WatsonHamiltonian.

    Args:
        num_modals: the number of modals per mode.

    Returns:
        A list of VibrationalOps. For each mode the number of occupied modals
        will be evaluated.
    """
    aux_second_quantized_ops_list = []
    for mode in range(len(num_modals)):
        aux_second_quantized_ops_list.append(_create_occ_modals_per_mode(num_modals, mode))
    return aux_second_quantized_ops_list
5,352,629
def security(session: Session) -> None:
    """Check security safety."""
    args = session.posargs or []
    session.install("safety")
    session.run("safety", "check", "-r", "workflow/envs/security.txt", *args)
5,352,630
def add_conv(X: tf.Tensor,
             filters: List[int],
             kernel_sizes: List[int],
             output_n_filters: int) -> tf.Tensor:
    """
    Builds a single convolutional layer.

    :param X: input layer.
    :param filters: number of output filters in the convolution.
    :param kernel_sizes: list of lengths of the 1D convolution window.
    :param output_n_filters: number of 1D output filters.
    :return: output layer.
    """
    # normalize the input
    X = BatchNormalization()(X)

    # add convolutions
    convs = []
    for n_filters, kernel_size in zip(filters, kernel_sizes):
        conv = Conv1D(filters=n_filters,
                      kernel_size=kernel_size,
                      padding="same",
                      activation="relu")
        convs.append(conv(X))

    # concatenate all convolutions
    conc = Concatenate(axis=-1)(convs)
    conc = BatchNormalization()(conc)

    # dimensionality reduction
    conv = Conv1D(filters=output_n_filters,
                  kernel_size=1,
                  padding="same",
                  activation="relu")
    return conv(conc)
5,352,631
def show_toast(view, message, timeout=DEFAULT_TIMEOUT, style=DEFAULT_STYLE):
    # type: (sublime.View, str, int, Dict[str, str]) -> Callable[[], None]
    """Show a toast popup at the bottom of the view.

    A timeout of -1 makes a "sticky" toast.
    """
    messages_by_line = escape_text(message).splitlines()
    content = style_message("<br />".join(messages_by_line), style)

    # Order can matter here.  If we calc width *after* visible_region we get
    # different results!
    width, _ = view.viewport_extent()
    visible_region = view.visible_region()
    last_row, _ = view.rowcol(visible_region.end())
    line_start = view.text_point(last_row - 4 - len(messages_by_line), 0)
    vid = view.id()
    key = IDS()

    def on_hide(vid, key):
        if HIDE_POPUP_TIMERS.get(vid) == key:
            HIDE_POPUP_TIMERS.pop(vid, None)

    def __hide_popup(vid, key, sink):
        if HIDE_POPUP_TIMERS.get(vid) == key:
            HIDE_POPUP_TIMERS.pop(vid, None)
            sink()

    inner_hide_popup = show_popup(
        view,
        content,
        max_width=width * 2 / 3,
        location=line_start,
        on_hide=partial(on_hide, vid, key)
    )
    HIDE_POPUP_TIMERS[vid] = key

    hide_popup = partial(__hide_popup, vid, key, inner_hide_popup)
    if timeout > 0:
        sublime.set_timeout(hide_popup, timeout)

    return hide_popup
5,352,632
def flip_metropolised_gibbs_numba_classic(p, z):
    """
    Given the *probability* of z=1 flip z according to metropolised Gibbs
    """
    if z == 1:
        if p <= .5:
            return -z  # alpha = 1; TODO, can return -x here
        else:
            alpha = (1 - p) / p
    else:
        if p >= .5:
            return -z  # alpha = 1
        else:
            alpha = p / (1 - p)
    if np.random.rand() < alpha:
        return -z
    else:
        return z
5,352,633
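A minimal usage sketch for flip_metropolised_gibbs_numba_classic above (added for illustration, not part of the original row; np is assumed to be numpy as in the function body):

import numpy as np

np.random.seed(0)
p, z = 0.8, -1
samples = []
for _ in range(10000):
    z = flip_metropolised_gibbs_numba_classic(p, z)
    samples.append(z)
print(np.mean(np.array(samples) == 1))  # empirical frequency of z=1, roughly 0.8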
def get_default_interpreter():
    """Returns an instance of the default interpreter class."""
    return __default_interpreter.get()
5,352,634
def fx_ugoira_frames():
    """frames data."""
    return {
        '000000.jpg': 1000,
        '000001.jpg': 2000,
        '000002.jpg': 3000,
    }
5,352,635
def handle_tododone(bot, ievent):
    """ t-done <listofnrs> .. remove todo items """
    if len(ievent.args) == 0:
        ievent.missing('<list of nrs>')
        return
    try:
        nrs = []
        for i in ievent.args:
            nrs.append(int(i))
    except ValueError:
        ievent.reply('%s is not an integer' % i)
        return
    name = getusers().getname(ievent.userhost)
    nrdone = 0
    for i in nrs:
        nrdone += todo.delete(name, i)
    if nrdone == 1:
        ievent.reply('%s item deleted' % nrdone)
    elif nrdone == 0:
        ievent.reply('no items deleted')
    else:
        ievent.reply('%s items deleted' % nrdone)
5,352,636
def coherent_tmm(pol, n_list, d_list, th_0, lam_vac):
    """
    This is my slightly modified version of byrnes's "coh_tmm".
    I've rearranged the calculations in a way that is more intuitive to me.

    Example inputs:
    For angle dependence, be careful to include air first, otherwise the angle will be wrong

    layers = ['Air', 'SiO2', 'ITO', 'PEDOT', 'TCTA', 'TCTA-tpbi-Irppy', 'tpbi', 'Al', 'Air']
    doping = [1, 1, 1, 1, 1, [0.475, 0.475, 0.05], 1, 1, 1]
    d_list = np.array([0, 0, 100, 70, 20, 60, 20, 100, 0])
    n_list = load_nk(layers, doping, wavelength_nm, df_nk)

    Assign a thickness of 0 to incoherent layers (air, substrate)

    Notes from byrnes:

    Main "coherent transfer matrix method" calc. Given parameters of a stack,
    calculates everything you could ever want to know about how light
    propagates in it. (If performance is an issue, you can delete some of the
    calculations without affecting the rest.)

    pol is light polarization, "s" or "p".

    n_list is the list of refractive indices, in the order that the light would
    pass through them. The 0'th element of the list should be the semi-infinite
    medium from which the light enters, the last element should be the semi-
    infinite medium to which the light exits (if any exits).

    th_0 is the angle of incidence: 0 for normal, pi/2 for glancing. Remember,
    for a dissipative incoming medium (n_list[0] is not real), th_0 should be
    complex so that n0 sin(th0) is real (intensity is constant as a function of
    lateral position).

    d_list is the list of layer thicknesses (front to back). Should correspond
    one-to-one with elements of n_list. First and last elements should be "inf".

    lam_vac is vacuum wavelength of the light.

    Outputs the following as a dictionary (see manual for details)

    * r--reflection amplitude
    * t--transmission amplitude
    * R--reflected wave power (as fraction of incident)
    * T--transmitted wave power (as fraction of incident)
    * power_entering--Power entering the first layer, usually (but not always)
      equal to 1-R (see manual).
    * vw_list-- n'th element is [v_n,w_n], the forward- and backward-traveling
      amplitudes, respectively, in the n'th medium just after interface with
      (n-1)st medium.
    * kz_list--normal component of complex angular wavenumber for
      forward-traveling wave in each layer.
    * th_list--(complex) propagation angle (in radians) in each layer
    * pol, n_list, d_list, th_0, lam_vac--same as input
    """
    # Convert to numpy arrays if not already
    n_list = np.array(n_list)
    d_list = np.array(d_list)

    # Input tests
    if ((hasattr(lam_vac, 'size') and lam_vac.size > 1)
            or (hasattr(th_0, 'size') and th_0.size > 1)):
        raise ValueError('This function is not vectorized; you need to run one '
                         'calculation at a time (1 wavelength, 1 angle, etc.)')
    if (n_list.ndim != 1) or (d_list.ndim != 1) or (n_list.size != d_list.size):
        raise ValueError("Problem with n_list or d_list!")
    # assert d_list[0] == d_list[-1] == inf, 'd_list must start and end with inf!'
    assert abs((n_list[0]*np.sin(th_0)).imag) < 100*EPSILON, 'Error in n0 or th0!'
    assert is_forward_angle(n_list[0], th_0), 'Error in n0 or th0!'

    # using a mix of notation from byrnes and pettersson
    # because pettersson's notation is often garbage
    num_layers = n_list.size
    n0 = n_list[0]
    cosTheta_list = sqrt(1 - (n0/n_list)**2 * sin(th_0)**2)
    th_list = list_snell(n_list, th_0)
    sinTheta_list = (n0/n_list)**2 * sin(th_0)**2
    kz_list = 2 * pi / lam_vac * n_list * cosTheta_list
    delta = kz_list * d_list

    t_list = zeros((num_layers, num_layers), dtype=complex)
    r_list = zeros((num_layers, num_layers), dtype=complex)
    I_list = zeros((num_layers, 2, 2), dtype=complex)
    L_list = zeros((num_layers, 2, 2), dtype=complex)
    M_list = zeros((num_layers, 2, 2), dtype=complex)
    Mtilde = make_2x2_array(1, 0, 0, 1, dtype=complex)

    for j in range(0, num_layers-1):
        # t and r are shared notation for pettersson and byrnes
        t_list[j, j+1] = interface_t_cos(pol, n_list[j], n_list[j+1],
                                         cosTheta_list[j], cosTheta_list[j+1])
        r_list[j, j+1] = interface_r_cos(pol, n_list[j], n_list[j+1],
                                         cosTheta_list[j], cosTheta_list[j+1])
        # interface matrix, eqn. 1 pettersson
        I_list[j] = 1/t_list[j, j+1] * make_2x2_array(1, r_list[j, j+1],
                                                      r_list[j, j+1], 1,
                                                      dtype=complex)
        # M and L are not defined for the 0th layer
        # i.e. the substrate or ambient is incoherent
        if j == 0:
            # Pre-factor in byrnes eqn 13
            Mtilde = np.dot(I_list[j], Mtilde)
        if j > 0:
            # Layer matrix (phase matrix), eqn. 5 pettersson
            L_list[j] = make_2x2_array(exp(-1j*delta[j]), 0,
                                       0, exp(1j*delta[j]), dtype=complex)
            # M matrix (byrnes eqn. 11)
            M_list[j] = np.dot(L_list[j], I_list[j])
            # Mtilde byrnes eqn. 13
            Mtilde = np.dot(Mtilde, M_list[j])

    # Net complex transmission and reflection amplitudes
    # byrnes eqn 15, petterson eqns 9-10
    r = Mtilde[1, 0]/Mtilde[0, 0]
    t = 1/Mtilde[0, 0]

    # Construct list of forward and backward amplitudes (byrnes eqn 10)
    # vw_list[n] = [v_n, w_n]. v_0 and w_0 are undefined because the 0th medium
    # has no left interface.
    vw_list = zeros((num_layers, 2), dtype=complex)
    v_list = zeros((num_layers, 1), dtype=complex)
    w_list = zeros((num_layers, 1), dtype=complex)

    # Final layer v and w, Transmitted amplitude, assuming no back illumination
    vw = array([[t], [0]])
    vw_list[-1, :] = np.transpose(vw)
    for i in range(num_layers-2, 0, -1):
        vw = np.dot(M_list[i], vw)
        v_list[i] = vw[0]
        w_list[i] = vw[1]
        vw_list[i, :] = np.transpose(vw)
    # Assuming incident intensity is 1
    vw = array([[1], [r]])
    vw_list[0, :] = np.transpose(vw)

    # Net transmitted and reflected power, as a proportion of the incoming
    # light power.
    R = R_from_r(r)
    T = T_from_t(pol, t, n_list[0], n_list[-1], th_0, th_list[-1])
    power_entering = power_entering_from_r(pol, r, n_list[0], th_0)
    th_list = 0

    return {'r': r, 't': t, 'R': R, 'T': T, 'power_entering': power_entering,
            'vw_list': vw_list, 'kz_list': kz_list, 'th_list': th_list,
            'pol': pol, 'n_list': n_list, 'd_list': d_list, 'th_0': th_0,
            'lam_vac': lam_vac, 'M_list': M_list, 't_list': t_list,
            'r_list': r_list, 'Mtilde': Mtilde, 'I_list': I_list,
            'L_list': L_list}
5,352,637
def direct_pm25(ds):
    """
    Add to dataset the pm2.5 calculated directly from WRF-Chem output
    variables. The summation follows the calculation in the WRF-Chem module
    module_mosaic_sumpm.F, subroutine sum_pm_mosaic_vbs4.

    :param ds: WRF-Chem output.
    :type ds: xarray DataSet.
    :return: Dataset with added tot pm25.
    :rtype: xarray DataSet.
    """
    ds["pm25_dir_tot"] = (
        (ds.so4_a01 + ds.so4_a02 + ds.so4_a03)/ds.ALT +
        (ds.nh4_a01 + ds.nh4_a02 + ds.nh4_a03)/ds.ALT +
        (ds.no3_a01 + ds.no3_a02 + ds.no3_a03)/ds.ALT +
        (ds.bc_a01 + ds.bc_a02 + ds.bc_a03)/ds.ALT +
        (ds.oc_a01 + ds.oc_a02 + ds.oc_a03)/ds.ALT +
        (ds.glysoa_r1_a01 + ds.glysoa_r1_a02 + ds.glysoa_r1_a03)/ds.ALT +
        (ds.glysoa_r2_a01 + ds.glysoa_r2_a02 + ds.glysoa_r2_a03)/ds.ALT +
        (ds.glysoa_oh_a01 + ds.glysoa_oh_a02 + ds.glysoa_oh_a03)/ds.ALT +
        (ds.glysoa_sfc_a01 + ds.glysoa_sfc_a02 + ds.glysoa_sfc_a03)/ds.ALT +
        (ds.glysoa_nh4_a01 + ds.glysoa_nh4_a02 + ds.glysoa_nh4_a03)/ds.ALT +
        (ds.oin_a01 + ds.oin_a02 + ds.oin_a03)/ds.ALT +
        (ds.na_a01 + ds.na_a02 + ds.na_a03)/ds.ALT +
        (ds.cl_a01 + ds.cl_a02 + ds.cl_a03)/ds.ALT +
        (ds.asoaX_a01 + ds.asoaX_a02 + ds.asoaX_a03)/ds.ALT +
        (ds.asoa1_a01 + ds.asoa1_a02 + ds.asoa1_a03)/ds.ALT +
        (ds.asoa2_a01 + ds.asoa2_a02 + ds.asoa2_a03)/ds.ALT +
        (ds.asoa3_a01 + ds.asoa3_a02 + ds.asoa3_a03)/ds.ALT +
        (ds.asoa4_a01 + ds.asoa4_a02 + ds.asoa4_a03)/ds.ALT +
        (ds.bsoaX_a01 + ds.bsoaX_a02 + ds.bsoaX_a03)/ds.ALT +
        (ds.bsoa1_a01 + ds.bsoa1_a02 + ds.bsoa1_a03)/ds.ALT +
        (ds.bsoa2_a01 + ds.bsoa2_a02 + ds.bsoa2_a03)/ds.ALT +
        (ds.bsoa3_a01 + ds.bsoa3_a02 + ds.bsoa3_a03)/ds.ALT +
        (ds.bsoa4_a01 + ds.bsoa4_a02 + ds.bsoa4_a03)/ds.ALT)
    # the docstring promises the dataset back, so return it explicitly
    return ds
5,352,638
def base_info():
    """
    Display and modify basic profile information.

    1. Try to get the user info.
    2. For a GET request, return the user info to the template.
       For a POST request:
       1. Get the parameters: nick_name, signature, gender [MAN, WOMAN]
       2. Check the parameters are complete
       3. Check that gender is within the allowed range
       4. Save the user info
       5. Commit the data
       6. Update nick_name in the redis cache
          Registration: session['nick_name'] = mobile
          Login:        session['nick_name'] = user.nick_name
          Modification: session['nick_name'] = nick_name
       7. Return the result
    :return:
    """
    user = g.user
    if request.method == 'GET':
        data = {
            'user': user.to_dict()
        }
        return render_template('blogs/user_base_info.html', data=data)
    # Get the parameters
    nick_name = request.json.get('nick_name')
    signature = request.json.get('signature')
    gender = request.json.get('gender')
    # Check the parameters
    if not all([nick_name, signature, gender]):
        return jsonify(errno=RET.PARAMERR, errmsg='missing parameters')
    # Validate the gender parameter range
    if gender not in ['MAN', 'WOMAN']:
        return jsonify(errno=RET.PARAMERR, errmsg='parameter out of range')
    # Save the user info
    user.nick_name = nick_name
    user.signature = signature
    user.gender = gender
    # Commit the data
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg='failed to save data')
    # Update the user info in the redis cache
    session['nick_name'] = nick_name
    # Return the result
    return jsonify(errno=RET.OK, errmsg='OK')
5,352,639
def build_mode(arg_namespace):
    """Check command line arguments and run build function."""
    custom_temps = arg_namespace.template or []
    temp_paths = [rel_to_cwd("templates", temp) for temp in custom_temps]

    try:
        result = builder.build(
            templates=temp_paths,
            schemes=arg_namespace.scheme,
            base_output_dir=arg_namespace.output,
            verbose=arg_namespace.verbose,
        )
        # return with exit code 2 if there were any non-fatal incidents during
        # the build
        sys.exit(0 if result else 2)
    except (LookupError, PermissionError) as exception:
        if isinstance(exception, LookupError):
            err_print(
                "Necessary resources for building not found in current "
                "working directory."
            )
        if isinstance(exception, PermissionError):
            err_print("Lacking necessary access permissions for output directory.")
5,352,640
def to_list(obj):
    """JSON serialization helper: convert a NumPy array to a plain list."""
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    raise TypeError('Not serializable')
5,352,641
def symbolic_expression(x):
    """
    Create a symbolic expression or vector of symbolic expressions from x.

    INPUT:

    - ``x`` - an object

    OUTPUT:

    - a symbolic expression.

    EXAMPLES::

        sage: a = symbolic_expression(3/2); a
        3/2
        sage: type(a)
        <type 'sage.symbolic.expression.Expression'>
        sage: R.<x> = QQ[]; type(x)
        <type 'sage.rings.polynomial.polynomial_rational_flint.Polynomial_rational_flint'>
        sage: a = symbolic_expression(2*x^2 + 3); a
        2*x^2 + 3
        sage: type(a)
        <type 'sage.symbolic.expression.Expression'>
        sage: from sage.symbolic.expression import is_Expression
        sage: is_Expression(a)
        True
        sage: a in SR
        True
        sage: a.parent()
        Symbolic Ring

    Note that equations exist in the symbolic ring::

        sage: E = EllipticCurve('15a'); E
        Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field
        sage: symbolic_expression(E)
        x*y + y^2 + y == x^3 + x^2 - 10*x - 10
        sage: symbolic_expression(E) in SR
        True

    If ``x`` is a list or tuple, create a vector of symbolic expressions::

        sage: v=symbolic_expression([x,1]); v
        (x, 1)
        sage: v.base_ring()
        Symbolic Ring
        sage: v=symbolic_expression((x,1)); v
        (x, 1)
        sage: v.base_ring()
        Symbolic Ring
        sage: v=symbolic_expression((3,1)); v
        (3, 1)
        sage: v.base_ring()
        Symbolic Ring
        sage: E = EllipticCurve('15a'); E
        Elliptic Curve defined by y^2 + x*y + y = x^3 + x^2 - 10*x - 10 over Rational Field
        sage: v=symbolic_expression([E,E]); v
        (x*y + y^2 + y == x^3 + x^2 - 10*x - 10, x*y + y^2 + y == x^3 + x^2 - 10*x - 10)
        sage: v.base_ring()
        Symbolic Ring

    If ``x`` is a function, for example defined by a ``lambda`` expression,
    create a symbolic function::

        sage: f = symbolic_expression(lambda z: z^2 + 1); f
        z |--> z^2 + 1
        sage: f.parent()
        Callable function ring with argument z
        sage: f(7)
        50

    If ``x`` is a list or tuple of functions, or if ``x`` is a function that
    returns a list or tuple, create a callable symbolic vector::

        sage: symbolic_expression([lambda mu, nu: mu^2 + nu^2, lambda mu, nu: mu^2 - nu^2])
        (mu, nu) |--> (mu^2 + nu^2, mu^2 - nu^2)
        sage: f = symbolic_expression(lambda uwu: [1, uwu, uwu^2]); f
        uwu |--> (1, uwu, uwu^2)
        sage: f.parent()
        Vector space of dimension 3 over Callable function ring with argument uwu
        sage: f(5)
        (1, 5, 25)
        sage: f(5).parent()
        Vector space of dimension 3 over Symbolic Ring

    TESTS:

    Also functions defined using ``def`` can be used, but we do not advertise
    it as a use case::

        sage: def sos(x, y):
        ....:     return x^2 + y^2
        sage: symbolic_expression(sos)
        (x, y) |--> x^2 + y^2

    Functions that take a varying number of arguments or keyword-only
    arguments are not accepted::

        sage: def variadic(x, *y):
        ....:     return x
        sage: symbolic_expression(variadic)
        Traceback (most recent call last):
        ...
        TypeError: unable to convert <function variadic at 0x...> to a symbolic expression

        sage: def function_with_keyword_only_arg(x, *, sign=1):
        ....:     return sign * x
        sage: symbolic_expression(function_with_keyword_only_arg)
        Traceback (most recent call last):
        ...
        TypeError: unable to convert <function function_with_keyword_only_arg at 0x...>
        to a symbolic expression
    """
    from sage.symbolic.expression import Expression
    from sage.symbolic.ring import SR

    if isinstance(x, Expression):
        return x
    elif hasattr(x, '_symbolic_'):
        return x._symbolic_(SR)
    elif isinstance(x, (tuple, list)):
        return vector([symbolic_expression(item) for item in x])
    elif callable(x):
        from inspect import signature, Parameter
        try:
            s = signature(x)
        except ValueError:
            pass
        else:
            if all(param.kind in (Parameter.POSITIONAL_ONLY,
                                  Parameter.POSITIONAL_OR_KEYWORD)
                   for param in s.parameters.values()):
                vars = [SR.var(name) for name in s.parameters.keys()]
                result = x(*vars)
                if isinstance(result, (tuple, list)):
                    return vector(SR, result).function(*vars)
                else:
                    return SR(result).function(*vars)
    return SR(x)
5,352,642
def seq_hyphentation(words):
    """
    Converts words in a list of strings into lists of syllables

    :param words: a list of words (strings)
    :return: a list of lists containing word syllables
    """
    return [hyphenation(w) for w in words]
5,352,643
def to_ufo_background_image(self, ufo_glyph, layer):
    """Copy the background image from the GSLayer to the UFO Glyph."""
    image = layer.backgroundImage
    if image is None:
        return
    ufo_image = ufo_glyph.image
    ufo_image.fileName = image.path
    ufo_image.transformation = image.transform
    ufo_glyph.lib[CROP_KEY] = list(image.crop)
    ufo_glyph.lib[LOCKED_KEY] = image.locked
    ufo_glyph.lib[ALPHA_KEY] = image.alpha
5,352,644
def angle(A, B, dim=1):
    """
    Computes the angle in radians between the inputs along the specified dimension

    Parameters
    ----------
    A : Tensor
        first input tensor
    B : Tensor
        second input tensor
    dim : int (optional)
        dimension along which the angle is computed (default is 1)

    Returns
    -------
    Tensor
        the tensor containing the angle between the inputs
    """
    return acos(clamp(dot(A, B, dim=dim), -1, 1))
5,352,645
def _run_lint_helper(
        *, fail_on_missing_sub_src, exclude_lint, warn_lint, site_name=None):
    """Helper for executing lint on specific site or all sites in repo."""
    if site_name:
        func = functools.partial(engine.lint.site, site_name=site_name)
    else:
        func = engine.lint.full
    warns = func(
        fail_on_missing_sub_src=fail_on_missing_sub_src,
        exclude_lint=exclude_lint,
        warn_lint=warn_lint)
    return warns
5,352,646
def simulate_from_orders_nb(target_shape: tp.Shape,
                            group_lens: tp.Array1d,
                            init_cash: tp.Array1d,
                            call_seq: tp.Array2d,
                            size: tp.ArrayLike = np.asarray(np.inf),
                            price: tp.ArrayLike = np.asarray(np.inf),
                            size_type: tp.ArrayLike = np.asarray(SizeType.Amount),
                            direction: tp.ArrayLike = np.asarray(Direction.Both),
                            fees: tp.ArrayLike = np.asarray(0.),
                            fixed_fees: tp.ArrayLike = np.asarray(0.),
                            slippage: tp.ArrayLike = np.asarray(0.),
                            min_size: tp.ArrayLike = np.asarray(0.),
                            max_size: tp.ArrayLike = np.asarray(np.inf),
                            size_granularity: tp.ArrayLike = np.asarray(np.nan),
                            reject_prob: tp.ArrayLike = np.asarray(0.),
                            lock_cash: tp.ArrayLike = np.asarray(False),
                            allow_partial: tp.ArrayLike = np.asarray(True),
                            raise_reject: tp.ArrayLike = np.asarray(False),
                            log: tp.ArrayLike = np.asarray(False),
                            val_price: tp.ArrayLike = np.asarray(np.inf),
                            close: tp.ArrayLike = np.asarray(np.nan),
                            auto_call_seq: bool = False,
                            ffill_val_price: bool = True,
                            update_value: bool = False,
                            max_orders: tp.Optional[int] = None,
                            max_logs: int = 0,
                            flex_2d: bool = True) -> tp.Tuple[tp.RecordArray, tp.RecordArray]:
    """Creates an order out of each element.

    Iterates in the column-major order. Utilizes flexible broadcasting.

    !!! note
        Should be only grouped if cash sharing is enabled.

        If `auto_call_seq` is True, make sure that `call_seq` follows `CallSeqType.Default`.

        Single value should be passed as a 0-dim array (for example, by using `np.asarray(value)`).

    Usage:
        * Buy and hold using all cash and closing price (default):

        ```pycon
        >>> import numpy as np
        >>> from vectorbt.records.nb import col_map_nb
        >>> from vectorbt.portfolio.nb import simulate_from_orders_nb, asset_flow_nb
        >>> from vectorbt.portfolio.enums import Direction

        >>> close = np.array([1, 2, 3, 4, 5])[:, None]
        >>> order_records, _ = simulate_from_orders_nb(
        ...     target_shape=close.shape,
        ...     close=close,
        ...     group_lens=np.array([1]),
        ...     init_cash=np.array([100]),
        ...     call_seq=np.full(close.shape, 0)
        ... )
        >>> col_map = col_map_nb(order_records['col'], close.shape[1])
        >>> asset_flow = asset_flow_nb(close.shape, order_records, col_map, Direction.Both)
        >>> asset_flow
        array([[100.],
               [  0.],
               [  0.],
               [  0.],
               [  0.]])
        ```
    """
    check_group_lens_nb(group_lens, target_shape[1])
    cash_sharing = is_grouped_nb(group_lens)
    check_group_init_cash_nb(group_lens, target_shape[1], init_cash, cash_sharing)

    order_records, log_records = init_records_nb(target_shape, max_orders, max_logs)
    init_cash = init_cash.astype(np.float_)
    last_position = np.full(target_shape[1], 0., dtype=np.float_)
    last_debt = np.full(target_shape[1], 0., dtype=np.float_)
    last_val_price = np.full(target_shape[1], np.nan, dtype=np.float_)
    order_price = np.full(target_shape[1], np.nan, dtype=np.float_)
    temp_order_value = np.empty(target_shape[1], dtype=np.float_)
    oidx = 0
    lidx = 0

    from_col = 0
    for group in range(len(group_lens)):
        to_col = from_col + group_lens[group]
        group_len = to_col - from_col
        cash_now = init_cash[group]
        free_cash_now = init_cash[group]

        for i in range(target_shape[0]):
            for k in range(group_len):
                col = from_col + k

                # Resolve order price
                _price = flex_select_auto_nb(price, i, col, flex_2d)
                if np.isinf(_price):
                    if _price > 0:
                        _price = flex_select_auto_nb(close, i, col, flex_2d)  # upper bound is close
                    elif i > 0:
                        _price = flex_select_auto_nb(close, i - 1, col, flex_2d)  # lower bound is prev close
                    else:
                        _price = np.nan  # first timestamp has no prev close
                order_price[col] = _price

                # Resolve valuation price
                _val_price = flex_select_auto_nb(val_price, i, col, flex_2d)
                if np.isinf(_val_price):
                    if _val_price > 0:
                        _val_price = _price  # upper bound is order price
                    elif i > 0:
                        _val_price = flex_select_auto_nb(close, i - 1, col, flex_2d)  # lower bound is prev close
                    else:
                        _val_price = np.nan  # first timestamp has no prev close
                if not np.isnan(_val_price) or not ffill_val_price:
                    last_val_price[col] = _val_price

            # Calculate group value and rearrange if cash sharing is enabled
            if cash_sharing:
                # Same as get_group_value_ctx_nb but with flexible indexing
                value_now = cash_now
                for k in range(group_len):
                    col = from_col + k
                    if last_position[col] != 0:
                        value_now += last_position[col] * last_val_price[col]

                # Dynamically sort by order value -> selling comes first to release funds early
                if auto_call_seq:
                    # Same as sort_by_order_value_ctx_nb but with flexible indexing
                    for k in range(group_len):
                        col = from_col + k
                        temp_order_value[k] = approx_order_value_nb(
                            flex_select_auto_nb(size, i, col, flex_2d),
                            flex_select_auto_nb(size_type, i, col, flex_2d),
                            flex_select_auto_nb(direction, i, col, flex_2d),
                            cash_now,
                            last_position[col],
                            free_cash_now,
                            last_val_price[col],
                            value_now
                        )

                    # Sort by order value
                    insert_argsort_nb(temp_order_value[:group_len], call_seq[i, from_col:to_col])

            for k in range(group_len):
                col = from_col + k
                if cash_sharing:
                    col_i = call_seq[i, col]
                    if col_i >= group_len:
                        raise ValueError("Call index exceeds bounds of the group")
                    col = from_col + col_i

                # Get current values per column
                position_now = last_position[col]
                debt_now = last_debt[col]
                val_price_now = last_val_price[col]
                if not cash_sharing:
                    value_now = cash_now
                    if position_now != 0:
                        value_now += position_now * val_price_now

                # Generate the next order
                order = order_nb(
                    size=flex_select_auto_nb(size, i, col, flex_2d),
                    price=order_price[col],
                    size_type=flex_select_auto_nb(size_type, i, col, flex_2d),
                    direction=flex_select_auto_nb(direction, i, col, flex_2d),
                    fees=flex_select_auto_nb(fees, i, col, flex_2d),
                    fixed_fees=flex_select_auto_nb(fixed_fees, i, col, flex_2d),
                    slippage=flex_select_auto_nb(slippage, i, col, flex_2d),
                    min_size=flex_select_auto_nb(min_size, i, col, flex_2d),
                    max_size=flex_select_auto_nb(max_size, i, col, flex_2d),
                    size_granularity=flex_select_auto_nb(size_granularity, i, col, flex_2d),
                    reject_prob=flex_select_auto_nb(reject_prob, i, col, flex_2d),
                    lock_cash=flex_select_auto_nb(lock_cash, i, col, flex_2d),
                    allow_partial=flex_select_auto_nb(allow_partial, i, col, flex_2d),
                    raise_reject=flex_select_auto_nb(raise_reject, i, col, flex_2d),
                    log=flex_select_auto_nb(log, i, col, flex_2d)
                )

                # Process the order
                state = ProcessOrderState(
                    cash=cash_now,
                    position=position_now,
                    debt=debt_now,
                    free_cash=free_cash_now,
                    val_price=val_price_now,
                    value=value_now,
                    oidx=oidx,
                    lidx=lidx
                )

                order_result, new_state = process_order_nb(
                    i, col, group, state, update_value, order, order_records, log_records
                )

                # Update state
                cash_now = new_state.cash
                position_now = new_state.position
                debt_now = new_state.debt
                free_cash_now = new_state.free_cash
                val_price_now = new_state.val_price
                value_now = new_state.value
                oidx = new_state.oidx
                lidx = new_state.lidx

                # Now becomes last
                last_position[col] = position_now
                last_debt[col] = debt_now
                if not np.isnan(val_price_now) or not ffill_val_price:
                    last_val_price[col] = val_price_now

        from_col = to_col

    return order_records[:oidx], log_records[:lidx]
5,352,647
def get_data_value(k: int, data: bytes) -> bytes:
    """Extracts the kth value from data.

    data should be in the format value0:value1:value2:...:valueN. This last
    representation is merely for understanding the logic. In practice, data
    will be a sequence of bytes, with each value preceded by the length of
    such value.

    # TODO allow values larger than 255 bytes (some logic similar to OP_PUSHDATA1?)

    :param k: index of item to retrieve
    :type k: int

    :param data: data to get value from
    :type data: bytes

    :raises OutOfData: if data length to read is larger than what's available
    :raises DataIndexError: index requested from data is not available
    """
    data_len = len(data)
    position = 0
    iteration = 0
    while position < data_len:
        length = data[position]
        if length == 0:
            # TODO throw error
            pass
        position += 1
        if (position + length) > len(data):
            raise OutOfData('trying to read {} bytes starting at {}, available {}'
                            .format(length, position, len(data)))
        value = data[position:position + length]
        if iteration == k:
            return value
        iteration += 1
        position += length
    raise DataIndexError
5,352,648
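A minimal usage sketch for get_data_value above (added for illustration, not part of the original row): encode three length-prefixed values and read the middle one back.

data = bytes([3]) + b'foo' + bytes([2]) + b'hi' + bytes([3]) + b'bar'
print(get_data_value(1, data))  # b'hi'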
def exp_map_individual(network, variable, max_degree):
    """Summary measure calculated for the non-parametric mapping approach
    described in Sofrygin & van der Laan (2017). This approach works best for
    networks with uniform degree distributions.

    This summary measure generates a number of columns (a total of
    ``max_degree``). Each column is then an indicator variable for each
    observation. To keep all columns the same number of dimensions, zeroes are
    filled in for all degrees above unit i's observed degree.

    Parameters
    ----------
    network : networkx.Graph
        The NetworkX graph object to calculate the summary measure for.
    variable : str
        Variable to calculate the summary measure for (this will always be the
        exposure variable internally).
    max_degree : int
        Maximum degree in the network (defines the number of columns to
        generate).

    Returns
    -------
    dataframe
        Data set containing all generated columns
    """
    attrs = []
    for i in network.nodes:
        j_attrs = []
        for j in network.neighbors(i):
            j_attrs.append(network.nodes[j][variable])
        attrs.append(j_attrs[:max_degree])

    return pd.DataFrame(attrs,
                        columns=[variable + '_map' + str(x + 1)
                                 for x in range(max_degree)])
5,352,649
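A minimal usage sketch for exp_map_individual above (added for illustration, not part of the original row): a 3-node path graph with a binary exposure attribute 'A'.

import networkx as nx
import pandas as pd

G = nx.path_graph(3)  # edges: 0-1, 1-2
nx.set_node_attributes(G, {0: 1, 1: 0, 2: 1}, 'A')
print(exp_map_individual(G, 'A', max_degree=2))
# one column per potential neighbor; rows shorter than max_degree
# are padded with NaN by pandas:
#    A_map1  A_map2
# 0       0     NaN
# 1       1     1.0
# 2       0     NaN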
def test_dpp_tcp_pkex_auto_connect_2_status_fail(dev, apdev, params):
    """DPP/PKEXv2 over TCP and automatic connection status for failure"""
    run_dpp_tcp_pkex_auto_connect_2(dev, apdev, params, True, start_ap=False)
5,352,650
def bridge_meshes(Xs, Ys, Zs, Cs):
    """
    Concatenate multiple meshes, with hidden transparent bridges, to a single
    mesh, so that plt.plot_surface uses correct drawing order between meshes
    (as it really should)

    :param list Xs: list of x-coordinates for each mesh
    :param list Ys: list of y-coordinates for each mesh
    :param list Zs: list of z-coordinates for each mesh
    :param list Cs: list of colors for each mesh
    :return: Concatenated meshes X_full, Y_full, Z_full, C_full
    """
    assert len(Xs) == len(Ys) == len(Zs) == len(Cs)
    if len(Xs) > 2:
        X1, Y1, Z1, C1 = bridge_meshes(Xs[1:], Ys[1:], Zs[1:], Cs[1:])
    elif len(Xs) == 2:
        X1, Y1, Z1, C1 = Xs[1], Ys[1], Zs[1], Cs[1]
    else:
        raise Exception

    X0, Y0, Z0, C0 = Xs[0], Ys[0], Zs[0], Cs[0]

    X_bridge = np.vstack(np.linspace(X0[-1, :], X1[-1, :], 1))
    Y_bridge = np.vstack(np.linspace(Y0[-1, :], Y1[-1, :], 1))
    Z_bridge = np.vstack(np.linspace(Z0[-1, :], Z1[-1, :], 1))
    color_bridge = np.empty_like(Z_bridge, dtype=object)
    color_bridge.fill((1, 1, 1, 0))  # Make the bridge transparent

    # Join surfaces
    X_full = np.vstack([X0, X_bridge, X1])
    Y_full = np.vstack([Y0, Y_bridge, Y1])
    Z_full = np.vstack([Z0, Z_bridge, Z1])
    color_full = np.vstack([C0, color_bridge, C1])

    return X_full, Y_full, Z_full, color_full
5,352,651
def get_groups():
    """
    Get the list of label groups.

    @return: the list of label groups.
    """
    labels_dict = load_yaml_from_file("labels")

    groups = []
    for group_info in labels_dict["groups"]:
        group = Group(**group_info)
        label_names = group_info.pop("labels", [])
        groups.append(group)

        for label_info in label_names:
            Label(**label_info, group=group)
    return groups
5,352,652
def create_workspace(db_workspace):
    """
    :param db_workspace: (name, output_dir) tuple for the new workspace
    :return:
    """
    sql_create_workspace = '''
        INSERT OR IGNORE INTO workspace(name,output_dir)
        VALUES(?,?)
    '''
    CUR.execute(sql_create_workspace, db_workspace)
    CONNECTION.commit()
5,352,653
def print_table(table, headers='keys', highlight_strs=None, **echo_args):
    """
    Print a tabular collection.

    Outputs to the system pager, with colored unicode formatting

    Args:
        table: A nested dict or list of dicts, with keys representing columns
        headers (str): Header type to be used with :py:mod:`tabulate`
        highlight_strs (list): Additional specific strings to be highlighted
        \\*\\*echo_args (dict): Optional arguments to :py:func:`click.echo`
    """
    formatted = tabulate(table, headers=headers, tablefmt='fancy_grid')
    print_diagram(formatted, highlight_strs=highlight_strs, **echo_args)
5,352,654
def diffractometer_rotation(phi=0, chi=0, eta=0, mu=0):
    """
    Generate the 6-axis diffractometer rotation matrix

        R = M * E * X * P

    Also called Z in H. You, J. Appl. Cryst 32 (1999), 614-623

    :param phi: float angle in degrees
    :param chi: float angle in degrees
    :param eta: float angle in degrees
    :param mu: float angle in degrees
    :return: [3*3] array
    """
    P = rotmatrixz(phi)
    X = rotmatrixy(chi)
    E = rotmatrixz(eta)
    M = rotmatrixx(mu)
    return np.dot(M, np.dot(E, np.dot(X, P)))
5,352,655
def supports_build_in_container(config):
    """
    Given a workflow config, this method provides a boolean on whether the
    workflow can run within a container or not.

    Parameters
    ----------
    config namedtuple(Capability)
        Config specifying the particular build workflow

    Returns
    -------
    tuple(bool, str)
        True, if this workflow can be built inside a container. False, along
        with a reason message if it cannot be.
    """

    def _key(c):
        return str(c.language) + str(c.dependency_manager) + str(c.application_framework)

    # This information could have been bundled inside the Workflow Config
    # object. But we do it this way because ultimately the workflow's
    # implementation dictates whether it can run within a container or not.
    # A "workflow config" is like a primary key to identify the workflow. So we
    # use the config as a key in the map to identify which workflows can
    # support building within a container.
    unsupported = {
        _key(DOTNET_CLIPACKAGE_CONFIG):
            "We do not support building .NET Core Lambda functions within a "
            "container. Try building without the container. Most .NET Core "
            "functions will build successfully.",
        _key(GO_MOD_CONFIG):
            "We do not support building Go Lambda functions within a "
            "container. Try building without the container. Most Go functions "
            "will build successfully.",
    }

    thiskey = _key(config)
    if thiskey in unsupported:
        return False, unsupported[thiskey]

    return True, None
5,352,656
def get_img_array_mhd(img_file):
    """Image array in zyx convention with dtype = int16."""
    itk_img = sitk.ReadImage(img_file)
    img_array_zyx = sitk.GetArrayFromImage(itk_img)  # indices are z, y, x
    origin = itk_img.GetOrigin()  # x, y, z world coordinates (mm)
    origin_zyx = [origin[2], origin[1], origin[0]]  # z, y, x
    spacing = itk_img.GetSpacing()  # x, y, z world coordinates (mm)
    spacing_zyx = [spacing[2], spacing[1], spacing[0]]  # z, y, x
    acquisition_exception = None  # no acquisition number found in object
    return img_array_zyx, spacing_zyx, origin_zyx, acquisition_exception
5,352,657
def registration(request):
    """Render the registration page."""
    if request.user.is_authenticated:
        return redirect(reverse('index'))

    if request.method == 'POST':
        registration_form = UserRegistrationForm(request.POST)

        if registration_form.is_valid():
            registration_form.save()

            user = auth.authenticate(username=request.POST['username'],
                                     password=request.POST['password1'])
            if user:
                auth.login(user=user, request=request)
                messages.success(request, "You have registered successfully.")
                return redirect(reverse('index'))
            else:
                messages.error(request, "Unable to register your account at this time.")
    else:
        registration_form = UserRegistrationForm()

    return render(request, 'registration.html',
                  {"registration_form": registration_form})
5,352,658
def _get_arc2height(arcs):
    """
    Parameters
    ----------
    arcs: list[(int, int)]

    Returns
    -------
    dict[(int, int), int]
    """
    # arc2height = {(b,e): np.abs(b - e) for b, e in arcs}
    n_arcs = len(arcs)
    # process arcs from shortest span to longest, so nested arcs are seen first
    arcs_sorted = sorted(arcs, key=lambda x: np.abs(x[0] - x[1]))
    arc2height = {arc: 1 for arc in arcs}
    for arc_i in range(n_arcs):
        bi, ei = sorted(arcs_sorted[arc_i])
        for arc_j in range(n_arcs):
            if arc_i == arc_j:
                continue
            bj, ej = sorted(arcs_sorted[arc_j])
            if bi <= bj <= ej <= ei:
                arc2height[arcs_sorted[arc_i]] = max(arc2height[arcs_sorted[arc_j]] + 1,
                                                     arc2height[arcs_sorted[arc_i]])
    return arc2height
5,352,659
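A minimal usage sketch for _get_arc2height above (added for illustration, not part of the original row; np is assumed to be numpy as in the function body): nested arcs stack up in height.

import numpy as np

arcs = [(0, 1), (1, 2), (0, 2), (0, 3)]
print(_get_arc2height(arcs))
# {(0, 1): 1, (1, 2): 1, (0, 2): 2, (0, 3): 3}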
def init_ycm(path):
    """
    Generate a ycm_extra_conf.py file in the given path dir to specify
    compilation flags for a project. This is necessary to get semantic
    analysis for c-family languages.

    Check ycmd docs for more details.
    """
    conf = join(path, '.ycm_extra_conf.py')
    if exists(conf):
        root.status.set_msg('File overwritten: %s' % conf)
    copyfile(join(dirname(__file__), 'ycm_extra_conf.py'), conf)
    return conf
5,352,660
def get_aggregate_stats_flows_single_appliance(
    self,
    ne_pk: str,
    start_time: int,
    end_time: int,
    granularity: str,
    traffic_class: int = None,
    flow: str = None,
    ip: str = None,
    data_format: str = None,
) -> dict:
    """Get aggregate flow stats data for a single appliance filter by
    query parameters

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - aggregateStats
          - GET
          - /stats/aggregate/flow/{nePk}

    :param ne_pk: Network Primary Key (nePk) of appliance, e.g. ``3.NE``
    :type ne_pk: str
    :param start_time: Long(Signed 64 bits) value of seconds since EPOCH
        time indicating the starting time boundary of data time range
    :type start_time: int
    :param end_time: Long(Signed 64 bits) value of seconds since EPOCH
        time indicating the ending time boundary of data time range
    :type end_time: int
    :param granularity: Data granularity filtering whether data is
        minutely data, hourly data or daily data. Accepted values are
        ``minute``, ``hour``, and ``day``
    :type granularity: str
    :param traffic_class: Filter for data which belongs to particular
        traffic class, accepted values between 1-10, defaults to None
    :type traffic_class: int, optional
    :param flow: Filter for data of a particular flow type. Accepted
        values are "TCP_ACCELERATED" "TCP_NOT_ACCELERATED" "NON_TCP",
        defaults to None
    :type flow: str, optional
    :param ip: ``True`` to use IP address as key to sort results or
        ``False`` or ``None`` for default sorting by appliance ID,
        defaults to None
    :type ip: bool, optional
    :param data_format: The only format other than JSON currently
        supported is CSV, accepted value is ``csv``, defaults to None
    :type data_format: str, optional
    :return: Returns dictionary of aggregate stats filtered by query
        parameters
    :rtype: dict
    """
    path = (
        "/stats/aggregate/flow/"
        + "{}?startTime={}&endTime={}&granularity={}".format(
            ne_pk, start_time, end_time, granularity
        )
    )

    if traffic_class is not None:
        path = path + "&trafficClass={}".format(traffic_class)
    if flow is not None:
        path = path + "&flow={}".format(flow)
    if ip is not None:
        path = path + "&ip={}".format(ip)
    if data_format is not None:
        path = path + "&format={}".format(data_format)

    return self._get(path)
5,352,661
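The optional filters above are appended by hand; a hedged sketch of the same path construction using urllib's urlencode (build_flow_stats_path is an illustrative name, not part of the client):

from urllib.parse import urlencode

def build_flow_stats_path(ne_pk, start_time, end_time, granularity, **optional):
    # Same path the method above builds, with the optional filters
    # encoded in one pass instead of repeated if-blocks.
    base = ("/stats/aggregate/flow/{}?startTime={}&endTime={}&granularity={}"
            .format(ne_pk, start_time, end_time, granularity))
    extras = {k: v for k, v in optional.items() if v is not None}
    return base + ("&" + urlencode(extras) if extras else "")

# build_flow_stats_path("3.NE", 1640995200, 1641081600, "hour", trafficClass=1)
# -> "/stats/aggregate/flow/3.NE?startTime=1640995200&endTime=1641081600&granularity=hour&trafficClass=1"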
def save_confusion_matrix_from_tensor(confusion_matrix, labels, current_epoch, save_dir): """Receive a confusion matrix from tensor, generate a image with seaborn and save as .png in mlflow experiment Args: confusion_matrix (torch.Tensor): Tensor of confusion matrix labels (list): Classification labels current_epoch (int): Current epoch number save_dir (str): Directory to save """ image_file_name = 'confusion_matrix_validation_{}.png'.format( current_epoch) plt.figure(figsize=(16, 10)) matrix = sns.heatmap(confusion_matrix.long().numpy(), annot=True, cmap=plt.cm.Blues, xticklabels=labels, yticklabels=labels, fmt='d') plt.yticks(rotation=0) plt.savefig(os.path.join(save_dir, image_file_name)) mlflow.log_artifact(os.path.join( save_dir, image_file_name), artifact_path="images")
5,352,662
def load_user(userid): """Callback to load user from db, called by Flask-Login""" db = get_db() user = db.execute("SELECT id FROM users WHERE id = ?", [userid]).fetchone() if user is not None: return User(user[0]) return None
5,352,663
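load_user assumes a User class compatible with Flask-Login; a minimal sketch of what that class could look like (UserMixin supplies is_authenticated, is_active, and get_id):

from flask_login import UserMixin

class User(UserMixin):
    # Minimal user wrapper: Flask-Login only needs get_id(), which
    # UserMixin derives from the `id` attribute set here.
    def __init__(self, user_id):
        self.id = user_id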
def run_global_bears(message_queue, timeout, global_bear_queue, global_bear_list, global_result_dict, control_queue): """ Run all global bears. :param message_queue: A queue that contains messages of type errors/warnings/debug statements to be printed in the Log. :param timeout: The queue blocks at most timeout seconds for a free slot to execute the put operation on. After the timeout it returns queue Full exception. :param global_bear_queue: queue (read, write) of indexes of global bear instances in the global_bear_list. :param global_bear_list: list of global bear instances :param global_result_dict: A Manager.dict that will be used to store global results. The list of results of one global bear will be stored with the bear name as key. :param control_queue: If any result gets written to the result_dict a tuple containing a CONTROL_ELEMENT (to indicate what kind of event happened) and either a bear name(for global results) or a file name to indicate the result will be put to the queue. """ try: while True: bear, dep_results = ( get_next_global_bear(timeout, global_bear_queue, global_bear_list, global_result_dict)) bearname = bear.__class__.__name__ result = run_global_bear(message_queue, timeout, bear, dep_results) if result: global_result_dict[bearname] = result control_queue.put((CONTROL_ELEMENT.GLOBAL, bearname)) else: global_result_dict[bearname] = None task_done(global_bear_queue) except queue.Empty: return
5,352,664
def get_partial_outputs_with_prophecies(prophecies, loader, model, my_device,
                                        corpus, seq2seq):
    """
    Parameters
    ----------
    prophecies : dict
        Dictionary mapping from sequence index to a list of prophecies,
        one for each prefix in the sequence.
    loader : torch.utils.data.dataloader.DataLoader
        Data loader, batch must have size 1.
    model : models.<model>
        NN model not BERT
    my_device : torch.device
        PyTorch device.
    corpus : corpus object
        Corpus holding the word2id/label2id vocabularies and the task
        name, used for padding and the SRL sanity check.
    seq2seq : bool
        True if sequence tagging, else False for sequence classification.

    Returns
    -------
    results : dict of dicts
        A dictionary storing partial outputs, accuracy w.r.t. the gold
        labels and an np matrix that indicates editions.
    """
    # see comments in function above
    model.eval()
    results = {'partial_outputs': {}, 'log_changes': {}, 'accuracy': {}}
    with torch.no_grad():
        for x, lens, y, idx in loader:
            #if idx.item() not in prophecies:
            #    continue
            x = x.to(my_device)
            y = y.to(my_device)
            lens = lens.to(my_device)
            if seq2seq:
                predictions = np.zeros((lens, lens))
                changes = np.zeros((lens, lens))
            else:
                predictions = np.zeros((lens, 1))
                changes = np.zeros((lens, 1))
            pad = corpus.word2id['<pad>']
            for l in range(1, lens.item()+1):
                if l != lens.item():
                    part_x = x[:, :l]
                    # add prophecy
                    prophecy = nltk.word_tokenize(
                        prophecies[idx.item()][l-1][0])
                    prophecy_ids = torch.tensor([[corpus.word2id.get(w, pad)
                                                  for w in prophecy[l:]]],
                                                dtype=torch.long,
                                                device=x.device)
                    part_x = torch.cat((part_x, prophecy_ids), dim=1)
                    part_len = torch.tensor([l+prophecy_ids.shape[1]],
                                            device=x.device)
                    # create any y to append; it will not be used (but cannot be
                    # the same idx as the label of the predicate in SRL), so we
                    # use zero and check
                    if 'srl' in corpus.task:
                        assert corpus.label2id['B-V'] != 0
                    if seq2seq:
                        extra_pad = torch.tensor([[0]*(part_x.shape[1]-l)],
                                                 device=x.device,
                                                 dtype=torch.long)
                        part_y = torch.cat((y[:, :l], extra_pad), dim=1)
                        #part_y = torch.zeros((1, part_len.item()), dtype=torch.long,
                        #                     device=y.device)
                    else:
                        part_y = y
                else:
                    # complete sentence does not need prophecy
                    part_x = x
                    part_y = y
                    part_len = lens
                #unpacked, mask = model(x, lens)
                # _ = (hidden, context)
                _, predicted = model(part_x, part_len, part_y, seq2seq)

                if seq2seq:
                    predictions[l-1] = np.array((predicted[:l].tolist()
                                                 + (lens.item() - l)*[np.inf]))
                else:
                    predictions[l-1] = np.array((predicted.tolist()))
                if l == 1:
                    changes[l-1][0] = 1
                else:
                    changes[l-1] = predictions[l-1] != predictions[l-2]

            y = y.reshape(-1)
            y = torch.tensor([i for i in y if i != corpus.label2id['<pad>']])
            if seq2seq:
                acc = (predictions[-1] == y.cpu().numpy()).sum() / lens.item()
            else:
                acc = (predictions[-1] == y.cpu().numpy()).sum()
            results['partial_outputs'][idx.item()] = predictions
            results['log_changes'][idx.item()] = changes
            results['accuracy'][idx.item()] = acc
    return results
5,352,665
def _generate_deserialize_impl( symbol_table: intermediate.SymbolTable, spec_impls: specific_implementations.SpecificImplementations, ) -> Tuple[Optional[Stripped], Optional[List[Error]]]: """Generate the implementation for deserialization functions.""" blocks = [ _generate_skip_whitespace_and_comments(), _generate_read_whole_content_as_base_64(), ] # type: List[Stripped] errors = [] # type: List[Error] for symbol in symbol_table.symbols: if isinstance(symbol, intermediate.Enumeration): # NOTE (mristin, 2022-04-13): # Enumerations are going to be directly deserialized using # ``Stringification``. continue elif isinstance(symbol, intermediate.ConstrainedPrimitive): # NOTE (mristin, 2022-04-13): # Constrained primitives are only verified, but do not represent a C# type. continue elif isinstance( symbol, (intermediate.AbstractClass, intermediate.ConcreteClass) ): if symbol.is_implementation_specific: implementation_keys = [ specific_implementations.ImplementationKey( f"Xmlization/DeserializeImplementation/" f"{symbol.name}_from_element.cs" ), specific_implementations.ImplementationKey( f"Xmlization/DeserializeImplementation/" f"{symbol.name}_from_sequence.cs" ), ] for implementation_key in implementation_keys: implementation = spec_impls.get(implementation_key, None) if implementation is None: errors.append( Error( symbol.parsed.node, f"The xmlization snippet is missing " f"for the implementation-specific " f"class {symbol.name}: {implementation_key}", ) ) continue else: blocks.append(spec_impls[implementation_key]) else: if isinstance(symbol, intermediate.ConcreteClass): ( block, generation_errors, ) = _generate_deserialize_impl_cls_from_sequence(cls=symbol) if generation_errors is not None: errors.append( Error( symbol.parsed.node, f"Failed to generate the XML deserialization code " f"for the class {symbol.name}", generation_errors, ) ) else: assert block is not None blocks.append(block) if symbol.interface is not None: blocks.append( _generate_deserialize_impl_interface_from_element( interface=symbol.interface ) ) if isinstance(symbol, intermediate.ConcreteClass): blocks.append( _generate_deserialize_impl_concrete_cls_from_element(cls=symbol) ) else: assert_never(symbol) if len(errors) > 0: return None, errors writer = io.StringIO() writer.write( """\ /// <summary> /// Implement the deserialization of meta-model classes from XML. /// </summary> /// <remarks> /// The implementation propagates an <see cref="Reporting.Error" /> instead of /// relying on exceptions. Under the assumption that incorrect data is much less /// frequent than correct data, this makes the deserialization more /// efficient. /// /// However, we do not want to force the client to deal with /// the <see cref="Reporting.Error" /> class as this is not intuitive. /// Therefore we distinguish the implementation, realized in /// <see cref="DeserializeImplementation" />, and the facade given in /// <see cref="Deserialize" /> class. /// </remarks> internal static class DeserializeImplementation { """ ) for i, block in enumerate(blocks): if i > 0: writer.write("\n\n") writer.write(textwrap.indent(block, I)) writer.write("\n} // internal static class DeserializeImplementation") return Stripped(writer.getvalue()), None
5,352,666
def verif_snap_availability(dc, host):
    """Verify whether the ZFS snapshot name already exists.
    accepts: data center, host
    returns: exits with the error code if the ZFS snapshot / clone name exists.
    """
    logger.info('Validating configuration request.')
    zfssrcfslist.append("ifxdb-do_" + "v-" + str(db_version))
    zfssrcfslist.append("apps1-prod_" + "v-" + str(app_version))
    for zfssrcfs in zfssrcfslist:
        snap = verif_snap(zfsdstsnap, zfssrcfs)
        if snap == 200:
            close_con(rc, zcon)
            print "Snapshot %s exists in %s. Error code: %s \nExiting." % (zfsdstsnap, zfssrcfs, snap)
            logger.error("Snapshot %s exists in %s. Error code: %s exiting.", zfsdstsnap, zfssrcfs, snap)
            sys.exit(snap)
        else:
            logger.info("Snapshot %s in %s is valid. continuing...", zfsdstsnap, zfssrcfs)
    zfsdstclonelist.append("ifxdb-do_" + "v-" + str(db_version) + "-" + dst_zone)
    zfsdstclonelist.append("apps1-prod_" + "v-" + str(app_version) + "-" + dst_zone)
    for zfsdstclone in zfsdstclonelist:
        clone = verif_clone(zfsdstclone)
        if clone == 200:
            close_con(rc, zcon)
            print "Clone %s exists. Error code: %s \nExiting." % (zfsdstclone, clone)
            logger.error("Clone %s exists. Error code: %s exiting.", zfsdstclone, clone)
            sys.exit(clone)
        else:
            logger.info("Clone %s is valid. continuing...", zfsdstclone)
5,352,667
def timestep_to_transition_idx(snapshot_years, transitions, timestep): """Convert timestep to transition index. Args: snapshot_years (list): a list of years corresponding to the provided rasters transitions (int): the number of transitions in the scenario timestep (int): the current timestep Returns: transition_idx (int): the current transition """ for i in xrange(0, transitions): if timestep < (snapshot_years[i+1] - snapshot_years[0]): return i
5,352,668
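A worked example of the mapping (note the function relies on Python 2's xrange): with snapshots at [2000, 2005, 2010], timesteps below 5 map to transition 0 and timesteps below 10 map to transition 1.

assert timestep_to_transition_idx([2000, 2005, 2010], 2, 3) == 0   # 3 < 2005 - 2000
assert timestep_to_transition_idx([2000, 2005, 2010], 2, 7) == 1   # 7 < 2010 - 2000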
def _make_abs_path(path, cwd=None, default=None): """convert 'path' to absolute if necessary (could be already absolute) if not defined (empty, or None), will return 'default' one or 'cwd' """ cwd = cwd or get_cwd() if not path: abs_path = default or cwd elif os.path.isabs(path): abs_path = path else: abs_path = os.path.normpath(os.path.join(cwd, path)) return abs_path
5,352,669
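Behaviour sketch on a POSIX path layout (cwd is passed explicitly so get_cwd() is never consulted):

assert _make_abs_path(None, cwd="/tmp") == "/tmp"               # empty -> default/cwd
assert _make_abs_path("sub/dir", cwd="/tmp") == "/tmp/sub/dir"  # relative -> joined
assert _make_abs_path("/abs/path", cwd="/tmp") == "/abs/path"   # absolute -> unchanged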
def resolve_checks(names, all_checks): """Returns a set of resolved check names. Resolving a check name expands tag references (e.g., "@tag") to all the checks that contain the given tag. OpenShiftCheckException is raised if names contains an unknown check or tag name. names should be a sequence of strings. all_checks should be a sequence of check classes/instances. """ known_check_names = set(check.name for check in all_checks) known_tag_names = set(name for check in all_checks for name in check.tags) check_names = set(name for name in names if not name.startswith('@')) tag_names = set(name[1:] for name in names if name.startswith('@')) unknown_check_names = check_names - known_check_names unknown_tag_names = tag_names - known_tag_names if unknown_check_names or unknown_tag_names: msg = [] if unknown_check_names: msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names)))) if unknown_tag_names: msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names)))) msg.append('Make sure there is no typo in the playbook and no files are missing.') raise OpenShiftCheckException('\n'.join(msg)) tag_to_checks = defaultdict(set) for check in all_checks: for tag in check.tags: tag_to_checks[tag].add(check.name) resolved = check_names.copy() for tag in tag_names: resolved.update(tag_to_checks[tag]) return resolved
5,352,670
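A quick illustration with stand-in check objects (only .name and .tags are consulted, so a namedtuple suffices):

from collections import namedtuple

Check = namedtuple("Check", ["name", "tags"])
all_checks = [Check("disk_availability", ["preflight"]),
              Check("memory_availability", ["preflight"]),
              Check("etcd_volume", ["health"])]

# "@preflight" expands to every check carrying the "preflight" tag.
assert resolve_checks(["@preflight", "etcd_volume"], all_checks) == {
    "disk_availability", "memory_availability", "etcd_volume"}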
def swish(
    data: NodeInput,
    beta: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs the Swish activation function.

    Swish(x, beta=1.0) = x * sigmoid(x * beta)

    :param data: Tensor with input data of floating point type.
    :param beta: Scalar with the beta value; defaults to a constant 1.0 when None.
    :param name: Optional name for the output node.
    :return: The new node which performs Swish
    """
    if beta is None:
        beta = make_constant_node(1.0, np.float32)
    return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {})
5,352,671
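For sanity-checking the node's output, a plain NumPy rendering of the same formula:

import numpy as np

def swish_ref(x, beta=1.0):
    # x * sigmoid(beta * x), written to avoid a separate sigmoid helper
    return x / (1.0 + np.exp(-beta * x))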
def cut_out_interval(data, interval, with_gaps=False): """ Cuts out data from input array. Interval is the start-stop time pair. If with_gaps flag is True, then one NaN value will be added between the remaining two pieces of data. Returns modified data array. :param data: 2-dimensional array with data :param interval: list or array with two time points :type data: np.ndarray :type interval: list or tuple or np.ndarray :return: modified data array, start and stop point of deleted interval :rtype: tuple """ supported_arr_types = "np.ndarray" supported_interval_types = "list or tuple or np.ndarray" assert isinstance(data, np.ndarray), \ "Arr value is of an unsupported type. " \ "Expected {}, got {} instead.".format(supported_arr_types, type(data)) assert data.ndim == 2, \ "Data must be 2-dimensional array. Got {} ndims instead.".format(data.ndim) assert isinstance(interval, list) or \ isinstance(interval, tuple) or \ isinstance(interval, np.ndarray), \ "Interval value is of an unsupported type. " \ "Expected {}, got {} instead." \ "".format(supported_interval_types, type(interval)) assert len(interval) == 2, \ "Unsupported interval length. " \ "Expected 2, got {} instead.".format(len(interval)) assert interval[0] <= interval[1], \ "Left interval border ({}) is greater than the right ({})." \ "".format(interval[0], interval[1]) idx_start, idx_stop = _get_interval_idx(data, interval) if idx_start is None or idx_stop is None: return data, None, None # 1-dimensional mask mask = np.ones(shape=data.shape[1], dtype=bool) # right border value is included mask[idx_start:idx_stop + 1] = False start_str = datetime.fromtimestamp(data[0, idx_start]).strftime("%Y.%m.%d %H:%M:%S") stop_str = datetime.fromtimestamp(data[0, idx_stop]).strftime("%Y.%m.%d %H:%M:%S") # add nan if cutting inner interval if with_gaps and idx_start > 0 and idx_stop < data.shape[1] - 1: # leave one element and replace it with nan mask[idx_stop] = True data[:, idx_stop] = np.nan # masking (cutting out) all columns data = data[:, mask] else: # masking (cutting out) all columns data = data[:, mask] return data, start_str, stop_str
5,352,672
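A toy run, assuming _get_interval_idx returns the inclusive index bounds of the interval within row 0 (the timestamps): cutting four columns with with_gaps=True leaves one NaN marker column behind.

import numpy as np

t = np.arange(1600000000, 1600000010, dtype=float)   # 10 timestamps
data = np.vstack([t, np.arange(10, dtype=float)])    # row 0: time, row 1: signal
trimmed, start, stop = cut_out_interval(data, (1600000003.0, 1600000006.0),
                                        with_gaps=True)
print(trimmed.shape)  # (2, 7): 10 columns minus 4 cut, plus the NaN gap marker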
def _abbreviations_to_word(text: str):
    """
    Expand abbreviations in the sentence into full words.

    :param text: a single sentence of text
    :return: the converted sentence text
    """
    abbreviations = [
        (re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1])
        for x in [
            ('mrs', 'misess'),
            ('mr', 'mister'),
            ('dr', 'doctor'),
            ('st', 'saint'),
            ('co', 'company'),
            ('jr', 'junior'),
            ('maj', 'major'),
            ('gen', 'general'),
            ('drs', 'doctors'),
            ('rev', 'reverend'),
            ('lt', 'lieutenant'),
            ('hon', 'honorable'),
            ('sgt', 'sergeant'),
            ('capt', 'captain'),
            ('esq', 'esquire'),
            ('ltd', 'limited'),
            ('col', 'colonel'),
            ('ft', 'fort')
        ]
    ]

    for regex, replacement in abbreviations:
        text = re.sub(regex, replacement, text)
    return text
5,352,673
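Example expansion (matching is case-insensitive and requires the trailing period; replacements come out lowercase):

assert _abbreviations_to_word("Dr. Smith met Mr. Jones") == "doctor Smith met mister Jones"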
def _prev(message=None): """ Handler for mycroft.audio.service.prev. Starts playing the previous track. Args: message: message bus message, not used but required """ global current if current: current.prev()
5,352,674
def resnet56(num_classes=100):
    """Constructs a ResNet-56 model for CIFAR-100 (by default; pass
    num_classes=10 for CIFAR-10)

    Args:
        num_classes (uint): number of classes
    """
    model = CifarResNet(ResNetBasicblock, 56, num_classes)
    return model
5,352,675
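CIFAR ResNets with basic blocks follow the depth = 6n + 2 pattern (56 -> n = 9); a hedged generalization of the constructor above, assuming the same CifarResNet/ResNetBasicblock pair is in scope:

def cifar_resnet(depth, num_classes=100):
    # depth must be 6n + 2 for basic blocks: 20, 32, 44, 56, 110, ...
    assert (depth - 2) % 6 == 0, "depth should be 6n+2"
    return CifarResNet(ResNetBasicblock, depth, num_classes)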
def _fetch_latest_from_memcache(app_version): """Get the latest configuration data for this app-version from memcache. Args: app_version: the major version you want configuration data for. Returns: A Config class instance for most recently set options or None if none could be found in memcache. """ proto_string = memcache.get(app_version, namespace=NAMESPACE) if proto_string: logging.debug('Loaded most recent conf data from memcache.') return db.model_from_protobuf(proto_string) logging.debug('Tried to load conf data from memcache, but found nothing.') return None
5,352,676
def check_settings(generate_module, build_to_run): """ Check the validity of locally configured settings. """ add_check_settings_steps(generate_module, build_to_run) build_to_run.run()
5,352,677
async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ): """Set up CAME light devices dynamically through discovery.""" async def async_discover_sensor(dev_ids): """Discover and add a discovered CAME light devices.""" if not dev_ids: return entities = await hass.async_add_executor_job(_setup_entities, hass, dev_ids) async_add_entities(entities) async_dispatcher_connect( hass, SIGNAL_DISCOVERY_NEW.format(LIGHT_DOMAIN), async_discover_sensor ) devices_ids = hass.data[DOMAIN][CONF_PENDING].pop(LIGHT_DOMAIN, []) await async_discover_sensor(devices_ids)
5,352,678
def validate_train_id(relative_path):
    """
    Check if train_id is valid.

    Args:
        relative_path (str): Train ID of a summary directory, e.g. './log1'.

    Returns:
        bool, True if the train id is valid.

    Raises:
        ParamValueError: If the train id is not a relative path starting
            with './' or contains more than one '/'.
    """
    if not relative_path.startswith('./'):
        log.warning("The relative_path does not start with './'.")
        raise ParamValueError(
            "Summary dir should be relative path starting with './'."
        )
    if len(relative_path.split("/")) > 2:
        log.warning("The relative_path contains multiple '/'.")
        raise ParamValueError(
            "Summary dir should be relative path starting with './'."
        )
    return True
5,352,679
def test_atx_headings_extra_4(): """ Test case extra 4: ATX headings string starting with a code span. """ # Arrange source_markdown = """## ``this`` is a fun day""" expected_tokens = [ "[atx(1,1):2:0:]", "[text(1,4)::\a \a\x03\a]", "[icode-span(1,4):this:``::]", "[text(1,12): is a fun day:]", "[end-atx::]", ] expected_gfm = """<h2><code>this</code> is a fun day</h2>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
5,352,680
def pad_set_room(request): """ pad修改关联会议室 :param request: :return: """ dbs = request.dbsession user_id = request.POST.get('user_id', '') room_id = request.POST.get('room_id', '') pad_code = request.POST.get('pad_code', '') if not user_id: error_msg = '用户ID不能为空!' elif not pad_code: error_msg = '终端编码不能为空!' elif not room_id: error_msg = '会议室ID不能为空!' else: room, error_msg = set_room(dbs, user_id, pad_code, room_id) update_last_time(dbs, pad_code, 'setRoom') logger.info('setRoom--user_id:' + user_id + ',pad_code:' + pad_code + ',room_id:' + room_id) if error_msg: json = { 'success': 'false', 'error_msg': error_msg, } else: json = { 'success': 'true', 'room': room } return json
5,352,681
def airffromrh_wmo(rh_wmo,temp,pres,asat=None,dhsat=None,chkvals=False, chktol=_CHKTOL,asat0=None,dhsat0=None,chkbnd=False,mathargs=None): """Calculate dry fraction from WMO RH. Calculate the dry air mass fraction from the relative humidity. The relative humidity used here is defined by the WMO as: rh_wmo = [(1-airf)/airf] / [(1-asat)/asat] where asat is the dry air fraction at saturation. :arg float rh_wmo: Relative humidity, unitless. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :arg asat: Saturation dry air mass fraction in kg/kg. If unknown, pass None (default) and it will be calculated. :type asat: float or None :arg dhsat: Saturation humid air density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dhsat: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg asat0: Initial guess for the saturation dry air mass fraction in kg/kg. If None (default) then `_approx_tp` is used. :type asat0: float or None :arg dhsat0: Initial guess for the saturation humid air density in kg/m3. If None (default) then `_approx_tp` is used. :type dhsat0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: In-situ dry air mass fraction in kg/kg. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> airffromrh_wmo(0.8,270.,1e5) 0.997645698908 """ asat = massfractionair(temp=temp,pres=pres,airf=asat,dhum=dhsat, chkvals=chkvals,chktol=chktol,airf0=asat0,dhum0=dhsat0,chkbnd=chkbnd, mathargs=mathargs) airf = asat / (rh_wmo*(1-asat) + asat) return airf
5,352,682
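The closed-form inversion used in the last two lines can be checked algebraically: rh = ((1-a)/a) / ((1-asat)/asat) rearranges to a = asat / (rh*(1-asat) + asat).

asat, rh = 0.9, 0.8
a = asat / (rh * (1 - asat) + asat)
assert abs(((1 - a) / a) / ((1 - asat) / asat) - rh) < 1e-12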
def sqs_queue_encryption_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[SQS.2] SQS queues should use Server Side encryption""" response = list_queues(cache) iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat() if 'QueueUrls' in response: for queueUrl in response["QueueUrls"]: queueName = queueUrl.rsplit("/", 1)[-1] attributes = sqs.get_queue_attributes( QueueUrl=queueUrl, AttributeNames=["QueueArn", "KmsMasterKeyId"] ) queueArn=attributes["Attributes"]["QueueArn"] queueEncryption=attributes["Attributes"].get('KmsMasterKeyId') if queueEncryption != None: finding = { "SchemaVersion": "2018-10-08", "Id": queueArn + "/sqs_queue_encryption_check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": queueArn, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[SQS.2] SQS queues should use Server Side encryption", "Description": f"SQS queue {queueName} has Server Side encryption enabled.", "Remediation": { "Recommendation": { "Text": "For more information on best practices for encryption of SQS queues, refer to the Data Encryption section of the Amazon SQS Developer Guide", "Url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsSqsQueue", "Id": queueArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "AwsSqsQueue": { "QueueName": queueName, "KmsMasterKeyId": str(queueEncryption) } } } ], "Compliance": { "Status": "PASSED", "RelatedRequirements": [ "NIST CSF PR.DS-1", "NIST CSF PR.DS-5", "NIST CSF PR.PT-3", "AICPA TSC CC6.1", "ISO 27001:2013 A.8.2.3" ], }, "Workflow": {"Status": "RESOLVED"}, "RecordState": "ARCHIVED", } yield finding else: finding = { "SchemaVersion": "2018-10-08", "Id": queueArn + "/sqs_queue_encryption_check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": queueArn, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "HIGH"}, "Confidence": 99, "Title": "[SQS.2] SQS queues should use server side encryption", "Description": f"SQS queue {queueName} has not enabled Server side encryption. Refer to the recommendations to remediate.", "Remediation": { "Recommendation": { "Text": "For more information on best practices for encryption of SQS queues, refer to the Data Encryption section of the Amazon SQS Developer Guide", "Url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsSqsQueue", "Id": queueArn, "Partition": awsPartition, "Region": awsRegion, "Details": {"AwsSqsQueue": {"QueueName": queueName}} } ], "Compliance": { "Status": "FAILED", "RelatedRequirements": [ "NIST CSF PR.DS-1", "NIST CSF PR.DS-5", "NIST CSF PR.PT-3", "AICPA TSC CC6.1", "ISO 27001:2013 A.8.2.3" ], }, "Workflow": {"Status": "NEW"}, "RecordState": "ACTIVE", } yield finding else: # No queues listed pass
5,352,683
def test_model_not_recommend_the_last_eaten_menu(model, ten_people_data):
    """
    Test that the model does not recommend the last eaten menu
    """
    model.add_data(ten_people_data)
    print(model.preferences('Jason'))
    assert model.recommend('Jason') != 'pasta'
5,352,684
def player_vs_bot():
    """
    For playing against a bot. It's kept separate to keep the main class clean.
    """
    die_player1 = dice.Dice()
    die_computer = dice.Dice()
    game = bot_levels.BotDifficulty()
    while True:
        try:
            choose_difficulty = int(input("""select difficulty. 1 for easy , 2 for medium: """))
            print(" ")
            if choose_difficulty not in (1, 2):
                print("bad input. select 1 or 2!")
            else:
                break
        except ValueError:
            print("only enter numbers!")
    while(die_player1.show_total() < 100 and die_computer.show_total() < 100):
        end_game = 0
        result = player_round(die_player1)
        die_player1.add_to_total(result)
        if result == 10001:
            print("you chose to exit game!")
            print("---------------------------")
            end_game = 1
            break
        else:
            if choose_difficulty == 1:
                total_points_computer = game.cpu_easy(die_computer)
                die_computer.add_to_total(total_points_computer)
                print(f"""computer now has a total of: {die_computer.show_total()}""")
                print("--------------------------------")
                time.sleep(1)
            elif choose_difficulty == 2:
                total_points_computer = game.cpu_medium(die_computer)
                die_computer.add_to_total(total_points_computer)
                print(f"""computer now has a total of: {die_computer.show_total()}""")
                print("--------------------------------")
                time.sleep(1)
    if end_game == 0:
        if (die_player1.show_total() >= 100 and die_computer.show_total() >= 100):
            print("it's a tie!")
            print_rolles_endgame(die_player1, die_computer)
        elif die_player1.show_total() >= 100:
            print("congratz! Player actually won!")
            print_rolles_endgame(die_player1, die_computer)
        elif die_computer.show_total() >= 100:
            print("congratz! Computer actually won!")
            print_rolles_endgame(die_player1, die_computer)
5,352,685
def download_file(filename, url): """ Download an URL to a file """ print("downloading {0}".format(url)) with open(filename, "wb") as fout: response = requests.get(url, stream=True, verify=False) response.raise_for_status() # Write response data to file iblock = 0 for block in response.iter_content(4096): if iblock % 10000 == 0: sys.stdout.write(".") sys.stdout.flush() iblock += 1 fout.write(block)
5,352,686
def resolve_If(node: ast.If, tree: ast.Module, context: Context) -> WorkflowStep: """ Make the resolved condition string and body into a workflow step. TODO: support assignments, not just calls TODO: support multi-statement bodies """ if len(node.body) > 1: raise NotImplementedError("Can't yet handle multi-statement bodies. Only single function-calls are allowed.") body = node.body[0] if isinstance(body, ast.Expr) and isinstance(body.value, ast.Call): condition = resolve_cond(node, context.locals) return make_workflow_step( call_node=body.value, tree=tree, context=context, condition=condition, ) else: raise NotImplementedError("Can only transpile function call inside of conditional statements.")
5,352,687
def inflate(data: str) -> str: """ reverses the compression used by draw.io see: https://drawio-app.com/extracting-the-xml-from-mxfiles/ see: https://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations :param data: base64 encoded string :return: "plain text" version of the deflated data """ data = base64.b64decode(data) decompress = zlib.decompressobj(-zlib.MAX_WBITS) inflated = decompress.decompress(data) inflated += decompress.flush() return unquote(inflated.decode('utf-8'))
5,352,688
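The inverse operation (text -> mxfile payload) is not shown; a sketch that round-trips with inflate, assuming the same raw-deflate/base64/URL-quote pipeline draw.io uses:

import base64
import zlib
from urllib.parse import quote

def deflate(text: str) -> str:
    # URL-quote, raw-deflate (no zlib header, hence -MAX_WBITS), then
    # base64 — so that inflate(deflate(text)) == text.
    data = quote(text, safe="").encode("utf-8")
    compressor = zlib.compressobj(level=9, wbits=-zlib.MAX_WBITS)
    deflated = compressor.compress(data) + compressor.flush()
    return base64.b64encode(deflated).decode("ascii")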
def error_mixin():
    """
    @apiDefine ErrorMixin
    @apiError content Error details
    @apiError message Error message
    @apiErrorExample 400 Bad Request
        Parameter error
        {
            content="missed keys set(['city'])",
            message="ArgsParseFailed"
        }
        Multiple results found when a single record was expected (lookup by id)
        {
            content=null,
            message="MultipleResultsFound"
        }
        Query returned no result (lookup by id)
        {
            content=null,
            message="NoResultFound"
        }
    @apiErrorExample 403 Forbidden
        User is not permitted to perform this operation
        {
            content=null,
            message="Forbidden"
        }
        Unknown error
        {
            content=null,
            message="UnknownError"
        }
    @apiErrorExample 422 Unprocessable Entity
        Logic-layer error
        {
            content=null,
            message="LogicResponseFailed"
        }
        Duplicate key on insert
        {
            content=null,
            message="DuplicateEntry"
        }
        Refund failed
        {
            content=null,
            message="RefundMoneyFailed"
        }
        Charge failed
        {
            content=null,
            message="CostMoneyFailed"
        }
    """
    pass
5,352,689
def get_tree(data_path,sep,root,cutoff,layer_max,up=True): """ This function takes the path of a data file of edge list with numeric weights and returns a tree (DiGraph object). The parameters include: data_path: The path of a data file of edge list with numeric weights. sep: The delimiter of the data file. root: A root node to start with. cutoff: The edge weight threshold. layer_max: The number of layers to explore. up: The direction (upstream or downstream) of the tree. The default is upstream. """ # Read in the network data. F = nx.read_weighted_edgelist(data_path,delimiter=sep,create_using=nx.DiGraph()) # create_using is to specify a directed network, otherwise, an # undirected network is returned. # Filter the edges with the cutoff value. G = nx.DiGraph( [ (u,v,d) for u,v,d in F.edges(data=True) if d['weight']>=cutoff] ) reachset = set() unreachset = set() for n in G.nodes(): if(n != root): unreachset.add(n) else: reachset.add(n) H = nx.DiGraph() # Initiate a tree. oldreach = len(reachset) newreach = oldreach +1 rndcount = 0 if(up==True): # When an upstream tree is requested. while(newreach>oldreach and rndcount<layer_max): oldreach = len(reachset) candidatesIn = {} for ee in G.edges(data = True): e1 = ee[0] e2 = ee[1] w = ee[2]['weight'] if(e2 in reachset and e1 in unreachset): # e2 in reachset because the direction is upstream. candidatesIn[(e1,e2)] = w sorted_edges_in = sorted(candidatesIn.iteritems(), key=operator.itemgetter(1), reverse = True) # reverse = True is to pick the edge with the largest weight # first. Otherwise, the edge with the smallest weight will be # picked first. if(len(sorted_edges_in) > 0): for se in sorted_edges_in: if (se[0][0] in unreachset): # The same candidate node may appear more than once # connecting with different existing nodes. So # se[0][0] needs to be checked if still in # unreachset before being added. This is to ensure # that all the nodes in the tree are unique. For # each round/layer of search, the edge with a # larger weight is preferred. reachset.add(se[0][0]) unreachset.remove(se[0][0]) H.add_edge(se[0][0],se[0][1],weight=se[1],layer=rndcount+1) # The edge attribute layer is added. H.node[se[0][0]]['dist']=rndcount+1 # The node attribute dist (distance from the root) is added. newreach=len(reachset) else: newreach=oldreach rndcount=rndcount+1 if(H.number_of_nodes()>0): # Error if empty tree. H.node[root]['dist']=0 # Add the attribute dist for the root. else: # When a downstream tree is requested. while(newreach>oldreach and rndcount<layer_max): oldreach = len(reachset) candidatesOut = {} for ee in G.edges(data = True): e1 = ee[0] e2 = ee[1] w = ee[2]['weight'] if(e1 in reachset and e2 in unreachset): # e1 in reachset because the direction is downstream. candidatesOut[(e1,e2)] = w sorted_edges_out = sorted(candidatesOut.iteritems(), key=operator.itemgetter(1), reverse = True) # reverse = True is to pick the edge with the largest weight # first. Otherwise, the edge with the smallest weight will be # picked first. if(len(sorted_edges_out) > 0): for se in sorted_edges_out: if (se[0][1] in unreachset): # The same candidate node may appear more than once # connecting with different existing nodes. So # se[0][1] needs to be checked if still in # unreachset before being added. This is to ensure # that all the nodes in the tree are unique. For # each round/layer of search, the edge with a # larger weight is preferred. 
reachset.add(se[0][1]) unreachset.remove(se[0][1]) H.add_edge(se[0][0],se[0][1],weight=se[1],layer=rndcount+1) # The edge attribute layer is added. H.node[se[0][1]]['dist']=rndcount+1 # The node attribute dist (distance from the root) is added. newreach=len(reachset) else: newreach=oldreach rndcount=rndcount+1 if(H.number_of_nodes()>0): # Error if empty tree. H.node[root]['dist']=0 # Add the attribute dist for the root. return H
5,352,690
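Hypothetical usage (Python 2, matching the iteritems/xrange idioms above); edges.csv is a stand-in weighted edge-list file:

H = get_tree("edges.csv", ",", "A", 0.5, 2, up=True)
for n, d in H.nodes(data=True):
    print n, d.get('dist')   # distance of each node from the root "A"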
def delete_functions(lambda_client, function_list) -> list:
    """Deletes all Lambda functions in the function_list parameter.

    Args:
        lambda_client: A lambda boto3 client
        function_list: A list of functions you want deleted.

    Returns:
        A list of deleted function names, with error strings for any
        functions that could not be deleted
    """
    terminated_functions = []
    for lambda_function in function_list:
        function_name = lambda_function["FunctionName"]
        if helpers.check_in_whitelist(function_name, WHITELIST_NAME):
            continue
        try:
            lambda_client.delete_function(
                FunctionName=function_name
            )
        except ClientError as error:
            error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME, function_name)
            print(error_string)
            terminated_functions.append(error_string)
            continue
        terminated_functions.append(lambda_function["FunctionName"])
    return terminated_functions
5,352,691
def run(): """ Run the main loop. After the window has been set up, and the event hooks are in place, this is usually one of the last commands on the main program. """ pyglet.app.run()
5,352,692
def make_random_shares(seed, minimum, n_shares, share_strength=256): """ Generates a random shamir pool for a given seed phrase. Returns share points as seeds phrases (word list). """ if minimum > n_shares: raise ValueError( "More shares needed (%d) to recover the seed phrase than created " "(%d). Seed phrase would be irrecoverable." % (minimum, n_shares) ) seed_length = len(seed.split(" ")) if seed_length not in LENGTH_ALLOWED: raise ValueError( "Seed phrase should have %s words, but not %d words." % (LENGTH_STR, seed_length) ) seed_strength = seed_length // 3 * 32 if share_strength not in STRENGTH_ALLOWED: raise ValueError( "Share strength should be one of the following %s. " "But it is not (%d)." % (STRENGTH_STR, share_strength) ) if share_strength < seed_strength: raise ValueError( "Share strength (%d) is lower that seed strength (%d). Seed phrase " "would be irrecoverable." % (share_strength, seed_strength) ) prime = PRIMES[share_strength] secret = seed_to_int(seed) poly = [secret] + [random_int(prime - 1) for i in range(minimum - 1)] points = [(i, _eval_at(poly, i, prime)) for i in range(1, n_shares + 1)] shares = [(i, int_to_seed(point, strength=share_strength)) for i, point in points] return shares
5,352,693
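The _eval_at helper referenced above is not shown; a sketch of it under the standard Shamir convention (Horner evaluation of the polynomial at x, modulo the prime, with coefficients stored lowest-order first):

def _eval_at(poly, x, prime):
    # poly[0] is the secret; higher indices are the random coefficients.
    accum = 0
    for coeff in reversed(poly):
        accum = (accum * x + coeff) % prime
    return accum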
def confusion_matrix(
    probs: Optional[Sequence[Sequence]] = None,
    y_true: Optional[Sequence] = None,
    preds: Optional[Sequence] = None,
    class_names: Optional[Sequence[str]] = None,
    title: Optional[str] = None,
):
    """
    Computes a multi-run confusion matrix.

    Arguments:
        probs (2-d arr): Shape [n_examples, n_classes]
        y_true (arr): Array of label indices.
        preds (arr): Array of predicted label indices.
        class_names (arr): Array of class names.

    Returns:
        Nothing. To see plots, go to your W&B run page then expand the 'media' tab
        under 'auto visualizations'.

    Example:
        ```
        vals = np.random.uniform(size=(10, 5))
        probs = np.exp(vals)/np.sum(np.exp(vals), keepdims=True, axis=1)
        y_true = np.random.randint(0, 5, size=(10))
        labels = ["Cat", "Dog", "Bird", "Fish", "Horse"]
        wandb.log({'confusion_matrix': wandb.plot.confusion_matrix(probs, y_true=y_true, class_names=labels)})
        ```
    """
    np = util.get_module(
        "numpy",
        required="confusion matrix requires the numpy library, install with `pip install numpy`",
    )
    assert probs is None or len(probs.shape) == 2, (
        "confusion_matrix has been updated to accept"
        " probabilities as the default first argument. Use preds=..."
    )

    assert (probs is None or preds is None) and not (
        probs is None and preds is None
    ), "Must provide probabilities or predictions but not both to confusion matrix"

    if probs is not None:
        preds = np.argmax(probs, axis=1).tolist()

    assert len(preds) == len(
        y_true
    ), "Number of predictions and label indices must match"

    if class_names is not None:
        n_classes = len(class_names)
        class_inds = [i for i in range(n_classes)]
        assert max(preds) <= len(
            class_names
        ), "Higher predicted index than number of classes"
        assert max(y_true) <= len(
            class_names
        ), "Higher label class index than number of classes"
    else:
        class_inds = set(preds).union(set(y_true))
        n_classes = len(class_inds)
        class_names = [f"Class_{i}" for i in range(1, n_classes + 1)]

    # get mapping of inds to class index in case user has weird prediction indices
    class_mapping = {}
    for i, val in enumerate(sorted(list(class_inds))):
        class_mapping[val] = i
    counts = np.zeros((n_classes, n_classes))
    for i in range(len(preds)):
        counts[class_mapping[y_true[i]], class_mapping[preds[i]]] += 1

    data = []
    for i in range(n_classes):
        for j in range(n_classes):
            data.append([class_names[i], class_names[j], counts[i, j]])

    fields = {
        "Actual": "Actual",
        "Predicted": "Predicted",
        "nPredictions": "nPredictions",
    }
    title = title or ""

    return wandb.plot_table(
        "wandb/confusion_matrix/v1",
        wandb.Table(columns=["Actual", "Predicted", "nPredictions"], data=data),
        fields,
        {"title": title},
    )
5,352,694
def get_monitor_details():
    """Return the details and collected articles for a single monitor."""
    monitor_id = paranoid_clean(request.args.get('id'))
    monitors = mongo.db[app.config['MONITORS_COLLECTION']]
    monitor = monitors.find_one({'hashed': monitor_id}, {'_id': 0})
    if not monitor:
        return jsonify({'success': False, 'error': 'Monitor was not found.'})
    articles = mongo.db[app.config['ARTICLES_COLLECTION']]
    link = monitor['metadata']['rss_link']
    articles = list(articles.find({'feed_source': link}, {'_id': 0}))
    for idx, item in enumerate(articles):
        articles[idx]['title'] = html.unescape(item['title'])
        articles[idx]['date'] = item['collected'][:10]
    articles.sort(key=lambda x: x['collected'], reverse=True)
    return jsonify({'success': True, 'monitor': monitor, 'articles': articles})
5,352,695
def _simplify(obj: object) -> object:
    """
    This function takes an object as input and returns a simple
    Python object which is supported by the chosen serialization
    method (such as JSON or msgpack). The reason we have this function
    is that some objects are either NOT supported by high level (fast)
    serializers OR the high level serializers don't support the fastest
    form of serialization. For example, PyTorch tensors have custom pickle
    functionality thus its better to pre-serialize PyTorch tensors using
    pickle and then serialize the binary in with the rest of the message
    being sent.

    Args:
        obj: an object which may need to be simplified

    Returns:
        obj: a simple Python object which msgpack can serialize; if no
        simplifier is registered for the object's type, the object is
        returned unchanged
    """

    try:
        # check to see if there is a simplifier
        # for this type. If there is, run return
        # the simplified object
        current_type = type(obj)
        result = (simplifiers[current_type][0], simplifiers[current_type][1](obj))
        return result

    except KeyError:

        # if there is not a simplifier for this
        # object, then the object is already a
        # simple python object and we can just
        # return it
        return obj
5,352,696
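A toy registry in the shape _simplify expects — {type: (type_code, simplify_fn)} — showing both the simplified and the pass-through paths (the codes here are arbitrary illustrations):

simplifiers = {
    set: (0, lambda s: sorted(s)),             # sets aren't msgpack-native
    complex: (1, lambda c: (c.real, c.imag)),
}

assert _simplify({3, 1, 2}) == (0, [1, 2, 3])
assert _simplify(42) == 42    # no registered simplifier -> returned unchanged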
def test_property_bindings(rdfs_graph: Graph) -> None: """ The ``bindings`` property of a `rdflib.query.Result` result works as expected. """ result = rdfs_graph.query( """ SELECT ?class ?label WHERE { ?class rdf:type rdfs:Class. ?class rdfs:label ?label. } ORDER BY ?class """ ) expected_bindings = [ { Variable('class'): RDFS.Class, Variable('label'): Literal('Class'), }, { Variable('class'): RDFS.Container, Variable('label'): Literal('Container'), }, { Variable('class'): RDFS.ContainerMembershipProperty, Variable('label'): Literal('ContainerMembershipProperty'), }, { Variable('class'): RDFS.Datatype, Variable('label'): Literal('Datatype'), }, { Variable('class'): RDFS.Literal, Variable('label'): Literal('Literal'), }, { Variable('class'): RDFS.Resource, Variable('label'): Literal('Resource'), }, ] assert expected_bindings == result.bindings result.bindings = [] assert [] == result.bindings
5,352,697
def match_v2v3(aperture_1, aperture_2, verbose=False): """Use the V2V3 from aperture_1 in aperture_2 modifying X[Y]DetRef,X[Y]SciRef to match. Also shift the polynomial coefficients to reflect the new reference point origin and for NIRCam recalculate angles. Parameters ---------- aperture_1 : `pysiaf.Aperture object` Aperture whose V2,V3 reference position is to be used aperture_2 : `pysiaf.Aperture object` The V2,V3 reference position is to be altered to match that of aperture_1 verbose : bool verbosity Returns ------- new_aperture_2: `pysiaf.Aperture object` An aperture object derived from aperture_2 but with some parameters changed to match altered V2V3. """ instrument = aperture_1.InstrName assert instrument != 'NIRSPEC', 'Program not working for NIRSpec' assert (aperture_2.AperType in ['FULLSCA', 'SUBARRAY', 'ROI']), \ "2nd aperture must be pixel-based" order = aperture_1.Sci2IdlDeg V2Ref1 = aperture_1.V2Ref V3Ref1 = aperture_1.V3Ref newV2Ref = V2Ref1 newV3Ref = V3Ref1 if verbose: print('Current Vref', aperture_2.V2Ref, aperture_2.V3Ref) print('Shift to ', V2Ref1, V3Ref1) # Need to work in aperture 2 coordinate systems aperName_1 = aperture_1.AperName aperName_2 = aperture_2.AperName detector_1 = aperName_1.split('_')[0] detector_2 = aperName_2.split('_')[0] if verbose: print('Detector 1', detector_1, ' Detector 2', detector_2) V2Ref2 = aperture_2.V2Ref V3Ref2 = aperture_2.V3Ref theta0 = aperture_2.V3IdlYAngle if verbose: print('Initial VRef', V2Ref2, V3Ref2) print('Initial theta', theta0) theta = radians(theta0) coefficients = aperture_2.get_polynomial_coefficients() A = coefficients['Sci2IdlX'] B = coefficients['Sci2IdlY'] C = coefficients['Idl2SciX'] D = coefficients['Idl2SciY'] if verbose: print('\nA') print_triangle(A) print('B') print_triangle(B) print('C') print_triangle(C) print('D') print_triangle(D) (stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(A, B, C, D, verbose=verbose, instrument=instrument) print('Round trip X Y') print(' Means%8.4F %8.4f' % (xmean, ymean)) print(' STDs%8.4f %8.4f' % (xstd, ystd)) # Use convert (newXSci, newYSci) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'sci') (newXDet, newYDet) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'det') (newXIdl, newYIdl) = aperture_2.convert(V2Ref1, V3Ref1, 'tel', 'idl') dXSciRef = newXSci - aperture_2.XSciRef dYSciRef = newYSci - aperture_2.YSciRef AS = shift_coefficients(A, dXSciRef, dYSciRef) BS = shift_coefficients(B, dXSciRef, dYSciRef) if verbose: print('VRef1', V2Ref1, V3Ref1) print('Idl', newXIdl, newYIdl) print('Shift pixel origin by', dXSciRef, dYSciRef) print('New Ideal origin', newXIdl, newYIdl) CS = shift_coefficients(C, AS[0], BS[0]) DS = shift_coefficients(D, AS[0], BS[0]) AS[0] = 0.0 BS[0] = 0.0 CS[0] = 0.0 DS[0] = 0.0 if verbose: print('\nShifted Polynomials') print('AS') print_triangle(AS) print('BS') print_triangle(BS) print('CS') print_triangle(CS) print('DS') print_triangle(DS) print('\nABCDS') (stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(AS, BS, CS, DS, verbose=verbose, instrument=instrument) if verbose: print('Round trip X Y') print(' Means%8.4F %8.4f' % (xmean, ymean)) print(' STDs%8.4f %8.4f' % (xstd, ystd)) newA = AS newB = BS newC = CS newD = DS new_aperture_2 = copy.deepcopy(aperture_2) # For NIRCam only, adjust angles if instrument == 'NIRCAM': newV3IdlYAngle = degrees(atan2(-AS[2], BS[2])) # Everything rotates by this amount if abs(newV3IdlYAngle) > 90.0: newV3IdlYAngle = newV3IdlYAngle - copysign(180, newV3IdlYAngle) newA = AS*cos(radians(newV3IdlYAngle)) 
+ BS*sin(radians(newV3IdlYAngle))
        newB = -AS*sin(radians(newV3IdlYAngle)) + BS*cos(radians(newV3IdlYAngle))
        if verbose:
            print('New angle', newV3IdlYAngle)
            print('\nnewA')
            print_triangle(newA)
            print('newB')
            print_triangle(newB)

        newC = prepend_rotation_to_polynomial(CS, -newV3IdlYAngle)
        newD = prepend_rotation_to_polynomial(DS, -newV3IdlYAngle)
        if verbose:
            print('newC')
            print_triangle(newC)
            print('newD')
            print_triangle(newD)
        (stat, xmean, ymean, xstd, ystd, data) = compute_roundtrip_error(newA, newB, newC, newD,
                                                                         verbose=verbose,
                                                                         instrument=instrument)
        print('\nFinal coefficients')
        print('Round trip X Y')
        print(' Means%8.4F %8.4f' % (xmean, ymean))
        print(' STDs%8.4f %8.4f' % (xstd, ystd))

        newV3SciXAngle = aperture_2.V3SciXAngle + newV3IdlYAngle
        newV3SciYAngle = aperture_2.V3SciYAngle + newV3IdlYAngle
        newV3IdlYAngle = aperture_2.V3IdlYAngle + newV3IdlYAngle
        new_aperture_2.V3SciXAngle = newV3SciXAngle
        new_aperture_2.V3SciYAngle = newV3SciYAngle
        new_aperture_2.V3IdlYAngle = newV3IdlYAngle

    # Set new values in new_aperture_2
    new_aperture_2.V2Ref = newV2Ref
    new_aperture_2.V3Ref = newV3Ref
    new_aperture_2.XDetRef = newXDet
    new_aperture_2.YDetRef = newYDet
    new_aperture_2.XSciRef = newXSci
    new_aperture_2.YSciRef = newYSci
    if verbose:
        print('Initial', aperture_2.V2Ref, aperture_2.V3Ref, aperture_2.XDetRef, aperture_2.YDetRef)
        print('Changes', newV2Ref, newV3Ref, newXDet, newYDet)
        print('Modified', new_aperture_2.V2Ref, new_aperture_2.V3Ref, new_aperture_2.XDetRef,
              new_aperture_2.YDetRef)

    new_aperture_2.set_polynomial_coefficients(newA, newB, newC, newD)
    (xcorners, ycorners) = new_aperture_2.corners('idl', rederive=True)
    for c in range(4):
        suffix = "{}".format(c+1)
        setattr(new_aperture_2, 'XIdlVert' + suffix, xcorners[c])
        setattr(new_aperture_2, 'YIdlVert' + suffix, ycorners[c])

    return new_aperture_2
5,352,698
def PlatformPager() -> PagerCommand: """ Return the default pager command for the current platform. """ if sys.platform.startswith('aix'): return More() if sys.platform.startswith('win32'): return More() return Less()
5,352,699