Dataset columns:
content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def article_markdown(text):
    """Render the given text as Markdown."""
    renderer = ArticleRenderer()
    markdown = mistune.Markdown(renderer=renderer)
    return markdown(text)
32d1edc0d5155c62b0dc0ff18dc9a44f1ec85d7a
3,658,700
def tf_pywt_wavelet_decomposition(patch_vec, patch_size, name, wavelet_type, level, mode):
    """Run a pywt wavelet decomposition on a flattened patch.

    :param patch_vec: flattened patch to decompose
    :param patch_size: patch shape
    :param name: transform name
    :param wavelet_type: pywt wavelet identifier
    :param level: decomposition level
    :param mode: signal extension mode
    :return: coefficient vector and bookkeeping matrix
    """
    # Convert input values for pywt
    wavelet_type = wavelet_type.decode('utf-8')
    mode = mode.decode('utf-8')
    level = int(level)
    patch_size = tuple(patch_size)
    name = name.decode('utf-8')

    # Rebuild transform_dict from unpacked inputs
    transform_dict = generate_transform_dict(patch_size, name, wavelet=wavelet_type,
                                             level=level, mode=mode)

    # Decomposition
    coeffs_vec, bookkeeping_mat = wavelet_decomposition(patch_vec, transform_dict)

    return coeffs_vec.astype(np.float32), bookkeeping_mat.astype(np.int32)
4f4774189b06d5f51b6c65af7c4fc4f83b49314c
3,658,701
from pycompss.api.api import compss_start, compss_stop from importlib.machinery import SourceFileLoader # noqa import imp # noqa import os import logging import sys def launch_pycompss_application(app, func, log_level="off", # type: str o_c=False, # type: bool debug=False, # type: bool graph=False, # type: bool trace=False, # type: bool monitor=None, # type: int project_xml=None, # type: str resources_xml=None, # type: str summary=False, # type: bool task_execution="compss", # type: str storage_impl=None, # type: str storage_conf=None, # type: str streaming_backend=None, # type: str streaming_master_name=None, # type: str streaming_master_port=None, # type: str task_count=50, # type: int app_name=None, # type: str uuid=None, # type: str base_log_dir=None, # type: str specific_log_dir=None, # type: str extrae_cfg=None, # type: str comm="NIO", # type: str conn=DEFAULT_CONN, # type: str master_name="", # type: str master_port="", # type: str scheduler=DEFAULT_SCHED, # type: str jvm_workers=DEFAULT_JVM_WORKERS, # type: str cpu_affinity="automatic", # type: str gpu_affinity="automatic", # type: str fpga_affinity="automatic", # type: str fpga_reprogram="", # type: str profile_input="", # type: str profile_output="", # type: str scheduler_config="", # type: str external_adaptation=False, # type: bool propagate_virtual_environment=True, # noqa type: bool mpi_worker=False, # type: bool worker_cache=False, # type: bool or str shutdown_in_node_failure=False, # type: bool io_executors=0, # type: int env_script="", # type: str reuse_on_block=True, # type: bool nested_enabled=False, # type: bool tracing_task_dependencies=False, # type: bool trace_label=None, # type: str extrae_cfg_python=None, # type: str wcl=0, # type: int cache_profiler=False, # type: bool *args, **kwargs ): # NOSONAR # type: (...) -> None """ Launch PyCOMPSs application from function. 
:param app: Application path :param func: Function :param log_level: Logging level [ "trace"|"debug"|"info"|"api"|"off" ] (default: "off") :param o_c: Objects to string conversion [ True | False ] (default: False) :param debug: Debug mode [ True | False ] (default: False) (overrides log_level) :param graph: Generate graph [ True | False ] (default: False) :param trace: Generate trace [ True | False | "scorep" | "arm-map" | "arm-ddt"] (default: False) :param monitor: Monitor refresh rate (default: None) :param project_xml: Project xml file path :param resources_xml: Resources xml file path :param summary: Execution summary [ True | False ] (default: False) :param task_execution: Task execution (default: "compss") :param storage_impl: Storage implementation path :param storage_conf: Storage configuration file path :param streaming_backend: Streaming backend (default: None) :param streaming_master_name: Streaming master name (default: None) :param streaming_master_port: Streaming master port (default: None) :param task_count: Task count (default: 50) :param app_name: Application name (default: Interactive_date) :param uuid: UUId :param base_log_dir: Base logging directory :param specific_log_dir: Specific logging directory :param extrae_cfg: Extrae configuration file path :param comm: Communication library (default: NIO) :param conn: Connector (default: DefaultSSHConnector) :param master_name: Master Name (default: "") :param master_port: Master port (default: "") :param scheduler: Scheduler (default: es.bsc.compss.scheduler.loadbalancing.LoadBalancingScheduler) :param jvm_workers: Java VM parameters (default: "-Xms1024m,-Xmx1024m,-Xmn400m") :param cpu_affinity: CPU Core affinity (default: "automatic") :param gpu_affinity: GPU Core affinity (default: "automatic") :param fpga_affinity: FPA Core affinity (default: "automatic") :param fpga_reprogram: FPGA reprogram command (default: "") :param profile_input: Input profile (default: "") :param profile_output: Output profile (default: "") :param scheduler_config: Scheduler configuration (default: "") :param external_adaptation: External adaptation [ True | False ] (default: False) :param propagate_virtual_environment: Propagate virtual environment [ True | False ] (default: False) :param mpi_worker: Use the MPI worker [ True | False ] (default: False) :param worker_cache: Use the worker cache [ True | int(size) | False] (default: False) :param shutdown_in_node_failure: Shutdown in node failure [ True | False] (default: False) :param io_executors: <Integer> Number of IO executors :param env_script: <String> Environment script to be sourced in workers :param reuse_on_block: Reuse on block [ True | False] (default: True) :param nested_enabled: Nested enabled [ True | False] (default: False) :param tracing_task_dependencies: Include task dependencies in trace [ True | False] (default: False) :param trace_label: <String> Add trace label :param extrae_cfg_python: <String> Extrae configuration file for the workers :param wcl: <Integer> Wallclock limit. Stops the runtime if reached. 0 means forever. 
:param cache_profiler: Use the cache profiler [ True | False] (default: False) :param args: Positional arguments :param kwargs: Named arguments :return: Execution result """ # Check that COMPSs is available if "COMPSS_HOME" not in os.environ: # Do not allow to continue if COMPSS_HOME is not defined raise PyCOMPSsException("ERROR: COMPSS_HOME is not defined in the environment") # noqa: E501 # Let the Python binding know we are at master context.set_pycompss_context(context.MASTER) # Then we can import the appropriate start and stop functions from the API ############################################################## # INITIALIZATION ############################################################## if debug: log_level = "debug" # Initial dictionary with the user defined parameters all_vars = parameters_to_dict(log_level, debug, o_c, graph, trace, monitor, project_xml, resources_xml, summary, task_execution, storage_impl, storage_conf, streaming_backend, streaming_master_name, streaming_master_port, task_count, app_name, uuid, base_log_dir, specific_log_dir, extrae_cfg, comm, conn, master_name, master_port, scheduler, jvm_workers, cpu_affinity, gpu_affinity, fpga_affinity, fpga_reprogram, profile_input, profile_output, scheduler_config, external_adaptation, propagate_virtual_environment, mpi_worker, worker_cache, shutdown_in_node_failure, io_executors, env_script, reuse_on_block, nested_enabled, tracing_task_dependencies, trace_label, extrae_cfg_python, wcl, cache_profiler) # Save all vars in global current flags so that events.py can restart # the notebook with the same flags export_current_flags(all_vars) # Check the provided flags flags, issues = check_flags(all_vars) if not flags: print_flag_issues(issues) return None # Prepare the environment env_vars = prepare_environment(False, o_c, storage_impl, app, debug, trace, mpi_worker) all_vars.update(env_vars) monitoring_vars = prepare_loglevel_graph_for_monitoring(monitor, graph, debug, log_level) all_vars.update(monitoring_vars) if RUNNING_IN_SUPERCOMPUTER: updated_vars = updated_variables_in_sc() all_vars.update(updated_vars) to_update = prepare_tracing_environment(all_vars["trace"], all_vars["extrae_lib"], all_vars["ld_library_path"]) all_vars["trace"], all_vars["ld_library_path"] = to_update inf_vars = check_infrastructure_variables(all_vars["project_xml"], all_vars["resources_xml"], all_vars["compss_home"], all_vars["app_name"], all_vars["file_name"], all_vars["external_adaptation"]) all_vars.update(inf_vars) create_init_config_file(**all_vars) ############################################################## # RUNTIME START ############################################################## # Runtime start compss_start(log_level, all_vars["trace"], True) # Setup logging binding_log_path = get_log_path() log_path = os.path.join(all_vars["compss_home"], "Bindings", "python", str(all_vars["major_version"]), "log") set_temporary_directory(binding_log_path) logging_cfg_file = get_logging_cfg_file(log_level) init_logging(os.path.join(log_path, logging_cfg_file), binding_log_path) logger = logging.getLogger("pycompss.runtime.launch") logger.debug("--- START ---") logger.debug("PyCOMPSs Log path: %s" % log_path) if storage_impl and storage_conf: logger.debug("Starting storage") persistent_storage = master_init_storage(all_vars["storage_conf"], logger) else: persistent_storage = False logger.debug("Starting streaming") streaming = init_streaming(all_vars["streaming_backend"], all_vars["streaming_master_name"], all_vars["streaming_master_port"]) saved_argv 
= sys.argv sys.argv = args # Execution: with event(APPLICATION_RUNNING_EVENT, master=True): if func is None or func == "__main__": if IS_PYTHON3: exec(open(app).read()) else: execfile(app) # noqa result = None else: if IS_PYTHON3: imported_module = SourceFileLoader(all_vars["file_name"], app).load_module() # noqa else: imported_module = imp.load_source(all_vars["file_name"], app) # noqa method_to_call = getattr(imported_module, func) try: result = method_to_call(*args, **kwargs) except TypeError: result = method_to_call() # Recover the system arguments sys.argv = saved_argv # Stop streaming if streaming: stop_streaming() # Stop persistent storage if persistent_storage: master_stop_storage(logger) logger.debug("--- END ---") ############################################################## # RUNTIME STOP ############################################################## # Stop runtime compss_stop() clean_log_configs() return result
68871a41dedf1195045b51e712c17e7e45127a4e
3,658,702
def _gen_key(user_id, key_name):
    """ Tuck this into UserManager """
    try:
        manager = users.UserManager.instance()
        private_key, fingerprint = manager.generate_key_pair(user_id, key_name)
    except Exception as ex:
        return {'exception': ex}
    return {'private_key': private_key, 'fingerprint': fingerprint}
f5babf523bded37ba624295a7435e2709488d47a
3,658,703
def svhn_loader(size=None, root="./shvn", set="train", batch_size=32, mean=0.5, std=0.5,
                transform="default", download=True, target_transform=None, **loader_args):
    """
    :param size:
    :param root:
    :param set:
    :param batch_size:
    :param mean:
    :param std:
    :param transform:
    :param download:
    :param target_transform:
    :param loader_args:
    :return:
    """
    valid_sets = ('train', 'test', 'extra')
    if set not in valid_sets:
        raise ValueError("set {} is invalid, valid sets include {}".format(set, valid_sets))

    if size is not None:
        if not isinstance(size, tuple):
            size = (size, size)

    if transform == "default":
        t = []
        if size is not None:
            t.append(transformations.Resize(size))
        t.append(transformations.ToTensor())
        if mean is not None and std is not None:
            if not isinstance(mean, tuple):
                mean = (mean,)
            if not isinstance(std, tuple):
                std = (std,)
            t.append(transformations.Normalize(mean=mean, std=std))
        trans = transformations.Compose(t)
    else:
        trans = transform

    data = SVHN(root, split=set, transform=trans, download=download, target_transform=target_transform)
    shuffle_mode = True if set == "train" else False
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle_mode, **loader_args)
f40cd95338f4e745cbbb849ac8a9999f98245cf0
3,658,704
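A minimal usage sketch for the svhn_loader function above; the image size, batch size, and expected tensor shapes are illustrative assumptions, not taken from the original source.

# Hypothetical example: build SVHN train and test loaders with the default transform pipeline.
train_loader = svhn_loader(size=32, set="train", batch_size=64, download=True)
test_loader = svhn_loader(size=32, set="test", batch_size=64)
for images, labels in train_loader:
    print(images.shape, labels.shape)  # e.g. torch.Size([64, 3, 32, 32]) torch.Size([64])
    break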
import pkg_resources


def get_supervisees():
    """Pull the supervisor specifications out of the entry point."""
    eps = list(pkg_resources.iter_entry_points(ENTRY_POINT_GROUP))
    return dict((ep.name, ep.load()) for ep in eps)
6a812bb8422382c6e481bab8b27651786984ea66
3,658,705
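A sketch of how the entry-point group consumed by get_supervisees above is typically populated; the group name "myapp.supervisees", package name, and module paths are hypothetical.

# setup.py of a plugin package (hypothetical names); each entry point loads to a supervisee spec.
from setuptools import setup

setup(
    name="myapp-worker-plugin",
    entry_points={
        "myapp.supervisees": [
            "worker = myapp_worker.supervise:make_spec",
            "scheduler = myapp_scheduler.supervise:make_spec",
        ],
    },
)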
async def index(request):
    """
    This is the view handler for the "/" url.

    **Note: returning html without a template engine like jinja2 is ugly, no way around that.**

    :param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request
    :return: aiohttp.web.Response object
    """
    # {% if database.is_none and example.is_message_board %}
    # app.router allows us to generate urls based on their names,
    # see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources
    message_url = request.app.router['messages'].url_for()
    ctx = dict(
        title=request.app['name'],
        styles_css_url=request.app['static_root_url'] + '/styles.css',
        content="""\
<p>Success! you've setup a basic aiohttp app.</p>
<p>To demonstrate a little of the functionality of aiohttp this app implements a very simple message board.</p>
<b>
  <a href="{message_url}">View and add messages</a>
</b>""".format(message_url=message_url)
    )
    # {% else %}
    ctx = dict(
        title=request.app['name'],
        styles_css_url=request.app['static_root_url'] + '/styles.css',
        content="<p>Success! you've setup a basic aiohttp app.</p>",
    )
    # {% endif %}
    # with the base web.Response type we have to manually set the content type, otherwise text/plain will be used.
    return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html')
f90ba225055bf77b39942da7fc1b1b2f5b4a7286
3,658,706
def adtg(s, t, p): """ Calculates adiabatic temperature gradient as per UNESCO 1983 routines. Parameters ---------- s(p) : array_like salinity [psu (PSS-78)] t(p) : array_like temperature [℃ (ITS-90)] p : array_like pressure [db] Returns ------- adtg : array_like adiabatic temperature gradient [℃ db :sup:`-1`] Examples -------- >>> # Data from UNESCO 1983 p45. >>> import seawater as sw >>> from seawater.library import T90conv >>> t = T90conv([[ 0, 0, 0, 0, 0, 0], ... [10, 10, 10, 10, 10, 10], ... [20, 20, 20, 20, 20, 20], ... [30, 30, 30, 30, 30, 30], ... [40, 40, 40, 40, 40, 40]]) >>> s = [[25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35], ... [25, 25, 25, 35, 35, 35]] >>> p = [0, 5000, 10000, 0, 5000, 10000] >>> sw.adtg(s, t, p) array([[ 1.68710000e-05, 1.04700000e-04, 1.69426000e-04, 3.58030000e-05, 1.17956500e-04, 1.77007000e-04], [ 1.00194580e-04, 1.60959050e-04, 2.06874170e-04, 1.14887280e-04, 1.71364200e-04, 2.12991770e-04], [ 1.73819840e-04, 2.13534000e-04, 2.44483760e-04, 1.84273240e-04, 2.21087800e-04, 2.49137960e-04], [ 2.41720460e-04, 2.64764100e-04, 2.82959590e-04, 2.47934560e-04, 2.69466550e-04, 2.86150390e-04], [ 3.07870120e-04, 3.16988600e-04, 3.23006480e-04, 3.09844920e-04, 3.18839700e-04, 3.24733880e-04]]) References ---------- .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for computation of fundamental properties of seawater. UNESCO Tech. Pap. in Mar. Sci., No. 44, 53 pp. http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic temperature gradient and potential temperature of sea water. Deep-Sea Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6 """ s, t, p = map(np.asanyarray, (s, t, p)) T68 = T68conv(t) a = [3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10] b = [1.8932e-6, -4.2393e-8] c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14] d = [-1.1351e-10, 2.7759e-12] e = [-4.6206e-13, 1.8676e-14, -2.1687e-16] return (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68 + (b[0] + b[1] * T68) * (s - 35) + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) + (d[0] + d[1] * T68) * (s - 35)) * p + (e[0] + (e[1] + e[2] * T68) * T68) * p * p)
8d195810ad52215135db4ef8f9825a914b01522c
3,658,707
import re def calculate_ion_mz(seq: str, ion: str = 'M', charge: int = 0 ) -> float: """ given a peptide sequence and ion type, count the number of atoms, accounting for ion type and whether cysteines are measured by IAA - ion type M: full peptide parent ion (with H2O) b: b ion (no addition) y: y ion (with H2O) :param seq: str amino acid sequence with modifications defined by [] :param ion: str ion type (default: M to return peptide mass) :param charge: int numerical charge (default: 0 to return peptide mass) :return: float accurate mass """ assert type(charge) == int, "Charge must be integer." mass = 0 # First, strip all mass shifts and add them to the starting mass try: mods = [float(mod[1:-1]) for mod in re.findall('\\[.*?]', seq)] except ValueError: raise ValueError('Modification contains string characters.') # 2021-11-22 exclude label mass from peptide mass calculation mass += sum(m for m in mods if m not in params.label_mass) # 2021-05-18 strip all N-terminal n from Comet seq = re.sub('^n', '', seq) # Strip all modifications stripped = re.sub('\\[.*?]', '', seq) res_atoms = _count_residue_atoms(stripped, iaa=params.iaa, # add iodoacetamide to cysteine ) # dictionary for complementary atoms to add to ion types comp_atom_dict = { 'M': [0, 2, 1, 0, 0], 'b': [0, 0, 0, 0, 0], 'y': [0, 2, 1, 0, 0], 'b_': [0, -2, -1, 0, 0], 'y_': [0, 0, 0, 0, 0], } comp_atoms = comp_atom_dict[ion] ion_atoms = [res_atoms[i] + comp_atoms[i] for i, v in enumerate(res_atoms)] mass += _calc_atom_mass(ion_atoms) # Return peptide mass if charge is 0 if charge > 0: mz = (mass + constants.PROTON_MASS * charge) / charge return mz if charge < 0: raise ValueError('Negative charges are not supported.') return mass
e032ad439414314511b008d98dadb23b84012798
3,658,708
import os


def get_all_sub_folders(folder_path):
    """get all sub folders to list

    Parameters
    ----------
    folder_path : str

    Returns
    -------
    list
    """
    sub_folders = []
    for path in os.listdir(folder_path):
        full_path = os.path.join(folder_path, path)
        if os.path.isdir(full_path):
            sub_folders.append(full_path)
    return sub_folders
65afa34dadbf4cdd37ebd7feeab9aca9b03570ad
3,658,709
def hhc_to_int(s):
    """Parse a number expressed in sortable hhc as an integer (or long).

    >>> hhc_to_int('-')
    0
    >>> hhc_to_int('.')
    1
    >>> hhc_to_int('~')
    65
    >>> hhc_to_int('.-')
    66
    >>> hhc_to_int('..')
    67
    >>> hhc_to_int('.XW')
    6700
    >>> hhc_to_int('----..')
    67
    >>> print(hhc_to_int('fDpEShMz-qput'))
    302231454903657293676544

    Negative numbers are supported.

    >>> hhc_to_int(',zST')
    -6700
    """
    if s == '' or s is None or s[:2] == ',,':
        raise ValueError("invalid literal for hhc_to_int: {}".format(s))
    if s[0] == NEGATIVE_PREFIX:
        return -hhc2_to_int(s[1:], alphabet=HHC_ALPHABET[::-1])
    return hhc2_to_int(s, HHC_ALPHABET)
25f6e8097f1fbf0f6ceed08fc8ac0195fb88acb4
3,658,710
def initializeSens(P, B, idxs):
    """
    This function initializes the sensitivities using the bicriteria algorithm, to be the distance
    between each point and its closest flat from the set of flats B, divided by the sum of distances
    between self.P.P and B.

    :param B: A set of flats where each flat is represented by an orthogonal matrix and a translation vector.
    :param idxs: A numpy array which represents the clustering which B imposes on self.P.P
    :return: The additive sensitivity term per point.
    """
    centers_idxs = np.unique(idxs)  # number of clusters imposed by B
    sensitivity_additive_term = np.zeros((P.shape[0], ))

    for center_idx in centers_idxs:  # go over each cluster of points from self.P.P
        cluster_per_center = np.where(idxs == center_idx)[0]  # get all points in certain cluster

        # compute the distance of each point in the cluster to its respective flat
        cost_per_point_in_cluster = Utils.computeDistanceToSubspace(P[cluster_per_center, :-1],
                                                                    B[center_idx][0],
                                                                    B[center_idx][1])

        # set the sensitivity to the distance of each point from its respective flat divided by
        # the total distance between cluster points and that flat
        sensitivity_additive_term[cluster_per_center] = 2 ** Utils.J * \
            np.nan_to_num(cost_per_point_in_cluster / np.sum(cost_per_point_in_cluster))

    return sensitivity_additive_term
6726c892311d1590adea62babde6023d4b7d67a3
3,658,711
def fast_knn(data, k=3, eps=0, p=2, distance_upper_bound=np.inf, leafsize=10, idw=util_idw.shepards): """ Impute using a variant of the nearest neighbours approach Basic idea: Impute array with a basic mean impute and then use the resulting complete array to construct a KDTree. Use this KDTree to compute nearest neighbours. After finding `k` nearest neighbours, take the weighted average of them. Basically, find the nearest row in terms of distance This approach is much, much faster than the other implementation (fit+transform for each subset) which is almost prohibitively expensive. Parameters ---------- data: numpy.ndarray 2D matrix to impute. k: int, optional Parameter used for method querying the KDTree class object. Number of neighbours used in the KNN query. Refer to the docs for [`scipy.spatial.KDTree.query`] (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html). eps: nonnegative float, optional Parameter used for method querying the KDTree class object. From the SciPy docs: "Return approximate nearest neighbors; the kth returned value is guaranteed to be no further than (1+eps) times the distance to the real kth nearest neighbor". Refer to the docs for [`scipy.spatial.KDTree.query`] (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html). p : float, 1<=p<=infinity, optional Parameter used for method querying the KDTree class object. Straight from the SciPy docs: "Which Minkowski p-norm to use. 1 is the sum-of-absolute-values Manhattan distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance". Refer to the docs for [`scipy.spatial.KDTree.query`] (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html). distance_upper_bound : nonnegative float, optional Parameter used for method querying the KDTree class object. Straight from the SciPy docs: "Return only neighbors within this distance. This is used to prune tree searches, so if you are doing a series of nearest-neighbor queries, it may help to supply the distance to the nearest neighbor of the most recent point." Refer to the docs for [`scipy.spatial.KDTree.query`] (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html). leafsize: int, optional Parameter used for construction of the `KDTree` class object. Straight from the SciPy docs: "The number of points at which the algorithm switches over to brute-force. Has to be positive". Refer to the docs for [`scipy.spatial.KDTree`](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.KDTree.html) for more information. idw: fn, optional Function that takes one argument, a list of distances, and returns weighted percentages. You can define a custom one or bootstrap from functions defined in `impy.util.inverse_distance_weighting` which can be using functools.partial, for example: `functools.partial(impy.util.inverse_distance_weighting.shepards, power=1)` Returns ------- numpy.ndarray Imputed data. 
Examples -------- >>> data = np.arange(25).reshape((5, 5)).astype(np.float) >>> data[0][2] = np.nan >>> data array([[ 0., 1., nan, 3., 4.], [ 5., 6., 7., 8., 9.], [10., 11., 12., 13., 14.], [15., 16., 17., 18., 19.], [20., 21., 22., 23., 24.]]) >> fast_knn(data, k=1) # Weighted average (by distance) of nearest 1 neighbour array([[ 0., 1., 7., 3., 4.], [ 5., 6., 7., 8., 9.], [10., 11., 12., 13., 14.], [15., 16., 17., 18., 19.], [20., 21., 22., 23., 24.]]) >> fast_knn(data, k=2) # Weighted average of nearest 2 neighbours array([[ 0. , 1. , 10.08608891, 3. , 4. ], [ 5. , 6. , 7. , 8. , 9. ], [10. , 11. , 12. , 13. , 14. ], [15. , 16. , 17. , 18. , 19. ], [20. , 21. , 22. , 23. , 24. ]]) >> fast_knn(data, k=3) array([[ 0. , 1. , 13.40249283, 3. , 4. ], [ 5. , 6. , 7. , 8. , 9. ], [10. , 11. , 12. , 13. , 14. ], [15. , 16. , 17. , 18. , 19. ], [20. , 21. , 22. , 23. , 24. ]]) >> fast_knn(data, k=5) # There are at most only 4 neighbours. Raises error ... IndexError: index 5 is out of bounds for axis 0 with size 5 """ null_xy = find_null(data) data_c = mean(data) kdtree = KDTree(data_c, leafsize=leafsize) for x_i, y_i in null_xy: distances, indices = kdtree.query(data_c[x_i], k=k+1, eps=eps, p=p, distance_upper_bound=distance_upper_bound) # Will always return itself in the first index. Delete it. distances, indices = distances[1:], indices[1:] # Add small constant to distances to avoid division by 0 distances += 1e-3 weights = idw(distances) # Assign missing value the weighted average of `k` nearest neighbours data[x_i][y_i] = np.dot(weights, [data_c[ind][y_i] for ind in indices]) return data
976e51c66878643965b099f764595629b379d440
3,658,712
def role_generator(role):
    """Closure function returning a role function."""
    return lambda *args, **kwargs: role.run(*args, **kwargs)
35dd1a54cb53a6435633c39608413c2d0b9fe841
3,658,713
def pick_slices(img, num_slices_per_view):
    """
    Picks the slices to display in each dimension,
    skipping any empty slices (without any segmentation at all).
    """
    slices = list()
    for view in range(len(img.shape)):
        dim_size = img.shape[view]
        non_empty_slices = np.array(
            [sl for sl in range(dim_size) if np.count_nonzero(get_axis(img, view, sl)) > 0])
        num_non_empty = len(non_empty_slices)

        # trying to skip 5% of slices at the tails (bottom clipping at 0)
        skip_count = max(0, np.around(num_non_empty * 0.05).astype('int16'))

        # only when possible
        if skip_count > 0 and (num_non_empty - 2 * skip_count >= num_slices_per_view):
            non_empty_slices = non_empty_slices[skip_count: -skip_count]
            num_non_empty = len(non_empty_slices)

        # sampling non-empty slices only
        sampled_indices = np.linspace(0, num_non_empty,
                                      num=min(num_non_empty, num_slices_per_view),
                                      endpoint=False)
        slices_in_dim = non_empty_slices[np.around(sampled_indices).astype('int64')]

        # ensure you do not overshoot
        slices_in_dim = [sn for sn in slices_in_dim if sn >= 0 or sn <= num_non_empty]
        slices.append(slices_in_dim)

    return slices
ed80e4bd53e6a72c6ad7cad899d875ac320b33b7
3,658,714
import json


def cate2(request):
    """Return the Cate2 category names from the DB."""
    cate1 = Cate1.objects.get(cate1_name=request.GET.get('cate1'))
    cate2 = list(map(lambda cate2: cate2['cate2_name'],
                     Cate2.objects.filter(cate1=cate1).values('cate2_name')))
    json_data = json.dumps({'cate2': cate2})
    return HttpResponse(json_data, content_type="application/json")
ebeca48bb9d6550fb34d68c453ef1fb47225fb4a
3,658,715
def get_single_endpoint(name):
    """ TODO - Add docstring """

    class EndpointWithID(Resource):

        def get(self, pid):
            return get_with_id(name, pid), 200
        # TODO - Add `get.__doc__`

    EndpointWithID.__name__ = name
    return EndpointWithID
2081935a568545545e2825eaaa6bcb2c1ac33a6c
3,658,716
def dayChange():
    """ Day Change

    Calculates and stores in a dictionary the total current change in position value
    since yesterday, which is (current_price - lastday_price) * qty.

    :return: dictionary
    """
    daychange = dict()
    for position in portfolio:
        # Strings are returned from API; convert to floating point type
        current = float(position.current_price)
        last = float(position.lastday_price)
        quant = float(position.qty)
        daychange[position.symbol] = (current - last) * quant
    return daychange
f33979f25ffe44a0de8ec0abc1c02284e8fe5427
3,658,717
def look_for_section(line):
    """Look for one of the sections in a line of text."""
    for key in SECTIONS:
        if line.startswith(key):
            return key
    return None
d44ad97312528c4fea856e705be8e6820695fd9a
3,658,718
def SetStrucIdx(sid, index):
    """
    Change structure index

    @param sid: structure type ID
    @param index: new index of the structure

    @return: != 0 - ok

    @note: See GetFirstStrucIdx() for the explanation of structure indices and IDs.
    """
    s = idaapi.get_struc(sid)
    if not s:
        return 0
    return idaapi.set_struc_idx(s, index)
8b246d6e2fb155bdc789536f75e20971a54ddfc3
3,658,719
import json


def extract_user_dict_from_tweet(tweet: Tweet):
    """Takes the other_data field from a tweet object and extracts the data for the user from it.

    It returns a dictionary rather than a User model object because we might want to try looking up
    whether the user exists before creating a new user object.

    :type tweet Tweet
    :returns dict
    """
    if tweet.other_data and len(tweet.other_data) > 0:
        # extract the json into a dict
        j = json.loads(tweet.other_data)
        # extract the user json from the created dict
        return json.loads(j['user'])
533d8795c652e5c7f1299f3dcc04fc30de644222
3,658,720
def in_scope(repository_data):
    """Return whether the given repository is in scope for the configuration.

    Keyword arguments:
    repository_data -- data for the repository
    """
    if "scope" in repository_data["configuration"] and repository_data["configuration"]["scope"] == "all":
        return True
    # Determine if user has sufficient permissions in the repository to approve the workflow run
    return not repository_data["object"].archived and (
        repository_data["permissions"] == "write" or repository_data["permissions"] == "admin"
    )
0e521f805f69a1c6f306700680d42fbe76595c3a
3,658,721
from typing import Union from typing import Tuple from typing import Optional from typing import List from typing import Any def run_image_container_checks( image_container: Union[AICSImage, Reader], set_scene: str, expected_scenes: Tuple[str, ...], expected_current_scene: str, expected_shape: Tuple[int, ...], expected_dtype: np.dtype, expected_dims_order: str, expected_channel_names: Optional[List[str]], expected_physical_pixel_sizes: Tuple[ Optional[float], Optional[float], Optional[float] ], expected_metadata_type: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]], ) -> Union[AICSImage, Reader]: """ A general suite of tests to run against image containers (Reader and AICSImage). """ # Check serdes check_can_serialize_image_container(image_container) # Set scene image_container.set_scene(set_scene) # Check scene info assert image_container.scenes == expected_scenes assert image_container.current_scene == expected_current_scene # Check basics assert image_container.shape == expected_shape assert image_container.dtype == expected_dtype assert image_container.dims.order == expected_dims_order assert image_container.dims.shape == expected_shape assert image_container.channel_names == expected_channel_names assert image_container.physical_pixel_sizes == expected_physical_pixel_sizes assert isinstance(image_container.metadata, expected_metadata_type) # Read different chunks zyx_chunk_from_delayed = image_container.get_image_dask_data("ZYX").compute() cyx_chunk_from_delayed = image_container.get_image_dask_data("CYX").compute() # Check image still not fully in memory assert image_container._xarray_data is None # Read in mem then pull chunks zyx_chunk_from_mem = image_container.get_image_data("ZYX") cyz_chunk_from_mem = image_container.get_image_data("CYX") # Compare chunk reads np.testing.assert_array_equal( zyx_chunk_from_delayed, zyx_chunk_from_mem, ) np.testing.assert_array_equal( cyx_chunk_from_delayed, cyz_chunk_from_mem, ) # Check that the shape and dtype are expected after reading in full assert image_container.data.shape == expected_shape assert image_container.data.dtype == expected_dtype # Check serdes check_can_serialize_image_container(image_container) return image_container
a502650fb227aa4425a2501f112603b681b41fbf
3,658,722
from typing import Type


def collect_validation_helper(package_names: str) -> Type[ValidationHelper]:
    """Finds subclasses of the validate.ValidationHelper from a list of package names.

    Args:
        package_names: A list of Python package names as strings.

    Returns:
        A validator class that is a subclass of validate.ValidationHelper.
    """
    validation_cls = find_subclasses(package_names, ValidationHelper)
    return validation_cls[0]
bb38b734641a025a9d7fd26d46ebfb1476879c82
3,658,723
def heartbeat(request):
    """Test that ElasticSearch is operational.

    :param request: current request object
    :type request: :class:`~pyramid:pyramid.request.Request`
    :returns: ``True`` if everything is ok, ``False`` otherwise.
    :rtype: bool
    """
    indexer = request.registry.indexer
    try:
        return indexer.client.ping()
    except Exception as e:
        logger.exception(e)
        return False
6acf0b21b6fcc64f70ca75cb6795df0b5109f273
3,658,724
def _load_edge_data(graph, regions): """Load and return all relevant edges from the graph.""" has_seat = _load_edges_from_query( graph, 'SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Seat') # The edges in the existing dataset point from parent to child region / settlement. # In the desired dataset, we want the edge to be the other way, so we switch # the "in_rid" and "out_rid" names. has_parent_region = _load_edges_from_query( graph, ''' SELECT inV().@rid AS out_rid, outV().@rid AS in_rid FROM E WHERE ( @this INSTANCEOF "Has_Castles" OR @this INSTANCEOF "Has_Cities" OR @this INSTANCEOF "Has_Towns" OR @this INSTANCEOF "Has_Villages" OR @this INSTANCEOF "Has_Regional+capital" OR @this INSTANCEOF "Has_Places" ) AND ( inV() INSTANCEOF "Region" OR inV() INSTANCEOF "Settlement" ) AND ( outV() INSTANCEOF "Region" OR outV() INSTANCEOF "Settlement" ) ''') + _load_missing_region_edges(regions) lives_in = _load_edges_from_query( graph, ''' SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Place WHERE ( (inV() INSTANCEOF "Region" OR inV() INSTANCEOF "Settlement") AND outV() INSTANCEOF "Character" )''') owes_allegiance_to = _load_edges_from_query( graph, ''' SELECT inV().@rid AS in_rid, outV().@rid AS out_rid FROM Has_Allegiance WHERE ( ( inV() INSTANCEOF "Character" OR inV() INSTANCEOF "Noblehouse" OR inV() INSTANCEOF "Noble_house" ) AND ( outV() INSTANCEOF "Character" OR outV() INSTANCEOF "Noblehouse" OR outV() INSTANCEOF "Noble_house" ) )''') return set(has_seat), set(has_parent_region), set(lives_in), set(owes_allegiance_to)
d7a002c6214b614e95edc42d850dc9df51a26462
3,658,725
def get_story_assignee(jira_sheet, process):
    """ Accessor for Story Assignee

    Accessor method for retrieving the value for Story Assignee on the
    JIRA Stories Sheet. There is a check to make certain the process in
    question is amongst those qualified to exist.

    Args:
        jira_sheet: A variable holding an Excel Workbook sheet in memory.
        process: A variable holding the process of an Issue.

    Returns:
        A string value of the Parent
    """
    if process in PROCESS_DICT:
        return jira_sheet[PROCESS_DICT.get(process) + "6"].value
    else:
        print("Error: " + process + " is an invalid process. The following QE processes "
              "are acceptable: Complaints, Inquiry, CAPA, Quality Event, Change Control.\n")
3f49c10e540b001cf0f4eebf69a1821a16ec9476
3,658,726
def predict_mhalo(obs_dsigma, mock_use, logms_mod_tot, logms_mod_inn, sig_logms=None):
    """Halo mass and its scatter in each bin.

    Parameters
    ----------
    obs_dsigma: list
        List of observed DeltaSigma profiles.
    mock_use: numpy array
        UniverseMachine mock catalog.
    logms_mod_tot : ndarray
        Total stellar mass (e.g. M100) predicted by UM.
    logms_mod_inn : ndarray
        Inner stellar mass (e.g. M10) predicted by UM.
    sig_logms: numpy array, optional
        Uncertainties of stellar mass. Default: None

    """
    # The mock catalog and precomputed mass files for subsamples
    return [get_mean_mhalo(mock_use, obs_prof, logms_mod_tot, logms_mod_inn, sig_logms=sig_logms)
            for obs_prof in obs_dsigma]
0c68d773155f997d85361ae663bf0eaae09be258
3,658,727
def create_agent_model(env, lr=1e-4, h_size=128, epsilon=0.2, beta=1e-3, max_step=5e6,
                       normalize=False, num_layers=2):
    """
    Takes a Unity environment and model-specific hyper-parameters and returns the
    appropriate PPO agent model for the environment.

    :param env: a Unity environment.
    :param lr: Learning rate.
    :param h_size: Size of hidden layers.
    :param epsilon: Value for policy-divergence threshold.
    :param beta: Strength of entropy regularization.
    :param max_step: Total number of training steps.
    :return: a sub-class of PPOAgent tailored to the environment.
    """
    if num_layers < 1:
        num_layers = 1
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]
    if brain.action_space_type == "continuous":
        return ContinuousControlModel(lr, brain, h_size, epsilon, max_step, normalize, num_layers)
    if brain.action_space_type == "discrete":
        return DiscreteControlModel(lr, brain, h_size, epsilon, beta, max_step, normalize, num_layers)
ef43219e9e12ba46c81ed3a39ecb1b82e8953585
3,658,728
from typing import List


def decode_to_sequence(encoded_sequence: Bytes) -> List[RLP]:
    """
    Decodes a rlp encoded byte stream assuming that the decoded data
    should be of type `Sequence` of objects.

    Parameters
    ----------
    encoded_sequence :
        An RLP encoded Sequence.

    Returns
    -------
    decoded : `Sequence[RLP]`
        Sequence of objects decoded from `encoded_sequence`.
    """
    if encoded_sequence[0] <= 0xF7:
        len_joined_encodings = encoded_sequence[0] - 0xC0
        ensure(len_joined_encodings < len(encoded_sequence))
        joined_encodings = encoded_sequence[1 : 1 + len_joined_encodings]
    else:
        joined_encodings_start_idx = 1 + encoded_sequence[0] - 0xF7
        ensure(joined_encodings_start_idx - 1 < len(encoded_sequence))
        # Expectation is that the big endian bytes shouldn't start with 0
        # while trying to decode using RLP, in which case is an error.
        ensure(encoded_sequence[1] != 0)
        len_joined_encodings = Uint.from_be_bytes(
            encoded_sequence[1:joined_encodings_start_idx]
        )
        ensure(len_joined_encodings >= 0x38)
        joined_encodings_end_idx = (
            joined_encodings_start_idx + len_joined_encodings
        )
        ensure(joined_encodings_end_idx - 1 < len(encoded_sequence))
        joined_encodings = encoded_sequence[
            joined_encodings_start_idx:joined_encodings_end_idx
        ]

    return decode_joined_encodings(joined_encodings)
cb33dd9da8deb2096ce3ad205a743c4c22c0f4c8
3,658,729
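A small usage sketch for decode_to_sequence above, assuming the surrounding RLP module (ensure, Uint, decode_joined_encodings) is importable; the byte string is the standard RLP encoding of the two strings "cat" and "dog".

encoded = b"\xc8\x83cat\x83dog"  # 0xc8 = 0xc0 + 8 payload bytes: "\x83cat" and "\x83dog"
items = decode_to_sequence(encoded)
print(items)  # expected: [b'cat', b'dog']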
def list_field_override_choices(override_map=None, html=True):
    """
    This returns either a list of allowable choices, or an HTML-formatted unordered list (default).
    """
    if override_map:
        if html:
            choices = '<b>These are the allowable field override choices for field name:<ul>'
        else:
            choices = []
        for item in override_map:
            if html:
                choices += '<li>{}</li>'.format(item['field'])
            else:
                choices.append(item['field'])
        return choices
    return None
9b29493af651d95d67f8bd2c4283f53e737e7c5c
3,658,730
import logging


def get_logger(initial_level=logging.DEBUG):
    """Gets the named logger"""
    logger = logging.getLogger('ungoogled')
    if logger.level == logging.NOTSET:
        logger.setLevel(initial_level)
        if not logger.hasHandlers():
            console_handler = logging.StreamHandler()
            console_handler.setLevel(initial_level)
            format_string = '%(levelname)s: %(message)s'
            formatter = logging.Formatter(format_string)
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)
    return logger
d9f9a264f1f2c3c4638ffc5c6e886182923cbf5a
3,658,731
import logging import os import sys def config_logger(name, log_file, file_level, console_level): """Configure the logger that should be used by all modules in this package. This method sets up a logger, such that all messages are written to console and to an extra logging file. Both outputs will be the same, except that a message logged to file contains the module name, where the message comes from. The implementation is based on an earlier implementation of a function I used in another project: https://git.io/fNDZJ Args: name: The name of the created logger. log_file: Path of the log file. If None, no logfile will be generated. If the logfile already exists, it will be overwritten. file_level: Log level for logging to log file. console_level: Log level for logging to console. Returns: The configured logger. """ file_formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s' \ + ' - %(module)s - %(message)s', \ datefmt='%m/%d/%Y %I:%M:%S %p') stream_formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s' \ + ' - %(message)s', \ datefmt='%m/%d/%Y %I:%M:%S %p') if log_file is not None: log_dir = os.path.dirname(log_file) if log_dir != '' and not os.path.isdir(log_dir): os.mkdir(log_dir) if os.path.exists(log_file): os.remove(log_file) file_handler = logging.FileHandler(log_file) file_handler.setFormatter(file_formatter) file_handler.setLevel(file_level) stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(stream_formatter) stream_handler.setLevel(console_level) logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) if log_file is not None: logger.addHandler(file_handler) logger.addHandler(stream_handler) return logger
624cafebc685e10333134be28849b06c098902d1
3,658,732
import six


def _safe_resolve_url(url):
    """
    Previously, resolve_url_lazy would fail if the url was a unicode object.

    See <https://github.com/fusionbox/django-authtools/issues/13> for more information.

    Thanks to GitHub user alanwj for pointing out the problem and providing this solution.
    """
    return six.text_type(resolve_url(url))
9b06bc346ebe03b1e5209aa8c108b76aae895089
3,658,733
def get_metrics( reset: bool = False, include_custom: bool = True, raise_errors: bool = True, ) -> pd.DataFrame: """ Returns table of available metrics used for CV. Example ------- >>> from pycaret.datasets import get_data >>> boston = get_data('boston') >>> from pycaret.regression import * >>> exp_name = setup(data = boston, target = 'medv') >>> all_metrics = get_metrics() reset: bool, default = False When True, will reset all changes made using the ``add_metric`` and ``remove_metric`` function. include_custom: bool, default = True Whether to include user added (custom) metrics or not. raise_errors: bool, default = True If False, will suppress all exceptions, ignoring models that couldn't be created. Returns: pandas.DataFrame """ return pycaret.internal.tabular.get_metrics( reset=reset, include_custom=include_custom, raise_errors=raise_errors, )
1d2ed9372aa6f26cd740e6987a2e94baaef647dc
3,658,734
def get_unique_wikilinks(filepath):
    """Get UNIQUE wikilinks from a md file.

    The links' order of appearance in the file IS preserved in the output.

    This accounts for:
    - Aliases / alt text, so [[Lorem ipsum|L.I.]] will be represented as 'Lorem ipsum'.
    - Header text links, so [[Lorem ipsum#Dummy text]] will be represented as 'Lorem ipsum'.

    Args:
        filepath (pathlib Path): Path object representing the file from which info will be extracted.

    Returns:
        list of strings
    """
    plaintext = _get_ascii_plaintext_from_md_file(filepath, remove_code=True)
    wikilinks = _get_unique_wikilinks(plaintext, remove_aliases=True)
    return wikilinks
ca02428942d8a555d606a5c4b8190859917c22c7
3,658,735
def parse_single_example(serialized_example, params): """Parses a singel serialized TFExample string.""" decoder = tf_example_decoder.TfExampleDecoder() data = decoder.decode(serialized_example) image = data['image'] source_id = data['source_id'] source_id = dataloader_utils.process_source_id(source_id) height = data['height'] width = data['width'] boxes = data['groundtruth_boxes'] boxes = box_utils.denormalize_boxes(boxes, tf.shape(image)[:2]) classes = data['groundtruth_classes'] is_crowds = data['groundtruth_is_crowd'] areas = data['groundtruth_area'] image = input_utils.normalize_image(image) image, image_info = input_utils.resize_and_crop_image( image, params.retinanet_parser.output_size, padded_size=input_utils.compute_padded_size( params.retinanet_parser.output_size, 2 ** params.anchor.max_level), aug_scale_min=1.0, aug_scale_max=1.0) anchors = anchor.Anchor( params.anchor.min_level, params.anchor.max_level, params.anchor.num_scales, params.anchor.aspect_ratios, params.anchor.anchor_size, image.get_shape().as_list()[:2]) labels = { 'anchor_boxes': anchors.multilevel_boxes, 'image_info': image_info, } groundtruths = { 'source_id': source_id, 'height': height, 'width': width, 'num_detections': tf.shape(classes), 'boxes': boxes, 'classes': classes, 'areas': areas, 'is_crowds': tf.cast(is_crowds, tf.int32), } return image, labels, groundtruths
e274a6ebfe7e7aa51dc7bc6b779ef222081a7e47
3,658,736
def eval_blocking(lamb, mu, k):
    """Finds the blocking probability of a queue.

    Args:
        lamb (float): The rate into the queue.
        mu (float): The rate out of the queue.
        k (int): Maximum number of customers able to be in the queue.
    """
    rho = lamb / mu
    return rho**k * ((1 - rho) / (1 - rho**(k + 1)))
4c1ea7f5f7984fb24c85a5c1c6c77cdbc2e1e76a
3,658,737
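A worked example for eval_blocking above, which implements the M/M/1/K blocking-probability formula; the arrival and service rates are illustrative values.

# With lambda = 8 arrivals/s, mu = 10 services/s and room for k = 5 customers:
# rho = 0.8, blocking = 0.8**5 * (1 - 0.8) / (1 - 0.8**6) ~ 0.0888
p_block = eval_blocking(lamb=8.0, mu=10.0, k=5)
print(round(p_block, 4))  # ~0.0888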
def get_dependent_columns(covar):
    """
    Get the list of dependent columns

    :param covar: The covariance matrix
    :return: Dependent columns
    """
    ind_columns = (np.where(~covar.any(axis=1))[0]).tolist()
    dep_columns_z = []
    for i in range(0, covar.shape[0]):
        if i not in ind_columns:
            dep_columns_z.append(i)
    return exclude_linear_combination_variables(covar, dep_columns_z)
d0145649ce685a4d609809a57d374b1e362c303e
3,658,738
def results_to_answers(guess_hints, answers):
    """Provide remaining valid answers matching a list of guesses and
    corresponding hints
    """
    gh_stack = guess_hints.copy()
    new_ans = answers.copy()
    while len(gh_stack) > 0:
        gh = gh_stack.pop()
        guess = gh[0]
        hint = gh[1]
        new_ans = answers_guess_hint_to_answers(new_ans, guess, hint)
    return new_ans
243cbaeb2d36c66e49cd570c1487bbca7636cd2c
3,658,739
from typing import Optional


def get_archive_map(data: DataFrame, row_col: Optional[str] = "ROW") -> Series:
    """
    Get a series mapping object names to archive names

    :param data: Dataset with archive names as ARCHIVE column and object names in index
    :type data: DataFrame
    :param row_col: column with row index, defaults to "ROW". Set to None if not applicable
    :type row_col: str, optional
    :return: Series mapping object names to archive names
    :rtype: Series
    """
    archive_map = data.ARCHIVE.drop_duplicates()
    if row_col is not None:
        archive_map = archive_map.droplevel(row_col)
    return archive_map
2d66c55c64dab89e7523778411a7bf70ac784bf6
3,658,740
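A minimal sketch of the input shape get_archive_map above expects; the index names and values are assumptions used only for illustration.

import pandas as pd

# Hypothetical dataset: object names plus a ROW level in the index, ARCHIVE as a column.
data = pd.DataFrame(
    {"ARCHIVE": ["arch_A", "arch_A", "arch_B"]},
    index=pd.MultiIndex.from_tuples(
        [("obj1", 0), ("obj1", 1), ("obj2", 0)], names=["OBJECT", "ROW"]
    ),
)
print(get_archive_map(data))
# roughly:
# obj1    arch_A
# obj2    arch_B
# Name: ARCHIVE, dtype: object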
from typing import Iterable

import math


def gain_ratio(x_mat: ndarray, y_row: ndarray, prop: int, prop_values: Iterable,
               gain_value: float = None) -> float:
    """
    Compute the information gain ratio obtained by splitting the sample set on attribute
    prop. Larger values mean a larger purity gain from splitting on prop. Note that this
    criterion is biased toward attributes with fewer distinct values.

    :param x_mat: feature matrix; its m rows are samples and its n columns are features
    :param y_row: output vector; a one-dimensional row vector matching x_mat
    :param prop: attribute used for the split
    :param prop_values: possible values of the attribute
    :param gain_value: information gain; passing it in avoids recomputation
    :return: information gain ratio
    """
    prop_x = x_mat[:, prop]
    prop_y_num = []
    for v in prop_values:
        prop_y_num.append(len(y_row[prop_x == v]))
    m = y_row.shape[0]
    intrinsic_value = 0
    for num in prop_y_num:
        tmp = num / m
        intrinsic_value = intrinsic_value - tmp * (0 if math.isclose(tmp, 0) else math.log2(tmp))
    if gain_value is None:
        gain_value = gain(x_mat, y_row, prop, prop_values)
    return gain_value / intrinsic_value
08a26ba4c3dc7712ca515f128f7e3039f005b993
3,658,741
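A small usage sketch for gain_ratio above, assuming the companion gain() function from the same module is available; the toy data is made up.

import numpy as np

# Toy dataset: 6 samples, attribute 0 takes values {0, 1, 2}.
x = np.array([[0], [0], [1], [1], [2], [2]])
y = np.array([0, 0, 1, 1, 1, 0])
ratio = gain_ratio(x, y, prop=0, prop_values=(0, 1, 2))
print(ratio)  # information gain divided by the intrinsic value of attribute 0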
def _LengthError(e: ByteList):
    """Check if the length of the EDID is a multiple of 128.

    Args:
        e: The list form of the EDID to be checked.

    Returns:
        A list of error.Error objects, or None.
    """
    if not len(e) % 128:
        return None
    else:
        return [
            error.Error(
                "Overall EDID",
                "Invalid length",
                "Length % 128 = 0",
                "Length %% 128 = %d" % (len(e) % 128),
            )
        ]
940b6f4b2648eefe79afe69f623b0f1e02583ce1
3,658,742
def __single_auc_score__(feature_i, clf, cv_indices, X, y, sample_weight=None): """Method determining the 'area under curve' for a single test set. This function is intended for internal use. Parameters ---------- feature_i: int Index of the tested feature. clf: object Classifier that should be used for the classification. It needs a fit and a predict_proba function. cv_indices: list of tuples Indices for all the cross validation steps. They are explicit pass, so all test sets use the same splitting. X : numpy.float32array, shape=(n_samples, n_obs) Values describing the samples. y : numpy.float32array, shape=(n_samples) Array of the true labels. sample_weight : None or numpy.float32array, shape=(n_samples) If weights are used this has to contain the sample weights. None in the case of no weights. Returns ------- feature_i: int Index of the tested feature. It is need as a return value for asynchronous parallel processing auc_score: float Returns calculated auc score. """ y_pred = np.zeros_like(y, dtype=float) for i, [train_idx, test_idx] in enumerate(cv_indices): X_train = X[train_idx] X_test = X[test_idx] y_train = y[train_idx] if sample_weight is None: sample_weight_train = None sample_weight_test = None else: sample_weight_train = sample_weight[train_idx] sample_weight_test = sample_weight[test_idx] clf = clf.fit(X=X_train, y=y_train, sample_weight=sample_weight_train) y_pred[test_idx] = clf.predict_proba(X_test)[:, 1] auc_score = roc_auc_score(y, y_pred, sample_weight=sample_weight_test) return feature_i, auc_score
51738218bde23aeb3633bcfa47dff918af29c4cd
3,658,743
def IntCurveSurface_ThePolygonToolOfHInter_Bounding(*args):
    """
    :param thePolygon:
    :type thePolygon: IntCurveSurface_ThePolygonOfHInter &
    :rtype: Bnd_Box
    """
    return _IntCurveSurface.IntCurveSurface_ThePolygonToolOfHInter_Bounding(*args)
294da704fcc9a59a8e7fc2042d050255aa45accb
3,658,744
def identity_show(client, resource_group_name, account_name):
    """ Show the identity for Azure Cognitive Services account. """
    sa = client.get(resource_group_name, account_name)
    return sa.identity if sa.identity else {}
19018c895f3fdf0b2b79788547bf80a400724336
3,658,745
import os import sys def parsing_input(program_parameters): """ Parses apart the command line or qsub submission file to get all user input parameters for analyzing the data. Function also prints all user parameters to command line, so a user can monitor the inputs. Also, default settings are set in this function and are overwritten if a user provides the parameter instead. : Param program_parameters: Name of the parameter file being parsed : Return dictionary: Returns a dictionary of all paramters for later parts of the program (lots of variables) """ # Default parameters (will be overridden by user---input) # Variant Level Default Paramters quality_score_min= 20 # Default parameter for indel distance indel_exclusion_region_length = 1 # Sample level default parameters min_total_read_count=20 # Meta-Analysis Cutoff Values meta_BH_adj_p_value_cutoff = 0.05 meta_sample_p_value_cutoff = 0.05 # Multi-Dimensional P-Value Cutoff Value multi_dim_adjust_pvalue_cutoff = 0.05 # Currenting working directory default parameters (same directory as program) working_directory= os.getcwd() input_file_location = working_directory+'\\' # Global Output_File_Location output_file_location = working_directory+'/' #for UNIX environment this symbol is required and it works fine in PC submission # binomial probability value (50/50 Test) binomial_probability_value = 0.5 ###LEGACY VARIABLE KEPT FOR LATER DEVELOPMENT###### # Variables originally created to be modified, but later # in development realized obsolete min_numb_of_samples = 1 numb_ref_alleles_allowed = 1 numb_alt_alleles_allowed = 1 parsed_parameters=program_parameters.split("--") # print (parsed_parameters) for x in range(1, len(parsed_parameters)): inputs = parsed_parameters[x].split(" ") if inputs[0] == "File_Name": file_name = inputs[1] elif inputs[0] == "Indel_Exclusion_Region_Length": indel_exclusion_region_length = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(indel_exclusion_region_length) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Indel_Exclusion_Region_Length- incorrect input") print ("Incorrect input was: ", indel_exclusion_region_length) sys.exit() elif inputs[0] == "Minimum_Number_of_Samples_for_ASE": min_numb_of_samples = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(min_numb_of_samples) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Minimum_Number_of_Samples_for_ASE- incorrect input") print ("Incorrect input was: ", min_numb_of_samples) sys.exit() elif inputs[0] == "Number_of_Reference_Alleles_Allowed": numb_ref_alleles_allowed = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(numb_ref_alleles_allowed) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Number_of_Reference_Alleles_Allowed- incorrect input") print ("Incorrect input was: ", numb_ref_alleles_allowed) sys.exit() elif inputs[0] == "Number_of_Alternative_Alleles_Allowed": numb_alt_alleles_allowed = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(numb_alt_alleles_allowed) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check 
Number_of_Alternative_Alleles_Allowed- incorrect input") print ("Incorrect input was: ", numb_alt_alleles_allowed) sys.exit() elif inputs[0] == "Quality_Score_Minimum_for_Variants": quality_score_min = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(quality_score_min) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Quality_Score_Minimum_for_Variants- incorrect input") print ("Incorrect input was: ", quality_score_min) sys.exit() elif inputs[0] == "Minimum_Read_Counts": min_total_read_count = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(min_total_read_count) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Minimum_Read_Counts- incorrect input") print ("Incorrect input was: ", min_total_read_count) sys.exit() elif inputs[0] == "Meta_BH_adj_p_value_cutoff": meta_BH_adj_p_value_cutoff = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(meta_BH_adj_p_value_cutoff) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check META_FDR_p_value- incorrect input") print ("Incorrect input was: ", meta_BH_adj_p_value_cutoff) sys.exit() elif inputs[0] == "Meta_sample_p_value_cutoff": meta_sample_p_value_cutoff = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(meta_sample_p_value_cutoff) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check META_binomial_p_value- incorrect input") print ("Incorrect input was: ", meta_sample_p_value_cutoff) sys.exit() elif inputs[0] == "Multi_Dim_adjust_pvalue_cutoff": multi_dim_adjust_pvalue_cutoff = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(multi_dim_adjust_pvalue_cutoff) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Multi_Dim_adjust_pvalue_cutoff- incorrect input") print ("Incorrect input was: ", multi_dim_adjust_pvalue_cutoff) sys.exit() elif inputs[0] == "Binomial_Probability_Value": binomial_probability_value = inputs[1] #Testing user input (verify numeric value) result = test_Number_Input(binomial_probability_value) #If the result passes do this or if the result fails do something else if result =='Pass': pass else: print ("") print ("ERROR Alert!") print ("Please check Binomial_Probability_Value- incorrect input") print ("Incorrect input was: ", binomial_probability_value) sys.exit() elif inputs[0] == "Output_File_Location": output_file_location = inputs[1] # prints the help menu prompt elif inputs[0] == "help" or inputs[0]=="h" or inputs[0]=="Help": printing_help_menu=help_menu_prompt() else: print ("") print ("ERROR Alert!") print ("Please double check your input parameters, something was not quite right") print ("Type: --help to see a list of options and acceptable input for the program") sys.exit() # Printing user settings to terminal in case program crashes out before completion print ("") print ("") print ("Exact User Parameter Settings") print ("") print ("The input file is: ", file_name) print ("The output directory for analysis is: ", output_file_location) print ("") print 
("") print ("The minimum qualtity score (phred score) for a variant is: ", quality_score_min) print ("The indel exclusion region length from identified indels is: ", indel_exclusion_region_length) # print ("The minimum number of samples to count a variant for ASE is: ", min_numb_of_samples) print ("The number of allowable reference alleles is (currently program is limited to one): ", numb_ref_alleles_allowed) print ("The number of allowable alternative alleles is (currently program is limited to one): ", numb_alt_alleles_allowed) print ("The minimum number of total read counts for a sample per variant is: ", min_total_read_count) print ("") print ("") print ("The binomial probability value for ASE testing is: ", binomial_probability_value) print ("") print ("") print ("Meta-Analysis of Data") print ("The Meta BH adjusted p-value cutoff is: ", meta_BH_adj_p_value_cutoff) print ("The p-value cutoff used for estimated tallying of samples is: ", meta_sample_p_value_cutoff) print ("") print ("Multi-Dimensional P-Value Adjustment") print ("The p-value cutoff for testing is: ", multi_dim_adjust_pvalue_cutoff) print ("") print ("") # Returns a dictionary of all the variables return{'file_name':file_name, 'indel_exclusion_region_length':indel_exclusion_region_length, 'min_numb_of_samples':min_numb_of_samples, 'numb_ref_alleles_allowed':numb_ref_alleles_allowed, 'numb_alt_alleles_allowed':numb_alt_alleles_allowed, 'quality_score_min':quality_score_min, 'min_total_read_count':min_total_read_count, 'binomial_probability_value': binomial_probability_value, 'meta_BH_adj_p_value_cutoff':meta_BH_adj_p_value_cutoff, 'meta_sample_p_value_cutoff': meta_sample_p_value_cutoff, 'multi_dim_adjust_pvalue_cutoff': multi_dim_adjust_pvalue_cutoff, 'output_file_location':output_file_location}
761e29994ee77ffc62f0d4c4039bc82971d4fc77
3,658,746
import re


def normalize_country_code(country_code):
    """
    Normalize country codes a bit by making capitalization consistent and
    removing trailing comments (and other words).
    """
    if not country_code:
        return country_code
    country_code = re.match(r'^(\w+)', country_code).group(1)
    return country_code.upper()
37dce64b62ae4ec20cb2d9b10c66beeba73c5683
3,658,747
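A quick illustrative check of the helper above (the inputs shown here are made up):

>>> normalize_country_code("us  # from legacy header")
'US'
>>> normalize_country_code("")
''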
import math


def get_angle(p1, p2):
    """Get the angle between two points."""
    return math.atan2(p2[1] - p1[1], p2[0] - p1[0])
a29ea1ed74a6c071cf314d1c38c6e2f920bd1c3a
3,658,748
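For orientation, the function above returns radians measured from the positive x-axis; an illustrative check with arbitrary points:

>>> import math
>>> round(math.degrees(get_angle((0, 0), (1, 1))), 3)
45.0
>>> round(math.degrees(get_angle((0, 0), (0, 2))), 3)
90.0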
def per_application():
    """
    :return: a seeder function that always returns 1, ensuring at most one
             delegate is ever spawned for the entire application.
    """
    return lambda msg: 1
7ecc568846ab484557e768ad372f4faf85238401
3,658,749
import requests from bs4 import BeautifulSoup def get_job_information(url): """ Uses bs4 to grab the information from each job container based on the url. Parameters ---------- url : str Career builder url of any job Returns ------ job_data : dict Contains Job Name, Company Name, Job Location, Description, Skills and apply link. """ website = requests.get(url).text job_soup = BeautifulSoup(website, 'html.parser') job_name = "N/A" try: job_name = job_soup.select('h2.h3')[0].getText() except Exception as err: print(f"The job tile could not be selected properly") print(err) print(f'Skipping {url}...') company_name = "N/A" try: company_name = job_soup.select('.data-details > span:nth-child(1)')[0].getText() except Exception as err: print(f"The company name could not be selected properly") print(err) print(f'Skipping {url}...') job_location = "N/A" try: job_location = job_soup.select('.data-details > span:nth-child(2)')[0].getText() except Exception as err: print(f"The location could not be selected properly") print(err) print(f'Skipping {url}...') job_description = job_soup.select('#jdp_description > div.col-2 > div.col.big.col-mobile-full > p') job_description_2 = job_soup.select('#jdp_description > div:nth-child(1) > div:nth-child(1)') desc = [ ] for idx, paragraph in enumerate(job_description): desc.append(job_description[idx].text) if len(desc) == 0: for idx, paragraph in enumerate(job_description_2): desc.append(job_description_2[idx].text) job_skills = [ ] skills_container = job_soup.findAll("div", {"class": "check-bubble"}) for idx, skill in enumerate(skills_container): job_skills.append(skills_container[idx].text) job_data = {'Job Title': job_name, 'Company': company_name, 'Location': job_location, 'Description': desc, 'Skills': job_skills, 'Application Url': url} return job_data
a5c0b53338dbacc7fe0e7c7eb91b66855968af2b
3,658,750
def idiv(self, other):
    """Compute the element-wise division.

    Parameters
    ----------
    other : Union[dragon.Tensor, number]
        The value to divide.

    Returns
    -------
    dragon.Tensor
        The self.

    See Also
    --------
    `dragon.math.div(...)`_

    """
    return _apply_binary_op([self, other], 'Div', [self])
05afbc883ec835e06cceaa9a13119fbac0df8f5c
3,658,751
from lxml import etree  # remove_blank_text is an lxml feature; config and shared are project-local modules


def getAudioMetadata(fileRef):
    """Extract metadata for audio file"""
    args = [config.mediaInfoExe]
    args.append("--Output=EBUCore")
    args.append(fileRef)

    # Command line as string (used for logging purposes only)
    cmdStr = " ".join(args)

    status, out, err = shared.launchSubProcess(args)

    # Configure XML parser to get rid of blank lines in MediaInfo output
    parser = etree.XMLParser(remove_blank_text=True)
    # Parse string to element
    outElt = etree.XML(out.encode('utf-8'), parser=parser)

    # Main results to dictionary
    dictOut = {}
    dictOut["cmdStr"] = cmdStr
    dictOut["status"] = status
    dictOut["outElt"] = outElt
    dictOut["stderr"] = err

    return dictOut
4f954d45e6b029b22001a02e49ad453a2f572bb8
3,658,752
def simulation_test(**kwargs):
    """Decorate a unit test and mark it as a simulation test.

    The arguments provided to this decorator will be passed to
    :py:meth:`~reviewbot.tools.testing.testcases.BaseToolTestCase
    .setup_simulation_test`.

    Args:
        **kwargs (dict):
            Keyword arguments to pass during setup.

    Returns:
        callable:
        The new unit test function.
    """
    def _dec(func):
        func.simulation_setup_kwargs = kwargs
        return func

    return _dec
56aa51374e66bb765bfc3d4da51e3254d06c0b55
3,658,753
def update_action_state(): """ :type action: dart.model.action.Action """ # we receive a list of {action_id, action_status, workflow_instance_id/status} # We will update the database for each such entry try: action_status_updates = request.get_json() _logger.info("AWS_Batch: extracted json from request: {0}".format(action_status_updates)) except Exception as err: _logger.error("AWS_Batch: Failed to extract json from request") return {'result': str(err)}, 500 try: for action_status in action_status_updates: # updating the action state current_action = action_service().get_action(action_status['action_id']) if should_update(action_status['action_status'], current_action.data.state): _logger.info("AWS_Batch: Updating action={0} from {1} to state {2}".format(current_action.id, current_action.data.state, action_status['action_status'])) action_service().update_action_state(current_action, action_status['action_status']) # if we receive a workflow_instance_id (not empty) then we need to set workflow_instance status. # we may need to set workflow and datastore status if they need to be deactivated on failure. if action_status.get('workflow_instance_id'): wfs = action_status.get('workflow_instance_status') wf_instance_status = WorkflowInstanceState.FAILED if (wfs == 'FAILED') else WorkflowInstanceState.COMPLETED _logger.info("AWS_Batch: Updating workflow_instance={0} to state {1}".format(action_status.get('workflow_instance_id'), wf_instance_status)) # Updating workflow_instance with the status sent (success or failure). wf_instance = workflow_service().get_workflow_instance(action_status.get('workflow_instance_id')) workflow_service().update_workflow_instance_state(wf_instance, wf_instance_status) # check if need to deactivate workflow and datastore. if wf_instance_status == WorkflowInstanceState.FAILED: workflow_id = wf_instance.data.workflow_id master_workflow = workflow_service().get_workflow(workflow_id) # Failed action with deactivate on_failure should deactivate the current workflow. if current_action.data.on_failure == ActionOnFailure.HALT: _logger.info("AWS_Batch: Action in workflow={0} failed. Halting on failure and remaining in state {2}".format(master_workflow.id, WorkflowState.ACTIVE)) elif current_action.data.on_failure == ActionOnFailure.DEACTIVATE: _logger.info("AWS_Batch: Updating workflow={0} to state {2}".format(master_workflow.id, WorkflowState.INACTIVE)) workflow_service().update_workflow_state(master_workflow, WorkflowState.INACTIVE) if master_workflow.data.on_failure == WorkflowOnFailure.DEACTIVATE: datastore_id = master_workflow.data.datastore_id _logger.info("AWS_Batch: Updating datastore={0} to state {2}".format(datastore_id, DatastoreState.INACTIVE)) datastore = datastore_service().get_datastore(datastore_id) datastore_service().update_datastore_state(datastore, DatastoreState.INACTIVE) except Exception as err: _logger.error("AWS_Batch: Failed to update action state. err= {0}".format(err)) return {'result': str(err)}, 501 # if all pass we send success status (200) otherwise we will try again later. return {'result': "OK"}, 200
f89142b6877f615cce253d727c001737729394fa
3,658,754
def page_not_found():
    """Directs to error page if user is not logged in.

    :return: HTML file for error page.
    """
    error = 'You must be logged in to view this page.'
    return render_template('error.html', error=error)
ff3cc2c369154bec1303658bb3c691de448d8231
3,658,755
def mtf_toy_model_parallel():
    """Set of hyperparameters."""
    hparams = mtf_toy_base()
    hparams.add_hparam("layout", "hidden:0")
    return hparams
74c01e9f8c68f07d332119fd7cead21b92e4de84
3,658,756
from pathlib import Path
from typing import Union

import numpy as np  # referenced as np below
import pandas as pd  # referenced as pd below


def to_dataframe(sas7bdat_file: Union[str, Path]) -> pd.DataFrame:
    """Converts a sas7bdat and/or xpt file into a pandas dataframe.

    args:
        sas7bdat_file: The name, including the path, for the sas7bdat file.

    return:
        A pandas dataframe containing the data from the sas7bdat file.
    """
    df = pd.read_sas(sas7bdat_file)

    # convert binary strings to utf-8
    str_df = df.select_dtypes([np.dtype(object)])
    if len(str_df.columns) > 0:
        str_df = str_df.stack().str.decode("utf-8").unstack()
        for col in str_df:
            df[col] = str_df[col]
    # end conversion to utf-8

    return df
70564f16c43a6c6fdaf65841ee1d0c48d8f550f2
3,658,757
def shift_scale_rmsf(rmsf_double, phi, cellsize, ccomp, faraday_peak): """Shift and scale the RMSF, to the parameters of the found clean component. Args: rmsf_double (numpy array): double sized array of complex point spread function values in Faraday space. phi (numpy array): array of Faraday depths. cellsize (float): advised cellsize in Faraday space. ccomp (float): the complex-valued clean component. faraday_peak (int): the index of the peak of the clean component. Returns: ccomp*rmsf_shifted: the shifted and scaled RMSF. """ # Calculate the integer number of pixels required to shift the RMSF: faraday_shift = phi[faraday_peak]/cellsize faraday_shift = faraday_shift.astype(int) # Shift the RMSF and pad with zeros based upon its sign: if faraday_shift > 0: rmsf_shifted = np.roll(rmsf_double, faraday_shift) rmsf_shifted[0:faraday_shift] = 0.0 elif faraday_shift < 0: rmsf_shifted = np.roll(rmsf_double, faraday_shift) rmsf_shifted[len(rmsf_shifted)+faraday_shift:len(rmsf_shifted)] = 0.0 elif faraday_shift == 0: rmsf_shifted = np.copy(rmsf_double) # The shifted RMSF is double the width of the sampled Faraday space # to ensure the shifted beam is subtracted correctly. # Truncate the RMSF so it has same dimension as sampled parameter space: rmsf_len = len(rmsf_shifted) rmsf_shifted = np.delete(rmsf_shifted, np.arange((3*((rmsf_len-1)//4))+1, rmsf_len)) rmsf_shifted = np.delete(rmsf_shifted, np.arange(0, ((rmsf_len-1)//4))) # Scale the RMSF by the magnitude of the clean component: return ccomp*rmsf_shifted
d658cfece87276075b7c53b987772906908b5b80
3,658,758
def region_filter(annos, annotation):
    """filter for Region annotations.

    The 'time' parameter can match either 'time' or 'timeEnd' parameters.
    """
    result = []
    for anno in annos:
        time = annotation.get("time")
        timeEnd = annotation.get("timeEnd")
        for key in ['text', 'tags']:
            if anno.get(key) != annotation.get(key):
                continue
        if anno.get("regionId") == 0:
            continue
        if anno.get("time") not in [time, timeEnd]:
            continue
        result.append(anno)
    return result
3ca4c6ba39d44370b3022f5eb17a25e0e1c9f056
3,658,759
def estimator_mixt_default(sample): """Default estimator of mixture distribution This estimator returns tuple with two non-overlaping parts of `sample` which are estimated to come from continuous and discrete parts of mixture distribution. Estimation is done by deciding sample element to be from discrete part if it is present at least twice in input `sample`. If some part of estimation has no elements, it is represented as `None` in output. Parameters ---------- sample : array_like This should be a valid input to `np.asarray()` so that its output is numeric. Returns ------- sample_cont, sample_disc : tuple with two elements Elements can be `None` if estimation showed no elements from corresponding mixture part. """ # Detect sample from discrete part sample = np.asarray(sample) vals, inverse, counts = np.unique(sample, return_inverse=True, return_counts=True) disc_inds = np.nonzero(counts >= 2)[0] sample_is_disc = np.isin(inverse, disc_inds) # Return separation if np.all(sample_is_disc): return (None, sample) elif np.all(~sample_is_disc): return (sample, None) else: return (sample[~sample_is_disc], sample[sample_is_disc])
31394305d9da7afe553f0dab9753d919b6aa7c73
3,658,760
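To make the splitting rule in the row above concrete, here is a small made-up sample; values repeated at least twice are routed to the discrete part:

import numpy as np

sample = [0.1, 0.7, 0.7, 1.3, 2.0, 2.0, 2.0, 3.5]
sample_cont, sample_disc = estimator_mixt_default(sample)
print(sample_cont)  # values seen only once: 0.1, 1.3, 3.5
print(sample_disc)  # values seen at least twice: 0.7, 0.7, 2.0, 2.0, 2.0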
def modularity_clustering(graph, size_cutoff=10, deg_cutoff=0.5, callback=None): """ Use the Clauset-Newman-Moore greedy modularity maximization algorithm to partition the TN93 pairwise graph into communities. Modularity quantifies the density of edges at the periphery of a community relative to the density within it. TODO: try other methods like Louvain algorithm :param graph: networkx.Graph object from import_graph() :param size_cutoff: int, minimum component size to consider applying modularity community detection :param deg_cutoff: float, maximum edge density at which use community detection. :param callback: optional, write verbose messages :return: list, lists of node labels """ if callback: callback("Modularity clustering...") result = [] count = 0 for component in nx.connected_components(graph): count += 1 if len(component) > size_cutoff: sg = graph.subgraph(component) # retrieve list of degree sizes deg = [d for _, d in sg.degree()] mean_deg = sum(deg) / float(len(deg)) if mean_deg / len(deg) < deg_cutoff: communities = list(greedy_modularity_communities(sg)) if callback: callback( ' partitioning component of size {} into {} ' 'communities'.format(len(component), len(communities)) ) result.extend(communities) else: # component has sufficient edge density result.append(component) else: result.append(component) if callback: callback("Partitioned graph from {} to {} components".format( count, len(result)) ) return result
e64e383eaf4895244aab1f32e39fae5af92769b5
3,658,761
import inspect


def get_post_processors():
    """
    Loads post processors by inspecting members of the 'post_processors' package.
    """
    post_processor_classes = []
    for _, member in inspect.getmembers(post_processors):
        if inspect.isclass(member):
            post_processor_classes.append(member)
    return post_processor_classes
6b65c438657230661b189c8851ca5b662714c4df
3,658,762
def vulcanize(name: str) -> str: """Add prefixes to names that are similar to the prefixes seen in Vulcan characters in the Star Trek™ franchise. :param name: The name to modify. :return: A :class:str object. :rtype: str Usage: >>> # Seed the RNG to make the example predictable. Don't do >>> # this if you want the modification to be random. >>> seed('spam') >>> >>> name = 'Bacon' >>> vulcanize(name) "T'Bacon" """ letter = 't' if roll('1d6') > 5: letters = 'd k l m n p s su v'.split() index = roll(f'1d{len(letters)}') - 1 letter = letters[index] letter = letter.title() name = name.title() return f"{letter}'{name}"
00cd22427ab873852af519a6657bf9504b945fb3
3,658,763
def B(j, p, x, knots):
    """
    Compute B-splines using recursive definition.
    """
    if p == 0:
        if knots[j] <= x < knots[j+1]:
            return 1.0
        else:
            return 0.0
    else:
        left = special_div((x-knots[j])*B(j, p-1, x, knots), knots[j+p]-knots[j])
        right = special_div((knots[j+1+p]-x)*B(j+1, p-1, x, knots), knots[j+1+p]-knots[j+1])
        return left + right
1c578e317a3e2ff00f31b8e0b31b4f184e9bd338
3,658,764
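The row above leaves special_div undefined; a common convention (assumed here) is to treat the 0/0 case at repeated knots as 0. With that stand-in, the degree-2 basis functions sum to 1 inside the valid span:

def special_div(num, den):
    # assumed helper: return 0 when the denominator vanishes (repeated knots)
    return 0.0 if den == 0 else num / den

knots = [0, 1, 2, 3, 4, 5, 6]
x = 2.5
# partition of unity over the interior of the knot vector
print(sum(B(j, 2, x, knots) for j in range(len(knots) - 3)))  # 1.0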
from typing import TypeVar

T = TypeVar("T")  # generic type variable used in the signature below


def not_falsy(item: T, item_name: str) -> T:
    """
    Check if a value is falsy and throw an exception if so.

    :param item: the item to check for falsiness.
    :param item_name: the name of the item to include in any exception.
    :raises ValueError: if the item is falsy.
    :returns: the item.
    """
    if not item:
        raise ValueError(f"{item_name} cannot be a value that evaluates to false")
    return item
b758d3ffe8f4c30086248fc9df2a9e82e05553d3
3,658,765
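Illustrative behaviour of the check above (argument values are made up):

>>> not_falsy("token-abc", "auth token")
'token-abc'
>>> not_falsy("", "auth token")
Traceback (most recent call last):
    ...
ValueError: auth token cannot be a value that evaluates to false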
import numpy as np  # referenced as np below


def _apply_limit_abs_unit(x, lim, unit):
    """Return one limit with applied unit(abs(x)). See get_limits."""
    if unit is None:
        return lim
    unit = unit.lower()

    if unit == 'near':
        return lim * np.nanmin(np.abs(x))
    if unit == 'far':
        return lim * np.nanmax(np.abs(x))
    elif unit == 'median':
        return lim * np.nanmedian(np.abs(x))
    elif unit == 'mean':
        return lim * np.nanmean(np.abs(x))
    else:
        raise ValueError("Unknown unit %s" % unit)
e3c77192b90b04b4c488ca8bac41f79024517a6b
3,658,766
from astropy.io import fits  # assuming the astropy FITS reader (could also be the older pyfits)


def load_fits(name):
    """
    Open a fits file image

    Inputs:
        name: name of the .fits file (str).

    Output:
        image, name: a copy of the opened FITS data and the (possibly corrected) file name.
    """
    while True:
        try:
            file = fits.open(name)
            image = file.copy()
            return image, name
        except FileNotFoundError:
            print(f"File {name} not found")
            name = input('Please enter a different file name: ')
24a348239e89cc9e565238e9f124875090ffe92b
3,658,767
import os
import subprocess
import json


def run_flow(command, contents):
    """Run Flow command on a given contents."""
    read, write = os.pipe()
    os.write(write, str.encode(contents))
    os.close(write)
    try:
        output = subprocess.check_output(
            command, stderr=subprocess.STDOUT, stdin=read
        )
        decoded_output = output.decode("utf-8")
        clean_output = decoded_output[decoded_output.find('{"'):]
        result = json.loads(clean_output)
        os.close(read)
        return result
    except subprocess.CalledProcessError as err:
        raise err
5621c321d0fb35518aabfb04f0f1b088be2bfa79
3,658,768
def cleanup():
    """Clean up resources in use by implementation.

    Clean up any resources that have been allocated by the RPC implementation.
    This is typically open connections to a messaging service. This function
    would get called before an application using this API exits to allow
    connections to get torn down cleanly.

    :returns: None
    """
    return _get_impl().cleanup()
984d2c3b297c47c1ffaec43302cfb741cfe369e4
3,658,769
def social_bonus_count(user, count):
    """Returns True if the number of social bonuses the user received is at least `count`."""
    return user.actionmember_set.filter(social_bonus_awarded=True).count() >= count
b2469833f315410df266cd0a9b36933edb1f9ac6
3,658,770
def del_category_tag_lib(self, c_uuid, t_uuid):
    """04 Delete a tag or category"""
    if c_uuid:
        category = Category.by_uuid(c_uuid)
        if category is None:
            flash(self, 'Category does not exist', 'error')
            return {'status': False}
        if category.articles:
            flash(self, 'The category still has articles; delete the articles first', 'error')
            return {'status': False}
        self.db.delete(category)
        self.db.commit()
        flash(self, 'Category deleted successfully', 'success')
        return {'status': True}
    if t_uuid:
        tag = Tag.by_uuid(t_uuid)
        if tag is None:
            flash(self, 'Tag does not exist', 'error')
            return {'status': False}
        if tag.articles:
            flash(self, 'The tag still has articles; delete the articles first', 'error')
            return {'status': False}
        self.db.delete(tag)
        self.db.commit()
        flash(self, 'Tag deleted successfully', 'success')
        return {'status': True}
    flash(self, 'Please provide a tag or category', 'error')
    return {'status': False}
db915fe29943d9bb63122d73d59a052715798818
3,658,771
import math


def get_distance_metres(aLocation1, aLocation2):
    """
    Returns the ground distance in metres between two `LocationGlobal` or `LocationGlobalRelative` objects.

    This method is an approximation, and will not be accurate over large distances and close to the
    earth's poles. It comes from the ArduPilot test code:
    https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
    """
    dlat = aLocation2.lat - aLocation1.lat
    dlong = aLocation2.lon - aLocation1.lon
    return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
57a56fac2d0a3a83083b769b5f896cb82d55dc56
3,658,772
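Any object with .lat and .lon attributes works with the row above, so it can be exercised without dronekit; the coordinates below are arbitrary:

from collections import namedtuple

Location = namedtuple("Location", ["lat", "lon"])  # stand-in for LocationGlobal
home = Location(lat=52.000, lon=4.000)
target = Location(lat=52.001, lon=4.000)
print(round(get_distance_metres(home, target), 1))  # about 111.3 metres per 0.001 deg of latitude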
import types def pd_series_overload(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series Limitations ----------- - Parameters ``dtype`` and ``copy`` are currently unsupported. - Types iterable and dict as ``data`` parameter are currently unsupported. - Categorical types (i.e. 'category' and ``CategoricalDtype``) are supported in ``dtype`` only if they are provided as constants in jitted code. Examples -------- Create Series with data [1, 2, 3] and index ['A', 'B', 'C']. >>> pd.Series([1, 2, 3], ['A', 'B', 'C']) Create Series with categorical data: >>> pd.Series([1, 2, 3], dtype='category') >>> pd.Series([1, 2, 3], dtype=CategoricalDtype([1, 2, 3])) .. seealso:: :ref:`DataFrame <pandas.DataFrame>` DataFrame constructor. """ is_index_none = isinstance(index, types.NoneType) or index is None if is_categoricaldtype(dtype): return _Series_category(data, index, dtype, name, copy, fastpath) def hpat_pandas_series_ctor_impl(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): '''' use binop here as otherwise Numba's dead branch pruning doesn't work TODO: replace with 'if not is_index_none' when resolved ''' if is_index_none == False: # noqa fix_index = sdc.hiframes.api.fix_df_array(index) else: fix_index = index return sdc.hiframes.api.init_series(sdc.hiframes.api.fix_df_array(data), fix_index, name) return hpat_pandas_series_ctor_impl
bc5302cbbb30215d8257ad44ee60a5990948f94a
3,658,773
import copy def get_export_summary(results): """Prints to screen the exporting results of example programs. Args: results - results of the compilation stage. which is the output of and export_repos() Returns: Numbers of failed results """ pass_table = PrettyTable() pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"] pass_table.align["EXAMPLE NAME"] = "l" fail_table = copy.deepcopy(pass_table) failure_counter = 0 for exp, status in list(results.items()): for summary in status[2]: pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"]) for summary in status[3]: fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""]) failure_counter+=1 for summary in status[4]: fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"]) failure_counter+=1 for summary in status[5]: pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"]) print("\n\nPassed Example Exporting:") print(pass_table) if (failure_counter > 0): print("\n\nFailed Example Exporting:") print(fail_table) print("Number of failures = %d" % failure_counter) return failure_counter
0f68e8da955a73c401536f83e18faa223d603d15
3,658,774
from math import sqrt  # sqrt is used unqualified below

import numpy


def _misfitfunc(data, predicted):
    """
    Calculate the total data misfit function between the observed and
    predicted data.
    """
    result = 0.
    for d, p in zip(data, predicted):
        residuals = d.observed - p
        result += sqrt(numpy.dot(d.weights*residuals, residuals))/d.norm
    return result
c21fb4c8d68a2abe20ca155e5776124c69ce2eff
3,658,775
def stream_doi(app, doi):
    """Returns tuple of URL string and a urlopen() return value."""
    apikey = app.cfg.get_or_die('api-keys', 'crossref')
    url = ('http://crossref.org/openurl/?id=%s&noredirect=true&pid=%s&'
           'format=unixref' % (wu.urlquote(doi), wu.urlquote(apikey)))
    return url, wu.urlopen(url)
7c3569c4492b52c68ed13bcaac9dae0b6805bdb6
3,658,776
from typing import Optional import getpass from datetime import datetime import json def do_evaluation( *, input_path, training_path: Optional[str] = None, testing_path: Optional[str] = None, method, prediction_task, dimensions: int = 300, number_walks: int = 8, walk_length: int = 8, window_size: int = 4, p: float = 1.5, q: float = 2.1, alpha: float = 0.1, beta: float = 4, epochs: int = 5, kstep: int = 4, order: int = 3, embeddings_path: Optional[str] = None, predictive_model_path: Optional[str] = None, training_model_path: Optional[str] = None, evaluation_file: Optional[str] = None, classifier_type: Optional[str] = None, weighted: bool = False, labels_file: Optional[str] = None, ): """Train and evaluate an NRL model.""" if prediction_task == 'link_prediction': node_list = None labels = None graph, graph_train, testing_pos_edges, train_graph_filename = create_graphs( input_path=input_path, training_path=training_path, testing_path=testing_path, weighted=weighted, ) else: if not labels_file: raise ValueError("No input label file. Exit.") node_list, labels = read_node_labels(labels_file) train_graph_filename = input_path graph, graph_train, testing_pos_edges = None, None, None model = embedding_training( train_graph_filename=train_graph_filename, method=method, dimensions=dimensions, number_walks=number_walks, walk_length=walk_length, window_size=window_size, p=p, q=q, alpha=alpha, beta=beta, epochs=epochs, kstep=kstep, order=order, weighted=weighted, ) if training_model_path is not None: model.save_model(training_model_path) if embeddings_path is not None: model.save_embeddings(embeddings_path) if method == 'LINE': embeddings = model.get_embeddings_train() else: embeddings = model.get_embeddings() _results = dict( input=input_path, method=method, dimension=dimensions, user=getpass.getuser(), date=datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S'), ) if prediction_task == 'link_prediction': auc_roc, auc_pr, accuracy, f1, mcc = do_link_prediction( embeddings=embeddings, original_graph=graph, train_graph=graph_train, test_pos_edges=testing_pos_edges, save_model=predictive_model_path, classifier_type=classifier_type, ) _results['results'] = dict( auc_roc=auc_roc, auc_pr=auc_pr, accuracy=accuracy, f1=f1, mcc=mcc, ) else: accuracy, macro_f1, micro_f1, mcc = do_node_classification( embeddings=embeddings, node_list=node_list, labels=labels, save_model=predictive_model_path, classifier_type=classifier_type, ) _results['results'] = dict( accuracy=accuracy, macro_f1=macro_f1, micro_f1=micro_f1, mcc=mcc, ) if evaluation_file is not None: json.dump(_results, evaluation_file, sort_keys=True, indent=2) return _results
ab5939065d9cf70c6e5ddba8530f91cb2577a31c
3,658,777
import re


def test_structure_fatal_deformities(good_structure, deformity):
    """Make specific checks upon performing single invalidating deformations
    of the data of a good structure.
    """
    if deformity is None:
        return StructureResource(**good_structure)

    deformity, message = deformity
    good_structure["attributes"].update(deformity)
    with pytest.raises(ValidationError, match=fr".*{re.escape(message)}.*"):
        StructureResource(**good_structure)
28acc95fb29564ddbf844de70704e31212e59b9f
3,658,778
def edit_user():
    """Return the information of the user to be edited."""
    data = request.json
    user_id = data.get('id')
    _edit = User.query.filter_by(id=user_id).first()
    _data = {'account': _edit.account, 'name': _edit.name, 'role_id': _edit.role_id}
    return jsonify({'data': _data, 'status': 1})
7423eb2342dd135a219bbb6f34ba7f82740b49d0
3,658,779
def transactions(request): """See all transactions that have been contained in blocks.""" vote_list = Vote.objects.all().order_by('timestamp') paginator = Paginator(vote_list, 100, orphans=20, allow_empty_first_page=True) page = request.GET.get('page') votes = paginator.get_page(page) hashes = [SHA3_256.new(str(v).encode('utf-8')).hexdigest() for v in votes] # This happens if you don't use foreign key block_hashes = [] for i in range(0, len(votes)): try: b = Block.objects.get(id=votes[i].block_id) h = b.h except: h = 404 block_hashes.append(h) # zip the three iters votes_pg = votes # for pagination votes = zip(votes, hashes, block_hashes) # Calculate the voting result of 3 cands, the ugly way result = [] for i in range(0, 3): try: r = Vote.objects.filter(vote=i+1).count() except: r = 0 result.append(r) context = { 'votes': votes, 'result': result, 'votes_pg': votes_pg, } return render(request, 'simulation/transactions.html', context)
7ed0d4a8b997a41112eccfc67a19784283e65fd8
3,658,780
import pandas def elections_vote_places_geo(source="xd", folder=".", fLOG=noLOG): """ Retrieves data vote places (bureaux de vote in French) with geocodes. @param source should be None unless you want to use the backup plan ("xd") @param folder where to download @param fLOG logging function @return list of dataframe """ if source is None: raise NotImplementedError("use source='xd'") url = source file = "bureauxvotegeo.zip" data = download_data(file, website=url, whereTo=folder, fLOG=fLOG) for d in data: if d.endswith(".txt"): df = pandas.read_csv(d, sep="\t", encoding="utf-8") return df raise DataNotAvailableError( "Unable to find any csv file in '{0}'".format(file))
b81abbeeed1968e01477cb71897a373a113ffafb
3,658,781
from typing import Optional def erfc( x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None ) -> oneflow._oneflow_internal.BlobDesc: """This operator computes the :math:`1-erf(x)`, for more details of `erf` function please refer to `math.erf`. Args: x (oneflow._oneflow_internal.BlobDesc): A Blob name (Optional[str], optional): The name for the operation. Defaults to None. Returns: oneflow._oneflow_internal.BlobDesc: The result Blob For example: .. code-block:: python import oneflow.compatible.single_client as flow import numpy as np import oneflow.compatible.single_client.typing as tp @flow.global_function() def erfc_Job(x: tp.Numpy.Placeholder((3,)) ) -> tp.Numpy: return flow.math.erfc(x) x = np.array([1, 2, 3]).astype(np.float32) out = erfc_Job(x) # out [1.5729921e-01 4.6777353e-03 2.2090495e-05] """ return build_unary_elemwise_math_op("erfc", x, name)
0fd6f01b0d6dbbdf7449a5a7dc2ac9ee3f0bce0e
3,658,782
from typing import Union from typing import Iterable from typing import Optional from typing import List from typing import Dict from typing import Any def ner_manual_tokenizers_bert( dataset: str, source: Union[str, Iterable[dict]], loader: Optional[str] = None, label: Optional[List[str]] = None, tokenizer_vocab: Optional[str] = None, lowercase: bool = False, hide_special: bool = False, hide_wp_prefix: bool = False, ) -> Dict[str, Any]: """Example recipe that shows how to use model-specific tokenizers like the BERT word piece tokenizer to preprocess your incoming text for fast and efficient NER annotation and to make sure that all annotations you collect always map to tokens and can be used to train and fine-tune your model (even if the tokenization isn't that intuitive, because word pieces). The selection automatically snaps to the token boundaries and you can double-click single tokens to select them. Setting "honor_token_whitespace": true will ensure that whitespace between tokens is only shown if whitespace is present in the original text. This keeps the text readable. Requires Prodigy v1.10+ and usese the HuggingFace tokenizers library.""" stream = get_stream(source, loader=loader, input_key="text") # You can replace this with other tokenizers if needed tokenizer = BertWordPieceTokenizer(tokenizer_vocab, lowercase=lowercase) sep_token = tokenizer._parameters.get("sep_token") cls_token = tokenizer._parameters.get("cls_token") special_tokens = (sep_token, cls_token) wp_prefix = tokenizer._parameters.get("wordpieces_prefix") def add_tokens(stream): for eg in stream: tokens = tokenizer.encode(eg["text"]) eg_tokens = [] idx = 0 for (text, (start, end), tid) in zip( tokens.tokens, tokens.offsets, tokens.ids ): # If we don't want to see special tokens, don't add them if hide_special and text in special_tokens: continue # If we want to strip out word piece prefix, remove it from text if hide_wp_prefix and wp_prefix is not None: if text.startswith(wp_prefix): text = text[len(wp_prefix) :] token = { "text": text, "id": idx, "start": start, "end": end, # This is the encoded ID returned by the tokenizer "tokenizer_id": tid, # Don't allow selecting spacial SEP/CLS tokens "disabled": text in special_tokens, } eg_tokens.append(token) idx += 1 for i, token in enumerate(eg_tokens): # If the next start offset != the current end offset, we # assume there's whitespace in between if i < len(eg_tokens) - 1 and token["text"] not in special_tokens: next_token = eg_tokens[i + 1] token["ws"] = ( next_token["start"] > token["end"] or next_token["text"] in special_tokens ) else: token["ws"] = True eg["tokens"] = eg_tokens yield eg stream = add_tokens(stream) return { "dataset": dataset, "stream": stream, "view_id": "ner_manual", "config": { "honor_token_whitespace": True, "labels": label, "exclude_by": "input", "force_stream_order": True, }, }
982ddc4ab2e574870a5790dc37854ff5ffec648a
3,658,783
def test_nested_simple_condition() -> None: """ Iterates and maps expressions over a complex Condition: (A=B OR A=B) AND (A=B OR A=B) """ c1 = Column(None, "t1", "c1") c2 = Column(None, "t1", "c2") co1 = binary_condition(None, ConditionFunctions.EQ, c1, c2) c3 = Column(None, "t1", "c1") c4 = Column(None, "t1", "c2") co2 = binary_condition(None, ConditionFunctions.EQ, c3, c4) or1 = binary_condition(None, BooleanFunctions.OR, co1, co2) c5 = Column(None, "t1", "c1") c6 = Column(None, "t1", "c2") co4 = binary_condition(None, ConditionFunctions.EQ, c5, c6) c7 = Column(None, "t1", "c1") c8 = Column(None, "t1", "c2") co5 = binary_condition(None, ConditionFunctions.EQ, c7, c8) or2 = binary_condition(None, BooleanFunctions.OR, co4, co5) and1 = binary_condition(None, BooleanFunctions.AND, or1, or2) ret = list(and1) expected = [c1, c2, co1, c3, c4, co2, or1, c5, c6, co4, c7, c8, co5, or2, and1] assert ret == expected cX = Column(None, "t1", "cX") co1_b = binary_condition(None, ConditionFunctions.EQ, c1, cX) co2_b = binary_condition(None, ConditionFunctions.EQ, c3, cX) or1_b = binary_condition(None, BooleanFunctions.OR, co1_b, co2_b) co4_b = binary_condition(None, ConditionFunctions.EQ, c5, cX) co5_b = binary_condition(None, ConditionFunctions.EQ, c7, cX) or2_b = binary_condition(None, BooleanFunctions.OR, co4_b, co5_b) and1_b = binary_condition(None, BooleanFunctions.AND, or1_b, or2_b) def replace_col(e: Expression) -> Expression: if isinstance(e, Column) and e.column_name == "c2": return cX return e and1 = and1.transform(replace_col) ret = list(and1) expected = [ c1, cX, co1_b, c3, cX, co2_b, or1_b, c5, cX, co4_b, c7, cX, co5_b, or2_b, and1_b, ] assert ret == expected
f047f916f3ace9142e8940a39fd47d36d43dc108
3,658,784
import functools


def _deep_setattr(obj, key, val):
    """
    Set an attribute `key` on the object. If any of the prefix attributes do not
    exist, they are set to :class:`~pyro.nn.PyroModule`.
    """

    def _getattr(obj, attr):
        obj_next = getattr(obj, attr, None)
        if obj_next is not None:
            return obj_next
        setattr(obj, attr, PyroModule())
        return getattr(obj, attr)

    lpart, _, rpart = key.rpartition(".")
    # Recursive getattr while setting any prefix attributes to PyroModule
    if lpart:
        obj = functools.reduce(_getattr, [obj] + lpart.split("."))
    setattr(obj, rpart, val)
a28b01484de71dc486c73fe9ad01238675b15a04
3,658,785
def inverse_max_dcg(labels, gain_fn=lambda labels: tf.pow(2.0, labels) - 1., rank_discount_fn=lambda rank: 1. / tf.math.log1p(rank), topn=None): """Computes the inverse of max DCG. Args: labels: A `Tensor` with shape [batch_size, list_size]. Each value is the graded relevance of the corresponding item. gain_fn: A gain function. By default this is set to: 2^label - 1. rank_discount_fn: A discount function. By default this is set to: 1/log(1+rank). topn: An integer as the cutoff of examples in the sorted list. Returns: A `Tensor` with shape [batch_size, 1]. """ ideal_sorted_labels, = sort_by_scores(labels, [labels], topn=topn) rank = tf.range(tf.shape(input=ideal_sorted_labels)[1]) + 1 discounted_gain = gain_fn(ideal_sorted_labels) * rank_discount_fn( tf.cast(rank, dtype=tf.float32)) discounted_gain = tf.reduce_sum( input_tensor=discounted_gain, axis=1, keepdims=True) return tf.compat.v1.where( tf.greater(discounted_gain, 0.), 1. / discounted_gain, tf.zeros_like(discounted_gain))
60e5b05af91fbd8e51a58894f9f19a5a8f92d1b5
3,658,786
def get(url):
    """
    Send a GET request to the url and return the response; 301 redirects are followed.

    :param url: the url to request
    :return: status_code, headers, body
    """
    protocol, host, port, path = parsed_url(url)

    s = socket_by_protocol(protocol)
    s.connect((host, port))

    request = 'GET {} HTTP/1.1\r\nhost: {}\r\nConnection: close\r\n\r\n'.format(path, host)
    encoding = 'utf-8'
    s.send(request.encode(encoding))

    response = response_by_socket(s)
    r = response.decode(encoding)

    status_code, headers, body = parsed_response(r)
    if status_code == 301:
        url = headers['Location']
        return get(url)
    else:
        return status_code, headers, body
3ef816149e8b4953e119c807726112feeacc6eed
3,658,787
import re def make_rule(frontier_pair, amr, tree, align, next_index): """ Creates a new rule with the given parts, and collapses these parts in the original graph and tree. """ constituent, amr_fragment = frontier_pair outside_edges = [e for e in amr.triples() if e not in amr_fragment.triples()] root_label = amr_fragment.root_edges()[0][1] if isinstance(root_label, NonterminalLabel): symbol = root_label.label m = re.match(r'(.+)_(.+)_(\d+)', symbol) role = m.group(1) else: if ':' in root_label: role, concept = root_label.split(':') else: role = root_label external_nodes = amr.find_external_nodes(amr_fragment) if len(external_nodes) == 0: external_nodes = [amr_fragment.find_leaves()[0]] # WARNING: destructive. Unfortunately we can't make the change any earlier. # TODO why? amr_fragment.external_nodes = external_nodes symbol = '%s_%s_%d' % (role, constituent.node, len(external_nodes)) label = NonterminalLabel(symbol, next_index) new_triple = (amr_fragment.roots[0], label, tuple(external_nodes)) new_amr = amr.collapse_fragment(amr_fragment, label) assert new_triple in new_amr.triples() new_tree = collapse_constituent(tree, constituent, label) new_alignments = collapse_alignments(align, amr_fragment, new_triple) rule = Rule(0, symbol, 1, amr_fragment, constituent, original_index = next_index) return rule, new_amr, new_tree, new_alignments, next_index+1
4b7cd9af8534688e8c30b88f4e5fa4da3c85f180
3,658,788
def NotEqual(data1, data2, target=utils.CCE): """ check whether data1 notequals to data2. Args: data1 (tvm.tensor.Tensor): Tensor. data2 (tvm.tensor.Tensor): Tensor. Returns: tvm.tensor.Tensor. If data1 notequal to data2 return True, else return False. Supported Platforms: 'Ascend', 'GPU', 'CPU' """ utils.check_supported_target(target) if target == utils.CCE: return _not_equal_ascend(data1, data2) else: return _not_equal(data1, data2)
88be9ea40900644a61dd3f37c0a05f9fa8c3eb76
3,658,789
def read_labels(labels_path):
    """Reads list of labels from a file"""
    with open(labels_path, 'rb') as f:
        return [w.strip() for w in f.readlines()]
3ebc61c76dd1ae83b73aa8b77584661c08a51321
3,658,790
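Note that the row above opens the file in binary mode, so the labels come back as bytes; a minimal illustrative call (the file name is made up):

labels = read_labels("labels.txt")
print(labels[:3])  # e.g. [b'cat', b'dog', b'horse'] for a one-label-per-line file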
import torch def targeted_neurogenesis(weights, n_replace, targeted_portion, is_training): """ Takes a weight matrix and applied targetted dropout based on weight importance (From Gomez et al. 2019; https://for.ai/blog/targeted-dropout/) Args: weights - the input by ouput matrix of weights dropout_rate - float (0,1), the proprotion of targeted neurons to dropout targeted_portion - the proportion of neurons/weights to consider 'unimportant' from which dropout_rate targets from is_training - bool, whether model is training, or being evaluated """ # get the input vs output size weights_shape = weights.shape # l1-norm of neurons based on input weights to sort by importance importance = torch.norm(weights, p=1, dim=1) # chose number of indices to remove of the output neurons idx = round(targeted_portion * weights_shape[0]) - 1 # when sorting the abs valued weights largest to smallest # take the index of the targeted portion to get a threshold importance_threshold = torch.sort(importance)[0][-idx] # TODO -idx # only weights below threshold will be set to None unimportance_mask = importance > importance_threshold #TODO > change < regular # during evaluation, only use important weights, without dropout threshold if not is_training: weights = torch.reshape(weights, weights_shape) return weights # difference between dropout_rate and unimportance_mask (i.e. threshold) idx_drop = np.random.choice(np.where(unimportance_mask)[0], size=n_replace, replace=False) dropout_mask = torch.zeros_like(unimportance_mask) dropout_mask[idx_drop] = 1 # delete dropped out units weights = weights[~dropout_mask] return weights, dropout_mask
4b605ddcd6ef0f13822d2c7050da588b3d1d0b72
3,658,791
def calc_distance_two_points(long_from, lat_from, long_to, lat_to):
    """Calculate distance between two points

    Parameters
    ----------
    long_from : float
        Longitude coordinate of the origin point
    lat_from : float
        Latitude coordinate of the origin point
    long_to : float
        Longitude coordinate of the destination point
    lat_to : float
        Latitude coordinate of the destination point

    Return
    ------
    distance : float
        Distance
    """
    distance_in_km = haversine(
        (long_from, lat_from),
        (long_to, lat_to),
        miles=False)

    return distance_in_km
0c35c22458db165684242389470248632f2e1edb
3,658,792
from typing import Counter def modified_precision(reference_max_counts, hypothesis, n): """ Calculate modified ngram precision. The normal precision method may lead to some wrong translations with high-precision, e.g., the translation, in which a word of reference repeats several times, has very high precision. This function only returns the Fraction object that contains the numerator and denominator necessary to calculate the corpus-level precision. To calculate the modified precision for a single pair of hypothesis and references, cast the Fraction object into a float. The famous "the the the ... " example shows that you can get BLEU precision by duplicating high frequency words. >>> reference1 = 'the cat is on the mat'.split() >>> reference2 = 'there is a cat on the mat'.split() >>> hypothesis1 = 'the the the the the the the'.split() >>> references = [reference1, reference2] >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS 0.2857... In the modified n-gram precision, a reference word will be considered exhausted after a matching hypothesis word is identified, e.g. >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', ... 'forever', 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', ... 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> hypothesis = 'of the'.split() >>> references = [reference1, reference2, reference3] >>> float(modified_precision(references, hypothesis, n=1)) 1.0 >>> float(modified_precision(references, hypothesis, n=2)) 1.0 An example of a normal machine translation hypothesis: >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', ... 'forever', 'hearing', 'the', 'activity', 'guidebook', ... 'that', 'party', 'direct'] >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', ... 'forever', 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', ... 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> references = [reference1, reference2, reference3] >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS 0.9444... >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS 0.5714... >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS 0.5882352941176471 >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS 0.07692... :param references: A list of reference translations. :type references: list(list(str)) :param hypothesis: A hypothesis translation. :type hypothesis: list(str) :param n: The ngram order. :type n: int :return: BLEU's modified precision for the nth order ngram. :rtype: Fraction """ # Extracts all ngrams in hypothesis # Set an empty Counter if hypothesis is empty. 
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter() # Extract a union of references' counts. # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references]) max_counts = reference_max_counts[n - 1] # Assigns the intersection between hypothesis and references' counts. clipped_counts = { ngram: min(count, max_counts.get(ngram, 0)) for ngram, count in counts.items() } numerator = sum(clipped_counts.values()) # Ensures that denominator is minimum 1 to avoid ZeroDivisionError. # Usually this happens when the ngram order is > len(reference). denominator = max(1, sum(counts.values())) return Fraction(numerator, denominator, _normalize=False)
cbaf2ca391a6b0ac8bfcf9e2f85aba83f4b585d0
3,658,793
def bin_spectrum(bin_width, wavelength, doppler_shift, flux, flux_uncertainty, final_uncertainty='combine'): """ Args: wavelength: doppler_shift: flux: flux_uncertainty: Returns: """ bw = bin_width wv = wavelength ds = doppler_shift f = flux u = flux_uncertainty v_bins = np.arange(min(ds), max(ds) + bw, bw) binned_data, edges, inds = binned_statistic(ds, [wv, ds, f], bins=v_bins, statistic='mean') wv_bin = binned_data[0] v_bin = binned_data[1] f_bin = binned_data[2] # Combine uncertainties assuming Gaussian regime if final_uncertainty == 'combine': u_bin, edges, inds = binned_statistic(ds, u ** 2, bins=v_bins, statistic='sum') u_count, edges, inds = binned_statistic(ds, u ** 2, bins=v_bins, statistic='count') u_bin = u_bin ** 0.5 / u_count ** 0.5 elif final_uncertainty == 'poisson': confidence_interval = poisson_conf_interval(f_bin) u_bin = np.mean(confidence_interval, axis=0) else: raise ValueError('This final uncertainty type is not implemented.') return wv_bin, v_bin, f_bin, u_bin
3c71977ae845161156ed95b42f68d7de65b80f66
3,658,794
def scrub(old_fs: Vfs, new_fs: Vfs) -> Vfs:
    """Try to eliminate files which were previously installed but are no longer used."""

    old_fs = old_fs.copy()
    new_fs = new_fs.copy()

    # Look for files in the old log which are no longer present in the new log
    for txn in old_fs._log:
        if txn[0] == "link" and txn not in new_fs._log:
            new_fs.unlink(txn[2])
        elif txn[0] == "mkdir" and txn not in new_fs._log:
            new_fs.unlink(txn[1])

    return new_fs
c1cdfad6c658e481b05658d3041458ab6fd3419c
3,658,795
def get_filename(row): """ Assembles the name of the feature file. Parameters ---------- row : pandas.Series A row fom the sequence dataframe. Must have the following index values: "sample_name", "inj_number", "batch_name", "acquisition_date_and_time". Returns ------- filename : str The filename of the feature file. """ acquisition = row.acquisition_date_and_time if pd.isna(acquisition): acquisition = "1900-01-01_000000" filename = ( "_".join( [ str(row.sample_name), str(row.inj_number), str(row.batch_name), acquisition, ] ) + ".featureXML" ) return filename
68624971527442da110734043bdbaa1c68dc4875
3,658,796
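Assuming pandas is imported as pd in the defining module, the row above can be exercised with a hand-built Series (the field values are made up):

import pandas as pd

row = pd.Series({
    "sample_name": "S1",
    "inj_number": 3,
    "batch_name": "batchA",
    "acquisition_date_and_time": "2020-05-17_101500",
})
print(get_filename(row))  # S1_3_batchA_2020-05-17_101500.featureXML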
import plotly.graph_objects as go  # go refers to plotly's graph objects below


def create_plotly_trace(data_x, data_y, namexy, chosen_mode='lines', use_gl=True, swap_xy=False):
    """
    Create a single trace from the data

    :param data_x: data for the x axis
    :param data_y: data for the y axis
    :param namexy: name for the trace
    :param chosen_mode: display mode, 'lines' or 'markers'
    :return: a single trace
    """
    if swap_xy:
        data_x, data_y = data_y, data_x
        hovertemplate = namexy + ": %{x}<extra></extra>"
    else:
        hovertemplate = namexy + ": %{y}<extra></extra>"

    if use_gl == True:
        one_trace = go.Scattergl(
            x=data_x,
            y=data_y,
            name=namexy,
            mode=chosen_mode,
            hovertemplate=hovertemplate
        )
    else:
        one_trace = go.Scatter(
            x=data_x,
            y=data_y,
            name=namexy,
            mode=chosen_mode,
            hovertemplate=hovertemplate
        )

    return one_trace
dd90d370c27968053bfaf98f509868d959416d39
3,658,797
import yaml


def read_metadata() -> dict:
    """Reads and returns raw metadata."""
    with open(metadata_path().resolve(), "r") as fd:
        return yaml.safe_load(fd)
0eafc0a722ac5cae69407a7e76d5bf62b7541b69
3,658,798
def plot_plaid_contrast_tuning(bf_indices, base_contrasts, mask_contrasts, base_orientations, mask_orientations, test_responses): """ Plot responses to orthogonal plaid stimulus at different base and mask contrasts Inputs: bf_indices: [list or array] of neuron indices to use all indices should be less than test_responsees.shape[0] base_contrasts: [list or array] of base contrasts. mask_contrasts: [list or array] of mask contrasts. each plot will have one line per mask_contrast base_orientations: [list or array] of optimal base orientations for all neurons should be a 1-D array with size = test_responses.shape[0] mask_orientations: [list or array] of mask orientation values function will compute the plaid response for orthogonal orientations test_responses: [list or array] of responses to the base+mask stimulus should be shape [num_neurons, num_base_contrasts, num_mask_contrasts, num_orientations] """ bf_indices = np.asarray(bf_indices) mask_orientations = np.asarray(mask_orientations) mask_contrasts = np.asarray(mask_contrasts) num_bfs = bf_indices.size num_orientations = mask_orientations.size num_contrasts = mask_contrasts.size # index of value in mask_orientations that is closest to orthogonal to base_orientations[bf_idx] orthogonal_orientations = [base_orientations[bf_indices[bf_idx]]-(np.pi/2) for bf_idx in range(num_bfs)] orthogonal_orientations = np.asarray([val + np.pi if val < 0 else val for val in orthogonal_orientations]) mask_or_idx = [np.argmin(orthogonal_orientations[bf_idx] - mask_orientations) for bf_idx in range(num_bfs)] cmap = plt.get_cmap('Greys') cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0) scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap) num_plots_y = np.int32(np.ceil(np.sqrt(num_bfs)))+1 num_plots_x = np.int32(np.ceil(np.sqrt(num_bfs))) gs_widths = [1.0,]*num_plots_x gs_heights = [1.0,]*num_plots_y gs = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.5, hspace=0.7, width_ratios=gs_widths, height_ratios=gs_heights) fig = plt.figure(figsize=(32,32)) #TODO: Adjust fig size according to num plots bf_idx = 0 for plot_id in np.ndindex((num_plots_y, num_plots_x)): (y_id, x_id) = plot_id if y_id == 0 and x_id == 0: ax = fig.add_subplot(gs[plot_id]) #ax.set_ylabel("Normalized Activation", fontsize=16) #ax.set_xlabel("Base Contrast", fontsize=16) #ax.set_ylim([0.0, 1.0]) ax00 = ax else: ax = fig.add_subplot(gs[plot_id], sharey=ax00) if bf_idx < num_bfs: for co_idx, mask_contrast in enumerate(mask_contrasts): # vary base contrast for fixed mask contrast & orthogonal mask activity = test_responses[bf_indices[bf_idx], :, co_idx, mask_or_idx[bf_idx]] color_val = scalarMap.to_rgba(mask_contrast) ax.plot(base_contrasts, activity, linestyle="-", color=color_val) ax.scatter(base_contrasts, activity, s=4, c=color_val, label=str(mask_contrast)) ax.set_xticks([base_contrasts[0], base_contrasts[-1]]) bf_idx += 1 else: ax = clear_axis(ax, spines="none") plt.show() return fig
7521e7c5208f6fa9fe12772faa09106b31d1a96b
3,658,799