content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def max_tb(collection): # pragma: no cover """Returns the maximum number of TB recorded in the collection""" max_TB = 0 for doc in collection.find({}).sort([('total_TB',-1)]).limit(1): max_TB = doc['total_TB'] return max_TB
5,356,600
def prep_im_for_blob(im, pixel_means, target_size_1, target_size_2, max_size_1, max_size_2): """Mean subtract and scale an image for use in a blob.""" im = im.astype(np.float32, copy=False) im -= pixel_means im_shape = im.shape im_size_min = np.min(im_shape[0:2]) im_size_max = np.max(im_shape[0:2]) im_scale_1 = float(target_size_1) / float(im_size_min) im_scale_2 = float(target_size_2) / float(im_size_min) # Prevent the biggest axis from being more than MAX_SIZE if np.round(im_scale_1 * im_size_max) > max_size_1: im_scale_1 = float(max_size_1) / float(im_size_max) if np.round(im_scale_2 * im_size_max) > max_size_2: im_scale_2 = float(max_size_2) / float(im_size_max) im_1 = cv2.resize(im, None, None, fx=im_scale_1, fy=im_scale_1, interpolation=cv2.INTER_LINEAR) im_2 = cv2.resize(im, None, None, fx=im_scale_2, fy=im_scale_2, interpolation=cv2.INTER_LINEAR) return im_1, im_2, im_scale_1, im_scale_2
5,356,601
def bandit_run_bandit_scan(attr_dict, path_package, package_name, path_sdk_settings=None, **__): """ Run Bandit Scan on whole package using the settings defined in ``constants.BANDIT_DEFAULT_ARGS``. Raises a SDKException if ``bandit`` isn't installed. In use with ``validate``, this method should only be called after successfully calling ``bandit_validate_bandit_installed``. If a call to that method returns a failing SDKValidateIssue, this method shouldn't be called The default severity level on which the bandit scan fails is "medium" (defined as command line arg "-ll") The user can overwrite the default settings using an SDK Settings JSON file either in the default location or by using the --settings flag to pass in a path. The settings file should have a "bandit" attribute which is a list of bandit command line options. Example (to change level to "low" and give 5 context lines): .. code-block:: json { "bandit": [ "-l", "-n", "5" ] } NOTE: that you can include more than just the severity level in the list. Any valid bandit command line args will be parsed (as seen above with the "-n" arg added in). More info here: https://github.com/PyCQA/bandit#readme or by running ``bandit -h`` The user can run the scan in ``verbose`` mode using the ``-v`` flag for the SDK to get output live as the scan is running. :param attr_dict: dictionary of attributes for the bandit scan defined in ``bandit_attributes`` :type attr_dict: dict :param path_package: path to package :type path_package: str :param package_name: name of the package (i.e. fn_my_package) :type package_name: str :param path_sdk_settings: (optional) path to a sdk settings JSON file :type path_sdk_settings: str :param __: (unused) other unused named args :type __: dict :return: 1 or 0 and a SDKValidateIssue with details about the bandit scan :rtype: (int, SDKValidateIssue) """ # Because this method requires importing bandit, it must be installed in the env if not sdk_helpers.get_package_version(constants.BANDIT_PACKAGE_NAME): raise SDKException("Cannot call {0} without bandit installed".format(bandit_run_bandit_scan.__name__)) bandit_args = [constants.BANDIT_PACKAGE_NAME, "-r", os.path.join(path_package, package_name)] bandit_args.extend(constants.BANDIT_DEFAULT_ARGS) if LOG.isEnabledFor(logging.DEBUG): # if running validate in verbose, append verbose flag to bandit args bandit_args.extend(constants.BANDIT_VERBOSE_FLAG) # grab bandit settings from sdk settings file if given and exists # if either file doesn't exist or file doesn't have "bandit" section # append on default severity level if path_sdk_settings and os.path.exists(path_sdk_settings): # if a settings file exists, check if it has a bandit section settings_file_contents = sdk_helpers.read_json_file(path_sdk_settings) # grab the bandit section (should be a list) settings_bandit_section = settings_file_contents.get(constants.SDK_SETTINGS_BANDIT_SECTION_NAME) if settings_bandit_section and isinstance(settings_bandit_section, list): LOG.debug("Reading bandit command line args from sdk settings JSON file {0}".format(path_sdk_settings)) LOG.debug("Bandit settings found in settings file: {0}".format(settings_bandit_section)) bandit_args.extend(settings_bandit_section) else: bandit_args.extend(constants.BANDIT_DEFAULT_SEVERITY_LEVEL) else: bandit_args.extend(constants.BANDIT_DEFAULT_SEVERITY_LEVEL) # run bandit as a subprocess exit_code, details = sdk_helpers.run_subprocess(bandit_args, cmd_name="bandit scan") # bandit will return a non-zero exit code if an issue of minimum severity level or 
higher # is found. # Example: if "-ll" (our default level which is called "medium") is passed, the process # will only return a non-zero code if there are "medium" or "high" issues. # if only "low" or "uncategorized" issues are found, it will return 0 if exit_code != 0: # all information above the "Test results" are not really relevant # but incase that string is not found, we just take the whole details details_start_string = "Test results" if details.index(details_start_string) != -1: details = details[details.index(details_start_string):] details = details.replace("\n", "\n\t\t") return 0, SDKValidateIssue( name=attr_dict.get("name"), description=attr_dict.get("fail_msg").format(details), severity=attr_dict.get("severity"), solution=attr_dict.get("fail_solution") if not LOG.isEnabledFor(logging.DEBUG) else "" ) else: # success return 1, SDKValidateIssue( name=attr_dict.get("name"), description=attr_dict.get("pass_msg"), severity=SDKValidateIssue.SEVERITY_LEVEL_INFO, solution=attr_dict.get("pass_solution") )
5,356,602
def plotann(annotation, title = None, timeunits = 'samples', returnfig = False): """ Plot sample locations of an Annotation object. Usage: plotann(annotation, title = None, timeunits = 'samples', returnfig = False) Input arguments: - annotation (required): An Annotation object. The sample attribute locations will be overlaid on the signal. - title (default=None): A string containing the title of the graph. - timeunits (default='samples'): String specifying the x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'. - returnfig (default=False): Specifies whether the figure is to be returned as an output argument Output argument: - figure: The matplotlib figure generated. Only returned if the 'returnfig' option is set to True. Note: The plotrec function is useful for plotting annotations on top of signal waveforms. Example Usage: import wfdb annotation = wfdb.rdann('sampledata/100', 'atr', sampfrom = 100000, sampto = 110000) annotation.fs = 360 wfdb.plotann(annotation, timeunits = 'minutes') """ # Check the validity of items used to make the plot # Get the x axis annotation values to plot plotvals = checkannplotitems(annotation, title, timeunits) # Create the plot fig=plt.figure() plt.plot(plotvals, np.zeros(len(plotvals)), 'r+') if title is not None: plt.title(title) # Axis Labels if timeunits == 'samples': plt.xlabel('index/sample') else: plt.xlabel('time/'+timeunits[:-1]) plt.show(fig) # Return the figure if requested if returnfig: return fig
5,356,603
def test_dummy_ucc_finder(employees): """Test unique column combination finder.""" assert UCCDummy().run(employees) is not None
5,356,604
async def test_activate_client(mqtt_client): """Test activate client method.""" route = respx.post(f"http://{HOST}:80/axis-cgi/mqtt/client.cgi") await mqtt_client.activate() assert route.called assert route.calls.last.request.method == "POST" assert route.calls.last.request.url.path == "/axis-cgi/mqtt/client.cgi" assert json.loads(route.calls.last.request.content) == { "apiVersion": "1.0", "context": "Axis library", "method": "activateClient", "params": {}, }
5,356,605
def match_assignments(nb_assignments, course_id): """ Check sqlalchemy table for match with nbgrader assignments from a specified course. Creates a dictionary with nbgrader assignments as the key If match is found, query the entry from the table and set as the value. Else, set the value to None """ nb_matches = {assignment.name:AssignmentMatch.query.filter_by(nbgrader_assign_name=assignment.name, course_id=course_id).first() for assignment in nb_assignments} return nb_matches
5,356,606
def xr_login_handler(spawn, context, session): """ handles xr login prompt """ credential = get_current_credential(context=context, session=session) if credential: common_cred_username_handler(spawn=spawn, context=context, credential=credential) else: spawn.sendline(context['username']) session['enable_login'] = 1
5,356,607
def tan(input): """Computes tangent of values in ``input``. :rtype: TensorList of tan(input). If input is an integer, the result will be float, otherwise the type is preserved. """ return _arithm_op("tan", input)
5,356,608
def test_epipolar_angle(): """ test epipolar angle computation """ # First case : same column, positive direction [row, col, alt] start_line_1 = np.array([1, 0, 0]) end_line_1 = np.array([2, 0, 0]) reference_alpha_1 = math.pi / 2.0 alpha = compute_epipolar_angle(end_line_1, start_line_1) assert alpha == reference_alpha_1 # Second case : same column, negative direction [row, col, alt] start_line_2 = np.array([2, 0, 0]) end_line_2 = np.array([1, 0, 0]) reference_alpha_2 = -(math.pi / 2.0) alpha = compute_epipolar_angle(end_line_2, start_line_2) assert alpha == reference_alpha_2 # Third case : different column, positive direction [row, col, alt] start_line_3 = np.array([2, 0, 0]) end_line_3 = np.array([1, 1, 0]) slope = (1 - 2) / (1 - 0) reference_alpha_3 = np.arctan(slope) alpha = compute_epipolar_angle(end_line_3, start_line_3) assert alpha == reference_alpha_3 # Fourth case : different column, negative direction [row, col, alt] start_line_4 = np.array([2, 1, 0]) end_line_4 = np.array([1, 0, 0]) slope = (1 - 2) / (0 - 1) reference_alpha_4 = math.pi + np.arctan(slope) alpha = compute_epipolar_angle(end_line_4, start_line_4) assert alpha == reference_alpha_4 # With multiple point start_lines = np.stack((start_line_1, start_line_2, start_line_3, start_line_4)) end_lines = np.stack((end_line_1, end_line_2, end_line_3, end_line_4)) reference_alphas = np.stack((reference_alpha_1, reference_alpha_2, reference_alpha_3, reference_alpha_4)) alphas = compute_epipolar_angle(end_lines, start_lines) np.testing.assert_array_equal(alphas, reference_alphas)
5,356,609
def recall_from_IoU(IoU, samples=500): """ plot recall_vs_IoU_threshold """ if not (isinstance(IoU, list) or IoU.ndim == 1): raise ValueError('IoU needs to be a list or 1-D') iou = np.float32(IoU) # Plot intersection over union IoU_thresholds = np.linspace(0.0, 1.0, samples) recall = np.zeros_like(IoU_thresholds) for idx, IoU_th in enumerate(IoU_thresholds): tp, relevant = 0, 0 inds, = np.where(iou >= IoU_th) recall[idx] = len(inds) * 1.0 / len(IoU) return recall, IoU_thresholds
5,356,610
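The recall_from_IoU snippet above sweeps an IoU threshold from 0 to 1 and, at each step, reports the fraction of detections whose IoU clears it. A compact NumPy-only restatement for illustration (recall_vs_iou is a hypothetical name, not from the dataset):

import numpy as np

def recall_vs_iou(iou_values, samples=500):
    """Fraction of detections whose IoU reaches each threshold in [0, 1]."""
    iou = np.asarray(iou_values, dtype=np.float32)
    thresholds = np.linspace(0.0, 1.0, samples)
    # recall at threshold t = (# of IoU values >= t) / (total # of IoU values)
    recall = np.array([(iou >= t).mean() for t in thresholds])
    return recall, thresholds

recall, thresholds = recall_vs_iou([0.9, 0.75, 0.4, 0.1])
print(recall[0], recall[-1])  # 1.0 at threshold 0.0, 0.0 at threshold 1.0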
def _GetImage(options): """Returns the ndvi regression image for the given options. Args: options: a dict created by _ReadOptions() containing the request options Returns: An ee.Image with the coefficients of the regression and a band called "rmse" containing the Root Mean Square Error for the ndvi value calculated by the regression or None if collection is empty. """ # renaming the used options regression = options["regression"] start = options["start"] collection = _GetCollection(options) # _GetCollection() returns None if collection is empty if collection is None: return None # Function to calculate the values needed for a regression with a polynomial of degree 1 def makePoly1Variables(img): date = img.date() doy = date.getRelative("day", "year") x1 = doy x0 = 1 return (img.select() .addBands(ee.Image.constant(x0)) # 0. a0 constant term .addBands(ee.Image.constant(x1)) # 1. a1*x .addBands(img.normalizedDifference(["NIR","RED"])) # 2. response variable (NDVI) .toFloat()) # Function to calculate the values needed for a regression with a polynomial of degree 2 def makePoly2Variables(img): date = img.date() doy = date.getRelative("day", "year") x2 = doy.pow(2) x1 = doy x0 = 1 return (img.select() .addBands(ee.Image.constant(x0)) # 0. a0 constant term .addBands(ee.Image.constant(x1)) # 1. a1*x .addBands(ee.Image.constant(x2)) # 2. a2*x^2 .addBands(img.normalizedDifference(["NIR","RED"])) # 4. response variable (NDVI) .toFloat()) # Function to calculate the values needed for a regression with a polynomial of degree 3 def makePoly3Variables(img): date = img.date() doy = date.getRelative("day", "year") x3 = doy.pow(3) x2 = doy.pow(2) x1 = doy x0 = 1 return (img.select() .addBands(ee.Image.constant(x0)) # 0. a0 constant term .addBands(ee.Image.constant(x1)) # 1. a1*x .addBands(ee.Image.constant(x2)) # 2. a2*x^2 .addBands(ee.Image.constant(x3)) # 3. a3*x^3 .addBands(img.normalizedDifference(["NIR","RED"])) # 4. response variable (NDVI) .toFloat()) # Function to calculate the values needed for a regression with the model after Zhu & Woodcock def makeZhuWoodVariables(img): seconds = img.date().millis().divide(1000).floor() seconds_start = ee.Date("%s-01-01" % start).millis().divide(1000).floor() seconds_offset = seconds.subtract(seconds_start) sin_intra = ee.Number(2).multiply(math.pi).divide(365*24*60*60).multiply(seconds_offset).sin() cos_intra = ee.Number(2).multiply(math.pi).divide(365*24*60*60).multiply(seconds_offset).cos() inter = seconds_offset return (img.select() .addBands(ee.Image.constant(1)) # 0. constant term .addBands(ee.Image.constant(cos_intra)) # 1. cos intra-annual .addBands(ee.Image.constant(sin_intra)) # 2. sin intra-annual .addBands(ee.Image.constant(inter)) # 3. inter-annual .addBands(img.normalizedDifference(["NIR","RED"])) # 5. 
response variable (NDVI) .toFloat()) makeVariables = {"poly1": makePoly1Variables,"poly2": makePoly2Variables, "poly3": makePoly3Variables, "zhuWood": makeZhuWoodVariables} # calculate the needed values for the regression collection_prepared = collection.map(makeVariables[regression]) predictorsCount = {"poly1": 2,"poly2": 3, "poly3": 4, "zhuWood": 4} # counts the ndvi values per pixel countValues = collection_prepared.select("nd").reduce(ee.Reducer.count()) # masks pixels with less than 2 * number of predictors, to deliver better results def countMask(img): return img.updateMask(countValues.gt(predictorsCount[regression]*2-1)) # use the countMask collection_prepared = collection_prepared.map(countMask) # doing the regression coefficients = collection_prepared.reduce(ee.Reducer.linearRegression(predictorsCount[regression], 1)) # flattens regression coefficients to one image with multiple bands flattenPattern = {"poly1": ["a0", "a1"], "poly2": ["a0", "a1", "a2"], "poly3": ["a0", "a1", "a2", "a3"], "zhuWood": ["a0", "a1", "a2", "a3"]} renamePattern = {"poly1": "doy", "poly2": "doy", "poly3": "doy", "zhuWood": "sec"} coefficientsImage = coefficients.select(["coefficients"]).arrayFlatten([flattenPattern[regression],[renamePattern[regression]]]) # flattens the root mean square of the predicted ndvi values rmse = coefficients.select("residuals").arrayFlatten([["rmse"]]) # combines coefficients and rmse and returns them a one ee.Image return coefficientsImage.addBands(rmse)
5,356,611
async def test_migrate_unique_id(hass): """Test migrate unique_id of the air_quality entity.""" registry = er.async_get(hass) # Pre-create registry entries for disabled by default sensors registry.async_get_or_create( AIR_QUALITY_DOMAIN, DOMAIN, 123, suggested_object_id="home", disabled_by=None, ) await init_integration(hass) entry = registry.async_get("air_quality.home") assert entry assert entry.unique_id == "123"
5,356,612
def get_registered_plugins(registry, as_instances=False, sort_items=True): """Get registered plugins. Get a list of registered plugins in a form if tuple (plugin name, plugin description). If not yet auto-discovered, auto-discovers them. :param registry: :param bool as_instances: :param bool sort_items: :return list: """ ensure_autodiscover() if as_instances: return registry._registry registered_plugins = [] for uid, plugin in registry._registry.items(): plugin_name = safe_text(plugin.name) registered_plugins.append((uid, plugin_name)) if sort_items: registered_plugins.sort() return registered_plugins
5,356,613
def datetime_to_timestamp(d): """convert a datetime object to seconds since Epoch. Args: d: a naive datetime object in default timezone Return: int, timestamp in seconds """ return int(time.mktime(d.timetuple()))
5,356,614
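time.mktime in the snippet above interprets the naive datetime in the machine's local timezone, so the same datetime yields different timestamps on hosts with different timezone settings. A small usage sketch (the printed value assumes a UTC system clock):

import datetime
import time

def datetime_to_timestamp(d):
    """Seconds since the Epoch for a naive datetime in the local timezone."""
    return int(time.mktime(d.timetuple()))

# On a host whose local timezone is UTC this prints 86400 (one day after the Epoch);
# other timezones shift the result by their UTC offset.
print(datetime_to_timestamp(datetime.datetime(1970, 1, 2)))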
def test_update__endtoend__4( address_book, FieldFactory, UpdateablePersonFactory, PostalAddressFactory, browser): """A user defined choice field can be updated.""" field_name = FieldFactory( address_book, IPostalAddress, 'Choice', u'distance', values=[u'< 50 km', u'>= 50 km']).__name__ PostalAddressFactory(UpdateablePersonFactory(address_book), **{field_name: '>= 50 km', 'set_as_default': True}) browser.login('mgr') browser.keyword_search(KEYWORD, apply='Update') browser.getControl('field').displayValue = ['postal address -- distance'] browser.getControl('Next').click() assert ['No value', '< 50 km', '>= 50 km'] == browser.getControl( 'new value').displayOptions browser.getControl('new value').displayValue = ['< 50 km'] browser.getControl('operation').displayValue = [ 'replace existing value with new one'] browser.getControl('Next').click() # Update sets the value to '< 50 km': assert ('<td>Tester</td><td><50km</td>' in browser.contents_without_whitespace)
5,356,615
def gumbel_softmax(logits, temperature): """From https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f logits: a tensor of shape (*, n_class) returns an one-hot vector of shape (*, n_class) """ y = gumbel_softmax_sample(logits, temperature) shape = y.size() _, ind = y.max(dim=-1) y_hard = torch.zeros_like(y).view(-1, shape[-1]) y_hard.scatter_(1, ind.view(-1, 1), 1) y_hard = y_hard.view(*shape) return (y_hard - y).detach() + y
5,356,616
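The gumbel_softmax snippet above implements the straight-through estimator: the forward pass emits a hard one-hot vector, while (y_hard - y).detach() + y lets gradients flow through the soft sample. A self-contained sketch; the original relies on an external gumbel_softmax_sample, so a standard form of that helper is assumed here:

import torch
import torch.nn.functional as F

def gumbel_softmax_sample(logits, temperature):
    """Soft sample from the Gumbel-softmax distribution (assumed helper)."""
    gumbel_noise = -torch.log(-torch.log(torch.rand_like(logits) + 1e-20) + 1e-20)
    return F.softmax((logits + gumbel_noise) / temperature, dim=-1)

def gumbel_softmax(logits, temperature):
    """Hard one-hot forward pass, soft gradients in the backward pass."""
    y = gumbel_softmax_sample(logits, temperature)
    index = y.max(dim=-1, keepdim=True)[1]
    y_hard = torch.zeros_like(y).scatter_(-1, index, 1.0)
    return (y_hard - y).detach() + y

logits = torch.randn(2, 5, requires_grad=True)
sample = gumbel_softmax(logits, temperature=0.5)
sample.sum().backward()
print(sample)                   # each row is exactly one-hot
print(logits.grad is not None)  # True: gradients still reach the logits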
def in_whitelist(address): """ Test if the given email address is contained in the list of allowed addressees. """ if WHITELIST is None: return True else: return any(regex.search(address) for regex in WHITELIST)
5,356,617
def decomposePath(path): """ :example: >>> decomposePath(None) >>> decomposePath("") >>> decomposePath(1) >>> decomposePath("truc") ('', 'truc', '', 'truc') >>> decomposePath("truc.txt") ('', 'truc', 'txt', 'truc.txt') >>> decomposePath("/home/truc.txt") ('/home/', 'truc', 'txt', 'truc.txt') >>> decomposePath("/home/truc.txt.bz2") ('/home/', 'truc.txt', 'bz2', 'truc.txt.bz2') >>> decomposePath("/truc.txt.bz2") ('/', 'truc.txt', 'bz2', 'truc.txt.bz2') >>> decomposePath("./truc.txt.bz2") ('./', 'truc.txt', 'bz2', 'truc.txt.bz2') >>> decomposePath(".truc.txt.bz2") ('', '.truc.txt', 'bz2', '.truc.txt.bz2') """ if path is None or type(path) is not str or len(path) == 0: return None filenameExt = path.split("/")[-1] dir = path[0:-len(filenameExt)] filename = ".".join(filenameExt.split(".")[0:-1]) ext = filenameExt.split(".")[-1] if len(filename) == 0 and len(ext) > 0: filename, ext = ext, filename return (dir, filename, ext, filenameExt)
5,356,618
def BSCLLR(c,p): """ c: A list of ones and zeros representing a codeword received over a BSC. p: Flip probability of the BSC. Returns log-likelihood ratios for c. """ N = len(c) evidence = [0]*N for i in range(N): if (c[i]): evidence[i] = log(p/(1-p)) else: evidence[i] = log((1-p)/p) return evidence
5,356,619
def start_serving(app_name='mms', args=None): """Start service routing. Parameters ---------- app_name : str App name to initialize mms service. args : List of str Arguments for starting service. By default it is None and commandline arguments will be used. It should follow the format recognized by python argparse parse_args method: https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.parse_args. An example for mms arguments: ['--models', 'resnet-18=path1', 'inception_v3=path2', '--gen-api', 'java', '--port', '8080'] """ # Parse the given arguments arguments = ArgParser.extract_args(args) # Download and/or Extract the given model files models = ModelLoader.load(arguments.models) # Instantiate an MMS object and prepare to start serving mms = MMS(app_name, args=arguments, models=models) mms.start_model_serving()
5,356,620
def java_fat_library(name=None, srcs=[], deps=[], visibility=None, tags=[], resources=[], source_encoding=None, warnings=None, exclusions=[], **kwargs): """Define java_fat_library target.""" target = JavaFatLibrary( name=name, srcs=srcs, deps=deps, visibility=visibility, tags=tags, resources=resources, source_encoding=source_encoding, warnings=warnings, exclusions=exclusions, kwargs=kwargs) build_manager.instance.register_target(target)
5,356,621
def _res_dynamics_fwd( real_input, imag_input, sin_decay, cos_decay, real_state, imag_state, threshold, w_scale, dtype=torch.int32 ): """ """ dtype = torch.int64 device = real_state.device real_old = (real_state * w_scale).clone().detach().to(dtype).to(device) imag_old = (imag_state * w_scale).clone().detach().to(dtype).to(device) sin_decay_int = (sin_decay).clone().detach().to(dtype).to(device) cos_decay_int = (cos_decay).clone().detach().to(dtype).to(device) real = torch.zeros_like(real_input) imag = torch.zeros_like(imag_input) threshold *= w_scale num_steps = real_input.shape[-1] for n in range(num_steps): real_new = right_shift_to_zero(cos_decay_int * real_old, 12) \ - right_shift_to_zero(sin_decay_int * imag_old, 12) \ + (w_scale * real_input[..., n]).to(dtype) imag_new = right_shift_to_zero(sin_decay_int * real_old, 12) \ + right_shift_to_zero(cos_decay_int * imag_old, 12) \ + (w_scale * imag_input[..., n]).to(dtype) if threshold >= 0: spike_new = (imag_new >= threshold).to(dtype) real_old = ((1 - spike_new) * real_new).to(dtype) imag_old = ( spike_new * (threshold - 1) + (1 - spike_new) * imag_new ).to(dtype) else: real_old = real_new imag_old = imag_new real[..., n] = real_new / w_scale imag[..., n] = imag_new / w_scale return real, imag
5,356,622
def make_json_error(error): """ Handle errors by logging and """ message = extract_error_message(error) status_code = extract_status_code(error) context = extract_context(error) retryable = extract_retryable(error) headers = extract_headers(error) # Flask will not log user exception (fortunately), but will log an error # for exceptions that escape out of the application entirely (e.g. if the # error handler raises an error) error_logger.debug("Handling {} error: {}".format( status_code, message, )) # Serialize into JSON response response_data = { "code": status_code, "context": context, "message": message, "retryable": retryable, } # Don't pass in the error schema because it will suppress any extra fields return dump_response_data(None, response_data, status_code, headers)
5,356,623
def tokenize_finding(finding): """Turn the finding into multiple findings split by whitespace.""" tokenized = set() tokens = finding.text.split() cursor = 0 # Note that finding.start and finding.end refer to the location in the overall # text, but finding.text is just the text for this finding. for token in tokens: start = finding.text.find(token, cursor) cursor = end = start + len(token) tokenized.add(Finding( finding.category, start + finding.start, end + finding.start, token, finding.context_start, finding.raw_context)) return tokenized
5,356,624
def main(): """ Take a folder with mridata.org .h5 files, read them, load to np matrices and save as pickle for convenience :return: None """ file_list = sorted([f for f in os.listdir(f'{data_path}') if f.endswith('h5')]) for f in file_list: filename = os.path.join(f'{data_path}', f) if not os.path.isfile(filename): print("%s is not a valid file" % filename) raise SystemExit dset = ismrmrd.Dataset(filename, 'dataset', create_if_needed=False) header = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header()) enc = header.encoding[0] # Matrix size eNx = enc.encodedSpace.matrixSize.x eNy = enc.encodedSpace.matrixSize.y eNz = enc.encodedSpace.matrixSize.z # Field of View eFOVx = enc.encodedSpace.fieldOfView_mm.x eFOVy = enc.encodedSpace.fieldOfView_mm.y eFOVz = enc.encodedSpace.fieldOfView_mm.z # Number of Slices, Reps, Contrasts, etc. ncoils = header.acquisitionSystemInformation.receiverChannels if enc.encodingLimits.slice != None: nslices = enc.encodingLimits.slice.maximum + 1 else: nslices = 1 if enc.encodingLimits.repetition != None: nreps = enc.encodingLimits.repetition.maximum + 1 else: nreps = 1 if enc.encodingLimits.contrast != None: ncontrasts = enc.encodingLimits.contrast.maximum + 1 else: ncontrasts = 1 # Initialiaze a storage array all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, eNx), dtype=np.complex64) # Loop through the rest of the acquisitions and stuff for acqnum in tqdm(range(0, dset.number_of_acquisitions())): acq = dset.read_acquisition(acqnum) rep = acq.idx.repetition contrast = acq.idx.contrast slice = acq.idx.slice y = acq.idx.kspace_encode_step_1 z = acq.idx.kspace_encode_step_2 all_data[rep, contrast, slice, :, z, y, :] = acq.data with open(f'{filename.split(".")[0]}.pickle', 'wb') as handle: pickle.dump(all_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
5,356,625
def gaussian_filter_density(gt): """generate ground truth density map Args: gt: (height, width), object center is 1.0, otherwise 0.0 Returns: density map """ density = np.zeros(gt.shape, dtype=np.float32) gt_count = np.count_nonzero(gt) if gt_count == 0: return density pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0]))) # (x,y) leaf_size = 2048 # build kd tree tree = scipy.spatial.KDTree(pts.copy(), leafsize=leaf_size) # query kd tree distances, locations = tree.query(pts, k=4) for i, pt in enumerate(pts): pt2d = np.zeros(gt.shape, dtype=np.float32) pt2d[pt[1], pt[0]] = 1. if gt_count > 1: sigma = (distances[i][1] + distances[i][2] + distances[i][3]) * 0.085 sigma = min(sigma, 999) # avoid inf else: raise NotImplementedError('should not be here!!') density += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant') return density
5,356,626
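gaussian_filter_density above builds a crowd-counting style density map: every annotated point becomes a Gaussian whose bandwidth is 0.085 times the summed distance to its three nearest neighbours (a geometry-adaptive kernel), so the map integrates to roughly the number of points. A usage sketch, assuming the function above and its scipy/numpy imports are in scope:

import numpy as np

# A 64x64 ground-truth map with five annotated head positions set to 1.0.
gt = np.zeros((64, 64), dtype=np.float32)
for y, x in [(10, 12), (20, 40), (33, 8), (45, 50), (55, 30)]:
    gt[y, x] = 1.0

density = gaussian_filter_density(gt)
print(density.shape)         # (64, 64)
print(float(density.sum()))  # close to 5.0: roughly one unit of mass per point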
def RGBfactorstoBaseandRange( lumrange: list[int, int], rgbfactors: list[float, float, float]): """Get base color luminosity and luminosity range from color expressed as r, g, b float values and min and max byte luminosity values Args: lumrange: [minval: byte maxval: byte] rgbfactors: color as [r: float, g: float, b: float] Returns: base luminosity as [r: byte, g: byte, b: byte] luminosity range as [r: byte, g: byte, b: byte] """ baselum = intscalarmulvect( rgbfactors, lumrange[0]) lumrange = subvect(scalarmulvect( rgbfactors, lumrange[1]), baselum) return baselum, lumrange
5,356,627
def mean_by_orbit(inst, data_label): """Mean of data_label by orbit over Instrument.bounds Parameters ---------- data_label : string string identifying data product to be averaged Returns ------- mean : pandas Series simple mean of data_label indexed by start of each orbit """ return _core_mean(inst, data_label, by_orbit=True)
5,356,628
def quantile_constraint( column: str, quantile: float, assertion: Callable[[float], bool], where: Optional[str] = None, hint: Optional[str] = None, ) -> Constraint: """ Runs quantile analysis on the given column and executes the assertion column: Column to run the assertion on quantile: Which quantile to assert on assertion Callable that receives a float input parameter (the computed quantile) and returns a boolean hint: A hint to provide additional context why a constraint could have failed """ quant = Quantile(column, quantile, where) constraint = AnalysisBasedConstraint[float]( quant, assertion, hint=hint # type: ignore[arg-type] ) return NamedConstraint(constraint, f"QuantileConstraint({quant})")
5,356,629
def test_activate_ruleset(setup, create_deactivated_ruleset, rulesengine_db): """ tests activate_ruleset functionality. Requires that setup is run and there is a ruleset inactive. """ from src.praxxis.rulesengine import activate_ruleset from tests.src.praxxis.util import dummy_object import os name1 = dummy_object.make_dummy_ruleset("generated_deactivated_ruleset") result = activate_ruleset.activate_ruleset(name1, rulesengine_db) assert result == name1.name
5,356,630
def _compute_rank( kg_embedding_model, pos_triple, corrupted_subject_based, corrupted_object_based, device, ) -> Tuple[int, int]: """ :param kg_embedding_model: :param pos_triple: :param corrupted_subject_based: :param corrupted_object_based: :param device: :param all_pos_triples_hashed: This parameter isn't used but is necessary for compatability """ corrupted_subject_based = torch.tensor( corrupted_subject_based, dtype=torch.long, device=device ) corrupted_object_based = torch.tensor(corrupted_object_based, dtype=torch.long, device=device) scores_of_corrupted_subjects = kg_embedding_model.predict(corrupted_subject_based) scores_of_corrupted_objects = kg_embedding_model.predict(corrupted_object_based) pos_triple = np.array(pos_triple) pos_triple = np.expand_dims(a=pos_triple, axis=0) pos_triple = torch.tensor(pos_triple, dtype=torch.long, device=device) score_of_positive = kg_embedding_model.predict(pos_triple) scores_subject_based = np.append(arr=scores_of_corrupted_subjects, values=score_of_positive) indice_of_pos_subject_based = scores_subject_based.size - 1 scores_object_based = np.append(arr=scores_of_corrupted_objects, values=score_of_positive) indice_of_pos_object_based = scores_object_based.size - 1 _, sorted_score_indices_subject_based = torch.sort( torch.tensor(scores_subject_based, dtype=torch.float), descending=False) sorted_score_indices_subject_based = sorted_score_indices_subject_based.cpu().numpy() _, sorted_score_indices_object_based = torch.sort( torch.tensor(scores_object_based, dtype=torch.float), descending=False) sorted_score_indices_object_based = sorted_score_indices_object_based.cpu().numpy() # Get index of first occurrence that fulfills the condition rank_of_positive_subject_based = np.where(sorted_score_indices_subject_based == \ indice_of_pos_subject_based)[0][0] rank_of_positive_object_based = np.where(sorted_score_indices_object_based == \ indice_of_pos_object_based)[0][0] return ( rank_of_positive_subject_based, rank_of_positive_object_based, )
5,356,631
def _get_bool_argument(ctx: ClassDefContext, expr: CallExpr, name: str, default: bool) -> bool: """Return the boolean value for an argument to a call or the default if it's not found. """ attr_value = _get_argument(expr, name) if attr_value: ret = ctx.api.parse_bool(attr_value) if ret is None: ctx.api.fail('"{}" argument must be True or False.'.format(name), expr) return default return ret return default
5,356,632
def assess_month(pred, valid, skill_name, model_description, lead_time, save_path): """ Assesses the performance of a model for a given month Parameters ---------- pred : xr.Dataset Predictions valid : xr.Dataset Observations skill_name : str Skill to evaluate model_description : str Short description of the model, used for the filename lead_time : int Forecast leading time save_path : str Path where figure is saved """ monthly_skill = [] for month in range(1, 12+1): monthly_pred = pred.sel(time=pred['time.month']==month) monthly_obs = valid.sel(time=valid['time.month']==month) monthly_skill.append(skill_fcts[skill_name](monthly_pred, monthly_obs)) vmin_z, vmax_z = _compute_min_max([skillmap['z'] for skillmap in monthly_skill]) vmin_t, vmax_t = _compute_min_max([skillmap['t'] for skillmap in monthly_skill]) if skill_name == "rSD": vmin_z, vmax_z = 1 - max(abs(1 - vmin_z), abs(vmax_z - 1)), 1 + max(abs(1 - vmin_z), abs(vmax_z - 1)) vmin_t, vmax_t = 1 - max(abs(1 - vmin_t), abs(vmax_t - 1)), 1 + max(abs(1 - vmin_t), abs(vmax_t - 1)) title = "Monthly evaluation of " + skill_name + " for a {} h lead time".format(lead_time) filename = save_path + "_".join(["MonthlySummary", skill_name, model_description, str(lead_time)]) + ".png" # Plot f, axs = plt.subplots(8, 3, figsize=(18, 30), subplot_kw=dict(projection=proj)) axs = np.array(axs) f.suptitle(title, fontsize=26, y=1.02) for i, ax in enumerate(axs.reshape(-1)[:12]): plot_signal(f, sample=monthly_skill[i], var='z', ax=ax, vmin=vmin_z, vmax=vmax_z, proj=proj, cmap=cmaps[skill_name]) ax.set_title(months[i+1] + " Z500", fontsize=20) for i, ax in enumerate(axs.reshape(-1)[12:]): plot_signal(f, sample=monthly_skill[i], var='t', ax=ax, vmin=vmin_t, vmax=vmax_t, proj=proj, cmap=cmaps[skill_name]) ax.set_title(months[i+1] + " T850", fontsize=20) f.tight_layout(pad=-2) plt.savefig(filename, bbox_inches = 'tight')
5,356,633
def validate_filter_parameter(string): """ Extracts a single filter parameter in name[=value] format """ result = () if string: comps = string.split('=', 1) if comps[0]: if len(comps) > 1: # In the portal, if value textbox is blank we store the value as empty string. # In CLI, we should allow inputs like 'name=', which correspond to empty string value. # But there is no way to differentiate between CLI inputs 'name=' and 'name=""'. # So even though "" is invalid JSON escaped string, we will accept it and set the value as empty string. filter_param_value = '\"\"' if comps[1] == "" else comps[1] try: # Ensure that provided value of this filter parameter is valid JSON. Error out if value is invalid JSON. filter_param_value = json.loads(filter_param_value) except ValueError: raise CLIError('Filter parameter value must be a JSON escaped string. "{}" is not a valid JSON object.'.format(filter_param_value)) result = (comps[0], filter_param_value) else: result = (string, '') else: # Error out on invalid arguments like '=value' or '=' raise CLIError('Invalid filter parameter "{}". Parameter name cannot be empty.'.format(string)) return result
5,356,634
def test_filter_features_multi_point(): """MultiPoints should be turned into multiple Points""" features = [ { "type": "Feature", "geometry": { "type": "MultiPoint", "coordinates": [[0, 0], [1, 0]] } } ] expected = [ { "type": "Feature", "geometry": { "type": "Point", "coordinates": [0, 0] } }, { "type": "Feature", "geometry": { "type": "Point", "coordinates": [1, 0] } } ] assert list(sutils.filter_features(features)) == expected
5,356,635
def delightLearn(configfilename): """ :param configfilename: :return: """ threadNum = 0 numThreads = 1 #parse arguments params = parseParamFile(configfilename, verbose=False) if threadNum == 0: logger.info("--- DELIGHT-LEARN ---") # Read filter coefficients, compute normalization of filters bandCoefAmplitudes, bandCoefPositions, bandCoefWidths, norms = readBandCoefficients(params) numBands = bandCoefAmplitudes.shape[0] redshiftDistGrid, redshiftGrid, redshiftGridGP = createGrids(params) f_mod = readSEDs(params) numObjectsTraining = np.sum(1 for line in open(params['training_catFile'])) msg= 'Number of Training Objects ' + str(numObjectsTraining) logger.info(msg) firstLine = int(threadNum * numObjectsTraining / numThreads) lastLine = int(min(numObjectsTraining,(threadNum + 1) * numObjectsTraining / numThreads)) numLines = lastLine - firstLine msg ='Thread ' + str(threadNum) + ' , analyzes lines ' + str(firstLine) + ' , to ' + str(lastLine) logger.info(msg) DL = approx_DL() gp = PhotozGP(f_mod, bandCoefAmplitudes, bandCoefPositions, bandCoefWidths, params['lines_pos'], params['lines_width'], params['V_C'], params['V_L'], params['alpha_C'], params['alpha_L'], redshiftGridGP, use_interpolators=True) B = numBands numCol = 3 + B + B*(B+1)//2 + B + f_mod.shape[0] localData = np.zeros((numLines, numCol)) fmt = '%i ' + '%.12e ' * (localData.shape[1] - 1) loc = - 1 crossValidate = params['training_crossValidate'] trainingDataIter1 = getDataFromFile(params, firstLine, lastLine,prefix="training_", getXY=True,CV=crossValidate) if crossValidate: chi2sLocal = None bandIndicesCV, bandNamesCV, bandColumnsCV,bandVarColumnsCV, redshiftColumnCV = readColumnPositions(params, prefix="training_CV_", refFlux=False) for z, normedRefFlux,\ bands, fluxes, fluxesVar,\ bandsCV, fluxesCV, fluxesVarCV,\ X, Y, Yvar in trainingDataIter1: loc += 1 themod = np.zeros((1, f_mod.shape[0], bands.size)) for it in range(f_mod.shape[0]): for ib, band in enumerate(bands): themod[0, it, ib] = f_mod[it, band](z) # really calibrate the luminosity parameter l compared to the model # according the best type of galaxy chi2_grid, ellMLs = scalefree_flux_likelihood(fluxes,fluxesVar,themod,returnChi2=True) bestType = np.argmin(chi2_grid) # best type ell = ellMLs[0, bestType] # the luminosity factor X[:, 2] = ell gp.setData(X, Y, Yvar, bestType) lB = bands.size localData[loc, 0] = lB localData[loc, 1] = z localData[loc, 2] = ell localData[loc, 3:3+lB] = bands localData[loc, 3+lB:3+f_mod.shape[0]+lB+lB*(lB+1)//2+lB] = gp.getCore() if crossValidate: model_mean, model_covar = gp.predictAndInterpolate(np.array([z]), ell=ell) if chi2sLocal is None: chi2sLocal = np.zeros((numObjectsTraining, bandIndicesCV.size)) ind = np.array([list(bandIndicesCV).index(b) for b in bandsCV]) chi2sLocal[firstLine + loc, ind] = - 0.5 * (model_mean[0, bandsCV] - fluxesCV)**2 /(model_covar[0, bandsCV] + fluxesVarCV) if threadNum == 0: reducedData = np.zeros((numObjectsTraining, numCol)) if crossValidate: chi2sGlobal = np.zeros_like(chi2sLocal) #comm.Allreduce(chi2sLocal, chi2sGlobal, op=MPI.SUM) #comm.Barrier() chi2sGlobal = chi2sLocal firstLines = [int(k*numObjectsTraining/numThreads) for k in range(numThreads)] lastLines = [int(min(numObjectsTraining, (k+1)*numObjectsTraining/numThreads)) for k in range(numThreads)] sendcounts = tuple([(lastLines[k] - firstLines[k]) * numCol for k in range(numThreads)]) displacements = tuple([firstLines[k] * numCol for k in range(numThreads)]) reducedData = localData # parameters for the GP process on traniing data are transfered to 
reduced data and saved in file #'training_paramFile' if threadNum == 0: np.savetxt(params['training_paramFile'], reducedData, fmt=fmt) if crossValidate: np.savetxt(params['training_CVfile'], chi2sGlobal)
5,356,636
def ones(distribution, dtype=float): """Create a LocalArray filled with ones.""" la = LocalArray(distribution=distribution, dtype=dtype) la.fill(1) return la
5,356,637
def stderr(tag, component): """Look at the stderr for a map component.""" click.echo(_cli_load(tag).stderr[component])
5,356,638
def draw_and_save_grid( mol_list, names, subImgSize, mol_per_row, filename ): """ Draw RDKit molecules and save SVG. """ img = Draw.MolsToGridImage( mol_list, molsPerRow=mol_per_row, subImgSize=subImgSize, legends=names, useSVG=True ) save_svg( filename=filename, string=img )
5,356,639
def reseta_status_e_ofertas_dos_proponentes(): """ Deletes every uniform offer item from every proponent; Applies the 'inscrito' (enrolled) status to the proponents; """ print('### >>> Limpa as ofertas de uniforme') limpa_ofertas_de_uniforme() print('### >>> Seta os proponentes com status de inscrito') set_status_inscrito()
5,356,640
def build(target, format, source, output, stringparam, xsl, publication, webwork, diagrams, diagrams_format, only_assets, clean): """ Process [TARGET] into format specified by project.ptx. Also accepts manual command-line options. For many formats, images coded in source (latex-image, etc) will only be processed when using the --diagrams option. If the project included WeBWorK exercises, these must be processed using the --webwork option. Certain builds may require installations not included with the CLI, or internet access to external servers. Command-line paths to non-Python executables may be set in project.ptx. For more details, consult the PreTeXt Guide: https://pretextbook.org/documentation.html """ target_name = target # set up stringparams as dictionary: if len(stringparam) > 0: stringparams = {p[0] : p[1] for p in stringparam} else: stringparams = None if utils.project_path() is None: log.warning(f"No project.ptx manifest was found. Run `pretext init` to generate one.") log.warning("Continuing using commandline arguments.") if publication is None: pass target = Target(name=format,format=format,source=source,output_dir=output, publication=publication,stringparams=stringparams) project = Project(targets=[target]) else: project = Project() if target_name is None: log.info(f"Since no build target was supplied, the first target of the "+ "project.ptx manifest will be built.") target = project.target(name=target_name) if target is None: log.critical("Build target could not be found in project.ptx manifest.") log.critical("Exiting without completing task.") return #overwrite target with commandline arguments, update project accordingly target = Target(xml_element=target.xml_element(), format=format,source=source,output_dir=output, publication=publication,stringparams=stringparams,xsl_path=xsl) project = Project(xml_element=project.xml_element(),targets=[target]) project.build(target_name,webwork,diagrams,diagrams_format,only_assets,clean)
5,356,641
def update_podcast_url(video): """Query the DDB table for this video. If found, it means we have a podcast m4a stored in S3. Otherwise, return no podcast. """ try: response = PODCAST_TABLE_CLIENT.query( KeyConditionExpression=Key('session').eq(video.session_id) & Key('year').eq(video.get_published_year()) ) except ClientError as error: print('Problem getting data from DynamoDB: {}'.format(error)) return False else: if response['Count'] == 1: video.podcast_url = response['Items'][0]['url'] return True
5,356,642
def translate(filename): """ File editing handler """ if request.method == 'POST': return save_translation(app, request, filename) else: return open_editor_form(app, request, filename)
5,356,643
def read_fileset(fileset): """ Extract required data from the sdoss fileset. """ feat_data = { 'DATE_OBS': [], 'FEAT_HG_LONG_DEG': [], 'FEAT_HG_LAT_DEG': [], 'FEAT_X_PIX': [], 'FEAT_Y_PIX': [], 'FEAT_AREA_DEG2': [], 'FEAT_FILENAME': []} for current_file in fileset: current_date = get_date_obs(current_file) current_data = read_csv(current_file) if (len(current_data) == 0): LOG.error("Empty file: %s!", current_file) return None for cd in current_data: feat_data['DATE_OBS'].append(current_date) feat_data['FEAT_HG_LONG_DEG'].append(float(cd['FEAT_HG_LONG_DEG'])) feat_data['FEAT_HG_LAT_DEG'].append(float(cd['FEAT_HG_LAT_DEG'])) feat_data['FEAT_X_PIX'].append(int(cd['FEAT_X_PIX'])) feat_data['FEAT_Y_PIX'].append(int(cd['FEAT_Y_PIX'])) feat_data['FEAT_AREA_DEG2'].append(float(cd['FEAT_AREA_DEG2'])) feat_data['FEAT_FILENAME'].append(current_file) return feat_data
5,356,644
def p_command_print2(p): """com : PRINT LPAREN expr RPAREN SEMI_COLON""" p[0] = ('PRINT',p[3])
5,356,645
def get_branch_index(edge_index, edge_degree, branch_cutting_frequency=1000): """Finds the branch indexes for each branch in the MST. Parameters ---------- edge_index : array The node index of the ends of each edge. edge_degree : array The degree for the ends of each edge. branch_cutting_frequency : int, optional An optimisation parameter, used to remove edges that have already been placed into a branch. This significantly improves the speed of the algorithm as branches that are already constructed are now removed from the branch finder. Returns ------- branch_index : list A list of branches where each branch is a list of the edge index of edges contained in each branch. branch_index_rejected : list A list of branches that have not been completed. This will occur only if a subset of the edge indexes of the full tree is provided. """ degree1 = edge_degree[0] degree2 = edge_degree[1] index1 = edge_index[0] index2 = edge_index[1] condition = np.where((degree1 == 2.) & (degree2 == 2.))[0] index_branch_mid = condition index_branch_mid1 = index1[index_branch_mid] index_branch_mid2 = index2[index_branch_mid] condition = np.where(((degree1 == 2.) & (degree2 != 2.)) | ((degree1 != 2.) & (degree2 == 2.)))[0] index_branch_end = condition index_branch_end1 = index1[index_branch_end] index_branch_end2 = index2[index_branch_end] degree_branch_end1 = degree1[index_branch_end] degree_branch_end2 = degree2[index_branch_end] check_mid = np.ones(len(index_branch_mid)) check_end = np.ones(len(index_branch_end)) branch_index = [] branch_index_rejected = [] mask_end = np.ones(index_branch_end.shape, dtype=np.bool) mask_mid = np.ones(index_branch_mid.shape, dtype=np.bool) count = 0 item = 0 while item < len(index_branch_end): if check_end[item] == 1.: check_end[item] = 0. done = 0. _twig = [] _twig.append(index_branch_end[item]) if degree_branch_end1[item] == 2.: node_index = index_branch_end1[item] elif degree_branch_end2[item] == 2.: node_index = index_branch_end2[item] else: assert ValueError("branch edge incorrect.") mask_end[item] = False while done == 0.: condition = np.where(((check_mid == 1.) & (index_branch_mid1 == node_index)) | ((check_mid == 1.) & (index_branch_mid2 == node_index)))[0] if len(condition) == 0: condition = np.where(((check_end == 1.) & (index_branch_end1 == node_index)) | ((check_end == 1.) & (index_branch_end2 == node_index)))[0] if len(condition) == 0: branch_index_rejected = branch_index_rejected + \ np.ndarray.tolist(np.ndarray.flatten(np.array(_twig))) done = 1. else: check_end[condition] = 0. _twig.append(index_branch_end[condition]) done = 1. mask_end[condition] = False branch_index.append(np.ndarray.tolist(np.ndarray.flatten(np.array(_twig)))) else: if len(condition) == 1: check_mid[condition] = 0. 
_twig.append(index_branch_mid[condition]) if index_branch_mid1[condition] == node_index: node_index = index_branch_mid2[condition] elif index_branch_mid2[condition] == node_index: node_index = index_branch_mid1[condition] else: assert ValueError("Identification error.") mask_mid[condition] = False else: assert ValueError("Found more than one vertex.") else: pass if count % branch_cutting_frequency == 0 and count != 0: index_branch_end = index_branch_end[mask_end] check_end = check_end[mask_end] index_branch_end1 = index_branch_end1[mask_end] index_branch_end2 = index_branch_end2[mask_end] degree_branch_end1 = degree_branch_end1[mask_end] degree_branch_end2 = degree_branch_end2[mask_end] index_branch_mid = index_branch_mid[mask_mid] check_mid = check_mid[mask_mid] index_branch_mid1 = index_branch_mid1[mask_mid] index_branch_mid2 = index_branch_mid2[mask_mid] mask_end = mask_end[mask_end] mask_mid = mask_mid[mask_mid] count = count + 1 item = 0 elif count % 1001 == 0: count = count + 1 item = item + 1 elif item == len(index_branch_end) - 1: index_branch_end = index_branch_end[mask_end] check_end = check_end[mask_end] index_branch_end1 = index_branch_end1[mask_end] index_branch_end2 = index_branch_end2[mask_end] degree_branch_end1 = degree_branch_end1[mask_end] degree_branch_end2 = degree_branch_end2[mask_end] index_branch_mid = index_branch_mid[mask_mid] check_mid = check_mid[mask_mid] index_branch_mid1 = index_branch_mid1[mask_mid] index_branch_mid2 = index_branch_mid2[mask_mid] mask_end = mask_end[mask_end] mask_mid = mask_mid[mask_mid] count = count + 1 item = 0 else: count = count + 1 item = item + 1 branch_index_rejected = branch_index_rejected + np.ndarray.tolist(np.ndarray.flatten(np.array(index_branch_mid))) branch_index = [np.ndarray.tolist(np.hstack(np.array(branch_index[i]))) for i in range(0, len(branch_index))] if len(branch_index_rejected) != 0: branch_index_rejected = np.ndarray.tolist(np.hstack(np.array(branch_index_rejected))) return branch_index, branch_index_rejected
5,356,646
def update_coverage(coverage, path, func, line, status): """Add to coverage the coverage status of a single line""" coverage[path] = coverage.get(path, {}) coverage[path][func] = coverage[path].get(func, {}) coverage[path][func][line] = coverage[path][func].get(line, status) coverage[path][func][line] = coverage[path][func][line].combine(status) return coverage
5,356,647
def main( argv: Optional[list[str]] = None, *, stdin=None, stdout=None, stderr=None, ): """Gada main: .. code-block:: python >>> import gada >>> >>> # Overwrite "gada/test/testnodes/config.yml" for this test >>> gada.test_utils.write_testnodes_config({ ... 'nodes': { ... 'echo': { ... 'runner': 'generic', ... 'bin': 'echo' ... } ... } ... }) >>> >>> # Need to create fake stdin and stdout for unittests >>> with gada.test_utils.PipeStream() as stdin: ... with gada.test_utils.PipeStream() as stdout: ... # Run node with CLI arguments ... gada.main( ... ['gada', 'testnodes.echo', 'hello'], ... stdin=stdin.reader, ... stdout=stdout.writer, ... stderr=stdout.writer ... ) ... ... # Close writer end so we can read form it ... stdout.writer.close() ... ... # Read node output ... stdout.reader.read().decode().strip() 'hello' >>> The three parameters ``stdin``, ``stdout`` or ``stderr`` are provided as a convenience for writing unit tests when you can't use ``sys.stdin`` or ``sys.stdout``, or simply when you want to be able to read from the output. :param argv: command line arguments :param stdin: input stream :param stdout: output stream :param stderr: error stream """ argv = sys.argv if argv is None else argv parser = argparse.ArgumentParser(prog="Service", description="Help") parser.add_argument("node", type=str, help="command name") parser.add_argument( "argv", type=str, nargs=argparse.REMAINDER, help="additional CLI arguments" ) parser.add_argument("-v", "--verbose", action="store_true", help="Verbosity level") args = parser.parse_args(args=argv[1:]) node_argv, gada_argv = split_unknown_args(args.argv) run(node=args.node, argv=node_argv, stdin=stdin, stdout=stdout, stderr=stderr)
5,356,648
def cross_product(v1, v2): """Calculate the cross product of 2 vectors as (x1 * y2 - x2 * y1).""" return v1.x * v2.y - v2.x * v1.y
5,356,649
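The 2-D cross product above returns the z-component of the 3-D cross product, i.e. the signed area of the parallelogram spanned by the two vectors; its sign tells whether v2 lies counter-clockwise (positive) or clockwise (negative) of v1. A small worked example with a throwaway vector type, since the snippet only requires .x and .y attributes:

from collections import namedtuple

Vec = namedtuple("Vec", ["x", "y"])

def cross_product(v1, v2):
    """z-component of v1 x v2, i.e. x1 * y2 - x2 * y1."""
    return v1.x * v2.y - v2.x * v1.y

print(cross_product(Vec(1, 0), Vec(0, 1)))   #  1 -> (0, 1) is counter-clockwise of (1, 0)
print(cross_product(Vec(1, 0), Vec(0, -1)))  # -1 -> clockwise
print(cross_product(Vec(2, 2), Vec(1, 1)))   #  0 -> parallel vectors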
def loadData(fname='Unstra.out2.00008.athdf'): """load 3d bfield and calc the current density""" #data=ath.athdf(fname,quantities=['B1','B2','B3']) time,data=ath.athdf(fname,quantities=['vel1']) vx = data['vel1'] time,data=ath.athdf(fname,quantities=['vel2']) vy = data['vel2'] time,data=ath.athdf(fname,quantities=['vel3']) vz = data['vel3'] x = data['x1f'] y = data['x2f'] z = data['x3f'] # --- def curl(vx,vy,vz,dx,dy,dz): [dzvx,dyvx,dxvx] = np.gradient(vx) [dzvy,dyvy,dxvy] = np.gradient(vy) [dzvz,dyvz,dxvz] = np.gradient(vz) cx = dyvz/dy-dzvy/dz cy = dzvx/dz-dxvz/dx cz = dxvy/dx-dyvx/dy # No need to del the reference by one manually # allow python to perform its own garbage collection # after the function return cxyz #del dzvx #del dzvy #del dzvz return cx,cy,cz # --- dx = dz = x[1]-x[0] dy = y[1]-y[0] jx,jy,jz = curl(vx,vy,vz,dx,dy,dz) w2 = jx**2+jy**2+jz**2 del jx,jy,jz,vx,vy,vz return w2
5,356,650
def list(): """List nonebot builtin drivers.""" search_driver("")
5,356,651
def text_iou(ground_truth: Text, prediction: Text) -> ScalarMetricValue: """ Calculates agreement between ground truth and predicted text """ return float(prediction.answer == ground_truth.answer)
5,356,652
def divisors(num): """ Takes a number and returns all divisors of the number, ordered least to greatest :param num: int :return: list (int) """ # Fill in the function and change the return statement. return 0
5,356,653
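The divisors snippet above is an exercise stub that still returns 0. One possible completion for illustration (not the dataset author's solution), kept at O(sqrt(n)) by pairing each divisor d <= sqrt(num) with num // d:

def divisors(num):
    """All positive divisors of num, ordered least to greatest."""
    small, large = [], []
    d = 1
    while d * d <= num:
        if num % d == 0:
            small.append(d)
            if d != num // d:        # don't double-count a perfect-square root
                large.append(num // d)
        d += 1
    return small + large[::-1]

print(divisors(12))  # [1, 2, 3, 4, 6, 12]
print(divisors(49))  # [1, 7, 49]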
def space_oem(*argv): """Handle oem files Usage: space-oem get <selector>... space-oem insert (- | <file>) space-oem compute (- | <selector>...) [options] space-oem list <selector>... [options] space-oem purge <selector>... [--until <until>] space-oem list-tags <selector>... space-oem tag <selector> <tag> [options] Options: get Retrieve an existing OEM from the database insert Insert an OEM into the database compute Compute OEM from an other OPM, OEM or TLE list List existing ephemerides purge Remove old OEMs. Use --last option list-tags List available tags for ephems of the selected objects tag Create a tag for a particular ephem <selector> Selector of the satellite (see help of the "sat" command) -f, --frame <frame> Frame in which to write the file to -d, --date <date> Start date of the ephem [default: midnight] (format %Y-%m-%dT%H:%M:%S) -r, --range <days> Duration of extrapolation [default: 3d] -s, --step <step> Step size of the OEM [default: 180s] -i, --interp <inter> Interpolation method (linear, lagrange) [default: lagrange] -l, --last <last> When listing print the last N OEM [default: 10] -I, --insert Insert the computed OEM into the database -F, --force Force insertion --until <until> When purging, remove all file older than this date [default: 4w] May be a duration, or a date """ return _generic_cmd("oem", space_oem.__doc__, *argv)
5,356,654
def pytest_addoption(parser): """Describe plugin specified options. """ group = parser.getgroup("syslog", "plugin syslog notifier") group.addoption("--syslog", action="store_true", dest="syslog", default=False, help="Enable syslog plugin. %default by default.")
5,356,655
def main(): """Main entry point of script""" parser = argparse.ArgumentParser(description='Generate training input') # Required arguments parser.add_argument('roads_shp', help='Path to shapefile containing OSM road data') parser.add_argument('records_csv', help='Path to CSV containing record data') # Optional arguments parser.add_argument('--output-dir', help='Directory where files are output', default='.') parser.add_argument('--combined-segments-shp-name', help='Combined segments output .shp name', default='combined_segments.shp') parser.add_argument('--training-csv-name', help='Training input .csv name', default='training_input.csv') parser.add_argument('--intersection-buffer-units', help='Units to buffer each intersection', default=5) parser.add_argument('--tile-max-units', help='Maximum units for each side of a tile', default=3000) parser.add_argument('--max_line_units', help='Maximum units allowed for line segment', default=200) parser.add_argument('--time-zone', help='Time zone of records', default='America/New_York') parser.add_argument('--match-tolerance', help='Units to buffer when matching records to roads', default=5) parser.add_argument('--road-projection', help='Projection id of roads', default='epsg:32718') parser.add_argument('--record-projection', help='Projection id of records', default='epsg:4326') parser.add_argument('--record-col-id', help='Record column: id', default='CRN') parser.add_argument('--record-col-x', help='Record column: x-coordinate', default='LNG') parser.add_argument('--record-col-y', help='Record column: y-coordinate', default='LAT') parser.add_argument('--record-col-occurred', help='Record column: occurred', default='DATETIME') args = parser.parse_args() logger.info('Reading records from {}'.format(args.records_csv)) tz = pytz.timezone(args.time_zone) road_projection = {'init': args.road_projection} record_projection = {'init': args.record_projection} match_tolerance = args.match_tolerance records, min_occurred, max_occurred = read_records( args.records_csv, road_projection, record_projection, tz, args.record_col_id, args.record_col_x, args.record_col_y, args.record_col_occurred ) logger.info('Found {:,} records between {} and {}'.format( len(records), min_occurred.date(), max_occurred.date()) ) logger.info('Reading shapefile from {}'.format(args.roads_shp)) roads, road_bounds = read_roads(args.roads_shp, records, match_tolerance) logger.info('Number of relevant roads in shapefile: {:,}'.format(len(roads))) logger.info('Calculating intersections') int_buffers = get_intersection_buffers(roads, road_bounds, args.intersection_buffer_units, args.tile_max_units) logger.info('Getting intersection parts') int_multilines, non_int_lines = get_intersection_parts(roads, int_buffers, args.max_line_units) combined_segments = int_multilines + non_int_lines logger.info('Found {:,} intersection multilines'.format(len(int_multilines))) logger.info('Found {:,} non-intersection lines'.format(len(non_int_lines))) logger.info('Found {:,} combined segments'.format(len(combined_segments))) segments_with_records = match_records_to_segments( records, combined_segments, match_tolerance) logger.info('Found {:,} segments with records'.format(len(segments_with_records))) schema, segments_with_data = get_segments_with_data( combined_segments, segments_with_records, min_occurred, max_occurred ) logger.info('Compiled data for {:,} segments'.format(len(segments_with_data))) segments_shp_path = os.path.join(args.output_dir, args.combined_segments_shp_name) 
write_segments_shp(segments_shp_path, road_projection, segments_with_data, schema) logger.info('Generated shapefile') training_csv_path = os.path.join(args.output_dir, args.training_csv_name) write__training_csv(training_csv_path, segments_with_data, schema) logger.info('Generated csv for training')
5,356,656
def compare_distance(tree,target): """ Checks tree edit distance. Since every node has a unique position, we know that the node is the same when the positions are the same. Hence, a simple method of counting the number of edits one needs to do to create the target tree out of a given tree is equal to the number of positional differences. """ # check for positional overlap edit_value = 0 for node in target: node.found = False for node in tree: same_node = False for t_node in target: if node.pos[0] == t_node.pos[0] and node.pos[1] == t_node.pos[1]: same_node = True t_node.found = True if same_node == False: edit_value += 1 # count found for node in target: if not node.found: edit_value += 1 return edit_value
5,356,657
def header(name='peptide'): """ Parameters ---------- name Returns ------- """ with open('{}.pdb'.format(name), 'r') as f: file = f.read() model = file.find('\nMODEL') atom = file.find('\nATOM') if atom < 0: raise ValueError('no ATOM entries found in PDB') if model < 0: index = atom else: index = min(model, atom) return file[:index] + '\n'
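# Illustrative usage sketch for header() above (not part of the original snippet).
# It writes a tiny PDB file in the current directory so the example is self-contained;
# real PDB files contain many more record types.
_pdb_text = (
    "HEADER    EXAMPLE PEPTIDE\n"
    "ATOM      1  N   ALA A   1      11.104   6.134  -6.504  1.00  0.00           N\n"
)
with open('peptide.pdb', 'w') as f:
    f.write(_pdb_text)
print(repr(header('peptide')))  # 'HEADER    EXAMPLE PEPTIDE\n'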
5,356,658
def test_credit_card_recognizer_with_template( pii_csv, utterances, dictionary_path, num_of_examples, acceptance_threshold ): """ Test credit card recognizer with a dataset generated from template and a CSV values file :param pii_csv: input csv file location :param utterances: template file location :param dictionary_path: dictionary/vocabulary file location :param num_of_examples: number of samples to be used from dataset to test :param acceptance_threshold: minimum precision/recall allowed for tests to pass """ # read template and CSV files import os dir_path = os.path.dirname(os.path.realpath(__file__)) input_samples = generate( fake_pii_csv=pii_csv.format(dir_path), utterances_file=utterances.format(dir_path), dictionary_path=dictionary_path.format(dir_path), lower_case_ratio=0.5, num_of_examples=num_of_examples, ) scores = score_presidio_recognizer( recognizer=CreditCardRecognizer(), entities_to_keep=["CREDIT_CARD"], input_samples=input_samples, ) if not np.isnan(scores.pii_f): assert acceptance_threshold <= scores.pii_f
5,356,659
def move_tower(disks, from_pole, to_pole, with_pole):
    """
    side note, I hate the tower of hanoi and anyone who thinks this should be used
    to teach recursion should not be allowed to teach recursion.
    if I ever see someone had a pr to do recursion by using itself I would reject it immediately
    """
    if disks >= 1:
        # Move the top disks-1 disks from from_pole to the intermediate pole.
        # The disk count decreases by one on each recursive call (here and below),
        # so the recursion continues until the disk count reaches 0.
        move_tower(disks - 1, from_pole, with_pole, to_pole)
        print(f"moving disk from {from_pole} to {to_pole}")
        # By the time this call bottoms out, the disk count is 0.
        move_tower(disks - 1, with_pole, to_pole, from_pole)
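# Illustrative usage sketch for move_tower above (not part of the original snippet).
# Moving 3 disks from pole "A" to pole "C" with "B" as the spare prints the
# classic 2**3 - 1 = 7 moves.
move_tower(3, "A", "C", "B")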
5,356,660
def test_fst_ap_remove_session_bad_session_id(dev, apdev, test_params): """FST AP remove session - bad session id""" fst_remove_session(apdev, test_params, remove_scenario_bad_session_id, True)
5,356,661
def test_domain_js_xrefs(app, status, warning): """Domain objects have correct prefixes when looking up xrefs""" app.builder.build_all() def assert_refnode(node, mod_name, prefix, target, reftype=None, domain='js'): attributes = { 'refdomain': domain, 'reftarget': target, } if reftype is not None: attributes['reftype'] = reftype if mod_name is not False: attributes['js:module'] = mod_name if prefix is not False: attributes['js:object'] = prefix assert_node(node, **attributes) doctree = app.env.get_doctree('roles') refnodes = list(doctree.traverse(addnodes.pending_xref)) assert_refnode(refnodes[0], None, None, u'TopLevel', u'class') assert_refnode(refnodes[1], None, None, u'top_level', u'func') assert_refnode(refnodes[2], None, u'NestedParentA', u'child_1', u'func') assert_refnode(refnodes[3], None, u'NestedParentA', u'NestedChildA.subchild_2', u'func') assert_refnode(refnodes[4], None, u'NestedParentA', u'child_2', u'func') assert_refnode(refnodes[5], False, u'NestedParentA', u'any_child', domain='') assert_refnode(refnodes[6], None, u'NestedParentA', u'NestedChildA', u'class') assert_refnode(refnodes[7], None, u'NestedParentA.NestedChildA', u'subchild_2', u'func') assert_refnode(refnodes[8], None, u'NestedParentA.NestedChildA', u'NestedParentA.child_1', u'func') assert_refnode(refnodes[9], None, u'NestedParentA', u'NestedChildA.subchild_1', u'func') assert_refnode(refnodes[10], None, u'NestedParentB', u'child_1', u'func') assert_refnode(refnodes[11], None, u'NestedParentB', u'NestedParentB', u'class') assert_refnode(refnodes[12], None, None, u'NestedParentA.NestedChildA', u'class') assert len(refnodes) == 13 doctree = app.env.get_doctree('module') refnodes = list(doctree.traverse(addnodes.pending_xref)) assert_refnode(refnodes[0], 'module_a.submodule', None, 'ModTopLevel', 'class') assert_refnode(refnodes[1], 'module_a.submodule', 'ModTopLevel', 'mod_child_1', 'meth') assert_refnode(refnodes[2], 'module_a.submodule', 'ModTopLevel', 'ModTopLevel.mod_child_1', 'meth') assert_refnode(refnodes[3], 'module_a.submodule', 'ModTopLevel', 'mod_child_2', 'meth') assert_refnode(refnodes[4], 'module_a.submodule', 'ModTopLevel', 'module_a.submodule.ModTopLevel.mod_child_1', 'meth') assert_refnode(refnodes[5], 'module_b.submodule', None, 'ModTopLevel', 'class') assert_refnode(refnodes[6], 'module_b.submodule', 'ModTopLevel', 'module_a.submodule', 'mod') assert len(refnodes) == 7
5,356,662
def isoUTC2datetime(iso):
    """Convert an ISO8601 (UTC only) like string date/time value to a
    :obj:`datetime.datetime` object.

    :param str iso: ISO8601 string
    :rtype: datetime.datetime
    """
    formats = ["%Y-%m-%d %H:%M:%S",
               "%Y-%m-%d %H:%M:%S.%f"]
    if 'T' in iso:
        formats = ["%Y-%m-%dT%H:%M:%S",
                   "%Y-%m-%dT%H:%M:%S.%f",
                   "%Y-%m-%dT%H:%M:%SZ",
                   "%Y-%m-%dT%H:%M:%S.%fZ"]
    for fmt in formats:
        try:
            return datetime.datetime.strptime(iso, fmt)
        except ValueError:
            continue
    raise ValueError("Couldn't parse ISO8601 string '{}'".format(iso))
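# Illustrative usage sketch for isoUTC2datetime above (not part of the original snippet);
# it assumes the surrounding module imports the datetime module, as the function body does.
print(isoUTC2datetime("2021-03-01T12:30:45Z"))     # 2021-03-01 12:30:45
print(isoUTC2datetime("2021-03-01 12:30:45.250"))  # 2021-03-01 12:30:45.250000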
5,356,663
def groupstatus(aid: int, state: int = 0) -> EndpointResult:
    """Retrieve anime release status for different groups.

    :param aid: anidb anime id
    :type aid: int
    :param state: release state. int 1 to 6.
        Example: zenchi.mappings.group_status.ONGOING
    :type state: int, optional
    :return: a tuple (data, code). data is a dictionary with the keys:

        if code in (325, 330):
            :message str: NO SUCH GROUPS FOUND, NO SUCH ANIME
        if code == 225:
            :status_list: List of dictionaries with the following keys:

                :group_id int:
                :group_name str:
                :completion_state int:
                :last_episode_number int:
                :rating int:
                :votes int:
                :episode_range str:
            :truncated bool: if the response was truncated because it didn't fit
                the UDP packet, this will be True.
    :rtype: EndpointResult
    """
    def cb(code: int, response: str) -> Optional[EndpointDict]:
        if code in (325, 330):
            return dict(message=response_message[code])
        if code == 225:
            result = []
            groups_data = response.splitlines()[1:]
            truncated = False
            for group_data in groups_data:
                parts = group_data.split("|")
                if len(parts) < 7:
                    logger.warning(
                        "Response was truncated, too much data for UDP packet."
                    )
                    truncated = True
                    break
                result.append(
                    {
                        "group_id": int(parts[0]),
                        "group_name": parts[1],
                        "completion_state": int(parts[2]),
                        "last_episode_number": int(parts[3]),
                        "rating": int(parts[4]),
                        "votes": int(parts[5]),
                        "episode_range": parts[6],
                    }
                )
            return dict(status_list=result, truncated=truncated)
        return None

    params: Dict[str, Union[str, int]] = dict(aid=aid)
    if state:
        params["state"] = state
    return send("GROUPSTATUS", params, cb)
5,356,664
def schedule_credit_card_purchase_creation(workspace_id: int, expense_group_ids: List[str]): """ Schedule credit card purchase creation :param expense_group_ids: List of expense group ids :param workspace_id: workspace id :return: None """ if expense_group_ids: expense_groups = ExpenseGroup.objects.filter( Q(tasklog__id__isnull=True) | ~Q(tasklog__status__in=['IN_PROGRESS', 'COMPLETE']), workspace_id=workspace_id, id__in=expense_group_ids, creditcardpurchase__id__isnull=True, exported_at__isnull=True ).all() chain = Chain() for expense_group in expense_groups: task_log, _ = TaskLog.objects.get_or_create( workspace_id=expense_group.workspace_id, expense_group=expense_group, defaults={ 'status': 'ENQUEUED', 'type': 'CREATING_CREDIT_CARD_PURCHASE' } ) if task_log.status not in ['IN_PROGRESS', 'ENQUEUED']: task_log.type = 'CREATING_CREDIT_CARD_PURCHASE' task_log.status = 'ENQUEUED' task_log.save() chain.append('apps.quickbooks_online.tasks.create_credit_card_purchase', expense_group, task_log.id) if chain.length(): chain.run()
5,356,665
def get_communities_codes(communities, fields=None, community_field='Community'): """From the postal code conversion file, select entries for the `communities`. This function is similar to get_community_codes, but works if `communities` and `fields` are strings or lists of strings. """ if not isinstance(communities, pd.DataFrame) and not isinstance(communities, pd.Series): communities = pd.Series(communities, name=community_field) df = _pccf_df.merge(communities, on=community_field) return df if fields is None else df[ensure_list(fields) + [community_field]].drop_duplicates()
5,356,666
def GLMFit_(file, designMatrix, mask, outputVBA, outputCon, fit="Kalman_AR1"):
    """
    Call the GLM Fit function with appropriate arguments

    Parameters
    ----------
    file
    designMatrix
    mask
    outputVBA
    outputCon
    fit='Kalman_AR1'

    Returns
    -------
    glm, a vba.VBA instance representing the GLM
    """
    from vba import VBA
    from dataFrame import DF
    if fit == "Kalman_AR1":
        model = "ar1"
        method = "kalman"
    elif fit == "Ordinary Least Squares":
        method = "ols"
        model = "spherical"
    elif fit == "Kalman":
        method = "kalman"
        model = "spherical"
    else:
        raise ValueError("Unknown fit type: %s" % fit)

    s = dict()
    s["GlmDumpFile"] = outputVBA
    s["ConfigFilePath"] = outputCon
    s["DesignFilePath"] = designMatrix

    tab = DF.read(designMatrix)
    glm = VBA(tab, mask_url=mask, create_design_mat=False, mri_names=file,
              model=model, method=method)
    glm.fit()
    glm.save(s)
    return glm
5,356,667
def _load_blocks_txt():
    """Load block name from Blocks.txt."""
    with open_unicode_data_file("Blocks.txt") as blocks_txt:
        block_ranges = _parse_code_ranges(blocks_txt.read())
        for first, last, block_name in block_ranges:
            for character_code in range(first, last + 1):  # range() keeps this Python 3 compatible
                _block_data[character_code] = block_name
5,356,668
def test_wpas_mesh_open(dev, apdev): """wpa_supplicant open MESH network connectivity""" check_mesh_support(dev[0]) add_open_mesh_network(dev[0], freq="2462", basic_rates="60 120 240") add_open_mesh_network(dev[1], freq="2462", basic_rates="60 120 240") check_mesh_joined_connected(dev, connectivity=True) state = dev[0].get_status_field("wpa_state") if state != "COMPLETED": raise Exception("Unexpected wpa_state on dev0: " + state) state = dev[1].get_status_field("wpa_state") if state != "COMPLETED": raise Exception("Unexpected wpa_state on dev1: " + state) mode = dev[0].get_status_field("mode") if mode != "mesh": raise Exception("Unexpected mode: " + mode) dev[0].scan(freq="2462") bss = dev[0].get_bss(dev[1].own_addr()) if bss and 'ie' in bss and "ff0724" in bss['ie']: sta = dev[0].request("STA " + dev[1].own_addr()) logger.info("STA info:\n" + sta.rstrip()) if "[HE]" not in sta: raise Exception("Missing STA HE flag") if "[VHT]" in sta: raise Exception("Unexpected STA VHT flag")
5,356,669
def get_stoch_rsi(quotes: Iterable[Quote], rsi_periods: int, stoch_periods: int, signal_periods: int, smooth_periods: int = 1): """Get Stochastic RSI calculated. Stochastic RSI is a Stochastic interpretation of the Relative Strength Index. Parameters: `quotes` : Iterable[Quote] Historical price quotes. `rsi_periods` : int Number of periods for the RSI. `stoch_periods` : int Number of periods for the Stochastic. `signal_periods` : int Number of periods for the Stochastic RSI SMA signal line. `smooth_periods` : int, defaults 1 Number of periods for Stochastic Smoothing. Use 1 for Fast or 3 for Slow. Returns: `StochRSIResults[StochRSIResult]` StochRSIResults is list of StochRSIResult with providing useful helper methods. See more: - [Stochastic RSI Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/StochRsi/#content) - [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content) """ stoch_rsi_results = CsIndicator.GetStochRsi[Quote](CsList(Quote, quotes), rsi_periods, stoch_periods, signal_periods, smooth_periods) return StochRSIResults(stoch_rsi_results, StochRSIResult)
5,356,670
def view_client_account(account):
    """Show the current state of the client's account"""
    print(account.full_info())
    print(account.show_history())
5,356,671
def send_syslog(msg): """send a log message to AlienVault""" Config['netsyslogger'].log(syslog.LOG_USER, syslog.LOG_NOTICE, msg, pid=True)
5,356,672
def PUtilHann (inUV, outUV, err, scratch=False): """ Hanning smooth a UV data set returns smoothed UV data object inUV = Python UV object to smooth Any selection editing and calibration applied before average. outUV = Predefined UV data if scratch is False, ignored if scratch is True. err = Python Obit Error/message stack scratch = True if this is to be a scratch file (same type as inUV) """ ################################################################ if inUV.myClass=='AIPSUVData': raise TypeError("Function unavailable for "+inUV.myClass) # Checks if not inUV.UVIsA(): raise TypeError("inUV MUST be a Python Obit UV") if ((not scratch) and (not outUV.UVIsA())): raise TypeError("outUV MUST be a Python Obit UV") if not OErr.OErrIsA(err): raise TypeError("err MUST be an OErr") # # Create output for scratch if scratch: outUV = UV("None") outUV.me = Obit.UVUtilHann(inUV.me, scratch, outUV.me, err.me) if err.isErr: OErr.printErrMsg(err, "Error Hanning UV data") # Get scratch file info if scratch: PUVInfo (outUV, err) return outUV # end PUtilHann
5,356,673
def test_example_8_22__block_collection_nodes(): """ Example 8.22. Block Collection Nodes Expected: %YAML 1.2 --- !!map { ? !!str "sequence" : !!seq [ !!str "entry", !!seq [ !!str "nested" ], ], ? !!str "mapping" : !!map { ? !!str "foo" : !!str "bar", }, } """ text = dedent(""" sequence: !!seq - entry - !!seq - nested mapping: !!map foo: bar """)[1:-1] expected = None nodes = parser.parse(text) print_nodes(nodes) assert nodes == expected
5,356,674
def str2bytes(seq):
    """
    Converts a string to a list of integers
    """
    return list(map(ord, str(seq)))
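# Illustrative usage sketch for str2bytes above (not part of the original snippet).
print(str2bytes("Hi!"))  # [72, 105, 33]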
5,356,675
def __downloadFilings(cik: str) -> list: """Function to download the XML text of listings pages for a given CIK from the EDGAR database. Arguments: cik {str} -- Target CIK. Returns: list -- List of page XML, comprising full listing metadata for CIK. """ idx = 0 # Current page index end = False # Flags for loop count = 100 # Number of results per page (limited by SEC) # Text indicating next page exists next_page_text = 'rel="next" type="application/atom+xml" />' pages = [] while not end: # Making request page_text = __makeRequest(cik=cik, start_idx=idx, count=count) end = (page_text.find(next_page_text) == -1) # Update end flag idx += count # Increment index for next page pages.append(page_text) # Save page text return pages
5,356,676
def test_payment_dates_br(): """ Test routine: payment_dates_br(settle, maturity) Reference: http://www.tesouro.fazenda.gov.br/documents/10180/258262/NTN-F/1d23ed84-4921-49f4-891b-fececd3115f9 Expected Result: ['01/07/2004', '01/01/2005', '01/07/2005', '01/01/2006', '01/07/2006', '01/01/2007', '01/07/2007', '01/01/2008'] :return: """ from pprint import pprint pprint(list(map(dt.date2str_dmy, payment_dates("9/1/2004", "1/1/2008"))))
5,356,677
def KK_RC43_fit(params, w, t_values): """ Kramers-Kronig Function: -RC- Kristian B. Knudsen ([email protected] / [email protected]) """ Rs = params["Rs"] R1 = params["R1"] R2 = params["R2"] R3 = params["R3"] R4 = params["R4"] R5 = params["R5"] R6 = params["R6"] R7 = params["R7"] R8 = params["R8"] R9 = params["R9"] R10 = params["R10"] R11 = params["R11"] R12 = params["R12"] R13 = params["R13"] R14 = params["R14"] R15 = params["R15"] R16 = params["R16"] R17 = params["R17"] R18 = params["R18"] R19 = params["R19"] R20 = params["R20"] R21 = params["R21"] R22 = params["R22"] R23 = params["R23"] R24 = params["R24"] R25 = params["R25"] R26 = params["R26"] R27 = params["R27"] R28 = params["R28"] R29 = params["R29"] R30 = params["R30"] R31 = params["R31"] R32 = params["R32"] R33 = params["R33"] R34 = params["R34"] R35 = params["R35"] R36 = params["R36"] R37 = params["R37"] R38 = params["R38"] R39 = params["R39"] R40 = params["R40"] R41 = params["R41"] R42 = params["R42"] R43 = params["R43"] return ( Rs + (R1 / (1 + w * 1j * t_values[0])) + (R2 / (1 + w * 1j * t_values[1])) + (R3 / (1 + w * 1j * t_values[2])) + (R4 / (1 + w * 1j * t_values[3])) + (R5 / (1 + w * 1j * t_values[4])) + (R6 / (1 + w * 1j * t_values[5])) + (R7 / (1 + w * 1j * t_values[6])) + (R8 / (1 + w * 1j * t_values[7])) + (R9 / (1 + w * 1j * t_values[8])) + (R10 / (1 + w * 1j * t_values[9])) + (R11 / (1 + w * 1j * t_values[10])) + (R12 / (1 + w * 1j * t_values[11])) + (R13 / (1 + w * 1j * t_values[12])) + (R14 / (1 + w * 1j * t_values[13])) + (R15 / (1 + w * 1j * t_values[14])) + (R16 / (1 + w * 1j * t_values[15])) + (R17 / (1 + w * 1j * t_values[16])) + (R18 / (1 + w * 1j * t_values[17])) + (R19 / (1 + w * 1j * t_values[18])) + (R20 / (1 + w * 1j * t_values[19])) + (R21 / (1 + w * 1j * t_values[20])) + (R22 / (1 + w * 1j * t_values[21])) + (R23 / (1 + w * 1j * t_values[22])) + (R24 / (1 + w * 1j * t_values[23])) + (R25 / (1 + w * 1j * t_values[24])) + (R26 / (1 + w * 1j * t_values[25])) + (R27 / (1 + w * 1j * t_values[26])) + (R28 / (1 + w * 1j * t_values[27])) + (R29 / (1 + w * 1j * t_values[28])) + (R30 / (1 + w * 1j * t_values[29])) + (R31 / (1 + w * 1j * t_values[30])) + (R32 / (1 + w * 1j * t_values[31])) + (R33 / (1 + w * 1j * t_values[32])) + (R34 / (1 + w * 1j * t_values[33])) + (R35 / (1 + w * 1j * t_values[34])) + (R36 / (1 + w * 1j * t_values[35])) + (R37 / (1 + w * 1j * t_values[36])) + (R38 / (1 + w * 1j * t_values[37])) + (R39 / (1 + w * 1j * t_values[38])) + (R40 / (1 + w * 1j * t_values[39])) + (R41 / (1 + w * 1j * t_values[40])) + (R42 / (1 + w * 1j * t_values[41])) + (R43 / (1 + w * 1j * t_values[42])) )
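# Behavior-equivalent compact form of KK_RC43_fit (illustrative, not part of the original
# snippet). It assumes params supports the same name-based indexing the original uses
# (e.g. lmfit-style Parameters), and simply replaces the 43 explicit terms with a loop.
def KK_RC43_fit_compact(params, w, t_values):
    return params["Rs"] + sum(
        params["R{}".format(i + 1)] / (1 + w * 1j * t_values[i]) for i in range(43)
    )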
5,356,678
def vconstant(value, length, dtype=None, chunk_size=1024): """Creates a virtual column with constant values, which uses 0 memory. :param value: The value with which to fill the column :param length: The length of the column, i.e. the number of rows it should contain. :param dtype: The preferred dtype for the column. :param chunk_size: Could be used to optimize the performance (evaluation) of this column. """ from .column import ColumnVirtualConstant return ColumnVirtualConstant(value=value, length=length, dtype=dtype, chunk_size=chunk_size)
5,356,679
def getsize(filename): """Return the size of a file, reported by os.stat().""" return os.stat(filename).st_size
5,356,680
def get_all_nsds_of_node(logger, instance):
    """
    This function performs "mmlsnsd -X -Y".
    Args:
        instance (str): instance for which disks are used by the filesystem.
    Returns:
        all_nsd_names (list): NSD names in list format.
            Ex: [nsd_1a_1_0, nsd_1c_1_0, nsd_1c_d_1]
    """
    logger.debug("Function Entry: get_all_nsds_of_node. "
                 "Args: instance={0}".format(instance))
    nsd_list = SpectrumScaleNSD.get_all_nsd_info()
    all_nsd_names = []
    for nsd in nsd_list:
        if nsd.get_remarks() == 'server node' and instance in nsd.get_server_list():
            all_nsd_names.append(nsd.get_name())
    logger.debug("Function Exit: get_all_nsds_of_node(). "
                 "Return Params: all_nsd_names={0} ".format(all_nsd_names))
    return all_nsd_names
5,356,681
def gen_color_palette(n: int): """ Generates a hex color palette of size n, without repeats and only light colors (easily visible on dark background). Adapted from code by 3630 TAs Binit Shah and Jerred Chen Args: n (int): number of clouds, each cloud gets a unique color """ palette = [] do_replace = False if len(COLOR_OPTIONS) >= n else True for i in np.random.choice(len(COLOR_OPTIONS), n, replace=do_replace): palette.append(COLOR_OPTIONS[i]) return palette
5,356,682
def absolute_vorticity(u, v, dx, dy, lats, dim_order='yx'):
    """Calculate the absolute vorticity of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less than
        the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less than
        the size of `u` along the applicable axis.
    lats : (M, N) ndarray
        latitudes of the wind data
    dim_order : str, optional
        Ordering of dimensions in the passed-in arrays, either ``'yx'`` (default) or ``'xy'``.

    Returns
    -------
    (M, N) ndarray
        absolute vorticity

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions of
    (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    f = coriolis_parameter(lats)
    relative_vorticity = vorticity(u, v, dx, dy, dim_order=dim_order)
    return relative_vorticity + f
5,356,683
def run(adeg=30,bdeg=15) : """ Generate pstricks code for drawing a diagram to demonstrate the angle addition formulas. """ a,b = map(lambda _ : _*pi/180.0, (adeg,bdeg)) ca, cb, cab = map(cos, (a,b,a+b)) sa, sb, sab = map(sin, (a,b,a+b)) # # Here are the points, vaguely where they should # be on the graph (How's this for literate programming!) # D=(-sa*sb,sab); Q=(cab,sab); C=(ca*cb,sab) pass; P=(ca*cb,ca*sb); R=(-sa*sb,sa*cb); O=(0,0); A=(-sa*sb,0); B=(ca*cb,0) lines = [(A,B),(B,C),(C,D),(D,A),(O,P),(P,Q),(Q,R),(R,O),(O,Q)] template = r'\psline[linewidth=0.1pt]{cc-cc}(%7.4f,%7.4f)(%7.4f,%7.4f)' for (P1,P2) in lines : print(template%(P1[0],P1[1],P2[0],P2[1])) template = r'\psarc[linecolor=black,linewidth=0.1pt](%7.4f,%7.4f)(%7.4f,%7.4f)' print(template % (O[0],O[1],0,b)) print(template % (O[0],O[1],b,b+a))
5,356,684
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = input.new_empty(shape).bernoulli_(keep_prob) if keep_prob > 0.0 and scale_by_keep: random_tensor.div_(keep_prob) return input * random_tensor
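# Illustrative usage sketch for drop_path above (not part of the original snippet);
# it assumes PyTorch is available, since the function operates on torch tensors.
import torch

_x = torch.ones(4, 3, 8, 8)
_out = drop_path(_x, drop_prob=0.5, training=True)
# During training each sample is either zeroed or rescaled by 1/keep_prob;
# in eval mode the input passes through unchanged.
print(_out.shape, torch.equal(drop_path(_x, drop_prob=0.5, training=False), _x))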
5,356,685
def ask_user_config() -> Dict[str, Any]: """ Ask user a few questions to build the configuration. Interactive questions built using https://github.com/tmbo/questionary :returns: Dict with keys to put into template """ questions: List[Dict[str, Any]] = [ { "type": "confirm", "name": "dry_run", "message": "Do you want to enable Dry-run (simulated trades)?", "default": True, }, { "type": "text", "name": "stake_currency", "message": "Please insert your stake currency:", "default": 'USDT', }, { "type": "text", "name": "stake_amount", "message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):", "default": "100", "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val), "filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"' if val == UNLIMITED_STAKE_AMOUNT else val }, { "type": "text", "name": "max_open_trades", "message": f"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):", "default": "3", "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val), "filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"' if val == UNLIMITED_STAKE_AMOUNT else val }, { "type": "text", "name": "timeframe", "message": "Please insert your desired timeframe (e.g. 5m):", "default": "5m", }, { "type": "text", "name": "fiat_display_currency", "message": "Please insert your display Currency (for reporting):", "default": 'USD', }, { "type": "select", "name": "exchange_name", "message": "Select exchange", "choices": [ "binance", "binanceus", "bittrex", "kraken", "ftx", "kucoin", "gateio", Separator(), "other", ], }, { "type": "autocomplete", "name": "exchange_name", "message": "Type your exchange name (Must be supported by ccxt)", "choices": available_exchanges(), "when": lambda x: x["exchange_name"] == 'other' }, { "type": "password", "name": "exchange_key", "message": "Insert Exchange Key", "when": lambda x: not x['dry_run'] }, { "type": "password", "name": "exchange_secret", "message": "Insert Exchange Secret", "when": lambda x: not x['dry_run'] }, { "type": "password", "name": "exchange_key_password", "message": "Insert Exchange API Key password", "when": lambda x: not x['dry_run'] and x['exchange_name'] == 'kucoin' }, { "type": "confirm", "name": "telegram", "message": "Do you want to enable Telegram?", "default": False, }, { "type": "password", "name": "telegram_token", "message": "Insert Telegram token", "when": lambda x: x['telegram'] }, { "type": "text", "name": "telegram_chat_id", "message": "Insert Telegram chat id", "when": lambda x: x['telegram'] }, { "type": "confirm", "name": "api_server", "message": "Do you want to enable the Rest API (includes FreqUI)?", "default": False, }, { "type": "text", "name": "api_server_listen_addr", "message": "Insert Api server Listen Address (best left untouched default!)", "default": "127.0.0.1", "when": lambda x: x['api_server'] }, { "type": "text", "name": "api_server_username", "message": "Insert api-server username", "default": "freqtrader", "when": lambda x: x['api_server'] }, { "type": "text", "name": "api_server_password", "message": "Insert api-server password", "when": lambda x: x['api_server'] }, ] answers = prompt(questions) if not answers: # Interrupted questionary sessions return an empty dict. raise OperationalException("User interrupted interactive questions.") # Force JWT token to be a random string answers['api_server_jwt_key'] = secrets.token_hex() return answers
5,356,686
def generate_menusystem(): """ Generate Top-level Menu Structure (cached for specified timeout) """ return '[%s] Top-level Menu System' % timestamp()
5,356,687
def describe_node_association_status(NodeAssociationStatusToken=None, ServerName=None): """ Returns the current status of an existing association or disassociation request. A ResourceNotFoundException is thrown when no recent association or disassociation request with the specified token is found, or when the server does not exist. A ValidationException is raised when parameters of the request are not valid. See also: AWS API Documentation :example: response = client.describe_node_association_status( NodeAssociationStatusToken='string', ServerName='string' ) :type NodeAssociationStatusToken: string :param NodeAssociationStatusToken: [REQUIRED] :type ServerName: string :param ServerName: [REQUIRED] The name of the server from which to disassociate the node. :rtype: dict :return: { 'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS' } :returns: SUCCESS : The association or disassociation succeeded. FAILED : The association or disassociation failed. IN_PROGRESS : The association or disassociation is still in progress. """ pass
5,356,688
def main():
    """ main loop """
    #TODO: enable parallelization to use multiple cores
    N = int(1e18)

    def sum(N):
        start = time.time()
        result = 0
        for i in range(N):
            if i % int(1e9) == 0:
                print("** step %i **" % int(i/1e9))
            result += i
        end = time.time()
        print("Sum till %s yields %s, took %s seconds" % (N, result, end-start))
        return result, end-start

    def count_friend_numbers(N, base=10):
        """
        Project Euler problem 612 - https://projecteuler.net/problem=612
        Let's call two numbers friend numbers if their representation in base 10 has
        at least one common digit. E.g. 1123 and 3981 are friend numbers.
        Let f(N) be the number of pairs (p,q) with 1≤p<q<N such that p and q are friend numbers.
        This function will calculate f(N)

        :param N: ceiling number to calculate the number of friend numbers
        :return: number of friend-number pairs (p,q) s.t. 1≤p<q<N
        """
        start = time.time()
        result = 0
        for i in range(1, base):
            # 1. till N=100=1e2
            # single digit: friends XX
            result += 2*base-2
            # double digits: friends XX
            result += int( ((base-1)+(base-i))*i/2 )
            result += int(( 1 + base-i-2)*(base-i-2)/2 )
            result += (i+1)*2*(base-i-1)
            result += (base-i-1)*(base-2 + 2*(base-i-1))
            # 2. till N=1000=1e3
            # single digits: extra friends XXX
            result += base**2 + (base-2)*(2*base-1)
            # double digits: extra friends XXX
            result += (base-2)*( 2*base**2 + (base-2)*(2*base-1) + (base-3)*(2*base-1) )
            result += 2*( base**2 + (base-2)*base + 2*(base-2)*(base-1) )
            # triple digits: friends XXX
            # 3. till N=10000=1e4
            # single digits: extra friends XXXX
            # result += base**3 + (base-2)*( base**2 + (base-1)*(2*base-1) )
        end = time.time()
        print("Counting friend numbers till %s yields %s, took %0.5f seconds" % (N, result, end-start))
        return result, end-start

    # sum(N)
    count_friend_numbers(100)
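# Naive O(N^2) reference for f(N) (illustrative, not part of the original snippet):
# count pairs 1 <= p < q < N whose decimal representations share at least one digit.
# Useful as a small-N sanity check for the closed-form attempt in count_friend_numbers above.
def count_friend_numbers_bruteforce(N):
    count = 0
    for p in range(1, N):
        digits_p = set(str(p))
        for q in range(p + 1, N):
            if digits_p & set(str(q)):
                count += 1
    return count

print(count_friend_numbers_bruteforce(100))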
5,356,689
async def simple_post(session, url: str, data: dict, timeout: int = 10) -> Optional[dict]:
    """
    A simple post function with exception feedback

    Args:
        session (CommandSession): current session
        url (str): post url
        data (dict): post data
        timeout (int): timeout threshold

    Returns:
        Json response in dict if no exception occurred
        Otherwise, return None and send feedback to user.
    """
    import json

    import aiohttp
    import asyncio
    from loguru import logger

    try:
        logger.debug(f"Start posting {data} to {url} ...")
        async with aiohttp.ClientSession() as client:
            async with client.post(url, data=data, timeout=timeout,
                                   proxy=get_local_proxy()) as response:
                if response.status != 200:
                    logger.error(f"Cannot connect to {url}, Status: {response.status}")
                    await session.send("无法连接到服务器")  # "Unable to connect to the server"
                    return None
                r = json.loads(await response.text())
                logger.debug(f"Response: {r}")
                return r
    except asyncio.TimeoutError:
        logger.error(f"Cannot connect to {url}, Error: Timeout")
        await session.send("请求超时")  # "Request timed out"
5,356,690
def test_builder_schema(tmp_path: Path, samples: Path): """generate the global ibek schema""" schema_path = tmp_path / "schema.json" result = runner.invoke(cli, ["ibek-schema", str(schema_path)]) assert result.exit_code == 0, f"ibek-schema failed with: {result}" expected = json.loads(open(samples / "schemas" / "ibek.schema.json").read()) # Don't care if version number didn't update to match if the rest is the same # expected["title"] = mock.ANY actual = json.loads(open(schema_path).read()) assert expected == actual
5,356,691
def test_check_libgmt(): """ Make sure check_libgmt fails when given a bogus library. """ libgmt = FakedLibGMT("/path/to/libgmt.so") msg = ( # pylint: disable=protected-access f"Error loading '{libgmt._name}'. " "Couldn't access function GMT_Create_Session. " "Ensure that you have installed an up-to-date GMT version 6 library. " "Please set the environment variable 'GMT_LIBRARY_PATH' to the " "directory of the GMT 6 library." ) with pytest.raises(GMTCLibError, match=msg): check_libgmt(libgmt)
5,356,692
def gnomonic_proj(lon, lat, lon0=0, lat0=0): """ lon, lat : arrays of the same shape; longitude and latitude of points to be projected lon0, lat0: floats, longitude and latitude in radians for the tangency point --------------------------- Returns the gnomonic projection, x, y https://mathworld.wolfram.com/GnomonicProjection.html """ cosc = sin(lat0)*sin(lat) + cos(lat0)*cos(lat)*cos(lon-lon0) x = cos(lat)*sin(lon-lon0)/cosc y = (cos(lat0)*sin(lat) - sin(lat0)*cos(lat)*cos(lon-lon0))/cosc return x, y
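# Illustrative usage sketch for gnomonic_proj above (not part of the original snippet);
# it assumes sin/cos come from numpy (or math), matching the bare names used in the function.
from numpy import radians

# A point 10 degrees east of the tangency point on the equator projects to (tan(10 deg), 0).
x, y = gnomonic_proj(radians(10.0), 0.0, lon0=0.0, lat0=0.0)
print(x, y)  # approximately 0.1763, 0.0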
5,356,693
def local_pluggables(pluggable_type): """ Accesses pluggable names Args: pluggable_type (Union(PluggableType,str)): The pluggable type Returns: list[str]: pluggable names Raises: AquaError: if the type is not registered """ _discover_on_demand() if isinstance(pluggable_type, str): for ptype in PluggableType: if ptype.value == pluggable_type: pluggable_type = ptype break if not isinstance(pluggable_type, PluggableType): raise AquaError( 'Invalid pluggable type {}'.format(pluggable_type)) if pluggable_type not in _REGISTRY_PLUGGABLE.registry: raise AquaError('{} not registered'.format(pluggable_type)) return [pluggable.name for pluggable in _REGISTRY_PLUGGABLE.registry[pluggable_type].values()]
5,356,694
async def test_migrator_existing_config(hass, store, hass_storage): """Test migrating existing config.""" with patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove: data = await storage.async_migrator( hass, "old-path", store, old_conf_load_func=lambda _: {"old": "config"} ) assert len(mock_remove.mock_calls) == 1 assert data == {"old": "config"} assert hass_storage[store.key] == { "key": MOCK_KEY, "version": MOCK_VERSION, "minor_version": 1, "data": data, }
5,356,695
def view_folio_contact(request, folio_id=None): """ View contact page within folio """ folio = get_object_or_404(Folio, pk=folio_id) if not folio.is_published and folio.author_id != request.user: return render( request, 'showcase/folio_is_not_published.html' ) author = get_object_or_404( UserAccount, pk=folio.author_id.id ) message_form = SendAuthorMessageForm() context = { "user": request.user, "folio": folio, "author": author, "form": message_form } return render( request, 'showcase/view_folio_contact.html', context=context)
5,356,696
def faom03(t): """ Wrapper for ERFA function ``eraFaom03``. Parameters ---------- t : double array Returns ------- c_retval : double array Notes ----- The ERFA documentation is below. - - - - - - - - - - e r a F a o m 0 3 - - - - - - - - - - Fundamental argument, IERS Conventions (2003): mean longitude of the Moon's ascending node. Given: t double TDB, Julian centuries since J2000.0 (Note 1) Returned (function value): double Omega, radians (Note 2) Notes: 1) Though t is strictly TDB, it is usually more convenient to use TT, which makes no significant difference. 2) The expression used is as adopted in IERS Conventions (2003) and is from Simon et al. (1994). References: McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), IERS Technical Note No. 32, BKG (2004) Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 Copyright (C) 2013-2017, NumFOCUS Foundation. Derived, with permission, from the SOFA library. See notes at end of file. """ c_retval = ufunc.faom03(t) return c_retval
5,356,697
def get_dtindex(interval, begin, end=None): """Creates a pandas datetime index for a given interval. Parameters ---------- interval : str or int Interval of the datetime index. Integer values will be treated as days. begin : datetime Datetime index start date. end : datetime, optional Datetime index end date, defaults to current date. Returns ------- dtindex : pandas.tseries.index.DatetimeIndex Datetime index. """ if end is None: end = datetime.now() if interval in ['dekad', 'dekadal', 'decadal', 'decade']: dtindex = dekad_index(begin, end) elif interval in ['daily', 'day', '1']: dtindex = pd.date_range(begin, end, freq='D') elif interval in ['weekly', 'week', '7']: begin2 = begin - timedelta(begin.weekday()) + timedelta(6) dtindex = pd.date_range(begin2, end, freq='7D') elif interval in ['monthly', 'month']: lday = calendar.monthrange(end.year, end.month)[1] end = datetime(end.year, end.month, lday) dtindex = pd.date_range(begin, end, freq='M') if type(interval) is int: dtindex = pd.date_range(begin, end, freq=str(str(interval) + 'D')) return dtindex
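# Illustrative usage sketch for get_dtindex above (not part of the original snippet);
# it assumes pandas and the module-level imports the function relies on are available.
from datetime import datetime

_idx = get_dtindex('daily', datetime(2020, 1, 1), end=datetime(2020, 1, 5))
print(len(_idx))  # 5 -- one entry per day, end date inclusive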
5,356,698
def asanyarray(a, dtype=None, order=None): """Converts the input to an array, but passes ndarray subclasses through. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes scalars, lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. dtype : dtype, optional By default, the dtype is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-stype) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray or an ndarray subclass Array interpretation of *a*. If *a* is a subclass of ndarray, it is returned as-is and no copy is performed. See Also -------- asarray : Converts the input to an array. Examples -------- Convert a list into an array: >>> import nlcpy as vp >>> a = [1, 2] >>> vp.asanyarray(a) array([1, 2]) """ if isinstance(a, ndarray): if dtype is None and order is None: return a elif dtype is not None and order is None: if a.dtype == numpy.dtype(dtype): return a elif dtype is None and order is not None: order_char = internal._normalize_order(order) order_char = chr(core._update_order_char(a, order_char)) if order_char == 'C' and a._c_contiguous: return a if order_char == 'F' and a._f_contiguous: return a else: order_char = internal._normalize_order(order) order_char = chr(core._update_order_char(a, order_char)) if a.dtype == numpy.dtype(dtype) and \ (order_char == 'C' and a._c_contiguous or order_char == 'F' and a._f_contiguous): return a return core.array(a, dtype=dtype, order=order)
5,356,699