content: string, lengths 22 to 815k
id: int64, values 0 to 4.91M
def assert_sdf_equal(f1:str, f2:str): """ Diff two sd files and assert that they are equal """ outStrg = io.read_file(f1) try: refStrg = io.read_file(f2) except FileNotFoundError: raise AssertionError(f"Reference File does not exist {f2} for {f1}") # drop mol timestamp line (2nd line in sdf file; not tested with rdkit and on windows) outStrg = re.sub('((^|\\$\\$\\$\\$[\r\n]).*[\r\n]).*([\r\n])','\\1\\3', outStrg).splitlines(1) refStrg = re.sub('((^|\\$\\$\\$\\$[\r\n]).*[\r\n]).*([\r\n])','\\1\\3', refStrg).splitlines(1) diff = ''.join(difflib.unified_diff(outStrg, refStrg)) if diff: log.debug(f'sdf mismatch: {f1}, {f2}') raise AssertionError(diff)
5,355,400
def test_award_update_from_earliest_transaction(): """Test award fields that should be updated with the earliest transaction info.""" award = mommy.make('awards.Award') mommy.make( 'awards.Transaction', award=award, # since this is the award's first transaction, # the txn action_date will become the award # signed date action_date=datetime.date(2016, 1, 1) ) # adding later transaction should not change award values mommy.make( 'awards.Transaction', award=award, action_date=datetime.date(2017, 1, 1) ) update_awards() award.refresh_from_db() assert award.date_signed == datetime.date(2016, 1, 1) # adding earlier transaction should update award values mommy.make( 'awards.Transaction', award=award, action_date=datetime.date(2010, 1, 1) ) update_awards() award.refresh_from_db() assert award.date_signed == datetime.date(2010, 1, 1)
5,355,401
def _reorder_for_qbb_experiment(df: pd.DataFrame) -> pd.DataFrame: """By default the entries are ordered alphabetically. We want SPOTA, EPOpt, PPO""" print("Changed the order") return df.iloc[[2, 0, 1]]
5,355,402
def get_trace_sink_output(project: Optional[pulumi.Input[Optional[str]]] = None, trace_sink_id: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTraceSinkResult]: """ Get a trace sink by name under the parent resource (GCP project). """ ...
5,355,403
def get_field(self, *args, is_squeeze=False, node=None, is_rthetaz=False): """Get the value of variables stored in Solution. Parameters ---------- self : SolutionData a SolutionData object *args: list of strings List of axes requested by the user, their units and values (optional) Returns ------- field: array an array of field values """ axname, _ = self.get_axes_list() symbol = self.field.symbol if len(args) == 0: field_dict = self.field.get_along(tuple(axname), is_squeeze=is_squeeze) else: field_dict = self.field.get_along(*args, is_squeeze=is_squeeze) field = field_dict[symbol] return field
5,355,404
def get_folder_status(dirname, with_message=False): """Get the status of a directory. Args: dirname(str): directory path with_message(bool): whether to also return the message stored in the status file """ status = None closest_time = 0 message = '' for status_type in [ DatasetStatus, TaskStatus, PredictStatus, PruneStatus, DownloadStatus, PretrainedModelStatus ]: for s in status_type: if osp.exists(osp.join(dirname, s.name)): modify_time = os.stat(osp.join(dirname, s.name)).st_mtime if modify_time > closest_time: closest_time = modify_time status = getattr(status_type, s.name) if with_message: encoding = 'utf-8' try: f = open( osp.join(dirname, s.name), 'r', encoding=encoding) message = f.read() f.close() except: try: import chardet f = open(osp.join(dirname, s.name), 'rb') data = f.read() f.close() encoding = chardet.detect(data).get('encoding') f = open( osp.join(dirname, s.name), 'r', encoding=encoding) message = f.read() f.close() except: pass if with_message: return status, message return status
5,355,405
def handle_session_event(event: EventData) -> core_pb2.SessionEvent: """ Handle session event when there is a session event :param event: event data :return: session event """ event_time = event.time if event_time is not None: event_time = float(event_time) return core_pb2.SessionEvent( node_id=event.node, event=event.event_type.value, name=event.name, data=event.data, time=event_time, )
5,355,406
def extract(): """ Function to first go to the yahoo financial news page, then act as a crawler to visit the latest news page, then extract the full news into a variable upload_string, which is then encoded in base64 and pushed as a file to my github repo. """ url = urllib2.urlopen("https://in.finance.yahoo.com/news/") soup = BeautifulSoup(url) var = soup.find("a", {"class":"title"}) con = var.get("href") header = '' for i in range(6, 40): header = header + con[i] heading = header + "..." link = "https://in.finance.yahoo.com" + con url1 = urllib2.urlopen(link) soup1 = BeautifulSoup(url1) var2 = soup1.find("div", {"itemprop":"articleBody"}) var3 = var2.text.encode('ascii', 'ignore') upload_string = str(var3) commit_message = str(datetime.datetime.now()) filename = heading.title() gh = githubpy.github.GitHub(access_token=token) encoded_file = base64.b64encode(upload_string) gh.repos('Rahul91')('pytest')('contents')(filename).put(path=filename,message=commit_message, content=encoded_file) print "Successfully pushed a new article titled : %s on %s \n" %(filename, commit_message) raw_input("Press Enter to exit :)")
5,355,407
def job_builder(meta, valid_meta, workflow, job_dir, out_dir, coprocess=None, other_args="", writeimg=False): """Build a list of image processing jobs. Args: meta: Dictionary of processed image metadata. valid_meta: Dictionary of valid metadata keys. workflow: PlantCV image processing workflow script file. job_dir: Intermediate file output directory. out_dir: Output images directory. coprocess: Coprocess the specified imgtype with the imgtype specified in meta_filters. other_args: String of additional arguments to be passed to the workflow script. writeimg: Boolean that specifies whether output images should be created or not. Returns: jobs: List of image processing commands. :param meta: dict :param valid_meta: dict :param workflow: str :param job_dir: str :param out_dir: str :param coprocess: str :param other_args: str :param writeimg: bool :return job_stack: list """ # Overall job stack. List of list of jobs jobs = [] # Get the list of images # images = list(meta.keys()) images = [] for img in list(meta.keys()): # # If a date range was requested, check whether the image is within range # if args.dates: # # Convert image datetime to unix time # timestamp = dt_parser(meta[img]['timestamp']) # time_delta = timestamp - datetime.datetime(1970, 1, 1) # unix_time = (time_delta.days * 24 * 3600) + time_delta.seconds # if unix_time < args.start_date or unix_time > args.end_date: # continue if coprocess is not None: if meta[img]['imgtype'] != coprocess: images.append(img) else: images.append(img) print("Job list will include " + str(len(images)) + " images" + '\n', file=sys.stderr) # For each image for img in images: # Create JSON templates for each image img_meta = {"metadata": deepcopy(valid_meta), "observations": {}} coimg_meta = {"metadata": deepcopy(valid_meta), "observations": {}} # If there is an image co-processed with the image if (coprocess is not None) and ('coimg' in meta[img]): # Create an output file to store the co-image processing results and populate with metadata coimg = meta[meta[img]['coimg']] coout = open(os.path.join(".", job_dir, meta[img]["coimg"] + ".txt"), 'w') # Store metadata in JSON coimg_meta["metadata"]["image"] = { "label": "image file", "datatype": "<class 'str'>", "value": os.path.join(coimg['path'], meta[img]['coimg']) } # Valid metadata for m in list(valid_meta.keys()): coimg_meta["metadata"][m]["value"] = coimg[m] json.dump(coimg_meta, coout) coout.close() # Create an output file to store the image processing results and populate with metadata outfile = open(os.path.join(".", job_dir, img + ".txt"), 'w') # Store metadata in JSON img_meta["metadata"]["image"] = { "label": "image file", "datatype": "<class 'str'>", "value": os.path.join(meta[img]['path'], img) } # Valid metadata for m in list(valid_meta.keys()): img_meta["metadata"][m]["value"] = meta[img][m] json.dump(img_meta, outfile) outfile.close() # Build job job_parts = ["python", workflow, "--image", os.path.join(meta[img]['path'], img), "--outdir", out_dir, "--result", os.path.join(job_dir, img) + ".txt"] # Add job to list if coprocess is not None and ('coimg' in meta[img]): job_parts = job_parts + ["--coresult", os.path.join(job_dir, meta[img]['coimg']) + ".txt"] if writeimg: job_parts.append("--writeimg") if other_args: other_args_copy = re.sub("'", "", other_args) other_args_copy = other_args_copy.split(" ") job_parts = job_parts + other_args_copy jobs.append(job_parts) return jobs
5,355,408
def bump_patch(version): """Raise the patch part of the version :param: version string :return: the raised version string :rtype: str """ verinfo = parse(version) return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'] + 1)
5,355,409
def _extract_bike_location(bike, lon_abbrev='lon'): """ Standardize the bike location data from GBFS. Some have extra fields, and some are missing fields. Arguments: bike (dict[str, str]): A GBFS bike object as it appears in free_bike_status.json lon_abbrev (str): The abbreviation used for `longitude` Returns: dict[str, str]: A normalized GBFS bike object """ output = {key: bike.get(key) for key in ['bike_id', 'lat', 'is_reserved', 'is_disabled']} output['lon'] = bike.get(lon_abbrev) return output
5,355,410
def clean_word(word): """Return word in lowercase stripped of whitespace""" return word.strip().lower()
5,355,411
def get_batch_size(tracks): """ If tracks is a track-major list of possibly None tracks, get the batch size """ return get_shape(tracks)[0]
5,355,412
def find_edges_from_wires(body: TopoDS_Shape) -> set[TopoDS_Edge]: """Return set of edges from Wires.""" edge_set = set() for wire in TopologyExplorer(body, ignore_orientation=False).wires(): for edge in WireExplorer(wire).ordered_edges(): edge_set.add(edge) return edge_set
5,355,413
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None): """general nice map for israel, need that to plot stations, and temperature field on top of it""" import geopandas as gpd import contextily as ctx import seaborn as sns import cartopy.crs as ccrs sns.set_style("ticks", rc=rc) isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp') isr_with_yosh.crs = {'init': 'epsg:4326'} # isr_with_yosh = isr_with_yosh.to_crs(epsg=3857) crs_epsg = ccrs.epsg('3857') # crs_epsg = ccrs.epsg('2039') if ax is None: # fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg}, # figsize=(6, 15)) bounds = isr_with_yosh.geometry.total_bounds extent = [bounds[0], bounds[2], bounds[1], bounds[3]] # ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg) # ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg) ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15)) else: isr_with_yosh.plot(alpha=0.0, ax=ax) ctx.add_basemap( ax, source=ctx.providers.Stamen.TerrainBackground, crs='epsg:4326') ax.xaxis.set_major_locator(ticker.MaxNLocator(2)) ax.yaxis.set_major_locator(ticker.MaxNLocator(5)) ax.yaxis.set_major_formatter(lat_formatter) ax.xaxis.set_major_formatter(lon_formatter) ax.tick_params(top=True, bottom=True, left=True, right=True, direction='out', labelsize=ticklabelsize) # scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds) return ax
5,355,414
def getSentB(text2: str, offsetB: int, nextPoint: int, sentLength: int): """ alignSentences auxiliar function to get the sentences of the original text. """ posB = text2[offsetB+sentLength:].find('.') sentLength += posB+1 sentB = text2[offsetB:offsetB+sentLength] nextPoint = offsetB + sentLength return sentB, nextPoint, sentLength
5,355,415
def downgrade(): """Downgrade database schema and/or data back to the previous revision.""" op.alter_column('vendors', 'start_date', type_=mysql.DATETIME(), existing_type=mysql.DATE(), nullable=True) op.alter_column('vendors', 'end_date', type_=mysql.DATETIME(), existing_type=mysql.DATE(), nullable=True)
5,355,416
def get_mapping_fcost_local(interface, bus_def): """ coarse cost function to cheaply estimate local (subset of ports) interface match to bus_def """ cost = _get_mapping_fcost_base(interface, bus_def, penalize_umap=False) name_cost = _get_name_fcost2(interface, bus_def) cost.nc = name_cost return cost
5,355,417
def cross_compile(ctx, tag=""): """ Cross-compiles the trace-agent binaries. Use the "--tag=X" argument to specify build tag. """ if not tag: print("Argument --tag=<version> is required.") return print("Building tag %s..." % tag) env = { "TRACE_AGENT_VERSION": tag, "V": tag, } ctx.run("git checkout $V", env=env) ctx.run("mkdir -p ./bin/trace-agent/$V", env=env) ctx.run("go generate -mod=vendor ./pkg/trace/info", env=env) ctx.run("go get -u github.com/karalabe/xgo") ctx.run( "xgo -dest=bin/trace-agent/$V -go=1.11 -out=trace-agent-$V -targets=windows-6.1/amd64,linux/amd64,darwin-10.11/amd64 ./cmd/trace-agent", env=env, ) ctx.run( "mv ./bin/trace-agent/$V/trace-agent-$V-windows-6.1-amd64.exe ./bin/trace-agent/$V/trace-agent-$V-windows-amd64.exe", env=env, ) ctx.run( "mv ./bin/trace-agent/$V/trace-agent-$V-darwin-10.11-amd64 ./bin/trace-agent/$V/trace-agent-$V-darwin-amd64 ", env=env, ) ctx.run("git checkout -") print("Done! Binaries are located in ./bin/trace-agent/%s" % tag)
5,355,418
def base64_image(image: bytes, mime_type: str) -> str: """Encode the image for an URL using base64 Args: image: the image mime_type: the mime type Returns: A string starting with "data:{mime_type};base64," """ base64_data = base64.b64encode(image) image_data = quote(base64_data) return f"data:{mime_type};base64,{image_data}"
5,355,419
def multi_box_head(inputs, image, base_size, num_classes, aspect_ratios, min_ratio=None, max_ratio=None, min_sizes=None, max_sizes=None, steps=None, step_w=None, step_h=None, offset=0.5, variance=[0.1, 0.1, 0.2, 0.2], flip=True, clip=False, kernel_size=1, pad=0, stride=1, name=None, min_max_aspect_ratios_order=False): """ Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. The details of this algorithm, please refer the section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector <https://arxiv.org/abs/1512.02325>`_ . Args: inputs(list|tuple): The list of input Variables, the format of all Variables is NCHW. image(Variable): The input image data of PriorBoxOp, the layout is NCHW. base_size(int): the base_size is used to get min_size and max_size according to min_ratio and max_ratio. num_classes(int): The number of classes. aspect_ratios(list|tuple): the aspect ratios of generated prior boxes. The length of input and aspect_ratios must be equal. min_ratio(int): the min ratio of generated prior boxes. max_ratio(int): the max ratio of generated prior boxes. min_sizes(list|tuple|None): If `len(inputs) <=2`, min_sizes must be set up, and the length of min_sizes should equal to the length of inputs. Default: None. max_sizes(list|tuple|None): If `len(inputs) <=2`, max_sizes must be set up, and the length of min_sizes should equal to the length of inputs. Default: None. steps(list|tuple): If step_w and step_h are the same, step_w and step_h can be replaced by steps. step_w(list|tuple): Prior boxes step across width. If step_w[i] == 0.0, the prior boxes step across width of the inputs[i] will be automatically calculated. Default: None. step_h(list|tuple): Prior boxes step across height, If step_h[i] == 0.0, the prior boxes step across height of the inputs[i] will be automatically calculated. Default: None. offset(float): Prior boxes center offset. Default: 0.5 variance(list|tuple): the variances to be encoded in prior boxes. Default:[0.1, 0.1, 0.2, 0.2]. flip(bool): Whether to flip aspect ratios. Default:False. clip(bool): Whether to clip out-of-boundary boxes. Default: False. kernel_size(int): The kernel size of conv2d. Default: 1. pad(int|list|tuple): The padding of conv2d. Default:0. stride(int|list|tuple): The stride of conv2d. Default:1, name(str): Name of the prior box layer. Default: None. min_max_aspect_ratios_order(bool): If set True, the output prior box is in order of [min, max, aspect_ratios], which is consistent with Caffe. Please note, this order affects the weights order of convolution layer followed by and does not affect the fininal detection results. Default: False. Returns: tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances) mbox_loc: The predicted boxes' location of the inputs. The layout is [N, H*W*Priors, 4]. where Priors is the number of predicted boxes each position of each input. mbox_conf: The predicted boxes' confidence of the inputs. The layout is [N, H*W*Priors, C]. where Priors is the number of predicted boxes each position of each input and C is the number of Classes. boxes: the output prior boxes of PriorBox. The layout is [num_priors, 4]. num_priors is the total box count of each position of inputs. variances: the expanded variances of PriorBox. The layout is [num_priors, 4]. num_priors is the total box count of each position of inputs Examples: .. 
code-block:: python mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], image=images, num_classes=21, min_ratio=20, max_ratio=90, aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], base_size=300, offset=0.5, flip=True, clip=True) """ def _reshape_with_axis_(input, axis=1): out = nn.flatten(x=input, axis=axis) return out def _is_list_or_tuple_(data): return (isinstance(data, list) or isinstance(data, tuple)) def _is_list_or_tuple_and_equal(data, length, err_info): if not (_is_list_or_tuple_(data) and len(data) == length): raise ValueError(err_info) if not _is_list_or_tuple_(inputs): raise ValueError('inputs should be a list or tuple.') num_layer = len(inputs) if num_layer <= 2: assert min_sizes is not None and max_sizes is not None assert len(min_sizes) == num_layer and len(max_sizes) == num_layer elif min_sizes is None and max_sizes is None: min_sizes = [] max_sizes = [] step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) for ratio in six.moves.range(min_ratio, max_ratio + 1, step): min_sizes.append(base_size * ratio / 100.) max_sizes.append(base_size * (ratio + step) / 100.) min_sizes = [base_size * .10] + min_sizes max_sizes = [base_size * .20] + max_sizes if aspect_ratios: _is_list_or_tuple_and_equal( aspect_ratios, num_layer, 'aspect_ratios should be list or tuple, and the length of inputs ' 'and aspect_ratios should be the same.') if step_h: _is_list_or_tuple_and_equal( step_h, num_layer, 'step_h should be list or tuple, and the length of inputs and ' 'step_h should be the same.') if step_w: _is_list_or_tuple_and_equal( step_w, num_layer, 'step_w should be list or tuple, and the length of inputs and ' 'step_w should be the same.') if steps: _is_list_or_tuple_and_equal( steps, num_layer, 'steps should be list or tuple, and the length of inputs and ' 'step_w should be the same.') step_w = steps step_h = steps mbox_locs = [] mbox_confs = [] box_results = [] var_results = [] for i, input in enumerate(inputs): min_size = min_sizes[i] max_size = max_sizes[i] if not _is_list_or_tuple_(min_size): min_size = [min_size] if not _is_list_or_tuple_(max_size): max_size = [max_size] aspect_ratio = [] if aspect_ratios is not None: aspect_ratio = aspect_ratios[i] if not _is_list_or_tuple_(aspect_ratio): aspect_ratio = [aspect_ratio] step = [step_w[i] if step_w else 0.0, step_h[i] if step_w else 0.0] box, var = prior_box(input, image, min_size, max_size, aspect_ratio, variance, flip, clip, step, offset, None, min_max_aspect_ratios_order) box_results.append(box) var_results.append(var) num_boxes = box.shape[2] # get loc num_loc_output = num_boxes * 4 mbox_loc = nn.conv2d( input=input, num_filters=num_loc_output, filter_size=kernel_size, padding=pad, stride=stride) mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1]) compile_shape = [ mbox_loc.shape[0], cpt.floor_division( mbox_loc.shape[1] * mbox_loc.shape[2] * mbox_loc.shape[3], 4), 4 ] run_shape = tensor.assign(numpy.array([0, -1, 4]).astype("int32")) mbox_loc_flatten = nn.reshape( mbox_loc, shape=compile_shape, actual_shape=run_shape) mbox_locs.append(mbox_loc_flatten) # get conf num_conf_output = num_boxes * num_classes conf_loc = nn.conv2d( input=input, num_filters=num_conf_output, filter_size=kernel_size, padding=pad, stride=stride) conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1]) new_shape = [0, -1, num_classes] compile_shape = [ conf_loc.shape[0], cpt.floor_division(conf_loc.shape[1] * conf_loc.shape[2] * conf_loc.shape[3], num_classes), num_classes ] 
run_shape = tensor.assign( numpy.array([0, -1, num_classes]).astype("int32")) conf_loc_flatten = nn.reshape( conf_loc, shape=compile_shape, actual_shape=run_shape) mbox_confs.append(conf_loc_flatten) if len(box_results) == 1: box = box_results[0] var = var_results[0] mbox_locs_concat = mbox_locs[0] mbox_confs_concat = mbox_confs[0] else: reshaped_boxes = [] reshaped_vars = [] for i in range(len(box_results)): reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3)) reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3)) box = tensor.concat(reshaped_boxes) var = tensor.concat(reshaped_vars) mbox_locs_concat = tensor.concat(mbox_locs, axis=1) mbox_confs_concat = tensor.concat(mbox_confs, axis=1) box.stop_gradient = True var.stop_gradient = True return mbox_locs_concat, mbox_confs_concat, box, var
5,355,420
def expandMask(img, shrink = False, step = 1): """Grow or shrink a mask by a pixel.""" if shrink: img = invert(img) img = jitterSum(img.data, step) > 0 img = Image(data = img.astype(numpy.uint8)*255) if shrink: img = invert(img) return img
5,355,421
def room_upsert(sender, instance, **kwargs): """ ... """ group_name: str = "rooms" channel_layer = get_channel_layer() serializer = RoomHeavySerializer(instance) # print(serializer.data) async_to_sync(channel_layer.group_send)( group_name, {"type": "room_event", "method": "U", "data": serializer.data,} )
5,355,422
def isHeader(line): """ tests to see if 'line' is in the event file header """ if containsAny(line, 'EVF Filename:', 'Generation Time:', 'Start_time:', 'End_time:', 'events in list)', '#', 'Include:', 'Init_value:'): return True elif len(line) < 3: return True else: return False
5,355,423
def test_float_notation(value): """ float notation """ assert ormsgpack.unpackb(ormsgpack.packb(value)) == value
5,355,424
def redirect_page(source_url, destination_url): """returns False if current page is not 200""" def _check_redirect(full_url): print('Getting ' + full_url) response = requests.get(full_url, allow_redirects=False) if response.status_code == 200: print("Was 200") return True elif response.status_code == 404: print("Was 404") return False elif response.status_code == 301: print("Was 301") return False else: raise Exception("UNEXPECTED STATUS CODE {} FOR {}".format( response.status_code, full_url)) return True full_source_url = 'https://www.gov.uk' + source_url full_destination_url = 'https://www.gov.uk' + destination_url return _check_redirect(full_source_url) and _check_redirect( full_destination_url)
5,355,425
def python_visualization(args): """Install Python visualization packages. :param args: A Namespace object containing parsed command-line options. """ if args.install: cmd = f"{args.pip_install} hvplot matplotlib" run_cmd(cmd) if args.config: pass if args.uninstall: cmd = f"{args.pip_uninstall} hvplot matplotlib" run_cmd(cmd)
5,355,426
def semantic_analysis(program, print_results=True): """ TODO :param program: TODO :param print_results: TODO :return: TODO """ semanter = make_semantic_analyser() program_ir = semanter.transform(program) if print_results: print_readable_ast(program_ir) return program_ir
5,355,427
def WrapWithQuotes(text, quote='"'): """ Wrap the supplied text with quotes Args: text: Input text to wrap quote: Quote character to use for wrapping (default = '"') Returns: Supplied text wrapped in quote char """ if not text.startswith(quote): text = quote + text if not text.endswith(quote): text = text + quote return text
5,355,428
def get_fastsync_bin(venv_dir, tap_type, target_type): """ Get the absolute path of a fastsync executable """ source = tap_type.replace('tap-', '') target = target_type.replace('target-', '') fastsync_name = f'{source}-to-{target}' return os.path.join(venv_dir, 'pipelinewise', 'bin', fastsync_name)
5,355,429
def login_aws_via_idp(session, username, password, entity_id): """ Get a SAML assertion and set of AWS roles which can be assumed with the SAML assertion. """ logger.info("Looking up your IdP") idp_url, idp_form = get_idp_login_form( session, username, password, entity_id) logger.info("Logging in to %s", idp_url) idp_response = session.post(idp_url, data=idp_form) idp_response.raise_for_status() logger.info("Parsing response and presenting assertion to CILogon") cilogon_url, payload = parse_idp_login_response(idp_response.text) scimma_saml_proxy_response = session.post(cilogon_url, data=payload) scimma_saml_proxy_response.raise_for_status() logger.info("Login complete, extracting credentials") assertion = parse_scimma_sample_response(scimma_saml_proxy_response.text) roles = parse_scimma_aws_assertion(assertion) return assertion, roles
5,355,430
def fit_linreg(x, y, intercept=True): """Simple linear regression: y = kx + b. Arguments --------- x: :class:`~numpy.ndarray` A vector of independent variables. y: :class:`~numpy.ndarray` A vector of dependent variables. intercept: bool If using steady state assumption for fitting, then: True -- the linear regression is performed with an unfixed intercept; False -- the linear regression is performed with a fixed zero intercept. Returns ------- k: float The estimated slope. b: float The estimated intercept. """ mask = np.logical_and(~np.isnan(x), ~np.isnan(y)) xx = x[mask] yy = y[mask] ym = np.mean(yy) xm = np.mean(xx) if intercept: cov = np.mean(xx * yy) - xm * ym var_x = np.mean(xx * xx) - xm * xm k = cov / var_x b = ym - k * xm else: k = np.mean(yy) / np.mean(xx) b = 0 return k, b
5,355,431
def entropy(x, input_as_probabilities): """ Helper function to compute the entropy over the batch input: batch w/ shape [b, num_classes] output: entropy value [is ideally -log(num_classes)] """ if input_as_probabilities: x_ = torch.clamp(x, min = 1e-8) b = x_ * torch.log(x_) else: b = F.softmax(x, dim = 1) * F.log_softmax(x, dim = 1) if len(b.size()) == 2: # Sample-wise entropy return -b.sum(dim = 1).mean() elif len(b.size()) == 1: # Distribution-wise entropy return - b.sum() else: raise ValueError('Input tensor is %d-Dimensional' %(len(b.size())))
5,355,432
def find_function_in_object(o: object, function_name: str) -> Callable: """Finds a callable object matching given function name in given object. Args: o: Any object. function_name: Name of attribute within o. Returns: Callable object with name <function_name> in object <o>. Raises: LookupError: if <function_name> is not a callable object in <o>. """ try: function_handle = getattr(o, function_name) if not hasattr(function_handle, "__call__"): raise LookupError( f"Resolved object {function_name} in object {o} is not a function." ) else: return function_handle except AttributeError: raise LookupError(f"Cannot find function {function_name} in object {o}.")
5,355,433
def test__vm_prefs_builder__virt_mode__negative_fuzz(value): """Fuzz test invalid values for virt mode""" _prefs = vm.VmPrefsBuilder() with pytest.raises(AssertionError): _prefs.virt_mode(value)
5,355,434
def device_id(ctx): """Return device index. For CPU, the index does not matter. For GPU, the index means which GPU device on the machine. Parameters ---------- ctx : Device context object. Device context. Returns ------- int The device index. """ pass
5,355,435
async def refresh(db: AsyncSession, schema: RefreshToken): """ Refresh token :param db: DB :type db: AsyncSession :param schema: Refresh token :type schema: RefreshToken :return: Access token :rtype: dict :raise HTTPException 400: User not found """ username = verify_refresh_token(schema.refresh_token) if not await user_crud.exists(db, username=username): raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='User not found') user = await user_crud.get(db, username=username) return create_token(user.id, username)
5,355,436
def gumbel_softmax(logits, temperature, hard=False): """Sample from the Gumbel-Softmax distribution and optionally discretize. Args: logits: [batch_size, n_class] unnormalized log-probs temperature: non-negative scalar hard: if True, take argmax, but differentiate w.r.t. soft sample y Returns: [batch_size, n_class] sample from the Gumbel-Softmax distribution. If hard=True, then the returned sample will be one-hot, otherwise it will be a probability distribution that sums to 1 across classes """ y = gumbel_softmax_sample(logits, temperature) if hard: y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype) y = tf.stop_gradient(y_hard - y) + y return y
5,355,437
def dqn_learn(t, agent, env, env_state, history, args): """Learning loop for DeepQAgent""" step_type, reward, discount, state = env_state state = copy.deepcopy(state) # Act action = agent.act_explore(state) step_type, reward, discount, successor = env.step(action) # Learn if args.cheat: # TODO: fix this, since _get_hidden_reward seems to be episodic reward = env._get_hidden_reward() loss = agent.learn(state, action, reward, successor) history['writer'].add_scalar('Train/loss', loss, t) # Modify exploration eps = agent.update_epsilon() history['writer'].add_scalar('Train/epsilon', eps, t) # Sync target and policy networks if t % args.sync_every == args.sync_every - 1: agent.sync_target_Q() return (step_type, reward, discount, successor), history
5,355,438
def isaac_cc_test_group(srcs, deps = [], size = "small", copts = [], **kwargs): """ Creates one cc_test target per source file given in `srcs`. The test is given the same name as the corresponding source file. Only '*.cpp' files are supported. Every test will have the same dependencies `deps`. The gtest dependency is added automatically. """ for src in srcs: if not src.endswith(".cpp"): fail("Only cpp files are allowed as tests") native.cc_test( name = src[:-4], size = size, srcs = [src], deps = deps + ["@gtest//:main"], copts = copts + ["-Wno-unused-but-set-variable"], **kwargs )
5,355,439
def build_custom_Theta( data, data_description=[], add_constant_term=True, ): """ builds a matrix Theta(U) from a predefined set of terms This is used when we subsample and take all the derivatives point by point or if there is an extra input to put in. input: data: column 0 is U data_description: description of candidate terms in Theta returns: Theta = Theta(U,Q) descr = description of what all the columns in Theta are """ if len(data) > 0: n, m = data.shape # Add first column of Theta as ones. Theta = np.array([], dtype=np.complex64).reshape((n, 0)) descr = [] # Add "u"-part into Theta if len(data_description) > 0: Theta = np.hstack([Theta, data]) descr += data_description return Theta, descr
5,355,440
def run(): """Requirements for Task 2G""" # Build list of stations stations = build_station_list() # Update latest level data for all stations update_water_levels(stations) """Criteria for flooding: Relative water level list Rate of change (rising or falling) For both categories: top 25 percentile +3 points top 50 percentile +2 points bottom 50 percentile +1 points bottom 25 percentile +0 points Add points from both categories and check ranges for flooding risk: == 6 Severe >= 4 Moderate >= 2 High Low """ # Number of stations wanted N = 150 #len(stations) # Store list of station objects to plot: stations_highest_rel_level(stations, 10) stations_highest_list = stations_highest_rel_level(stations, N) # Create list of tuples to store risk of flooding ratings and points for each station flooding_risk = [] for station in stations_highest_list: # Fetch data over past 2 days dt = 2 dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt)) # Calculate if water levels are rising or falling try: poly, date_shift = polyfit(dates, levels, 4) poly_gradient = np.polyder(poly) x1 = date2num(dates) gradient = poly_gradient(x1[-1]-date_shift) # Assign points. percentile ranking of relative water level + 10 * gradient. Gradient of levels is a more important # factor for predicting flood risk and has a greater weight of 10 points = stations_highest_list.index(station)/len(stations_highest_list) + 10*gradient except: points = 0 #Assign rating if points >= 10: rating = "Severe" elif points >= 0: rating = "High" elif points >= -5: rating = "Moderate" else: rating = "Low" print("Processing", stations_highest_list.index(station), "out of ", len(stations_highest_list)) flooding_risk.append((station.name, rating, points)) # Sort by risk of flooding (points) flooding_risk = sorted_by_key(flooding_risk, 2) for station_name, rating, points in flooding_risk: print(station_name, rating, points)
5,355,441
def load(plugin: pathlib.Path) -> Optional[ModuleType]: """Load a specific cemu plugin Args: plugin (pathlib.Path): the path of the plugin to load Returns: Optional[ModuleType]: the loaded plugin module on success, None if there's no plugin, or it is invalid """ try: if plugin.is_file(): mod = importlib.import_module(f"cemu.plugins.{plugin.stem}") elif plugin.is_dir(): mod = importlib.import_module(f"cemu.plugins.{plugin.name}") else: raise ImportError("invalid format") except ImportError as ie: error(f"Failed to import '{plugin}' - reason: {str(ie)}") return None if not hasattr(mod, "register"): error(f"Plugin '{plugin.stem}' has no `register` method") return None return mod
5,355,442
def png_to_jpeg(png_file, jpeg_file): """ Convert PNG images to JPEG format :param png_file: full path of .png file :param jpeg_file: full path of .jpeg file """ im = PIL.Image.open(png_file) rgb_im = im.convert('RGB') rgb_im.save(jpeg_file, 'JPEG')
5,355,443
def get_standard_logger(name, log_dir=None): """Function to return an instance of type logger.""" if log_dir is None: log_dir = '/Users/teaton/dev/fantasyAM/logs' time_stamp = datetime.now().strftime('%Y%m%d_%H%M%S') logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(name) logger.setLevel(logging.INFO) # Create a file handler os.makedirs(log_dir, exist_ok=True) handler = logging.FileHandler(os.path.join(log_dir, f'{name}_{time_stamp}.log')) handler.setLevel(logging.INFO) # Create a logging format formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) # add the handlers to the logger logger.addHandler(handler) return logger
5,355,444
def getDatabaseConnection(databaseString): """Attempt connection to the database""" sqlsession = None try: sqlengine = sqlalchemy.create_engine(databaseString) SQLSession = sessionmaker(bind=sqlengine) sqlsession = SQLSession() print("Connection to " + databaseString + " successful") except Exception as e: print(traceback.format_exc()) print("Error in connection to the database") return sqlsession
5,355,445
def prepare_multiple_configs(conf): """ This function uses workload_1 as a base, and then duplicates its configuration for all other workloads 2,3... while leaving properties already defined in subsequent workloads (2,3..) unchanged. """ keys_starting_with_workload = [] for k, _ in conf.iteritems(): if k.startswith("workload"): keys_starting_with_workload.append(k) for k in keys_starting_with_workload: if k != "workload_1": merge_dicts(dst_dic=conf[k], src_dic=conf["workload_1"], overwrite=False) return conf, keys_starting_with_workload
5,355,446
def undo_coefficient_scaling(clf = None, coefficients = None, intercept = 0.0, scaler = None): """ given coefficients and intercept for scaled data, returns coefficients and intercept for unnormalized data w = w_scaled / sigma b = b_scaled - (w_scaled / sigma).dot(mu) = b_scaled - w.dot(mu) :param clf: sklearn linear classifier :param coefficients: vector of coefficients :param intercept: scalar for the intercept function :param scaler: sklearn.Scaler or None :return: coefficients and intercept for unnormalized data """ if coefficients is None: assert clf is not None assert intercept == 0.0 assert hasattr(clf, 'coef_') coefficients = clf.coef_ intercept = clf.intercept_ if hasattr(clf, 'intercept_') else 0.0 if scaler is None: w = np.array(coefficients) b = float(intercept) else: isinstance(scaler, StandardScaler) x_shift = np.array(scaler.mean_) x_scale = np.sqrt(scaler.var_) w = coefficients / x_scale b = intercept - np.dot(w, x_shift) w = np.array(w).flatten() b = float(b) return w, b
5,355,447
def plot_polar_image(data, origin=None): """Plots an image reprojected into polar coordinates with the origin at "origin" (a tuple of (x0, y0), defaults to the center of the image)""" polar_grid, r, theta = reproject_image_into_polar(data, origin) plt.figure() plt.imshow(polar_grid, extent=(theta.min(), theta.max(), r.max(), r.min())) plt.axis('auto') plt.ylim(plt.ylim()[::-1]) plt.xlabel('Theta Coordinate (radians)') plt.ylabel('R Coordinate (pixels)') plt.title('Image in Polar Coordinates')
5,355,448
def parse_sum_stats(inf, sep): """ Yields a line at a time from the summary statistics file. Args: inf (str): input file sep (str): column separator Returns: OrderedDict: {column: value} """ with open_gzip(inf, "rb") as in_handle: # Get header header = in_handle.readline().decode("utf-8").rstrip().split(sep) # Assert that all column arguments are contained in header for arg, value in args.__dict__.items(): if '_col' in arg and value: assert value in header, \ 'Error: --{0} {1} not found in input header'.format(arg, value) # Iterate over lines for line in in_handle: values = line.decode("utf-8").rstrip().split(sep) # Replace any na_rep_in values with None values = [value if value != args.na_rep_in else None for value in values] # Check we have the correct number of elements assert len(values) == len(header), 'Error: column length ({0}) does not match header length ({1})'.format(len(values), len(header)) yield OrderedDict(zip(header, values))
5,355,449
def reduce(snail_nr): """Returns a fully reduced version of the given snail number.""" new_snail_nr = copy.deepcopy(snail_nr) # print("Start:") # print(snail_nr) while True: # print("\nNew reduction phase...") if explode_in_place(new_snail_nr): # print("Exploded:", new_snail_nr) continue # else: # print("No explode.") if split_in_place(new_snail_nr): # print("Split:", new_snail_nr) continue # else: # print("No split.") break # print(new_snail_nr) return new_snail_nr
5,355,450
def destroy_sample_files_folders() -> None: """Destroys sample files folders.""" for sample_folder_path in constants.SAMPLE_FILES_FOLDERS: _destroy_sample_folder(sample_folder_path)
5,355,451
def load_image_embedding_model(input_repr, content_type, embedding_size): """ Returns a model with the given characteristics. Loads the model if the model has not been loaded yet. Parameters ---------- input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for audio model. content_type : "music" or "env" Type of content used to train embedding. embedding_size : 8192 or 512 Embedding dimensionality. Returns ------- model : tf.keras.Model Model object. """ model_path = get_image_embedding_model_path(input_repr, content_type) return load_image_embedding_model_from_path(model_path, embedding_size)
5,355,452
def prepare_alm(alm=None, ainfo=None, lmax=None, pre=(), dtype=np.float64): """Set up alm and ainfo based on which ones of them are available.""" if alm is None: if ainfo is None: if lmax is None: raise ValueError("prepare_alm needs either alm, ainfo or lmax to be specified") ainfo = sharp.alm_info(lmax) alm = np.zeros(pre+(ainfo.nelem,), dtype=np.result_type(dtype,0j)) else: ainfo = sharp.alm_info(nalm=alm.shape[-1]) return alm, ainfo
5,355,453
def increment_occurance_dict(d: dict, k: Any) -> None: """ Increment occurance dict, updates in-place so nothing is returned. """ try: d[k] += 1 except KeyError: d[k] = 1 return None
5,355,454
def dump_contents(input_fc): """ Print the contents of the feature class, this is just a namedtuple sample. """ fcrow = namedtuple("fcrow", ["oid", "datestamp"]) with arcpy.da.SearchCursor(input_fc, ["OID@", "datestamp"]) as cursor: for row in cursor: feature = fcrow._make(row) print(feature.oid, feature.datestamp) return
5,355,455
def package_versions(modules=None, builtins=False, standard_lib=None): """Retrieve package version information. When builtins or standard_lib are None, they will be included only if a version was found in the package. @param modules: Modules to inspect @type modules: list of strings @param builtins: Include builtins @type builtins: Boolean, or None for automatic selection @param standard_lib: Include standard library packages @type standard_lib: Boolean, or None for automatic selection """ if not modules: modules = sys.modules.keys() std_lib_dir = get_python_lib(standard_lib=True) root_packages = {key.split('.')[0] for key in modules} builtin_packages = {name.split('.')[0] for name in root_packages if name in sys.builtin_module_names or '_' + name in sys.builtin_module_names} # Improve performance by removing builtins from the list if possible. if builtins is False: root_packages = list(root_packages - builtin_packages) std_lib_packages = [] paths = {} data = {} for name in root_packages: try: package = import_module(name) except ImportError as e: data[name] = {'name': name, 'err': e} continue info = {'package': package, 'name': name} if name in builtin_packages: info['type'] = 'builtins' if '__file__' in package.__dict__: # Determine if this file part is of the standard library. if os.path.normcase(package.__file__).startswith( os.path.normcase(std_lib_dir)): std_lib_packages.append(name) if standard_lib is False: continue info['type'] = 'standard libary' # Strip '__init__.py' from the filename. path = package.__file__ if '__init__.py' in path: path = path[0:path.index('__init__.py')] if PY2: path = path.decode(sys.getfilesystemencoding()) info['path'] = path assert path not in paths, 'Path of the package is in defined paths' paths[path] = name if '__version__' in package.__dict__: info['ver'] = package.__version__ elif name.startswith('unicodedata'): info['ver'] = package.unidata_version # If builtins or standard_lib is None, # only include package if a version was found. if (builtins is None and name in builtin_packages) or \ (standard_lib is None and name in std_lib_packages): if 'ver' in info: data[name] = info else: # Remove the entry from paths, so it isn't processed below del paths[info['path']] else: data[name] = info # Remove any pywikibot sub-modules which were loaded as a package. # e.g. 'wikipedia_family.py' is loaded as 'wikipedia' _program_dir = _get_program_dir() if isinstance(pathlib, Exception): dir_parts = _program_dir.split(os.sep) else: dir_parts = pathlib.Path(_program_dir).parts length = len(dir_parts) for path, name in paths.items(): if isinstance(pathlib, Exception): lib_parts = os.path.normpath(path).split(os.sep) else: lib_parts = pathlib.Path(path).parts if dir_parts != lib_parts[:length]: continue if lib_parts[length] != '.tox': del data[name] return data
5,355,456
def closest_line(query_lines, metric='cosine'): """Compute the distance to, and parameters for, the closest line to each line in query_lines. Args: - query_lines: Array of lines to compute closest matches for, shape (n_lines, width, height, 1) - metric: String to pass to scipy.spatial.distance.cdist to choose which distance metric to use Returns: - min_dist, starts, ends: Arrays of shape (n_lines,) denoting the distance to the nearest ``true'' line and the start and end points. """ h, w = query_lines.shape[1:-1] # Construct 10000 lines with these dimensions angles = np.linspace(0, 2*np.pi - 2*np.pi/10000, 10000) all_lines = np.array( [(data.draw_line(angle, h, w)) for angle in angles]) # Produce vectorized versions of both for use with scipy.spatial flat_query = query_lines.reshape(query_lines.shape[0], -1) flat_all = all_lines.reshape(all_lines.shape[0], -1) # Compute pairwise distance matrix of query lines with all valid lines distances = scipy.spatial.distance.cdist(flat_query, flat_all, metric) min_dist_idx = np.argmin(distances, axis=-1) min_dist = distances[np.arange(distances.shape[0]), min_dist_idx] angles = np.array([angles[n] for n in min_dist_idx]) return min_dist, angles
5,355,457
def deref_vtk(obj): """Dereferences the VTK object from the object if possible.""" if isinstance(obj, TVTKBase): return obj._vtk_obj else: return obj
5,355,458
def order_assignee_factory(team): """ Creates a :class:`datahub.omis.order.models.OrderAssignee` instance related to ``team`` """ adviser = Advisor.objects.create( first_name='John', last_name='Doe', email=f'{uuid4()}@example.com', ) order_assignee = OrderAssignee.objects.create( order=Order.objects.create( company=Company.objects.create(), contact=Contact.objects.create(primary=True), primary_market=Country.objects.create(), ), adviser=adviser, created_by=adviser) order_assignee.team = team order_assignee.save() return order_assignee
5,355,459
def is_bool(space, w_obj): """ Finds out whether a variable is a boolean""" return space.wrap(w_obj.tp == space.tp_bool)
5,355,460
def _compile_for_uhfqa( device: zhinst.Device, cached_schedule: schedule_helpers.CachedSchedule, settings_builder: zi_settings.ZISettingsBuilder, ) -> Tuple[zi_settings.ZISettingsBuilder, ZIAcquisitionConfig]: """ Initialize programming the UHFQA ZI Instrument. Creates a sequence program and converts schedule pulses to waveforms for the UHFQA. Parameters ---------- device : cached_schedule : settings_builder : Returns ------- : """ instrument_info = zhinst.InstrumentInfo( clock_rate=device.clock_rate, resolution=8, granularity=WAVEFORM_GRANULARITY[device.device_type], ) channels = device.channels channels = list(filter(lambda c: c.mode == enums.SignalModeType.REAL, channels)) awg_index = 0 channel = channels[awg_index] logger.debug(f"[{device.name}-awg{awg_index}] {str(device)}") mixer_corrections = ( channel.mixer_corrections if not channel.mixer_corrections is None else common.MixerCorrections() ) settings_builder.with_defaults( [ ("awgs/0/single", 1), ("qas/0/rotations/*", (1 + 1j)), ("qas/0/integration/sources/*", 0), ] ).with_sigouts(0, (1, 1)).with_awg_time( 0, device.clock_select ).with_qas_integration_weights_real( range(NUM_UHFQA_READOUT_CHANNELS), np.zeros(MAX_QAS_INTEGRATION_LENGTH) ).with_qas_integration_weights_imag( range(NUM_UHFQA_READOUT_CHANNELS), np.zeros(MAX_QAS_INTEGRATION_LENGTH) ).with_sigout_offset( 0, mixer_corrections.dc_offset_I ).with_sigout_offset( 1, mixer_corrections.dc_offset_Q ) logger.debug(f"[{device.name}-awg{awg_index}] channel={str(channel)}") instructions = get_execution_table( cached_schedule, instrument_info, channel, ) # Generate a dictionary of uuid(s) and zhinst.Wave instructions wave_instructions_dict: Dict[int, zhinst.Wave] = dict( (i.uuid, i) for i in instructions if isinstance(i, zhinst.Wave) ) # Create a list of all pulse_id(s). pulse_ids: List[int] = wave_instructions_dict.keys() # Generate map containing waveform the location of a pulse_id. waveform_table: Dict[int, int] = zi_helpers.get_waveform_table( pulse_ids, cached_schedule.pulseid_pulseinfo_dict ) # Create a dictionary of uuid(s) and numerical waveforms. waveforms_dict: Dict[int, np.ndarray] = dict( (uuid, wf_instr.waveform) for uuid, wf_instr in wave_instructions_dict.items() ) # Create a dictionary of uuid(s) and zhinst.Measure instructions n_acquisitions = sum(isinstance(x, zhinst.Measure) for x in instructions) measure_instructions_dict: Dict[int, zhinst.Measure] = dict( (i.uuid, i) for i in instructions if isinstance(i, zhinst.Measure) ) # Generate and apply sequencer program seqc = _assemble_uhfqa_sequence( cached_schedule=cached_schedule, device=device, instrument_info=instrument_info, output=device.channel_0, waveform_table=waveform_table, instructions=instructions, ) logger.debug(seqc) settings_builder.with_compiler_sourcestring(awg_index, seqc) # Apply waveforms to AWG _add_wave_nodes(device, awg_index, waveforms_dict, waveform_table, settings_builder) # Get a list of all acquisition protocol channels acq_channel_resolvers_map: Dict[int, Callable[..., Any]] = dict() # the unique acquisitions are acquisitions unique_acquisition_hashes = [] for acq_uuid, acq_info in cached_schedule.acqid_acqinfo_dict.items(): # the acquisition index is not required for configuring the integration weights. # we use a hash to identify which acquisitions are identical in this context. 
acq_hash = make_hash(acq_info.copy().pop("acq_index")) if acq_hash in unique_acquisition_hashes: continue unique_acquisition_hashes.append(acq_hash) acq_protocol: str = acq_info["protocol"] acq_duration: float = acq_info["duration"] acq_channel: int = acq_info["acq_channel"] integration_length = round(acq_duration * instrument_info.clock_rate) logger.debug( f"[{device.name}] acq_info={acq_info} " + f" acq_duration={acq_duration} integration_length={integration_length}" ) settings_builder.with_qas_integration_mode( zhinst.QasIntegrationMode.NORMAL ).with_qas_integration_length(integration_length).with_qas_result_enable( False ).with_qas_monitor_enable( False ).with_qas_delay( 0 ) if acq_protocol == "trace": # Disable Weighted integration because we'd like to see # the raw signal. settings_builder.with_qas_monitor_enable(True).with_qas_monitor_averages( cached_schedule.schedule.repetitions ).with_qas_monitor_length( integration_length ).with_qas_integration_weights_real( range(NUM_UHFQA_READOUT_CHANNELS), np.ones(MAX_QAS_INTEGRATION_LENGTH) ).with_qas_integration_weights_imag( range(NUM_UHFQA_READOUT_CHANNELS), np.ones(MAX_QAS_INTEGRATION_LENGTH) ) monitor_nodes = ( "qas/0/monitor/inputs/0/wave", "qas/0/monitor/inputs/1/wave", ) acq_channel_resolvers_map[acq_channel] = partial( resolvers.monitor_acquisition_resolver, monitor_nodes=monitor_nodes ) else: measure_instruction: zhinst.Measure = measure_instructions_dict[acq_uuid] # Combine a reset and setting acq weights # by slicing the length of the waveform I and Q values. # This overwrites 0..length with new values. # The waveform is slightly larger then the integration_length # because of the waveform granularity. This is irrelevant # due to the waveform being appended with zeros. Therefore # avoiding an extra slice of waveform[0:integration_length] weights_i = np.zeros(MAX_QAS_INTEGRATION_LENGTH) weights_q = np.zeros(MAX_QAS_INTEGRATION_LENGTH) weights_i[ 0 : len(measure_instruction.weights_i) ] = measure_instruction.weights_i weights_q[ 0 : len(measure_instruction.weights_q) ] = measure_instruction.weights_q settings_builder.with_qas_result_mode( zhinst.QasResultMode.CYCLIC ).with_qas_result_source( zhinst.QasResultSource.INTEGRATION ).with_qas_result_length( n_acquisitions ).with_qas_result_enable( True ).with_qas_result_averages( cached_schedule.schedule.repetitions ) # set the integration weights, note that we need to set 4 weights in order # to use a complex valued weight function in the right way. # Z = (w0*sI + w1*sQ) + 1j ( w1*sI - w0 * sQ) settings_builder.with_qas_integration_weights_real( 2 * acq_channel, list(weights_i) ).with_qas_integration_weights_imag( 2 * acq_channel, list(weights_q) ).with_qas_integration_weights_real( 2 * acq_channel + 1, list(weights_q) ).with_qas_integration_weights_imag( 2 * acq_channel + 1, list(-1 * weights_i) ) # Create partial function for delayed execution acq_channel_resolvers_map[acq_channel] = partial( resolvers.result_acquisition_resolver, result_nodes=[ f"qas/0/result/data/{2*acq_channel}/wave", f"qas/0/result/data/{2*acq_channel+1}/wave", ], ) settings_builder.with_qas_result_reset(0).with_qas_result_reset(1) settings_builder.with_qas_monitor_reset(0).with_qas_monitor_reset(1) return settings_builder, ZIAcquisitionConfig( n_acquisitions, acq_channel_resolvers_map )
5,355,461
def read_config(): """Parses config and returns config values :returns: config as dict """ dirname = os.path.dirname(__file__) config_path = os.path.join(dirname, 'config.yaml') try: stream = open(config_path, "r") except FileNotFoundError: return None try: config = yaml.safe_load(stream) except yaml.YAMLError as exception: logging.error("YAML error while parsing config.yaml:\n%s", exception) exit() # Remove / on the end of url if "url" in config: config["url"] = config["url"].rstrip("/") return config
5,355,462
def pull_media(obj, remote_id, stage): """ Pull media files from the Divio cloud environment. """ localdev.pull_media(obj.client, stage=stage, remote_id=remote_id)
5,355,463
def air_density(t_f, elevation): """Eq 20, page 25""" return (1.293 - 1.525e-4 * elevation + 6.379e-9 * elevation ** 2) / ( 1 + 0.00367 * t_f )
5,355,464
def revert_migration(apps, schema_editor): """ Reverts migration in apply_migration """ Group = apps.get_model('auth', 'Group') Group.objects.filter(name__in=AGENCIES).delete()
5,355,465
def _strip_after_new_lines(s): """Removes leading and trailing whitespaces in all but first line.""" lines = s.splitlines() if len(lines) > 1: lines = [lines[0]] + [l.lstrip() for l in lines[1:]] return '\n'.join(lines)
5,355,466
def make_connection(request): """ Fixture yielding a factory that builds a MockConnection with the given max_items_send """ def generate(*, max_items_send: int): return MockConnection(max_items_send=max_items_send) yield generate
5,355,467
def print_cycles(objects, outstream=sys.stdout, show_progress=False): """ *objects* A list of objects to find cycles in. It is often useful to pass in gc.garbage to find the cycles that are preventing some objects from being garbage collected. *outstream* The stream for output. *show_progress* If True, print the number of objects reached as they are found. """ import gc from types import FrameType def print_path(path): for i, step in enumerate(path): # next "wraps around" next = path[(i + 1) % len(path)] outstream.write(" %s -- " % str(type(step))) if isinstance(step, dict): for key, val in six.iteritems(step): if val is next: outstream.write("[%s]" % repr(key)) break if key is next: outstream.write("[key] = %s" % repr(val)) break elif isinstance(step, list): outstream.write("[%d]" % step.index(next)) elif isinstance(step, tuple): outstream.write("( tuple )") else: outstream.write(repr(step)) outstream.write(" ->\n") outstream.write("\n") def recurse(obj, start, all, current_path): if show_progress: outstream.write("%d\r" % len(all)) all[id(obj)] = None referents = gc.get_referents(obj) for referent in referents: # If we've found our way back to the start, this is # a cycle, so print it out if referent is start: print_path(current_path) # Don't go back through the original list of objects, or # through temporary references to the object, since those # are just an artifact of the cycle detector itself. elif referent is objects or isinstance(referent, FrameType): continue # We haven't seen this object before, so recurse elif id(referent) not in all: recurse(referent, start, all, current_path + [obj]) for obj in objects: outstream.write("Examining: %r\n" % (obj,)) recurse(obj, obj, {}, [])
5,355,468
def my_browse(*args, **kwargs): """ Creates and starts an ObjectBrowser with modified summary column. """ attribute_columns = copy.deepcopy(DEFAULT_ATTR_COLS) summary_column = [col for col in attribute_columns if col.name == 'summary'][0] summary_column.data_fn = my_summary return browse(*args, attribute_columns = attribute_columns, **kwargs)
5,355,469
def build_scatterplot(budget): """ Runs a cross-validation and plots the scatter-plot of the cross-fold validation error; the tool is taken from settings.algo :param budget: the budget in use for the tool :return: """ frame = load_frames(budget=budget) X = frame[metrics] Y = frame['y'] predicted = cross_val_predict(get_best_models(budget=budget, tool=settings.algo), X, Y, cv=20) fig, ax = plt.subplots() ax.scatter(Y, predicted, edgecolors=(0, 0, 0)) ax.plot([Y.min(), Y.max()], [Y.min(), Y.max()], 'k--', lw=4) ax.set_xlabel('Measured') ax.set_ylabel('Predicted') plt.savefig('{}/cv-error-{}-{}.pdf'.format(settings.PLOTS, settings.algo, budget))
5,355,470
def cpu_min_frequency(): """ Returns the processor minimum frequency, in Mhz (> int) """ return psutil.cpu_freq().min
5,355,471
def test_increment_int(): """Making sure increment works for integers""" x0 = 0 y0 = lde.increment(x0) # 1 assert y0 == 1 x1 = 100 y1 = lde.increment(x1) # 101 assert y1 == 101
5,355,472
def log_error(message: str) -> str: """error log""" return message
5,355,473
def exclude_preservation_pending(q): """ Transform query to exclude MuseumObject entries which are pending preservation """ now = datetime.datetime.now(datetime.timezone.utc) preservation_boundary = now - PRESERVATION_DELAY update_boundary = now - UPDATE_DELAY return ( q.outerjoin( MuseumPackage, MuseumPackage.id == MuseumObject.latest_package_id ) .filter( # If any of the four conditions is true, the object will not # be preserved and are thus included in this query: or_( # 1. Is metadata information still incomplete? MuseumObject.metadata_hash == None, MuseumObject.attachment_metadata_hash == None, # 2. Is the object frozen? MuseumObject.frozen, # 3. The object hasn't been preserved, but it has been less # than a month passed since the creation of the object? and_( MuseumObject.latest_package_id == None, coalesce( MuseumObject.created_date, datetime.datetime.min ) > preservation_boundary ), # 4. Has the object entered preservation before, but... and_( MuseumObject.latest_package_id != None, # ...the package wasn't cancelled, and either... MuseumPackage.cancelled == False, or_( # ...modification date hasn't changed? coalesce( MuseumPackage.object_modified_date, datetime.datetime.min ) == coalesce( MuseumObject.modified_date, datetime.datetime.min ), # ...modification date has changed, but it's been # less than a month? coalesce( MuseumPackage.object_modified_date, datetime.datetime.min ) > update_boundary, # ...metadata hashes haven't changed, indicating no # change has happened? and_( MuseumPackage.metadata_hash == MuseumObject.metadata_hash, MuseumPackage.attachment_metadata_hash == MuseumObject.attachment_metadata_hash ) ) ) ) ) )
5,355,474
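# A minimal usage sketch, assuming the application's SQLAlchemy session and the
# MuseumObject model referenced above are available:
def count_not_pending(session):
    """Count objects that are currently not pending preservation."""
    q = exclude_preservation_pending(session.query(MuseumObject))
    return q.count()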
def naturalday(value, format=None):
    """
    For dates within roughly a week of today, return a human-readable string
    such as 'Today', 'Tomorrow', 'After tomorrow', 'Next Monday' or
    'Last Friday'. Otherwise, return a string formatted according to
    settings.DATE_FORMAT.
    """
    value = localtime(value)
    try:
        tzinfo = getattr(value, 'tzinfo', None)
        value = date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't a date object.
        return value
    except ValueError:
        # Date arguments out of range.
        return value
    today = datetime.now(tzinfo).date()
    delta = value - today
    if delta.days > 7:
        return date_format(value, format)
    elif delta.days > 2:
        if value.weekday() == 0:
            return _('Next Monday')
        elif value.weekday() == 1:
            return _('Next Tuesday')
        elif value.weekday() == 2:
            return _('Next Wednesday')
        elif value.weekday() == 3:
            return _('Next Thursday')
        elif value.weekday() == 4:
            return _('Next Friday')
        elif value.weekday() == 5:
            return _('Next Saturday')
        else:
            return _('Next Sunday')
    elif delta.days == 2:
        return _('After tomorrow')
    elif delta.days == 1:
        return _('Tomorrow')
    elif delta.days == 0:
        return _('Today')
    elif delta.days == -1:
        return _('Yesterday')
    elif delta.days == -2:
        return _('Before yesterday')
    elif delta.days > -7:
        if value.weekday() == 0:
            return _('Last Monday')
        elif value.weekday() == 1:
            return _('Last Tuesday')
        elif value.weekday() == 2:
            return _('Last Wednesday')
        elif value.weekday() == 3:
            return _('Last Thursday')
        elif value.weekday() == 4:
            return _('Last Friday')
        elif value.weekday() == 5:
            return _('Last Saturday')
        else:
            return _('Last Sunday')
    else:
        return date_format(value, format)
5,355,475
def getRequestData():
    """
    Main function to execute the GET request against the API
    """
    URL = "https://pokeapi.co/api/v2/evolution-chain/"
    # evoChain = []  The algorithm could be optimized by collecting each
    # evolution in a list and walking the JSON in a loop instead of the
    # nested ifs below
    print('Please enter an Id')
    input_id = input()
    response = requests.get(URL + str(input_id))
    # the entered id is appended to the endpoint
    if response.status_code == 200:
        try:
            response_json = json.loads(response.text)
            chain = response_json['chain']
            print('*** Evolution chain # {} ***'.format(input_id))
            name0 = chain['species']['name']
            print('Pokemon Name: {}'.format(name0))
            getWeightAndHeight(name0)  # helper that prints id, weight and height
            print('+ Base Stats:')
            getBaseStats(name0)  # helper that prints the 6 base stats
            if chain['evolves_to']:
                name1 = chain['evolves_to'][0]['species']['name']
                print('- Evolves to: ')
                print('Pokemon Name: {}'.format(name1))
                getWeightAndHeight(name1)
                print('+ Base Stats:')
                getBaseStats(name1)
                if chain['evolves_to'][0]['evolves_to']:
                    name2 = chain['evolves_to'][0]['evolves_to'][0]['species']['name']
                    print('- Evolves to: ')
                    print('Pokemon Name: {}'.format(name2))
                    getWeightAndHeight(name2)
                    print('+ Base Stats:')
                    getBaseStats(name2)
                    if chain['evolves_to'][0]['evolves_to'][0]['evolves_to']:
                        name3 = (chain['evolves_to'][0]['evolves_to'][0]
                                 ['evolves_to'][0]['species']['name'])
                        print('- Evolves to: ')
                        print('Pokemon Name: {}'.format(name3))
                        getWeightAndHeight(name3)
                        print('+ Base Stats:')
                        getBaseStats(name3)
                    else:
                        print('*** End of the evo-chain # {} ***'.format(input_id))
                else:
                    print('*** End of the evo-chain # {} ***'.format(input_id))
            else:
                print('*** End of the evo-chain # {} ***'.format(input_id))
        except (KeyError, IndexError, ValueError):
            print("Error fetching the data")
    else:
        print('*** Invalid Id! Please restart the script and try again ***')
        # TODO: validate that the input is an integer up front
5,355,476
def load_property_names(connection, property_names, count_properties):
    """ Load property names from a mutation file in the SQLite database """
    cur = connection.cursor()
    statement = """
        INSERT INTO MUTATION_PROPERTY (PROPERTY_ID, PROPERTY_NAME)
        VALUES (?, ?)
    """
    for property_id, property_name in enumerate(property_names, 1):
        # parameterized query avoids quoting problems in property names
        cur.execute(statement, (property_id, property_name))

    # continue numbering after the plain properties, even if that list is empty
    offset = len(property_names)
    for property_id, (_, property_name) in enumerate(count_properties, offset + 1):
        cur.execute(statement, (property_id, property_name))

    cur.close()
    connection.commit()
5,355,477
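# Hypothetical end-to-end example with an in-memory SQLite database; the
# MUTATION_PROPERTY table layout is assumed from the INSERT statement above.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE MUTATION_PROPERTY (PROPERTY_ID INTEGER, PROPERTY_NAME TEXT)"
)
load_property_names(
    conn,
    property_names=["gene", "chromosome"],
    count_properties=[("count", "sample_count")],
)
print(conn.execute("SELECT * FROM MUTATION_PROPERTY").fetchall())
# [(1, 'gene'), (2, 'chromosome'), (3, 'sample_count')]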
def test_adaptors(adaptor: str, shuffle_buffer_size: int):
    """
    Test if framework-specific generator adaptors yield batches.
    """
    idx = np.arange(0, 10)

    def map_fn(x_, obs_):
        """
        Note: Need to convert to numpy in output because torch does not accept dask.
        """
        return (np.asarray(x_[:, :2]),),

    kwargs = {"idx": {"Mus musculus": idx}, "obs_keys": [], "randomized_batch_access": False,
              "retrieval_batch_size": 2, "map_fn": map_fn}
    cart = _get_cart(store_format="dao", feature_space="single", **kwargs)
    if adaptor == "python":
        kwargs = {}
    elif adaptor == "tensorflow":
        import tensorflow as tf

        kwargs = {"output_signature": (
            tf.TensorSpec(shape=(2,), dtype=tf.float32),
        )}
    elif adaptor in ["torch", "torch-loader", "torch-iter-loader", "torch-iter"]:
        kwargs = {}
    else:
        assert False
    it = cart.adaptor(generator_type=adaptor, shuffle_buffer=shuffle_buffer_size, **kwargs)
    if adaptor == "tensorflow":
        it = iter(it.range(2))
    if adaptor in ["torch", "torch-iter"]:
        from torch.utils.data import DataLoader
        it = list(DataLoader(it))
        it = iter(it)
    if adaptor in ["torch-loader", "torch-iter-loader"]:
        import torch
        it = iter(list(it))
    _ = next(it)
5,355,478
def qg8_graph_write(filename: str, graph: qg8_graph):
    """
    Wrapper function which prepares a collection of chunks (graph) and writes it to a file
    """
    if not isinstance(graph, qg8_graph):
        raise TypeError("Second argument is not a qg8_graph")
    try:
        qg8f = qg8_file_open(filename, QG8_MODE_WRITE)
    except Exception as exc:
        raise IOError("Could not open file in write mode") from exc

    success = 1
    for chunk in graph.chunks:
        success *= qg8_file_write_chunk(qg8f, chunk)
    qg8_file_flush(qg8f)
    qg8_file_close(qg8f)

    return success
5,355,479
def valid_post_author(user, post):
    """Return True if the post was created by the given user, else False"""
    return str(user.key().id()) == str(post.user.key().id())
5,355,480
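# Hypothetical handler snippet (GAE/webapp2-style, matching the .key().id()
# calls above) showing how the check is typically used before allowing edits:
def edit_post(self, post):
    if not valid_post_author(self.user, post):
        return self.redirect('/login')
    self.render('editpost.html', post=post)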
def listing(request, **kwargs): """view for processing and applying listings""" context = { 'view': 'listing', 'all_channels': CHANNELS, 'all_towns': TOWNS, 'method': request.method, 'actions': ['listing_parse', 'listing_apply'], } if request.method == 'GET': context['action'] = 'show_listingModal' return render(request, 'dvbboxes.html', context) elif request.method == 'POST': if 'listing/apply' in request.path: form = forms.ApplyListingForm(request.POST) if form.is_valid(): context['action'] = 'listing_apply' parsed_data = json.loads(form.cleaned_data['parsed_data']) service_id = form.cleaned_data['service_id'] towns = form.cleaned_data['towns'] if not towns: towns = TOWNS towns.sort() # apply listing to servers in towns days = [data['day'] for data in parsed_data] days = sorted( days, key=lambda x: time.mktime(time.strptime(x, '%d%m%Y')) ) response = dvbboxes.Listing.apply( parsed_data, service_id, towns ) # reorganize response by days result = collections.OrderedDict() for day in days: result[day] = collections.OrderedDict() bar = True for town, data in response.items(): for day, infos in data.items(): for server, statuses in infos.items(): foo = all(statuses.values()) result[day][server] = foo bar = bar and foo context['result'] = result return render(request, 'dvbboxes.html', context) else: context['errors'] = form.errors return render(request, 'dvbboxes.html', context) else: form = forms.UploadListingForm(request.POST, request.FILES) if form.is_valid(): filepath = handle_uploaded_file(request.FILES['filename']) listing = dvbboxes.Listing(filepath) # get listing object days = sorted( listing.days, key=lambda x: datetime.strptime(x, '%d%m%Y') ) # sort days in the listing if len(days) > 31: context['errors'] = ("Cannot process " "more than 31 days") return render(request, 'dvbboxes.html', context) context['action'] = 'listing_parse' missing_files = [ i for i, j in listing.filenames.items() if not j ] # detect missing files in the listing result = collections.OrderedDict() # prepare final result for day in days: result[day] = [] parsed_listing = listing.parse() json_result = [] for data in parsed_listing: infos = collections.OrderedDict() data = json.loads(data) json_result.append(data) day = data['day'] starts = [i for i in data if i != 'day'] starts = sorted( starts, key=lambda x: float(x.split('_')[1])) absent_files = 0 for start in starts: t, i = start.split('_') start_litteral = datetime.fromtimestamp( float(t)).strftime('%H:%M:%S') stop_litteral = datetime.fromtimestamp( float(t)+data[start]['duration']).strftime( '%d-%m-%Y %H:%M:%S') absent = not data[start]['duration'] if absent: absent_files += 1 filename = data[start]['filename'] infos[i] = [ start_litteral, filename, absent ] # we now define if the parsing is fine limit = datetime.strptime(day, '%d%m%Y') + timedelta(1) length_ok = ( datetime.fromtimestamp( float(t)+data[start]['duration']) >= limit ) if not absent_files and length_ok: success = 0 # green elif absent_files and length_ok: success = 1 # lightblue elif not absent_files and not length_ok: success = 2 # orange else: success = 3 # red result[day] = [infos, success, stop_litteral] context['days'] = days context['missing_files'] = missing_files context['result'] = result context['json_result'] = json.dumps(json_result) return render(request, 'dvbboxes.html', context) else: context['errors'] = form.errors return render(request, 'dvbboxes.html', context)
5,355,481
def get_rise_or_fall(U, V, Im, demo=0):
    """
    Get increase or decrease of intensity in flow direction:
    This finds us the front and the wake regions of each wave.
    """
    rr, cc = np.shape(Im)
    ax_x, ax_y = np.linspace(1, cc, cc), np.linspace(1, rr, rr)
    XX, YY = np.meshgrid(ax_x, ax_y)

    Velo_mag = np.hypot(U, V)
    nU = U / Velo_mag
    nV = V / Velo_mag

    lookahead = 3  # indices of nearby pixels, small span

    XX_next = np.round(XX + lookahead * nU)
    YY_next = np.round(YY + lookahead * nV)

    # interpolate the image at the "lookahead" positions downstream of each pixel
    Im_next = map_coordinates(
        Im, [YY_next.ravel(), XX_next.ravel()], order=3, mode="constant"
    ).reshape(Im.shape)

    # wavesign = np.sign(Im_next - Im)
    wavesign = Im_next < Im

    # optional debug visualization (demo == 2): show the front/wake map and
    # pause for interactive inspection
    if demo == 2:
        plt.close("all")
        plt.figure()
        plt.imshow(wavesign)
        plt.title("front and wakes areas")
        plt.xlabel("x (pixels)")
        plt.ylabel("y (pixels)")
        plt.show()

        breakpoint()

    return wavesign
5,355,482
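# Small synthetic example, assuming numpy, scipy.ndimage.map_coordinates and
# matplotlib are imported as the function above requires: a sine pattern with
# vertical wave fronts advected to the right, split into front/wake pixels.
import numpy as np

yy, xx = np.mgrid[0:64, 0:64]
Im = np.sin(xx / 5.0)                  # intensity field with vertical wave fronts
U = np.ones_like(Im, dtype=float)      # flow pointing in +x
V = np.zeros_like(Im, dtype=float)
front_wake = get_rise_or_fall(U, V, Im)  # boolean array, same shape as Im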
def has_vanity_name(func): """Decorator checking whether a command has been provided a vanity_name value""" @functools.wraps(func) async def wrapper(*args, **kwargs): vanity_name = args[1] if vanity_name is None: ctx = args[0] await ctx.send("Please provide a Steam vanity URL or steamid") return return await func(*args, **kwargs) return wrapper
5,355,483
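# Hypothetical discord.py-style command showing the decorator in use; the bot
# and ctx objects are assumptions, only the argument order (ctx first,
# vanity_name second) matters for the check above.
@bot.command(name="steam")
@has_vanity_name
async def steam(ctx, vanity_name: str = None):
    await ctx.send(f"Looking up {vanity_name}...")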
def get_data_providers( data_providers_configs: List[dict], data_providers_input: List[str] ) -> List[data.DataProvider]: """ Determines which data provider and in which order should be used. :param data_providers_configs: A list of data provider configurations :param data_providers_input: A list of data provider names :return: a list of data providers in order. """ logger = logging.getLogger(__name__) data_providers = [] for data_provider_config in data_providers_configs: data_provider_config["class"] = DATA_PROVIDER_MAP[data_provider_config["type"]]( **data_provider_config["parameters"] ) data_providers.append(data_provider_config) selected_data_providers = [] for data_provider_name in data_providers_input: found = False for data_provider_config in data_providers: if data_provider_config["name"] == data_provider_name: selected_data_providers.append(data_provider_config["class"]) found = True break if not found: logger.warning( "The following data provider could not be found: {}".format( data_provider_name ) ) if len(selected_data_providers) == 0: raise ValueError( "None of the selected data providers are available. The following data providers are valid " "options: " + ", ".join( data_provider["name"] for data_provider in data_providers_configs ) ) return selected_data_providers
5,355,484
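# Illustrative call with made-up provider names; the "type" values must be
# keys of DATA_PROVIDER_MAP and "parameters" are passed to the provider class.
data_providers_configs = [
    {"name": "primary", "type": "csv", "parameters": {"path": "data/primary.csv"}},
    {"name": "backup", "type": "api", "parameters": {"url": "https://example.org"}},
]
providers = get_data_providers(data_providers_configs, ["primary", "backup"])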
def geocode(geocoder_api_key, out_file, verbose, quiet): """Reverse geocode well locations with BC Geocoder API""" verbosity = verbose - quiet configure_logging(verbosity) # only process if output file does not already exist if not os.path.exists(out_file): # get wells csv as pandas dataframe df = get_gwells(os.path.join("data", "wells.csv")) # extract just id and coords well_locations = df[ ["well_tag_number", "longitude_Decdeg", "latitude_Decdeg"] ].to_dict("records") LOG.info("Reverse geocoding well locations") with open(out_file, "w", newline="") as csvfile: writer = csv.DictWriter( csvfile, fieldnames=ADDRESS_COLUMNS + ["well_tag_number"] ) writer.writeheader() with click.progressbar(well_locations) as bar: for row in bar: r = reverse_geocode( row["longitude_Decdeg"], row["latitude_Decdeg"], geocoder_api_key, ) r["well_tag_number"] = row["well_tag_number"] writer.writerow(r)
5,355,485
def _PredatorForFracas(config=None): """A helper to pass in the standard pipeline class.""" return PredatorForFracas(MOCK_GET_REPOSITORY, config or {})
5,355,486
def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids,
                           platform_restrictions=None):
    """Creates the extension setting for a list of Feed Items.

    Args:
        client: an AdWordsClient instance.
        feed_items: the list of all Feed Items.
        campaign_feed: the original Campaign Feed.
        feed_item_ids: the Ids of the feed items for which extension settings
            should be created.
        platform_restrictions: an optional Platform Restriction for the Feed items.
    """
    campaign_extension_setting_service = client.GetService(
        'CampaignExtensionSettingService', 'v201603')

    # CreateSitelinkFeedItem returns a dict, so build a plain list of feed
    # items (the original set literal would fail on unhashable dicts).
    extension_feed_items = [
        CreateSitelinkFeedItem(feed_items, feed_item_id)
        for feed_item_id in feed_item_ids
    ]

    extension_setting = {
        'extensions': extension_feed_items
    }

    if platform_restrictions:
        extension_setting['platformRestrictions'] = platform_restrictions

    campaign_extension_setting = {
        'campaignId': campaign_feed['campaignId'],
        'extensionType': 'SITELINK',
        'extensionSetting': extension_setting
    }

    operation = {
        'operand': campaign_extension_setting,
        'operator': 'ADD'
    }

    campaign_extension_setting_service.mutate([operation])
5,355,487
def py_SurfStatSmooth(Y, surf, FWHM): """Smooths surface data by repeatedly averaging over edges. Parameters ---------- Y : numpy array of shape (n,v) or (n,v,k) surface data, v=#vertices, n=#observations, k=#variates. surf : a dictionary with key 'tri' or 'lat', or a BSPolyData object. surf['tri'] = numpy array of shape (t,3), triangle indices, or surf['lat'] = numpy array of shape (nx,ny,nz), 1=in, 0=out, (nx,ny,nz) = size(volume). FWHM : approximate FWHM of Gaussian smoothing filter, in mesh units. Returns ------- Y : numpy array of shape (n,v) or (n,v,k), smoothed data. """ niter = int(np.ceil(pow(FWHM,2) / (2*np.log(2)))) if isinstance(Y, np.ndarray): Y = np.array(Y, dtype='float') if np.ndim(Y) == 2: n, v = np.shape(Y) k = 1 isnum = True elif np.ndim(Y) == 3: n, v, k = np.shape(Y) isnum = True edg = py_SurfStatEdg(surf) + 1 agg_1 = aggregate(edg[:,0], 2, size=(v+1)) agg_2 = aggregate(edg[:,1], 2, size=(v+1)) Y1 = (agg_1 + agg_2)[1:] if n>1: print(' %i x %i surfaces to smooth, %% remaining: 100 '%(n, k)) n10 = np.floor(n/10) for i in range(0, n): if n10 != 0 and np.remainder(i+1, n10) == 0: print('%s ' % str(int(100-(i+1)/n10*10)), end = '') for j in range(0, k): if isnum: if np.ndim(Y) == 2: Ys = Y[i,:] elif np.ndim(Y) == 3: Ys = Y[i,:,j] for itera in range(1, niter+1): Yedg = Ys[edg[:,0]-1] + Ys[edg[:,1]-1]; agg_tmp1 = aggregate(edg[:,0], Yedg, size=(v+1))[1:] agg_tmp2 = aggregate(edg[:,1], Yedg, size=(v+1))[1:] Ys = (agg_tmp1 + agg_tmp2) / Y1 if np.ndim(Y) == 2: Y[i,:] = Ys elif np.ndim(Y) == 3: Y[i,:,j] = Ys if n>1: print('Done') return Y
5,355,488
def arm_name_to_sort_key(arm_name: str) -> Tuple[str, int, int]: """Parses arm name into tuple suitable for reverse sorting by key Example: arm_names = ["0_0", "1_10", "1_2", "10_0", "control"] sorted(arm_names, key=arm_name_to_sort_key, reverse=True) ["control", "0_0", "1_2", "1_10", "10_0"] """ try: trial_index, arm_index = arm_name.split("_") return ("", -int(trial_index), -int(arm_index)) except (ValueError, IndexError): return (arm_name, 0, 0)
5,355,489
def run(path_main, local_data_path): """Function run script """ print("... start script {}".format(os.path.basename(__file__))) # Load data and assumptions base_data = data_loader.load_paths(path_main, local_data_path) base_data = data_loader.load_fuels(base_data) base_data['assumptions'] = assumptions.load_assumptions(base_data) base_data['weather_stations'], temperature_data = data_loader.load_data_temperatures( os.path.join(base_data['paths']['path_scripts_data'], 'weather_data') ) base_data['temperature_data'] = {} for weather_station, base_yr_temp in temperature_data.items(): base_data['temperature_data'][weather_station] = {2015: base_yr_temp} # IMPROVE TODO: LOAD FLOOR AREA DATA # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> base_data = data_loader.dummy_data_generation(base_data) # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< # Disaggregation base_data = disaggregate_base_demand(base_data) #Write to csv file disaggregated demand write_disagg_fuel( os.path.join(path_main, 'data', 'data_scripts', 'disaggregated', 'rs_fueldata_disagg.csv'), base_data['rs_fueldata_disagg']) write_disagg_fuel_sector( os.path.join(path_main, 'data', 'data_scripts', 'disaggregated', 'ss_fueldata_disagg.csv'), base_data['ss_fueldata_disagg']) write_disagg_fuel_sector( os.path.join(path_main, 'data', 'data_scripts', 'disaggregated', 'is_fueldata_disagg.csv'), base_data['is_fueldata_disagg']) write_disagg_fuel_ts( os.path.join(path_main, 'data', 'data_scripts', 'disaggregated', 'ts_fueldata_disagg.csv'), base_data['ts_fueldata_disagg']) print("... finished script {}".format(os.path.basename(__file__))) return
5,355,490
def test_imprint(app): """Test imprints.""" with app.app_context(): check_transformation( """ <datafield tag="260" ind1=" " ind2=" "> <subfield code="a">Sydney</subfield> <subfield code="b">Allen &amp; Unwin</subfield> <subfield code="c">2013</subfield> <subfield code="g">2015</subfield> </datafield> """, { "publication_year": "2013", "imprint": { "place": "Sydney", "publisher": "Allen & Unwin", "date": "2013", "reprint_date": "2015", }, }, )
5,355,491
def run_query_row(cur: Cursor, sql: str, params: Optional[Mapping[str, Any]] = None, **kwargs: Any
                  ) -> Optional[skytools.dbdict]:
    """ Helper function for when all you need is a parameterized execute
        that fetches a single row.  Returns None if no row is found.
    """
    params = params or kwargs
    rows = run_query(cur, sql, params)
    if len(rows) == 0:
        return None
    return rows[0]
5,355,492
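# Usage sketch, assuming an open DB-API cursor and the companion run_query()
# helper used above:
row = run_query_row(cur, "SELECT * FROM users WHERE user_id = %(uid)s", uid=42)
if row is None:
    print("no such user")
else:
    print(row["user_id"])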
def vortex_indicator(high_arr, low_arr, close_arr, n): """Calculate the Vortex Indicator for given data. Vortex Indicator described here: http://www.vortexindicator.com/VFX_VORTEX.PDF :param high_arr: high price of the bar, expect series from cudf :param low_arr: low price of the bar, expect series from cudf :param close_arr: close price of the bar, expect series from cudf :param n: time steps to do EWM average :return: Vortex Indicator in cudf.Series """ TR = true_range(high_arr.data.to_gpu_array(), low_arr.data.to_gpu_array(), close_arr.data.to_gpu_array()) VM = lowhigh_diff(high_arr.data.to_gpu_array(), low_arr.data.to_gpu_array()) VI = division(Rolling(n, VM).sum(), Rolling(n, TR).sum()) return cudf.Series(VI)
5,355,493
def getServiceById(serviceId: str, **kwargs) -> Dict: """Retrieve service by its identifier. Args: serviceId: Identifier of service to be retrieved. Returns: Service object. """ db_collection_service = ( current_app.config['FOCA'].db.dbs['serviceStore'] .collections['services'].client ) obj = db_collection_service.find_one({"id": serviceId}) if not obj: raise NotFound del obj["_id"] return obj
5,355,494
def replace_module_prefix( state_dict: Dict[str, Any], prefix: str, replace_with: str = "", ignore_prefix: str = "" ): """ Remove prefixes in a state_dict needed when loading models that are not VISSL trained models. Specify the prefix in the keys that should be removed. Added by DLM contributors: ignore_prefix is used to ignore certain keys in the state dict """ state_dict = { (key.replace(prefix, replace_with, 1) if key.startswith(prefix) else key): val for (key, val) in state_dict.items() if ((not key.startswith(ignore_prefix)) or ignore_prefix == "") } return state_dict
5,355,495
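# Self-contained example: strip a DataParallel-style "module." prefix while
# dropping keys under a "head." prefix.
state_dict = {
    "module.backbone.weight": 1,
    "module.backbone.bias": 2,
    "head.fc.weight": 3,
}
cleaned = replace_module_prefix(state_dict, "module.", ignore_prefix="head.")
print(cleaned)
# {'backbone.weight': 1, 'backbone.bias': 2}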
def create_csv(parent_dir, tsv_folder, export_csv=True):
    """
    The function reads all .tsv files, combines them into a single dataframe,
    and optionally exports the result as a .csv file under the parent directory

    Args:
        parent_dir (string) : The working directory you are working with
        tsv_folder (string) : The name of the folder where the .tsv files are stored
                              (Attention: it should be inside your parent_dir)
        export_csv (Boolean, Optional) : Set to False if you don't want to store the
                              .csv file under parent_dir/files. Default is True

    Returns:
        A pandas dataframe of all combined .tsv files
    """
    if parent_dir[-1] != '/':
        parent_dir += '/'

    os.chdir(parent_dir + tsv_folder)
    extension = 'tsv'
    all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
    combined_csv = pd.concat([pd.read_csv(f, delimiter='\t') for f in all_filenames])
    combined_csv.reset_index(inplace=True, drop=True)
    print("The csv file has been generated! With " + str(len(combined_csv)) + " entries.")

    if export_csv:
        os.chdir(parent_dir)
        combined_csv.to_csv("files/combined_csv.csv", index=False, encoding='utf-8-sig')
        print("The csv file has been exported to " + parent_dir + "files/")
        return combined_csv
    else:
        return combined_csv
5,355,496
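# Usage sketch (hypothetical paths): combine every .tsv under ~/project/raw
# into one dataframe; assumes a files/ sub-directory already exists under the
# parent directory for the exported csv.
combined = create_csv("/home/user/project", "raw", export_csv=True)
print(len(combined))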
def crop(img, left, top, right, bottom):
    """
    Crop rectangle from image.

    Inputs:
        img - The image to crop.
        left - The leftmost column index to keep.
        top - The topmost row index to keep.
        right - The rightmost column index (exclusive).
        bottom - The bottommost row index (exclusive).

    Outputs:
        img - The cropped image.
    """
    # rows are indexed top-to-bottom, columns left-to-right
    return img[top:bottom, left:right]
5,355,497
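# Runnable example on a synthetic image: keep rows 10..19 and columns 30..49.
import numpy as np

img = np.arange(100 * 200).reshape(100, 200)
patch = crop(img, left=30, top=10, right=50, bottom=20)
print(patch.shape)  # (10, 20)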
def cl_file_with_height(tmp_path): """Create netcdf file for ``cl`` with hybrid height coordinate.""" nc_path = os.path.join(tmp_path, 'cl_hybrid_height.nc') dataset = Dataset(nc_path, mode='w') create_hybrid_height_file(dataset, 'cl') dataset.close() return nc_path
5,355,498
def get_hosts(network):
    """get_hosts() will return all the hosts within a provided network range"""
    network = ipaddress.IPv4Network(network, strict=False)
    hosts = [str(host) for host in network.hosts()]
    return hosts
5,355,499
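# Example (assumes ipaddress is imported, as the function above requires):
# the two usable host addresses of a /30 network.
print(get_hosts("192.168.1.0/30"))
# ['192.168.1.1', '192.168.1.2']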