Columns: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
def create_and_assign_household(humans_with_same_house, housetype, conf, city, allocated_humans):
    """
    Creates a residence and allocates humans in `humans_with_same_house` to the same.

    Args:
        humans_with_same_house (list): a list of `Human` objects which are to be allocated to the same residence of type `type`.
        housetype (HouseType): type of allocation
        conf (dict): yaml configuration of the experiment
        city (covid19sim.location.City): simulator's city object
        allocated_humans (list): a list of humans that have been allocated a household

    Returns:
        allocated_humans (list): a list of humans that have been allocated a household
    """
    assert all(human not in allocated_humans for human in humans_with_same_house), f"reassigning household to human"

    res = Household(
        env=city.env,
        rng=np.random.RandomState(city.rng.randint(2 ** 16)),
        conf=conf,
        name=f"HOUSEHOLD:{len(city.households)}",
        location_type="HOUSEHOLD",
        lat=city.rng.randint(*city.x_range),
        lon=city.rng.randint(*city.y_range),
        area=None,
        capacity=None,
    )

    for human in humans_with_same_house:
        allocated_humans = _assign_household(human, res, allocated_humans)

    res.allocation_type = housetype
    city.households.add(res)
    return allocated_humans
594830aec1c820de94f7277499239f19e51ba0de
3,654,300
from sys import path
import json
import logging


def Rdf2Marc(**kwargs):
    """Runs rdf2marc on a BF Instance URL"""
    task_instance = kwargs["task_instance"]
    instance_uri = task_instance.xcom_pull(task_ids="sqs-sensor")
    instance_path = urlparse(instance_uri).path
    instance_id = path.split(instance_path)[-1]
    sinopia_env = kwargs.get("sinopia_env", "dev")
    rdf2marc_lambda = f"{getenv('RDF2MARC_LAMBDA')}_{sinopia_env.upper()}"
    s3_bucket = f"{getenv('MARC_S3_BUCKET')}_{sinopia_env.upper()}"
    s3_record_path = f"airflow/{instance_id}/record"
    marc_path = f"{s3_record_path}.mar"
    marc_text_path = f"{s3_record_path}.txt"
    marc_err_path = f"{s3_record_path}.err"
    lambda_hook = AwsLambdaHook(
        rdf2marc_lambda,
        log_type="None",
        qualifier="$LATEST",
        invocation_type="RequestResponse",
        config=None,
        aws_conn_id="aws_lambda_connection",
    )
    params = {
        "instance_uri": instance_uri,
        "bucket": s3_bucket,
        "marc_path": marc_path,
        "marc_txt_path": marc_text_path,
        "error_path": marc_err_path,
    }
    result = lambda_hook.invoke_lambda(payload=json.dumps(params))
    print(f"RESULT = {result['StatusCode']}")
    if result["StatusCode"] == 200:
        return instance_id
    logging.error(
        f"RDF2MARC conversion failed for {instance_uri}: {result['FunctionError']}"
    )
    raise Exception()
3b312d8c1d51d749efb5fad1aac7270a719a0006
3,654,301
import torch


def make_positions(tensor, padding_idx):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The series of casts and type-conversions here are carefully
    # balanced to both work with ONNX export and XLA. In particular XLA
    # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
    # how to handle the dtype kwarg in cumsum.
    mask = tensor.ne(padding_idx).int()
    return (
        torch.cumsum(mask, dim=1).type_as(mask) * mask
    ).long() + padding_idx
f86f5485ddd3400161d9e233ad66cc492fd6d277
3,654,302
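A minimal usage sketch for the make_positions snippet above, assuming PyTorch is available and using an illustrative padding index of 1:

import torch

# Two sequences padded with index 1; real tokens get positions 2, 3, 4, ...
tokens = torch.tensor([[5, 7, 9, 1, 1],
                       [4, 1, 1, 1, 1]])
positions = make_positions(tokens, padding_idx=1)
# tensor([[2, 3, 4, 1, 1],
#         [2, 1, 1, 1, 1]])  -- padded slots keep padding_idx itself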
import click


def init():
    """Top level command handler."""

    @click.command()
    @click.option('--policy-servers', type=cli.LIST, required=True,
                  help='Warpgate policy servers')
    @click.option('--service-principal', type=str, default='host',
                  help='Warpgate service principal.')
    @click.option('--policy', type=str, required=True,
                  envvar='WARPGATE_POLICY',
                  help='Warpget policy to use')
    @click.option('--tun-dev', type=str, required=True,
                  help='Device to use when establishing tunnels.')
    @click.option('--tun-addr', type=str, required=False,
                  help='Local IP address to use when establishing tunnels.')
    def warpgate(policy_servers, service_principal, policy, tun_dev, tun_addr):
        """Run warpgate connection manager."""
        _LOGGER.info(
            'Launch client => %s, tunnel: %s[%s], policy: %s, principal: %s',
            policy_servers,
            tun_dev,
            tun_addr,
            policy,
            service_principal,
        )
        # Never exits
        client.run_client(
            policy_servers, service_principal, policy, tun_dev, tun_addr
        )

    return warpgate
fcadaa48fead63b10431bf509f4f4398216be564
3,654,303
def load(file):
    """unpickle an object from a file"""
    pik = Unpickler(file)
    pik._main = _main_module
    obj = pik.load()
    if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'):
        # point obj class to main
        try:
            obj.__class__ = getattr(pik._main, type(obj).__name__)
        except (AttributeError, TypeError):
            pass  # defined in a file
    #_main_module.__dict__.update(obj.__dict__) #XXX: should update globals ?
    return obj
22050da1c2ff891180ce9581a1cf2c6f1cf9e0b9
3,654,304
import hashlib
import binascii


def verify_password(password: str, salt: str, key: str) -> bool:
    """
    Verify the given password against the given salt and key.

    :param password: The password to check.
    :param salt: The salt to use. Should be encoded in ascii.
    :param key: The key to use. Should be encoded in ascii.
    :returns: True if given a valid password, False otherwise.
    """
    LOGGER.debug("Verifying password.")
    new_key = hashlib.pbkdf2_hmac(
        'sha256',
        password.encode('utf-8'),
        salt.encode('ascii'),
        100000
    )
    return binascii.hexlify(new_key).decode() == key
e9aeea4731719a67417d429e84f4c5745c5edb0b
3,654,305
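A small companion sketch for the verify_password snippet above, showing one way a matching salt/key pair of the expected shape could be produced. The hex-encoded salt here is an assumption (any ASCII string works for the check itself), and the snippet's module-level LOGGER is assumed to exist, so the verification calls are shown as comments only:

import binascii
import hashlib
import os

password = "correct horse battery staple"
salt = binascii.hexlify(os.urandom(16)).decode("ascii")   # ASCII-safe salt
key = binascii.hexlify(
    hashlib.pbkdf2_hmac("sha256", password.encode("utf-8"),
                        salt.encode("ascii"), 100000)
).decode()

# verify_password(password, salt, key)       -> True
# verify_password("wrong guess", salt, key)  -> False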
def setup(app):
    """Set up the Sphinx extension."""
    app.add_config_value(
        name="doctr_versions_menu_conf",
        default={},
        rebuild="html",
    )
    app.connect('builder-inited', ext.add_versions_menu_js_file)
    app.connect('build-finished', ext.cleanup)
    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
01173da317d1058811b01842be8492265ac0a62b
3,654,306
import os


def head(file):
    """Returns the first/head line of the file"""
    first = ''
    if os.path.isfile(file):
        with open_file_read(file) as f_in:
            try:
                first = f_in.readline().rstrip()
            except UnicodeDecodeError:
                pass
        return first
    else:
        raise AppArmorException(_('Unable to read first line from %s: File Not Found') % file)
c6ab4c2c5dd98d228b806d9c878e56662162d5d7
3,654,307
import click


def get_help_recursive(group, ctx, commands):
    """
    Returns help for arbitrarily nested subcommands of the given click.Group.
    """
    try:
        command_name = commands.pop(0)
        group = group.get_command(ctx, command_name)
        if not group:
            raise click.ClickException('Invalid command: {}'.format(command_name))
    except IndexError:
        # end of subcommand chain
        return group.get_help(ctx)
    except AttributeError:
        # group is actually a command with no children
        return group.get_help(ctx)
    return get_help_recursive(group, ctx, commands)
412f0cb9e9aa1f19caf4a4a5db95c8040a0d2f36
3,654,308
def clump_tracker(fprefix, param=None, directory=None, nsmooth=32, verbose=True):
    """
    Finds and tracks clumps over a simulation with multiple time steps and
    calculates various physical properties of the clumps.

    Runs all the steps necessary to find/track clumps, these are:

    get_fnames
    pFind_clumps
    pClump_properties
    pLink2
    multilink
    build_clumps

    If the iord property is not found, the linking will only work if the
    number of particles remains constant through the simulation

    **ARGUMENTS**

    fprefix : str
        Prefix of the simulation outputs
    param : str (recommended)
        Filename of a .param file for the simulation
    directory : str (optional)
        Directory to search through. Default is current working directory
    nsmooth : int (optional)
        Number of nearest neighbors used for particle smoothing in the
        simulation. This is used in the definition of a density threshold
        for clump finding.
    verbose : bool (optional)
        Verbosity flag. Default is True

    **RETURNS**

    clump_list : list
        A list containing dictionaries for all clumps found in the simulation

    See clump_properties for a list of the properties calculated for clumps
    """
    # Get a list of all snapshot files
    fnames = get_fnames(fprefix, directory)
    nfiles = len(fnames)

    # Run the clump (halo) finder
    if verbose:
        print("\n\nRunning clump finder on {} files\n\n".format(nfiles))
    clumpnum_list = pFind_clumps(fnames, nsmooth, param, verbose=verbose)
    nclumps = np.zeros(nfiles, dtype=int)

    for i, clumpnums in enumerate(clumpnum_list):
        nclumps[i] = clumpnums.max()

    if nclumps.max() <= 0:
        if verbose:
            print('No clumps found')
        return []

    # Calculate the physical properties of the clumps
    if verbose:
        print("\n\nCalculating the physical properties of clumps\n\n")
    properties = pClump_properties(fnames, clumpnum_list)

    # Link clumps on consecutive time-steps
    if verbose:
        print("\n\nLinking Clumps\n\n")
    link_list = pLink2(properties)

    # Link on multiple time-steps
    multilink_list = multilink(link_list)

    # Build the clumps
    clump_list = build_clumps(multilink_list, properties, fnames, param)

    return clump_list
bc72ae48e152ada388aa2421290d41d9865fa439
3,654,309
def OptimizeGraph(config_proto, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None, strip_default_attributes=False): """Optimize the provided metagraph. For best results, the signature_def field in `metagraph` should be populated with information about input (feed) and output (fetch) tensors. Args: config_proto: a ConfigProto protobuf. metagraph: a MetagraphDef protobuf. verbose: whether to log optimization results. graph_id: a string identifying this graph. cluster: a grappler cluster object representing hardware resources available to run this graph. strip_default_attributes: whether graph node attributes having default values should be removed after all the optimization passes. This option is useful if the resulting graph will be executed by an older process that might not know some of the recently added attributes. """ if not isinstance(config_proto, config_pb2.ConfigProto): raise TypeError('Argument `config_proto` should be a tf.ConfigProto, ' f'received type: {type(config_proto).__name__}') if cluster is not None: out_graph = tf_opt.TF_OptimizeGraph(cluster.tf_cluster, config_proto.SerializeToString(), metagraph.SerializeToString(), verbose, graph_id, strip_default_attributes) else: # Currently Grappler assumes no more than 1 sessions alive globally. # See comments on SingleMachine::Provision(), hence we use the following # lock to prevent concurrent access to the following code. with _OPTIMIZE_GRAPH_CLUSTER_LOCK: cluster = gcluster.Cluster() try: out_graph = tf_opt.TF_OptimizeGraph(cluster.tf_cluster, config_proto.SerializeToString(), metagraph.SerializeToString(), verbose, graph_id, strip_default_attributes) finally: # Force the cleanup instead of waiting on python GC to cleanup the # temporary cluster we've created. Otherwise subsequent calls might # not have a clean slate because GC may not have run yet. cluster.Shutdown() return graph_pb2.GraphDef().FromString(out_graph)
0d1fc74ffe6c16da953b9ac711534b125afb82d6
3,654,310
def files_by_date():
    """TODO
    ---
    responses:
      '200':
        description: TODO
    """
    return redirect("https://explorer.ooni.org/search", 301)
b1d184e01d7c08eac2ee4dad4180213d8f770c65
3,654,311
def parse_imei(msg):
    """Parse an IMEI (in BCD format) into ASCII format."""
    imei = ''
    for octet in msg[1:]:
        imei += imei_parse_nibble(ord(octet) & 0x0f)
        imei += imei_parse_nibble(ord(octet) >> 4)
    return imei
664d9472b51dd806b28b2b2ecee1047307e4e15a
3,654,312
def get_blender_frame_time(skeleton, frame_id, rate, time_scale, actor_id):
    """Goes from multi-actor integer frame_id to modded blender float time."""
    # stays within video frame limits
    frame_id2 = skeleton.mod_frame_id(frame_id=frame_id)  # type: int
    time_ = skeleton.get_time(frame_id)
    if actor_id > 0:
        time_ = frame_id2 / rate
        print('time is {} for {} ({}), orig time: {}, rate: {}, '
              'time_scale: {}'
              .format(time_, frame_id, frame_id2, skeleton.get_time(frame_id),
                      rate, time_scale))
    frame_time = time_ * time_scale
    return frame_time
ca8ab45dbbb1b28b05894b9dd92529245441c60b
3,654,313
from ..plots.wx_symbols import wx_code_to_numeric from datetime import datetime import contextlib def parse_metar(metar_text, year, month, station_metadata=station_info): """Parse a METAR report in text form into a list of named tuples. Parameters ---------- metar_text : str The METAR report station_metadata : dict Mapping of station identifiers to station metadata year : int Reported year of observation for constructing 'date_time' month : int Reported month of observation for constructing 'date_time' Returns ------- metar : namedtuple Named tuple of parsed METAR fields Notes ----- Returned data has named tuples with the following attributes: * 'station_id': Station Identifier (ex. KLOT) * 'latitude': Latitude of the observation, measured in degrees * 'longitude': Longitude of the observation, measured in degrees * 'elevation': Elevation of the observation above sea level, measured in meters * 'date_time': Date and time of the observation, datetime object * 'wind_direction': Direction the wind is coming from, measured in degrees * 'wind_speed': Wind speed, measured in knots * 'wind_gust': Wind gust, measured in knots * 'current_wx1': Current weather (1 of 3) * 'current_wx2': Current weather (2 of 3) * 'current_wx3': Current weather (3 of 3) * 'skyc1': Sky cover (ex. FEW) * 'skylev1': Height of sky cover 1, measured in feet * 'skyc2': Sky cover (ex. OVC) * 'skylev2': Height of sky cover 2, measured in feet * 'skyc3': Sky cover (ex. FEW) * 'skylev3': Height of sky cover 3, measured in feet * 'skyc4': Sky cover (ex. CLR) * 'skylev4:': Height of sky cover 4, measured in feet * 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values * 'temperature': Temperature, measured in degrees Celsius * 'dewpoint': Dewpoint, measured in degrees Celsius * 'altimeter': Altimeter value, measured in inches of mercury * 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_ Attachment IV * 'visibility': Visibility distance, measured in meters * 'remarks': Remarks (unparsed) in the report """ # Decode the data using the parser (built using Canopy) the parser utilizes a grammar # file which follows the format structure dictated by the WMO Handbook, but has the # flexibility to decode the METAR text when there are missing or incorrectly # encoded values tree = parse(metar_text) # Station ID which is used to find the latitude, longitude, and elevation station_id = tree.siteid.text.strip() # Extract the latitude and longitude values from 'master' dictionary try: info = station_metadata[station_id] lat = info.latitude lon = info.longitude elev = info.altitude except KeyError: lat = np.nan lon = np.nan elev = np.nan # Set the datetime, day, and time_utc try: day_time_utc = tree.datetime.text.strip() day = int(day_time_utc[0:2]) hour = int(day_time_utc[2:4]) minute = int(day_time_utc[4:6]) date_time = datetime(year, month, day, hour, minute) except ValueError: date_time = np.nan # Set the wind values wind_units = 'kts' try: # If there are missing wind values, set wind speed and wind direction to nan if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''): wind_dir = np.nan wind_spd = np.nan # If the wind direction is variable, set wind direction to nan but keep the wind speed else: wind_spd = float(tree.wind.wind_spd.text) if 
'MPS' in tree.wind.text: wind_units = 'm/s' wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots') if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'): wind_dir = np.nan else: wind_dir = int(tree.wind.wind_dir.text) # If there are any errors, return nan except ValueError: wind_dir = np.nan wind_spd = np.nan # Parse out the wind gust field if 'G' in tree.wind.text: wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]), wind_units).m_as('knots') else: wind_gust = np.nan # Handle visibility try: if tree.vis.text.endswith('SM'): visibility = 0 # Strip off the SM and any whitespace around the value and any leading 'M' vis_str = tree.vis.text[:-2].strip().lstrip('M') # Case of e.g. 1 1/4SM if ' ' in vis_str: whole, vis_str = vis_str.split(maxsplit=1) visibility += int(whole) # Handle fraction regardless if '/' in vis_str: num, denom = vis_str.split('/', maxsplit=1) visibility += int(num) / int(denom) else: # Should be getting all cases of whole number without fraction visibility += int(vis_str) visibility = units.Quantity(visibility, 'miles').m_as('meter') # CAVOK means vis is "at least 10km" and no significant clouds or weather elif 'CAVOK' in tree.vis.text: visibility = 10000 elif not tree.vis.text or tree.vis.text.strip() == '////': visibility = np.nan else: # Only worry about the first 4 characters (digits) and ignore possible 'NDV' visibility = int(tree.vis.text.strip()[:4]) # If there are any errors, return nan except ValueError: visibility = np.nan # Set the weather symbols # If the weather symbol is missing, set values to nan current_wx = [] current_wx_symbol = [] if tree.curwx.text.strip() not in ('', '//', 'NSW'): current_wx = tree.curwx.text.strip().split() # Handle having e.g. '+' and 'TSRA' parsed into separate items if current_wx[0] in ('-', '+') and current_wx[1]: current_wx[0] += current_wx[1] current_wx.pop(1) current_wx_symbol = wx_code_to_numeric(current_wx).tolist() while len(current_wx) < 3: current_wx.append(np.nan) while len(current_wx_symbol) < 3: current_wx_symbol.append(0) # Set the sky conditions skyc = [np.nan] * 4 skylev = [np.nan] * 4 if tree.skyc.text[1:3] == 'VV': skyc[0] = 'VV' level = tree.skyc.text.strip()[2:5] skylev[0] = np.nan if '/' in level else 100 * int(level) else: for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)): cover = part[:3] level = part[3:6] # Strips off any ending text like in FEW017CB if '/' not in cover: skyc[ind] = cover if level and '/' not in level: with contextlib.suppress(ValueError): skylev[ind] = float(level) * 100 # Set the cloud cover variable (measured in oktas) if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text: cloudcover = 8 elif 'BKN' in tree.skyc.text: cloudcover = 6 elif 'SCT' in tree.skyc.text: cloudcover = 4 elif 'FEW' in tree.skyc.text: cloudcover = 2 elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text): cloudcover = 0 else: cloudcover = 10 # Set the temperature and dewpoint temp = np.nan dewp = np.nan if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM': with contextlib.suppress(ValueError): temp = float(tree.temp_dewp.temp.text[-2:]) if 'M' in tree.temp_dewp.temp.text: temp *= -1 with contextlib.suppress(ValueError): dewp = float(tree.temp_dewp.dewp.text[-2:]) if 'M' in tree.temp_dewp.dewp.text: dewp *= -1 # Set the altimeter value and sea level pressure if tree.altim.text: val = float(tree.altim.text.strip()[1:5]) altim = val / 100 if val > 1100 else 
units.Quantity(val, 'hPa').m_as('inHg') else: altim = np.nan # Strip off extraneous stuff off the remarks section remarks = tree.remarks.text.lstrip().rstrip('= ') if remarks.startswith('RMK'): remarks = remarks[3:].strip() # Returns a named tuple with all the relevant variables return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust, visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0], skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp, dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2], remarks)
3660aeda77343c1bb21729b6b0d36ce597c5ca0d
3,654,314
def update_facemap_material(self, context):
    """ Assign the updated material to all faces belonging to active facemap """
    set_material_for_active_facemap(self.material, context)
    return None
61e5f05cd059ca7646609f4d65f0bb86aaaebc8a
3,654,315
def calculate_accuracy(y_true, y_pred):
    """Calculates the accuracy of the model.

    Arguments:
        y_true {numpy.array} -- the true labels corresponding to each input
        y_pred {numpy.array} -- the model's predictions

    Returns:
        accuracy {str} -- the accuracy of the model (%)
    """
    correctpred, total = 0, 0
    for index in range(len(y_pred)):
        if(y_pred[index] == y_true[index]):
            correctpred = correctpred + 1
        total = total + 1
    return 'accuracy=' + str((correctpred * 100) / total)
1ea14f8e4f50d13e2ae557aeec466c5372b99171
3,654,316
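A quick sanity check for the calculate_accuracy snippet above, assuming list or array inputs of equal length:

calculate_accuracy([1, 0, 1, 1], [1, 1, 1, 0])  # 2 of 4 match -> 'accuracy=50.0'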
def resolve_diff_args(args):
    """Resolve ambiguity of path vs base/remote for git:

    Cases:
     - No args: Use defaults
     - One arg: Either base or path, check with is_gitref.
     - Two args or more: Check if first two are base/remote by is_gitref
    """
    base = args.base
    remote = args.remote
    paths = getattr(args, 'paths', None)
    if not paths:
        paths = None
    if remote is None and paths is None:
        # One arg only:
        if not is_gitref(base):
            paths = base
            base = 'HEAD'
    # Two or more args:
    elif paths is None:
        # Two exactly
        # - Two files (not git-mode, do nothing)
        # - Base gitref one file (remote=None, path = file)
        # - Base gitref remote gitref (do nothing)
        if is_gitref(base) and not is_gitref(remote):
            paths = remote
            remote = None
    elif base and remote:
        # Three or more
        if not is_gitref(base):
            paths = [base, remote] + paths
            base = remote = None
        elif is_gitref(base) and not is_gitref(remote):
            paths = [remote] + paths
            remote = None
    return base, remote, paths
6260d69bffd8a4a4d35471c5710c9a86324f9549
3,654,317
def get_coco_metrics_from_gt_and_det(groundtruth_dict, detection_boxes_list, category=''):
    """
    Get COCO metrics given dictionary of groundtruth dictionary and the list of detections.
    """
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(detection_boxes_list)
    box_evaluator = coco_tools.COCOEvalWrapper(coco_wrapped_groundtruth,
                                               coco_wrapped_detections,
                                               agnostic_mode=False)
    box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
        include_metrics_per_category=False,
        all_metrics_per_category=False,
        super_categories=None
    )
    box_metrics.update(box_per_category_ap)
    box_metrics = {'DetectionBoxes_' + category + key: value
                   for key, value in iter(box_metrics.items())}
    return box_metrics
fbf6ca237f43c74ebe37772006c856f3a1850683
3,654,318
def createDataset(dataPath,dStr,sigScale=1): """ dStr from ["20K", "1M", "10M"] """ print("Loading D1B dataset...") ft1_d = loadD1B(dataPath,dStr,w=40) if dStr=="20K": ft1_d = ft1_d[:10000,:] print("Running PCA on D1B") pcaD1B = PCA(n_components=ft1_d.shape[1],random_state=0) ft1_d = pcaD1B.fit_transform(ft1_d) print("Loading FAS dataset") ft1_f, ft2_f, gt_f, pos1_f, pos2_f = loadFAS(dataPath) if dStr=="20K": ft1_f = ft1_f[:10000,:] ft2_f = ft2_f[:10000,:] print("Running PCA on FAS") pcaFAS = PCA(n_components=ft1_d.shape[1],random_state=0) ft1_f = pcaFAS.fit_transform(ft1_f) ft2_f = pcaFAS.transform(ft2_f) print("Re-scaling Variance of D1B using FAS data") ft1_d = np.std(ft1_f,axis=0)*ft1_d/np.std(ft1_d,axis=0) print("Computing a new version of D1B to be used as a query traverse") ftDiff = calcChange(dataPath) noiseVar = np.var(ftDiff,axis=0) noiseMean = np.mean(ftDiff,axis=0) print("\t Incorporating the 'change' from FAS along with some noise") ft1_n = addNoiseToFt(ft1_d,noiseMean,noiseVar,sigScale) print("Concatenating the two datasets") ft1 = np.concatenate([ft1_d,ft1_f],axis=0) ft2 = np.concatenate([ft1_n,ft2_f],axis=0) del ft1_d, ft1_n, ft1_f, ft2_f return ft1, ft2
02cf1b4a5708abf6d7e3fee323c5fb096fdbbffb
3,654,319
def generate_interblock_leader():
    """Generates the leader between normal blocks"""
    return b'\x55' * 0x2
99878b67a31a4169bc73ad9b9b249a981a22177f
3,654,320
import itertools import warnings def discover_handlers(entrypoint_group_name="databroker.handlers", skip_failures=True): """ Discover handlers via entrypoints. Parameters ---------- entrypoint_group_name: str Default is 'databroker.handlers', the "official" databroker entrypoint for handlers. skip_failures: boolean True by default. Errors loading a handler class are converted to warnings if this is True. Returns ------- handler_registry: dict A suitable default handler registry """ group = entrypoints.get_group_named(entrypoint_group_name) group_all = entrypoints.get_group_all(entrypoint_group_name) if len(group_all) != len(group): # There are some name collisions. Let's go digging for them. for name, matches in itertools.groupby(group_all, lambda ep: ep.name): matches = list(matches) if len(matches) != 1: winner = group[name] warnings.warn( f"There are {len(matches)} entrypoints for the " f"databroker handler spec {name!r}. " f"They are {matches}. The match {winner} has won the race." ) handler_registry = {} for name, entrypoint in group.items(): try: handler_class = entrypoint.load() except Exception as exc: if skip_failures: warnings.warn( f"Skipping {entrypoint!r} which failed to load. " f"Exception: {exc!r}" ) continue else: raise handler_registry[name] = handler_class return handler_registry
d6b4b5c2071833503689abf474d5ebbc928c30c8
3,654,321
def create_highway_layer(highway_type, num_layer, unit_dim, window_size, activation, dropout, num_gpus, default_gpu_id, regularizer, random_seed, trainable): """create highway layer""" scope = "highway/{0}".format(highway_type) if highway_type == "highway": highway_layer = StackedHighway(num_layer=num_layer, unit_dim=unit_dim, activation=activation, dropout=dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=regularizer, random_seed=random_seed, trainable=trainable) elif highway_type == "conv_highway": highway_layer = StackedHighway(num_layer=num_layer, num_filter=unit_dim, window_size=window_size, activation=activation, dropout=dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=regularizer, random_seed=random_seed, trainable=trainable) else: raise ValueError("unsupported dense type {0}".format(highway_type)) return highway_layer
3bb1aafe9935f81683dfb036c91ec52da808932f
3,654,322
def compute_metrics(y_true, y_predicted, y_prob=None):
    """compute metrics for the predicted labels against ground truth

    @args:
        y_true: the ground truth label
        y_predicted: the predicted label
        y_predicted_prob: probability of the predicted label

    @returns:
        various metrics: F1-score, AUC of ROC, brier-score, also plots AUC
    """
    # plot AUC
    if y_prob:
        fpr, tpr, _ = roc_curve(y_true, y_prob)
        auc = roc_auc_score(y_true, y_prob)
        plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
        plt.legend(loc=4)
        plt.show()
        # brier = brier_score_loss((y_true, y_prob))

    # F1 score and brier score
    f1 = f1_score(y_true, y_predicted)

    # classification report
    plot_classification_report(classification_report(y_true, y_predicted))

    return f1
e31264fa05ad02bcc73de0746df12dcccb1889fd
3,654,323
def session_store(decoy: Decoy) -> SessionStore:
    """Get a mock SessionStore interface."""
    return decoy.mock(cls=SessionStore)
92518d32c7195f8fe6a6f3e44640cb2a5accb28b
3,654,324
from datetime import datetime


def get_json_signed(asn_metadata):
    """
    Given an ASN.1 object conforming to the new ASN.1 metadata definitions
    derived from Snapshot*.asn1, return a Python dictionary containing the same
    information, conformant to TUF's standard data specification for Snapshot
    metadata (tuf.formats.SNAPSHOT_SCHEMA). TUF internally does not use the
    ASN.1, converting it in and out of the standard Python dictionary formats
    defined in tuf.formats.
    """
    pydict_signed = {}

    # TODO: Normalize this function's interface: the asn_metadata given is
    # actually both 'signed' and 'signatures', which is strange since the
    # get_asn_signed function takes only the contents of the 'signed' entry, and
    # this function only returns the contents of a corresponding 'signed' entry.
    # (It is confusingly inconsistent to take the full object, return a converted
    # partial object, and have parallel naming and placement with a function that
    # takes and returns a partial object.)
    # This change has to percolate across all modules, however.
    asn_signed = asn_metadata['signed']  # This should be the argument instead of asn_metadata.

    # Should check this from the ASN, but... the ASN definitions don't actually
    # USE a type, so I'm entirely basing the type encoded on the filename. This
    # is bad, I think. Could it be a security issue to not sign the metadata type
    # in there? The metadata types are pretty distinct, but... it's still best to
    # fix this at some point.
    pydict_signed['_type'] = 'Snapshot'

    pydict_signed['expires'] = datetime.utcfromtimestamp(
        asn_signed['expires']).isoformat() + 'Z'
    pydict_signed['version'] = int(asn_signed['version'])

    # Next, extract the fileinfo for each role file described in the ASN.1
    # Snapshot metadata.
    snapshot_metadata = asn_signed['body']['snapshotMetadata']
    number_of_target_role_files = int(
        snapshot_metadata['numberOfTargetRoleFiles'])
    asn_target_fileinfos = snapshot_metadata['targetRoleFileInfos']

    pydict_fileinfos = {}

    # Copy the Targets and delegated roles fileinfos:
    for i in range(number_of_target_role_files):
        asn_role_fileinfo = asn_target_fileinfos[i]
        filename = str(asn_role_fileinfo['filename'])
        pydict_fileinfos[filename] = {'version': int(asn_role_fileinfo['version'])}

    # Add in the Root role fileinfo:
    # In the Python dictionary format for Snapshot metadata, these all exist in
    # one dictionary.
    filename = str(snapshot_metadata['rootRoleFileInfo']['filename'])
    version = int(snapshot_metadata['rootRoleFileInfo']['version'])
    length = int(snapshot_metadata['rootRoleFileInfo']['length'])

    if filename in pydict_fileinfos:
        raise tuf.Error('ASN1 Conversion failure for Snapshot role: duplicate '
                        'fileinfo entries detected: filename ' + str(filename) + ' identified '
                        'both as Root role and Targets role in Snapshot metadata.')

    # Populate the hashes in the fileinfo describing the Root role.
    hashes = {}
    for i in range(snapshot_metadata['rootRoleFileInfo']['numberOfHashes']):
        asn_hash_info = snapshot_metadata['rootRoleFileInfo']['hashes'][i]

        # This is how we'd extract the name of the hash function from the
        # enumeration (namedValues) that is in the class (HashFunction), indexed by
        # the underlying "value" of asn_hash_info. The [0] at the end selects
        # the string description from a 2-tuple of e.g. ('sha256', 1), where 1 is
        # the value in the enum.
        # TODO: Should probably make this its own function. The following should
        # work:
        #   def translate_pyasn_enum_to_value(asn_enum_value):
        #     return asn_enum_value.namedValues[asn_enum_value][0]
        #
        hashtype = asn_hash_info['function'].namedValues[asn_hash_info['function']]
        hashval = hex_from_octetstring(asn_hash_info['digest'])

        hashes[hashtype] = hashval

    # Finally, add all the information gathered about the Root role.
    pydict_fileinfos[filename] = {
        'version': version,
        'length': length,
        'hashes': hashes}

    pydict_signed['meta'] = pydict_fileinfos

    return pydict_signed
159202ccd2a33b13d44c0f9ba378ad3058075a54
3,654,325
from typing import Dict
from typing import Any
from typing import List


def extract_values(obj: Dict[str, Any], key: str, val: Any) -> List[Dict[str, Any]]:
    """
    Pull all values of specified key from nested JSON.

    Args:
        obj (dict): Dictionary to be searched
        key (str): tuple of key and value.
        value (any): value, which can be any type

    Returns:
        list of matched key-value pairs
    """
    return [elem for elem in extract(obj, key, val)]
368203a85ded379d6c4042dc90e803611bf810d9
3,654,326
def createMeshPatches(ax, mesh, rasterized=False, verbose=True):
    """Utility function to create 2d mesh patches within a given ax."""
    if not mesh:
        pg.error("drawMeshBoundaries(ax, mesh): invalid mesh:", mesh)
        return

    if mesh.nodeCount() < 2:
        pg.error("drawMeshBoundaries(ax, mesh): to few nodes:", mesh)
        return

    pg.tic()
    polys = [_createCellPolygon(c) for c in mesh.cells()]
    patches = mpl.collections.PolyCollection(polys, picker=True,
                                             rasterized=rasterized)
    if verbose:
        pg.info("Creation of mesh patches took = ", pg.toc())

    return patches
977de081b20e0ab0709887213b53f5318b1ff5f0
3,654,327
def get_url_name(url_):
    """Get the name from `url_`."""
    raw_res = url_.split('/', -1)[-1]
    raw_res = raw_res.split('.', 1)[0]
    res = raw_res[-15:]
    return res
a8f3b8dbc4a53e839b3047604e71ffaf36c00767
3,654,328
def check_uuid_in_db(uuid_to_validate, uuid_type):
    """
    A helper function to validate whether a UUID exists within our db.
    """
    uuid_in_db = None

    if uuid_type.name == "SESSION":
        uuid_in_db = Sessions.query.filter_by(session_uuid=uuid_to_validate).first()
    elif uuid_type.name == "QUIZ":
        uuid_in_db = Scores.query.filter_by(quiz_uuid=uuid_to_validate).first()
    elif uuid_type.name == "USER":
        uuid_in_db = Users.query.filter_by(user_uuid=uuid_to_validate).first()

    if not uuid_in_db:
        raise DatabaseError(message=f"{uuid_type.name}_UUID is not in the db.")

    return uuid_in_db
b151e7b7b393daf9647f308dea6fddd5eec3cb92
3,654,329
def delete(uuid):
    """ Deletes stored entities and time them.

    Args:
        uuid: A str, unique identifier, a part of the keynames of entities.
    Returns:
        A tuple of two lists. A list of float times to delete all entities,
        and a list of errors. A zero value signifies a failure.
    """
    timings = []
    errors = []
    for index in range(0, constants.NUM_SAMPLES):
        entity = None
        try:
            entity = TestModel.get_by_key_name(key_names=uuid + str(index))
            if not entity:
                raise Exception("Unable to first fetch entity.")
        except Exception as exception:
            logging.exception(exception)
            errors.append(str(exception))
            total_time = 0
            timings.append(total_time)
            logging.error("Left over entity with keyname {0}".
                          format(uuid + str(index)))
            continue

        start = time.time()
        try:
            entity.delete()
            total_time = time.time() - start
        except Exception as exception:
            logging.exception(exception)
            errors.append(str(exception))
            total_time = 0

        timings.append(total_time * constants.SECONDS_TO_MILLI)
    return (timings, errors)
c0f9b42829dd8bd0963ea3a9b904d1aec0c50368
3,654,330
def remove_prefix(string, prefix):
    """
    This function removes the given prefix from a string, if the string does
    indeed begin with the prefix; otherwise, it returns the string unmodified.
    """
    if string.startswith(prefix):
        return string[len(prefix):]
    else:
        return string
73cffca0e9938ea48f3781c7821fcbcf56e0cf25
3,654,331
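For the remove_prefix helper above, behaviour on a couple of illustrative inputs; it mirrors str.removeprefix, which was added to the standard library in Python 3.9:

remove_prefix("feature/login", "feature/")   # -> "login"
remove_prefix("main", "feature/")            # -> "main" (unchanged)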
import torch


def action_probs_to_action(probs):
    """
    Takes output of controller and converts to action in format [0,0,0,0]
    """
    forward = probs[:, 0:2]
    camera = probs[:, 2:5]
    jump = probs[:, 5:7]
    action = [torch.distributions.Categorical(p).sample().detach().item()
              for p in [forward, camera, jump]]
    action.append(0)  # not allowing any motion along side dimension
    return action
00395569cd3fb7696bd0aa050f6fbcd6641d3741
3,654,332
def solve_circuit(netlist): """ Generate and solve the Modified Nodal Analysis (MNA) equations for the circuit. The MNA equations are a linear system Ax = z. See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html Args: netlist (pandas.DataFrame): A netlist of circuit elements with format desc, node1, node2, value. Returns: (np.ndarray, np.ndarray): - V_node: Voltages of the voltage elements - I_batt: Currents of the current elements """ timer = pybamm.Timer() desc = np.array(netlist["desc"]).astype("<U16") node1 = np.array(netlist["node1"]) node2 = np.array(netlist["node2"]) value = np.array(netlist["value"]) nLines = netlist.shape[0] n = np.concatenate((node1, node2)).max() # Number of nodes (highest node number) m = 0 # "m" is the number of voltage sources, determined below. V_elem = ["V", "O", "E", "H"] for nm in desc: if nm[0] in V_elem: m += 1 # Construct the A matrix, which will be a (n+m) x (n+m) matrix # A = [G B] # [B.T D] # G matrix tracks the conductance between nodes (consists of floats) # B matrix tracks voltage sources between nodes (consists of -1, 0, 1) # D matrix is always zero for non-dependent sources # Construct the z vector with length (n+m) # z = [i] # [e] # i is currents and e is voltages # Use lil matrices to construct the A array G = sp.sparse.lil_matrix((n, n)) B = sp.sparse.lil_matrix((n, m)) D = sp.sparse.lil_matrix((m, m)) i = np.zeros([n, 1]) e = np.zeros([m, 1]) """ % We need to keep track of the number of voltage sources we've parsed % so far as we go through file. We start with zero. """ vsCnt = 0 """ % This loop does the bulk of filling in the arrays. It scans line by line % and fills in the arrays depending on the type of element found on the % current line. % See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html """ for k1 in range(nLines): n1 = node1[k1] - 1 # get the two node numbers in python index format n2 = node2[k1] - 1 elem = desc[k1][0] if elem == "R": # Resistance elements: fill the G matrix only g = 1 / value[k1] # conductance = 1 / R """ % Here we fill in G array by adding conductance. % The procedure is slightly different if one of the nodes is % ground, so check for those accordingly. 
""" if n1 == -1: # -1 is the ground node G[n2, n2] = G[n2, n2] + g elif n2 == -1: G[n1, n1] = G[n1, n1] + g else: G[n1, n1] = G[n1, n1] + g G[n2, n2] = G[n2, n2] + g G[n1, n2] = G[n1, n2] - g G[n2, n1] = G[n2, n1] - g elif elem == "V": # Voltage elements: fill the B matrix and the e vector if n1 >= 0: B[n1, vsCnt] = B[n1, vsCnt] + 1 if n2 >= 0: B[n2, vsCnt] = B[n2, vsCnt] - 1 e[vsCnt] = value[k1] vsCnt += 1 elif elem == "I": # Current elements: fill the i vector only if n1 >= 0: i[n1] = i[n1] - value[k1] if n2 >= 0: i[n2] = i[n2] + value[k1] # Construct final matrices from sub-matrices upper = sp.sparse.hstack((G, B)) lower = sp.sparse.hstack((B.T, D)) A = sp.sparse.vstack((upper, lower)) # Convert a to csr sparse format for more efficient solving of the linear system # csr works slighhtly more robustly than csc A_csr = sp.sparse.csr_matrix(A) z = np.vstack((i, e)) toc_setup = timer.time() lp.logger.debug(f"Circuit set up in {toc_setup}") # Scipy # X = solve(A, z).flatten() X = sp.sparse.linalg.spsolve(A_csr, z).flatten() # Pypardiso # X = pypardiso.spsolve(Aspr, z).flatten() # amg # ml = pyamg.smoothed_aggregation_solver(Aspr) # X = ml.solve(b=z, tol=1e-6, maxiter=10, accel="bicgstab") # include ground node (0V) # it is counter-intuitive that z is [i,e] while X is [V,I], but this is correct V_node = np.zeros(n + 1) V_node[1:] = X[:n] I_batt = X[n:] toc = timer.time() lp.logger.debug(f"Circuit solved in {toc - toc_setup}") lp.logger.info(f"Circuit set up and solved in {toc}") return V_node, I_batt
73339854dbf993a22c4bd07abf028dc181dbc483
3,654,333
from typing import Tuple from typing import List from typing import Set def search_for_subject(subject: Synset, num_urls: int, subscription_key: str, custom_config: str, host: str, path: str) -> Tuple[List[Tuple[str, str, str]], str, str]: """Perform the search phase for one particular subject.""" query = get_search_query(subject) logger.info(f"Subject {subject.name()} - Search query: `{query}`") urls: Set[str] = set() results: List[Tuple[str, str, str]] = [] wiki_links: List[str] = [] offset = 0 step = 0 while len(urls) < num_urls: search_result_json = bing_search(search_query=query, count=SEARCH_BATCH_SIZE, offset=offset, subscription_key=subscription_key, custom_config=custom_config, host=host, path=path) try: for url, title, snippet in parse_content_from_search_result(search_result_json): if url not in urls: urls.add(url) results.append((url, title, snippet)) if url.startswith(EN_WIKIPEDIA_PREFIX): wiki_links.append(url) if len(urls) >= num_urls: break except Exception: break offset += SEARCH_BATCH_SIZE step += 1 if step >= MAX_SEARCH_STEP: break if subject.name() in MANUAL_WN2WP: logger.info("Detected manual WordNet-Wikipedia linking") wiki = EN_WIKIPEDIA_PREFIX + quote_plus(MANUAL_WN2WP[subject.name()]["wikipedia"]).capitalize() wiki_map_source = MANUAL_WN2WP[subject.name()]["source"] else: if len(wiki_links) == 0: wiki_links = search_wiki(subject, subscription_key, custom_config, host, path) wiki = wiki_links[0] for w in wiki_links: w = unquote_plus(w) if "List_" in w: continue if "(disambiguation)" in w: continue if "Category:" in w: continue if "Template:" in w: continue wiki = w break wiki_map_source = "BING" # Add Wikipedia article if wiki.lower() not in set(url.lower() for url in urls): results[-1] = (wiki, "{} - Wikipedia".format(wiki[(wiki.rindex("/") + 1):]).capitalize(), "") return results, wiki, wiki_map_source
fde60dc857f5623e8aae9a7a52621d4386034fb5
3,654,334
def get_kwargs(class_name: str) -> Kwargs:
    """Returns the specific kwargs for each field `class_name`"""
    default_kwargs = get_default_kwargs()
    class_kwargs = get_setting("COMMON_KWARGS", {})
    use_kwargs = class_kwargs.get(class_name, default_kwargs)
    return use_kwargs
8b1ee7448792e2740053edf51528c99f3e2b5698
3,654,335
def minute_info(x):
    """
    separates the minutes from time stamp. Returns minute of time.
    """
    n2 = x.minute
    return n2 / 60
c166bb8f759a5eed1b45b2dd8f228206357deb28
3,654,336
from bs4 import BeautifulSoup


def remove_html_tags(text):
    """Removes HTML Tags from texts and replaces special spaces with regular spaces"""
    text = BeautifulSoup(text, 'html.parser').get_text()
    text = text.replace(u'\xa0', ' ')
    return text
7f31a18d81ebc80b202ac697eb7b19fe206aed95
3,654,337
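An illustrative call for the remove_html_tags helper above (requires the beautifulsoup4 package), with a made-up input string:

remove_html_tags("<p>Total:&nbsp;<b>42</b></p>")  # -> "Total: 42"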
def patchy(target, source=None):
    """ If source is not supplied, auto updates cannot be applied """
    if isinstance(target, str):
        target = resolve(target)
    if isinstance(source, str):
        source = resolve(source)
    if isinstance(target, ModuleType):
        return PatchModule(target, source)
    elif isinstance(target, type) and source:
        return PatchClass(target, source)
eece41abbc040fd306ae9b2813ae6f3e089cee82
3,654,338
def _handle_special_addresses(lion):
    """
    When there are special address codes/names, ensure that there is a
    duplicate row with the special name and code as the primary.

    Note: Only for special address type 'P' - addressable place names
    """
    special = lion[
        (lion['special_address_type'].isin(['P', 'B', 'G']))
        & (lion['street'] != lion['special_address_street_name'])
    ].drop(columns=['street', 'street_code'])
    special['street'] = special['special_address_street_name']
    special['street_code'] = special['special_address_street_code']
    special['special_address_street_code'] = ""
    special['special_address_street_name'] = ""
    lion = pd.concat([lion, special], sort=True).reset_index(drop=True)
    return lion
c8079ef0cba6e96940ed13b74c87a1bd49416376
3,654,339
def get_local():
    """Construct a local population."""
    pop = CosmicPopulation.simple(SIZE, generate=True)
    survey = Survey('perfect')
    surv_pop = SurveyPopulation(pop, survey)
    return surv_pop.frbs.s_peak
2ab081ffbd79c991c8a3d6ec7097a09407e5fe8a
3,654,340
def get_session():
    """<comment-ja>
    thread-localでセッションを取得します。
    </comment-ja>
    <comment-en>
    Gets a session that is thread-local.
    </comment-en>
    """
    return scoped_session(
        sessionmaker(bind=get_engine(), autoflush=False))
e5e28f96bb8a14c152ddb8759c61152ca4f74127
3,654,341
def calculate_y_pos(x, centre):
    """Calculates the y-coordinate on a parabolic curve, given x."""
    centre = 80
    y = 1 / centre * (x - centre) ** 2 + sun_radius
    return int(y)
e57501c9e83bc26491266c9237f3e3b722ccacef
3,654,342
def extract_flowlines(gdb_path, target_crs, extra_flowline_cols=[]): """ Extracts flowlines data from NHDPlusHR data product. Extract flowlines from NHDPlusHR data product, joins to VAA table, and filters out coastlines. Extracts joins between flowlines, and filters out coastlines. Parameters ---------- gdb_path : str path to the NHD HUC4 Geodatabase target_crs: GeoPandas CRS object target CRS to project NHD to for analysis, like length calculations. Must be a planar projection. extra_cols: list List of extra field names to extract from NHDFlowline layer Returns ------- tuple of (GeoDataFrame, DataFrame) (flowlines, joins) """ ### Read in flowline data and convert to data frame print("Reading flowlines") flowline_cols = FLOWLINE_COLS + extra_flowline_cols df = read_dataframe( gdb_path, layer="NHDFlowline", force_2d=True, columns=[flowline_cols] ) print("Read {:,} flowlines".format(len(df))) # Index on NHDPlusID for easy joins to other NHD data df.NHDPlusID = df.NHDPlusID.astype("uint64") df = df.set_index(["NHDPlusID"], drop=False) # convert MultiLineStrings to LineStrings (all have a single linestring) df.geometry = pg.get_geometry(df.geometry.values.data, 0) ### Read in VAA and convert to data frame # NOTE: not all records in Flowlines have corresponding records in VAA # we drop those that do not since we need these fields. print("Reading VAA table and joining...") vaa_df = read_dataframe(gdb_path, layer="NHDPlusFlowlineVAA", columns=[VAA_COLS]) vaa_df.NHDPlusID = vaa_df.NHDPlusID.astype("uint64") vaa_df = vaa_df.set_index(["NHDPlusID"]) df = df.join(vaa_df, how="inner") print("{:,} features after join to VAA".format(len(df))) # Simplify data types for smaller files and faster IO df.FType = df.FType.astype("uint16") df.FCode = df.FCode.astype("uint16") df.StreamOrde = df.StreamOrde.astype("uint8") df.Slope = df.Slope.astype("float32") df.MinElevSmo = df.MinElevSmo.astype("float32") df.MaxElevSmo = df.MaxElevSmo.astype("float32") ### Read in flowline joins print("Reading flowline joins") join_df = gp.read_file(gdb_path, layer="NHDPlusFlow")[ ["FromNHDPID", "ToNHDPID"] ].rename(columns={"FromNHDPID": "upstream", "ToNHDPID": "downstream"}) join_df.upstream = join_df.upstream.astype("uint64") join_df.downstream = join_df.downstream.astype("uint64") ### Label loops for easier removal later # WARNING: loops may be very problematic from a network processing standpoint. # Include with caution. print("Identifying loops") df["loop"] = (df.StreamOrde != df.StreamCalc) | (df.FlowDir.isnull()) idx = df.loc[df.loop].index join_df["loop"] = join_df.upstream.isin(idx) | join_df.downstream.isin(idx) ### Filter out coastlines and update joins # WARNING: we tried filtering out pipelines (FType == 428). It doesn't work properly; # there are many that go through dams and are thus needed to calculate # network connectivity and gain of removing a dam. 
print("Filtering out coastlines...") coastline_idx = df.loc[df.FType == 566].index df = df.loc[~df.index.isin(coastline_idx)].copy() # remove any joins that have coastlines as upstream # these are themselves coastline segments join_df = join_df.loc[~join_df.upstream.isin(coastline_idx)].copy() # set the downstream to 0 for any that join coastlines # this will enable us to mark these as downstream terminals in # the network analysis later join_df.loc[join_df.downstream.isin(coastline_idx), "downstream"] = 0 # drop any duplicates (above operation sets some joins to upstream and downstream of 0) join_df = join_df.drop_duplicates() print("{:,} features after removing coastlines".format(len(df))) ### Add calculated fields # Set our internal master IDs to the original index of the file we start from # Assume that we can always fit into a uint32, which is ~400 million records # and probably bigger than anything we could ever read in df["lineID"] = df.index.values.astype("uint32") + 1 join_df = ( join_df.join(df.lineID.rename("upstream_id"), on="upstream") .join(df.lineID.rename("downstream_id"), on="downstream") .fillna(0) ) for col in ("upstream", "downstream"): join_df[col] = join_df[col].astype("uint64") for col in ("upstream_id", "downstream_id"): join_df[col] = join_df[col].astype("uint32") ### Calculate size classes print("Calculating size class") drainage = df.TotDASqKm df.loc[drainage < 10, "sizeclass"] = "1a" df.loc[(drainage >= 10) & (drainage < 100), "sizeclass"] = "1b" df.loc[(drainage >= 100) & (drainage < 518), "sizeclass"] = "2" df.loc[(drainage >= 518) & (drainage < 2590), "sizeclass"] = "3a" df.loc[(drainage >= 2590) & (drainage < 10000), "sizeclass"] = "3b" df.loc[(drainage >= 10000) & (drainage < 25000), "sizeclass"] = "4" df.loc[drainage >= 25000, "sizeclass"] = "5" print("projecting to target projection") df = df.to_crs(target_crs) # Calculate length and sinuosity print("Calculating length and sinuosity") df["length"] = df.geometry.length.astype("float32") df["sinuosity"] = df.geometry.apply(calculate_sinuosity).astype("float32") # set join types to make it easier to track join_df["type"] = "internal" # set default join_df.loc[join_df.upstream == 0, "type"] = "origin" join_df.loc[join_df.downstream == 0, "type"] = "terminal" join_df.loc[(join_df.upstream != 0) & (join_df.upstream_id == 0), "type"] = "huc_in" # drop columns not useful for later processing steps df = df.drop(columns=["FlowDir", "StreamCalc"]) return df, join_df
8e0f0fec59441a3370b958452a2e4674f1e0ee34
3,654,343
import os


def exists(awesome_title):
    """Check the awesome repository is cached

    Args:
        awesome_title: Awesome repository title

    Returns:
        True if exists, False otherwise
    """
    awesome_cache_directory = os.path.join(CACHE_DIRECTORY, awesome_title)
    awesome_cached_readme = os.path.join(awesome_cache_directory, 'README.md')
    return os.path.exists(awesome_cached_readme)
23a5675741e6489fb911edd72fcb6dfd46d8a8f9
3,654,344
def split_str_to_list(input_str, split_char=","):
    """Split a string into a list of elements.

    Args:
        input_str (str): The string to split
        split_char (str, optional): The character to split the string by. Defaults to ",".

    Returns:
        (list): The string split into a list
    """
    # Split a string into a list using `,` char
    split_str = input_str.split(split_char)

    # For each element in split_str, strip leading/trailing whitespace
    for i, element in enumerate(split_str):
        split_str[i] = element.strip()

    return split_str
2b13868aed1869310a1398886f6777ddceb6c777
3,654,345
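A quick illustrative use of split_str_to_list above, with made-up inputs:

split_str_to_list(" a, b ,c ")              # -> ['a', 'b', 'c']
split_str_to_list("x | y", split_char="|")  # -> ['x', 'y']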
def generate_password(length):
    """
    This will create a random password for the user

    Args:
        length - the user's preferred length for the password
    Return:
        It will return a random password of user's preferred length
    """
    return Password.generate_pass(length)
76fd4e06364b4cbfeffb389cb959f5d22f0acc71
3,654,346
def export_csv(obj, file_name, point_type='evalpts', **kwargs): """ Exports control points or evaluated points as a CSV file. :param obj: a curve or a surface object :type obj: abstract.Curve, abstract.Surface :param file_name: output file name :type file_name: str :param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points :type point_type: str :raises IOError: an error occurred writing the file """ if not isinstance(obj, (abstract.Curve, abstract.Surface)): raise ValueError("Input object should be a curve or a surface") # Pick correct points from the object if point_type == 'ctrlpts': points = obj.ctrlpts elif point_type == 'evalpts' or point_type == 'curvepts' or point_type == 'surfpts': points = obj.evalpts else: raise ValueError("Please choose a valid point type option. Possible types: ctrlpts, evalpts") # Prepare CSV header dim = len(points[0]) line = "dim " for i in range(dim-1): line += str(i + 1) + ", dim " line += str(dim) + "\n" # Prepare values for pt in points: line += ",".join([str(p) for p in pt]) + "\n" # Write to file return exch.write_file(file_name, line)
a42f13a5af94344f0ef9c6b9b8aca62067dfd77f
3,654,347
import re


def formatRFC822Headers(headers):
    """ Convert the key-value pairs in 'headers' to valid RFC822-style
        headers, including adding leading whitespace to elements which
        contain newlines in order to preserve continuation-line semantics.
    """
    munged = []
    linesplit = re.compile(r'[\n\r]+?')
    for key, value in headers:
        vallines = linesplit.split(value)
        while vallines:
            if vallines[-1].rstrip() == '':
                vallines = vallines[:-1]
            else:
                break
        munged.append('%s: %s' % (key, '\r\n '.join(vallines)))
    return '\r\n'.join(munged)
4c7dd97c9079daf144acf83241ebe9f025020611
3,654,348
def first_fixation_duration(trial: Trial, region_number: int) -> RegionMeasure:
    """
    The duration of the first fixation in a region during first pass reading
    (i.e., before the reader fixates areas beyond the region). If this region
    is skipped during first pass, this measure is None.

    ::

        fp_fixations = get_first_pass_fixations(trial, region_number)
        if length of fp_fixations is 0:
            return None
        else:
            return duration of first fixation in fp_fixations
    """
    region = region_exists(trial, region_number)
    fp_fixations = get_fp_fixations(trial, region_number)
    if not fp_fixations:
        return save_measure(trial, region, "first_fixation_duration", None, None)
    return save_measure(
        trial,
        region,
        "first_fixation_duration",
        fp_fixations[0].duration(),
        [fp_fixations[0]],
    )
cdb1435f382d277bb3a116e2d268a566b17692a4
3,654,349
def find_in_path(input_data, path):
    """Finds values at the path in input_data.

    :param input_data: dict or list
    :param path: the path of the values example: b.*.name
    :result: list of found data
    """
    result = find(input_data, path.split('.'))
    return [value for _, value in result if value]
6529486013966df264fc3f84a17a8f858a37190c
3,654,350
def post_test_check(duthost, up_bgp_neighbors):
    """Post-checks the status of critical processes and state of BGP sessions.

    Args:
        duthost: Host DUT.
        skip_containers: A list contains the container names which should be skipped.

    Return:
        This function will return True if all critical processes are running and
        all BGP sessions are established. Otherwise it will return False.
    """
    return check_all_critical_processes_status(duthost) and \
        duthost.check_bgp_session_state(up_bgp_neighbors, "established")
6ce585abbfbdb2b8a1f858ce54f4cd837c84bbda
3,654,351
def fill_with_mode(filename, column):
    """
    Fill the missing values(NaN) in a column with the mode of that column

    Args:
        filename: Name of the CSV file.
        column: Name of the column to fill

    Returns:
        df: Pandas DataFrame object.
        (Representing entire data and where 'column' does not contain NaN values)
        (Filled with above mentioned rules)
    """
    df = pd.read_csv(filename)
    mode = df[column].mode()
    df[column] = df[column].fillna(mode[0])
    return df
6b9dc4b0530c21b0a43776b05ce0d8620f75dd30
3,654,352
def add_workshift_context(request): """ Add workshift variables to all dictionaries passed to templates. """ if not request.user.is_authenticated(): return {} if Semester.objects.count() < 1: return {"WORKSHIFT_ENABLED": False} # Current semester is for navbar notifications try: current_semester = Semester.objects.get(current=True) except Semester.DoesNotExist: current_semester = None except Semester.MultipleObjectsReturned: current_semester = Semester.objects.filter(current=True).latest("start_date") workshift_emails = [] for pos in Manager.objects.filter(workshift_manager=True, active=True): if pos.email: workshift_emails.append(pos.email) elif pos.incumbent.email_visible and pos.incumbent.user.email: workshift_emails.append(pos.incumbent.user.email) if workshift_emails: workshift_email_str = " ({0})".format( ", ".join(["<a href=\"mailto:{0}\">{0}</a>".format(i) for i in workshift_emails]) ) else: workshift_email_str = "" messages.add_message( request, messages.WARNING, MESSAGES["MULTIPLE_CURRENT_SEMESTERS"].format( admin_email=settings.ADMINS[0][1], workshift_emails=workshift_email_str, )) today = localtime(now()).date() days_passed = None total_days = None semester_percentage = None standing = None happening_now = None workshift_profile = None if current_semester: # number of days passed in this semester days_passed = (today - current_semester.start_date).days # total number of days in this semester total_days = (current_semester.end_date - current_semester.start_date).days semester_percentage = round((days_passed / total_days) * 100, 2) # Semester is for populating the current page try: semester = request.semester except AttributeError: semester = current_semester try: workshift_profile = WorkshiftProfile.objects.get( semester=semester, user=request.user, ) except WorkshiftProfile.DoesNotExist: workshift_profile = None workshift_manager = utils.can_manage(request.user, semester=semester) upcoming_shifts = WorkshiftInstance.objects.filter( workshifter=workshift_profile, closed=False, date__gte=today, date__lte=today + timedelta(days=2), ) # TODO: Add a fudge factor of an hour to this? time = localtime(now()).time() happening_now = [] for shift in upcoming_shifts: if shift.week_long: happening_now.append(shift) continue if shift.date != today: continue if shift.start_time is None: if shift.end_time is not None: if time < shift.end_time: happening_now.append(shift) else: happening_now.append(shift) continue if shift.end_time is None: if shift.start_time is not None: if time > shift.start_time: happening_now.append(shift) else: happening_now.append(shift) continue if time > shift.start_time and time < shift.end_time: happening_now.append(shift) if workshift_profile: try: standing = workshift_profile.pool_hours.get(pool__is_primary=True).standing except (PoolHours.DoesNotExist, PoolHours.MultipleObjectsReturned): pass return { "WORKSHIFT_ENABLED": True, "SEMESTER": semester, "CURRENT_SEMESTER": current_semester, "WORKSHIFT_MANAGER": workshift_manager, "WORKSHIFT_PROFILE": workshift_profile, "STANDING": standing, "DAYS_PASSED": days_passed, "TOTAL_DAYS": total_days, "SEMESTER_PERCENTAGE": semester_percentage, "UPCOMING_SHIFTS": zip(upcoming_shifts, happening_now), }
8281c55d01b08080dc13bd8ae277963d05992347
3,654,353
def get_model_spec( model_zoo, model_def, model_params, dataset_fn, loss, optimizer, eval_metrics_fn, prediction_outputs_processor, ): """Get the model spec items in a tuple. The model spec tuple contains the following items in order: * The model object instantiated with parameters specified in `model_params`, * The `dataset_fn`, * The `loss`, * The `optimizer`, * The `eval_metrics_fn`, * The `prediction_outputs_processor`. Note that it will print warning if it's not inherited from `BasePredictionOutputsProcessor`. """ model_def_module_file = get_module_file_path(model_zoo, model_def) default_module = load_module(model_def_module_file).__dict__ model = load_model_from_module(model_def, default_module, model_params) prediction_outputs_processor = _get_spec_value( prediction_outputs_processor, model_zoo, default_module ) if prediction_outputs_processor and not isinstance( prediction_outputs_processor, BasePredictionOutputsProcessor ): logger.warning( "prediction_outputs_processor is not " "inherited from BasePredictionOutputsProcessor. " "Prediction outputs may not be processed correctly." ) return ( model, _get_spec_value(dataset_fn, model_zoo, default_module, required=True), _get_spec_value(loss, model_zoo, default_module, required=True), _get_spec_value(optimizer, model_zoo, default_module, required=True), _get_spec_value( eval_metrics_fn, model_zoo, default_module, required=True ), prediction_outputs_processor, )
427cf6f2705f32a493fdd8c16cc57d337b528a2f
3,654,354
def clean_meta(unclean_list):
    """
    cleans raw_vcf_header_list for downstream processing
    :return:
    """
    clean_list = []
    for i in unclean_list:
        if "=<" in i:
            i = i.rstrip(">")
            i = i.replace("##", "")
            ii = i.split("=<", 1)
        else:
            i = i.replace("##", "")
            ii = i.split("=", 1)
        clean_list.append(ii)
    return clean_list
03dcbcad57b129fd6ff379f3fb3181c91f8f4106
3,654,355
import itertools


def generate_result_table(models, data_info):  # per idx (gene/transcript)
    """
    Generate a table containing learned model parameters and statistic tests.

    Parameters
    ----------
    models
        Learned models for individual genomic positions of a gene.
    group_labels
        Labels of samples.
    data_info
        Dict

    Returns
    -------
    table
        List of tuples.
    """
    ###
    condition_names, run_names = get_ordered_condition_run_names(data_info)  # information from the config file used for modelling.
    ###

    ###
    table = []
    for key, (model, prefiltering) in models.items():
        idx, position, kmer = key
        mu = model.nodes['mu_tau'].expected()  # K
        sigma2 = 1. / model.nodes['mu_tau'].expected(var='gamma')  # K
        var_mu = model.nodes['mu_tau'].variance(var='normal')  # K
        # mu = model.nodes['y'].params['mean']
        # sigma2 = model.nodes['y'].params['variance']
        w = model.nodes['w'].expected()  # GK
        N = model.nodes['y'].params['N'].round()  # GK
        N0 = N[:, 0].squeeze()
        N1 = N[:, 1].squeeze()
        w0 = w[:, 0].squeeze()
        coverage = np.sum(model.nodes['y'].params['N'], axis=-1)  # GK => G  # n_reads per group
        p_overlap, list_cdf_at_intersections = stats.calc_prob_overlapping(mu, sigma2)
        model_group_names = model.nodes['x'].params['group_names']  # condition_names if pooling, run_names otherwise.

        ### Cluster assignment ###
        conf_mu = [calculate_confidence_cluster_assignment(mu[0], model.kmer_signal),
                   calculate_confidence_cluster_assignment(mu[1], model.kmer_signal)]
        cluster_idx = {}
        if conf_mu[0] > conf_mu[1]:
            cluster_idx['unmod'] = 0
            cluster_idx['mod'] = 1
        else:
            cluster_idx['unmod'] = 1
            cluster_idx['mod'] = 0

        mu_assigned = [mu[cluster_idx['unmod']], mu[cluster_idx['mod']]]
        sigma2_assigned = [sigma2[cluster_idx['unmod']], sigma2[cluster_idx['mod']]]
        conf_mu = [conf_mu[cluster_idx['unmod']], conf_mu[cluster_idx['mod']]]
        w_mod = w[:, cluster_idx['mod']]
        mod_assignment = [['higher', 'lower'][(mu[0] < mu[1]) ^ cluster_idx['mod']]]

        ### calculate stats_pairwise
        stats_pairwise = []
        for cond1, cond2 in itertools.combinations(condition_names, 2):
            if model.method['pooling']:
                cond1, cond2 = [cond1], [cond2]
            else:
                cond1, cond2 = list(data_info[cond1].keys()), list(data_info[cond2].keys())
            if any(r in model_group_names for r in cond1) and any(r in model_group_names for r in cond2):
                w_cond1 = w[np.isin(model_group_names, cond1), cluster_idx['mod']].flatten()
                w_cond2 = w[np.isin(model_group_names, cond2), cluster_idx['mod']].flatten()
                n_cond1 = coverage[np.isin(model_group_names, cond1)]
                n_cond2 = coverage[np.isin(model_group_names, cond2)]
                z_score, p_ws = stats.z_test(w_cond1, w_cond2, n_cond1, n_cond2)  # two-tailed
                w_mod_mean_diff = np.mean(w_cond1) - np.mean(w_cond2)
                stats_pairwise += [w_mod_mean_diff, p_ws, z_score]
            else:
                stats_pairwise += [None, None, None]

        if len(condition_names) > 2:
            ### calculate stats_one_vs_all
            stats_one_vs_all = []
            for cond in condition_names:
                if model.method['pooling']:
                    cond = [cond]
                else:
                    cond = list(data_info[cond].keys())
                if any(r in model_group_names for r in cond):
                    w_cond1 = w[np.isin(model_group_names, cond), cluster_idx['mod']].flatten()
                    w_cond2 = w[~np.isin(model_group_names, cond), cluster_idx['mod']].flatten()
                    n_cond1 = coverage[np.isin(model_group_names, cond)]
                    n_cond2 = coverage[~np.isin(model_group_names, cond)]
                    z_score, p_ws = stats.z_test(w_cond1, w_cond2, n_cond1, n_cond2)
                    w_mod_mean_diff = np.mean(w_cond1) - np.mean(w_cond2)
                    stats_one_vs_all += [w_mod_mean_diff, p_ws, z_score]
                else:
                    stats_one_vs_all += [None, None, None]

        ###
        w_mod_ordered, coverage_ordered = [], []  # ordered by condition_names or run_names based on headers.
        if model.method['pooling']:
            names = condition_names
        else:
            names = run_names
        for name in names:
            if name in model_group_names:
                w_mod_ordered += list(w_mod[np.isin(model_group_names, name)])
                coverage_ordered += list(coverage[np.isin(model_group_names, name)])
            else:
                w_mod_ordered += [None]
                coverage_ordered += [None]
        ###

        ### prepare values to write
        row = [idx, position, kmer]
        row += stats_pairwise
        if len(condition_names) > 2:
            row += stats_one_vs_all
        # row += [p_overlap]
        # row += list_cdf_at_intersections
        row += list(w_mod_ordered)
        row += list(coverage_ordered)
        row += mu_assigned + sigma2_assigned + conf_mu + mod_assignment
        if prefiltering is not None:
            row += [prefiltering[model.method['prefiltering']['method']]]

        ### Filtering those positions with a nearly single distribution.
        cdf_threshold = 0.1
        x_x1, y_x1, x_x2, y_x2 = list_cdf_at_intersections
        is_not_inside = ((y_x1 < cdf_threshold) & (x_x1 < cdf_threshold)) | ((y_x2 < cdf_threshold) & (x_x2 < cdf_threshold)) | (((1 - y_x1) < cdf_threshold) & ((1 - x_x1) < cdf_threshold)) | (((1 - y_x2) < cdf_threshold) & ((1 - x_x2) < cdf_threshold))

        if (p_overlap <= 0.5) and (is_not_inside):
            table += [tuple(row)]

    return table
455cbe41c2114e3a81ac186b2adf07753041d753
3,654,356
def get_href_kind(href, domain):
    """Return kind of href (internal or external)"""
    if is_internal_href(href, domain):
        kind = 'internal'
    else:
        kind = 'external'
    return kind
e63b3e28d0f6f776338da827f61b0c5709dfe990
3,654,357
from glob import glob
import os


def get_input_file(req_id, file_number):
    """
    Returns an uploaded input file, 404 if not yet uploaded.
    :param req_id: The id of the conversion.
    :param file_number: File number.
    :return: File as text.
    """
    cr = db.retrieve(req_id)
    if cr is None:
        return jsonify({
            'status': 'errored',
            'message': 'no job found matching request id: ' + req_id
        }), 404
    if int(file_number) not in range(cr.file_count):
        return jsonify({
            'status': 'errored',
            'message': 'file number must be in (0, ' + str(cr.file_count - 1) + ').'
        }), 404
    if len(glob(config['FILE_STORE'] + str(req_id) + '/input/' + str(file_number) + '.*')) == 0:
        return jsonify({
            'status': 'errored',
            'message': 'file number ' + str(file_number) + ' has not been uploaded yet.'
        }), 404

    input_file = open(glob(config['FILE_STORE'] + str(req_id) + '/input/' + str(file_number) + '.*')[0], 'r')

    def generate(file):
        while True:
            l = file.readline()
            if l:
                yield l
            else:
                return

    return Response(generate(input_file),
                    content_type='application/' + os.path.splitext(input_file.name)[1].strip('.')), 200
b5d4777bc9b59efef6056ef832fb2a20d2b04100
3,654,358
def check_mark(value):
    """Helper method to create an html formatted entry for the flags in tables."""
    return format_html('&check;') if value == 1 else ''
07430e1b5be180b01dd8dd045db01ac4ee9ca6ee
3,654,359
import os
import time


def test_sleep(n):
    """Used only for testing -- example method with argument. """
    logger = LMLogger.get_logger()
    logger.info("Starting test_sleep({}) in pid {}".format(n, os.getpid()))

    try:
        job = get_current_job()
        job.meta['sample'] = 'test_sleep metadata'
        job.meta['pid'] = int(os.getpid())
        job.save_meta()

        time.sleep(n)
        logger.info("Completed test_sleep in pid {}".format(os.getpid()))
        return 0
    except Exception as e:
        logger.error("Error on test_sleep in pid {}: {}".format(os.getpid(), e))
        raise
5b93c691577c7510f8c4b108f1334d715f1cfaa6
3,654,360
def military_to_english_time(time, fmt="{0}:{1:02d}{2}"):
    """ assumes 08:33:55 and 22:33:42 type times
        will return 8:33am and 10:33pm (note we floor the minutes)
    """
    ret_val = time
    try:
        h, m = split_time(time)
        ampm = "am"
        if h >= 12:
            ampm = "pm"
        if h >= 24:
            ampm = "am"
        h = h % 12
        if h == 0:
            h = 12
        ret_val = fmt.format(h, m, ampm)
    except:
        pass
    return ret_val
880f42354c407a7fae5ba2685b38a10260bc9f58
3,654,361
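A brief usage sketch of the converter above. The `split_time` helper is not part of this snippet; the example assumes it returns integer hours and minutes (e.g. "08:33:55" -> (8, 33)), and the expected outputs follow the docstring:

# Hypothetical usage, assuming split_time("08:33:55") returns (8, 33)
military_to_english_time("08:33:55")   # -> "8:33am"
military_to_english_time("22:33:42")   # -> "10:33pm"
military_to_english_time("bad input")  # falls through the bare except -> "bad input"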
def parse_ssh_config(text):
    """
    Parse an ssh-config output into a Python dict.

    Because Windows doesn't have grep, lol.
    """
    try:
        lines = text.split('\n')
        lists = [l.split(' ') for l in lines]
        lists = [list(filter(None, l)) for l in lists]
        tuples = [(l[0], ''.join(l[1:]).strip().strip('\r')) for l in lists]
        return dict(tuples)
    except IndexError:
        raise Exception("Malformed input")
7441c39e5ca9127871316d98a6fe195ed1da6522
3,654,362
import re


def snake_case(string: str) -> str:
    """Convert upper camelcase to snake case."""
    return re.sub(r"(?<!^)(?=[A-Z])", "_", string).lower()
fe8592bcfa1f2233a07308741de5f912fd7055b3
3,654,363
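For illustration, two calls to the regex-based converter above; the pattern inserts an underscore before every capital letter that is not at the start of the string, then lowercases:

snake_case("UpperCamelCase")  # -> "upper_camel_case"
snake_case("HTTPServer")      # -> "h_t_t_p_server" (consecutive capitals are split individually)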
import argparse


def positive_int(s: str) -> int:
    """Positive integer validator for `argparse.ArgumentParser`."""
    i = int(s)
    if i < 0:
        raise argparse.ArgumentTypeError("A positive number is required")
    return i
480f68e296e3fedfef4dae26f1a8563691056a17
3,654,364
import tempfile
import atexit


def create_tempdir(suffix='', prefix='tmp', directory=None, delete=True):
    """Create a tempdir and return the path.

    This function registers the new temporary directory
    for deletion with the atexit module.
    """
    tempd = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory)
    if delete:
        atexit.register(_cleanup_tempdir, tempd)
    return tempd
f0c9b6b3a9198d1e552e5fce838113239021a4fd
3,654,365
import binascii


async def get_transactor_key(request):
    """Get transactor key out of request."""
    id_dict = deserialize_api_key(
        request.app.config.SECRET_KEY, extract_request_token(request)
    )
    next_id = id_dict.get("id")

    auth_data = await get_auth_by_next_id(next_id)
    encrypted_private_key = auth_data.get("encrypted_private_key")
    private_key = decrypt_private_key(
        request.app.config.AES_KEY, next_id, encrypted_private_key
    )
    hex_private_key = binascii.hexlify(private_key)
    return Key(hex_private_key), next_id
a1766e70ad076eaeb7d19509aeffbb729869df51
3,654,366
def _get_plot_aeff_exact_to_ground_energy(parsed_ncsd_out_files):
    """Returns a list of plots in the form
        (xdata, ydata, const_list, const_dict),
    where A=Aeff is xdata, and ground energy is ydata
    """
    a_aeff_to_ground_state_energy = get_a_aeff_to_ground_state_energy_map(
        parsed_ncsd_out_files=parsed_ncsd_out_files)
    a_to_ground_state_energy = dict()
    for a_aeff, e in a_aeff_to_ground_state_energy.items():
        if a_aeff[0] == a_aeff[1]:
            a_to_ground_state_energy[a_aeff[0]] = e
    return map_to_arrays(a_to_ground_state_energy) + (list(), dict())
e4224d43808e9ef0f43bc32041ef567138853bdb
3,654,367
def get_twitter_auth():
    """Setup Twitter connection
    return: API object"""
    parameters = set_parameters.take_auth_data()
    twitter_access_token = parameters['twitter_access_token']
    twitter_secret_token = parameters['twitter_secret_token']
    twitter_api_key = parameters['twitter_api_key']
    twitter_secret_key = parameters['twitter_secret_key']

    auth = OAuthHandler(twitter_api_key, twitter_secret_key)
    auth.set_access_token(twitter_access_token, twitter_secret_token)
    return auth
1bb6ef2660adf25935f844c29e7e1dae3e674937
3,654,368
import re
import logging


def pre_process_string_data(item: dict):
    """
    remove extra whitespaces, linebreaks, quotes from strings
    :param item: dictionary with data for analysis
    :return: cleaned item
    """
    try:
        result_item = {key: item[key] for key in KEYS + ['_id']}
        for prop in result_item:
            if type(result_item[prop]) is str and prop != '_id':
                result_item[prop] = re.sub(' +', ' ', item[prop])
                result_item[prop] = re.sub('\n', ' ', item[prop])
                result_item[prop] = item[prop].strip().strip('"').strip("'").lower().strip()
        return result_item
    except KeyError:
        logging.warning("Wrong formed entity with id %s", item['_id'])
        return None
32c4218c0e02580ea90a75f117d8b822239ee6d1
3,654,369
def remove_cmds_from_title(title):
    """
    Removes the commands placed in titles, purely to keep
    the title itself to the point.
    """
    arr = title.split()
    output = " ".join(list(filter(lambda x: x[0] != "!", arr)))
    return output
bfaa96aa578455f977549b737a8492afa80e1e7c
3,654,370
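As a quick example of the filter above, any whitespace-separated token starting with "!" is dropped:

remove_cmds_from_title("!sr Daft Punk - One More Time !skip")  # -> "Daft Punk - One More Time"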
def load_config(file_path):
    """Loads the config file into a config-namedtuple

    Parameters:
        input (pathlib.Path): takes a Path object for the config file.
            It does not correct any relative path issues.

    Returns:
        (namedtuple -- config): Contains two sub-structures (run, plot)
            that will return a dictionary of configuration options.
            You can get your desired config-dictionary via
            `config.run` or `config.plot`.
    """
    with open(file_path) as f:
        return config(**loads(f.read()))
82664fa4e27fd60ae56c435b3deb45cb7535bc17
3,654,371
def parse_version_number(raw_version_number):
    # type: (str) -> Tuple[int, int, int]
    """
    Parse a valid "INT.INT.INT" string, or raise an
    Exception. Exceptions are handled by caller and
    mean invalid version number.
    """
    converted_version_number = [int(part) for part in raw_version_number.split(".")]
    if len(converted_version_number) != 3:
        raise ValueError(
            "Invalid version number %r, parsed as %r",
            raw_version_number,
            converted_version_number,
        )
    # Make mypy happy
    version_number = (
        converted_version_number[0],
        converted_version_number[1],
        converted_version_number[2],
    )
    return version_number
a899d29790ce03d28e7acb11c87f38890501d462
3,654,372
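A short illustration of the parser above; note that the ValueError is raised with printf-style arguments rather than a pre-formatted message, matching the original code:

parse_version_number("1.2.3")  # -> (1, 2, 3)
parse_version_number("1.2")    # raises ValueError (wrong number of parts)
parse_version_number("1.2.x")  # raises ValueError from int() before the length check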
def get_error_directory_does_not_exists(dir_kind):
    """dir kind = [dir, file ,url]"""
    return f"Error: Directory with {dir_kind} does not exist:"
171fb09ab341daf2810612f2cc7c077b5326f347
3,654,373
def var_text(vname, iotype, variable):
    """
    Extract info from variable for vname of iotype
    and return info as HTML string.
    """
    if iotype == 'read':
        txt = '<p><i>Input Variable Name:</i> <b>{}</b>'.format(vname)
        if 'required' in variable:
            txt += '<br><b><i>Required Input Variable</i></b>'
    else:
        txt = '<p><i>Output Variable Name:</i> <b>{}</b>'.format(vname)
    txt += '<br><i>Description:</i> {}'.format(variable['desc'])
    txt += '<br><i>Datatype:</i> {}'.format(variable['type'])
    if iotype == 'read':
        txt += '<br><i>Availability:</i> {}'.format(variable['availability'])
        txt += '<br><i>IRS Form Location:</i>'
        formdict = variable['form']
        for yrange in sorted(formdict.keys()):
            txt += '<br>{}: {}'.format(yrange, formdict[yrange])
    txt += '</p>'
    return txt
04fdb1727c8eb783f7fb2c0324852e80673e8b77
3,654,374
def line_search_reset(binary_img, left_lane, right_line):
    """
    #---------------------
    # After applying calibration, thresholding, and a perspective transform to a road image,
    # I have a binary image where the lane lines stand out clearly.
    # However, I still need to decide explicitly which pixels are part of the lines
    # and which belong to the left line and which belong to the right line.
    #
    # This lane line search is done using histogram and sliding window
    #
    # The sliding window implementation is based on lecture videos.
    #
    # This function searches lines from scratch, i.e. without using info from previous lines.
    # However, the search is not entirely a blind search, since I am using histogram information.
    #
    # Use Cases:
    #   - Use this function on the first frame
    #   - Use when lines are lost or not detected in previous frames
    #
    """

    # I first take a histogram along all the columns in the lower half of the image
    histogram = np.sum(binary_img[int(binary_img.shape[0] / 2):, :], axis=0)

    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_img, binary_img, binary_img)) * 255

    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0] / 2)
    leftX_base = np.argmax(histogram[:midpoint])
    rightX_base = np.argmax(histogram[midpoint:]) + midpoint

    # Choose the number of sliding windows
    num_windows = 9

    # Set height of windows
    window_height = np.int(binary_img.shape[0] / num_windows)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Current positions to be updated for each window
    current_leftX = leftX_base
    current_rightX = rightX_base

    # Set minimum number of pixels found to recenter window
    min_num_pixel = 50

    # Create empty lists to receive left and right lane pixel indices
    win_left_lane = []
    win_right_lane = []

    window_margin = left_lane.window_margin

    # Step through the windows one by one
    for window in range(num_windows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_img.shape[0] - (window + 1) * window_height
        win_y_high = binary_img.shape[0] - window * window_height
        win_leftx_min = current_leftX - window_margin
        win_leftx_max = current_leftX + window_margin
        win_rightx_min = current_rightX - window_margin
        win_rightx_max = current_rightX + window_margin

        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_leftx_min, win_y_low), (win_leftx_max, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_rightx_min, win_y_low), (win_rightx_max, win_y_high), (0, 255, 0), 2)

        # Identify the nonzero pixels in x and y within the window
        left_window_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_leftx_min) & (
            nonzerox <= win_leftx_max)).nonzero()[0]
        right_window_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_rightx_min) & (
            nonzerox <= win_rightx_max)).nonzero()[0]

        # Append these indices to the lists
        win_left_lane.append(left_window_inds)
        win_right_lane.append(right_window_inds)

        # If you found > minpix pixels, recenter next window on their mean position
        if len(left_window_inds) > min_num_pixel:
            current_leftX = np.int(np.mean(nonzerox[left_window_inds]))
        if len(right_window_inds) > min_num_pixel:
            current_rightX = np.int(np.mean(nonzerox[right_window_inds]))

    # Concatenate the arrays of indices
    win_left_lane = np.concatenate(win_left_lane)
    win_right_lane = np.concatenate(win_right_lane)

    # Extract left and right line pixel positions
    leftx = nonzerox[win_left_lane]
    lefty = nonzeroy[win_left_lane]
    rightx = nonzerox[win_right_lane]
    righty = nonzeroy[win_right_lane]

    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]

    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    left_lane.current_fit = left_fit
    right_line.current_fit = right_fit

    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_img.shape[0] - 1, binary_img.shape[0])

    # ax^2 + bx + c
    left_plotx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_plotx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]

    left_lane.prevx.append(left_plotx)
    right_line.prevx.append(right_plotx)

    if len(left_lane.prevx) > 10:
        left_avg_line = smoothing(left_lane.prevx, 10)
        left_avg_fit = np.polyfit(ploty, left_avg_line, 2)
        left_fit_plotx = left_avg_fit[0] * ploty ** 2 + left_avg_fit[1] * ploty + left_avg_fit[2]
        left_lane.current_fit = left_avg_fit
        left_lane.allx, left_lane.ally = left_fit_plotx, ploty
    else:
        left_lane.current_fit = left_fit
        left_lane.allx, left_lane.ally = left_plotx, ploty

    if len(right_line.prevx) > 10:
        right_avg_line = smoothing(right_line.prevx, 10)
        right_avg_fit = np.polyfit(ploty, right_avg_line, 2)
        right_fit_plotx = right_avg_fit[0] * ploty ** 2 + right_avg_fit[1] * ploty + right_avg_fit[2]
        right_line.current_fit = right_avg_fit
        right_line.allx, right_line.ally = right_fit_plotx, ploty
    else:
        right_line.current_fit = right_fit
        right_line.allx, right_line.ally = right_plotx, ploty

    left_lane.startx, right_line.startx = left_lane.allx[len(left_lane.allx) - 1], right_line.allx[len(right_line.allx) - 1]
    left_lane.endx, right_line.endx = left_lane.allx[0], right_line.allx[0]

    # Set detected=True for both lines
    left_lane.detected, right_line.detected = True, True

    measure_curvature(left_lane, right_line)
    return out_img
d810c111bcf5731f7c4486c77863c3505d8400a8
3,654,375
def get_primary_language(current_site=None):
    """Fetch the first language of the current site settings."""
    current_site = current_site or Site.objects.get_current()
    return get_languages()[current_site.id][0]['code']
c4d71c30424bb753de353e325a012efb9265a01b
3,654,376
def get_Theta_ref_cnd_H(Theta_sur_f_hex_H):
    """(23)

    Args:
        Theta_sur_f_hex_H: Surface temperature of the indoor unit heat exchanger during heating (℃)

    Returns:
        Condensing temperature of the refrigerant during heating (℃)
    """
    Theta_ref_cnd_H = Theta_sur_f_hex_H
    if Theta_ref_cnd_H > 65:
        Theta_ref_cnd_H = 65
    return Theta_ref_cnd_H
deccaa524aebda2a7457da53b44c517287a190a4
3,654,377
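For clarity, the clamping behaviour of equation (23) above in two quick calls:

get_Theta_ref_cnd_H(52.0)  # -> 52.0 (passed through unchanged)
get_Theta_ref_cnd_H(70.0)  # -> 65 (capped at the 65 ℃ ceiling)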
import os
import glob


def get_tool_info(arduino_info, tool_name):
    """."""
    tool_info = {}
    has_tool = False
    sel_pkg = arduino_info['selected'].get('package')
    inst_pkgs_info = arduino_info.get('installed_packages', {})
    inst_pkg_names = inst_pkgs_info.get('names', [])

    pkg_info = get_package_info(inst_pkgs_info, sel_pkg)
    pkg_path = pkg_info.get('path', '')
    tools_path = os.path.join(pkg_path, 'tools')
    tool_path = os.path.join(tools_path, tool_name)
    if os.path.isdir(tool_path):
        has_tool = True
    else:
        for pkg_name in inst_pkg_names:
            pkg_info = get_package_info(inst_pkgs_info, pkg_name)
            pkg_path = pkg_info.get('path', '')
            tools_path = os.path.join(pkg_path, 'tools')
            tool_path = os.path.join(tools_path, tool_name)
            if os.path.isdir(tool_path):
                has_tool = True
                break

    has_bin = False
    if has_tool:
        sub_paths = glob.glob(tool_path + '/*')[::-1]
        for sub_path in sub_paths:
            if os.path.isfile(sub_path):
                has_bin = True
                break

        if not has_bin:
            for sub_path in sub_paths:
                bin_path = os.path.join(sub_path, 'bin')
                if os.path.isdir(bin_path):
                    has_bin = True
                else:
                    s_sub_paths = glob.glob(sub_path + '/*')
                    for s_sub_path in s_sub_paths:
                        if os.path.isfile(s_sub_path):
                            has_bin = True
                            break
                if has_bin:
                    tool_path = sub_path
                    break

    if not has_bin:
        tool_path = ''
        avial_pkgs_info = arduino_info.get('packages', {})
        avail_pkg_names = avial_pkgs_info.get('names', [])
        for pkg_name in avail_pkg_names:
            pkg_info = get_package_info(avial_pkgs_info, pkg_name)
            tools_info = pkg_info.get('tools', {})
            tool_names = tools_info.get('names', [])
            if tool_name in tool_names:
                tool_info = tools_info.get(tool_name)
                break

    tool_info['name'] = tool_name
    tool_info['path'] = tool_path
    return tool_info
88b2c6c68e69dfb87538b94ca2a0798f3f1b51ba
3,654,378
def hpat_pandas_series_shape(self):
    """
    Intel Scalable Dataframe Compiler User Guide
    ********************************************
    Pandas API: pandas.Series.shape

    Examples
    --------
    .. literalinclude:: ../../../examples/series/series_shape.py
       :language: python
       :lines: 27-
       :caption: Return a tuple of the shape of the underlying data.
       :name: ex_series_shape

    .. command-output:: python ./series/series_shape.py
       :cwd: ../../../examples

    Intel Scalable Dataframe Compiler Developer Guide
    *************************************************
    Pandas Series attribute :attr:`pandas.Series.shape` implementation

    .. only:: developer
        Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
    """

    _func_name = 'Attribute shape.'
    ty_checker = TypeChecker(_func_name)
    ty_checker.check(self, SeriesType)

    def hpat_pandas_series_shape_impl(self):
        return self._data.shape

    return hpat_pandas_series_shape_impl
6c27e6276caecaea18650398678d04623ddcc653
3,654,379
async def port_utilization_range(
    port_id: str,
    direction: str,
    limit: int,
    start: str,
    granularity: int,
    end=None,
):
    """Get port utilization by date range."""
    async with Influx("telegraf", granularity=granularity) as db:
        q = (
            db.SELECT(f"derivative(max(bytes{direction.title()}), 1s) * 8")
            .FROM("interfaces")
            .BETWEEN(start, end)
            .WHERE(port_id=port_id)
            .GROUP("port_id", "participant_id")
            .FILL("none")
            .LIMIT(limit)
        )
        return await q.query()
2d2ac7ad32ee279f88d662bd8f099ccee0407b66
3,654,380
def composer_includes(context):
    """
    Include the composer JS and CSS files in a page if the user has permission.
    """
    if context.get('can_compose_permission', False):
        url = settings.STATIC_URL
        url += '' if url[-1] == '/' else '/'
        js = '<script type="text/javascript" src="%sjs/composer.min.js"></script>' % url
        css = '<link rel="stylesheet" type="text/css" href="%scss/composer.css">' % url
        return js + css
    return ''
7c0a89a5ce1e1fe5838e8022fe568347420ffb0f
3,654,381
def craft(crafter, recipe_name, *inputs, raise_exception=False, **kwargs):
    """
    Access function. Craft a given recipe from a source recipe module. A
    recipe module is a Python module containing recipe classes. Note that
    this requires `settings.CRAFT_RECIPE_MODULES` to be added to a list of
    one or more python-paths to modules holding Recipe-classes.

    Args:
        crafter (Object): The one doing the crafting.
        recipe_name (str): The `CraftRecipe.name` to use. This uses fuzzy-matching
            if the result is unique.
        *inputs: Suitable ingredients and/or tools (Objects) to use in the crafting.
        raise_exception (bool, optional): If crafting failed for whatever
            reason, raise `CraftingError`. The user will still be informed by the
            recipe.
        **kwargs: Optional kwargs to pass into the recipe (will be passed into
            recipe.craft).

    Returns:
        list: Crafted objects, if any.

    Raises:
        CraftingError: If `raise_exception` is True and crafting failed to
            produce an output.
        KeyError: If `recipe_name` failed to find a matching recipe class (or
            the hit was not precise enough.)

    Notes:
        If no recipe_module is given, will look for a list `settings.CRAFT_RECIPE_MODULES`
        and lastly fall back to the example module `"evennia.contrib."`

    """
    # delayed loading/caching of recipes
    _load_recipes()

    RecipeClass = _RECIPE_CLASSES.get(recipe_name, None)
    if not RecipeClass:
        # try a startswith fuzzy match
        matches = [key for key in _RECIPE_CLASSES if key.startswith(recipe_name)]
        if not matches:
            # try in-match
            matches = [key for key in _RECIPE_CLASSES if recipe_name in key]
        if len(matches) == 1:
            RecipeClass = matches[0]
    if not RecipeClass:
        raise KeyError(
            f"No recipe in settings.CRAFT_RECIPE_MODULES has a name matching {recipe_name}"
        )
    recipe = RecipeClass(crafter, *inputs, **kwargs)
    return recipe.craft(raise_exception=raise_exception)
860b839123394f2ba210b4cfdcb40a57595701a3
3,654,382
from typing import Iterable
from typing import Union
from typing import List
from typing import Any
from typing import Dict
import collections


def load_data(
    data,
    *,
    keys: Iterable[Union[str, int]] = (0,),
    unique_keys: bool = False,
    multiple_values: bool = False,
    unique_values: bool = False,
    **kwargs,
) -> Union[List[Any], Dict[Any, Union[Any, List[Any]]]]:
    """Load data.

    If no values are provided, then return a list from keys.
    If values are provided, then return a dictionary of keys/values.

    Args:
        data (str): File or buffer.
            See Pandas 'filepath_or_buffer' option from 'read_csv()'.

    Kwargs:
        keys (Iterable[str|int]): Columns to use as dictionary keys.
            Multiple keys are stored as tuples in same order as given.
            If str, then it corresponds to 'headers' names.
            If int, then it corresponds to column indices.

        unique_keys (bool): Control if keys can be repeated or not.
            Only applies if 'values' is None.

        multiple_values (bool): Specify if values consist of single or multiple
            elements. For multi-value case, values are placed in an iterable
            container. For single-value case, the value is used as-is.
            Only applies if 'values' is not None.

        unique_values (bool): Control if values can be repeated or not.
            Only applies if 'multiple_values' is True.

    Kwargs:
        Options forwarded to 'iload_data()'.
    """
    if kwargs.get('values') is None:
        if unique_keys:
            # NOTE: Convert to a list because JSON does not serialize sets.
            _data = list(set(iload_data(data, keys=keys, **kwargs)))
        else:
            _data = list(iload_data(data, keys=keys, **kwargs))
    elif multiple_values:
        if unique_values:
            _data = collections.defaultdict(list)
            for k, v in iload_data(data, keys=keys, **kwargs):
                if v not in _data[k]:
                    _data[k].append(v)
        else:
            _data = collections.defaultdict(list)
            for k, v in iload_data(data, keys=keys, **kwargs):
                _data[k].append(v)
    else:
        # Consider the value of the first appearance of a key.
        _data = {}
        for k, v in iload_data(data, keys=keys, **kwargs):
            if k not in _data:
                _data[k] = v
    return _data
ad3a5f74a0bbbfbf3de62f691be5b27b63fa9949
3,654,383
def get_avg_wind_speed(data):
    """this function gets the average wind speeds for each point in the fetched data"""
    wind_speed_history = []
    for point in data:
        this_point_wind_speed = []
        for year_reading in point:
            hourly = []
            for hour in year_reading['weather'][0]['hourly']:
                hourly.append(float(hour['windspeedKmph']))
            this_point_wind_speed.append(float(np.average(hourly)))
        wind_speed_history.append(np.flip(this_point_wind_speed))
    return wind_speed_history
fdeeb64f495343893ffc98997de2bad5748591c2
3,654,384
from typing import List


def get_uris_of_class(repository: str, endpoint: str, sparql_file: str, class_name: str,
                      endpoint_type: str, limit: int = 1000) -> List[URIRef]:
    """
    Returns the list of uris of type class_name
    :param repository: The repository containing the RDF data
    :param endpoint: The SPARQL endpoint
    :param sparql_file: The file containing the SPARQL query
    :param class_name: The class_name to search
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: The list of uris of type class_name
    """
    uri_list = []
    uris_of_class_sparql_query = open(sparql_file).read()
    uris_of_class_template = Template(uris_of_class_sparql_query).substitute(class_name=class_name)
    uris_of_class_template = Template(uris_of_class_template + " limit $limit offset $offset ")
    for uri in get_sparql_results(uris_of_class_template, "uri", endpoint, repository, endpoint_type, limit):
        uri_list.append(uri)
        if len(uri_list) % 1000 == 0:
            print(len(uri_list))
    return uri_list
7b5cf86d286afd00d40e202e98661be3668364c3
3,654,385
def nspath_eval(xpath: str) -> str:
    """
    Return an etree friendly xpath based expanding namespace
    into namespace URIs

    :param xpath: xpath string with namespace prefixes

    :returns: etree friendly xpath
    """
    out = []
    for chunks in xpath.split('/'):
        namespace, element = chunks.split(':')
        out.append('{{{}}}{}'.format(NAMESPACES[namespace], element))
    return '/'.join(out)
6e5e558da8d00d57ee1857bce2b8c99d05386c73
3,654,386
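A usage sketch for the xpath expander above, assuming `NAMESPACES` is a module-level prefix-to-URI dict; the mapping below is illustrative only, not taken from the source:

# Assumed mapping, for illustration only
NAMESPACES = {
    'csw': 'http://www.opengis.net/cat/csw/2.0.2',
    'dc': 'http://purl.org/dc/elements/1.1/',
}

nspath_eval('csw:Record/dc:title')
# -> '{http://www.opengis.net/cat/csw/2.0.2}Record/{http://purl.org/dc/elements/1.1/}title'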
def basic_streamalert_config():
    """Generate basic StreamAlert configuration dictionary."""
    return {
        'global': {
            'account': {
                'aws_account_id': '123456789123',
                'kms_key_alias': 'stream_alert_secrets',
                'prefix': 'unit-testing',
                'region': 'us-west-2'
            },
            'terraform': {
                'tfstate_bucket': 'unit-testing.streamalert.terraform.state',
                'tfstate_s3_key': 'stream_alert_state/terraform.tfstate',
                'tfvars': 'terraform.tfvars'
            },
            'infrastructure': {
                'monitoring': {
                    'create_sns_topic': True,
                    'metric_alarms': {
                        'rule_processor': {
                            'Aggregate Unit Testing Failed Parses Alarm': {
                                'alarm_description': '',
                                'comparison_operator': 'GreaterThanOrEqualToThreshold',
                                'evaluation_periods': 1,
                                'metric_name': 'RuleProcessor-FailedParses',
                                'period': 300,
                                'statistic': 'Sum',
                                'threshold': 1.0
                            }
                        }
                    }
                }
            }
        },
        'lambda': {
            'alert_processor_config': {
                'handler': 'stream_alert.alert_processor.main.handler',
                'source_bucket': 'unit-testing.streamalert.source',
                'source_current_hash': '<auto_generated>',
                'source_object_key': '<auto_generated>',
                'third_party_libraries': []
            },
            'rule_processor_config': {
                'handler': 'stream_alert.rule_processor.main.handler',
                'source_bucket': 'unit-testing.streamalert.source',
                'source_current_hash': '<auto_generated>',
                'source_object_key': '<auto_generated>',
                'third_party_libraries': [
                    'jsonpath_rw',
                    'netaddr'
                ]
            },
            'athena_partition_refresh_config': {
                'current_version': '$LATEST',
                'enable_metrics': False,
                'enabled': True,
                'handler': 'main.handler',
                'memory': 128,
                'partitioning': {
                    'firehose': {},
                    'normal': {
                        'unit-testing.streamalerts': 'alerts'
                    }
                },
                'source_bucket': 'unit-testing.streamalert.source',
                'source_current_hash': '<auto_generated>',
                'source_object_key': '<auto_generated>',
                'third_party_libraries': [
                    'backoff'
                ],
                'timeout': 60
            },
        },
        'clusters': {
            'prod': {
                'id': 'prod',
                'modules': {
                    'cloudwatch_monitoring': {
                        'enabled': True
                    },
                    'kinesis': {
                        'firehose': {
                            'enabled': True,
                            's3_bucket_suffix': 'streamalert.results'
                        },
                        'streams': {
                            'retention': 24,
                            'shards': 1
                        }
                    },
                    'kinesis_events': {
                        'enabled': True
                    },
                    'stream_alert': {
                        'alert_processor': {
                            'current_version': '$LATEST',
                            'memory': 128,
                            'timeout': 10
                        },
                        'rule_processor': {
                            'current_version': '$LATEST',
                            "enable_metrics": True,
                            'memory': 128,
                            'metric_alarms': {
                                'Prod Unit Testing Failed Parses Alarm': {
                                    'alarm_description': '',
                                    'comparison_operator': 'GreaterThanOrEqualToThreshold',
                                    'evaluation_periods': 1,
                                    'metric_name': 'RuleProcessor-FailedParses-PROD',
                                    'period': 300,
                                    'statistic': 'Sum',
                                    'threshold': 1.0
                                }
                            },
                            'timeout': 10
                        }
                    }
                },
                'outputs': {
                    'kinesis': [
                        'username',
                        'access_key_id',
                        'secret_key'
                    ]
                },
                'region': 'us-east-1'
            }
        }
    }
8e766fa73c9043888c6531659bccc57fcb1a88ea
3,654,387
def _read_elastic_moduli(outfilename):
    """
    Read elastic modulus matrix from a completed GULP job

    :param outfilename: Path of the stdout from the GULP job
    :type outfilename: str
    :returns: 6x6 Elastic modulus matrix in GPa
    """
    outfile = open(outfilename, 'r')
    moduli_array = []
    while True:
        oneline = outfile.readline()
        if not oneline:  # break at EOF
            break
        if 'Elastic Constant Matrix' in oneline:
            moduli = np.zeros((6, 6))
            dummyline = outfile.readline()
            dummyline = outfile.readline()
            dummyline = outfile.readline()
            dummyline = outfile.readline()
            for i in range(6):
                modline = outfile.readline().strip()
                e1, e2, e3, e4, e5, e6 = modline[3:13], modline[13:23], modline[23:33], modline[33:43], modline[43:53], modline[53:63]
                modarray = [e1, e2, e3, e4, e5, e6]
                float_modarray = []
                # Handle errors
                for element in modarray:
                    if element[0] == "*":
                        float_modarray.append(0.0)
                    else:
                        float_modarray.append(float(element))
                moduli[i, :] = float_modarray
            moduli_array.append(moduli)
    outfile.close()
    return moduli_array
d09672135bed16aa651bbe5befe526e21763fc1b
3,654,388
def predict_koopman(lam, w, v, x0, ncp, g, h, u=None):
    """Predict the future dynamics of the system given an initial value `x0`. Result is returned
    as a matrix where rows correspond to states and columns to time.

    Args:
        lam (tf.Tensor): Koopman eigenvalues.
        w (tf.Tensor): Left eigenvectors.
        v (tf.Tensor): Right eigenvectors.
        x0 (tf.Tensor): Initial value of the system.
        N (int): Number of time steps to predict.
        g (Net): Encoder network.
        h (Net): Decoder network.
        u (tf.Tensor): Input signal.

    Returns:
        tuple: Prediction of the states of the system for N time steps into the future,
            prediction of the observables of the system for N time steps into the future.
    """
    # Precompute some constants for more efficient computations
    wH = tf.linalg.adjoint(w)
    norm_vec = 1 / tf.math.reduce_sum(tf.math.multiply(tf.math.conj(w), v), axis=0)

    # Store each time step in a list
    res_x = tf.TensorArray(x0.dtype, size=ncp + 1)
    res_gx = tf.TensorArray(w.dtype, size=ncp + 1)
    res_x = res_x.write(0, x0)
    res_gx = res_gx.write(0, tf.cast(tf.squeeze(g(tf.expand_dims(x0, 0)), axis=[0]), w.dtype))

    # Initiate time stepping
    xk = x0
    if u is not None:
        for k in range(1, ncp + 1):
            xk = tf.concat([tf.expand_dims(xk[:-1], 0), tf.reshape(u[k - 1], [1, -1])], axis=1)
            xk, gxk = one_step_pred(lam, wH, v, norm_vec, xk, g, h)
            res_x = res_x.write(k, xk)
            res_gx = res_gx.write(k, gxk)
    else:
        for k in range(1, ncp + 1):
            xk = tf.expand_dims(xk, 0)
            xk, gxk = one_step_pred(lam, wH, v, norm_vec, xk, g, h)
            res_x = res_x.write(k, xk)
            res_gx = res_gx.write(k, gxk)
    return res_x.stack(), res_gx.stack()
8509a96a5566f69ac238827538591ff9fcf34269
3,654,389
def handle_registration():
    """
    Show the registration form or handle the registration
    of a user; if the email or username is taken, take them back to the
    registration form
    - Upon successful login, take to the homepage
    """
    form = RegisterForm()

    email = form.email.data
    username = form.username.data

    # If there is a user with this email already
    if User.query.filter_by(email=email).first():
        form.email.errors = ["This email is already being used"]

    # Check if there is a user with this username already
    if User.query.filter_by(username=username).first():
        form.username.errors = ["This username is already being used"]

    if form.email.errors or form.username.errors:
        return render_template('login_register/register.html', form=form)

    if form.validate_on_submit():
        pwd = form.password.data
        f_name = form.first_name.data
        l_name = form.last_name.data

        user = User.register(username=username, pwd=pwd,
                             email=email, f_name=f_name, l_name=l_name)

        db.session.add(user)
        db.session.commit()

        login_user(user)
        flash('Successfully logged in!', "success")

        # on successful login, redirect to user detail page
        return redirect(url_for("homepage.index"))
    else:
        return render_template("login_register/register.html", form=form)
27ce2a38202ea5873c53bc53fd5d2843515177cf
3,654,390
import typing


def func_xy_args_kwargs_annotate(
    x: "0", y, *args: "2", **kwargs: "4"
) -> typing.Tuple:
    """func.

    Parameters
    ----------
    x, y: float
    args: tuple
    kwargs: dict

    Returns
    -------
    x, y: float
    args: tuple
    kwargs: dict

    """
    return x, y, None, None, args, None, None, kwargs
41d06b792ac3d794e1c0ea8bedc1708bb5b4e969
3,654,391
import torch


def mp_nerf_torch(a, b, c, l, theta, chi):
    """ Custom Natural extension of Reference Frame.
        Inputs:
        * a: (batch, 3) or (3,). point(s) of the plane, not connected to d
        * b: (batch, 3) or (3,). point(s) of the plane, not connected to d
        * c: (batch, 3) or (3,). point(s) of the plane, connected to d
        * theta: (batch,) or (float). angle(s) between b-c-d
        * chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
        Outputs: d (batch, 3) or (float). the next point in the sequence, linked to c
    """
    # safety check
    if not ((-np.pi <= theta) * (theta <= np.pi)).all().item():
        raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
    # calc vecs
    ba = b - a
    cb = c - b
    # calc rotation matrix. based on plane normals and normalized
    n_plane = torch.cross(ba, cb, dim=-1)
    n_plane_ = torch.cross(n_plane, cb, dim=-1)
    rotate = torch.stack([cb, n_plane_, n_plane], dim=-1)
    rotate /= torch.norm(rotate, dim=-2, keepdim=True)
    # calc proto point, rotate. add (-1 for sidechainnet convention)
    # https://github.com/jonathanking/sidechainnet/issues/14
    d = torch.stack([-torch.cos(theta),
                     torch.sin(theta) * torch.cos(chi),
                     torch.sin(theta) * torch.sin(chi)], dim=-1).unsqueeze(-1)
    # extend base point, set length
    return c + l.unsqueeze(-1) * torch.matmul(rotate, d).squeeze()
2c42339455f6549e87488d12dec44282a6570d63
3,654,392
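A minimal sketch of calling the NeRF routine above on a single point triple; the values are arbitrary, and `np` is assumed to be NumPy imported at module level:

import numpy as np
import torch

a = torch.tensor([[0.0, 0.0, 0.0]])
b = torch.tensor([[1.5, 0.0, 0.0]])
c = torch.tensor([[2.3, 1.0, 0.0]])
bond_length = torch.tensor([1.5])
theta = torch.tensor([2.0])   # bond angle in radians, must lie in [-pi, pi]
chi = torch.tensor([-0.8])    # dihedral angle in radians

d = mp_nerf_torch(a, b, c, bond_length, theta, chi)  # -> tensor of shape (1, 3)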
def makemarkers(nb):
    """ Give a list of cycling markers. See http://matplotlib.org/api/markers_api.html

    .. note:: This what I consider the *optimal* sequence of markers, they are clearly differentiable one from another and all are pretty.

    Examples:

    >>> makemarkers(7)
    ['o', 'D', 'v', 'p', '<', 's', '^']
    >>> makemarkers(12)
    ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>', 'o', 'D']
    """
    allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']
    longlist = allmarkers * (1 + int(nb / float(len(allmarkers))))  # Cycle the good number of time
    return longlist[:nb]
a1dc00cdb831b3b622670a5f36ba956273379b16
3,654,393
import random
import time
import torch


def test_grad_diffoutdim(eval_data_loader, model, num_classes, output_dir='pred',
                         has_gt=True, save_vis=False, downsize_scale=1, args=None):
    """
    Evaluates the effect of increasing output dimension on the norm of the gradient.
    Monte Carlo sampling will be used and the result would be averaged.
    First choose the number of pixels to calculate the loss for (output dimension) --> select_num.
    For each select_num, we do the following MC_times (as Monte Carlo sampling):
        Calculate the loss for select_num pixels chosen, backpropagate and get the input gradient.
        Average all these.

    :param eval_data_loader:
    :param model:
    :param num_classes:
    :param output_dir:
    :param has_gt:
    :param save_vis:
    :param downsize_scale:
    :param args:
    :return:
    """
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    end = time.time()
    hist = np.zeros((num_classes, num_classes))
    # exit(0)
    if torch.cuda.is_available():
        GPU_flag = True
    else:
        GPU_flag = False

    # Number of points to be selected for masking - analogous to number of output dimensions.
    # Only these many pixels will be considered to calculate the loss.
    select_num_list = [1] + [i * 4 for i in range(1, 100)] + [400 + i * 200 for i in range(100)]
    result_list = []
    for select_num in select_num_list:
        print("********")
        print("selecting {} of output".format(select_num))
        grad_sample_avg_sum = 0

        if select_num < 400:
            MCtimes = 20
        else:
            MCtimes = 5
        MCtimes = 1
        # Monte Carlo Sampling - MCtimes is the number of times that we sample
        for inner_i in range(MCtimes):
            grad_sum = 0
            cnt = 0
            print("MC time {}".format(inner_i))
            for iter, (image, label, name) in enumerate(eval_data_loader):
                # break if 50 images (batches) done
                if cnt > 1 and args.debug:
                    break
                elif cnt > 200:
                    break
                data_time.update(time.time() - end)

                if torch.cuda.is_available():
                    image_var = Variable(image.cuda(), requires_grad=True)
                else:
                    image_var = Variable(image, requires_grad=True)

                # print("__shape of image var__", image_var.shape)  # [1,3,1024,2048]
                final = model(image_var)[0]
                # print("__shape of final__", final.shape)  # [1, 19, 1024,2048]
                _, pred = torch.max(final, 1)
                # print("__shape of pred__", pred.shape)  # [1,1024,2048]

                # for this image, sample select_num number of pixels
                temp = [i for i in range(image_var.size(2) * image_var.size(3))]
                selected = random.sample(temp, select_num)

                # Build mask for image -
                mask = np.zeros((image_var.size(2) * image_var.size(3)), dtype=np.uint8)
                for iii in range(select_num):
                    mask[selected[iii]] = 1
                mask = mask.reshape(1, 1, image_var.size(2), image_var.size(3))
                mask = torch.from_numpy(mask)
                mask = mask.float()
                mask_target = mask.long()
                # print('label', label)
                label = label.long()
                if GPU_flag:
                    # image.cuda()
                    # image_var.cuda()  # BUG: too late
                    mask = mask.cuda()
                    mask_target = mask_target.cuda()
                    label = label.cuda()
                target, mask = Variable(label), Variable(mask)
                loss = cross_entropy2d(final * mask, target * mask_target, size_average=False)
                loss.backward()

                data_grad = image_var.grad
                np_data_grad = data_grad.cpu().numpy()
                # print(np_data_grad.shape)
                L2_grad_norm = np.linalg.norm(np_data_grad) / select_num
                # the 1/M \sum_M \partial{Loss_i}/\partial{input}
                grad_sum += L2_grad_norm

                # increment the batch # counter
                cnt += 1

                pred = pred.cpu().data.numpy()
                batch_time.update(time.time() - end)
                end = time.time()

            grad_avg = grad_sum / cnt  # Represents the gradient average for batch. cnt is the number of samples in a batch.
            grad_sample_avg_sum += grad_avg  # For each sampling this is the sum of avg gradients in that sample.

        grad_sample_avg_sum /= MCtimes
        result_list.append(grad_sample_avg_sum)
        print(select_num, 'middle result', result_list)
        np.save('{}_{}_graph_more.npy'.format(args.dataset, args.arch), result_list)

    print('Final', result_list)
    np.save('{}_{}_graph_more.npy'.format(args.dataset, args.arch), result_list)

    # not sure if has to be moved
    if has_gt:  # val
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        return round(np.nanmean(ious), 2)
c6286c8dabcd17c5b9d07affe4ecdd452a19784b
3,654,394
import types
import typing


def uselist(*, schema: types.Schema, schemas: types.Schemas) -> typing.Optional[bool]:
    """
    Retrieve the x-uselist of the schema.

    Raises MalformedSchemaError if the x-uselist value is not a boolean.

    Args:
        schema: The schema to get x-uselist from.
        schemas: The schemas for $ref lookup.

    Returns:
        The x-uselist or None.
    """
    value = peek_key(
        schema=schema, schemas=schemas, key=types.ExtensionProperties.USELIST
    )
    if value is None:
        return None
    if not isinstance(value, bool):
        raise exceptions.MalformedSchemaError(
            "The x-uselist property must be of type boolean."
        )
    return value
eea45ef82a2d2715473a7a2203dcfdef1e958805
3,654,395
def getIPRules():
    """
    Fetches a json representation of the Iptables rules on the server

    GET: json object with the all the iptables rules on the system
    """
    return jsonify({"result": True, "rules": hl.getIptablesRules()})
5b91978c0329105ff85f02deeccce707182b5551
3,654,396
def _get_only_relevant_data(video_data):
    """
    Method to build ES document with only the relevant information
    """
    return {
        "kind": video_data["kind"],
        "id": video_data["id"],
        "published_at": video_data["snippet"]["publishedAt"],
        "title": video_data["snippet"]["title"],
        "description": video_data["snippet"]["description"],
        "thumbnail_url": video_data["snippet"]["thumbnails"]["default"]["url"],
        "channel_title": video_data["snippet"]["channelTitle"],
    }
b5d2a0cf2c5b7121c92e95adb524379d7cf3eb9c
3,654,397
def get_mask(img):
    """
    Convert an image to a mask array.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
    return mask
255e396b12f61dfe9d98fcf9c89fdf6b486a7a95
3,654,398
def b32encode(hex_values, pad_left=True):
    """
    Base32 encoder algorithm for Nano.

    Transforms the given hex_value into a base-32 representation. The allowed letters are:
        "13456789abcdefghijkmnopqrstuwxyz"

    :param hex_values: Hexadecimal values (string) or byte array containing the data to be encoded.
    :param pad_left: True if a byte of 0s should be prepended to the input. False otherwise.
                     This padding is required when generating a nanoblocks address with this algorithm.
    """
    if type(hex_values) is str:
        data_bytes = int(hex_values, 16).to_bytes(32, "big")
    else:
        data_bytes = hex_values

    data_binary = ("0000" if pad_left else "") + "".join([f'{p:08b}' for p in data_bytes])
    data_encoded = [int(split, 2) for split in chunkize(data_binary, 5)]

    return "".join(pub_key_map.iloc[data_encoded].tolist())
7a58b56ad4d3733da6d7c211bab470d8cde63e9c
3,654,399