Columns:
content: string (length 22 to 815k)
id: int64 (0 to 4.91M)
def write_changes(changes_file, changes):
    """Write SVs and SNPs to a file"""
    with open(changes_file, "w") as outfile:
        header = "\t".join(["type", "ref_idx", "alt_idx", "size", "ref", "alt", "\n"])
        outfile.write(header)
        for change in changes:
            change = "\t".join(map(str, change)) + "\n"
            outfile.write(change)
5,352,900
def lambda_handler(event, context): """ Lambda entry-point """ news_link = "https://news.ok.ubc.ca/feed/" news_items = [] filtered_news_items = [] response_items = get_news_items_from_web(news_link) if len(response_items) == 0: return {"status": "No items in RSS Feed"} # Iterate through list of raw items and parse them, if there is a parsing error, save the raw item that throws an # error to S3 for item in response_items: try: news_item = news_parser(item) news_items.append(news_item) except Exception as e: S3_CLIENT.put_object(Body=json.dumps(item, indent=4), Bucket=S3_BUCKET_NAME, Key=f'ErrorLog/News/{str(datetime.now(tz=pytz.timezone("America/Vancouver")))[:-13]}.json') LOGGER.error(f"Error in parsing a news item, raw item saved to {S3_BUCKET_NAME}/ErrorLog/News") detailed_exception(LOGGER) # Filter the parsed items based on last query time to get only new items try: last_query_time = SSM_CLIENT.get_parameter(Name="NewsQueryTime")["Parameter"]["Value"] for news_item in news_items: if datetime.strptime(last_query_time, "%Y-%m-%d %H:%M:%S") \ < datetime.strptime(news_item["dateModified"], "%Y-%m-%d %H:%M:%S"): filtered_news_items.append(news_item) SSM_CLIENT.put_parameter(Name="NewsQueryTime", Value=str(datetime.now(tz=pytz.timezone("America/Vancouver")))[:-13], Overwrite=True) except SSM_CLIENT.exceptions.InternalServerError as e: LOGGER.error("Error in communicating with Parameter store") detailed_exception(LOGGER) LOGGER.debug(json.dumps(news_items, indent=4)) LOGGER.debug(json.dumps(filtered_news_items, indent=4)) # Save new items to central data lake S3 if len(filtered_news_items) != 0: S3_CLIENT.put_object(Body=json.dumps(filtered_news_items, indent=4), Bucket=S3_BUCKET_NAME, Key=f'News/{str(datetime.now(tz=pytz.timezone("America/Vancouver")))[:-13]}.json') # Insert items into DynamoDB table with appropriate TTL table = DYNAMODB_RESOURCE.Table(NEWS_TABLE) for events_item in filtered_news_items: events_item["expiresOn"] = get_adjusted_unix_time(events_item["dateModified"], "%Y-%m-%d %H:%M:%S", EXPIRY_DAYS_OFFSET * 24) table.put_item(Item=events_item) return {"status": "completed"}
5,352,901
def read_photons(photonfile, ra0, dec0, tranges, radius, verbose=0, colnames=['t', 'x', 'y', 'xa', 'ya', 'q', 'xi', 'eta', 'ra', 'dec', 'flags']): """ Read a photon list file and return a python dict() with the expected format. :param photonfile: Name of the photon event file to use. :type photonfile: str :param ra0: Right ascension of the targeted sky position, in degrees. :type ra0: float :param dec0: Declination of the targeted sky position, in degrees. :type dec0: float :param tranges: Set of time ranges from which to retrieve photon events, in GALEX time units :type tranges: list :param radius: The radius, in degrees, defining a cone on the sky that is centered on ra0 and dec0, from which to extract photons. :type radius: float :param verbose: Verbosity level, a value of 0 is minimum verbosity. :type verbose: int :param colnames: Labels of the columns found in the photon event file. :type colnames: list :returns: dict -- The set of photon events and their properties. """ # [Future]: Consider moving this method to 'dbasetools'. if verbose: mc.print_inline('Reading photon list file: {f}'.format(f=photonfile)) data = pd.io.parsers.read_csv(photonfile, names=colnames) ra, dec = np.array(data['ra']), np.array(data['dec']) angsep = mc.angularSeparation(ra0, dec0, ra, dec) ix = np.array([]) for trange in tranges: cut = np.where((angsep <= radius) & (np.isfinite(angsep)))[0] ix = np.concatenate((ix, cut), axis=0) events = {'t':np.array(data['t'][ix])/tscale, 'ra':np.array(data['ra'][ix]), 'dec':np.array(data['dec'][ix]), 'xi':np.array(data['xi'][ix]), 'eta':np.array(data['eta'][ix]), 'x':np.array(data['x'][ix]), 'y':np.array(data['y'][ix])} return events
5,352,902
def normElec(surf, electrode, normdist, NaN_as_zeros=True):
    """
    Notes
    -----
    When `normway` is a scalar, it takes the normal of the points of the mesh
    which are closer than `normway`. However, some points have a normal of
    (0, 0, 0) (default assigned if the vertex does not belong to any triangle).
    projectElectrodes.m includes those (0, 0, 0) in the calculation, but it
    might not be correct. See l. 138 (there are no NaN in normals but only
    (0, 0, 0)). To replicate the matlab behavior, make sure that `NaN_as_zeros`
    is True.
    """
    dvect = norm(electrode - surf['pos'], axis=1)  # l. 104-112 of projectElectrodes.m
    closevert = dvect < normdist  # l. 120 of projectElectrodes.m
    normal = surf['pos_norm'][closevert, :].mean(axis=0)  # l. 144 of projectElectrodes.m

    normals2av = surf['pos_norm'][closevert, :].copy()
    if NaN_as_zeros:
        normals2av[isnan(normals2av)] = 0
    normal = nanmean(normals2av, axis=0)

    return normal
5,352,903
def filter_sources(sources, release):
    """Check if a source has already been consumed. If it has not, add it to
    the sources dict.
    """
    source, version, dist, arch = parse_release(release)
    if source not in sources.keys():
        sources[source] = {version: {dist: [arch]}}
        return True
    elif version not in sources[source].keys():
        sources[source][version] = {dist: [arch]}
        return True
    elif dist not in sources[source][version]:
        sources[source][version][dist] = [arch]
        return True
    elif arch not in sources[source][version][dist]:
        sources[source][version][dist].append(arch)
        return True
    return False
5,352,904
async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]): """ Train a machine learning model. Provide records to the model to train it. The model should be already instantiated. Parameters ---------- model : Model Machine Learning model to use. See :doc:`/plugins/dffml_model` for models options. *args : list Input data for training. Could be a ``dict``, :py:class:`Record`, filename, one of the data :doc:`/plugins/dffml_source`, or a filename with the extension being one of the data sources. Examples -------- >>> import asyncio >>> from dffml import * >>> >>> model = SLRModel( ... features=Features( ... Feature("Years", int, 1), ... ), ... predict=Feature("Salary", int, 1), ... directory="tempdir", ... ) >>> >>> async def main(): ... await train( ... model, ... {"Years": 0, "Salary": 10}, ... {"Years": 1, "Salary": 20}, ... {"Years": 2, "Salary": 30}, ... {"Years": 3, "Salary": 40}, ... ) >>> >>> asyncio.run(main()) """ sources = _records_to_sources(*args) async with sources as sources, model as model: async with sources() as sctx, model() as mctx: return await mctx.train(sctx)
5,352,905
def get_os():
    """
    if called in powershell returns "powershell"
    if called in cygwin returns "cygwin"
    if called in darwin/osx returns "osx"
    for linux returns "linux"
    """
    env = os.environ
    p = platform.system().lower()
    terminal = p
    operating_system = p
    if p == 'windows':
        terminal = "powershell"
    if 'TERM' in env:
        terminal = env['TERM']
    if p == 'darwin':
        terminal = 'osx'
    return terminal
5,352,906
def setup_model_and_optimizer(args):
    """Setup model and optimizer."""
    print("setting up model...")
    model = get_model(args)
    print("setting up optimizer...")
    optimizer = get_optimizer(model, args)
    print("setting up lr scheduler...")
    lr_scheduler = get_learning_rate_scheduler(optimizer, args)

    if DEEPSPEED_WRAP and args.deepspeed:
        print_rank_0("DeepSpeed is enabled.")
        print("Calling deepspeed.initialize with our model, optimizer and scheduler")
        model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
            model=model,
            optimizer=optimizer,
            args=args,
            lr_scheduler=lr_scheduler,
            mpu=mpu,
            dist_init_required=False
        )
        print("We've wrapped our model, optimizer and scheduler in DeepSpeed")

    if args.load is not None:
        print_rank_0("Load checkpoint from " + args.load)
        args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args,
                                         deepspeed=DEEPSPEED_WRAP and args.deepspeed)
        print_rank_0("Checkpoint loaded")
    else:
        args.iteration = 0

    print("returning our model, optimizer and scheduler")
    return model, optimizer, lr_scheduler
5,352,907
def ensure_str(origin, decode=None):
    """
    Ensure is string, for display and completion.
    Then add double quotes

    Note: this method do not handle nil, make sure check (nil)
    out of this method.
    """
    if origin is None:
        return None
    if isinstance(origin, str):
        return origin
    if isinstance(origin, int):
        return str(origin)
    elif isinstance(origin, list):
        return [ensure_str(b) for b in origin]
    elif isinstance(origin, bytes):
        if decode:
            return origin.decode(decode)
        return _literal_bytes(origin)
    else:
        raise Exception(f"Unknown type: {type(origin)}, origin: {origin}")
5,352,908
def verdi_config_set(ctx, option, value, globally, append, remove): """Set an AiiDA option. List values are split by whitespace, e.g. "a b" becomes ["a", "b"]. """ from aiida.manage.configuration import Config, ConfigValidationError, Profile if append and remove: echo.echo_critical('Cannot flag both append and remove') config: Config = ctx.obj.config profile: Profile = ctx.obj.profile if option.global_only: globally = True # Define the string that determines the scope: for specific profile or globally scope = profile.name if (not globally and profile) else None scope_text = f"for '{profile.name}' profile" if (not globally and profile) else 'globally' if append or remove: try: current = config.get_option(option.name, scope=scope) except ConfigValidationError as error: echo.echo_critical(str(error)) if not isinstance(current, list): echo.echo_critical(f'cannot append/remove to value: {current}') if append: value = current + [value] else: value = [item for item in current if item != value] # Set the specified option try: value = config.set_option(option.name, value, scope=scope) except ConfigValidationError as error: echo.echo_critical(str(error)) config.store() echo.echo_success(f"'{option.name}' set to {value} {scope_text}")
5,352,909
def restore(
    collection: str, id: Union[str, int, Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
    """Retrieve cached data from database.

    :param collection: The collection to be retrieved. Same name as API commands.
    :type collection: str
    :param id: The unique identifier for a particular collection. This varies by command.
    :type id: Union[str, int]
    :return: The retrieved data if exists, else None.
    :rtype: Optional[Dict[str, Any]]
    """
    db = _get_connection()
    if not db:
        return None
    if not isinstance(id, dict):
        id = dict(_id=id)
    return db[collection].find_one(id, dict(_id=0))
5,352,910
def encode_bits(data, number):
    """Turn bits into n bytes of modulation patterns"""
    # 0000 00BA gets encoded as:
    # 128 64 32 16  8  4  2  1
    #   1  B  B  0  1  A  A  0
    # i.e. a 0 is a short pulse, a 1 is a long pulse
    #print("modulate_bits %s (%s)" % (ashex(data), str(number)))

    shift = number - 2
    encoded = []
    for i in range(int(number/2)):
        bits = (data >> shift) & 0x03
        #print("  shift %d bits %d" % (shift, bits))
        encoded.append(ENCODER[bits])
        shift -= 2
    #print("  returns:%s" % ashex(encoded))
    return encoded
5,352,911
def compute_session_changes(session, task=None, asset=None, app=None): """Compute the changes for a Session object on asset, task or app switch This does *NOT* update the Session object, but returns the changes required for a valid update of the Session. Args: session (dict): The initial session to compute changes to. This is required for computing the full Work Directory, as that also depends on the values that haven't changed. task (str, Optional): Name of task to switch to. asset (str or dict, Optional): Name of asset to switch to. You can also directly provide the Asset dictionary as returned from the database to avoid an additional query. (optimization) app (str, Optional): Name of app to switch to. Returns: dict: The required changes in the Session dictionary. """ changes = dict() # If no changes, return directly if not any([task, asset, app]): return changes # Get asset document and asset asset_document = None if asset: if isinstance(asset, dict): # Assume asset database document asset_document = asset asset = asset["name"] else: # Assume asset name asset_document = io.find_one({"name": asset, "type": "asset"}) assert asset_document, "Asset must exist" # Detect any changes compared session mapping = { "AVALON_ASSET": asset, "AVALON_TASK": task, "AVALON_APP": app, } changes = {key: value for key, value in mapping.items() if value and value != session.get(key)} if not changes: return changes # Update silo and hierarchy when asset changed if "AVALON_ASSET" in changes: # Update silo changes["AVALON_SILO"] = asset_document.get("silo") or "" # Update hierarchy parents = asset_document['data'].get('parents', []) hierarchy = "" if len(parents) > 0: hierarchy = os.path.sep.join(parents) changes['AVALON_HIERARCHY'] = hierarchy # Compute work directory (with the temporary changed session so far) project = io.find_one({"type": "project"}) _session = session.copy() _session.update(changes) anatomy = Anatomy(project["name"]) template_data = template_data_from_session(_session) anatomy_filled = anatomy.format(template_data) changes["AVALON_WORKDIR"] = anatomy_filled["work"]["folder"] return changes
5,352,912
def _tf_get_negs( all_embed: "tf.Tensor", all_raw: "tf.Tensor", raw_pos: "tf.Tensor", num_neg: int ) -> Tuple["tf.Tensor", "tf.Tensor"]: """Get negative examples from given tensor.""" if len(raw_pos.shape) == 3: batch_size = tf.shape(raw_pos)[0] seq_length = tf.shape(raw_pos)[1] else: # len(raw_pos.shape) == 2 batch_size = tf.shape(raw_pos)[0] seq_length = 1 raw_flat = _tf_make_flat(raw_pos) total_candidates = tf.shape(all_embed)[0] all_indices = tf.tile( tf.expand_dims(tf.range(0, total_candidates, 1), 0), (batch_size * seq_length, 1), ) shuffled_indices = tf.transpose( tf.random.shuffle(tf.transpose(all_indices, (1, 0))), (1, 0) ) neg_ids = shuffled_indices[:, :num_neg] bad_negs = _tf_get_bad_mask(raw_flat, all_raw, neg_ids) if len(raw_pos.shape) == 3: bad_negs = tf.reshape(bad_negs, (batch_size, seq_length, -1)) neg_embed = _tf_sample_neg(batch_size * seq_length, all_embed, neg_ids) if len(raw_pos.shape) == 3: neg_embed = tf.reshape( neg_embed, (batch_size, seq_length, -1, all_embed.shape[-1]) ) return neg_embed, bad_negs
5,352,913
def remove(store_config, shardid):  # FIXME require config instead
    """Remove a shard from the store.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                       (always on for fat partitions).
        shardid: Id of the shard to be removed.

    Raises:
        AssertionError: If input not valid.

    Example:
        import storjlib
        id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
        store_config = {"path/alpha": None, "path/beta": None}
        storjlib.store.manager.remove(store_config, id)
    """
    shard_path = find(store_config, shardid)
    if shard_path is not None:
        _log.info("Removing shard {0} from store.".format(shardid))
        return os.remove(shard_path)
5,352,914
def init_db(path: Union[str, 'os.PathLike'] = db_constants.DB_PATH) -> Optional[sqlite3.dbapi2.Connection]: """Initialises the DB. Returns a sqlite3 connection, which will be passed to the db thread. """ # TODO: change saving version from float to string def db_layout(cursor: sqlite3.dbapi2.Cursor) -> None: c = cursor # version c.execute(""" CREATE TABLE IF NOT EXISTS version(version REAL) """) c.execute("""INSERT INTO version(version) VALUES(?)""", (db_constants.CURRENT_DB_VERSION,)) log_i("Constructing database layout") log_d("Database Layout:\n\t{}".format(STRUCTURE_SCRIPT)) c.executescript(STRUCTURE_SCRIPT) def new_db(p: Union[str, 'os.PathLike'], new: bool = False) -> sqlite3.dbapi2.Connection: connection = sqlite3.connect(p, check_same_thread=False) connection.row_factory = sqlite3.Row if new: c = connection.cursor() db_layout(c) connection.commit() return connection if os.path.isfile(path): conn = new_db(path) if path == db_constants.DB_PATH and not check_db_version(conn): return None else: create_db_path() conn = new_db(path, True) conn.isolation_level = None conn.execute("PRAGMA foreign_keys = on") return conn
5,352,915
def load_pdb(path): """ Loads all of the atomic positioning/type arrays from a pdb file. The arrays can then be transformed into density (or "field") tensors before being sent through the neural network. Parameters: path (str, required): The full path to the pdb file being voxelized. Returns: dictionary: A dictionary containing the following arrays from the pdb file: num_atoms, atom_types, positions, atom_type_set, xcoords, ycoords, zcoords, residues, residue_set """ pdb = PandasPdb().read_pdb(path) # This just creates a dataframe from the pdb file using biopandas #print('This is vars',vars(pdb)) pdf = pdb.df['ATOM'] # atomic coordinates x_coords = pdf['x_coord'].values y_coords = pdf['y_coord'].values z_coords = pdf['z_coord'].values # create an array containing tuples of x,y,z for every atom positions = [] for i, x in enumerate(x_coords): position_tuple = (x_coords[i], y_coords[i], z_coords[i]) positions.append(position_tuple) positions = np.array(positions) # names of all the atoms contained in the protein atom_types = pdf['atom_name'].values num_atoms = len(atom_types) atom_type_set = np.unique(atom_types) num_atom_types = len(atom_type_set) # residue names residue_names = pdf['residue_name'].values residue_set = np.unique(residue_names) protein_dict = {'x_coords':x_coords, 'y_coords':y_coords, 'z_coords':z_coords, 'positions':positions, 'atom_types':atom_types, 'num_atoms':num_atoms, 'atom_type_set':atom_type_set, 'num_atom_types':num_atom_types, 'residues':residue_names, 'residue_set':residue_set} # add a value to the dictionary, which is all of the atomic coordinates just # shifted to the origin protein_dict = shift_coords(protein_dict) return protein_dict
5,352,916
def confusion_matrix_cli(
    test_statistics: Union[str, List[str]],
    ground_truth_metadata: str,
    **kwargs: dict
) -> None:
    """Load model data from files to be shown by confusion_matrix.

    # Inputs

    :param test_statistics: (Union[str, List[str]]) path to experiment test
        statistics file.
    :param ground_truth_metadata: (str) path to ground truth metadata file.
    :param kwargs: (dict) parameters for the requested visualizations.

    # Return

    :return None:
    """
    test_stats_per_model = load_data_for_viz('load_json', test_statistics)
    metadata = load_json(ground_truth_metadata)
    confusion_matrix(test_stats_per_model, metadata, **kwargs)
5,352,917
def test_proton_model():
    """
    test import
    """
    from ..sherpa_models import PionDecay

    model = PionDecay()
    model.ampl = 1e36
    model.index = 2.1

    # point calc
    output = model.calc([p.val for p in model.pars], energies)

    # integrated
    output = model.calc([p.val for p in model.pars], elo, xhi=ehi)

    # test as well ECPL
    model.cutoff = 1000

    # Perform a fit to fake data
    ui.load_arrays(1, energies, test_spec_points, test_err_points)
    ui.set_model(model)
    ui.guess()
    # Actual fit is too slow for tests
    # ui.fit()

    # test with integrated data
    ui.load_arrays(1, elo, ehi, test_spec_int, test_err_int, ui.Data1DInt)
    ui.set_model(model)
    ui.guess()
    # Actual fit is too slow for tests
    # ui.fit()
5,352,918
def validate_workspace(
    workspace_option: str, available_paths: List[str] = list(WORKSPACE_PATHS.values())
) -> str:
    """Validate and return workspace.

    :param workspace_option: A string of the workspace to validate.
    :type workspace_option: string
    :param available_paths: A list of the available workspaces.
    :type available_paths: list
    :returns: A string of the validated workspace.
    """
    if workspace_option:
        available = any(
            os.path.join(os.path.abspath(workspace_option), "").startswith(
                os.path.join(os.path.abspath(path), "")
            )
            for path in available_paths
        )
        if not available:
            raise REANAValidationError(
                f'Desired workspace "{workspace_option}" is not valid.\n'
                f'Available workspace prefix values are: {", ".join(available_paths)}',
            )
    return workspace_option
5,352,919
def compileShaders(self):
    """Loads the vertex/fragment shader source code, and creates a
    :class:`.GLSLShader` program.
    """
    if self.shader is not None:
        self.shader.destroy()

    vertSrc = shaders.getVertexShader('glvolume')
    fragSrc = shaders.getFragmentShader('glrgbvolume')

    textures = {
        'imageTexture' : 0
    }

    constants = {
        'texture_is_2d' : self.imageTexture.ndim == 2
    }

    self.shader = shaders.ARBPShader(vertSrc,
                                     fragSrc,
                                     shaders.getShaderDir(),
                                     textureMap=textures,
                                     constants=constants)
5,352,920
def __filter_handler(query_set, model, params):
    """
    Handle user-provided filtering requests.

    Args:
        query_set: SQLAlchemy query set to be filtered.
        model: Data model from which given query set is generated.
        params: User-provided filter params, with format {"query": [...], ...}.
                For query format see "__build_filter_exp" function.

    Returns:
        A query set with user-provided filters applied.
    """
    query = params.get("query")
    if query:
        filter_exp = __build_filter_exp(query, model)
        return query_set.filter(filter_exp)
    else:
        return query_set
5,352,921
def split_data_set(data_set, axis, value):
    """
    Split the data set on the given feature, keeping only the samples whose
    value for that feature equals the given value.
    (Because the split has been done on this feature, it should not appear in
    later splits, so the feature is removed from each returned sample.)
    :param data_set: data set to split; each row is a list whose last element
        is the label and whose other elements are features
    :param axis: index of the feature to split on
    :param value: feature value to select (only samples whose feature equals
        this value are returned)
    :return:
    >>>myDat = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    >>>split_data_set(myDat,0,1)
    [[1, 'yes'], [1, 'yes'], [0, 'no']]
    >>>split_data_set(myDat,0,0)
    [[1, 'no'], [1, 'no']]
    """
    # create a new list object
    ret_data_set = []
    for feature_vec in data_set:
        if feature_vec[axis] == value:
            # extract: drop the chosen feature and build a new feature+label list
            reduced_feature_vec = feature_vec[:axis]
            reduced_feature_vec.extend(feature_vec[axis + 1:])
            ret_data_set.append(reduced_feature_vec)
    return ret_data_set
5,352,922
def get_ast(target_func_or_module):
    """
    See :func:``bettertimeit`` for acceptable types.

    :returns: an AST for ``target_func_or_module``
    """
    if isinstance(target_func_or_module, ast.AST):
        return target_func_or_module

    if not isinstance(target_func_or_module,
                      (six.string_types, six.binary_type)):
        handled_types = (
            types.ModuleType,
            types.FunctionType,
            getattr(types, "UnboundMethodType", types.MethodType),
            types.MethodType,
        )
        if not isinstance(target_func_or_module, handled_types):
            raise TypeError("Don't know how to handle objects of types '%s'"
                            % type(target_func_or_module))

        target_func_or_module = inspect.getsource(target_func_or_module)
        target_func_or_module = textwrap.dedent(target_func_or_module)

    return ast.parse(target_func_or_module)
5,352,923
def enumerate_spans(sentence: List[T], offset: int = 0, max_span_width: int = None, min_span_width: int = 1, filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]: """ Given a sentence, return all token spans within the sentence. Spans are `inclusive`. Additionally, you can provide a maximum and minimum span width, which will be used to exclude spans outside of this range. Finally, you can provide a function mapping ``List[T] -> bool``, which will be applied to every span to decide whether that span should be included. This allows filtering by length, regex matches, pos tags or any Spacy ``Token`` attributes, for example. Parameters ---------- sentence : ``List[T]``, required. The sentence to generate spans for. The type is generic, as this function can be used with strings, or Spacy ``Tokens`` or other sequences. offset : ``int``, optional (default = 0) A numeric offset to add to all span start and end indices. This is helpful if the sentence is part of a larger structure, such as a document, which the indices need to respect. max_span_width : ``int``, optional (default = None) The maximum length of spans which should be included. Defaults to len(sentence). min_span_width : ``int``, optional (default = 1) The minimum length of spans which should be included. Defaults to 1. filter_function : ``Callable[[List[T]], bool]``, optional (default = None) A function mapping sequences of the passed type T to a boolean value. If ``True``, the span is included in the returned spans from the sentence, otherwise it is excluded.. """ max_span_width = max_span_width or len(sentence) filter_function = filter_function or (lambda x: True) spans: List[Tuple[int, int]] = [] for start_index in range(len(sentence)): last_end_index = min(start_index + max_span_width, len(sentence)) first_end_index = min(start_index + min_span_width - 1, len(sentence)) for end_index in range(first_end_index, last_end_index): start = offset + start_index end = offset + end_index # add 1 to end index because span indices are inclusive. if filter_function(sentence[slice(start_index, end_index + 1)]): spans.append((start, end)) return spans
5,352,924
def test_fetch_fraction_timestamp(conn_cnx): """Additional fetch timestamp tests. Mainly used for SnowSQL which converts to string representations.""" PST_TZ = "America/Los_Angeles" converter_class = SnowflakeConverterSnowSQL sql = """ SELECT '1900-01-01T05:00:00.000Z'::timestamp_tz(7), '1900-01-01T05:00:00.000'::timestamp_ntz(7), '1900-01-01T05:00:01.000Z'::timestamp_tz(7), '1900-01-01T05:00:01.000'::timestamp_ntz(7), '1900-01-01T05:00:01.012Z'::timestamp_tz(7), '1900-01-01T05:00:01.012'::timestamp_ntz(7), '1900-01-01T05:00:00.012Z'::timestamp_tz(7), '1900-01-01T05:00:00.012'::timestamp_ntz(7), '2100-01-01T05:00:00.012Z'::timestamp_tz(7), '2100-01-01T05:00:00.012'::timestamp_ntz(7), '1970-01-01T00:00:00Z'::timestamp_tz(7), '1970-01-01T00:00:00'::timestamp_ntz(7) """ with conn_cnx(converter_class=converter_class) as cnx: cur = cnx.cursor() cur.execute(""" alter session set python_connector_query_result_format='JSON' """) cur.execute(""" ALTER SESSION SET TIMEZONE='{tz}'; """.format(tz=PST_TZ)) cur.execute(""" ALTER SESSION SET TIMESTAMP_OUTPUT_FORMAT='YYYY-MM-DD HH24:MI:SS.FF9 TZH:TZM', TIMESTAMP_NTZ_OUTPUT_FORMAT='YYYY-MM-DD HH24:MI:SS.FF9', TIME_OUTPUT_FORMAT='HH24:MI:SS.FF9'; """) cur.execute(sql) ret = cur.fetchone() assert ret[0] == '1900-01-01 05:00:00.000000000 +0000' assert ret[1] == '1900-01-01 05:00:00.000000000' assert ret[2] == '1900-01-01 05:00:01.000000000 +0000' assert ret[3] == '1900-01-01 05:00:01.000000000' assert ret[4] == '1900-01-01 05:00:01.012000000 +0000' assert ret[5] == '1900-01-01 05:00:01.012000000' assert ret[6] == '1900-01-01 05:00:00.012000000 +0000' assert ret[7] == '1900-01-01 05:00:00.012000000' assert ret[8] == '2100-01-01 05:00:00.012000000 +0000' assert ret[9] == '2100-01-01 05:00:00.012000000' assert ret[10] == '1970-01-01 00:00:00.000000000 +0000' assert ret[11] == '1970-01-01 00:00:00.000000000'
5,352,925
def fixed_mu(mu, data, qty, comp='muAI', beads_2_M=1):
    """
    """
    return fixed_conc(mu*np.ones([len(data.keys())]), data, qty, comp=comp,
                      beads_2_M=beads_2_M)
5,352,926
def error_nrmse(y_true, y_pred, time_axis=0):
    """ Computes the Normalized Root Mean Square Error (NRMSE).

    The NRMSE index is computed separately on each channel.

    Parameters
    ----------
    y_true : np.array
        Array of true values. It must be at least 2D.
    y_pred : np.array
        Array of predicted values. It must be compatible with y_true.
    time_axis : int
        Time axis. All other axes define separate channels.

    Returns
    -------
    NRMSE : np.array
        Array of NRMSE values.

    """
    SSE = np.mean((y_pred - y_true)**2, axis=time_axis)
    RMSE = np.sqrt(SSE)
    NRMSE = RMSE/np.std(y_true, axis=time_axis)
    return NRMSE
5,352,927
def test_normalization_2d():
    """test for Normalization2D"""
    # TODO: Because the expected behaviour of this layer is somehow confusing for me now
5,352,928
def main(): """ Compute binary scores for target words. """ # Get the arguments args = docopt("""Compute binary scores for taget words. Usage: binary.py <path_distances> <path_output> <thres_percentage> binary.py <path_distances> <path_targets> <path_output> <thres_percentage> <path_distances> = path to file containing word distance pairs (tab-separated) <path_targets> = path to file containing target words (optional for binary classification) <path_output> = output path for result file <thres_percentage> = mean + thres_percentage * std Note: Choose the first usage to discover changing words in <path_distances>. Choose the second usage to compute binary scores for words in <path_targets>. """) path_distances = args['<path_distances>'] path_targets = args['<path_targets>'] path_output = args['<path_output>'] thres_percentage = float(args['<thres_percentage>']) logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) logging.info(__file__.upper()) start_time = time.time() # Load data distances = {} with open(path_distances, 'r', encoding='utf-8') as f: reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE, strict=True) for row in reader: try: distances[row[0]] = float(row[1]) except ValueError: pass # Compute mean, std and threshold list_distances = np.array(list(distances.values())) upper_quantile = np.quantile(list_distances, 0.75) list_distances = list_distances[list_distances < upper_quantile] mean = np.mean(list_distances, axis=0) #std = np.std(list_distances, axis=0) stde = sem(list_distances, axis=0) threshold = mean + thres_percentage*stde # Usage 1: discover changing words if path_targets == None: changing_words = [] for key in distances: if distances[key] >= threshold: changing_words.append(key) # Write changing words to <path_output> with open(path_output, 'w', encoding='utf-8') as f: for word in changing_words: f.write(word + '\n') # Usage 2: label target words according to threshold (binary classification) else: # Load data target_distances = {} with open(path_targets, 'r', encoding='utf-8') as f: reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE, strict=True) for row in reader: try: target_distances[row[0]] = float(row[1]) except ValueError: pass # Compute binary scores binary_scores = {} for key in target_distances: if target_distances[key] >= threshold: binary_scores[key] = 1 else: binary_scores[key] = 0 # Write binary scores to <path_output> with open(path_output, 'w', encoding='utf-8') as f: f.write('word' + '\t' + 'change_binary' + '\n') for key, value in binary_scores.items(): f.write(key + '\t' + str(value) + '\n') logging.info("--- %s seconds ---" % (time.time() - start_time))
5,352,929
def show(font_size=20):
    """
    Display a figure.
    """
    plt.rcParams.update({'font.size': font_size})
    plt.show()
5,352,930
def is_valid_file(parser, arg):
    """
    Function used to check if a valid VLC path was given.
    """
    if not os.path.exists(arg):
        parser.error(
            f"The filepath {arg} does not exist! Be sure to include quotes around the path, view help for more info."
        )
5,352,931
def test_get_checksum_not_in_cache(tmp_path):
    """Test the checksum cache_dict."""
    assert cache_dict == {}
    file_path = tmp_path / 'space.js'
    file_path.write_text('punkcat')
    get_checksum(file_path)
    assert str(file_path) in cache_dict
5,352,932
def load(ctx: Context, opts: LoadOptions):
    """Plugin that loads data packs and resource packs."""
    for config, pack in zip([opts.resource_pack, opts.data_pack], ctx.packs):
        for pattern in [config] if isinstance(config, str) else config:
            for path in glob(str(ctx.directory / pattern)):
                pack.load(path)
5,352,933
def rmdir_empty(f):
    """Returns a count of the number of directories it has deleted"""
    if not f.is_dir():
        return 0
    removable = True
    result = 0
    for i in f.iterdir():
        if i.is_dir():
            result += rmdir_empty(i)
            removable = removable and not i.exists()
        else:
            removable = removable and (i.name == '.DS_Store')
    if removable:
        items = list(f.iterdir())
        assert not items or items[0].name == '.DS_Store'
        print(f)
        shutil.rmtree(f)
        result += 1
    return result
5,352,934
def delete_suggester(DomainName=None, SuggesterName=None): """ Deletes a suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide . See also: AWS API Documentation :example: response = client.delete_suggester( DomainName='string', SuggesterName='string' ) :type DomainName: string :param DomainName: [REQUIRED] A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :type SuggesterName: string :param SuggesterName: [REQUIRED] Specifies the name of the suggester you want to delete. :rtype: dict :return: { 'Suggester': { 'Options': { 'SuggesterName': 'string', 'DocumentSuggesterOptions': { 'SourceField': 'string', 'FuzzyMatching': 'none'|'low'|'high', 'SortExpression': 'string' } }, 'Status': { 'CreationDate': datetime(2015, 1, 1), 'UpdateDate': datetime(2015, 1, 1), 'UpdateVersion': 123, 'State': 'RequiresIndexDocuments'|'Processing'|'Active'|'FailedToValidate', 'PendingDeletion': True|False } } } :returns: RequiresIndexDocuments : the option's latest value will not be deployed until IndexDocuments has been called and indexing is complete. Processing : the option's latest value is in the process of being activated. Active : the option's latest value is completely deployed. FailedToValidate : the option value is not compatible with the domain's data and cannot be used to index the data. You must either modify the option value or update or remove the incompatible documents. """ pass
5,352,935
def generic_exception_json_response(code):
    """
    Turns an unhandled exception into a JSON payload to respond to a service call
    """
    payload = {
        "error": "TechnicalException",
        "message": "An unknown error occured",
        "code": code
    }
    resp = make_response(jsonify(payload), code)
    resp.headers["Content-type"] = "application/json"
    return resp
5,352,936
def UnN(X, Z, N, sampling_type, kernel="prod"):
    """Computes block-wise complete U-statistic."""
    def fun_block(x, z):
        return Un(x, z, kernel=kernel)

    return UN(X, Z, N, fun_block, sampling_type=sampling_type)
5,352,937
def delete_card(request):
    """Delete card"""
    return delete_container_element(request)
5,352,938
def build_estimator(tf_transform_dir, config, hidden_units=None): """Build an estimator for predicting the tipping behavior of taxi riders. Args: tf_transform_dir: directory in which the tf-transform model was written during the preprocessing step. config: tf.contrib.learn.RunConfig defining the runtime environment for the estimator (including model_dir). hidden_units: [int], the layer sizes of the DNN (input layer first) Returns: Resulting DNNLinearCombinedClassifier. """ metadata_dir = os.path.join(tf_transform_dir, transform_fn_io.TRANSFORMED_METADATA_DIR) transformed_metadata = metadata_io.read_metadata(metadata_dir) transformed_feature_spec = transformed_metadata.schema.as_feature_spec() transformed_feature_spec.pop(transformed_name(LABEL_KEY)) real_valued_columns = [ tf.feature_column.numeric_column(key, shape=()) for key in transformed_names(DENSE_FLOAT_FEATURE_KEYS) ] categorical_columns = [ tf.feature_column.categorical_column_with_identity( key, num_buckets=VOCAB_SIZE + OOV_SIZE, default_value=0) for key in transformed_names(VOCAB_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( key, num_buckets=FEATURE_BUCKET_COUNT, default_value=0) for key in transformed_names(BUCKET_FEATURE_KEYS) ] categorical_columns += [ tf.feature_column.categorical_column_with_identity( key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip( transformed_names(CATEGORICAL_FEATURE_KEYS), # MAX_CATEGORICAL_FEATURE_VALUES) ] #return tf.estimator.DNNLinearCombinedClassifier( return tf.estimator.DNNLinearCombinedRegressor( config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25])
5,352,939
def get_plot_grid_size(num_plots, fewer_rows=True):
    """
    Returns the number of rows and columns ideal for visualizing multiple
    (identical) plots within a single figure

    Parameters
    ----------
    num_plots : uint
        Number of identical subplots within a figure
    fewer_rows : bool, optional. Default = True
        Set to True if the grid should be short and wide or False for tall and narrow

    Returns
    -------
    nrows : uint
        Number of rows
    ncols : uint
        Number of columns
    """
    assert isinstance(num_plots, Number), 'num_plots must be a number'
    # force integer:
    num_plots = int(num_plots)
    if num_plots < 1:
        raise ValueError('num_plots was less than 0')
    if fewer_rows:
        nrows = int(np.floor(np.sqrt(num_plots)))
        ncols = int(np.ceil(num_plots / nrows))
    else:
        ncols = int(np.floor(np.sqrt(num_plots)))
        nrows = int(np.ceil(num_plots / ncols))
    return nrows, ncols
5,352,940
def note_view(): """Display a grid of notes and options to manipulate those notes or create new notes.""" # attempt to retrieve notes from previous run and print to screen os.system('clear') try: note_list = retrieve_notes() print_grid(note_list) options = [ '✎ Make a New Note ✎', '✎ Make a New List ✎', '✎ Edit a Note ✎', '⛔ Exit ⛔'] # no previous notes exist so prompt to create a new note or list except json.decoder.JSONDecodeError: print('\u001b[1;31m', end='') print('You don\'t have any notes!'.center(width)) options = [ '✎ Make a New Note ✎', '✎ Make a New List ✎', '⛔ Exit ⛔' ] note_list = [] note_options = { 'type': 'list', 'name': 'noteChoice', 'message': 'Please select an option for notes:', 'choices': options } note_options_choice = prompt(note_options) try: if note_options_choice['noteChoice'] == '✎ Make a New Note ✎': make_a_note(note_list) elif note_options_choice['noteChoice'] == '✎ Make a New List ✎': make_a_list(note_list) elif note_options_choice['noteChoice'] == '✎ Edit a Note ✎': edit_note_selector() elif note_options_choice['noteChoice'] == '⛔ Exit ⛔': save_notes(note_list) except KeyError: save_notes(note_list)
5,352,941
def check_source(module):
    """
    Check that module doesn't have any globals.

    Example::

        def test_no_global(self):
            result, line = check_source(self.module)
            self.assertTrue(result, "Make sure no code is outside functions.\\nRow: " + line)
    """
    try:
        source = module.__file__
    except Exception:
        raise Exception('Make sure that the code runs successfully')
    allowed = [
        "import ",
        "from ",
        "def ",
        "class ",
        " ",
        "\t",
        "#",
        "if __name__",
        "@",
    ]
    with open(source) as file:
        for line in file.readlines():
            if line.strip() == "":
                continue
            for prefix in allowed:
                if line.startswith(prefix):
                    break
            else:
                return (False, line)
    return (True, "")
5,352,942
def _create_lagged_variable_panel(df, col, id_col="TICKER", num_lags=1):
    """
    Note: inplace
    """
    new_name = varname_to_lagged_varname(col, num_lags=num_lags)
    df[new_name] = df.groupby(id_col)[col].shift(num_lags)
5,352,943
def pre_processing(X):
    """ Center and sphere data."""
    eps = 1e-18
    n = X.shape[0]
    cX = X - np.mean(X, axis=0)  # centering
    cov_mat = 1.0/n * np.dot(cX.T, cX)
    eigvals, eigvecs = eigh(cov_mat)
    D = np.diag(1./np.sqrt(eigvals+eps))
    W = np.dot(np.dot(eigvecs, D), eigvecs.T)  # whitening matrix
    wcX = np.dot(cX, W)
    return wcX
5,352,944
def task_grid():
    """Grid classifier."""
    for mode, sl in product(MODES, SAMPLELISTS):
        paths = dwi.paths.Paths(mode)
        lesioninfo = defaultdict(list)
        for c, s, l in dwi.dataset.iterlesions(str(paths.samplelist(sl))):
            c, l, lt = c.num, l.index + 1, l.location
            lesioninfo[(c, s)].append((l, lt))
        for k, v in lesioninfo.items():
            c, s = k
            ls, lt = [x[0] for x in v], [x[1] for x in v]
            d = dict(lt=lt, mbb=None, fmt='h5')

            mt = 'prostate'
            d['nanbg'] = True
            yield get_task_grid(mode, c, s, ls, mt, **d)
            for tspec in texture_methods_winsizes(mode, mt):
                yield get_task_grid_texture(mode, c, s, ls, mt, tspec, **d)

            mt = 'all'
            d['nanbg'] = False
            yield get_task_grid(mode, c, s, ls, mt, **d)
            for tspec in texture_methods_winsizes(mode, mt):
                yield get_task_grid_texture(mode, c, s, ls, mt, tspec, **d)
5,352,945
def get_as_tags(bundle_name, extension=None, config="DEFAULT", attrs=""):
    """
    Get a list of formatted <script> & <link> tags for the assets in the
    named bundle.

    :param bundle_name: The name of the bundle
    :param extension: (optional) filter by extension, eg. "js" or "css"
    :param config: (optional) the name of the configuration
    :param attrs: (optional) further attributes on the tags
    :return: a list of formatted tags as strings
    """
    bundle = _get_bundle(bundle_name, extension, config)
    return _render_tags(bundle, attrs)
5,352,946
def _init_tables(app):
    """Creates sql lite tables"""
    if not os.path.exists(_db_filename(app)):
        pkdlog('creating user oauth database')
        _db.create_all()
5,352,947
def psql(statement, timeout=30):
    """Execute a statement using the psql client."""
    LOG.debug('Sending to local db: {0}'.format(statement))
    return execute('psql', '-c', statement, timeout=timeout)
5,352,948
def devpiserver_get_credentials(request):
    """Search request for X-Remote-User header.

    Returns a tuple with (X-Remote-User, '') if credentials could be
    extracted, or None if no credentials were found.

    The first plugin to return credentials is used, the order of plugin
    calls is undefined.
    """
    if 'X-Remote-User' in request.headers:
        remote_user = request.headers['X-Remote-User']
        threadlog.info("Found X-Remote-User in request: %s", remote_user)
        return remote_user, ''
5,352,949
def vader_sentiment(
        full_dataframe, grading_column_name,
        vader_columns=COLUMN_NAMES, logger=config.LOGGER):
    """apply vader_sentiment analysis to dataframe

    Args:
        full_dataframe (:obj:`pandas.DataFrame`): parent dataframe to apply analysis to
        grading_column_name (str): column with the data to grade
        vader_columns (:obj:`list`, optional): names to map vader results to ['neu', 'pos', 'compound', 'neg']
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): updated dataframe with vader sentiment

    """
    logger.info('applying vader sentiment analysis to `%s`', grading_column_name)

    logger.info('--applying vader_lexicon')
    vader_df = map_vader_sentiment(
        full_dataframe[grading_column_name],
        column_names=vader_columns
    )

    logger.info('--merging results into original dataframe')
    joined_df = full_dataframe.merge(
        vader_df, how='left',
        on=grading_column_name
    )

    return joined_df
5,352,950
def _reload(name="pynrc"):
    """
    Simple reload function to test code changes without restarting python.
    There may be some weird consequences and bugs that show up, such as
    functions and attributes deleted from the code still stick around after
    the reload. Although, this is even true with ``importlib.reload(pynrc)``.

    Other possible ways to reload on-the-fly:

        from importlib import reload
        reload(pynrc)

        # Delete classes/modules to reload
        import sys
        del sys.modules['pynrc.obs_nircam']
    """
    import imp
    imp.load_module(name, *imp.find_module(name))

    print("{} reloaded".format(name))
5,352,951
def _overlayPoints(points1, points2): """Given two sets of points, determine the translation and rotation that matches them as closely as possible. Parameters ---------- points1 (numpy array of simtk.unit.Quantity with units compatible with distance) - reference set of coordinates points2 (numpy array of simtk.unit.Quantity with units compatible with distance) - set of coordinates to be rotated Returns ------- translate2 - vector to translate points2 by in order to center it rotate - rotation matrix to apply to centered points2 to map it on to points1 center1 - center of points1 Notes ----- This is based on W. Kabsch, Acta Cryst., A34, pp. 828-829 (1978). """ if len(points1) == 0: return (mm.Vec3(0, 0, 0), np.identity(3), mm.Vec3(0, 0, 0)) if len(points1) == 1: return (points1[0], np.identity(3), -1*points2[0]) # Compute centroids. center1 = unit.sum(points1)/float(len(points1)) center2 = unit.sum(points2)/float(len(points2)) # Compute R matrix. R = np.zeros((3, 3)) for p1, p2 in zip(points1, points2): x = p1-center1 y = p2-center2 for i in range(3): for j in range(3): R[i][j] += y[i]*x[j] # Use an SVD to compute the rotation matrix. (u, s, v) = lin.svd(R) return (-1*center2, np.dot(u, v).transpose(), center1)
5,352,952
def return_request(data): """ Arguments: data Return if call detect: list[dist1, dist2, ...]: dist = { "feature": feature } Return if call extract: list[dist1, dist2, ...]: dist = { "confidence_score": predict probability, "class": face, "bounding_box": [xmin, ymin, xmax, ymax], "keypoints": {'left_eye': (x,y), 'right_eye':(x,y), 'nose': (x,y), 'mouth_left': (x,y), 'mouth_right': (x,y)} } """ contents = [] try: boxs = data['predictions'] print(type(boxs)) print(boxs) # for box in boxs: # contents.append({ # "confidence_score": box[4], # "class": 'face', # "bounding_box": [box[0], box[1], box[2], box[3]] # }) except: pass try: features = data['features'] for feature in features: contents.append({ "feature": feature }) except: pass return contents
5,352,953
def test_successful_association_validation_2():
    """This test adds another host to make sure that associations with no
    count specified in the profile are handled correctly."""
    test_associations = [{
        'type': 'Document',
        'name': 'blahblah.txt'
    }, {
        'type': 'Document',
        'name': 'blahblah.pdf'
    }, {
        'type': 'Host',
        'name': 'hightower.space'
    }, {
        'type': 'Host',
        'name': 'deep.space'
    }, {
        'type': 'Adversary',
        'name': 'Bad guy'
    }]
    results = tc_dc._validate_associations(SAMPLE_ASSOCIATIONS_PROFILE, test_associations)
    assert len(results['failures']) == 0
    assert len(results['warnings']) == 0
5,352,954
def test_validate_payload_with_invalid_payload():
    """
    Test if validate_payload raises ValidationError when validation of
    payload fails.
    """
    message = CallResult(
        unique_id="1234",
        action="Heartbeat",
        payload={'invalid_key': True},
    )

    with pytest.raises(ValidationError):
        validate_payload(message, ocpp_version="1.6")
5,352,955
def test_after_space():
    """Test procedure for after_space"""
    print('Testing after_space')

    # Test case 1
    s = currency.after_space('Hello World')
    introcs.assert_equals('World',s)

    # Test case 2
    s = currency.after_space(' Hello World')
    introcs.assert_equals('Hello World',s)

    # Test case 3
    s = currency.after_space('HelloWorld ')
    introcs.assert_equals('',s)

    # Test case 4
    s = currency.after_space('Hello  World')
    introcs.assert_equals(' World',s)
5,352,956
def get_recent_messages_simple(e: TextMessageEventObject):
    """
    Command to get the most recent messages with default count.

    This command has a cooldown of ``Bot.RecentActivity.CooldownSeconds`` seconds.

    This command will get the most recent ``Bot.RecentActivity.DefaultLimitCountDirect`` messages
    without the message that called this command.

    :param e: message event that called this command
    :return: default count of most recent messages with a link to the recent activity page
    """
    return get_recent_messages(e, Bot.RecentActivity.DefaultLimitCountLink)
5,352,957
def parse_systemctl_units(stdout:str, stderr:str, exitcode:int) -> dict: """ UNIT LOAD ACTIVE SUB DESCRIPTION mono-xsp4.service loaded active running LSB: Mono XSP4 motd-news.service loaded inactive dead Message of the Day ● mountkernfs.service masked inactive dead mountkernfs.service systemd-machine-id-commit.service loaded inactive dead Commit a transient machine-id on disk ● systemd-modules-load.service loaded failed failed Load Kernel Modules systemd-networkd-resolvconf-update.service loaded inactive dead Update resolvconf for networkd DNS sysinit.target loaded active active System Initialization ● syslog.target not-found inactive dead syslog.target time-sync.target loaded active active System Time Synchronized LOAD = Reflects whether the unit definition was properly loaded. ACTIVE = The high-level unit activation state, i.e. generalization of SUB. SUB = The low-level unit activation state, values depend on unit type. 354 loaded units listed. To show all installed unit files use 'systemctl list-unit-files'. """ if exitcode != 0: raise Exception() # split into list of lines lines = LineList(stdout) assert isinstance(lines, LineList) # now we must separate a trailing description. lineNumbers = lines.getLineNumbersOfEmptyLines() assert lineNumbers assert lineNumbers[0] > 0 del lines[lineNumbers[0]:] # get column split positions wordPos = [ 0 ] + getPositionsOfWords(lines[0]) table = lines.createDataTableFromColumns(wordPos, bLStrip=True, bRStrip=True, bFirstLineIsHeader=True, columnDefs=[ ColumnDef("MARK", _parseMark), ColumnDef("UNIT"), ColumnDef("LOAD"), ColumnDef("ACTIVE"), ColumnDef("SUB"), ColumnDef("DESCRIPTION"), ]) # build output matrix: use service names as keys ret = {} for record in table: key = record[1] pos = key.rfind(".") category = key[pos+1:] + "s" # pluralize the category key = key[:pos] if category not in ret: ret[category] = {} ret[category][key] = record return ret
5,352,958
def resnet_model_fn(is_training, feature, label, data_format, params): """Build computation tower (Resnet). Args: is_training: true if is training graph. feature: a Tensor. label: a Tensor. data_format: channels_last (NHWC) or channels_first (NCHW). params: params for the model to consider Returns: A tuple with the loss for the tower, the gradients and parameters, and predictions. """ num_layers = params.num_layers batch_norm_decay = params.batch_norm_decay batch_norm_epsilon = params.batch_norm_epsilon weight_decay = params.weight_decay model = cifar10_with_resnet_model.ResNetCifar10( num_layers, batch_norm_decay=batch_norm_decay, batch_norm_epsilon=batch_norm_epsilon, is_training=is_training, data_format=data_format) logits = model.forward_pass(feature, input_data_format='channels_last') predictions = { 'classes': tf.argmax(input=logits, axis=1), 'probabilities': tf.nn.softmax(logits) } loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=label) loss = tf.reduce_mean(loss) model_params = tf.trainable_variables() loss += weight_decay * tf.add_n( [tf.nn.l2_loss(v) for v in model_params]) gradients = tf.gradients(loss, model_params) return loss, zip(gradients, model_params), predictions
5,352,959
def permutate_touched_latent_class(untouched_classes, class_info_np, gran_lvl_info):
    """untouch certain class num latent class, permute the rest (reserve H(Y))"""
    # get untouched instance index
    untouched_instance_index = []
    for i in untouched_classes:
        index = np.where(class_info_np == i)[0]
        untouched_instance_index.append(index)
    untouched_instance_index_np = np.concatenate(untouched_instance_index)

    # permutate touched id
    my_gran_lvl_info = gran_lvl_info * np.ones(gran_lvl_info.shape)  # replicate the gran_lvl_info
    untouched_latent_class_np = my_gran_lvl_info[untouched_instance_index_np]

    touched_index = np.delete(np.arange(my_gran_lvl_info.shape[0]), untouched_instance_index_np, 0)  # exclude untouched index
    tourched_latent_class = my_gran_lvl_info[touched_index]

    my_gran_lvl_info[touched_index] = np.random.permutation(tourched_latent_class)
    return my_gran_lvl_info.astype(np.int32)
5,352,960
def setup(c, editable=True, version='3.8'):
    """Setup development environment
    """
    print('Creating conda environment...')
    c.run(f'conda create --name scaffold python={version} --force --yes')
    print('Installing package...')
    flag = '--editable' if editable else ''
    conda.run_in_env(c, f'pip install {flag} .[dev]', env='scaffold')
5,352,961
def get_interarrival_times(arrival_times, period_start):
    """
    Given a list of report dates, it returns the list corresponding to the
    inter-arrival times.

    :param arrival_times: List of arrival times.
    :return: List of inter-arrival times.
    """
    interarrival_times = []

    for position, created_date in enumerate(arrival_times):
        if position > 0:
            distance = created_date - arrival_times[position - 1]
            interarrival_times.append(get_distance_in_hours(distance))
        else:
            if isinstance(created_date, np.datetime64):
                created_date = datetime.datetime.utcfromtimestamp(created_date.tolist() / 1e9)
                created_date = pytz.utc.localize(created_date)

            distance = get_distance_in_hours(created_date - period_start)

            if distance > 0:
                interarrival_times.append(distance)

    return pd.Series(data=interarrival_times)
5,352,962
def _indexOp(opname):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    def wrapper(self, other):
        func = getattr(self.view(np.ndarray), opname)
        return func(other)
    return wrapper
5,352,963
def safe_log(a):
    """
    Return the element-wise log of an array, checking for negative
    array elements and avoiding divide-by-zero errors.
    """
    if np.any([a < 0]):
        raise ValueError('array contains negative components')

    return np.log(a + 1e-12)
5,352,964
def parse_treepath(k=23):
    """Parse treepath results"""
    results = {}
    treepath_file = "treepath.k{}.csv".format(k)
    if not os.path.isfile(treepath_file):
        print("No treepath results found", file=sys.stderr)
        return
    with open(treepath_file, 'rb') as f:
        f.readline()  # skip header
        for line in f:
            temp = line.strip().split(",")
            sample = temp[0]
            strains = []
            for strain in temp[5].split(" "):
                strains.append(":".join(strain.split(":")[:-1]))
            results[sample] = strains
    return results
5,352,965
def test_patch_passport() -> None:
    """
    Ignored fields: {"uuid", "internal_id", "is_in_db", "featured_in_int_id"}.
    Send anything, nothing will be changed
    """
    token = login()
    passport_patch = {
        "uuid": "fake",
        "internal_id": "fake",
        "passport_short_url": "new_url",
        "is_in_db": True,
        "biography": None,
        "components_units": {},
        "featured_in_int_id": "fake",
        "barcode": None,
        "model": "New Model",
    }
    r = client.patch("/api/v1/passports/123456", headers={"Authorization": f"Bearer {token}"}, json=passport_patch)
    assert r.json()["status_code"] == 200, r.json()
5,352,966
def test_alphabet_as_string_violation(
    assert_errors,
    assert_error_text,
    parse_ast_tree,
    code,
    prefix,
    default_options,
):
    """Testing that the strings violate the rules."""
    tree = parse_ast_tree('{0}"{1}"'.format(prefix, code))

    visitor = WrongStringVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [StringConstantRedefinedViolation])
    assert_error_text(visitor, code)
5,352,967
def test_set_default_temperature_rise():
    """should return the default temperature rise for the selected subcategory ID."""
    assert inductor._set_default_temperature_rise(1, 1) == 10.0
    assert inductor._set_default_temperature_rise(1, 3) == 30.0
    assert inductor._set_default_temperature_rise(2, 1) == 10.0
5,352,968
def _set_quota(user, quota):
    """Set the quota of the user to the given value, and restore the old value when exit"""
    oldquota = seafilerpc.get_user_quota(user)
    if seafilerpc.set_user_quota(user, quota) < 0:
        raise RuntimeError('failed to change user quota')
    assert seafilerpc.get_user_quota(user) == quota
    try:
        yield
    finally:
        seafilerpc.set_user_quota(user, oldquota)
5,352,969
def deduplicate( timeseries: List[TimeseriesEntry], margins: Dict[str, float] = {}, ) -> List[TimeseriesEntry]: """ Remove duplicates from the supplied `timeseries`. Currently the deduplication relies on `timemseries` being formatted according to how data is stored in `Weather.series.values()`. The function removes duplicates if the start and stop timestamps of consecutive segments are equal and the values are either equal or, if they are numeric, if their differences are smaller than a certain margin of error. Parameters ---------- timeseries : List[TimeseriesEntry] The timeseries to duplicate. margins : Dict[str, float] The margins of error. Can contain one or both of the strings :code:`"absolute"` and :code:`"relative"` as keys with the numbers stored under these keys having the following meaning: - for :code:`absolute` value of the difference between the two values has to be smaller than or equal to this while - for :code:`relative` this difference has to be smaller than or equal to this when interpreted as a fraction of the maximum of the absolute values of the two compared values. By default these limits are set to be infinitely big. Returns ------- timeseries : List[TimeseriesEntry] A copy of the input data with duplicate values removed. Raises ------ ValueError If the data contains duplicates outside of the allowed margins. """ # TODO: Fix the data. If possible add a constraint preventing this from # happending again alongside the fix. # This is just here because there's duplicate data (that we know) # at the end of 2017. The last timestamp of 2017 is duplicated in # the first timespan of 2018. And unfortunately it's not exactly # duplicated. The timestamps are equal, but the values are only # equal within a certain margin. # TODO: Use [`unique_iter`][0] for unsafe removal, i.e. if both margins # are infinite. Or find an alternative in [`more-itertools`][1]. # [0]: https://boltons.readthedocs.io/en/latest/iterutils.html # #boltons.iterutils.unique_iter # [1]: https://pypi.org/project/more-itertools/ margins = { **{"absolute": float("inf"), "relative": float("inf")}, **margins, } multiples = [ run for run in reduce(runs, enumerate(timeseries), [[]]) if len(run) > 1 ] compressed = [compress(m, margins) for m in multiples] errors = [c for c in compressed if len(c) > 1] if errors: raise ValueError( "Found duplicate timestamps while retrieving data:\n{}".format( pformat(errors) ) ) compressed.reverse() result = timeseries.copy() for c in compressed: result[c[0][0]] = (c[0][1],) return result
5,352,970
def build_windows_subsystem(profile, make_program): """ The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow the Autotools conventions """ # FIXME: cygwin in CI (my local machine works) seems broken for path with spaces client = TestClient(path_with_spaces=False) client.run("new hello/0.1 --template=cmake_lib") # TODO: Test Windows subsystems in CMake, at least msys is broken os.rename(os.path.join(client.current_folder, "test_package"), os.path.join(client.current_folder, "test_package2")) client.save({"profile": profile}) client.run("create . --profile=profile") main = gen_function_cpp(name="main", includes=["hello"], calls=["hello"]) makefile = gen_makefile(apps=["app"]) conanfile = textwrap.dedent(""" from conans import ConanFile from conan.tools.gnu import AutotoolsToolchain, Autotools, AutotoolsDeps class TestConan(ConanFile): requires = "hello/0.1" settings = "os", "compiler", "arch", "build_type" exports_sources = "Makefile" generators = "AutotoolsDeps", "AutotoolsToolchain" def build(self): autotools = Autotools(self) autotools.make() """) client.save({"app.cpp": main, "Makefile": makefile, "conanfile.py": conanfile, "profile": profile}, clean_first=True) client.run("install . --profile=profile") cmd = environment_wrap_command(["conanbuildenv", "conanautotoolstoolchain", "conanautotoolsdeps"], make_program, cwd=client.current_folder) client.run_command(cmd) client.run_command("app") # TODO: fill compiler version when ready check_exe_run(client.out, "main", "gcc", None, "Release", "x86_64", None) assert "hello/0.1: Hello World Release!" in client.out client.save({"app.cpp": gen_function_cpp(name="main", msg="main2", includes=["hello"], calls=["hello"])}) # Make sure it is newer t = time.time() + 1 touch(os.path.join(client.current_folder, "app.cpp"), (t, t)) client.run("build .") client.run_command("app") # TODO: fill compiler version when ready check_exe_run(client.out, "main2", "gcc", None, "Release", "x86_64", None, cxx11_abi=0) assert "hello/0.1: Hello World Release!" in client.out return client.out
5,352,971
def monitor_service(exported_metrics, url, csv_filename, sleep_amount, max_records):
    """Monitor selected service and export retrieved metrics into CSV file."""
    # Try to open new file for writing.
    with open(csv_filename, 'w') as csvfile:
        # Initialize CSV writer.
        writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)

        # First row with header.
        writer.writerow(exported_metrics)

        n = 0

        # We are going to retrieve the metrics with the specified frequency
        # and process them.
        while True:
            payload = requests.get(url).text
            metrics = parse_metrics(exported_metrics, payload)
            writer.writerow(metrics)
            # Make sure the next Ctrl+C or kill won't affect more than the last record.
            csvfile.flush()
            print("recorded")

            # Wait for the next metrics to be processed.
            time.sleep(sleep_amount)

            # It is possible to limit the number of records.
            if max_records is not None:
                n += 1
                # We already acquired the specified number of records, time to stop.
                if n >= max_records:
                    print("done")
                    break
5,352,972
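# A hedged usage sketch for `monitor_service` above. The metric names and the
# endpoint URL are placeholders, not values taken from a real service: sample
# the endpoint every 5 seconds and stop after 120 records.
monitor_service(
    exported_metrics=["requests_total", "errors_total"],   # hypothetical metric names
    url="http://localhost:8080/api/v1/metrics",
    csv_filename="metrics.csv",
    sleep_amount=5,
    max_records=120,
)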
def make_batch_keys(args, extras=None):
    """Depending on the args, different data are used by the listener."""
    batch_keys = ['objects', 'tokens', 'target_pos']  # all models use these
    if extras is not None:
        batch_keys += extras

    if args.obj_cls_alpha > 0:
        batch_keys.append('class_labels')

    if args.lang_cls_alpha > 0:
        batch_keys.append('target_class')

    return batch_keys
5,352,973
def init_dic_OP(universe_woH, dic_atname2genericname, resname):
    """Initialize the dictionary of results (`dic_op`).

    Initialize also the dictionary of correspondence between residue number
    (resid) and its index in dic_OP (`dic_corresp_numres_index_dic_OP`).

    To calculate the error, we need to first average over the
    trajectory, then over residues.
    Thus in dic_OP, we want for each key a list of lists, for example:
    OrderedDict([
                 (('C1', 'H11'), [[], [], ..., [], []]),
                 (('C1', 'H12'), [[], ..., []]),
                 ...
                 ])
    Thus each sublist will contain OPs for one residue.
    e.g. ('C1', 'H11'), [[OP res 1 frame1, OP res1 frame2, ...],
                         [OP res 2 frame1, OP res2 frame2, ...], ...]

    Parameters
    ----------
    universe_woH : MDAnalysis universe instance
        This is the universe *without* hydrogen.
    dic_atname2genericname: ordered dictionary
        dict of correspondence between generic H names and PDB names.
    resname: str
        The name of the lipid.

    Returns
    -------
    ordered dictionary
        Each key of this dict is a couple carbon/H, and at the beginning it
        contains an empty list.
    dictionary
        The correspondence between the residue number and its index in dic_OP.
    """
    dic_OP = collections.OrderedDict()

    # Get the list of residue ids for the lipid name.
    all_resids = universe_woH.select_atoms(
        f"resname {resname}").residues.resids
    nb_residus = len(all_resids)

    # Each key contains a list which holds as many sublists as there are
    # residues.
    for key in dic_atname2genericname:
        dic_OP[key] = [[] for _ in range(nb_residus)]

    # We also need the correspondence between residue number (resid) and
    # its index in dic_OP.
    # The index always starts at 0 and goes up to the number of residues,
    # i.e. range(nb_residus).
    dic_corresp_numres_index_dic_OP = dict(zip(all_resids,
                                               range(nb_residus)))

    if DEBUG:
        print("Initial dic_OP:", dic_OP)
        print("dic_corresp_numres_index_dic_OP:",
              dic_corresp_numres_index_dic_OP)

    return dic_OP, dic_corresp_numres_index_dic_OP
5,352,974
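# A minimal sketch of how the two returned structures are meant to be used.
# `universe_woH`, `dic_atname2genericname`, `op_value` and `resid` are
# placeholders computed elsewhere, and "POPC" is just an example lipid name.
dic_OP, dic_corresp = init_dic_OP(universe_woH, dic_atname2genericname, "POPC")
index = dic_corresp[resid]                      # map resid -> position of its sublist
dic_OP[("C1", "H11")][index].append(op_value)   # one sublist per residue, one value per frame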
def magic_split(value: str, sep=",", open="(<", close=")>"):
    """Split the value according to the given separator, but keep together
    elements enclosed within the open/close characters. Useful to split C++
    function signatures since type names can contain special characters...

    Examples:
        - magic_split("a,b,c", sep=",") -> ["a", "b", "c"]
        - magic_split("a<b,c>,d(e,<k,c>),p") -> ["a<b,c>", "d(e,<k,c>)", "p"]

    Args:
        value: String to split.
        sep: Separator to use.
        open: List of opening characters.
        close: List of closing characters. Order must match open.

    Returns:
        The list of split parts from value.
    """
    i, j = 0, 0
    s: List[str] = []
    r = []
    while i < len(value):
        j = i + 1
        while j < len(value):
            c = value[j]

            # Separator found and the stack is empty:
            if c == sep and not s:
                break

            # Check close/open:
            if c in open:
                s.append(open.index(c))
            elif c in close:
                # The stack might be empty if the separator is also an opening element:
                if not s and sep in open and j + 1 == len(value):
                    pass
                else:
                    t = s.pop()
                    if t != close.index(c):
                        raise ValueError(
                            "Found closing element {} for opening element {}.".format(
                                c, open[t]
                            )
                        )
            j += 1
        r.append(value[i:j])
        i = j + 1

    return r
5,352,975
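# Quick demonstration of `magic_split` on a C++-style parameter list; commas
# nested inside <> or () do not split the string.
parts = magic_split("std::map<std::string,int>,void(*)(int,float),char")
assert parts == ["std::map<std::string,int>", "void(*)(int,float)", "char"]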
def update_group_annotation(name=None, annotation_name=None, x_pos=None, y_pos=None, angle=None, opacity=None, canvas=None, z_order=None, network=None, base_url=DEFAULT_BASE_URL): """Update Group Annotation Updates a group annotation, changing the given properties. Args: name (UUID or str): Single UUID or str naming group object annotation_name (UUID or str): Name of annotation by UUID or name x_pos (int): X position in pixels from left; default is center of current view y_pos (int): Y position in pixels from top; default is center of current view angle (float): Angle of text orientation; default is 0.0 (horizontal) canvas (str): Canvas to display annotation, i.e., foreground (default) or background z_order (int): Arrangement order specified by number (larger values are in front of smaller values); default is 0 network (SUID or str or None): Name or SUID of the network. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://localhost:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: dict: A named list of annotation properties, including UUID Raises: CyError: if invalid name requests.exceptions.HTTPError: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> update_group_annotation(annotation_name='Group 1', angle=180) {'canvas': 'foreground', 'rotation': '180.0', 'name': 'Group 1', 'x': '2450.0', 'y': '1883.0', 'z': '0', 'type': 'org.cytoscape.view.presentation.annotations.GroupAnnotation', 'uuid': 'b9bf3184-3c5a-4e8b-9651-4bc4403af158', 'memberUUIDs': 'bb3061c5-d8d5-4fca-ac5c-9b7bf8fb9fd0,32f89c1d-e987-4867-9b8a-787aaac6e165,ec73aad8-b00b-4f4d-9361-a4b93f70c8f8'} >>> update_group_annotation(name='2c0a77f8-a6d0-450d-b6ee-1bfe3c8f8aea', annotation_name=group_uuid, x_pos=101, y_pos=201, angle=180, canvas='foreground') {'canvas': 'foreground', 'rotation': '180.0', 'name': 'Group 1', 'x': '101.0', 'y': '201.0', 'z': '0', 'type': 'org.cytoscape.view.presentation.annotations.GroupAnnotation', 'uuid': '2c0a77f8-a6d0-450d-b6ee-1bfe3c8f8aea', 'memberUUIDs': '8872c2f6-42ad-4b6a-8fb9-1d1b13da504d,2c830227-7f6a-4e58-bbef-2070f1b5a603,8d04e34d-86b8-486f-9927-581184cbe03e'} """ cmd_string, net_suid = _build_base_cmd_string('annotation update group', network, base_url) # a good start cmd_string += _get_annotation_name_cmd_string(annotation_name, 'Must provide the UUID or name of group') # x and y position cmd_string += _get_x_y_pos_cmd_string(x_pos, y_pos, net_suid, base_url) # optional params cmd_string += _get_angle_cmd_string(angle) cmd_string += _get_name_cmd_string(name, network, base_url) cmd_string += _get_canvas_cmd_string(canvas) cmd_string += _get_z_order_cmd_string(z_order) # execute command res = commands.commands_post(cmd_string, base_url=base_url) return res
5,352,976
def _extract_properties(properties_str): """Return a dictionary of properties from a string in the format ${key1}={value1}&${key2}={value2}...&${keyn}={valuen} """ d = {} kv_pairs = properties_str.split("&") for entry in kv_pairs: pair = entry.split("=") key = urllib.parse.unquote(pair[0]).lstrip("$") value = urllib.parse.unquote(pair[1]) d[key] = value return d
5,352,977
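# Example round-trip for `_extract_properties`: keys are prefixed with "$"
# and percent-encoded values are decoded.
props = _extract_properties("$name=hello%20world&$version=1.2")
assert props == {"name": "hello world", "version": "1.2"}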
def first_fail_second_succeed(_: Any, context: Any) -> str: """ Simulate Etherscan saying for the first time 'wait', but for the second time 'success'. """ context.status_code = 200 try: if first_fail_second_succeed.called: # type: ignore return '{ "status": "1", "result" : "Pass - Verified", "message" : "" }' except AttributeError: # first time pass first_fail_second_succeed.called = True # type: ignore return '{ "status": "0", "result" : "wait for a moment", "message" : "" }'
5,352,978
def get_ipsw_url(device, ios_version, build): """Get URL of IPSW by specifying device and iOS version.""" json_data = fw_utils.get_json_data(device, "ipsw") if build is None: build = fw_utils.get_build_id(json_data, ios_version, "ipsw") fw_url = fw_utils.get_firmware_url(json_data, build) if fw_url is None: print("[w] could not get IPSW url, exiting...") return fw_url
5,352,979
def cvt_video2img(src_folder, dst_folder, fps=5, wh_size=(720, 500),
                  exclusion_range=580, pre_trim_sec=10, duration=30, extension='mp4'):
    """
    exclusion_range: defines an exclusion range that prevents overlapping
        extraction of the same event situation.
    pre_trim_sec: how long before the event happens the extraction should start.
    wh_size: width and height of the converted images.
    fps: frames per second of the converted images.
    """
    for outdoor in get_dirpaths_in_dir(path=src_folder):
        for fold_num in get_dirpaths_in_dir(path=outdoor):
            for vid_name in get_filepaths_in_dir(path=fold_num, extension=extension):
                xml_path = vid_name.split(".")[0] + ".xml"
                start_points = [0]
                dir_name = vid_name.split("\\")[-1].split(".")[0]
                print(dir_name)

                if os.path.isfile(xml_path):
                    label = parsing_label(xml_path)
                    # dir_name for saving converted imgs
                    dir_name = naming(vid_name.split("\\")[-1], label)
                    dir_name = dir_name.replace('-', '_')
                    # extract start_points because there can be several event points in one video
                    start_points = extract_start_points(xml_path)

                # previous start point
                prev_sp = 0
                for f in start_points:
                    f = int(f)
                    if f < (prev_sp - exclusion_range):
                        continue
                    print(f)
                    save_path = os.path.join(dst_folder, dir_name + "_" + str(f))
                    mkdir(save_path)
                    video2imgs_aihub(vid_name, save_path, f, wh_size, fps, pre_trim_sec, duration)
                    prev_sp = f
5,352,980
def check_input_dir(input_dir: Path, contained_dirs: List[str] = None, contained_files: List[str] = None):
    """Check if the input_dir contains all the contained_dirs and contained_files."""
    if not input_dir.is_dir():
        raise Exception("{} is not an existing directory.".format(input_dir.absolute()))

    if not contained_dirs:
        contained_dirs = []
    if not contained_files:
        contained_files = []

    for contained_dir in contained_dirs:
        dir_path = input_dir / contained_dir
        if not dir_path.is_dir():
            raise Exception("{} is not an existing directory.".format(dir_path.absolute()))

    for contained_file in contained_files:
        file_path = input_dir / contained_file
        if not file_path.is_file():
            raise Exception("{} is not an existing file.".format(file_path.absolute()))
5,352,981
async def test_combine(dut): """Test the Combine trigger.""" # gh-852 async def coro(delay): await Timer(delay, "ns") tasks = [await cocotb.start(coro(dly)) for dly in [10, 30, 20]] await Combine(*(t.join() for t in tasks))
5,352,982
def escape_parameter(value: Any) -> str: """ Escape a query parameter. """ if value == "*": return value if isinstance(value, str): value = value.replace("'", "''") return f"'{value}'" if isinstance(value, bytes): value = value.decode("utf-8") return f"'{value}'" if isinstance(value, bool): return "TRUE" if value else "FALSE" if isinstance(value, (int, float)): return str(value) return f"'{value}'"
5,352,983
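# A few illustrative calls to `escape_parameter`; single quotes are doubled,
# booleans and numbers are rendered as SQL literals, and "*" passes through.
assert escape_parameter("O'Brien") == "'O''Brien'"
assert escape_parameter(True) == "TRUE"
assert escape_parameter(42) == "42"
assert escape_parameter("*") == "*"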
def customAction(pcap):
    """Anonymize a single packet of a session.

    Args:
        pcap: one packet
    """
    src_ip = "0.0.0.0"
    src_ipv6 = "0:0:0:0:0:0:0:0"
    src_port = 0
    src_mac = "00:00:00:00:00:00"

    dst_ip = "0.0.0.0"
    dst_ipv6 = "0:0:0:0:0:0:0:0"
    dst_port = 0
    dst_mac = "00:00:00:00:00:00"

    if 'Ether' in pcap:
        pcap.src = src_mac  # overwrite the source MAC address
        pcap.dst = dst_mac  # overwrite the destination MAC address
    if 'IP' in pcap:
        pcap["IP"].src = src_ip
        pcap["IP"].dst = dst_ip
    if 'IPv6' in pcap:
        pcap["IPv6"].src = src_ipv6
        pcap["IPv6"].dst = dst_ipv6
    if 'TCP' in pcap:
        pcap['TCP'].sport = src_port
        pcap['TCP'].dport = dst_port
    if 'UDP' in pcap:
        pcap['UDP'].sport = src_port
        pcap['UDP'].dport = dst_port
    if 'ARP' in pcap:
        pcap["ARP"].psrc = src_ip
        pcap["ARP"].pdst = dst_ip
        pcap["ARP"].hwsrc = src_mac
        pcap["ARP"].hwdst = dst_mac
5,352,984
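# A hedged usage sketch for `customAction`: read a capture with scapy,
# anonymize every packet in place and write the result back out. The file
# names are placeholders.
from scapy.all import rdpcap, wrpcap

packets = rdpcap("session.pcap")
for pkt in packets:
    customAction(pkt)
wrpcap("session_anonymized.pcap", packets)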
def subplot3D(ax, code_azi, code_ele, ant_azi_num, ant_ele_num): """A beamforming pattern of uniform rectangular array""" thetas = np.linspace(0, 2*math.pi, 100, endpoint=False) betas = np.linspace(-math.pi/2, math.pi/2, 50) values = np.zeros((len(betas), len(thetas)), dtype=np.float32) for i, beta in enumerate(betas): for j, theta in enumerate(thetas): array_response_ele = 1/math.sqrt(ant_ele_num)*np.exp(1j*math.pi*np.arange(ant_ele_num)*math.sin(beta)) array_response_azi = 1/math.sqrt(ant_azi_num)*np.exp(1j*math.pi*np.arange(ant_azi_num)*math.sin(theta)*math.cos(beta)) values[i, j] = abs(np.sum(np.kron(code_ele*array_response_ele, code_azi*array_response_azi))) THETA, BETA = np.meshgrid(thetas, betas) X = values * np.cos(BETA) * np.sin(THETA) Y = values * np.cos(BETA) * np.cos(THETA) Z = values * np.sin(BETA) my_color = cm.jet(values / np.amax(values)) surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=my_color, cmap=plt.get_cmap('jet'), linewidth=0, antialiased=False, alpha=0.5) ax.set_axis_off() # ax.set_xlabel("x"); ax.set_ylabel("y"); ax.set_zlabel("z") # plt.colorbar(surf, pad=0.05)
5,352,985
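# A minimal usage sketch for `subplot3D` (assuming numpy/matplotlib are
# imported as `np`/`plt` in this module): plot the broadside pattern of a
# 4x2 uniform rectangular array with uniform, normalized codes.
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
code_azi = np.ones(4) / math.sqrt(4)   # length must equal ant_azi_num
code_ele = np.ones(2) / math.sqrt(2)   # length must equal ant_ele_num
subplot3D(ax, code_azi, code_ele, ant_azi_num=4, ant_ele_num=2)
plt.show()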
def Get_Query(Fq):
    """Read the next SQL query from the file handle `Fq`.

    Lines containing "--" (comments) are skipped; other lines are accumulated
    until a ";" terminator or the end of file is reached.

    Returns a tuple (EoF, Ok, Q) where EoF tells whether the end of file was
    reached, Ok whether a complete query was read, and Q is the query text.
    """
    Q = ""
    EoF = False
    Ok = False
    while True:
        l = Fq.readline()
        if "--" in l:
            # skip comment line
            continue
        elif l == "":
            EoF = True
            break
        else:
            Q += l
            if ";" in Q:
                Ok = True
                break
    return EoF, Ok, Q
5,352,986
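# A usage sketch for `Get_Query`: iterate over an SQL script ("queries.sql"
# is a placeholder path) and print every complete statement found.
with open("queries.sql") as fq:
    eof = False
    while not eof:
        eof, ok, query = Get_Query(fq)
        if ok:
            print(query.strip())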
def get_path_segments(url): """ Return a list of path segments from a `url` string. This list may be empty. """ path = unquote_plus(urlparse(url).path) segments = [seg for seg in path.split("/") if seg] if len(segments) <= 1: segments = [] return segments
5,352,987
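# Examples for `get_path_segments`; note that a URL whose path has a single
# segment (or none) yields an empty list.
assert get_path_segments("https://example.com/a/b%20c/d") == ["a", "b c", "d"]
assert get_path_segments("https://example.com/only") == []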
def get_config_file(c: typing.Union[str, ConfigFile, None]) -> typing.Optional[ConfigFile]: """ Checks if the given argument is a file or a configFile and returns a loaded configFile else returns None """ if c is None: # See if there's a config file in the current directory where Python is being run from current_location_config = Path("flytekit.config") if current_location_config.exists(): logger.info(f"Using configuration from Python process root {current_location_config.absolute()}") return ConfigFile(str(current_location_config.absolute())) # If not, see if there's a config in the user's home directory home_dir_config = Path(Path.home(), ".flyte", "config") # _default_config_file_name in main.py if home_dir_config.exists(): logger.info(f"Using configuration from home directory {home_dir_config.absolute()}") return ConfigFile(str(home_dir_config.absolute())) # If not, see if the env var that flytectl sandbox tells the user to set is set, # or see if there's something in the default home directory location flytectl_path = Path(Path.home(), ".flyte", "config.yaml") flytectl_path_from_env = getenv(FLYTECTL_CONFIG_ENV_VAR, None) if flytectl_path_from_env: flytectl_path = Path(flytectl_path_from_env) if flytectl_path.exists(): logger.info(f"Using flytectl/YAML config {flytectl_path.absolute()}") return ConfigFile(str(flytectl_path.absolute())) # If not, then return None and let caller handle return None if isinstance(c, str): return ConfigFile(c) return c
5,352,988
def get_logger(tag: str) -> Logger: """ Produces logger with given message tag which will write logs to the console and file stored in LOG_FILE_PATH directory :param tag: tag for messages of the logger :return: logger object """ logger = logging.getLogger(tag) logger.setLevel(logging.DEBUG) # create console handler which logs even debug messages console_handler = logging.StreamHandler() console_handler.setLevel(logging.DEBUG) # create file handler which logs info messages file_handler = TimedRotatingFileHandler(LOG_FILE_PATH, when='midnight', interval=1, backupCount=1) file_handler.setLevel(logging.INFO) # create formatter and add it to the handlers formatter = logging.Formatter('%(asctime)s - %(levelname)s - ' + tag + ': %(message)s') console_handler.setFormatter(formatter) file_handler.setFormatter(formatter) # add the handlers to the logger logger.addHandler(console_handler) logger.addHandler(file_handler) return logger
5,352,989
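# Usage sketch for `get_logger` ("ingest" is an arbitrary example tag):
# DEBUG messages go to the console only, INFO and above also land in the
# rotating file at LOG_FILE_PATH.
log = get_logger("ingest")
log.debug("console only")
log.info("console and file")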
def get_index(): """Redirects the index to /form """ return redirect("/form")
5,352,990
def print_letter(letter):
    """Prints a letter.

    The input variable is the index of the letter to print. The letters from
    A to Z are indexed at 0 (so A=0, B=1, etc). There are currently 2
    additional special 'characters'.
    26 will 'print' a space
    27 will execute a 'new line'
    You are free to add any additional special characters here.

    The pen should be positioned at the top left corner of the letter box
    before calling this myblock.

    After printing the letter, the pen will be moved to the position of the
    next letter. If it is at the end of the line, it will automatically move
    the pen to the beginning of the next line.
    """
    variables['LastLetterWidth'] = variables['Seg4']
    variables['LetterSpacing'] = 20
    if letter == 0:
        print_a(1)
    elif letter == 1:
        print_b(1)
    elif letter == 2:
        print_c(1)
    elif letter == 3:
        print_d(1)
    elif letter == 4:
        print_e(1)
    elif letter == 5:
        print_f(1)
    elif letter == 6:
        print_g(1)
    elif letter == 7:
        print_h(1)
    elif letter == 8:
        print_i(1)
        variables['LastLetterWidth'] = 0
    elif letter == 9:
        print_j(1)
    elif letter == 10:
        print_k(1)
    elif letter == 11:
        print_l(1)
    elif letter == 12:
        print_m(1)
    elif letter == 13:
        print_n(1)
    elif letter == 14:
        print_o(1)
    elif letter == 15:
        print_p(1)
    elif letter == 16:
        print_q(1)
    elif letter == 17:
        print_r(1)
    elif letter == 18:
        print_s(1)
    elif letter == 19:
        print_t(1)
    elif letter == 20:
        print_u(1)
    elif letter == 21:
        print_v(1)
    elif letter == 22:
        print_w(1)
    elif letter == 23:
        print_x(1)
    elif letter == 24:
        print_y(1)
    elif letter == 25:
        print_z(1)
    elif letter == 26:
        if variables['LinePosition'] == 0:
            variables['LetterSpacing'] = 0
            variables['LastLetterWidth'] = 0
        else:
            print_space(1)
    elif letter == 27:
        carriage_move(0)
        line_feed()
        variables['LetterSpacing'] = 0

    # Move the pen to accommodate the letter spacing, and update all the
    # variables tracking the position of the pen on the line.
    letter_spacing = variables['LetterSpacing']
    motor['A'].on_for_degrees(20, letter_spacing)
    line_position = (variables['LastLetterWidth'] + variables['LinePosition']
                     + letter_spacing)
    variables['LinePosition'] = line_position

    # Do an automatic new line if we are at the end of the current line.
    if line_position > variables['LineWidth']:
        carriage_move(0)
        line_feed()
5,352,991
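# A small helper sketch showing how a message maps onto the indices that
# `print_letter` expects (A-Z -> 0-25, space -> 26, newline -> 27); the
# message text is just an example.
def print_message(text):
    for ch in text.upper():
        if ch == " ":
            print_letter(26)
        elif ch == "\n":
            print_letter(27)
        elif "A" <= ch <= "Z":
            print_letter(ord(ch) - ord("A"))

print_message("HELLO WORLD")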
def create_init_db(dbpath=_NBSEARCH_DB_PATH, clear=False): """Create and initialise database.""" if clear and os.path.exists(dbpath): os.remove(dbpath) db = sqlite_utils.Database(dbpath) create_tables(db)
5,352,992
def _register_network(network_id: str, chain_name: str): """Register a network. """ network = factory.create_network(network_id, chain_name) cache.infra.set_network(network) # Inform. utils.log(f"registered {network.name_raw} - metadata") return network
5,352,993
def cli(*args, **kwargs): """Execute scripts on a device via a serial port.""" # print(f"args = {args}, kwargs = {kwargs}") if kwargs["debug_mode"]: import debugpy print(f"Debugging is enabled, listening on: {DEBUG_ADDR}:{DEBUG_PORT}.") debugpy.listen(address=(DEBUG_ADDR, DEBUG_PORT)) print(" - execution paused, waiting for debugger to attach...") debugpy.wait_for_client() print(" - debugger is now attached, continuing execution.") serial_port = kwargs.pop("serial_port") device_id = kwargs.pop("device_id") config_dict = { "config": {"enforce_allowlist": True, "disable_discovery": True}, "schema": {ATTR_CONTROLLER: device_id}, "allowlist": {device_id: {"name": "Controller"}} } if kwargs.get("get_schedule") is not None: config_dict["schema"]["zones"] = {kwargs["get_schedule"]: {}} elif kwargs.get("set_schedule") is not None: kwargs["set_schedule"] = json.load(kwargs["set_schedule"]) config_dict["schema"]["zones"] = {kwargs["set_schedule"]["zone_idx"]: {}} asyncio.run(main(serial_port, **config_dict, **kwargs))
5,352,994
def test_adjust_inv_sigmoid_cutoff_half(): """Verifying the output with expected results for inverse sigmoid correction with cutoff equal to half and gain of 10""" image = np.arange(0, 255, 4, np.uint8).reshape((8, 8)) expected = np.array([ [253, 253, 252, 252, 251, 251, 250, 249], [249, 248, 247, 245, 244, 242, 240, 238], [235, 232, 229, 225, 220, 215, 210, 204], [197, 190, 182, 174, 165, 155, 146, 136], [126, 116, 106, 96, 87, 78, 70, 62], [ 55, 49, 43, 37, 33, 28, 25, 21], [ 18, 16, 14, 12, 10, 8, 7, 6], [ 5, 4, 4, 3, 3, 2, 2, 1]], dtype=np.uint8) result = exposure.adjust_sigmoid(image, 0.5, 10, True) assert_array_equal(result, expected)
5,352,995
def run_cmd(command: Cmd, cmd_parameters: Dict[str, str], workdir: str): """Run a single command.""" cmdline = command.cmd.format(**cmd_parameters) print('Running: {}'.format(cmdline)) start_time = datetime.datetime.now() proc = subprocess.Popen(cmdline, shell=True, cwd=workdir, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() if proc.poll() != 0: print(stdout) print(stderr) print('Benchmark failed.') sys.exit(1) end_time = datetime.datetime.now() command.execution_time = end_time - start_time print(' Execution time was: {}'.format(command.execution_time))
5,352,996
def file(base_path, other_path): """ Returns a single file """ return [[Path(base_path), Path(other_path)]]
5,352,997
def get_md_resource(file_path): """Read the file and parse into an XML tree. Parameters ---------- file_path : str Path of the file to read. Returns ------- etree.ElementTree XML tree of the resource on disk. """ namespaces = Namespaces().get_namespaces(keys=('gmd', 'gmi')) with io.open(file_path, mode='r', encoding='utf-8') as f: data = f.read().encode('utf-8') data = etree.fromstring(data) mdelem = data.find('.//' + util.nspath_eval( 'gmd:MD_Metadata', namespaces)) if mdelem is None: mdelem = data.find( './/' + util.nspath_eval('gmi:MI_Metadata', namespaces)) if mdelem is None and data.tag in ['{http://www.isotc211.org/2005/gmd}MD_Metadata', '{http://www.isotc211.org/2005/gmi}MI_Metadata']: mdelem = data return mdelem
5,352,998
def fmin_style(sfmin): """convert sfmin to style""" return Struct( is_valid=good(sfmin.is_valid, True), has_valid_parameters=good(sfmin.has_valid_parameters, True), has_accurate_covar=good(sfmin.has_accurate_covar, True), has_posdef_covar=good(sfmin.has_posdef_covar, True), has_made_posdef_covar=good(sfmin.has_made_posdef_covar, False), hesse_failed=good(sfmin.hesse_failed, False), has_covariance=good(sfmin.has_covariance, True), is_above_max_edm=good(sfmin.is_above_max_edm, False), has_reached_call_limit=caution(sfmin.has_reached_call_limit, False), )
5,352,999