Columns: docstring (string, lengths 52 to 499) · function (string, lengths 67 to 35.2k) · __index_level_0__ (int64, values 52.6k to 1.16M)
Insert python list of tuples into SQL table Args: data (list): List of tuples table (str): Name of database table conn (connection object): database connection object columns (str): String of column names to use; if not assigned then all columns are presumed to be used [Optional] db_type (str): Either "sqlite" or "mysql"
def insert_query_m(data, table, conn, columns=None, db_type='mysql'):
    # if length of data is very large we need to break into chunks; insert_query_m
    # is then used recursively until all data has been inserted
    if len(data) > 10000:
        _chunk_query(data, 10000, columns, conn, table, db_type)
    else:
        # sqlite and mysql use different placeholder strings (? or %s)
        if db_type == 'sqlite':
            type_sign = '?'
        else:
            type_sign = '%s'
        # create a string of placeholders for the insertion statement
        # (e.g. "?, ?, ?" if inserting 3 columns of data)
        type_com = type_sign + ", "
        type = type_com * (len(data[0]) - 1)
        type = type + type_sign
        # if using specific columns to insert data
        if columns:
            stmt = "INSERT INTO " + table + "( " + columns + ") VALUES (" + type + ")"
        else:
            stmt = "INSERT INTO " + table + " VALUES (" + type + ")"
        # execute query
        cursor = conn.cursor()
        cursor.executemany(stmt, data)
        conn.commit()
900,166
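A minimal usage sketch for insert_query_m, assuming an in-memory SQLite database and a hypothetical two-column table (the table name and schema are made up for illustration):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE compounds (id INTEGER, name TEXT)')  # hypothetical schema

rows = [(1, 'glucose'), (2, 'fructose')]
insert_query_m(rows, table='compounds', conn=conn, db_type='sqlite')

print(conn.execute('SELECT * FROM compounds').fetchall())
# [(1, 'glucose'), (2, 'fructose')]

Lists longer than 10,000 tuples are split into 10,000-row batches by the _chunk_query helper shown in the next record.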
Call for inserting SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): If "sqlite" or "mysql"
def _chunk_query(l, n, cn, conn, table, db_type):
    # For item i in a range that is a length of l,
    [insert_query_m(l[i:i + n], table, conn, cn, db_type) for i in range(0, len(l), n)]
900,167
Convert any python list of lists (or tuples) so that the strings are formatted correctly for insertion into a SQL database Args: ll (list): List of lists (or tuples)
def _make_sql_compatible(ll):
    new_ll = []
    for l in ll:
        new_l = ()
        for i in l:
            if not i:
                new_l = new_l + (None,)
            else:
                if isinstance(i, str):
                    if sys.version_info < (3, 0):
                        val = i.decode('utf8').encode('ascii', errors='ignore')
                    else:
                        # in py3 strings should be ok...
                        val = i
                else:
                    val = i
                new_l = new_l + (val,)
        new_ll.append(new_l)
    return new_ll
900,168
Find completions for current command. This assumes that we'll handle all completion logic here and that the shell's automatic file name completion is disabled. Args: command_line: Command line current_token: Token at the cursor position: Current cursor position shell: Name of shell
def complete(command_line, current_token, position, shell: arg(choices=('bash', 'fish'))):
    position = int(position)
    tokens = shlex.split(command_line[:position])
    all_argv, run_argv, command_argv = run.partition_argv(tokens[1:])
    run_args = run.parse_args(run_argv)
    module = run_args.get('commands_module')
    module = module or DEFAULT_COMMANDS_MODULE
    module = normalize_path(module)
    try:
        collection = Collection.load_from_module(module)
    except Exception:
        collection = {}
    found_command = find_command(collection, tokens) or run

    if current_token:
        # Completing either a command name, option name, or path.
        if current_token.startswith('-'):
            if current_token not in found_command.option_map:
                print_command_options(found_command, current_token)
        else:
            print_commands(collection, shell)
            path = os.path.expanduser(current_token)
            path = os.path.expandvars(path)
            paths = glob.glob('%s*' % path)
            if paths:
                for entry in paths:
                    if os.path.isdir(entry):
                        print('%s/' % entry)
                    else:
                        print(entry)
    else:
        # Completing option value. If a value isn't expected, show the
        # options for the current command and the list of commands instead.
        option = found_command.option_map.get(tokens[-1])
        if option and option.takes_value:
            if option.choices:
                for choice in option.choices:
                    print(choice)
            else:
                for entry in os.listdir():
                    if os.path.isdir(entry):
                        print('%s/' % entry)
                    else:
                        print(entry)
        else:
            print_command_options(found_command)
            print_commands(collection, shell)
900,284
Install a given package. Args: name (str): The package name to install. This can be any valid pip package specification. index (str): The URL for a pypi index to use. force (bool): Force the reinstall of packages during updates. update (bool): Update the package if it is out of date.
def install_package(self, name, index=None, force=False, update=False):
    cmd = 'install'
    if force:
        cmd = '{0} {1}'.format(cmd, '--force-reinstall')
    if update:
        # pip's flag for upgrading an installed package is --upgrade
        cmd = '{0} {1}'.format(cmd, '--upgrade')
    if index:
        cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))
    self.pip('{0} {1}'.format(cmd, name))
900,298
Install packages from a requirements.txt file. Args: path (str): The path to the requirements file. index (str): The URL for a pypi index to use.
def install_requirements(self, path, index=None):
    cmd = 'install -r {0}'.format(path)
    if index:
        cmd = 'install --index-url {0} -r {1}'.format(index, path)
    self.pip(cmd)
900,299
Frame counts is the core of all the counting operations. It counts on a per-frame/per-region basis. Args: subsets (list): a list of Subset objects. If not specified, the phenotypes are used. Returns: pandas.DataFrame: A dataframe of count data
def frame_counts(self, subsets=None):
    mergeon = self.cdf.frame_columns + ['region_label']
    if subsets is None:
        cnts = self.groupby(mergeon + ['phenotype_label']).count()[['cell_index']].\
            rename(columns={'cell_index': 'count'})
        mr = self.measured_regions
        mr['_key'] = 1
        mp = pd.DataFrame({'phenotype_label': self.measured_phenotypes})
        mp['_key'] = 1
        mr = mr.merge(mp, on='_key').drop(columns='_key')
        cnts = mr.merge(cnts, on=mergeon + ['phenotype_label'], how='left').fillna(0)
    else:
        # Use subsets
        if isinstance(subsets, SL):
            subsets = [subsets]
        cnts = []
        labels = set([s.label for s in subsets])
        for x in subsets:
            if x.label is None:
                raise ValueError("Subsets must be named")
        if len(labels) != len(subsets):
            raise ValueError("Subsets must be uniquely named.")
        seen_labels = []
        for sl in subsets:
            if sl.label in seen_labels:
                raise ValueError("cannot use the same label twice in the subsets list")
            seen_labels.append(sl.label)
            df = self.cdf.subset(sl)
            df = df.groupby(mergeon).count()[['cell_index']].\
                rename(columns={'cell_index': 'count'}).reset_index()
            df = self.measured_regions.merge(df, on=mergeon, how='left').fillna(0)
            df['phenotype_label'] = sl.label
            cnts.append(df)
        cnts = pd.concat(cnts)
    cnts = cnts[mergeon + ['region_area_pixels', 'phenotype_label', 'count']]
    cnts['region_area_mm2'] = cnts.apply(lambda x:
        (x['region_area_pixels'] / 1000000) * (self.microns_per_pixel * self.microns_per_pixel), 1)
    cnts['density_mm2'] = cnts.apply(lambda x:
        np.nan if x['region_area_mm2'] == 0 else x['count'] / x['region_area_mm2'], 1)
    # make sure regions of size zero have counts of np.nan
    cnts.loc[cnts['region_area_pixels'] < self.minimum_region_size_pixels,
             ['count', 'density_mm2']] = np.nan
    return cnts
900,464
Wrapper to retrieve the axis of a given histogram. This can be convenient outside of just projections, so it's made available in the API. Args: axis_type: The type of axis to retrieve. Returns: Callable to retrieve the specified axis when given a hist.
def hist_axis_func(axis_type: enum.Enum) -> Callable[[Hist], Axis]:
    def axis_func(hist: Hist) -> Axis:
        # Determine the axis_type value.
        # Use try here instead of checking for a particular type to protect against
        # type changes (say in the enum).
        try:
            # Try to extract the value from an enum
            hist_axis_type = axis_type.value
        except AttributeError:
            # Seems that we received an int, so just use that value
            hist_axis_type = axis_type

        if hasattr(hist, "ProjectionND") and hasattr(hist, "Projection"):
            # THnBase defines ProjectionND and Projection, so we will use those as proxies.
            # Return the proper THn access.
            #logger.debug(f"From hist: {hist}, hist_axis_type: {hist_axis_type}, axis: {hist.GetAxis(hist_axis_type.value)}")
            return hist.GetAxis(hist_axis_type)
        else:
            # If it's not a THn, then it must be a TH1 derived
            axis_function_map = {
                TH1AxisType.x_axis.value: hist.GetXaxis,
                TH1AxisType.y_axis.value: hist.GetYaxis,
                TH1AxisType.z_axis.value: hist.GetZaxis,
            }
            # Retrieve the axis function and execute it. It is done separately to
            # clarify any possible errors.
            return_func = axis_function_map[hist_axis_type]
            return return_func()

    return axis_func
900,504
Apply the associated range set to the axis of a given hist. Note: The min and max values should be bins, not user ranges! For more, see the binning explanation in ``apply_func_to_find_bin(...)``. Args: hist: Histogram to which the axis range restriction should be applied. Returns: None. The range is set on the axis.
def apply_range_set(self, hist: Hist) -> None:
    # Do individual assignments to clarify which particular value is causing an error here.
    axis = self.axis(hist)
    #logger.debug(f"axis: {axis}, axis(): {axis.GetName()}")
    # Help out mypy
    assert not isinstance(self.min_val, float)
    assert not isinstance(self.max_val, float)
    # Evaluate the functions to determine the values.
    min_val = self.min_val(axis)
    max_val = self.max_val(axis)
    # NOTE: Using SetRangeUser() here was a bug, since I've been passing bin values! In general,
    # passing bin values is more flexible, but requires the values to be passed to
    # ``apply_func_to_find_bin()`` to be shifted by some small epsilon to get the desired bin.
    self.axis(hist).SetRange(min_val, max_val)
900,507
Calls the actual projection function for the hist. Args: hist: Histogram from which the projections should be performed. Returns: The projected histogram.
def call_projection_function(self, hist: Hist) -> Hist:
    # Restrict projection axis ranges
    for axis in self.projection_axes:
        logger.debug(f"Apply projection axes hist range: {axis.name}")
        axis.apply_range_set(hist)

    projected_hist = None
    if hasattr(hist, "ProjectionND") and hasattr(hist, "Projection"):
        # THnBase defines ProjectionND and Projection, so we will use those as proxies.
        projected_hist = self._project_THn(hist = hist)
    elif hasattr(hist, "ProjectionZ") and hasattr(hist, "Project3D"):
        # TH3 defines ProjectionZ and Project3D, so we will use those as proxies.
        projected_hist = self._project_TH3(hist = hist)
    elif hasattr(hist, "ProjectionX") and hasattr(hist, "ProjectionY"):
        # TH2 defines ProjectionX and ProjectionY, so we will use those as proxies.
        projected_hist = self._project_TH2(hist = hist)
    else:
        raise TypeError(type(hist), f"Could not recognize hist {hist} of type {type(hist)}")

    # Cleanup restricted axes
    self.cleanup_cuts(hist, cut_axes = self.projection_axes)

    return projected_hist
900,511
Perform the actual THn -> THn or TH1 projection. This projection could be to 1D, 2D, 3D, or ND. Args: hist (ROOT.THnBase): Histogram from which the projections should be performed. Returns: ROOT.THnBase or ROOT.TH1: The projected histogram.
def _project_THn(self, hist: Hist) -> Any:
    # THnBase projection args are given as a list of axes, followed by any possible options.
    projection_axes = [axis.axis_type.value for axis in self.projection_axes]

    # Handle ROOT THnBase quirk...
    # 2D projections are called as (y, x, options), so we should reverse the order so it
    # performs as expected.
    if len(projection_axes) == 2:
        # Reverses in place
        projection_axes.reverse()

    # Test calculating errors
    # Add "E" to ensure that errors will be calculated
    args = projection_axes + ["E"]
    # Do the actual projection
    logger.debug(f"hist: {hist.GetName()} args: {args}")
    if len(projection_axes) > 3:
        # Project into a THnBase object.
        projected_hist = hist.ProjectionND(*args)
    else:
        # Project a TH1 derived object.
        projected_hist = hist.Projection(*args)

    return projected_hist
900,512
Perform the actual TH3 -> TH1 projection. This projection could be to 1D or 2D. Args: hist (ROOT.TH3): Histogram from which the projections should be performed. Returns: ROOT.TH1: The projected histogram.
def _project_TH3(self, hist: Hist) -> Any:
    # Axis length validation
    if len(self.projection_axes) < 1 or len(self.projection_axes) > 2:
        raise ValueError(len(self.projection_axes), "Invalid number of axes")

    # Need to concatenate the names of the axes together
    projection_axis_name = ""
    for axis in self.projection_axes:
        # Determine the axis name based on the name of the axis type.
        # [:1] returns just the first letter. For example, we could get "xy" if the first
        # axis was x_axis and the second was y_axis.
        # NOTE: Careful. This depends on the name of the enumerated values!!! Since this isn't
        # terribly safe, we then perform additional validation on the name to ensure that it
        # is one of the expected axis names.
        proj_axis_name = axis.axis_type.name[:1]
        if proj_axis_name not in ["x", "y", "z"]:
            raise ValueError(f"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration.")
        projection_axis_name += proj_axis_name

    # Handle ROOT Project3D quirk...
    # 2D projections are called as (y, x, options), so we should reverse the order so it
    # performs as expected.
    # NOTE: This isn't well documented in TH3. It is instead described in THnBase.Projection(...)
    if len(self.projection_axes) == 2:
        # Reverse the axes
        projection_axis_name = projection_axis_name[::-1]

    # Do the actual projection
    logger.info(f"Projecting onto axes \"{projection_axis_name}\" from hist {hist.GetName()}")
    projected_hist = hist.Project3D(projection_axis_name)

    return projected_hist
900,513
Perform the actual TH2 -> TH1 projection. This projection can only be to 1D. Args: hist (ROOT.TH2): Histogram from which the projections should be performed. Returns: ROOT.TH1: The projected histogram.
def _project_TH2(self, hist: Hist) -> Any:
    if len(self.projection_axes) != 1:
        raise ValueError(len(self.projection_axes), "Invalid number of axes")

    #logger.debug(f"self.projection_axes[0].axis: {self.projection_axes[0].axis}, axis range name: {self.projection_axes[0].name}, axis_type: {self.projection_axes[0].axis_type}")
    # NOTE: We cannot use TH3.ProjectionZ(...) because it has different semantics than ProjectionX
    # and ProjectionY. In particular, it doesn't respect the axis limits of the axis onto which
    # it is projected. So we have to separate the projection by histogram type as opposed to
    # axis length.
    projection_func_map = {
        TH1AxisType.x_axis.value: hist.ProjectionX,
        TH1AxisType.y_axis.value: hist.ProjectionY,
    }

    # Determine the axis_type value.
    # Use try here instead of checking for a particular type to protect against type changes
    # (say in the enum). An int has no .value attribute, so AttributeError is the failure mode.
    try:
        # Try to extract the value from an enum
        axis_type = self.projection_axes[0].axis_type.value
    except AttributeError:
        # Seems that we received an int, so just use that value
        axis_type = self.axis_type  # type: ignore

    projection_func = projection_func_map[axis_type]

    # Do the actual projection
    logger.info(f"Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}")
    projected_hist = projection_func()

    return projected_hist
900,514
Driver function for projecting and storing a single observable. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histogram. The histogram is also stored in the output specified by ``output_observable``.
def _project_single_observable(self, **kwargs: Dict[str, Any]) -> Hist:
    # Help out mypy
    assert isinstance(self.output_attribute_name, str)

    # Run the actual projection.
    output_hist, projection_name, projection_name_args = self._project_observable(
        input_key = "single_observable",
        input_observable = self.observable_to_project_from,
        **kwargs,
    )

    # Store the output.
    output_hist_args = projection_name_args
    output_hist_args.update({  # type: ignore
        "output_hist": output_hist,
        "projection_name": projection_name,
    })
    # Store the final histogram.
    output_hist = self.output_hist(**output_hist_args)  # type: ignore

    # Store the final output hist
    if not hasattr(self.output_observable, self.output_attribute_name):
        raise ValueError(
            f"Attempted to assign hist to non-existent attribute {self.output_attribute_name}"
            f" of object {self.output_observable}. Check the attribute name!"
        )
    # Actually store the histogram.
    setattr(self.output_observable, self.output_attribute_name, output_hist)

    # Return the observable
    return output_hist
900,516
Driver function for projecting and storing a dictionary of observables. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histograms. The projected histograms are also stored in ``output_observable``.
def _project_dict(self, **kwargs: Dict[str, Any]) -> Dict[str, Hist]:
    # Setup function arguments with values which don't change per loop.
    get_hist_args = copy.deepcopy(kwargs)
    projection_name_args = copy.deepcopy(kwargs)

    for key, input_observable in self.observable_to_project_from.items():
        output_hist, projection_name, projection_name_args = self._project_observable(
            input_key = key,
            input_observable = input_observable,
            get_hist_args = get_hist_args,
            projection_name_args = projection_name_args,
            **kwargs,
        )
        # Store the output observable
        output_hist_args = projection_name_args
        output_hist_args.update({  # type: ignore
            "output_hist": output_hist,
            "projection_name": projection_name,
        })
        output_key_name = self.output_key_name(**output_hist_args)  # type: ignore
        self.output_observable[output_key_name] = self.output_hist(**output_hist_args)  # type: ignore

    return self.output_observable
900,517
Perform the requested projection(s). Note: All cuts on the original histograms will be reset when this function is completed. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histogram(s). The projected histograms are also stored in ``output_observable``.
def project(self, **kwargs: Dict[str, Any]) -> Union[Hist, Dict[str, Hist]]:
    if self.single_observable_projection:
        return self._project_single_observable(**kwargs)
    else:
        return self._project_dict(**kwargs)
900,518
Cleanup applied cuts by resetting the axis to the full range. Inspired by: https://github.com/matplo/rootutils/blob/master/python/2.7/THnSparseWrapper.py Args: hist: Histogram for which the axes should be reset. cut_axes: List of axis cuts, which correspond to axes that should be reset.
def cleanup_cuts(self, hist: Hist, cut_axes: Iterable[HistAxisRange]) -> None:
    for axis in cut_axes:
        # According to the function TAxis::SetRange(first, last), the widest possible range is
        # (1, Nbins). Anything beyond that will be reset to (1, Nbins).
        axis.axis(hist).SetRange(1, axis.axis(hist).GetNbins())
900,519
Define the projection name for this projector. Note: This function is just a basic placeholder and likely should be overridden. Args: kwargs: Projection information dict combined with additional arguments passed to the projection function. Returns: Projection name string formatted with the passed options. By default, it returns ``projection_name_format`` formatted with the arguments to this function.
def projection_name(self, **kwargs: Dict[str, Any]) -> str:
    return self.projection_name_format.format(**kwargs)
900,520
The main method of the Search class. It searches the Theia Landsat API Returns python dictionary Arguments: start_date -- date string. format: YYYY-MM-DD end_date -- date string. format: YYYY-MM-DD limit -- integer specifying the maximum number of results to return. clipper -- clipper object : clipper.bbox / clipper.town
def search(self, limit, start_date=None, end_date=None, clipper=None):
    search_string = self._query_builder(start_date, end_date, clipper)
    # Have to manually build the URI to bypass requests URI encoding
    # The api server doesn't accept encoded URIs
    #r = requests.get('%s?%s&&maxRecords=%s' % (self.api_url,
    #                                           search_string,
    #                                           limit))
    try:
        r = requests.get('%s?%s&&maxRecords=%s' % (self.api_url,
                                                   search_string,
                                                   limit))
        r.raise_for_status()
    except requests.HTTPError as e:
        exit("site is not available")
    r_dict = json.loads(r.text)
    result = {}
    if not r_dict['features']:
        result['status'] = u'error'
        result['message'] = "error while loading data"
    else:
        result['status'] = u'SUCCESS'
        result['total'] = len(r_dict['features'])
        result['limit'] = limit
        result['ID'] = [i['id'] for i in r_dict['features']]
        result['downloads'] = [{"download": i['properties']['services']['download']['url'],
                                "id": i['id']} for i in r_dict['features']]
        result['results'] = {
            "features": [{'properties': {'sceneID': i['id'],
                                         'sat_type': i['properties']['platform'],
                                         'thumbnail': i['properties']['thumbnail'],
                                         'date': i['properties']['completionDate'],
                                         'download': i['properties']['services']['download']['url']},
                          'geometry': i['geometry'],
                          "type": "Feature"} for i in r_dict['features']],
            "type": "FeatureCollection"
        }
    return result
900,763
Get commands in namespace. Args: namespace (dict|module): Typically a module. If not passed, the globals from the call site will be used. level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the namespace, ordered by name. Can be used to create ``__all__`` lists:: __all__ = list(get_commands_in_namespace())
def get_commands_in_namespace(namespace=None, level=1):
    from ..command import Command  # noqa: Avoid circular import
    commands = {}
    if namespace is None:
        frame = inspect.stack()[level][0]
        namespace = frame.f_globals
    elif inspect.ismodule(namespace):
        namespace = vars(namespace)
    for name in namespace:
        obj = namespace[name]
        if isinstance(obj, Command):
            commands[name] = obj
    return OrderedDict((name, commands[name]) for name in sorted(commands))
900,930
Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css'
def _get_bundles_by_type(self, type):
    bundles = {}
    bundle_definitions = self.config.get(type)
    if bundle_definitions is None:
        return bundles
    # bundle name: common
    for bundle_name, paths in bundle_definitions.items():
        bundle_files = []
        # path: static/js/vendor/*.js
        for path in paths:
            # pattern: /tmp/static/js/vendor/*.js
            pattern = abspath = os.path.join(self.basedir, path)
            # assetdir: /tmp/static/js/vendor
            # assetdir contents:
            #   - /tmp/static/js/vendor/t1.js
            #   - /tmp/static/js/vendor/t2.js
            #   - /tmp/static/js/vendor/index.html
            assetdir = os.path.dirname(abspath)
            # expanded_fnames after filtering using the pattern:
            #   - /tmp/static/js/vendor/t1.js
            #   - /tmp/static/js/vendor/t2.js
            fnames = [os.path.join(assetdir, fname) for fname in os.listdir(assetdir)]
            expanded_fnames = fnmatch.filter(fnames, pattern)
            bundle_files.extend(sorted(expanded_fnames))
        bundles[bundle_name] = bundle_files
    return bundles
901,230
Orders population members by fitness score Args: Members (list): list of PyGenetics Member objects Returns: list: ordered list of Members, from highest fitness score to lowest fitness score
def minimize_best_n(Members):
    return list(reversed(sorted(
        Members, key=lambda Member: Member.fitness_score
    )))
901,313
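A short usage sketch, with a hypothetical stand-in class carrying only the fitness_score attribute the function relies on:

class FakeMember:
    # Hypothetical stand-in for a PyGenetics Member.
    def __init__(self, fitness_score):
        self.fitness_score = fitness_score

members = [FakeMember(0.2), FakeMember(0.9), FakeMember(0.5)]
ordered = minimize_best_n(members)
print([m.fitness_score for m in ordered])  # [0.9, 0.5, 0.2]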
Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+') Return: precursor m/z of the compound
def get_precursor_mz(exact_mass, precursor_type):
    # these are just taken from what was present in the massbank .msp file for
    # those missing the exact mass
    d = {'[M-H]-': -1.007276,
         '[M+H]+': 1.007276,
         '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949)}

    try:
        return exact_mass + d[precursor_type]
    except KeyError as e:
        print(e)
        return False
901,519
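A worked example: glucose (C6H12O6) has a monoisotopic mass of about 180.0634 Da, so the adduct offsets above give roughly the following (values rounded here):

exact_mass = 180.0634  # glucose, monoisotopic
print(get_precursor_mz(exact_mass, '[M-H]-'))   # ~179.0561
print(get_precursor_mz(exact_mass, '[M+H]+'))   # ~181.0707
print(get_precursor_mz(exact_mass, '[M+Na]+'))  # unsupported type: prints the KeyError, returns False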
Get line count of file Args: fn (str): Path to file Return: Number of lines in file (int)
def line_count(fn):
    with open(fn) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
901,520
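A quick usage sketch; note the function presumes at least one line, since an empty file never binds the loop variable i and would raise a NameError:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('a\nb\nc\n')

print(line_count(f.name))  # 3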
Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. It only rewrites the metadata required to support a move.
def relocate(self, destination):
    for activate in self.bin.activates:
        activate.vpath = destination
    for binfile in self.bin.files:
        if binfile.shebang and (
                'python' in binfile.shebang or 'pypy' in binfile.shebang):
            binfile.shebang = '#!{0}'.format(
                os.path.join(destination, 'bin', 'python')
            )
901,711
Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual environment to the given path.
def move(self, destination):
    self.relocate(destination)
    shutil.move(self.path, destination)
    self._path = destination
901,712
Build a lined up markdown table. Args: headers (dict): A key -> value pairing of the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string.
def build_markdown_table(headers, rows, row_keys=None):
    row_maxes = _find_row_maxes(headers, rows)
    row_keys = row_keys or [key for key, value in headers.items()]
    table = [
        _build_row(headers, row_maxes, row_keys),
        _build_separator(row_maxes, row_keys)
    ]
    for row in rows:
        table.append(_build_row(row, row_maxes, row_keys))
    return '\n'.join(table) + '\n'
901,814
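A usage sketch for build_markdown_table; the exact column padding comes from the private _find_row_maxes/_build_row helpers (not shown in this record), so the output below is indicative only:

headers = {'name': 'Name', 'type': 'Type'}
rows = [
    {'name': 'db_host', 'type': 'str'},
    {'name': 'db_port', 'type': 'int'},
]
print(build_markdown_table(headers, rows, ['name', 'type']))
# | Name    | Type |
# |---------|------|
# | db_host | str  |
# | db_port | int  |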
Generate Markdown Documentation for the given spec/app name. Args: app_name (str): The name of the application. spec (YapconfSpec): A yapconf specification with sources loaded. Returns (str): A valid, markdown string representation of the documentation for the given specification.
def generate_markdown_doc(app_name, spec):
    # Apply standard headers.
    sections = [
        HEADER.format(app_name=app_name),
        SOURCES_HEADER.format(app_name=app_name)
    ]

    # Generate the sources section of the documentation
    sorted_labels = sorted(list(spec.sources))
    for label in sorted_labels:
        sections.append(
            _generate_source_section(label, spec.sources[label], app_name)
        )

    # Generate the config section.
    sections.append(CONFIG_HEADER.format(app_name=app_name))
    table_rows, item_sections = _generate_item_sections(
        _sorted_dict_values(spec.items), app_name
    )
    headers = {
        'name': 'Name',
        'type': 'Type',
        'default': 'Default',
        'description': 'Description'
    }
    sections.append(
        build_markdown_table(
            headers,
            table_rows,
            ['name', 'type', 'default', 'description'],
        )
    )
    for item_section in item_sections:
        sections.append(item_section)

    return '\n'.join([section for section in sections])
901,823
Output the data in the dataframe's 'image' column to a directory structured by project->sample and named by frame Args: path (str): Where to write the directory of images suffix (str): for labeling the images you write format (str): default 'png' format to write the file overwrite (bool): default False. if true can overwrite files in the path Modifies: Creates path folder if necessary and writes images to path
def write_to_path(self, path, suffix='', format='png', overwrite=False):
    if os.path.exists(path) and overwrite is False:
        raise ValueError("Error: use overwrite=True to overwrite images")
    if not os.path.exists(path):
        os.makedirs(path)
    for i, r in self.iterrows():
        spath = os.path.join(path, r['project_name'], r['sample_name'])
        if not os.path.exists(spath):
            os.makedirs(spath)
        if suffix == '':
            fname = os.path.join(spath, r['frame_name'] + '.' + format)
        else:
            fname = os.path.join(spath, r['frame_name'] + '_' + suffix + '.' + format)
        imageio.imwrite(fname, r['image'], format=format)
901,829
Parameter object Args: name (str): name of the parameter min_val (int or float): minimum allowed value for the parameter max_val (int or float): maximum allowed value for the parameter
def __init__(self, name, min_val, max_val):
    self.name = name
    self.min_val = min_val
    self.max_val = max_val
    if type(min_val) != type(max_val):
        raise ValueError('Supplied min_val is not the same type as '
                         'supplied max_val: {}, {}'.format(
                             type(min_val), type(max_val)))
    self.dtype = type(min_val + max_val)
    if self.dtype not in SUPPORTED_DTYPES:
        raise ValueError('Unsupported data type: use {}'
                         .format(SUPPORTED_DTYPES))
901,928
Member object Args: parameters (dictionary): dictionary of parameter names and values cost_fn_val (float): value returned by cost function using params
def __init__(self, parameters, cost_fn_val):
    self.parameters = parameters
    self.cost_fn_val = cost_fn_val
    self.fitness_score = self.__calc_fitness_score(cost_fn_val)
901,929
Adds a parameter to the Population Args: name (str): name of the parameter min_val (int or float): minimum value for the parameter max_val (int or float): maximum value for the parameter
def add_parameter(self, name, min_val, max_val):
    self.__parameters.append(Parameter(name, min_val, max_val))
901,936
Private, static method: mutates parameter Args: value (int or float): current value for Member's parameter param (Parameter): parameter object mut_rate (float): mutation rate of the value max_mut_amt (float): maximum mutation amount of the value Returns: int or float: mutated value
def __mutate_parameter(value, param, mut_rate, max_mut_amt):
    if uniform(0, 1) < mut_rate:
        mut_amt = uniform(0, max_mut_amt)
        op = choice((add, sub))
        new_val = op(value, param.dtype(
            (param.max_val - param.min_val) * mut_amt
        ))
        if new_val > param.max_val:
            return param.max_val
        elif new_val < param.min_val:
            return param.min_val
        else:
            return new_val
    else:
        return value
901,939
Get the path of a command in the virtual environment if it exists. Args: cmd (str): The command to look for. Returns: str: The full path to the command. Raises: ValueError: If the command is not present.
def cmd_path(self, cmd):
    for binscript in self.bin.files:
        if binscript.path.endswith('/{0}'.format(cmd)):
            return binscript.path
    raise ValueError('The command {0} was not found.'.format(cmd))
902,202
Handle the new configuration. Args: new_config (dict): The new configuration
def handle_config_change(self, new_config):
    if self.user_handler:
        self.user_handler(self.current_config, new_config)
    self._call_spec_handlers(new_config)
    self.current_config = copy.deepcopy(new_config)
902,330
Sets up serial port by connecting to physical or software port. Depending on command line options, this function will either connect to a SerialTestClass() port for loopback testing or to the specified port from the command line option. If loopback is True it overrides the physical port specification. Args: loopback: argparse option port: argparse option Returns: serialPort: Pyserial serial port instance
def setupSerialPort(loopback, port):
    if loopback:
        # Implement loopback software serial port
        testSerial = SerialTestClass()
        serialPort = testSerial.serialPort
    else:
        # TODO enable serial port command line options (keep simple for user!)
        serialPort = serial.Serial(port, 115200, timeout=0)
    return serialPort
902,897
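A usage sketch, assuming the module's SerialTestClass is importable; the device path is a placeholder:

# Software loopback for testing (no hardware needed).
test_port = setupSerialPort(loopback=True, port=None)

# Real device (hypothetical path; on Windows this might be 'COM3').
# hw_port = setupSerialPort(loopback=False, port='/dev/ttyUSB0')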
Rewrite a single line in the file. Args: line (str): The new text to write to the file. line_number (int): The line of the file to rewrite. Numbering starts at 0.
def writeline(self, line, line_number):
    tmp_file = tempfile.TemporaryFile('w+')
    if not line.endswith(os.linesep):
        line += os.linesep
    try:
        with open(self.path, 'r') as file_handle:
            for count, new_line in enumerate(file_handle):
                if count == line_number:
                    new_line = line
                tmp_file.write(new_line)
        tmp_file.seek(0)
        with open(self.path, 'w') as file_handle:
            for new_line in tmp_file:
                file_handle.write(new_line)
    finally:
        tmp_file.close()
903,085
Load an object. Args: obj (str|object): Load the indicated object if this is a string; otherwise, return the object as is. To load a module, pass a dotted path like 'package.module'; to load an object from a module pass a path like 'package.module:name'. Returns: object
def load_object(obj) -> object:
    if isinstance(obj, str):
        if ':' in obj:
            module_name, obj_name = obj.split(':')
            if not module_name:
                module_name = '.'
        else:
            # No object named; obj_name must be set so the check below is defined.
            module_name, obj_name = obj, None
        obj = importlib.import_module(module_name)
        if obj_name:
            attrs = obj_name.split('.')
            for attr in attrs:
                obj = getattr(obj, attr)
    return obj
903,116
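Usage examples for load_object, using only standard-library names:

# Load a module by dotted path.
os_path = load_object('os.path')

# Load an object from a module with the 'module:name' form.
join = load_object('os.path:join')
print(join('a', 'b'))  # a/b

# Non-string inputs are returned unchanged.
assert load_object(join) is join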
Create a YAML object for loading a YAML configuration. Args: modules_to_register: Modules containing classes to be registered with the YAML object. Default: None. classes_to_register: Classes to be registered with the YAML object. Default: None. Returns: A newly created YAML object, configured as appropriate.
def yaml(modules_to_register: Iterable[Any] = None, classes_to_register: Iterable[Any] = None) -> ruamel.yaml.YAML:
    # Define a round-trip yaml object for us to work with. This object should be
    # imported by other modules.
    # NOTE: "typ" is not a typo. It stands for "type".
    yaml = ruamel.yaml.YAML(typ = "rt")

    # Register representers and constructors
    # Numpy
    yaml.representer.add_representer(np.ndarray, numpy_to_yaml)
    yaml.constructor.add_constructor("!numpy_array", numpy_from_yaml)
    # Register external classes
    yaml = register_module_classes(yaml = yaml, modules = modules_to_register)
    yaml = register_classes(yaml = yaml, classes = classes_to_register)

    return yaml
903,446
Update the library meta data from the current line being parsed Args: line (str): The current line of the file being parsed
def _update_libdata(self, line):
    ####################################################
    # parse MONA Comments line
    ####################################################
    # The mona msp files contain a "comments" line that contains lots of other
    # information normally separated into quoted "" sections
    if re.match('^Comment.*$', line, re.IGNORECASE):
        comments = re.findall('"([^"]*)"', line)
        for c in comments:
            self._parse_meta_info(c)
            self._parse_compound_info(c)

    ####################################################
    # parse meta and compound info lines
    ####################################################
    # check the current line for both general meta data
    # and compound information
    self._parse_meta_info(line)
    self._parse_compound_info(line)

    ####################################################
    # End of meta data
    ####################################################
    # Most MSP files have a standard line of text before the spectra information
    # begins. Here we check for this line and store the relevant details for the
    # compound and meta information to be ready for insertion into the database
    if self.collect_meta and (re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or
                              re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE) or
                              re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE)):
        self._store_compound_info()
        self._store_meta_info()

        # Reset the temp meta and compound information
        self.meta_info = get_blank_dict(self.meta_regex)
        self.compound_info = get_blank_dict(self.compound_regex)
        self.other_names = []
        self.collect_meta = False

    # ignore additional information in the 3rd column if using the MassBank spectra schema
    if re.match('^PK\$PEAK: m/z int\. rel\.int\.$', line, re.IGNORECASE):
        self.ignore_additional_spectra_info = True

    # Check if annotation or spectra is to be in the next lines to be parsed
    if re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or \
            re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE):
        self.start_spectra = True
        return
    elif re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE):
        self.start_spectra_annotation = True
        match = re.match('^PK\$ANNOTATION:(.*)', line, re.IGNORECASE)
        columns = match.group(1)
        cl = columns.split()
        self.spectra_annotation_indexes = {i: cl.index(i) for i in cl}
        return

    ####################################################
    # Process annotation details
    ####################################################
    # e.g. molecular formula for each peak in the spectra
    if self.start_spectra_annotation:
        self._parse_spectra_annotation(line)

    ####################################################
    # Process spectra
    ####################################################
    if self.start_spectra:
        self._parse_spectra(line)
903,477
Parse and extract any other names that might be recorded for the compound Args: line (str): line of the msp file
def _get_other_names(self, line):
    m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE)
    if m:
        self.other_names.append(m.group(1).strip())
903,484
Parse and extract all meta data by looping through the dictionary of meta_info regexes; updates self.meta_info Args: line (str): line of the msp file
def _parse_meta_info(self, line):
    if self.mslevel:
        self.meta_info['ms_level'] = self.mslevel
    if self.polarity:
        self.meta_info['polarity'] = self.polarity
    for k, regexes in six.iteritems(self.meta_regex):
        for reg in regexes:
            m = re.search(reg, line, re.IGNORECASE)
            if m:
                self.meta_info[k] = m.group(1).strip()
903,485
Parse and extract all compound data by looping through the dictionary of compound_info regexes; updates self.compound_info Args: line (str): line of the msp file
def _parse_compound_info(self, line):
    for k, regexes in six.iteritems(self.compound_regex):
        for reg in regexes:
            if self.compound_info[k]:
                continue
            m = re.search(reg, line, re.IGNORECASE)
            if m:
                self.compound_info[k] = m.group(1).strip()
    self._get_other_names(line)
903,486
Insert data stored in the current chunk of parsing into the selected database Args: remove_data (boolean): Remove the data stored within the LibraryData object for the current chunk of processing db_type (str): The type of database to submit to: either 'sqlite', 'mysql' or 'django_mysql' [default sqlite]
def insert_data(self, remove_data=False, db_type='sqlite'):
    if self.update_source:
        # print "insert ref id"
        import msp2db
        self.c.execute(
            "INSERT INTO library_spectra_source (id, name, parsing_software) VALUES"
            " ({a}, '{b}', 'msp2db-v{c}')".format(a=self.current_id_origin,
                                                  b=self.source,
                                                  c=msp2db.__version__))
        self.conn.commit()

    if self.compound_info_all:
        self.compound_info_all = _make_sql_compatible(self.compound_info_all)
        cn = ', '.join(self.compound_info.keys()) + ',created_at,updated_at'
        insert_query_m(self.compound_info_all, columns=cn, conn=self.conn,
                       table='metab_compound', db_type=db_type)

    self.meta_info_all = _make_sql_compatible(self.meta_info_all)
    cn = 'id,' + ', '.join(self.meta_info.keys()) + ',library_spectra_source_id, inchikey_id'
    insert_query_m(self.meta_info_all, columns=cn, conn=self.conn,
                   table='library_spectra_meta', db_type=db_type)

    cn = "id, mz, i, other, library_spectra_meta_id"
    insert_query_m(self.spectra_all, columns=cn, conn=self.conn,
                   table='library_spectra', db_type=db_type)

    if self.spectra_annotation_all:
        cn = "id, mz, tentative_formula, mass_error, library_spectra_meta_id"
        insert_query_m(self.spectra_annotation_all, columns=cn, conn=self.conn,
                       table='library_spectra_annotation', db_type=db_type)

    # self.conn.close()
    if remove_data:
        self.meta_info_all = []
        self.spectra_all = []
        self.spectra_annotation_all = []
        self.compound_info_all = []
        self._get_current_ids(source=False)
903,487
Checks if given hook module has been loaded Args: name (str): The name of the module to check Returns: bool. The return code:: True -- Loaded False -- Not Loaded
def isloaded(self, name):
    if name is None:
        return True
    if isinstance(name, str):
        return (name in [x.__module__ for x in self])
    if isinstance(name, Iterable):
        return set(name).issubset([x.__module__ for x in self])
    return False
903,735
Object Model for CSH LDAP groups. Arguments: lib -- handle to a CSHLDAP instance search_val -- the cn of the LDAP group to bind to
def __init__(self, lib, search_val):
    self.__dict__['__lib__'] = lib
    self.__dict__['__con__'] = lib.get_con()

    res = self.__con__.search_s(
        self.__ldap_group_ou__,
        ldap.SCOPE_SUBTREE,
        "(cn=%s)" % search_val,
        ['cn'])
    if res:
        self.__dict__['__dn__'] = res[0][0]
    else:
        raise KeyError("Invalid Search Name")
904,128
Check if a Member is in the bound group. Arguments: member -- the CSHMember object (or distinguished name) of the member to check against Keyword arguments: dn -- whether or not member is a distinguished name
def check_member(self, member, dn=False):
    if dn:
        # member is already a distinguished name here; the dn flag itself is a bool.
        res = self.__con__.search_s(
            self.__dn__,
            ldap.SCOPE_BASE,
            "(member=%s)" % member,
            ['ipaUniqueID'])
    else:
        res = self.__con__.search_s(
            self.__dn__,
            ldap.SCOPE_BASE,
            "(member=%s)" % member.get_dn(),
            ['ipaUniqueID'])
    return len(res) > 0
904,130
Add a member to the bound group Arguments: member -- the CSHMember object (or distinguished name) of the member Keyword arguments: dn -- whether or not member is a distinguished name
def add_member(self, member, dn=False):
    if dn:
        if self.check_member(member, dn=True):
            return
        mod = (ldap.MOD_ADD, 'member', member.encode('ascii'))
    else:
        if self.check_member(member):
            return
        mod = (ldap.MOD_ADD, 'member', member.get_dn().encode('ascii'))

    if self.__lib__.__batch_mods__:
        self.__lib__.enqueue_mod(self.__dn__, mod)
    elif not self.__lib__.__ro__:
        mod_attrs = [mod]
        self.__con__.modify_s(self.__dn__, mod_attrs)
    else:
        print("ADD VALUE member = {} FOR {}".format(mod[2], self.__dn__))
904,131
Creates a subscriber binding to the given address and subscribing to the given topics. The callback is invoked for every message received. Args: - address: the address to bind the SUB socket to. - topics: the topics to subscribe to - callback: the callback to invoke for every message. Must accept 2 variables - topic and message - message_type: the type of message to receive
def subscriber(address, topics, callback, message_type):
    return Subscriber(address, topics, callback, message_type)
904,324
Send the message on the socket. Args: - message: the message to publish - message_type: the type of message being sent - topic: the topic on which to send the message. Defaults to ''.
def send(self, message, message_type, topic=''):
    if message_type == RAW:
        self._sock.send(message)
    elif message_type == PYOBJ:
        self._sock.send_pyobj(message)
    elif message_type == JSON:
        self._sock.send_json(message)
    elif message_type == MULTIPART:
        self._sock.send_multipart([topic, message])
    elif message_type == STRING:
        self._sock.send_string(message)
    elif message_type == UNICODE:
        self._sock.send_unicode(message)
    else:
        raise Exception("Unknown message type %s" % (message_type,))
904,360
Receive the message of the specified type and return it Args: - message_type: the type of the message to receive Returns: - the topic of the message - the message received from the socket
def receive(self, message_type):
    topic = None
    message = None
    if message_type == RAW:
        message = self._sock.recv(flags=zmq.NOBLOCK)
    elif message_type == PYOBJ:
        message = self._sock.recv_pyobj(flags=zmq.NOBLOCK)
    elif message_type == JSON:
        message = self._sock.recv_json(flags=zmq.NOBLOCK)
    elif message_type == MULTIPART:
        data = self._sock.recv_multipart(flags=zmq.NOBLOCK)
        message = data[1]
        topic = data[0]
    elif message_type == STRING:
        message = self._sock.recv_string(flags=zmq.NOBLOCK)
    elif message_type == UNICODE:
        message = self._sock.recv_unicode(flags=zmq.NOBLOCK)
    else:
        raise Exception("Unknown message type %s" % (self._message_type,))
    return (topic, message)
904,361
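Because every recv call above passes zmq.NOBLOCK, receive() raises zmq.Again when no message is waiting, so callers typically poll in a try/except. A sketch, assuming sub is an already-connected instance exposing this method and STRING is the module's message-type constant:

import zmq

try:
    topic, message = sub.receive(STRING)
except zmq.Again:
    pass  # no message available yet; try again later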
Check if the observation table is closed. Args: None Returns: tuple (bool, str): True if the observation table is closed and false otherwise. If the table is not closed the escaping string is returned.
def is_closed(self):
    old_training_data = self.training_data
    self.training_data = {x: [] for x in self.sm_vector}
    for t in self.smi_vector:
        src_state = t[:-1]
        symbol = t[-1:]
        found = False
        for dst_state in self.sm_vector:
            if self.observation_table[dst_state] == self.observation_table[t]:
                self._add_training_data(src_state, dst_state, symbol)
                found = True
                break
        if not found:
            return False, t

    assert self.training_data != old_training_data, \
        "No update happened from previous round. The algo will loop infinitely"
    return True, None
904,764
Fill an entry of the observation table. Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None
def _fill_table_entry(self, row, col):
    self.observation_table[row, col] = self._membership_query(row + col)
904,767
Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatenated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Return: str: The access string
def _run_in_hypothesis(self, mma, w_string, index):
    state = mma.states[0]
    s_index = 0
    for i in range(index):
        for arc in state:
            if arc.guard.is_sat(w_string[i]):
                state = mma.states[arc.dst_state]
                s_index = arc.dst_state

    # The id of the state is its index inside the Sm list
    access_string = self.observation_table.sm_vector[s_index]
    logging.debug(
        'Access string for %d: %s - %d ', index, access_string, s_index)
    return access_string
904,768
Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Return: None
def _process_counter_example(self, mma, w_string):
    if len(w_string) == 1:
        self.observation_table.smi_vector.append(w_string)
        for exp in self.observation_table.em_vector:
            self._fill_table_entry(w_string, exp)

    # Binary search for the breakpoint (integer division keeps i usable as an index).
    diff = len(w_string)
    same = 0
    membership_answer = self._membership_query(w_string)
    while True:
        i = (same + diff) // 2
        access_string = self._run_in_hypothesis(mma, w_string, i)
        if membership_answer != self._membership_query(access_string + w_string[i:]):
            diff = i
        else:
            same = i
        if diff - same == 1:
            break

    # First check if the transition is part of our training data.
    access_string = self._run_in_hypothesis(mma, w_string, diff - 1)
    wrong_transition = access_string + w_string[diff - 1]
    if wrong_transition not in self.observation_table.smi_vector:
        # If the transition is not part of our training data add s_ib to Smi and
        # return to checking table closedness.
        self.observation_table.smi_vector.append(wrong_transition)
        for exp in self.observation_table.em_vector:
            self._fill_table_entry(wrong_transition, exp)
        return

    # This point presents a tradeoff between equivalence and membership
    # queries. If the transition at the counterexample's breakpoint is not
    # part of our current training data (i.e. s_ib is not part of our Smi
    # set), then we assume a wrong transition and return to checking table
    # closure by adding s_ib to our training data. This saves a number of
    # membership queries since we don't add a row to our table unless
    # absolutely necessary. Notice that even if equivalence queries are
    # expensive in general, caching the result will be able to discover that
    # this iteration required a new state in the next equivalence query.
    exp = w_string[diff:]
    self.observation_table.em_vector.append(exp)
    for row in self.observation_table.sm_vector + self.observation_table.smi_vector:
        self._fill_table_entry(row, exp)
904,769
Utilize the observation table to construct an SFA hypothesis. Args: None Returns: SFA: An SFA built based on a closed and consistent observation table.
def get_sfa_conjecture(self):
    sfa = SFA(self.alphabet)
    for s in self.observation_table.sm_vector:
        transitions = self._get_predicate_guards(
            s, self.observation_table.training_data[s])
        for (t, pred) in transitions:
            src_id = self.observation_table.sm_vector.index(s)
            dst_id = self.observation_table.sm_vector.index(t)
            assert isinstance(
                pred, SetPredicate), "Invalid type for predicate {}".format(pred)
            sfa.add_arc(src_id, dst_id, pred)

    # Mark the final states in the hypothesis automaton.
    i = 0
    for s in self.observation_table.sm_vector:
        sfa.states[i].final = self.observation_table[s, self.epsilon]
        i += 1
    return sfa
904,771
Initializes table from a DFA Args: mma: The input automaton Returns: None
def _init_table_from_dfa(self, mma):
    observation_table_init = ObservationTableInit(self.epsilon, self.alphabet)
    sm_vector, smi_vector, em_vector = observation_table_init.initialize(mma, True)
    self.observation_table.sm_vector = sm_vector
    self.observation_table.smi_vector = smi_vector
    self.observation_table.em_vector = em_vector

    logging.info('Initialized from DFA em_vector table is the following:')
    logging.info(em_vector)

    self._fill_table_entry(self.epsilon, self.epsilon)
    # list(set(...)) is used to remove duplicates, [1:] to remove epsilon
    for row in sorted(list(set(sm_vector + smi_vector)), key=len)[1:]:
        for column in em_vector:
            self._fill_table_entry(str(row), str(column))
904,773
Implements the high level loop of the algorithm for learning an SFA. Args: mma: an optional input automaton used to initialize the observation table Returns: SFA: A model for the SFA to be learned.
def learn_sfa(self, mma=None):
    logging.info('Initializing learning procedure.')
    if mma:
        self._init_table_from_dfa(mma)
    else:
        self._init_table()

    logging.info('Generating a closed and consistent observation table.')
    while True:
        closed = False
        # Make sure that the table is closed
        while not closed:
            logging.debug('Checking if table is closed.')
            closed, s = self.observation_table.is_closed()
            if not closed:
                logging.debug('Closing table.')
                self._ot_make_closed(s)
            else:
                logging.debug('Table closed.')

        # Create conjecture
        sfa = self.get_sfa_conjecture()
        logging.info('Generated conjecture machine with %d states.',
                     len(list(sfa.states)))

        # Check correctness
        logging.debug('Running equivalence query.')
        found, counter_example = self._equivalence_query(sfa)

        # Are we done?
        if found:
            logging.info('No counterexample found. Hypothesis is correct!')
            break

        # Add the new experiments into the table to reiterate the learning loop
        logging.info(
            'Processing counterexample %s with length %d.',
            counter_example, len(counter_example))
        self._process_counter_example(sfa, counter_example)

    logging.info('Learning complete.')
    return '', sfa
904,774
Run the excel_to_html function from the command-line. Args: -p path to file -s name of the sheet to convert -css classes to apply -m attempt to combine merged cells -c caption for accessibility -su summary for accessibility -d details for accessibility Example use: excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
def run_excel_to_html():
    # Capture commandline arguments. prog='' argument must
    # match the command name in setup.py entry_points
    parser = argparse.ArgumentParser(prog='excel_to_html')
    parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.')
    parser.add_argument(
        '-s',
        nargs='?',
        help='The name of a sheet in our excel file. Defaults to "Sheet1".',
    )
    parser.add_argument(
        '-css', nargs='?', help='Space separated css classes to append to the table.'
    )
    parser.add_argument(
        '-m', action='store_true', help='Merge, attempt to combine merged cells.'
    )
    parser.add_argument(
        '-c', nargs='?', help='Caption for creating an accessible table.'
    )
    parser.add_argument(
        '-d',
        nargs='?',
        help='Two strings separated by a | character. The first string '
             'is for the html "summary" attribute and the second string is for the html '
             '"details" attribute. Both values must be provided and nothing more.',
    )
    parser.add_argument(
        '-r', action='store_true', help='Row headers. Does the table have row headers?'
    )
    args = parser.parse_args()
    inputs = {
        'p': args.p,
        's': args.s,
        'css': args.css,
        'm': args.m,
        'c': args.c,
        'd': args.d,
        'r': args.r,
    }

    p = inputs['p']
    s = inputs['s'] if inputs['s'] else 'Sheet1'
    css = inputs['css'] if inputs['css'] else ''
    m = inputs['m'] if inputs['m'] else False
    c = inputs['c'] if inputs['c'] else ''
    d = inputs['d'].split('|') if inputs['d'] else []
    r = inputs['r'] if inputs['r'] else False

    html = fp.excel_to_html(
        p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m
    )
    print(html)
905,390
Checks whether a point is on the curve. Args: point (AffinePoint): Point to be checked. Returns: bool: True if point is on the curve, False otherwise.
def is_on_curve(self, point):
    X, Y = point.X, point.Y
    return (
        pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b
    ) % self.P == 0
905,413
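A toy check of the same congruence, using the secp256k1 equation y^2 = x^3 + 7 over the deliberately tiny field F_17 (parameters chosen here only for illustration):

a, b, P = 0, 7, 17
X, Y = 1, 5  # 5**2 = 25 ≡ 8, and 1**3 + 7 = 8 (mod 17)
print((pow(Y, 2, P) - pow(X, 3, P) - a * X - b) % P == 0)  # True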
Generates a private key from random data. SHA-256 is a member of the SHA-2 cryptographic hash functions designed by the NSA. SHA stands for Secure Hash Algorithm. 4096 random bytes are base64-encoded, converted to bytes and hashed with SHA-256. The binary output is converted to a hex representation. Returns: bytes: The hexadecimal representation of the hashed binary data.
def generate_private_key(self):
    random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')
    binary_data = bytes(random_string, 'utf-8')
    hash_object = hashlib.sha256(binary_data)
    message_digest_bin = hash_object.digest()
    message_digest_hex = binascii.hexlify(message_digest_bin)
    return message_digest_hex
905,415
Determines the slope between this point and another point. Args: other (AffinePoint): The second point. Returns: int: Slope between self and other.
def slope(self, other):
    X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y
    Y3 = Y1 - Y2
    X3 = X1 - X2
    return (Y3 * self.inverse(X3)) % self.P
905,429
Publish the message on the PUB socket with the given topic name. Args: - message: the message to publish - message_type: the type of message being sent - topic: the topic on which to send the message. Defaults to ''.
def publish(self, message, message_type, topic=''):
    if message_type == MULTIPART:
        raise Exception("Unsupported request type")
    super(Publisher, self).send(message, message_type, topic)
905,469
Create the trie for betacode conversion. Args: strict: Flag to allow for flexible diacritic order on input. Returns: The trie for conversion.
def _create_conversion_trie(strict):
    t = pygtrie.CharTrie()

    for beta, uni in _map.BETACODE_MAP.items():
        if strict:
            t[beta] = uni
        else:
            # The order of accents is very strict and weak. Allow for many orders of
            # accents between asterisk and letter or after letter. This does not
            # introduce ambiguity since each betacode token only has one letter and
            # either starts with an asterisk or a letter.
            diacritics = beta[1:]
            perms = itertools.permutations(diacritics)
            for perm in perms:
                perm_str = beta[0] + ''.join(perm)
                t[perm_str.lower()] = uni
                t[perm_str.upper()] = uni

    return t
905,656
Converts the given text from betacode to unicode. Args: text: The beta code text to convert. All of this text must be betacode. strict: Flag to allow for flexible diacritic order on input. Returns: The converted text.
def beta_to_uni(text, strict=False):
    # Check if the requested configuration for conversion already has a trie
    # stored; otherwise build it.
    param_key = (strict,)
    try:
        t = _BETA_CONVERSION_TRIES[param_key]
    except KeyError:
        t = _create_conversion_trie(*param_key)
        _BETA_CONVERSION_TRIES[param_key] = t

    transform = []
    idx = 0
    possible_word_boundary = False

    while idx < len(text):
        if possible_word_boundary and _penultimate_sigma_word_final(transform):
            transform[-2] = _FINAL_LC_SIGMA

        step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])

        if step:
            possible_word_boundary = text[idx] in _BETA_PUNCTUATION
            key, value = step
            transform.append(value)
            idx += len(key)
        else:
            possible_word_boundary = True
            transform.append(text[idx])
            idx += 1

    # Check one last time in case there is some whitespace or punctuation at the
    # end and check if the last character is a sigma.
    if possible_word_boundary and _penultimate_sigma_word_final(transform):
        transform[-2] = _FINAL_LC_SIGMA
    elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
        transform[-1] = _FINAL_LC_SIGMA

    converted = ''.join(transform)
    return converted
905,659
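For example, the opening words of the Iliad in betacode convert as follows (output shown assuming the package's standard betacode mapping):

print(beta_to_uni('mh=nin a)/eide qea/'))
# μῆνιν ἄειδε θεά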
Convert unicode text to a betacode equivalent. This method can handle tónos or oxeîa characters in the input. Args: text: The text to convert to betacode. This text does not have to all be Greek polytonic text, and only Greek characters will be converted. Note that in this case, you cannot convert to beta and then back to unicode. Returns: The betacode equivalent of the inputted text where applicable.
def uni_to_beta(text):
    u = _UNICODE_MAP
    transform = []

    for ch in text:
        try:
            conv = u[ch]
        except KeyError:
            conv = ch
        transform.append(conv)

    converted = ''.join(transform)
    return converted
905,660
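And the reverse direction, again assuming the standard mapping, round-trips the same phrase:

print(uni_to_beta('μῆνιν ἄειδε θεά'))
# mh=nin a)/eide qea/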
Gets the return string for a language that's supported by python. Used in cases when python provides support for the conversion. Args: language: string, the language to return for. level: integer, the indentation level. data: python data structure being converted (list of tuples) Returns: None, updates self.data_structure
def get_built_in(self, language, level, data):
    # Language is python
    pp = pprint.PrettyPrinter(indent=level)
    lookup = {'python': pp.pformat(data),
              'json': str(json.dumps(data, sort_keys=True, indent=level,
                                     separators=(',', ': ')))}
    self.data_structure = lookup[language]
905,669
Helper function that tries to load a filepath (or python module notation) as a python module and on failure `exec` it. Args: path (str): Path or module to load The function tries to import `example.module` when either `example.module`, `example/module` or `example/module.py` is given.
def load(path):
    importpath = path.replace("/", ".").replace("\\", ".")
    if importpath[-3:] == ".py":
        importpath = importpath[:-3]
    try:
        importlib.import_module(importpath)
    except (ModuleNotFoundError, TypeError):
        exec(open(path).read())
905,778
Parse a GPX file into a GpxModel. Args: xml: A file-like-object opened in binary mode - that is containing bytes rather than characters. The root element of the XML should be a <gpx> element containing a version attribute. GPX version 1.0 is supported. Returns: A GpxModel representing the data from the supplied xml. Raises: ValueError: The supplied XML could not be parsed as GPX.
def parse_gpx(gpx_element, gpxns=None): gpxns = gpxns if gpxns is not None else determine_gpx_namespace(gpx_element) if gpx_element.tag != gpxns+'gpx': raise ValueError("No gpx root element") get_text = lambda tag: optional_text(gpx_element, gpxns+tag) version = gpx_element.attrib['version'] if not version.startswith('1.0'): raise ValueError("Not a GPX 1.0 file") creator = gpx_element.attrib['creator'] name = get_text('name') description = get_text('desc') author_name = get_text('author') email = get_text('email') author = Person(author_name, email) url = get_text('url') urlname = get_text('urlname') links = make_links(url, urlname) time = get_text('time') keywords = get_text('keywords') bounds_element = gpx_element.find(gpxns+'bounds') bounds = nullable(parse_bounds)(bounds_element) metadata = Metadata(name=name, description=description, author=author, links=links, time=time, keywords=keywords, bounds=bounds) waypoint_elements = gpx_element.findall(gpxns+'wpt') waypoints = [parse_waypoint(waypoint_element, gpxns) for waypoint_element in waypoint_elements] route_elements = gpx_element.findall(gpxns+'rte') routes = [parse_route(route_element, gpxns) for route_element in route_elements] track_elements = gpx_element.findall(gpxns+'trk') tracks = [parse_track(track_element, gpxns) for track_element in track_elements] # TODO : Private elements gpx_model = GpxModel(creator, metadata, waypoints, routes, tracks) return gpx_model
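A minimal calling sketch, assuming a GPX 1.0 file named 'track.gpx' (hypothetical) parsed with the standard library:

    import xml.etree.ElementTree as ET

    tree = ET.parse('track.gpx')
    model = parse_gpx(tree.getroot())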
906,051
Send a request message of the given type. Args: - message: the message to publish - message_type: the type of message being sent
def request(self, message, message_type): if message_type == MULTIPART: raise Exception("Unsupported request type") super(Requestor, self).send(message, message_type)
906,255
Construct an HTTP request. Args: uri: The full path or partial path as a Uri object or a string. method: The HTTP method for the request, examples include 'GET', 'POST', etc. headers: dict of strings. The HTTP headers to include in the request.
def __init__(self, uri=None, method=None, headers=None): self.headers = headers or {} self._body_parts = [] if method is not None: self.method = method if isinstance(uri, (str, unicode)): uri = Uri.parse_uri(uri) self.uri = uri or Uri() self.headers['MIME-version'] = '1.0' self.headers['Connection'] = 'close'
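A construction sketch, assuming the enclosing class is named HttpRequest (the class name is not shown in this snippet):

    request = HttpRequest('https://example.com/feed', method='GET',
                          headers={'Accept': 'application/atom+xml'})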
906,350
Opens a socket connection to the server to set up an HTTP request. Args: uri: The full URL for the request as a Uri object. headers: A dict of string pairs containing the HTTP headers for the request.
def _get_connection(self, uri, headers=None): connection = None if uri.scheme == 'https': if not uri.port: connection = httplib.HTTPSConnection(uri.host) else: connection = httplib.HTTPSConnection(uri.host, int(uri.port)) else: if not uri.port: connection = httplib.HTTPConnection(uri.host) else: connection = httplib.HTTPConnection(uri.host, int(uri.port)) return connection
906,364
Makes an HTTP request using httplib. Args: method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc. uri: str or atom.http_core.Uri headers: dict of strings mapping to strings which will be sent as HTTP headers in the request. body_parts: list of strings, objects with a read method, or objects which can be converted to strings using str. Each of these will be sent in order as the body of the HTTP request.
def _http_request(self, method, uri, headers=None, body_parts=None): if isinstance(uri, (str, unicode)): uri = Uri.parse_uri(uri) connection = self._get_connection(uri, headers=headers) if self.debug: connection.debuglevel = 1 if connection.host != uri.host: connection.putrequest(method, str(uri)) else: connection.putrequest(method, uri._get_relative_path()) # Overcome a bug in Python 2.4 and 2.5 # httplib.HTTPConnection.putrequest adding # HTTP request header 'Host: www.google.com:443' instead of # 'Host: www.google.com', and thus resulting the error message # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. if (uri.scheme == 'https' and int(uri.port or 443) == 443 and hasattr(connection, '_buffer') and isinstance(connection._buffer, list)): header_line = 'Host: %s:443' % uri.host replacement_header_line = 'Host: %s' % uri.host try: connection._buffer[connection._buffer.index(header_line)] = ( replacement_header_line) except ValueError: # header_line missing from connection._buffer pass # Send the HTTP headers. for header_name, value in headers.iteritems(): connection.putheader(header_name, value) connection.endheaders() # If there is data, send it in the request. if body_parts: for part in body_parts: _send_data_part(part, connection) # Return the HTTP Response from the server. return connection.getresponse()
906,365
Creates a new event. `event` may be an iterable or a string. Args: event (str): Name of event to declare Kwargs: help (str): Help string for the event Raises: TypeError **Please** describe the event and its calling arguments in the help string.
def append(self, event, help=""): if isinstance(event, str): self._events[event] = HookList(is_waterfall=self.is_waterfall) self._help[event] = (help, getframeinfo(stack()[1][0])) if not help: logger.warning("Great, don't say anything about your hooks and wait for plugin creators to figure it out.") elif isinstance(event, Iterable): # Deprecated: it does not give the ability to pass a help string. # TODO: Remove this for name in event: self.append(name) else: raise TypeError("Invalid event name!")
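A usage sketch, assuming events is an instance of the enclosing container class (the instance name is hypothetical):

    events.append('on_connect', help="Fired after the client connects; "
                                     "handlers receive the session object.")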
906,403
Object Model for CSH LDAP users. Arguments: lib -- handle to a CSHLDAP instance search_val -- the uuid (or uid) of the member to bind to uid -- whether or not search_val is a uid
def __init__(self, lib, search_val, uid): self.__dict__['__lib__'] = lib self.__dict__['__con__'] = lib.get_con() res = None if uid: res = self.__con__.search_s( self.__ldap_user_ou__, ldap.SCOPE_SUBTREE, "(uid=%s)" % search_val, ['ipaUniqueID']) else: res = self.__con__.search_s( self.__ldap_user_ou__, ldap.SCOPE_SUBTREE, "(ipaUniqueID=%s)" % search_val, ['uid']) if res: self.__dict__['__dn__'] = res[0][0] else: raise KeyError("Invalid Search Name")
906,593
Get whether or not the bound CSH LDAP member object is part of a group. Arguments: group -- the CSHGroup object (or distinguished name) of the group to check membership for dn -- whether or not group is passed as a distinguished name
def in_group(self, group, dn=False): if dn: return group in self.groups() return group.check_member(self)
906,595
Get a CSHMember object. Arguments: val -- the iButton ID of the member Returns: None if the iButton supplied does not correspond to a CSH Member
def get_member_ibutton(self, val): members = self.__con__.search_s( CSHMember.__ldap_user_ou__, ldap.SCOPE_SUBTREE, "(ibutton=%s)" % val, ['ipaUniqueID']) if members: return CSHMember( self, members[0][1]['ipaUniqueID'][0].decode('utf-8'), False) return None
907,020
Get a CSHMember object. Arguments: slack -- the Slack UID of the member Returns: None if the Slack UID provided does not correspond to a CSH Member
def get_member_slackuid(self, slack): members = self.__con__.search_s( CSHMember.__ldap_user_ou__, ldap.SCOPE_SUBTREE, "(slackuid=%s)" % slack, ['ipaUniqueID']) if members: return CSHMember( self, members[0][1]['ipaUniqueID'][0].decode('utf-8'), False) return None
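Both lookup helpers are used the same way; a sketch with hypothetical identifiers, assuming directory is the enclosing LDAP wrapper instance:

    member = directory.get_member_ibutton('0004A3B2C1D0')
    member = directory.get_member_slackuid('U0XXXXXXX')
    if member is None:
        print('No matching CSH member')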
907,021
Get the heads of a directorship. Arguments: val -- the cn of the directorship
def get_directorship_heads(self, val): __ldap_group_ou__ = "cn=groups,cn=accounts,dc=csh,dc=rit,dc=edu" res = self.__con__.search_s( __ldap_group_ou__, ldap.SCOPE_SUBTREE, "(cn=eboard-%s)" % val, ['member']) ret = [] for member in res[0][1]['member']: try: ret.append(member.decode('utf-8')) except UnicodeDecodeError: ret.append(member) except KeyError: continue return [CSHMember(self, dn.split('=')[1].split(',')[0], True) for dn in ret]
907,022
Enqueue an LDAP modification. Arguments: dn -- the distinguished name of the object to modify mod -- an ldap modification entry to enqueue
def enqueue_mod(self, dn, mod): # mark for update if dn not in self.__pending_mod_dn__: self.__pending_mod_dn__.append(dn) self.__mod_queue__[dn] = [] self.__mod_queue__[dn].append(mod)
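A sketch of what a queued modification looks like with python-ldap, assuming lib is the enclosing instance (the DN and attribute values are hypothetical):

    import ldap

    # A python-ldap modification entry: replace the roomNumber attribute.
    mod = (ldap.MOD_REPLACE, 'roomNumber', [b'1024'])
    lib.enqueue_mod('uid=someuser,cn=users,cn=accounts,dc=csh,dc=rit,dc=edu', mod)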
907,023
Check if the observation table is closed. Args: None Returns: tuple (bool, str): True if the observation table is closed and False otherwise. If the table is not closed the escaping string is returned.
def is_closed(self): for t in self.smi_vector: found = False for s in self.sm_vector: if self.observation_table[s] == self.observation_table[t]: self.equiv_classes[t] = s found = True break if not found: return False, t return True, None
907,098
Fill an entry of the observation table. Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None
def _fill_table_entry(self, row, col): prefix = self._membership_query(row) full_output = self._membership_query(row + col) length = len(commonprefix([prefix, full_output])) self.observation_table[row, col] = full_output[length:]
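The suffix arithmetic above reduces to the following standalone sketch (the query outputs are illustrative):

    from os.path import commonprefix

    # If query(row) == 'xy' and query(row + col) == 'xyz',
    # the stored table entry is the new output suffix 'z'.
    prefix, full = 'xy', 'xyz'
    assert full[len(commonprefix([prefix, full])):] == 'z'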
907,099
Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatenated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Returns: str: The access string
def _run_in_hypothesis(self, mma, w_string, index): state = mma[0] for i in range(index): for arc in state: if mma.isyms.find(arc.ilabel) == w_string[i]: state = mma[arc.nextstate] s_index = arc.nextstate # The id of the state is its index inside the Sm list access_string = self.observation_table.sm_vector[s_index] logging.debug( 'Access string for %d: %s - %d ', index, access_string, s_index) return access_string
907,100
Checks if the access string suffix matches the examined string suffix Args: w_string (str): The examined string to be consumed access_string (str): The access string for the state index (int): The index value for selecting the prefix of w Returns: bool: A boolean value indicating if matching was successful
def _check_suffix(self, w_string, access_string, index): prefix_as = self._membership_query(access_string) full_as = self._membership_query(access_string + w_string[index:]) prefix_w = self._membership_query(w_string[:index]) full_w = self._membership_query(w_string) length = len(commonprefix([prefix_as, full_as])) as_suffix = full_as[length:] length = len(commonprefix([prefix_w, full_w])) w_suffix = full_w[length:] if as_suffix != w_suffix: logging.debug('Access string state incorrect') return True logging.debug('Access string state correct.') return False
907,101
Checks for bad DFA transitions using the examined string Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: str: The shortest prefix of the examined string whose output reaches the first point of divergence
def _find_bad_transition(self, mma, w_string): conj_out = mma.consume_input(w_string) targ_out = self._membership_query(w_string) # TODO: handle different length outputs from conjecture and target # hypothesis. length = min(len(conj_out), len(targ_out)) diff = [i for i in range(length) if conj_out[i] != targ_out[i]] if len(diff) == 0: diff_index = len(targ_out) else: diff_index = diff[0] low = 0 high = len(w_string) while True: i = (low + high) // 2 length = len(self._membership_query(w_string[:i])) if length == diff_index + 1: return w_string[:i] elif length < diff_index + 1: low = i + 1 else: high = i - 1
907,102
Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: None
def _process_counter_example(self, mma, w_string): w_string = self._find_bad_transition(mma, w_string) diff = len(w_string) same = 0 while True: i = (same + diff) // 2 access_string = self._run_in_hypothesis(mma, w_string, i) is_diff = self._check_suffix(w_string, access_string, i) if is_diff: diff = i else: same = i if diff - same == 1: break exp = w_string[diff:] self.observation_table.em_vector.append(exp) for row in self.observation_table.sm_vector + self.observation_table.smi_vector: self._fill_table_entry(row, exp)
907,103
Given a state (access string) in Smi that is not equivalent with any state in Sm, this method will move that state into Sm, create the corresponding Smi states and fill the corresponding entries in the table. Args: access_string (str): State access string Returns: None
def _ot_make_closed(self, access_string): self.observation_table.sm_vector.append(access_string) for i in self.alphabet: self.observation_table.smi_vector.append(access_string + i) for e in self.observation_table.em_vector: self._fill_table_entry(access_string + i, e)
907,104
Utilize the observation table to construct a Mealy Machine. The library used for representing the Mealy Machine is the python bindings of the openFST library (pyFST). Args: None Returns: MealyMachine: A mealy machine built from a closed and consistent observation table.
def get_mealy_conjecture(self): mma = MealyMachine() for s in self.observation_table.sm_vector: for i in self.alphabet: dst = self.observation_table.equiv_classes[s + i] # If dst == None then the table is not closed. if dst is None: logging.debug('Conjecture attempt on non closed table.') return None o = self.observation_table[s, i] src_id = self.observation_table.sm_vector.index(s) dst_id = self.observation_table.sm_vector.index(dst) mma.add_arc(src_id, dst_id, i, o) # This works only for Mealy machines for s in mma.states: s.final = True return mma
907,105
Implements the high level loop of the algorithm for learning a Mealy machine. Args: None Returns: MealyMachine: The learned mealy machine
def learn_mealy_machine(self): logging.info('Initializing learning procedure.') self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False # Make sure that the table is closed and consistent while not closed: logging.debug('Checking if table is closed.') closed, string = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(string) else: logging.debug('Table closed.') # Create conjecture mma = self.get_mealy_conjecture() logging.info('Generated conjecture machine with %d states.', len(list(mma.states))) # Check correctness logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(mma) # Are we done? if found: logging.info('No counterexample found. Hypothesis is correct!') break # Add the new experiments into the table to reiterate the # learning loop logging.info( 'Processing counterexample %s with length %d.', counter_example, len(counter_example)) self._process_counter_example(mma, counter_example) logging.info('Learning complete.') return mma
907,107
Imports the module indicated by module_path Args: module_path: string representing a module path such as 'app.config' or 'app.extras.my_module' Returns: the module matching the name of the last component, i.e., for 'app.extras.my_module' it returns a reference to my_module Raises: BadModulePathError if the module is not found
def module_import(module_path): try: # Import whole module path. module = __import__(module_path) # Split into components: ['contour', # 'extras','appengine','ndb_persistence']. components = module_path.split('.') # Starting at the second component, set module to a # reference to that component. At the end, module will # be the last component. In this case: ndb_persistence. for component in components[1:]: module = getattr(module, component) return module except ImportError: raise BadModulePathError( 'Unable to find module "%s".' % (module_path,))
907,295
Traverse directory trees to find a contour.yaml file. Begins with the location of this file, then checks the working directory if not found. Args: config_file: location of this file, override for testing Returns: the path of contour.yaml or None if not found
def find_contour_yaml(config_file=__file__, names=None): checked = set() contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked, names=names) if not contour_yaml: contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names) return contour_yaml
907,296
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of contour.yaml is found. Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked. Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the contour.yaml file or None if it is not found
def _find_countour_yaml(start, checked, names=None): extensions = [] if names: for name in names: if not os.path.splitext(name)[1]: extensions.append(name + ".yaml") extensions.append(name + ".yml") yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions directory = start while directory not in checked: checked.add(directory) for fs_yaml_name in yaml_names: yaml_path = os.path.join(directory, fs_yaml_name) if os.path.exists(yaml_path): return yaml_path directory = os.path.dirname(directory) return
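A calling sketch (the start directory is hypothetical). Note that the loop terminates at the filesystem root because os.path.dirname of the root returns the root itself, which is already in checked:

    checked = set()
    path = _find_countour_yaml('/tmp/project', checked, names=['contour'])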
907,297
Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: None
def _process_counter_example(self, mma, w_string): diff = len(w_string) same = 0 membership_answer = self._membership_query(w_string) while True: i = (same + diff) // 2 access_string = self._run_in_hypothesis(mma, w_string, i) if membership_answer != self._membership_query(access_string + w_string[i:]): diff = i else: same = i if diff - same == 1: break exp = w_string[diff:] self.observation_table.em_vector.append(exp) for row in self.observation_table.sm_vector + self.observation_table.smi_vector: self._fill_table_entry(row, exp) return 0
907,684
Utilize the observation table to construct a DFA. The library used for representing the DFA is the python bindings of the openFST library (pyFST). Args: None Returns: DFA: A DFA built from a closed and consistent observation table.
def get_dfa_conjecture(self): dfa = DFA(self.alphabet) for s in self.observation_table.sm_vector: for i in self.alphabet: dst = self.observation_table.equiv_classes[s + i] # If dst is None then the table is not closed. if dst is None: logging.debug('Conjecture attempt on non closed table.') return None obsrv = self.observation_table[s, i] src_id = self.observation_table.sm_vector.index(s) dst_id = self.observation_table.sm_vector.index(dst) dfa.add_arc(src_id, dst_id, i, obsrv) # Mark the final states in the hypothesis automaton. i = 0 for s in self.observation_table.sm_vector: dfa[i].final = self.observation_table[s, self.epsilon] i += 1 return dfa
907,685
Implements the high level loop of the algorithm for learning a DFA. Args: mma (DFA): The input automaton to initialize the observation table from, if provided Returns: tuple (str, DFA): An empty string and the learned DFA.
def learn_dfa(self, mma=None): logging.info('Initializing learning procedure.') if mma: self._init_table_from_dfa(mma) else: self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False # Make sure that the table is closed while not closed: logging.debug('Checking if table is closed.') closed, string = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(string) else: logging.debug('Table closed.') # Create conjecture dfa = self.get_dfa_conjecture() logging.info('Generated conjecture machine with %d states.', len(list(dfa.states))) # Check correctness logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(dfa) # Are we done? if found: logging.info('No counterexample found. Hypothesis is correct!') break # Add the new experiments into the table to reiterate the # learning loop logging.info('Processing counterexample %s with length %d.', counter_example, len(counter_example)) self._process_counter_example(dfa, counter_example) logging.info('Learning complete.') logging.info('Learned em_vector table is the following:') logging.info(self.observation_table.em_vector) return '', dfa
907,687
This function allows an entity to publish data to the middleware. Args: data (string): contents to be published by this entity.
def publish(self, data): if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} publish_url = self.base_url + "api/0.1.0/publish" publish_headers = {"apikey": self.entity_api_key} publish_data = { "exchange": "amq.topic", "key": str(self.entity_id), "body": str(data) } with self.no_ssl_verification(): r = requests.post(publish_url, json.dumps(publish_data), headers=publish_headers) response = dict() if "No API key" in str(r.content.decode("utf-8")): response["status"] = "failure" r = json.loads(r.content.decode("utf-8"))['message'] elif 'publish message ok' in str(r.content.decode("utf-8")): response["status"] = "success" r = r.content.decode("utf-8") else: response["status"] = "failure" r = r.content.decode("utf-8") response["response"] = str(r) return response
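A usage sketch, assuming entity is an instance of the enclosing client class with a valid API key (the instance name and payload are hypothetical):

    response = entity.publish('{"temperature": 25.1}')
    if response['status'] == 'success':
        print(response['response'])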
907,738
This function allows an entity to access the historic data. Args: entity (string): Name of the device to listen to query_filters (string): Elasticsearch query string filters, for example "pretty=true&size=10"
def db(self, entity, query_filters="size=10"): if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} historic_url = self.base_url + "api/0.1.0/historicData?" + query_filters historic_headers = { "apikey": self.entity_api_key, "Content-Type": "application/json" } historic_query_data = json.dumps({ "query": { "match": { "key": entity } } }) with self.no_ssl_verification(): r = requests.get(historic_url, data=historic_query_data, headers=historic_headers) response = dict() if "No API key" in str(r.content.decode("utf-8")): response["status"] = "failure" else: r = r.content.decode("utf-8") response = r return response
907,739
This function allows an entity to list the devices to subscribe for data. This function must be called at least once, before doing a subscribe. Subscribe function will listen to devices that are bound here. Args: devices_to_bind (list): an array of devices to listen to. Example bind(["test100","testDemo"])
def bind(self, devices_to_bind): if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} url = self.base_url + "api/0.1.0/subscribe/bind" headers = {"apikey": self.entity_api_key} data = { "exchange": "amq.topic", "keys": devices_to_bind, "queue": self.entity_id } with self.no_ssl_verification(): r = requests.post(url, json=data, headers=headers) response = dict() if "No API key" in str(r.content.decode("utf-8")): response["status"] = "failure" r = json.loads(r.content.decode("utf-8"))['message'] elif 'bind queue ok' in str(r.content.decode("utf-8")): response["status"] = "success" r = r.content.decode("utf-8") else: response["status"] = "failure" r = r.content.decode("utf-8") response["response"] = str(r) return response
907,740
This function allows an entity to unbind devices that are already bound. Args: devices_to_unbind (list): an array of devices that are to be unbound (stop listening) Example unbind(["test10","testDemo105"])
def unbind(self, devices_to_unbind): if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} url = self.base_url + "api/0.1.0/subscribe/unbind" headers = {"apikey": self.entity_api_key} data = { "exchange": "amq.topic", "keys": devices_to_unbind, "queue": self.entity_id } with self.no_ssl_verification(): r = requests.delete(url, json=data, headers=headers) response = dict() if "No API key" in str(r.content.decode("utf-8")): response["status"] = "failure" r = json.loads(r.content.decode("utf-8"))['message'] elif 'unbind' in str(r.content.decode("utf-8")): response["status"] = "success" r = r.content.decode("utf-8") else: response["status"] = "failure" r = r.content.decode("utf-8") response["response"] = str(r) return response
907,741
This function allows an entity to subscribe for data from the devices specified in the bind operation. It creates a thread with an event loop to manage the tasks created in start_subscribe_worker. Args: devices_to_bind (list): an array of devices to listen to
def subscribe(self, devices_to_bind=None): # Avoid a mutable default argument; treat None as an empty device list. devices_to_bind = devices_to_bind or [] if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} self.bind(devices_to_bind) loop = asyncio.new_event_loop() t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,)) t1.daemon = True t1.start()
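A usage sketch, assuming entity is an instance of the enclosing client class and the device names exist (both are hypothetical):

    entity.subscribe(['test100', 'testDemo'])  # binds, then listens on a daemon thread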
907,742