code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def exists(self):
    """Return True if this model instance already exists in the database.

    Looks up a row with the same primary key via the model's query object.
    """
    # .first() returns None when no matching row exists.
    return self.query.filter_by(id=self.id).first() is not None
Checks if item already exists in database
def delete(self):
    """Easy delete for db models.

    Removes this instance from the database if it exists; fails
    silently on any error (returns None either way).
    """
    try:
        if self.exists() is False:
            return None
        self.db.session.delete(self)
        self.db.session.commit()
    except (Exception, BaseException) as error:  # fail silently
        # NOTE(review): deliberately swallows every error, including a
        # failed commit -- confirm callers never need to know.
        return None
Easy delete for db models
def save(self):
    """Easy save (insert or update) for db models.

    Adds the instance to the session when it does not exist yet, then
    commits.  Errors are swallowed unless the app runs in DEBUG mode,
    in which case they are re-raised.
    """
    try:
        if self.exists() is False:
            self.db.session.add(self)
            # self.db.session.merge(self)
        # Commit covers both the insert above and plain updates.
        self.db.session.commit()
    except (Exception, BaseException) as error:
        if current_app.config['DEBUG']:
            raise error
        return None
Easy save(insert or update) for db models
def row_to_dict(self, row):
    """Converts a raw GCVS record to a dictionary of star data.

    :param row: sequence of raw GCVS field strings (indexed by column).
    :return: dict with constellation, name, coordinates, variable
        type, magnitudes, epoch and period.
    """
    constellation = self.parse_constellation(row[0])
    name = self.parse_name(row[1])
    ra, dec = self.parse_coordinates(row[2])
    variable_type = row[3].strip()
    max_magnitude, symbol = self.parse_magnitude(row[4])
    min_magnitude, symbol = self.parse_magnitude(row[5])
    if symbol == '(' and max_magnitude is not None:
        # this is actually amplitude: convert to an absolute magnitude
        # relative to the maximum.
        min_magnitude = max_magnitude + min_magnitude
    epoch = self.parse_epoch(row[8])
    period = self.parse_period(row[10])
    return {
        'constellation': constellation,
        'name': name,
        'ra': ra,
        'dec': dec,
        'variable_type': variable_type,
        'max_magnitude': max_magnitude,
        'min_magnitude': min_magnitude,
        'epoch': epoch,
        'period': period,
    }
Converts a raw GCVS record to a dictionary of star data.
def parse_coordinates(self, coords_str):
    """Return a pair of PyEphem-compatible coordinate strings (ra, dec).

    Stars without coordinates in GCVS (there are such cases) yield
    ``(None, None)``.
    """
    if not coords_str.strip():
        return (None, None)
    ra = ':'.join((coords_str[0:2], coords_str[2:4], coords_str[4:8]))
    dec = ':'.join((coords_str[8:11], coords_str[11:13], coords_str[13:15]))
    return (ra, dec)
Returns a pair of PyEphem-compatible coordinate strings (Ra, Dec). If the star has no coordinates in GCVS (there are such cases), a pair of None values is returned.
def parse_magnitude(self, magnitude_str):
    """Convert a magnitude field to a ``(magnitude, symbol)`` tuple.

    ``magnitude`` is a float, or ``None`` when GCVS does not list it;
    ``symbol`` is an empty string or one of '<', '>', '('.
    """
    symbol = magnitude_str[0].strip()
    digits = magnitude_str[1:6].strip()
    if digits:
        return float(digits), symbol
    return None, symbol
Converts magnitude field to a float value, or ``None`` if GCVS does not list the magnitude. Returns a tuple (magnitude, symbol), where symbol can be either an empty string or a single character - one of '<', '>', '('.
def parse_epoch(self, epoch_str):
    """Convert the epoch field to a float (adding the 2400000 Julian
    day offset) or ``None`` when the GCVS record has no epoch."""
    epoch = epoch_str.translate(TRANSLATION_MAP)[:10].strip()
    if not epoch:
        return None
    return 2400000.0 + float(epoch)
Converts the epoch field to a float value (adding the 2400000 Julian day offset), or ``None`` if there is no epoch in the GCVS record.
def parse_period(self, period_str):
    """Convert the period field to a float or ``None`` when the GCVS
    record has no period."""
    period = period_str.translate(TRANSLATION_MAP)[3:14].strip()
    if not period:
        return None
    return float(period)
Converts period field to a float value or ``None`` if there is no period in GCVS record.
def find_hwpack_dir(root):
    """Search for a hardware-pack directory under ``root``.

    A hwpack dir is identified by containing a ``boards.txt`` file.
    Exactly one such directory must exist; asserts otherwise.
    """
    root = path(root)
    log.debug('files in dir: %s', root)
    for x in root.walkfiles():
        log.debug(' %s', x)
    hwpack_dir = None
    for h in (root.walkfiles('boards.txt')):
        # More than one boards.txt means an ambiguous package layout.
        assert not hwpack_dir
        hwpack_dir = h.parent
        log.debug('found hwpack: %s', hwpack_dir)
    assert hwpack_dir
    return hwpack_dir
search for hwpack dir under root.
def install_hwpack(url, replace_existing=False):
    """Install a hardware package from the web or local file system.

    :param url: web address or file path
    :param replace_existing: replace an already-installed pack of the
        same name
    :rtype: None
    """
    # NOTE(review): tmpdir(tmpdir()) creates a fresh temp dir nested
    # inside another -- confirm the nesting is intentional.
    d = tmpdir(tmpdir())
    f = download(url)
    Archive(f).extractall(d)
    clean_dir(d)
    src_dhwpack = find_hwpack_dir(d)
    targ_dhwpack = hwpack_dir() / src_dhwpack.name
    doaction = 0
    if targ_dhwpack.exists():
        log.debug('hwpack already exists: %s', targ_dhwpack)
        if replace_existing:
            log.debug('remove %s', targ_dhwpack)
            targ_dhwpack.rmtree()
            doaction = 1
    else:
        doaction = 1
    if doaction:
        log.debug('move %s -> %s', src_dhwpack, targ_dhwpack)
        src_dhwpack.move(targ_dhwpack)
        # Propagate the permission mode of the hwpack root onto the
        # newly installed tree.
        hwpack_dir().copymode(targ_dhwpack)
        for x in targ_dhwpack.walk():
            hwpack_dir().copymode(x)
install hwpackrary from web or local files system. :param url: web address or file path :param replace_existing: bool :rtype: None
def create(self, volume_id, vtype, size, affinity):
    """Create a volume; a random UUID is used when ``volume_id`` is
    falsy."""
    if not volume_id:
        volume_id = str(uuid.uuid4())
    payload = {
        'volume_type_name': vtype,
        'size': size,
        'affinity': affinity,
    }
    return self.http_put('/volumes/%s' % volume_id,
                         params=self.unused(payload))
create a volume
def restore(self, volume_id, **kwargs):
    """Restore a volume from a backup.

    Required kwargs: ``backup``, ``size``.  Optional:
    ``volume_type_name`` (defaults to 'vtype').
    """
    # These arguments are required
    self.required('restore', kwargs, ['backup', 'size'])
    # Optional Arguments -- use .get() so an absent key does not raise
    # KeyError (the original indexed kwargs directly).
    volume_id = volume_id or str(uuid.uuid4())
    kwargs['volume_type_name'] = kwargs.get('volume_type_name') or 'vtype'
    kwargs['size'] = kwargs.get('size') or 1
    # Make the request
    return self.http_put('/volumes/%s' % volume_id,
                         params=self.unused(kwargs))
restore a volume from a backup
def create(self, volume_id, backup_id):
    """Create a backup of ``volume_id``; a random UUID is generated
    when ``backup_id`` is falsy."""
    if not backup_id:
        backup_id = str(uuid.uuid4())
    return self.http_put('/backups/%s' % backup_id,
                         params={'volume': volume_id})
create a backup
def create(self, name, **kwargs):
    """Create a new node."""
    required_keys = ['hostname', 'port', 'storage_hostname',
                     'volume_type_name', 'size']
    # These arguments are required
    self.required('create', kwargs, required_keys)
    kwargs['name'] = name
    return self.http_post('/nodes', params=kwargs)
Create a new node
def update(self, name, **kwargs):
    """Update an existing node."""
    allowed_keys = ['hostname', 'port', 'status', 'storage_hostname',
                    'volume_type_name', 'size']
    # These arguments are allowed
    self.allowed('update', kwargs, allowed_keys)
    # Remove parameters that are None
    pruned = self.unused(kwargs)
    return self.http_post('/nodes/%s' % name, params=pruned)
Update an existing node
def create(self, volume_id, ip, initiator):
    """Create an export for a volume."""
    payload = {'ip': ip, 'initiator': initiator}
    return self.http_put('/volumes/%s/export' % volume_id, params=payload)
create an export for a volume
def delete(self, volume_id, force=False):
    """Delete an export of the given volume."""
    url = '/volumes/%s/export' % volume_id
    return self.http_delete(url, params={'force': force})
delete an export
def update(self, volume_id, **kwargs):
    """Update an export of the given volume."""
    allowed_keys = ['status', 'instance_id', 'mountpoint', 'ip',
                    'initiator', 'session_ip', 'session_initiator']
    # These arguments are allowed
    self.allowed('update', kwargs, allowed_keys)
    # Remove parameters that are None
    params = self.unused(kwargs)
    return self.http_post('/volumes/%s/export' % volume_id, params=params)
update an export
def proto_refactor(proto_filename, namespace, namespace_path):
    """Refactor a Protobuf file to import from a namespace path.

    Rewrites every ``import "x.proto";`` to import from
    ``namespace_path`` and prepends ``syntax = "proto2";`` when it is
    missing (protoc complains without it).

    Args:
        proto_filename (str): the protobuf filename to be refactored
        namespace (str): the desired package name (i.e. "dropsonde.py2")
        namespace_path (str): the path corresponding to the package
            name (i.e. "dropsonde/py2")

    Returns:
        str: the refactored file contents.
    """
    with open(proto_filename) as f:
        data = f.read()
    # Raw strings: '\s' / '\.' in plain literals are invalid escape
    # sequences (SyntaxWarning on modern Python).
    if not re.search(r'syntax = "proto2"', data):
        data = 'syntax = "proto2";\n' + data
    substitution = 'import "{}/\\1";'.format(namespace_path)
    data = re.sub(r'import\s+"([^"]+\.proto)"\s*;', substitution, data)
    return data
This method refactors a Protobuf file to import from a namespace that will map to the desired python package structure. It also ensures that the syntax is set to "proto2", since protoc complains without it. Args: proto_filename (str): the protobuf filename to be refactored namespace (str): the desired package name (i.e. "dropsonde.py2") namespace_path (str): the desired path corresponding to the package name (i.e. "dropsonde/py2")
def proto_refactor_files(dest_dir, namespace, namespace_path):
    """Run proto_refactor on every *.proto file under ``dest_dir``.

    Args:
        dest_dir (str): directory containing the Protobuf files
        namespace (str): the desired package name (i.e. "dropsonde.py2")
        namespace_path (str): path corresponding to the package name
    """
    for dirpath, _dirnames, filenames in os.walk(dest_dir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            if not fnmatch.fnmatch(full_path, '*.proto'):
                continue
            data = proto_refactor(full_path, namespace, namespace_path)
            with open(full_path, 'w') as f:
                f.write(data)
This method runs the refactoring on all the Protobuf files in the Dropsonde repo. Args: dest_dir (str): directory where the Protobuf files lives. namespace (str): the desired package name (i.e. "dropsonde.py2") namespace_path (str): the desired path corresponding to the package name (i.e. "dropsonde/py2")
def clone_source_dir(source_dir, dest_dir):
    """Copy the source Protobuf files into a build directory.

    An existing ``dest_dir`` is removed first.

    Args:
        source_dir (str): source directory of the Protobuf files
        dest_dir (str): destination directory of the Protobuf files
    """
    dest_exists = os.path.isdir(dest_dir)
    if dest_exists:
        print('removing', dest_dir)
        shutil.rmtree(dest_dir)
    shutil.copytree(source_dir, dest_dir)
Copies the source Protobuf files into a build directory. Args: source_dir (str): source directory of the Protobuf files dest_dir (str): destination directory of the Protobuf files
def protoc_command(lang, output_dir, proto_path, refactored_dir):
    """Run "protoc" on the refactored Protobuf files.

    Args:
        lang (str): language to compile with protoc (python, python3)
        output_dir (str): output directory for the generated sources
        proto_path (str): root protobuf build path to run protoc in
        refactored_dir (str): input directory of the Protobuf files
    """
    proto_files = glob.glob(os.path.join(refactored_dir, '*.proto'))
    cmd = ['protoc', '-I', proto_path, '--{}_out'.format(lang), output_dir]
    cmd += proto_files
    print(' '.join(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=sys.stdout,
                            stderr=sys.stderr,
                            stdin=sys.stdin,
                            cwd=proto_path)
    proc.communicate()
Runs the "protoc" command on the refactored Protobuf files to generate the source python/python3 files. Args: lang (str): the language to compile with "protoc" (i.e. python, python3) output_dir (str): the output directory for the generated source files proto_path (str): the root protobuf build path in which to run "protoc" refactored_dir (str): the input directory of the Protobuf files
def configure(self, config):
    """Initialize the plugin.

    Creates a BudgetDataPackage parser from the specification given via
    ``ckan.budgets.specification`` (or the bundled version) and loads
    the country, currency and status lookup tables plus configured
    defaults.

    :raises ValueError: when the configured default country or
        currency code is unknown.
    """
    specification = config.get(
        'ckan.budgets.specification',
        os.path.join(os.path.dirname(__file__),
                     'data', 'bdp', 'schema.json'))
    self.data = BudgetDataPackage(specification)
    countries_json = config.get(
        'ckan.budgets.countries',
        os.path.join(os.path.dirname(__file__), 'data', 'countries.json'))
    with open(countries_json) as country_list:
        self.countries = json.load(country_list)
    country = config.get('ckan.budgets.default.country', None)
    if country is not None:
        self.default_country = country.upper()
        if self.default_country not in self.countries:
            # Typo fix: was 'Uknown'.
            raise ValueError('Unknown country code "{code}"'.format(
                code=country))
    else:
        # BUG FIX: the original set self.default_currency here, which
        # left default_country undefined when no default was configured
        # (and clobbered the currency default).
        self.default_country = None
    # NOTE(review): this key uses 'ckan.budget.*' (singular) unlike the
    # 'ckan.budgets.*' keys above -- confirm which spelling is intended.
    currencies_json = config.get(
        'ckan.budget.currencies',
        os.path.join(os.path.dirname(__file__), 'data', 'currencies.json'))
    with open(currencies_json) as currency_list:
        self.currencies = json.load(currency_list)
    currency = config.get('ckan.budgets.default.currency', None)
    if currency is not None:
        self.default_currency = currency.upper()
        if self.default_currency not in self.currencies:
            raise ValueError('Unknown currency code "{code}"'.format(
                code=currency))
    else:
        self.default_currency = None
    statuses_json = config.get(
        'ckan.budget.statuses',
        os.path.join(os.path.dirname(__file__),
                     'data', 'bdp', 'statuses.json'))
    with open(statuses_json) as statuses_list:
        self.statuses = json.load(statuses_list)
Initialize the plugin. This creates a data object which holds a BudgetDataPackage parser which operates based on a specification which is either provided in the config via: ``ckan.budgets.specification`` or the included version.
def in_resource(self, field, resource):
    """Return True when ``resource`` has a non-empty, non-None value
    for ``field``."""
    value = resource.get(field, None)
    if value is None:
        return False
    return value != ''
Return True if resource contains a valid value for the field (not an empty or None value)
def are_budget_data_package_fields_filled_in(self, resource):
    """Check that all budget data package fields are filled in;
    otherwise this resource cannot be a budget data package."""
    required = ('country', 'currency', 'year', 'status')
    return all(self.in_resource(field, resource) for field in required)
Check if the budget data package fields are all filled in because if not then this can't be a budget data package
def generate_budget_data_package(self, resource):
    """Try to grab a budget data package schema for the resource.

    The schema only allows fields defined in the budget data package
    specification; an unknown field raises
    NotABudgetDataPackageException, in which case the resource is
    ignored.
    """
    # Return if the budget data package fields have not been filled in
    if not self.are_budget_data_package_fields_filled_in(resource):
        return
    try:
        resource['schema'] = self.data.schema
    except exceptions.NotABudgetDataPackageException:
        log.debug('Resource is not a Budget Data Package')
        resource['schema'] = []
        return
    # If the schema fits, this can be exported as a budget data package
    # so we add the missing metadata fields to the resource.
    resource['BudgetDataPackage'] = True
    resource['standard'] = self.data.version
    resource['granularity'] = self.data.granularity
    resource['type'] = self.data.budget_type
Try to grab a budget data package schema from the resource. The schema only allows fields which are defined in the budget data package specification. If a field is found that is not in the specification this will return a NotABudgetDataPackageException and in that case we can just return and ignore the resource
def before_create(self, context, resource):
    """Parse a new resource (upload or link) and, when it looks like a
    budget data package, append the BDP-specific fields to it.
    """
    # If the resource is being uploaded we load the uploaded file
    # If not we load the provided url
    if resource.get('upload', '') == '':
        self.data.load(resource['url'])
    else:
        self.data.load(resource['upload'].file)
    self.generate_budget_data_package(resource)
When triggered the resource which can either be uploaded or linked to will be parsed and analysed to see if it possibly is a budget data package resource (checking if all required headers and any of the recommended headers exist in the csv). The budget data package specific fields are then appended to the resource which makes it useful for export the dataset as a budget data package.
def before_update(self, context, current, resource):
    """Regenerate budget data package info when the resource changed;
    do nothing when it is unchanged or lacks the BDP fields."""
    # Return if the budget data package fields have not been filled in
    if not self.are_budget_data_package_fields_filled_in(resource):
        return
    if resource.get('upload', '') == '':
        # If it isn't an upload we check if it's the same url
        if current['url'] == resource['url']:
            # Return if it's the same
            return
        else:
            self.data.load(resource['url'])
    else:
        self.data.load(resource['upload'].file)
    self.generate_budget_data_package(resource)
If the resource has changed we try to generate a budget data package, but if it hasn't then we don't do anything
def get_entry(self, entry_id):
    """Get an MFT entry by index, loading and caching it on first use.

    Entries not yet loaded are read from the file specified during
    :class:`MftTable` initialization.

    Returns:
        MftEntry: initialized :class:`~.mft_entry.MftEntry`.
    """
    if entry_id in self._entries:
        return self._entries[entry_id]
    # Entry offsets are fixed-size records from the table's base offset.
    entry = MftEntry(
        filename=self.filename,
        offset=self.offset + entry_id * self.entry_size,
        length=self.entry_size,
        index=entry_id
    )
    self._entries[entry_id] = entry
    return entry
Get mft entry by index. If entry is not already loaded it will load \ it from file specified during :class:`MftTable` initialization. Returns: MftEntry: initialized :class:`~.mft_entry.MftEntry`.
def initialize():
    """Print a random seash factoid when the user runs seash.

    Reads factoids from modules/factoids/factoid.txt into the global
    ``factoids`` list and prints one at random.  Raises
    seash_exceptions.InitializeError when the path cannot be built or
    the file cannot be read.

    NOTE: Python 2 syntax (``except X, e`` and print statements).
    """
    # Global 'factoids' list will be used to store factoids, fetched from a file.
    global factoids
    # Path to "factoid.txt" file is created.
    try:
        current_path = os.getcwd()
        file_path = os.path.join(current_path, "modules", "factoids", "factoid.txt")
    except OSError, error:
        raise seash_exceptions.InitializeError("Error during initializing factoids module: '" + str(error) + "'.")
    # We have to fetch the list of factoids from the "factoid.txt" file.
    try:
        file_object = open(file_path, 'r')
        factoids_temp = file_object.readlines()
        file_object.close()
    except IOError, error:
        raise seash_exceptions.InitializeError("Error during initializing factoids module: '" + str(error) + "'.")
    # Newline characters in the list read from the file are removed.
    for factoid in factoids_temp:
        factoids.append(factoid.strip('\n'))
    # A random factoid is printed every time user runs seash.
    print random.choice(factoids)+"\n"
<Purpose> Used to print random seash factoid when user runs seash. <Arguments> None <Side Effects> Prints random factoid onto the screen. <Exceptions> UserError: Error during generating path to "factoid.txt" file or Error while opening, reading or closing "factoid.txt" file. <Return> None
def trans(self, key) -> str:
    """Translate a dot-separated key via the app's language packs.

    Example: ``Translator().trans('messages.hello')`` looks up
    ``hello`` inside the ``messages`` pack for the active language;
    ``Translator('[module-name]')`` scopes the lookup to that module.

    Returns None on a missing key or pack (re-raised in DEBUG mode).
    """
    key_list = self.__list_key(key)
    try:
        current_selection = \
            current_app.config['LANGUAGE_PACKS'][
                self.module_name][self.app_language]
    except KeyError as error:
        if current_app.config['DEBUG']:
            raise error
        return None
    # Walk the parsed key parts down into the nested pack dict.
    for parsed_dot_key in key_list:
        try:
            current_selection = current_selection[parsed_dot_key]
        except (Exception, BaseException) as error:
            if current_app.config['DEBUG']:
                raise error
            return None
    return current_selection
Root Example: Translator() Translator.trans('messages.hello') resources/lang/en/messages.lang will be opened and parsed for { 'hello': 'Some english text' } If language is fr, resources/lang/fr/messages.lang will be opened and parsed for { 'hello': 'Some french text' } Module Example: Translator('[module-name]') Translator.trans('messages.hello')
def __load_file(self, key_list) -> str:
    """Load a translator file named after the first key part.

    Consumes the first element of ``key_list``.

    :raises FileNotFoundError: when the file does not exist.
    """
    file_name = str(key_list.pop(0)) + self.extension
    file_path = os.path.join(self.path, file_name)
    if not os.path.exists(file_path):
        raise FileNotFoundError(file_path)
    return Json.from_file(file_path)
Load a translator file
def remove_programmer(programmer_id):
    """Remove a programmer entry from programmers.txt.

    :param programmer_id: programmer id (e.g. 'avrisp')
    :rtype: None
    """
    log.debug('remove %s', programmer_id)
    lines = programmers_txt().lines()
    prefix = programmer_id + '.'
    # List comprehension instead of filter(): under Python 3 filter()
    # is a lazy iterator, which write_lines may not handle reliably.
    lines = [x for x in lines if not x.strip().startswith(prefix)]
    programmers_txt().write_lines(lines)
remove programmer. :param programmer_id: programmer id (e.g. 'avrisp') :rtype: None
def load(self, entity_class, entity):
    """Load the given repository entity into the session and return a
    clone.

    If it was already loaded before, look up the loaded entity and
    return it.  All entities referenced by the loaded entity will also
    be loaded (and cloned) recursively.

    :raises ValueError: When an attempt is made to load an entity that
        has no ID
    """
    # Pending changes must be flushed so IDs/caches are consistent.
    if self.__needs_flushing:
        self.flush()
    if entity.id is None:
        raise ValueError('Can not load entity without an ID.')
    cache = self.__get_cache(entity_class)
    sess_ent = cache.get_by_id(entity.id)
    if sess_ent is None:
        if self.__clone_on_load:
            sess_ent = self.__clone(entity, cache)
        else:  # Only needed by the nosql backend pragma: no cover
            cache.add(entity)
            sess_ent = entity
        self.__unit_of_work.register_clean(entity_class, sess_ent)
    return sess_ent
Load the given repository entity into the session and return a clone. If it was already loaded before, look up the loaded entity and return it. All entities referenced by the loaded entity will also be loaded (and cloned) recursively. :raises ValueError: When an attempt is made to load an entity that has no ID
def onStart(self, event):
    """Display the environment of a started container.

    NOTE: Python 2 print-statement syntax.
    """
    c = event.container
    print '+' * 5, 'started:', c
    # Split each 'KEY=VALUE' env string once on '='.
    kv = lambda s: s.split('=', 1)
    env = {k: v for (k, v) in (kv(s) for s in c.attrs['Config']['Env'])}
    print env
Display the environment of a started container
def _get_cropped_file_names(self):
    """Return self.files with the common path prefix/suffix removed."""
    files = [ff.name for ff in self.files]
    prefix = commonprefix(files)
    # Common suffix = reversed common prefix of the reversed names.
    suffix = commonprefix([f[::-1] for f in files])[::-1]
    # Use an explicit end index: with an empty common suffix the
    # original's  f[len(prefix):-0]  sliced every name to ''.
    cropped = [f[len(prefix):len(f) - len(suffix)] for f in files]
    return cropped
self.files with common path prefix/suffix removed
def _identifier_data(self):
    """Return a unique identifier hash for the folder data."""
    # Use only file names
    data = [ff.name for ff in self.files]
    data.sort()
    # also use the folder name
    data.append(self.path.name)
    # add meta data
    data += self._identifier_meta()
    return hash_obj(data)
Return a unique identifier for the folder data
def _search_files(path):
    """Search a folder for data files.

    Returns a sorted list of ``(path, format_name)`` tuples.

    .. versionchanged:: 0.6.0
        `path` is not searched recursively anymore
    """
    path = pathlib.Path(path)
    fifo = []
    for fp in path.glob("*"):
        if fp.is_dir():
            continue
        for fmt in formats:
            # series data is not supported in SeriesFolder
            if not fmt.is_series and fmt.verify(fp):
                fifo.append((fp, fmt.__name__))
                break
    # ignore qpimage formats if multiple formats were detected
    theformats = [ff[1] for ff in fifo]
    formset = set(theformats)
    if len(formset) > 1:
        fmts_qpimage = ["SingleHdf5Qpimage", "SeriesHdf5Qpimage"]
        fifo = [ff for ff in fifo if ff[1] not in fmts_qpimage]
    # ignore raw tif files if single_tif_phasics is detected
    if len(formset) > 1 and "SingleTifPhasics" in theformats:
        # BUG FIX: this was a plain string, so the `not in` test
        # matched any substring of "SingleTifHolo" rather than the
        # exact format name.
        fmts_badtif = ["SingleTifHolo"]
        fifo = [ff for ff in fifo if ff[1] not in fmts_badtif]
    # otherwise, prevent multiple file formats
    theformats2 = [ff[1] for ff in fifo]
    formset2 = set(theformats2)
    if len(formset2) > 1:
        msg = "Qpformat does not support multiple different file " \
              + "formats within one directory: {}".format(formset2)
        raise MultipleFormatsNotSupportedError(msg)
    # sort the lists
    return sorted(fifo)
Search a folder for data files .. versionchanged:: 0.6.0 `path` is not searched recursively anymore
def files(self):
    """List of files (only supported file formats), discovered lazily
    and cached on first access."""
    if self._files is None:
        found = SeriesFolder._search_files(self.path)
        self._files = [item[0] for item in found]
        self._formats = [item[1] for item in found]
    return self._files
List of files (only supported file formats)
def get_identifier(self, idx):
    """Return an identifier for the data at index ``idx``.

    .. versionchanged:: 0.4.2
        indexing starts at 1 instead of 0
    """
    cropped_name = self._get_cropped_file_names()[idx]
    # The human-facing index is 1-based.
    return ":".join([str(self.identifier), cropped_name, str(idx + 1)])
Return an identifier for the data at index `idx` .. versionchanged:: 0.4.2 indexing starts at 1 instead of 0
def get_qpimage_raw(self, idx):
    """Return the QPImage at ``idx`` without background correction."""
    dataset = self._get_dataset(idx)
    qpi = dataset.get_qpimage_raw()
    # Tag the image so it can be traced back to this folder/index.
    qpi["identifier"] = self.get_identifier(idx)
    return qpi
Return QPImage without background correction
def verify(path):
    """Verify folder file format.

    The folder file format is only valid when the folder contains at
    least one supported file and exactly one file format.
    """
    fifo = SeriesFolder._search_files(path)
    if not fifo:
        return False
    fmt_names = {entry[1] for entry in fifo}
    return len(fmt_names) == 1
Verify folder file format The folder file format is only valid when there is only one file format present.
def get_paths(folder):
    """Return a sorted list of *_phase.txt files under `folder`.

    (Source line was garbled by duplication; reconstructed.)
    """
    folder = pathlib.Path(folder).resolve()
    files = folder.rglob("*_phase.txt")
    return sorted(files)
Return *_phase.txt files in `folder`
def load_file(path):
    """Load a txt data file into a 2d float array.

    Comment lines (starting with '#') and empty lines are skipped; a
    comma decimal separator is converted to a point.

    (Source line was garbled by duplication; reconstructed.)
    """
    path = pathlib.Path(path)
    data = path.open().readlines()
    # remove comments and empty lines
    data = [l for l in data if len(l.strip()) and not l.startswith("#")]
    # determine data shape
    n = len(data)
    m = len(data[0].strip().split())
    res = np.zeros((n, m), dtype=np.dtype(float))
    # write data to array, replacing comma with point decimal separator
    for ii in range(n):
        res[ii] = np.array(data[ii].strip().replace(",", ".").split(),
                           dtype=float)
    return res
Load a txt data file
def load_field(path):
    """Load complex QPI field data using *_phase.txt files.

    The amplitude is the square root of the matching *_intensity.txt
    data.

    (Source line was garbled by duplication; reconstructed.)
    """
    path = pathlib.Path(path)
    phase = load_file(path)
    # "xxx_phase.txt"[:-10] strips "_phase.txt" to locate the partner
    # intensity file.
    inten = load_file(path.parent / (path.name[:-10] + "_intensity.txt"))
    ampli = np.sqrt(inten)
    return ampli * np.exp(1j * phase)
Load QPI data using *_phase.txt files
def import_pyfiles(path):
    """Import all *.py files in the specified directory.

    Returns the number of modules imported; each module is appended to
    IMPORTED_BUILD_SOURCES.
    """
    count = 0
    for pyfile in glob.glob(os.path.join(path, '*.py')):
        module = import_file(pyfile)
        IMPORTED_BUILD_SOURCES.append(module)
        count += 1
    return count
Import all *.py files in specified directory.
def emit(self, record):
    """Write the record as a journald event.

    MESSAGE is taken from the message provided by the user; PRIORITY,
    LOGGER, THREAD_NAME and CODE_{FILE,LINE,FUNC} fields are appended
    automatically, plus record.MESSAGE_ID when present.
    """
    if record.args and isinstance(record.args, collections.Mapping):
        # NOTE(review): collections.Mapping was removed in Python 3.10;
        # modern code should use collections.abc.Mapping.
        extra = dict(self._extra, **record.args)  # Merge metadata from handler and record
    else:
        extra = self._extra
    try:
        msg = self.format(record)
        pri = self.mapPriority(record.levelno)
        mid = getattr(record, 'MESSAGE_ID', None)
        send(msg,
             SOCKET=self.socket,
             MESSAGE_ID=mid,
             PRIORITY=format(pri),
             LOGGER=record.name,
             THREAD_NAME=record.threadName,
             CODE_FILE=record.pathname,
             CODE_LINE=record.lineno,
             CODE_FUNC=record.funcName,
             **extra)
    except Exception:
        self.handleError(record)
Write record as journal event. MESSAGE is taken from the message provided by the user, and PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended automatically. In addition, record.MESSAGE_ID will be used if present.
def mapPriority(levelno):
    """Map a Python logging level to a journald priority.

    Python level numbers are "sparse", so values in between the
    standard levels map to the next standard level above them.
    """
    thresholds = (
        (_logging.DEBUG, LOG_DEBUG),
        (_logging.INFO, LOG_INFO),
        (_logging.WARNING, LOG_WARNING),
        (_logging.ERROR, LOG_ERR),
        (_logging.CRITICAL, LOG_CRIT),
    )
    for level, priority in thresholds:
        if levelno <= level:
            return priority
    return LOG_ALERT
Map logging levels to journald priorities. Since Python log level numbers are "sparse", we have to map numbers in between the standard levels too.
def get_args(self, func):
    """Return ``func``'s arguments as a dict of name -> default.

    Arguments without a default map to None; a leading ``self``
    argument is skipped.
    """
    # inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() is the drop-in replacement here.
    spec = inspect.getfullargspec(func)
    args = list(spec.args)
    defaults = spec.defaults or ()
    result = {}
    # Defaults align with the tail of the argument list.
    for default in reversed(defaults):
        result[args.pop()] = default
    for arg in reversed(args):
        if arg != 'self':
            result[arg] = None
    return result
Get the arguments of a method and return it as a dictionary with the supplied defaults, method arguments with no default are assigned None
def guess_format(path):
    """Determine the file format of a folder or a file.

    :raises UnknownFileFormatError: when no known format matches.
    """
    for fmt in formats:
        if fmt.verify(path):
            return fmt.__name__
    raise UnknownFileFormatError(
        "Undefined file format: '{}'".format(path))
Determine the file format of a folder or a file
def duration(seconds):
    """Return a string of the form "1 hr 2 min 3 sec" for the given
    number of seconds; values under one second yield
    'less than 1 sec'."""
    if seconds < 1:
        return 'less than 1 sec'
    remaining = int(round(seconds))
    parts = []
    for unit_size, unit_name in ((3600, 'hr'), (60, 'min'), (1, 'sec')):
        count, remaining = divmod(remaining, unit_size)
        if count:
            parts.append('{} {}'.format(count, unit_name))
    return ' '.join(parts)
Return a string of the form "1 hr 2 min 3 sec" representing the given number of seconds.
def filesize(num_bytes):
    """Return a string containing an approximate representation of
    *num_bytes* using a small number and a decimal SI prefix.
    """
    # BUG FIX: SI order after tera is peta, exa, zetta ('PEZ');
    # the original string had 'EPZ'.
    for prefix in '-KMGTPEZY':
        if num_bytes < 999.9:
            break
        num_bytes /= 1000.0
    if prefix == '-':
        return '{} B'.format(num_bytes)
    return '{:.3n} {}B'.format(num_bytes, prefix)
Return a string containing an approximate representation of *num_bytes* using a small number and decimal SI prefix.
def evolve(self, new_date):
    """Evolve to the new process state at the next date.

    :param date new_date: date or point in time of the new state
    :return State:
    """
    # Already evolved to new_date (and not merely sitting at the
    # initial state): nothing to do.
    if self.state.date == new_date and not self.initial_state.date == new_date:
        return self.state
    # Draw one standard normal per dimension (scalar when _len is falsy).
    if self._len:
        q = [self.random.gauss(0., 1.) for _ in range(int(self._len))]
    else:
        q = self.random.gauss(0., 1.)
    self.state.value = self.func(self.state.value, self.state.date, new_date, q)
    self.state.date = new_date
    return self.state
evolve to the new process state at the next date :param date new_date: date or point in time of the new state :return State:
def evolve(self, new_date):
    """Evolve all producers to the process state at the next date.

    :param date new_date: date or point in time of the new state
    :return list(State): one state per producer
    """
    # All producers already at new_date: return current states.
    if all(p.state.date == new_date for p in self.producers):
        return [p.state for p in self.producers]
    # Draw correlated normals via the Cholesky factor when available.
    if self._cholesky is not None:
        q = [self.random.gauss(0., 1.) for _ in range(len(self._cholesky))]
        q = list(self._cholesky.dot(q))
    else:
        q = list()
    state = list()
    for p in self.producers:
        if len(self._driver_index[p]) == len(p.diffusion_driver):
            # All of this producer's drivers are covered by the
            # shared (correlated) draw.
            qq = [q[i] for i in self._driver_index[p]]
        elif len(self._driver_index[p]) < len(p.diffusion_driver):
            # Mix shared draws with fresh independent ones for drivers
            # not covered by the correlation structure.
            qq = list()
            for d in p.diffusion_driver:
                qqq = q[self._diffusion_driver.index(d)] if d in self._diffusion_driver else self.random.gauss(0., 1.)
                qq.append(qqq)
        else:
            # No driver overlap at all: draw independently.
            qq = [self.random.gauss(0., 1.) for _ in p.diffusion_driver]
        p.random.extend(qq)
        state.append(p.evolve(new_date))
    return state
evolve to the new process state at the next date :param date new_date: date or point in time of the new state :return State:
def _get_shortcut_prefix(self, user_agent, standart_prefix):
    """Returns the shortcut prefix of browser.

    :param user_agent: The user agent of browser.
    :type user_agent: str
    :param standart_prefix: The default prefix.
    :type standart_prefix: str
    :return: The shortcut prefix of browser.
    :rtype: str
    """
    # pylint: disable=no-self-use
    if user_agent is None:
        return standart_prefix
    ua = user_agent.lower()
    is_opera = 'opera' in ua
    is_mac = 'mac' in ua
    is_konqueror = 'konqueror' in ua
    is_spoofer = 'spoofer' in ua
    is_safari = 'applewebkit' in ua
    is_windows = 'windows' in ua
    is_chrome = 'chrome' in ua
    is_firefox = ('firefox' in ua) or ('minefield' in ua)
    is_ie = ('msie' in ua) or ('trident' in ua)
    # Ordered guard clauses replace the original if/elif ladder.
    if is_opera:
        return 'SHIFT + ESC'
    if is_chrome and is_mac and not is_spoofer:
        return 'CTRL + OPTION'
    if is_safari and not is_windows and not is_spoofer:
        return 'CTRL + ALT'
    if (not is_windows) and (is_safari or is_mac or is_konqueror):
        return 'CTRL'
    if is_firefox:
        return 'ALT + SHIFT'
    if is_chrome or is_ie:
        return 'ALT'
    return standart_prefix
Returns the shortcut prefix of browser. :param user_agent: The user agent of browser. :type user_agent: str :param standart_prefix: The default prefix. :type standart_prefix: str :return: The shortcut prefix of browser. :rtype: str
def _get_role_description(self, role):
    """Returns the description of role, or None when not configured.

    :param role: The role.
    :type role: str
    :rtype: str
    """
    parameter = 'role-' + role.lower()
    if not self.configure.has_parameter(parameter):
        return None
    return self.configure.get_parameter(parameter)
Returns the description of role. :param role: The role. :type role: str :return: The description of role. :rtype: str
def _get_language_description(self, language_code):
    """Returns the description of a BCP 47 language code.

    Falls back from the full code (e.g. 'pt-br') to its primary
    subtag (e.g. 'pt'); returns None when neither is configured.

    :param language_code: The BCP 47 code language.
    :type language_code: str
    :rtype: str
    """
    language = language_code.lower()
    parameter = 'language-' + language
    if self.configure.has_parameter(parameter):
        return self.configure.get_parameter(parameter)
    if '-' in language:
        primary = language.split('-')[0]
        parameter = 'language-' + primary
        if self.configure.has_parameter(parameter):
            return self.configure.get_parameter(parameter)
    return None
Returns the description of language. :param language_code: The BCP 47 code language. :type language_code: str :return: The description of language. :rtype: str
def _get_description(self, element):
    """Returns the accessible description of element.

    Sources, in priority order: title, aria-label, alt, label,
    aria-labelledby/aria-describedby references, the value of
    button-like inputs, and finally the element's text content.
    Whitespace runs are collapsed to single spaces.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :rtype: str
    """
    description = None
    if element.has_attribute('title'):
        description = element.get_attribute('title')
    elif element.has_attribute('aria-label'):
        description = element.get_attribute('aria-label')
    elif element.has_attribute('alt'):
        description = element.get_attribute('alt')
    elif element.has_attribute('label'):
        description = element.get_attribute('label')
    elif (
        (element.has_attribute('aria-labelledby'))
        or (element.has_attribute('aria-describedby'))
    ):
        if element.has_attribute('aria-labelledby'):
            description_ids = re.split(
                '[ \n\r\t]+',
                element.get_attribute('aria-labelledby').strip()
            )
        else:
            description_ids = re.split(
                '[ \n\r\t]+',
                element.get_attribute('aria-describedby').strip()
            )
        # Use the first referenced element that actually exists.
        for description_id in description_ids:
            element_description = self.parser.find(
                '#' + description_id
            ).first_result()
            if element_description is not None:
                description = element_description.get_text_content()
                break
    elif (
        (element.get_tag_name() == 'INPUT')
        and (element.has_attribute('type'))
    ):
        type_attribute = element.get_attribute('type').lower()
        if (
            (
                (type_attribute == 'button')
                or (type_attribute == 'submit')
                or (type_attribute == 'reset')
            )
            and (element.has_attribute('value'))
        ):
            description = element.get_attribute('value')
    # Fall back to the element's own text content.
    if not bool(description):
        description = element.get_text_content()
    return re.sub('[ \n\r\t]+', ' ', description.strip())
Returns the description of element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The description of element. :rtype: str
def _insert(self, element, new_element, before):
    """Insert a element before or after other element.

    HTML is redirected to BODY; container-like tags receive the new
    element as first/last child; form controls get it inserted on
    their associated labels; everything else gets a sibling insert.

    :param element: The reference element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param new_element: The element that be inserted.
    :type new_element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param before: To insert the element before the other element.
    :type before: bool
    """
    tag_name = element.get_tag_name()
    append_tags = [
        'BODY', 'A', 'FIGCAPTION', 'LI', 'DT', 'DD', 'LABEL', 'OPTION',
        'TD', 'TH'
    ]
    controls = ['INPUT', 'SELECT', 'TEXTAREA']
    if tag_name == 'HTML':
        # Insert into BODY rather than the document root.
        body = self.parser.find('body').first_result()
        if body is not None:
            self._insert(body, new_element, before)
    elif tag_name in append_tags:
        if before:
            element.prepend_element(new_element)
        else:
            element.append_element(new_element)
    elif tag_name in controls:
        # Attach to the control's label(s): by for="id", else the
        # ancestor <label>.
        labels = []
        if element.has_attribute('id'):
            labels = self.parser.find(
                'label[for="' + element.get_attribute('id') + '"]'
            ).list_results()
        if not labels:
            labels = self.parser.find(element).find_ancestors(
                'label'
            ).list_results()
        for label in labels:
            self._insert(label, new_element, before)
    elif before:
        element.insert_before(new_element)
    else:
        element.insert_after(new_element)
Insert a element before or after other element. :param element: The reference element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param new_element: The element that be inserted. :type new_element: hatemile.util.html.htmldomelement.HTMLDOMElement :param before: To insert the element before the other element. :type before: bool
def _force_read_simple(self, element, text_before, text_after, data_of):
    """Force the screen reader display an information of element.

    Creates hidden <span> markers linked to the element through the
    ``data_of`` attribute, replacing stale markers; does nothing when
    other (non-marker) elements already reference the element.

    :param element: The reference element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param text_before: The text content to show before the element.
    :type text_before: str
    :param text_after: The text content to show after the element.
    :type text_after: str
    :param data_of: The name of attribute that links the content with
        element.
    :type data_of: str
    """
    self.id_generator.generate_id(element)
    identifier = element.get_attribute('id')
    selector = '[' + data_of + '="' + identifier + '"]'
    reference_before = self.parser.find(
        '.' + AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE
        + selector
    ).first_result()
    reference_after = self.parser.find(
        '.' + AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER
        + selector
    ).first_result()
    references = self.parser.find(selector).list_results()
    # Exclude our own marker spans from the reference list.
    if reference_before in references:
        references.remove(reference_before)
    if reference_after in references:
        references.remove(reference_after)
    if not references:
        if text_before:
            # Replace any stale "before" marker with a fresh one.
            if reference_before is not None:
                reference_before.remove_node()
            span = self.parser.create_element('span')
            span.set_attribute(
                'class',
                AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE
            )
            span.set_attribute(data_of, identifier)
            span.append_text(text_before)
            self._insert(element, span, True)
        if text_after:
            # Replace any stale "after" marker with a fresh one.
            if reference_after is not None:
                reference_after.remove_node()
            span = self.parser.create_element('span')
            span.set_attribute(
                'class',
                AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER
            )
            span.set_attribute(data_of, identifier)
            span.append_text(text_after)
            self._insert(element, span, False)
Force the screen reader display an information of element. :param element: The reference element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param text_before: The text content to show before the element. :type text_before: str :param text_after: The text content to show after the element. :type text_after: str :param data_of: The name of attribute that links the content with element. :type data_of: str
def _force_read( self, element, value, text_prefix_before, text_suffix_before, text_prefix_after, text_suffix_after, data_of ): if (text_prefix_before) or (text_suffix_before): text_before = text_prefix_before + value + text_suffix_before else: text_before = '' if (text_prefix_after) or (text_suffix_after): text_after = text_prefix_after + value + text_suffix_after else: text_after = '' self._force_read_simple(element, text_before, text_after, data_of)
Force the screen reader to read a value around an element, wrapped with the given prefixes and suffixes. :param element: The reference element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param value: The value to be shown. :type value: str :param text_prefix_before: The prefix of the value to show before the element. :type text_prefix_before: str :param text_suffix_before: The suffix of the value to show before the element. :type text_suffix_before: str :param text_prefix_after: The prefix of the value to show after the element. :type text_prefix_after: str :param text_suffix_after: The suffix of the value to show after the element. :type text_suffix_after: str :param data_of: The name of the attribute that links the content with the element. :type data_of: str
def local_updatetime(port):
  """Fallback time-update callback: seed the time module from the local clock.

  Called when all remote time-server updates have failed.  Warns the
  user, then sets the internal time using the local system clock.

  :param port: unused; present only to match the callback signature
      expected by time_interface.r2py.
  """
  # NOTE: Python 2 / Repy-style print statements -- this module targets
  # the Seattle/Repy runtime, not Python 3.
  print 'Time update failed, could not connect to any time servers...'
  print 'Your network connection may be down.'
  print "Falling back to using your computer's local clock."
  print
  # time.time() gives us the # of seconds since 1970, whereas the NTP
  # services gives us the # of seconds since 1900.
  time.time_settime(pythontime.time() + time.time_seconds_from_1900_to_1970)
<Purpose> Callback for time_interface.r2py to update the time that is used internally for nodemanager communications. <Arguments> port: The port to update on. This is not used however. It is only specified to adhere to the function signature expected by time_interface.r2py. <Side Effects> If we reach this function, then it means that other time server updates failed. We will notify the user of the failure, and set time.r2py to use the local clock. <Exceptions> None <Returns> None
def get_commands_from_commanddict(commanddict):
  """
  <Purpose>
    Extracts the commands that are contained in the command dictionary.
    The arguments of these commands are not included.
  <Arguments>
    commanddict:  A command dictionary in the format specified in
                  seash_dictionary.
  <Exceptions>
    None
  <Side Effects>
    None
  <Return>
    A list of commands that are in the commanddict.
  """
  if not commanddict:
    return []

  commands = []
  for command in commanddict:
    # Recurse into the children; a command with subcommands is listed
    # once per subcommand ("show log", "show ip"), otherwise by itself.
    subcommands = get_commands_from_commanddict(commanddict[command]['children'])
    if subcommands:
      for subcommand in subcommands:
        commands.append(command + " " + subcommand)
    else:
      commands.append(command)
  return commands
<Purpose> Extracts the commands that are contained in the command dictionary. The arguments of these commands are not included. <Arguments> commanddict: A command dictionary in the format specified in seash_dictionary. <Exceptions> None <Side Effects> None <Return> A list of commands that are in the commanddict.
def provider(func=None, *, singleton=False, injector=None):
    """
    Decorator that marks a function as a provider.

    Args:
        singleton (bool): If True the returned value is shared; if False
            (the default) the provider is invoked anew for every injection.
        injector (Injector): When given, the wrapped function is
            immediately registered as a provider with this injector.

    Supports both ``@provider`` and ``@provider(...)`` call styles.
    """
    def decorate(target):
        wrapped = _wrap_provider_func(target, {'singleton': singleton})
        if injector:
            injector.register_provider(wrapped)
        return wrapped

    # Bare usage (@provider) passes the function directly; parameterized
    # usage (@provider(...)) returns the decorator for later application.
    return decorate(func) if func else decorate
Decorator to mark a function as a provider. Args: singleton (bool): The returned value should be a singleton or shared instance. If False (the default) the provider function will be invoked again for every time it's needed for injection. injector (Injector): If provided, the function is immediately registered as a provider with the injector instance. Example: @diay.provider(singleton=True) def myfunc() -> MyClass: return MyClass(args)
def inject(*args, **kwargs):
    """
    Mark a class or function for injection, so a DI container knows it
    should inject dependencies into it.

    Useful for injecting properties into a class without declaring them
    in the constructor, or for arguments without proper type hints.
    """
    def wrapper(obj):
        # Guard clause: anything that is neither a class nor callable
        # cannot receive injections.
        if not (inspect.isclass(obj) or callable(obj)):
            raise DiayException("Don't know how to inject into %r" % obj)
        _inject_object(obj, *args, **kwargs)
        return obj
    return wrapper
Mark a class or function for injection, meaning that a DI container knows that it should inject dependencies into it. Normally you won't need this as the injector will inject the required arguments anyway, but it can be used to inject properties into a class without having to specify it in the constructor, or to inject arguments that aren't properly type hinted. Example: @diay.inject('foo', MyClass) class MyOtherClass: pass assert isinstance(injector.get(MyOtherClass).foo, MyClass)
def register_plugin(self, plugin: Plugin):
    """
    Register a plugin.

    Accepts either a ``Plugin`` instance (providers registered eagerly)
    or a ``Plugin`` subclass (provider methods registered lazily).

    :raises DiayException: if *plugin* is neither of the above.
    """
    if isinstance(plugin, Plugin):
        lazy = False
    # Guard with isclass: bare issubclass() raises TypeError for
    # non-class arguments instead of the intended DiayException.
    elif inspect.isclass(plugin) and issubclass(plugin, Plugin):
        lazy = True
    else:
        msg = 'plugin %r must be an object/class of type Plugin' % plugin
        raise DiayException(msg)

    # On a class the members are plain functions; on an instance they
    # are bound methods.
    predicate = inspect.isfunction if lazy else inspect.ismethod
    for _, method in inspect.getmembers(plugin, predicate=predicate):
        if getattr(method, '__di__', {}).get('provides'):
            if lazy:
                self.register_lazy_provider_method(plugin, method)
            else:
                self.register_provider(method)
Register a plugin.
def register_provider(self, func):
    """
    Register a provider function.

    :raises DiayException: if *func* was not marked as a provider.
    """
    info = getattr(func, '__di__', {})
    if 'provides' not in info:
        raise DiayException('function %r is not a provider' % func)
    self.factories[info['provides']] = func
Register a provider function.
def register_lazy_provider_method(self, cls, method):
    """
    Register a class method lazily as a provider.

    The owning class is only instantiated (via ``self.get``) when the
    provided value is first requested.
    """
    info = getattr(method, '__di__', {})
    if 'provides' not in info:
        raise DiayException('method %r is not a provider' % method)

    @functools.wraps(method)
    def lazy_call(*args, **kwargs):
        instance = self.get(cls)
        return getattr(instance, method.__name__)(*args, **kwargs)

    self.factories[info['provides']] = lazy_call
Register a class method lazily as a provider.
def set_factory(self, thing: type, value, overwrite=False):
    """
    Set the factory for something.

    :raises DiayException: if a factory already exists and *overwrite*
        is False.
    """
    if not overwrite and thing in self.factories:
        raise DiayException('factory for %r already exists' % thing)
    self.factories[thing] = value
Set the factory for something.
def set_instance(self, thing: type, value, overwrite=False):
    """
    Set an instance of a thing.

    :raises DiayException: if an instance already exists and *overwrite*
        is False.
    """
    if not overwrite and thing in self.instances:
        raise DiayException('instance for %r already exists' % thing)
    self.instances[thing] = value
Set an instance of a thing.
def get(self, thing: type):
    """
    Get an instance of some type.

    Resolution order: existing instance, registered factory (cached as
    an instance when the factory is a singleton), class instantiation,
    plain callable invocation.

    :raises DiayException: when *thing* cannot be resolved at all.
    """
    if thing in self.instances:
        return self.instances[thing]

    if thing in self.factories:
        factory = self.factories[thing]
        # The factory itself is resolved recursively (it may be a
        # callable or a class method wrapper).
        produced = self.get(factory)
        if hasattr(factory, '__di__') and factory.__di__['singleton']:
            self.instances[thing] = produced
        return produced

    if inspect.isclass(thing):
        return self._call_class_init(thing)
    if callable(thing):
        return self.call(thing)

    raise DiayException('cannot resolve: %r' % thing)
Get an instance of some type.
def call(self, func, *args, **kwargs):
    """
    Call a function, resolving any type-hinted arguments.

    Explicitly passed keyword arguments always win over resolved ones.

    :raises DiayException: if the call fails with a TypeError,
        typically because some argument could not be resolved.
    """
    # Fill in only the keywords the caller did not supply.
    for name, value in self._guess_kwargs(func).items():
        kwargs.setdefault(name, value)
    try:
        return func(*args, **kwargs)
    except TypeError as exc:
        msg = (
            "tried calling function %r but failed, probably "
            "because it takes arguments that cannot be resolved"
        ) % func
        raise DiayException(msg) from exc
Call a function, resolving any type-hinted arguments.
def to_dict(self):
    """Converts this embed object into a dict."""
    result = {}
    # Raw slot-backed data: strip the leading underscore from each key.
    for slot in self.__slots__:
        if slot.startswith('_') and hasattr(self, slot):
            result[slot[1:]] = getattr(self, slot)

    # Convenience wrappers need translating to their wire representation.
    colour = result.pop('colour', None)
    if colour:
        result['color'] = colour.value
    timestamp = result.pop('timestamp', None)
    if timestamp:
        result['timestamp'] = timestamp.isoformat()

    # Non-raw attributes, included only when truthy.
    for attr in ('type', 'description', 'url', 'title'):
        value = getattr(self, attr)
        if value:
            result[attr] = value
    return result
Converts this embed object into a dict.
def do_list(self, resource):
    """
    Enumerate resources.

    Possible values: plugins, volumes
    """
    if resource == 'volumes':
        self.__list_volumes()
    elif resource == 'plugins':
        self.__list_plugins()
    else:
        # Unsupported resource name: tell the user where to find help.
        self.logger.error("Unknown resource: '{}', type 'help list' "
                          "to get more information".format(resource))
Enumerate resources Possible values: plugins, volumes
def do_load(self, filename):
    """Load a disk image for analysis into the current session."""
    try:
        self.__session.load(filename)
    except IOError as error:
        # Report the OS-level reason (e.g. "No such file or directory").
        self.logger.error(error.strerror)
Load disk image for analysis
def do_session(self, args):
    """Print current session information."""
    if self.__session.filename is None:
        filename = 'Not specified'
    else:
        filename = self.__session.filename
    # Left-aligned 30-character label column.
    print('{0: <30}: {1}'.format('Filename', filename))
Print current session information
def _convert_iterable(self, iterable): # Return original if _wrapper isn't callable if not callable(self._wrapper): return iterable return [self._wrapper(x) for x in iterable]
Converts elements returned by an iterable into instances of self._wrapper
def _check_element(self, lookup_strings, instance):
    """Return True if every lookup string/value pair matches *instance*."""
    return all(
        field_lookup(instance, query, value, True)
        for query, value in lookup_strings.items()
    )
Return True if lookup string/value pairs match against the passed object.
def get(self, **kwargs):
    """Return the first element matching the given field lookups.

    Only the first match is returned; use ``filter()`` for the full
    matching set.

    :raises QueryList.NotFound: if no element matches.
    """
    # Sentinel distinguishes "no match" from a legitimately falsy element.
    sentinel = object()
    match = next(
        (item for item in self if self._check_element(kwargs, item)),
        sentinel)
    if match is sentinel:
        kv_str = self._stringify_kwargs(kwargs)
        raise QueryList.NotFound(
            "Element not found with attributes: %s" % kv_str)
    return match
Returns the first object encountered that matches the specified lookup parameters. >>> site_list.get(id=1) {'url': 'http://site1.tld/', 'published': False, 'id': 1} >>> site_list.get(published=True, id__lt=3) {'url': 'http://site1.tld/', 'published': True, 'id': 2} >>> site_list.filter(published=True).get(id__lt=3) {'url': 'http://site1.tld/', 'published': True, 'id': 2} If the QueryList contains multiple elements that match the criteria, only the first match will be returned. Use ``filter()`` to retrieve the entire set. If no match is found in the QueryList, the method will raise a ``NotFound`` exception. >>> site_list.get(id=None) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "querylist/list.py", line 113, in get "Element not found with attributes: %s" % kv_str) querylist.list.NotFound: Element not found with attributes: id=None
def exclude(self, **kwargs):
    """Return a QueryList of elements NOT matching the field lookups.

    The complement of ``filter()``: elements matching every lookup are
    dropped, everything else is kept.
    """
    remaining = (item for item in self
                 if not self._check_element(kwargs, item))
    return QueryList(data=remaining, wrapper=self._wrapper, wrap=False)
Generates a QueryList containing the subset of objects from this QueryList that do **not** match the provided field lookups. The following example returns the subset of a QueryList named ``site_list`` where the id is greater than 1000. >>> site_list.exclude(id__gt=1000) [{'url': 'http://site1001.tld/',...}, {...}], In the next example, ``exclude()`` returns the subset of objects from site_list that aren't published and don't have "test" in their title >>> site_list.exclude(published=True, title__icontains="test") [{'url': 'http://site1.tld/',...}, {...}] If all objects match the provided field lookups, then an empty QueryList is returned: >>> site_list.exclude(id__gt=0) []
def __create_handler_settings(config):
    """Build the per-handler settings mapping from a parsed config.

    :type config: dict
    :return: mapping of handler class to its settings dict
    """
    server_config = config['conf']['server']
    settings = {
        'auth_required': server_config.get('auth_required', True),
        'upstream_timeout': server_config.get('upstream_timeout', None),
        'registry': PluginBuilder.build_plugins(
            config['conf']['plugins_enabled']),
    }
    # Tokens are only required (and only read) when auth is enabled.
    if settings['auth_required']:
        settings['auth_tokens'] = server_config['auth_tokens']
    return {APIHandler: settings}
:type config: dict
def runserver(ctx, conf, port, foreground):
    """Run the fnExchange server"""
    config = read_config(conf)
    server_conf = config['conf']['server']

    debug = config['conf'].get('debug', False)
    click.echo('Debug mode {0}.'.format('on' if debug else 'off'))

    app_settings = {
        'debug': debug,
        'auto_reload': server_conf.get('auto_reload', False),
    }
    handlers_settings = __create_handler_settings(config)

    if not foreground:
        # Daemonized mode is not implemented yet.
        click.echo('Requested mode: background')
        raise NotImplementedError
    click.echo('Requested mode: foreground')
    # CLI port, when given, overrides the configured one.
    start_app(port or server_conf['port'], app_settings, handlers_settings)
Run the fnExchange server
def as_repository(resource):
    """
    Adapts the given registered resource to its configured repository.

    :return: object implementing
        :class:`everest.repositories.interfaces.IRepository`.
    """
    registry = get_current_registry()
    # Interfaces must first be resolved to their collection class.
    if IInterface in provided_by(resource):
        resource = registry.getUtility(resource, name='collection-class')
    return registry.getAdapter(resource, IRepository)
Adapts the given registered resource to its configured repository. :return: object implementing :class:`everest.repositories.interfaces.IRepository`.
def commit_veto(request, response): # unused request arg pylint: disable=W0613
    """
    Strict commit veto to use with the transaction manager.

    Vetoes all commits for HTTP status codes other than 2xx unless a
    commit is explicitly requested by setting the "x-tm" response
    header to "commit"; any other "x-tm" value always vetoes.

    :return: True when the transaction should be vetoed (rolled back).
    """
    tm_header = response.headers.get('x-tm')
    if tm_header is not None:
        # An explicit header always wins: veto unless it says "commit".
        return tm_header != 'commit'
    # No header: veto everything but 2xx responses.  (The original also
    # re-tested the header here, which is always true in this branch.)
    return not response.status.startswith('2')
Strict commit veto to use with the transaction manager. Unlike the default commit veto supplied with the transaction manager, this will veto all commits for HTTP status codes other than 2xx unless a commit is explicitly requested by setting the "x-tm" response header to "commit". As with the default commit veto, the commit is always vetoed if the "x-tm" response header is set to anything other than "commit".
def set(cls, key, obj):
    """
    Sets the given object as global object for the given key.

    :raises ValueError: if an object is already registered under *key*.
    :return: the stored object.
    """
    with cls._lock:
        # "is not None" instead of the original "not ... is None";
        # the read-back return also happens under the lock now, so a
        # concurrent writer cannot change what this call reports.
        if cls._globs.get(key) is not None:
            raise ValueError('Duplicate key "%s".' % key)
        cls._globs[key] = obj
        return cls._globs[key]
Sets the given object as global object for the given key.
def as_representer(resource, content_type):
    """
    Adapts the given resource and content type to a representer.

    :param resource: resource to adapt.
    :param str content_type: content (MIME) type to obtain a
        representer for.
    """
    representer_registry = \
        get_current_registry().queryUtility(IRepresenterRegistry)
    return representer_registry.create(type(resource), content_type)
Adapts the given resource and content type to a representer. :param resource: resource to adapt. :param str content_type: content (MIME) type to obtain a representer for.
def get_mapping_registry(content_type):
    """
    Returns the data element registry for the given content type
    (a Singleton).

    :Note: This only works after a representer for the given content
        type has been created.
    """
    representer_registry = \
        get_current_registry().queryUtility(IRepresenterRegistry)
    return representer_registry.get_mapping_registry(content_type)
Returns the data element registry for the given content type (a Singleton). :Note: This only works after a representer for the given content type has been created.
def data_element_tree_to_string(data_element):
    """Create a string representation of the given data element tree.

    Collections render as ``Name[member, member, ...]``; members and
    links render as ``Name(attr=value, ...)``, recursing into nested
    resource data elements.
    """
    # FIXME: rewrite this as a visitor to use the data element tree traverser.
    def __dump(data_el, stream, offset):
        # Recursively pretty-print one data element at the given indent.
        name = data_el.__class__.__name__
        stream.write("%s%s" % (' ' * offset, name))
        offset += 2
        ifcs = provided_by(data_el)
        if ICollectionDataElement in ifcs:
            # Collection: one member per line, comma-separated, in [].
            stream.write("[")
            first_member = True
            for member_data_el in data_el.get_members():
                if first_member:
                    stream.write('%s' % os.linesep + ' ' * offset)
                    first_member = False
                else:
                    stream.write(',%s' % os.linesep + ' ' * offset)
                __dump(member_data_el, stream, offset)
            stream.write("]")
        else:
            stream.write("(")
            if ILinkedDataElement in ifcs:
                # Links only expose url/kind/relation, no attributes.
                stream.write("url=%s, kind=%s, relation=%s"
                             % (data_el.get_url(), data_el.get_kind(),
                                data_el.get_relation()))
            else:
                first_attr = True
                for attr_name, attr_value in iteritems_(data_el.data):
                    if first_attr:
                        first_attr = False
                    else:
                        stream.write(',%s' % os.linesep
                                     + ' ' * (offset + len(name) + 1))
                    # NOTE(review): the None check runs *after* the
                    # separator is written, so a None-valued attribute
                    # leaves a dangling comma -- looks unintended; confirm.
                    if attr_value is None:
                        continue
                    if not IResourceDataElement in provided_by(attr_value):
                        stream.write("%s=%s" % (attr_name, attr_value))
                    else:
                        # Nested resource: recurse under "attr=".
                        stream.write("%s=" % attr_name)
                        __dump(attr_value, stream, offset)
            stream.write(')')
    stream = NativeIO()
    __dump(data_element, stream, 0)
    return stream.getvalue()
Creates a string representation of the given data element tree.
def finalize(self):
    """Finalize the simulation for this consumer.

    Runs the parent finalization first, then decodes the accumulated
    string result with this consumer's decoder.
    """
    super(StringWriterConsumer, self).finalize()
    self.result = self.decoder(self.result)
finalize simulation for consumer
def initialize(self, num_of_paths=None, grid=None, seed=None):
    """Initialize this StackedConsumer and all stacked sub-consumers.

    :param int num_of_paths: number of Monte Carlo paths
    :param grid: time grid of the simulation
    :param seed: random number generator seed

    NOTE(review): arguments are forwarded as (grid, num_of_paths, seed)
    although this method's own signature orders them (num_of_paths,
    grid, seed) -- confirm the parent/sub-consumer signatures really
    expect (grid, num_of_paths, seed).
    """
    super(StackedConsumer, self).initialize(grid, num_of_paths, seed)
    for c in self.consumers:
        c.initialize(grid, num_of_paths, seed)
    # Mirror the sub-consumer states as this consumer's state.
    self.state = [c.state for c in self.consumers]
initialize StackedConsumer
def initialize_path(self, path_num=None):
    """Prepare every contained consumer for the next MC path.

    :param int path_num: number of the path about to be simulated
    """
    for consumer in self.consumers:
        consumer.initialize_path(path_num)
    self.state = [consumer.state for consumer in self.consumers]
make the consumer_state ready for the next MC path :param int path_num:
def finalize_path(self, path_num=None):
    """Finalize the given path on every contained consumer and gather
    their results.

    :param int path_num: number of the path just simulated
    """
    for consumer in self.consumers:
        consumer.finalize_path(path_num)
    self.result = [consumer.result for consumer in self.consumers]
finalize path and populate result for ConsumerConsumer
def finalize(self):
    """Finalize every contained consumer and collect their results."""
    for consumer in self.consumers:
        consumer.finalize()
    self.result = [consumer.result for consumer in self.consumers]
finalize for ConsumerConsumer
def get(self, queue_get):
    """Merge the given consumer states into the contained consumers.

    Used for merging results of parallelized MC runs: each contained
    consumer receives its matching state from *queue_get*, then the
    collected results are re-gathered.  The states must be disjoint.

    :param object queue_get: iterable of consumer states, one per
        contained consumer
    """
    # builtin zip pairs consumers with their states just like izip did
    for consumer, state in zip(self.consumers, queue_get):
        consumer.get(state)
    self.result = [consumer.result for consumer in self.consumers]
get to given consumer states. This function is used for merging of results of parallelized MC. The first state is used for merging in place. The states must be disjoint. :param object queue_get: second consumer state
def finalize(self):
    """Finalize for TransposedConsumer: transpose the result matrix.

    After the parent finalization ``self.result`` holds one row per
    path; this turns it into one row per time step.
    """
    super(TransposedConsumer, self).finalize()
    # List comprehension instead of bare map(): under Python 3 the
    # original left a lazy map object in self.result instead of a list.
    self.result = [list(row) for row in zip(*self.result)]
finalize for PathConsumer
def _get_attribute(self, offset):
    """Determine the attribute type at *offset* and return an
    initialized attribute object.

    Returns:
        MftAttr: One of the attribute objects
            (eg. :class:`~.mft_attribute.MftAttrFilename`).
        None: If the attribute type does not match any supported type.
    """
    attr_type = self.get_uint_le(offset)
    # The attribute's total length lives at offset 0x4 of its header.
    attr_length = self.get_uint_le(offset + 0x04)
    raw_data = self.get_chunk(offset, attr_length)
    return MftAttr.factory(attr_type, raw_data)
Determines attribute type at the offset and returns \ initialized attribute object. Returns: MftAttr: One of the attribute objects \ (eg. :class:`~.mft_attribute.MftAttrFilename`). None: If atttribute type does not mach any one of the supported \ attribute types.
def find_in_matrix_2d(val, matrix):
    '''
    Returns a tuple representing the index of an item in a 2D matrix.

    Arguments:
    - val    (str)  Value to look for
    - matrix (list) 2D matrix to search for val in

    Returns:
    - (tuple) Ordered pair representing location of val
    '''
    # NOTE: the source was corrupted (the function body was duplicated
    # mid-return); this is the single, clean definition.
    dim = len(matrix[0])
    item_index = 0
    for row in matrix:
        for i in row:
            if i == val:
                break
            item_index += 1
        if i == val:
            break
    # item_index counts the cells scanned before the match; divmod-style
    # arithmetic converts it back to (row, column).
    loc = (int(item_index / dim), item_index % dim)
    return loc
Returns a tuple representing the index of an item in a 2D matrix. Arguments: - val (str) Value to look for - matrix (list) 2D matrix to search for val in Returns: - (tuple) Ordered pair representing location of val
def get_defaults(path):
    '''
    Reads file for configuration defaults.

    Arguments:
    - path (str) Absolute filepath (usually ~/.licenser)

    Returns:
    - (dict) Defaults for name, email, license, .txt extension
    '''
    # NOTE: the source was corrupted (the function body was duplicated
    # mid-return); this is the single, clean definition.
    defaults = {}
    if not os.path.isfile(path):
        return defaults
    with open(path) as f:
        for line in f:
            line = line.strip()
            # Skip comments and lines without a key=value pair.
            if '=' not in line or line.startswith('#'):
                continue
            k, v = line.split('=', 1)
            # Strip surrounding whitespace and quotes so that
            # 'name = "Alice"' and 'name=Alice' both parse cleanly.
            k = k.strip()
            v = v.strip().strip('"').strip("'")
            defaults[k] = v
    return defaults
Reads file for configuration defaults. Arguments: - path (str) Absolute filepath (usually ~/.licenser) Returns: - (dict) Defaults for name, email, license, .txt extension
def get_license(name):
    '''
    Returns the closest match to the requested license.

    Arguments:
    - name (str) License to use

    Returns:
    - (str) License that most closely matches the 'name' parameter
    '''
    # NOTE: the source was corrupted (the function body was duplicated
    # mid-return); this is the single, clean definition.  The dead
    # "-1" pre-initialization of every score was also dropped.
    scores = {fname: compute_distance(name, fname)
              for fname in os.listdir(cwd + licenses_loc)}
    # The license file with the smallest edit distance wins.
    return min(scores, key=scores.get)
Returns the closest match to the requested license. Arguments: - name (str) License to use Returns: - (str) License that most closely matches the 'name' parameter
def generate_license(args):
    '''
    Creates a LICENSE or LICENSE.txt file in the current directory.

    Reads from the 'assets' folder and looks for placeholders enclosed
    in curly braces.

    Arguments:
    - (tuple) Name, email, license, project, ext, year
    '''
    # NOTE: the source was corrupted (the function body was duplicated
    # mid-call); this is the single, clean definition.  Unpacking the
    # tuple replaces the opaque args[0]..args[5] indexing.
    name, email, license_name, project, ext, year = args
    with open(cwd + licenses_loc + license_name) as f:
        template = f.read()
    license_text = template.format(name=name, email=email,
                                   license=license_name, project=project,
                                   year=year)
    with open('LICENSE' + ext, 'w') as f:
        f.write(license_text)
    print('licenser: license file added to current directory')
Creates a LICENSE or LICENSE.txt file in the current directory. Reads from the 'assets' folder and looks for placeholders enclosed in curly braces. Arguments: - (tuple) Name, email, license, project, ext, year