code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def is_playing_shared_game(self, steamID, appid_playing, format=None):
    """Return the lender's SteamID if the game currently played is borrowed.

    steamID: the user's ID
    appid_playing: the app ID of the game the player is currently playing
    format: return format; None defaults to json (json, xml, vdf)
    """
    params = {'steamid': steamID, 'appid_playing': appid_playing}
    if format is not None:
        params['format'] = format
    request_url = self.create_request_url(
        self.interface, 'IsPlayingSharedGame', 1, params)
    response = self.retrieve_request(request_url)
    return self.return_data(response, format=format)
Returns valid lender SteamID if game currently played is borrowed. steamID: The users ID appid_playing: The game player is currently playing format: Return format. None defaults to json. (json, xml, vdf)
def get_server_info(self, format=None):
    """Request the Steam Web API status and time.

    format: return format; None defaults to json (json, xml, vdf)
    """
    params = {}
    if format is not None:
        params['format'] = format
    request_url = self.create_request_url(
        self.interface, 'GetServerInfo', 1, params)
    response = self.retrieve_request(request_url)
    return self.return_data(response, format=format)
Request the Steam Web API status and time. format: Return format. None defaults to json. (json, xml, vdf)
def create_request_url(self, profile_type, steamID):
    """Create the url to submit to the Steam Community XML feed.

    A steamID of 17 or more digits is treated as a numeric 64-bit ID
    (profiles/gid URLs); anything else is treated as a vanity/custom
    name (id/groups URLs).  An unknown profile_type leaves ``url``
    unbound and raises UnboundLocalError, as before.
    """
    # Raw string fixes the invalid "\d" escape sequence warning.
    regex = re.compile(r'^\d{17,}$')
    if regex.match(steamID):
        if profile_type == self.USER:
            url = "http://steamcommunity.com/profiles/%s/?xml=1" % (steamID)
        elif profile_type == self.GROUP:
            url = "http://steamcommunity.com/gid/%s/memberslistxml/?xml=1" % (steamID)
    else:
        if profile_type == self.USER:
            url = "http://steamcommunity.com/id/%s/?xml=1" % (steamID)
        elif profile_type == self.GROUP:
            url = "http://steamcommunity.com/groups/%s/memberslistxml/?xml=1" % (steamID)
    return url
Create the url to submit to the Steam Community XML feed.
def get_user_info(self, steamID):
    """Request the Steam Community XML feed for a specific user."""
    feed_url = self.create_request_url(self.USER, steamID)
    feed_data = self.retrieve_request(feed_url)
    return self.return_data(feed_data, format='xml')
Request the Steam Community XML feed for a specific user.
def get_group_info(self, steamID):
    """Request the Steam Community XML feed for a specific group."""
    feed_url = self.create_request_url(self.GROUP, steamID)
    feed_data = self.retrieve_request(feed_url)
    return self.return_data(feed_data, format='xml')
Request the Steam Community XML feed for a specific group.
def Deserializer(stream_or_string, **options):
    """Deserialize a stream or string of line-delimited JSON data.

    Each input line is parsed as one JSON document and fed to
    PythonDeserializer; deserialized objects are yielded lazily.
    """
    if isinstance(stream_or_string, (bytes, six.string_types)):
        # NOTE(review): a text str reaches BytesIO unencoded here —
        # presumably callers pass bytes on Python 3; confirm.
        stream_or_string = BytesIO(stream_or_string)
    try:
        def line_generator():
            # One JSON document per input line.
            for line in stream_or_string:
                yield json.loads(line.strip())
        for obj in PythonDeserializer(line_generator(), **options):
            yield obj
    except GeneratorExit:
        # Do not convert normal generator shutdown into an error.
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
Deserialize a stream or string of JSON data.
def request(self, method, url, data=None, files=None, query=None, headers=None, timeout=60):
    """Mechanism for issuing an API call.

    Resolves an auth token (an explicit ``self.token`` wins over the
    module-level ``figure.token``), dispatches to the private HTTP-verb
    helper matching ``method``, and returns the interpreted response.
    Raises ``error.APIConnectionError`` on transport failure; an
    unsupported ``method`` raises KeyError.  Python 2 stdlib spellings
    (``urlparse`` / ``urllib.urlencode``) are used.
    """
    headers = headers or {}
    # headers = headers.copy()  # ? We do modify the dict in place here...
    if self.token:
        my_token = self.token
    else:
        # Late import so a token configured after module import is seen.
        from figure import token
        my_token = token
    if my_token:
        self.__set_authorization(headers, my_token)
    METHODS = {
        'get': self.__get,
        'post': self.__post,
        'put': self.__put,
        'head': self.__head,
        'patch': self.__patch
    }
    request_method = METHODS[method.lower()]
    abs_url = urlparse.urljoin(self.api_base, url)
    encoded_query = urllib.urlencode(query or {})
    abs_url = _build_api_url(abs_url, encoded_query)
    try:
        response = request_method(abs_url, data=data, files=files,
                                  headers=headers, timeout=timeout)
        response.encoding = 'utf-8'
    except RequestException:
        raise error.APIConnectionError()
    return self._interpret_response(response.text, response.status_code)
Mechanism for issuing an API call
def export(cls, folder, particles, datetimes):
    """Export trackline data to a GeoJSON file.

    The centroid of all particle positions at each timestep forms the
    track; the resulting LineString is written to
    ``<folder>/trackline.geojson``.  Returns the file path.
    """
    normalized_locations = [particle.normalized_locations(datetimes)
                            for particle in particles]
    track_coords = []
    for i in xrange(0, len(datetimes)):
        points = MultiPoint([loc[i].point.coords[0]
                             for loc in normalized_locations])
        track_coords.append(points.centroid.coords[0])
    ls = LineString(track_coords)
    if not os.path.exists(folder):
        os.makedirs(folder)
    filepath = os.path.join(folder, "trackline.geojson")
    # "with" guarantees the handle is closed even if serialization
    # raises (the original leaked the file object on error).
    with open(filepath, "wb") as f:
        f.write(json.dumps(mapping(ls)))
    return filepath
Export trackline data to GeoJSON file
def export(cls, folder, particles, datetimes):
    """Export particle and datetime data to pickled objects.

    This can be used to debug or to generate different output in the
    future.  Creates ``folder`` if needed and writes
    ``particles.pickle`` and ``datetimes.pickle`` inside it.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    # "with" closes the handles even if pickling raises (the original
    # leaked the file objects on error).
    particle_path = os.path.join(folder, 'particles.pickle')
    with open(particle_path, "wb") as f:
        pickle.dump(particles, f)
    datetimes_path = os.path.join(folder, 'datetimes.pickle')
    with open(datetimes_path, "wb") as f:
        pickle.dump(datetimes, f)
Export particle and datetime data to Pickled objects. This can be used to debug or to generate different output in the future.
def import_settings_class(setting_name):
    """Return the class pointed to by an app setting variable.

    Raises ImproperlyConfigured when the setting's value is None.
    """
    value = getattr(settings, setting_name)
    if value is None:
        raise ImproperlyConfigured(
            "Required setting not found: {0}".format(setting_name))
    return import_class(value, setting_name)
Return the class pointed to by an app setting variable.
def import_class(import_path, setting_name=None):
    """Import a class by dotted name.

    Raises ImproperlyConfigured when the module or class is missing;
    the message mentions ``setting_name`` when provided.
    """
    mod_name, class_name = import_path.rsplit('.', 1)
    mod = import_module_or_none(mod_name)
    if mod is not None:
        try:
            return getattr(mod, class_name)
        except AttributeError:
            pass
    # Missing module and missing attribute raise the same exception.
    if setting_name:
        raise ImproperlyConfigured(
            "{0} does not point to an existing class: {1}".format(
                setting_name, import_path))
    raise ImproperlyConfigured("Class not found: {0}".format(import_path))
Import a class by name.
def import_apps_submodule(submodule):
    """Return the names of all installed apps that provide ``submodule``
    (e.g. ".pagetype_plugins")."""
    return [
        appconfig.name
        for appconfig in apps.get_app_configs()
        if import_module_or_none(
            '{0}.{1}'.format(appconfig.name, submodule)) is not None
    ]
Look for a submodule in a series of packages, e.g. ".pagetype_plugins" in all INSTALLED_APPS.
def import_module_or_none(module_label):
    """Import the module with the given name.

    Returns None if the module doesn't exist, but propagates ImportError
    raised from *within* an existing module.
    """
    try:
        # On Python 3, importlib has much more functionality compared to Python 2.
        return importlib.import_module(module_label)
    except ImportError:
        # Based on code from django-oscar:
        # There are 2 reasons why there could be an ImportError:
        #
        #  1. Module does not exist. In that case, we ignore the import and return None
        #  2. Module exists but another ImportError occurred when trying to import the module.
        #     In that case, it is important to propagate the error.
        #
        # ImportError does not provide easy way to distinguish those two cases.
        # Fortunately, the traceback of the ImportError starts at __import__
        # statement. If the traceback has more than one frame, it means that
        # application was found and ImportError originates within the local app
        __, __, exc_traceback = sys.exc_info()
        frames = traceback.extract_tb(exc_traceback)
        # Strip import-machinery frames so only "real" app frames count.
        frames = [f for f in frames
                  if f[0] != "<frozen importlib._bootstrap>" and  # Python 3.6
                  f[0] != IMPORT_PATH_IMPORTLIB and
                  not f[0].endswith(IMPORT_PATH_GEVENT) and
                  not IMPORT_PATH_PYDEV in f[0]]
        if len(frames) > 1:
            raise
    return None
Imports the module with the given name. Returns None if the module doesn't exist, but propagates import errors raised from within deeper modules.
def get_logger(name):
    """Return a logger for *name* with a NullHandler attached, so that
    library users see no "no handler" warnings by default.

    Arguments:
        name -- the name you wish to log as
    Returns:
        A logger!
    """
    result = logging.getLogger(name)
    result.addHandler(logging.NullHandler())
    return result
Gets a logger Arguments: name - the name you wish to log as Returns: A logger!
def get_best_single_experiments(nets, expvars):
    '''
    Returns the best single experiments as a ``TermSet`` object [instance].

    Runs the ASP programs over the network and experiment-variable term
    files with clasp optimization options, then removes the temporary
    files.  (The source contained two concatenated copies of this
    function, the first truncated mid-``return``; this is the single
    reconstructed definition.)
    '''
    netsf = nets.to_file()
    expvarsf = expvars.to_file()
    i = 1  # single experiment
    num_exp = String2TermSet('pexperiment(' + str(i) + ')')
    num_expf = num_exp.to_file()
    prg = [netsf, expvarsf, num_expf, find_best_exp_sets_prg, elem_path_prg]
    coptions = '--project --opt-mode=optN --opt-strategy=0 --opt-heuristic'
    solver = GringoClasp(clasp_options=coptions)
    solutions = solver.run(prg, collapseTerms=True, collapseAtoms=False)
    # clean up the temporary term files
    os.unlink(num_expf)
    os.unlink(netsf)
    os.unlink(expvarsf)
    return solutions
Returns the experiments as a ``TermSet`` object [instance].
async def handler(self):
    '''Handle loop, get and process messages.

    Connects to ``self.url`` and dispatches each received message to
    every callback in ``self.on_message``; coroutine callbacks are
    awaited.  (The source contained two garbled, concatenated copies of
    this coroutine; this is the single reconstructed definition.)
    '''
    self.ws = await websockets.connect(self.url, ssl=self.ssl)
    while self.ws:
        message = await self.ws.recv()
        for handle in self.on_message:
            if asyncio.iscoroutinefunction(handle):
                await handle(self, message)
            else:
                handle(self, message)
Handle loop, get and process messages
async def get(self, path, **query):
    '''return a get request

    Parameters
    ----------
    path : str
        same as get_url
    query : kargs dict
        additional info to pass to get_url

    See Also
    --------
    get_url :
    getJson :

    Returns
    -------
    requests.models.Response
        the response that was given

    Raises
    ------
    aiohttp.ClientConnectionError
        after ``self.tries`` failed connection attempts.
    '''
    # (The source contained two concatenated copies of this coroutine,
    # the first truncated; this is the single reconstructed definition.)
    url = self.get_url(path, **query)
    for attempt in range(self.tries + 1):
        try:
            resp = await self.session.get(url, timeout=self.timeout)
            if await self._process_resp(resp):
                return resp
            # unacceptable response: retry
            continue
        except aiohttp.ClientConnectionError:
            if attempt >= self.tries:
                raise aiohttp.ClientConnectionError(
                    'Emby server is probably down'
                )
return a get request Parameters ---------- path : str same as get_url query : kargs dict additional info to pass to get_url See Also -------- get_url : getJson : Returns ------- requests.models.Response the response that was given
async def getJson(self, path, **query):
    '''wrapper for get, parses response as json

    Parameters
    ----------
    path : str
        same as get_url
    query : kargs dict
        additional info to pass to get_url

    See Also
    --------
    get_url :
    get :

    Returns
    -------
    dict
        the response content as a dict
    '''
    # (The source contained two concatenated copies of this coroutine;
    # this is the single reconstructed definition.)
    for attempt in range(self.tries + 1):
        try:
            return await (await self.get(path, **query)).json()
        except Exception:
            # Was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit between retries.
            if attempt >= self.tries:
                raise
wrapper for get, parses response as json Parameters ---------- path : str same as get_url query : kargs dict additional info to pass to get_url See Also -------- get_url : get : Returns ------- dict the response content as a dict
def filter_by_missing(max_miss=0.01):
    """Return a function that filters variants by missingness, keeping
    columns whose fraction of NaN calls is below ``max_miss``
    (default 0.01)."""
    def _filter(G, bim):
        keep = sp.isnan(G).mean(0) < max_miss
        return G[:, keep], bim[keep]
    return _filter
return function that filters by missing values (takes maximum fraction of missing values, default is 0.01)
def filter_by_maf(min_maf=0.01):
    """Return a function that filters variants by minor allele
    frequency, keeping columns with maf > ``min_maf`` (default 0.01).

    Frequencies are computed as ``0.5 * mean`` (0/1/2 genotype coding)
    and folded onto the minor allele.
    """
    def _filter(G, bim):
        freq = 0.5 * G.mean(0)
        # fold frequencies above 0.5 onto the minor allele
        freq[freq > 0.5] = 1.0 - freq[freq > 0.5]
        keep = freq > min_maf
        return G[:, keep], bim[keep]
    return _filter
return function that filters by maf (takes minimum maf, default is 0.01)
def standardize():
    """Return the variant standardize function; ``bim`` passes through
    unchanged."""
    def _standardize(G, bim):
        return standardize_snps(G), bim
    return _standardize
Return the variant standardize function.
def impute(imputer):
    """Return an impute function applying ``imputer.fit_transform`` to
    G; ``bim`` passes through unchanged."""
    def _impute(G, bim):
        G_filled = imputer.fit_transform(G)
        return G_filled, bim
    return _impute
return impute function
def compose(func_list):
    """Return the left-to-right composition of the given preprocessing
    functions; each takes and returns ``(G, bim)``."""
    def _composed(G, bim):
        for step in func_list:
            G, bim = step(G, bim)
        return G, bim
    return _composed
Composition of preprocessing functions.
def asdict(self):
    """Copy the data back out of here and into a dict, then return it.

    Nested DictionaryObject values are converted recursively.  Some
    libraries check specifically for dict objects (e.g. json), so this
    makes it convenient to get the data back out.
    """
    result = {}
    for key in self._items:
        entry = self._items[key]
        result[key] = entry.asdict() if isinstance(entry, DictionaryObject) else entry
    return result
Copy the data back out of here and into a dict. Then return it. Some libraries may check specifically for dict objects, such as the json library; so, this makes it convenient to get the data back out. >>> import dictobj >>> d = {'a':1, 'b':2} >>> dictobj.DictionaryObject(d).asdict() == d True >>> d['c'] = {1:2, 3:4} >>> dictobj.DictionaryObject(d).asdict() == d True
def get_joke():
    """Return a joke from one of the random services, retrying until a
    randomly chosen service yields a non-None result."""
    joke = None
    while joke is None:
        joke = load_joke(randint(1, NUM_SERVICES))
    return joke
Return a joke from one of the randomly selected services.
def load_joke(service_num=1):
    """Pull a joke from the service selected by ``service_num``.

    Each service returns a string on success or None otherwise; an
    unknown ``service_num`` returns None.
    """
    services = {
        1: ronswanson.get_joke,
        2: chucknorris.get_joke,
        3: catfacts.get_joke,
        4: dadjokes.get_joke,
    }
    service = services.get(service_num)
    # Only invoke the selected service.  The original built a dict of
    # *call results*, eagerly hitting all four services (four network
    # requests, and any one failure broke every lookup).
    return service() if service is not None else None
Pulls the joke from the service based on the argument. It is expected that all services used will return a string when successful or None otherwise.
def read_14c(fl):
    """Create a CalibCurve instance from a Bacon-format 14C curve file.

    The first 11 rows are treated as header and skipped; the remaining
    comma-separated columns are calbp, c14age, error, delta14c, sigma.
    """
    indata = pd.read_csv(fl, index_col=None, skiprows=11, header=None,
                         names=['calbp', 'c14age', 'error', 'delta14c', 'sigma'])
    outcurve = CalibCurve(calbp=indata['calbp'],
                          c14age=indata['c14age'],
                          error=indata['error'],
                          delta14c=indata['delta14c'],
                          sigma=indata['sigma'])
    return outcurve
Create CalibCurve instance from Bacon curve file
def read_chron(fl):
    """Create a ChronRecord instance from a Bacon core file.

    The separator regex tolerates whitespace around commas; expected
    columns are age, error, depth and labID.
    """
    indata = pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python')
    outcore = ChronRecord(age=indata['age'],
                          error=indata['error'],
                          depth=indata['depth'],
                          labid=indata['labID'])
    return outcore
Create ChronRecord instance from Bacon file
def read_proxy(fl):
    """Read a CSV file (commas with optional surrounding whitespace)
    into a ProxyRecord instance."""
    outcore = ProxyRecord(data=pd.read_csv(fl, sep=r'\s*\,\s*',
                                           index_col=None, engine='python'))
    return outcore
Read a file to create a proxy record instance
def to_pandas(self):
    """Convert the record to a long-format pandas.DataFrame.

    Produces one row per (depth, MC ensemble member) with the member
    index in column 'mciter'; that column is dropped when there is only
    a single ensemble member.
    """
    agedepthdf = pd.DataFrame(self.age, index=self.data.depth)
    agedepthdf.columns = list(range(self.n_members()))
    out = (agedepthdf.join(self.data.set_index('depth'))
           .reset_index()
           .melt(id_vars=self.data.columns.values,
                 var_name='mciter', value_name='age'))
    # melt leaves the member index as strings; make it numeric
    out['mciter'] = pd.to_numeric(out.loc[:, 'mciter'])
    if self.n_members() == 1:
        out = out.drop('mciter', axis=1)
    return out
Convert record to pandas.DataFrame
def _get_token_type(self, char):
    """Classify *char* into a 2-tuple (behaviour, type).

    behaviours: SPLIT, JOIN, IGNORE; the second element is a numeric
    token type.  Raises ValueError for characters that cannot be
    tokenised.
    """
    literal_tokens = {
        '(': (self.SPLIT, 0), ')': (self.SPLIT, 0),
        ',': (self.SPLIT, 1),
        '<': (self.IGNORE, 2), '>': (self.IGNORE, 2),
        '.': (self.JOIN, 3),
        '^': (self.JOIN, 6),
    }
    if char in literal_tokens:
        return literal_tokens[char]
    if char.isdigit():
        return self.JOIN, 4
    if char.isalpha():
        return self.JOIN, 5
    if char.isspace():
        return self.IGNORE, 7
    raise ValueError('TokeniserException: "{}" can not be tokenised'.format(char))
Returns a 2-tuple (behaviour, type). behaviours: 0 - join 1 - split 2 - ignore
def request(method, url, **kwargs):
    """Issue an HTTP request, handling Kerberos Negotiate auth.

    Pass ``auth=TreqKerberosAuth()`` kwarg.  This is an
    inlineCallbacks-style generator: it yields Deferreds and finishes
    via ``defer.returnValue``.
    """
    auth = kwargs.get('auth')
    headers = kwargs.get('headers', {})
    # headers = headers.copy()  # ? We do modify the dict in place here...
    if isinstance(auth, TreqKerberosAuth):
        del kwargs['auth']
        if auth.force_preemptive:
            # Save a round-trip and set the Negotiate header on the first req.
            headers['Authorization'] = yield negotiate_header(url)
    response = yield treq.request(method=method, url=url, headers=headers, **kwargs)
    # Retry if we got a 401 / Negotiate response.
    if response.code == 401 and isinstance(auth, TreqKerberosAuth):
        auth_mechs = response.headers.getRawHeaders('WWW-Authenticate')
        if 'Negotiate' in auth_mechs:
            headers['Authorization'] = yield negotiate_header(url)
            response = yield treq.request(method=method, url=url, headers=headers, **kwargs)
    defer.returnValue(response)
Pass auth=HTTPKerberosAuth() kwarg
def negotiate_header(url):
    """Return the "Authorization" HTTP header value to use for this URL.

    inlineCallbacks-style generator producing ``"Negotiate <token>"``.
    """
    hostname = urlparse(url).hostname
    _, krb_context = kerberos.authGSSClientInit('HTTP@%s' % hostname)
    # authGSSClientStep goes over the network to the KDC (ie blocking).
    yield threads.deferToThread(kerberos.authGSSClientStep, krb_context, '')
    negotiate_details = kerberos.authGSSClientResponse(krb_context)
    defer.returnValue('Negotiate ' + negotiate_details)
Return the "Authorization" HTTP header value to use for this URL.
def _get_specification(self, specification):
    """Read the specification provided, which can either be a url or a
    file location, and return its parsed JSON."""
    parsed_url = six.moves.urllib.parse.urlparse(specification)
    # http(s) schemes are fetched over the network; anything else is
    # treated as a local file path.
    if parsed_url.scheme in ['http', 'https']:
        return requests.get(specification).json()
    with open(specification, 'r') as spec_file:
        return json.load(spec_file)
Read the specification provided. It can either be a url or a file location.
def _get_headers(self, resource):
    """Get CSV file headers from the provided resource.

    Python 2 only: relies on the ``file`` type, ``basestring``,
    ``cStringIO`` and iterator ``.next()``.
    """
    # If the resource is a file we just open it up with the csv
    # reader (after being sure we're reading from the beginning
    # of the file
    if type(resource) == file:
        resource.seek(0)
        reader = csv.reader(resource)
    # If the resource is a basestring it is either a url or a file
    # location, so similarly to the specification mechanism we either
    # access it with an HTTP get request or by opening the file.
    elif isinstance(resource, basestring):
        result = six.moves.urllib.parse.urlparse(resource)
        if result.scheme in ['http', 'https']:
            with closing(requests.get(resource, stream=True)) as response:
                # Headers are alway the first row of a CSV file
                # so it's enought to just get the first line and
                # hopefully save bandwidth
                header_row = response.iter_lines().next()
        else:
            # It may seem weird to open up a csv file, read its header row
            # and then StringIO that into a new csv reader but this file
            # we want to close and we want the same interface for all
            with open(resource) as resource_file:
                reader = csv.reader(resource_file)
                header_row = reader.next()
        reader = csv.reader(cStringIO.StringIO(header_row))
    else:
        raise IOError('Resource type not supported')
    return reader.next()
Get CSV file headers from the provided resource.
def schema(self):
    """The generated budget data package schema for this resource.

    Raises NoResourceLoadedException when headers have not been loaded,
    and NotABudgetDataPackageException when a header is missing from
    the specification's field definitions.
    """
    if self.headers is None:
        raise exceptions.NoResourceLoadedException(
            'Resource must be loaded to find schema')
    fields = self.specification.get('fields', {})
    try:
        field_entries = []
        for header in self.headers:
            spec_field = fields[header]
            field_entries.append({
                'name': header,
                'type': spec_field['type'],
                'description': spec_field['description'],
            })
    except KeyError:
        raise exceptions.NotABudgetDataPackageException(
            'Includes other fields than the Budget Data Package fields')
    return {'primaryKey': 'id', 'fields': field_entries}
The generated budget data package schema for this resource. If the resource has any fields that do not conform to the provided specification this will raise a NotABudgetDataPackageException.
def get_time(self, idx=0):
    """Return the time of the tif data as seconds since the epoch.

    The timestamp string is stored in the "61238" tag ("date & heure"
    in the "acquisition info" meta section), formatted like
    '2016-04-29_17h31m35s.00827'; the fractional part is in units of
    10 microseconds.  Returns NaN when the tag is missing.
    """
    timestr = SingleTifPhasics._get_meta_data(path=self.path,
                                              section="acquisition info",
                                              name="date & heure")
    if timestr is not None:
        timestr = timestr.split(".")
        # '2016-04-29_17h31m35s.00827'
        structtime = time.strptime(timestr[0], "%Y-%m-%d_%Hh%Mm%Ss")
        fracsec = float(timestr[1]) * 1e-5
        # use calendar, because we need UTC
        thetime = calendar.timegm(structtime) + fracsec
    else:
        thetime = np.nan
    return thetime
Return the time of the tif data since the epoch The time is stored in the "61238" tag.
def verify(path):
    """Verify that `path` is a phasics phase/intensity TIFF file.

    Accepts only 3-page TIFFs carrying the phasics meta tags ("61243",
    "61242", "61238") plus "max_sample_value" on the first page, with
    distinct "61242" values on pages 0 and 1.
    """
    valid = False
    try:
        tf = SingleTifPhasics._get_tif(path)
    except (ValueError, IsADirectoryError):
        # unreadable path or directory: simply not a phasics file
        pass
    else:
        if (len(tf) == 3 and
                "61243" in tf.pages[0].tags and
                "61242" in tf.pages[0].tags and
                "61238" in tf.pages[0].tags and
                "61243" in tf.pages[1].tags and
                "61242" in tf.pages[1].tags and
                "max_sample_value" in tf.pages[0].tags and
                (tf.pages[0].tags["61242"].value !=
                 tf.pages[1].tags["61242"].value)):
            valid = True
    return valid
Verify that `path` is a phasics phase/intensity TIFF file
def _init(
        self, default_prefix='_', fext=TMPL_FN_EXT,
        req_tmpl_name=REQ_TMPL_NAME, text_prefix=REQUIREJS_TEXT_PREFIX,
        auto_reload=False, *a, **kw):
    """Initialize registry state: prefixes, template extension/name and
    the auto-reload flag.  Extra positional/keyword arguments are
    accepted and ignored."""
    self.default_prefix = default_prefix
    self.fext = fext
    self.req_tmpl_name = req_tmpl_name
    self.text_prefix = text_prefix
    self.auto_reload = auto_reload
    # caches populated lazily elsewhere
    self.molds = {}
    self.tracked_entry_points = {}
Arguments: registry_name The name of this registry.
def _generate_and_store_mold_id_map(self, template_map, molds):
    """Produce a list of all valid mold_ids from the input template
    keys, storing mold_id -> path entries into ``molds`` as a side
    effect.

    Not a pure generator expression as this has the side effect of
    storing the resulting id and map it into a local dict.  Internal
    function; NOT meant to be used outside of this class.
    """
    name = self.req_tmpl_name
    # NOTE(review): reverse-sorting presumably controls which duplicate
    # wins when ids collide — confirm against callers.
    for key in sorted(template_map.keys(), reverse=True):
        # a valid key looks like "<text_prefix><mold_id>/<name>"
        if len(key.split('/')) == 3 and key.endswith(name):
            mold_id = key[len(self.text_prefix):-len(name) - 1]
            molds[mold_id] = template_map[key][:-len(name) - 1]
            yield mold_id
Not a pure generator expression as this has the side effect of storing the resulting id and map it into a local dict. Produces a list of all valid mold_ids from the input template_keys. Internal function; NOT meant to be used outside of this class.
def mold_id_to_path(self, mold_id, default=_marker):
    """Lookup the filesystem path of a mold identifier.

    Resolution order: the ``self.molds`` cache, then the tracked entry
    point whose name matches the mold_id prefix.  When resolution
    fails, return ``default``; with no default given, raise KeyError.
    """
    def handle_default(debug_msg=None):
        # Central fallthrough: optionally log, then raise or return default.
        if debug_msg:
            logger.debug('mold_id_to_path:' + debug_msg, mold_id)
        if default is _marker:
            raise KeyError(
                'Failed to lookup mold_id %s to a path' % mold_id)
        return default

    result = self.molds.get(mold_id)
    if result:
        return result
    if not self.tracked_entry_points:
        return handle_default()
    try:
        # a well-formed mold_id is "<prefix>/<basename>"
        prefix, mold_basename = mold_id.split('/')
    except ValueError:
        return handle_default(
            'mold_id %s not found and not in standard format')
    entry_point = self.tracked_entry_points.get(prefix)
    if entry_point is None:
        return handle_default()
    return join(self._entry_point_to_path(entry_point), mold_basename)
Lookup the filesystem path of a mold identifier.
def lookup_path(self, mold_id_path, default=_marker):
    """For the given mold_id_path, look up the mold_id and translate
    that path to its filesystem equivalent.

    Fragments containing path separators or ".." are rejected (via
    KeyError) to block path traversal; with a ``default`` given,
    failures return it instead of raising.
    """
    fragments = mold_id_path.split('/')
    # the first two fragments form the mold_id
    mold_id = '/'.join(fragments[:2])
    try:
        subpath = []
        for piece in fragments[2:]:
            if (sep in piece or (altsep and altsep in piece) or
                    piece == pardir):
                # separator injection or parent-dir traversal
                raise KeyError
            elif piece and piece != '.':
                subpath.append(piece)
        path = self.mold_id_to_path(mold_id)
    except KeyError:
        if default is _marker:
            raise
        return default
    return join(path, *subpath)
For the given mold_id_path, look up the mold_id and translate that path to its filesystem equivalent.
def verify_path(self, mold_id_path):
    """Lookup and verify path, raising an ENOENT OSError when the
    mold_id_path cannot be resolved or does not exist on disk."""
    try:
        path = self.lookup_path(mold_id_path)
        if not exists(path):
            # treat a resolvable-but-missing path like a failed lookup
            raise KeyError
    except KeyError:
        raise_os_error(ENOENT)
    return path
Lookup and verify path.
def _get_skippers(configure, file_name=None):
    """Returns the skippers of configuration.

    :param configure: The configuration of HaTeMiLe.
    :type configure: hatemile.util.configure.Configure
    :param file_name: The file path of skippers configuration.
    :type file_name: str
    :return: The skippers of configuration.
    :rtype: list(dict(str, str))
    """
    if file_name is None:
        # default to skippers.xml three directories above this module
        base = os.path.dirname(os.path.dirname(
            os.path.dirname(os.path.realpath(__file__))
        ))
        file_name = os.path.join(base, 'skippers.xml')
    document = minidom.parse(file_name)
    root = document.getElementsByTagName('skippers')[0]
    skippers = []
    for node in root.getElementsByTagName('skipper'):
        skippers.append({
            'selector': node.attributes['selector'].value,
            'description': configure.get_parameter(
                node.attributes['description'].value
            ),
            'shortcut': node.attributes['shortcut'].value
        })
    return skippers
Returns the skippers of configuration. :param configure: The configuration of HaTeMiLe. :type configure: hatemile.util.configure.Configure :param file_name: The file path of skippers configuration. :type file_name: str :return: The skippers of configuration. :rtype: list(dict(str, str))
def _generate_list_skippers(self):
    """Generate the list of skippers of page.

    Ensures a container div with the skippers id exists (prepending it
    to <body> if needed), then finds or creates the <ul> inside it.

    :return: The list of skippers of page.
    :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    container = self.parser.find(
        '#' + AccessibleNavigationImplementation.ID_CONTAINER_SKIPPERS
    ).first_result()
    html_list = None
    if container is None:
        local = self.parser.find('body').first_result()
        if local is not None:
            container = self.parser.create_element('div')
            container.set_attribute(
                'id',
                AccessibleNavigationImplementation.ID_CONTAINER_SKIPPERS
            )
            local.prepend_element(container)
    if container is not None:
        html_list = self.parser.find(container).find_children(
            'ul'
        ).first_result()
        if html_list is None:
            html_list = self.parser.create_element('ul')
            container.append_element(html_list)
    # NOTE(review): flag set whenever this runs, not only on creation —
    # the flattened source makes the original indentation ambiguous.
    self.list_skippers_added = True
    return html_list
Generate the list of skippers of page. :return: The list of skippers of page. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
def _get_heading_level(self, element):
    # pylint: disable=no-self-use
    """Returns the level of heading (1-6), or -1 for a non-heading tag.

    :param element: The heading.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: The level of heading.
    :rtype: int
    """
    levels = {'H1': 1, 'H2': 2, 'H3': 3, 'H4': 4, 'H5': 5, 'H6': 6}
    return levels.get(element.get_tag_name(), -1)
Returns the level of heading. :param element: The heading. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The level of heading. :rtype: int
def _is_valid_heading(self):
    """Check that the headings of page are syntactically correct:
    at most one H1 and no skipped levels.  Also sets
    ``self.validate_heading = True`` as a side effect.

    :return: True if the headings of page are valid, False if not.
    :rtype: bool
    """
    elements = self.parser.find('h1,h2,h3,h4,h5,h6').list_results()
    last_level = 0
    count_main_heading = 0
    self.validate_heading = True
    for element in elements:
        level = self._get_heading_level(element)
        if level == 1:
            # a second top-level heading is invalid
            if count_main_heading == 1:
                return False
            else:
                count_main_heading = 1
        if (level - last_level) > 1:
            # heading levels may not be skipped (e.g. H2 -> H4)
            return False
        last_level = level
    return True
Check that the headings of the page are syntactically correct. :return: True if the headings of the page are syntactically correct or False if not. :rtype: bool
def _generate_anchor_for(self, element, data_attribute, anchor_class):
    """Generate an anchor for the element, linked back to it through
    ``data_attribute``; returns None when such an anchor already
    exists.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param data_attribute: The name of attribute that links the element
        with the anchor.
    :type data_attribute: str
    :param anchor_class: The HTML class of anchor.
    :type anchor_class: str
    :return: The anchor.
    :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    self.id_generator.generate_id(element)
    if self.parser.find(
        '[' + data_attribute + '="' + element.get_attribute('id') + '"]'
    ).first_result() is None:
        if element.get_tag_name() == 'A':
            # the element itself is already an anchor; reuse it
            anchor = element
        else:
            anchor = self.parser.create_element('a')
            self.id_generator.generate_id(anchor)
            anchor.set_attribute('class', anchor_class)
            element.insert_before(anchor)
        if not anchor.has_attribute('name'):
            anchor.set_attribute('name', anchor.get_attribute('id'))
        anchor.set_attribute(data_attribute, element.get_attribute('id'))
        return anchor
    return None
Generate an anchor for the element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param data_attribute: The name of attribute that links the element with the anchor. :type data_attribute: str :param anchor_class: The HTML class of anchor. :type anchor_class: str :return: The anchor. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
def _free_shortcut(self, shortcut):
    """Replace the access key of elements that currently hold
    ``shortcut``, reassigning the first alphanumeric key not used by
    any element.

    :param shortcut: The shortcut.
    :type shortcut: str
    """
    alpha_numbers = '1234567890abcdefghijklmnopqrstuvwxyz'
    elements = self.parser.find('[accesskey]').list_results()
    found = False
    for element in elements:
        shortcuts = element.get_attribute('accesskey').lower()
        if CommonFunctions.in_list(shortcuts, shortcut):
            # this element holds the shortcut; look for a free key
            for key in alpha_numbers:
                found = True
                for element_with_shortcuts in elements:
                    shortcuts = element_with_shortcuts.get_attribute(
                        'accesskey'
                    ).lower()
                    if CommonFunctions.in_list(shortcuts, key):
                        # key already taken elsewhere
                        found = False
                        break
                if found:
                    element.set_attribute('accesskey', key)
                    break
        if found:
            break
Replace the shortcut of elements, that has the shortcut passed. :param shortcut: The shortcut. :type shortcut: str
def get_scrim(path=None, auto_write=None, shell=None, script=None, cache={}):
    '''Get a :class:`Scrim` instance.  Each instance is cached so if you
    call get_scrim again with the same arguments you get the same
    instance.

    See also: :class:`Scrim`
    '''
    # The mutable default ``cache`` is deliberate: it is the shared
    # memoization table across all calls.  (The source contained two
    # concatenated copies of this function; this is the reconstruction.)
    args = (path, auto_write, shell, script)
    if args not in cache:
        cache[args] = Scrim(*args)
    return cache[args]
Get a :class:`Scrim` instance. Each instance is cached so if you call get_scrim again with the same arguments you get the same instance. See also: :class:`Scrim`
def _add(self, name, *args, **kwargs):
    '''Appends a command to the scrims list of commands.  You should not
    need to use this.'''
    # (The source contained two concatenated copies of this method;
    # this is the single reconstructed definition.)
    self.commands.append(Command(name, args, kwargs))
Appends a command to the scrims list of commands. You should not need to use this.
def to_string(self, shell=None):
    '''Use the command executor to retrieve the text of the scrim script
    compatible with the provided shell.  If no shell is provided, use
    :attr:`scrim.shell`.

    Attributes:
        shell (str): Which shell should we return a script for
            cmd.exe, powershell.exe, bash...

    Returns:
        str
    '''
    # (The source contained two concatenated copies of this method;
    # this is the single reconstructed definition.)
    shell = shell or self.shell
    lines = []
    for c in self.commands:
        text = self.command_executor(c, shell)
        if text is not None:
            lines.append(text)
    return '\n'.join(lines)
Use the command executor to retrieve the text of the scrim script compatible with the provided shell. If no shell is provided, use :attr:`scrim.shell`. Attributes: shell (str): Which shell should we return a script for cmd.exe, powershell.exe, bash... Returns: str
def write(self):
    '''Write this Scrims commands to its path.

    Raises Exception when :attr:`Scrim.path` is unset, and OSError when
    the output directory cannot be created.
    '''
    # (The source contained two concatenated copies of this method;
    # this is the single reconstructed definition.)
    if self.path is None:
        raise Exception('Scrim.path is None')
    dirname = os.path.dirname(os.path.abspath(self.path))
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            # was a bare ``except:``, which also hid KeyboardInterrupt;
            # makedirs failures surface as OSError
            raise OSError('Failed to create root for scrim output.')
    with open(self.path, 'w') as f:
        f.write(self.to_string())
Write this Scrims commands to its path
def on_exit(self):
    '''atexit callback.  If :attr:`Scrim.auto_write` is True write the
    scrim to :attr:`Scrim.path` as :attr:`Scrim.shell`'''
    # (The source contained two concatenated copies of this method;
    # this is the single reconstructed definition.)
    if not all([self.auto_write, self.commands, self.script, self.path]):
        return
    self.write()
atexit callback. If :attr:`Scrim.auto_write` is True write the scrim to :attr:`Scrim.path` as :attr:`Scrim.shell`
def remove_orphans(self, instance, **kwargs):
    """When an item is deleted, delete any Activity object that has been
    created on its behalf; silently ignore items with no Activity."""
    from activity_monitor.models import Activity
    content_type = ContentType.objects.get_for_model(instance)
    try:
        Activity.objects.get(content_type=content_type,
                             object_id=instance.pk).delete()
    except Activity.DoesNotExist:
        return
When an item is deleted, first delete any Activity object that has been created on its behalf.
def follow_model(self, model):
    """Follow a particular model class, updating associated Activity
    objects automatically via post_save/post_delete signals.  A falsy
    ``model`` is ignored."""
    if not model:
        return
    self.models_by_name[model.__name__.lower()] = model
    signals.post_save.connect(create_or_update, sender=model)
    signals.post_delete.connect(self.remove_orphans, sender=model)
Follow a particular model class, updating associated Activity objects automatically.
def get_for_model(self, model):
    """Return a QuerySet of only items of a certain type."""
    return self.filter(content_type=ContentType.objects.get_for_model(model))
Return a QuerySet of only items of a certain type.
def get_last_update_of_model(self, model, **kwargs):
    """Return the last time a given model's items were updated, or the
    epoch if the items were never updated.  Extra kwargs further filter
    the queryset."""
    qs = self.get_for_model(model)
    if kwargs:
        qs = qs.filter(**kwargs)
    try:
        return qs.order_by('-timestamp')[0].timestamp
    except IndexError:
        # no matching items at all
        return datetime.datetime.fromtimestamp(0)
Return the last time a given model's items were updated. Returns the epoch if the items were never updated.
def for_each(self, operation, limit=0, verbose=False):
    """Applies the given Operation to each item in the stream, in the
    order that they appear in the stream.  If ``limit`` is non-zero,
    processing stops after that many items.

    Python 2 only (uses a ``print`` statement).
    """
    if limit != 0:
        count = 0
        while self.has_next():
            operation.perform(self.next())
            count += 1
            if verbose:
                # progress indicator
                print count
            if count >= limit:
                break
    else:
        while self.has_next():
            operation.perform(self.next())
Applies the given Operation to each item in the stream. The Operation executes on the items in the stream in the order that they appear in the stream. If the limit is supplied, then processing of the stream will stop after that many items have been processed.
def _parse_lookup_string(self, lookup_chain): lookup_chain = lookup_chain.split('__') comparator = self.default_comparator # Only look for a lookup method if the lookup chain is larger than 1 if len(lookup_chain) <= 1: return lookup_chain, comparator # Get the correct lookup_method if the last value in the lookup # chain is a lookup method specifier if lookup_chain[-1] in self.comparators: comparator = self.comparators.get(lookup_chain.pop(-1)) return lookup_chain, comparator
Convert a lookup string to a (lookup_chain, comparator) tuple.
def _resolve_lookup_chain(self, chain, instance): value = instance for link in chain: value = getattr(value, link) return value
Return the value of inst.chain[0].chain[1].chain[...].chain[n].
def iregex(value, iregex):
    """Case-insensitively match ``value`` against the regex ``iregex``
    (anchored at the start, as with ``re.match``)."""
    pattern = re.compile(iregex, re.IGNORECASE)
    return pattern.match(value)
Returns true if the value case-insensitively matches against the regex.
def get_subdirectories(directory):
    """Return the names of ``directory``'s subdirectories, skipping
    ``__pycache__``."""
    subdirs = []
    for entry in os.listdir(directory):
        if entry == '__pycache__':
            continue
        if os.path.isdir(os.path.join(directory, entry)):
            subdirs.append(entry)
    return subdirs
Get subdirectories without pycache
def get_python_path() -> str:
    """Derive the path of the Python executable from ``os.__file__``.

    Windows keeps python.exe next to the stdlib's parent; elsewhere the
    binary lives in ``<root>/bin/<python-dir-name>``.
    """
    if os.name == 'nt':
        root = os.path.abspath(
            os.path.join(os.__file__, os.pardir, os.pardir))
        return os.path.join(root, 'python.exe')
    root = os.path.abspath(
        os.path.join(os.__file__, os.pardir, os.pardir, os.pardir))
    # Stdlib directory name (e.g. 'python3.9') doubles as the binary name.
    interpreter = os.__file__.rsplit('/')[-2]
    return os.path.join(root, 'bin', interpreter)
Accurately get python executable
def __collect_fields(self):
    """Collect the configured username/password fields from the request."""
    form = FormData()
    for field, error in (
        (self.__username_field, self.__username_error),
        (self.__password_field, self.__password_error),
    ):
        form.add_field(field, required=True, error=error)
    form.parse()
    self.username = form.values[self.__username_field]
    self.password = form.values[self.__password_field]
Use field values from config.json and collect from request
def session(self) -> str:
    """Generate a session (authorization Bearer) JWT token.

    Looks up the user by ``self.username``; on password success creates a
    session row and returns the encoded JWT (with a refresh token when
    enabled in config). Returns None when the user is unknown or the
    password check fails.
    """
    session_jwt = None
    self.user = self.user_model.where_username(self.username)
    if self.user is None:
        return None
    self.user.updated()  # update timestamp on user access
    if self.verify_password(self.user.password):
        session_id = Session.create_session_id()
        data = {
            'session_id': session_id,
            'user_id': self.user.id,
            'user_email': self.user.email,
        }
        # Token lifetime (presumably seconds -- confirm); default 180.
        token_valid_for = \
            current_app.config['AUTH']['JWT']['TOKENS']['VALID_FOR'] if \
            'VALID_FOR' in \
            current_app.config['AUTH']['JWT']['TOKENS'] else 180
        if current_app.config['AUTH']['JWT']['REFRESH_TOKENS']['ENABLED']:
            # Refresh token lifetime; default 86400.
            refresh_token_valid_for = \
                current_app \
                .config['AUTH']['JWT']['REFRESH_TOKENS']['VALID_FOR'] if \
                'VALID_FOR' in \
                current_app.config['AUTH']['JWT']['REFRESH_TOKENS'] else \
                86400
            session_jwt = JWT().create_token_with_refresh_token(
                data, token_valid_for, refresh_token_valid_for)
        else:
            session_jwt = JWT().create_token(data, token_valid_for)
        Session.create_session(session_id, self.user.id)
        return session_jwt
    return None
Generate a session(authorization Bearer) JWT token
def install_board(board_id, board_options, hwpack='arduino', replace_existing=False):
    """install board in boards.txt.

    :param board_id: string identifier
    :param board_options: dict like
    :param replace_existing: bool
    :rtype: None
    """
    if board_id in boards(hwpack).keys():
        log.debug('board already exists: %s', board_id)
        if not replace_existing:
            return
        log.debug('remove board: %s', board_id)
        remove_board(board_id)
    lines = bunch2properties(board_id, board_options)
    boards_txt().write_lines([''] + lines, append=1)
install board in boards.txt. :param board_id: string identifier :param board_options: dict like :param replace_existing: bool :rtype: None
def _parse_nested_interval(self, tokens): if tokens[0].isdigit(): return self._parse_interval(tokens) elif tokens[0] in ['join', 'order']: return self._parse_join(tokens) elif tokens[0] == 'complement': return self._parse_complement(tokens) raise ValueError('interval {} does not fit pattern.'.format(tokens))
Parses a super range. SuperRange ::= Range | Join | Complement
def _parse_interval(self, tokens):
    """Parse a range.

    Range ::= <num> | <num> ('..' | '^') <num>
    """
    chromosome = self.hdr['ACCESSION']['value']
    start = int(tokens.pop(0)) - 1  # convert to 0-based
    if len(tokens) > 1 and tokens[0] in ('..', '^'):
        tokens.pop(0)  # discard the separator
        stop = int(tokens.pop(0))
        return GenomicInterval(start, stop, chromosome=chromosome)
    # Single position: a one-base interval.
    return GenomicInterval(start, start + 1, chromosome=chromosome)
Parses a range Range ::= <num> | <num> ('..' | '^') <num>
def _parse_join(self, tokens):
    """Parse a join.

    Join ::= 'join' '(' SuperRange [',' SuperRange] ')'
    """
    del tokens[0:2]  # 'join' (or 'order') and '('
    children = [self._parse_nested_interval(tokens)]
    while tokens[0] == ',':
        tokens.pop(0)
        children.append(self._parse_nested_interval(tokens))
    tokens.pop(0)  # ')'
    # Chromosome and strand are taken from the first child.
    first = children[0]
    start = min(child.start.position for child in children)
    stop = max(child.stop.position for child in children)
    parent = NestedGenomicInterval(start, stop,
                                   chromosome=first.chromosome,
                                   strand=first.strand)
    parent.children = children
    return parent
Parses a join. Join ::= 'join' '(' SuperRange [',' SuperRange] ')'
def _parse_complement(self, tokens): tokens.pop(0) # Pop 'complement' tokens.pop(0) # Pop '(' res = self._parse_nested_interval(tokens) tokens.pop(0) # Pop ')' res.switch_strand() return res
Parses a complement Complement ::= 'complement' '(' SuperRange ')'
def indel_at(self, position, check_insertions=True, check_deletions=True,
             one_based=True):
    """Does the read contain an indel at the given position?

    True when an insertion starts at ``position`` (the base before the
    event) or when the base at ``position`` lies inside a deletion;
    False otherwise.
    """
    insertions, deletions = self.get_indels(one_based=one_based)
    if check_insertions:
        if any(start == position for start, _ in insertions):
            return True
    if check_deletions:
        for start, length in deletions:
            if start < position < start + length + 1:
                return True
    return False
Does the read contain an indel at the given position? Return True if the read contains an insertion at the given position (position must be the base before the insertion event) or if the read contains a deletion where the base at position is deleted. Return False otherwise.
def get_indels(self, one_based=True):
    """Return (insertions, deletions) parsed from the read's CIGAR.

    insertions = [(pos, len), ...] and deletions = [(pos, len), ...],
    where pos is the base preceding the event (VCF-style) and len
    excludes that preceding base.
    """
    cigar = self.get_cigar()
    insertions = []
    deletions = []
    position_offset = 0
    position_start = self.get_position(one_based=one_based)
    while cigar:
        cigar_size, cigar_op = cigar.pop(0)
        if cigar_op in (0, 7, 8):
            # M alignment match (sequence match or mismatch); = match; X mismatch
            position_offset += cigar_size
        elif cigar_op == 1:  # I insertion
            insertions.append((position_start + position_offset - 1, cigar_size))
        elif cigar_op == 2:  # D deletion from the reference
            deletions.append((position_start + position_offset - 1, cigar_size))
            position_offset += cigar_size
        elif cigar_op == 3:  # N skipped region from the reference
            position_offset += cigar_size
        elif cigar_op == 4:  # S soft clipping (clipped sequences present in SEQ)
            pass
        elif cigar_op == 5:
            # H hard clipping (clipped sequences NOT present in SEQ)
            # NOTE(review): the SAM spec says hard clips consume no
            # reference bases -- confirm this offset advance is intended.
            position_offset += cigar_size
        elif cigar_op == 6:  # P padding (silent deletion from padded reference)
            pass
        else:
            # BUG FIX: the Python-2-only ``print >>sys.stderr`` statement
            # is replaced with a portable stderr write.
            sys.stderr.write(
                'unknown cigar_op %s %s\n' % (cigar_op, cigar_size))
    return (insertions, deletions)
Return a data structure containing all indels in the read. Returns the tuple (insertions, deletions) insertions = [(pos1,ins1), (pos2,ins2)] posN = start position (preceding base, VCF-style) insN = length of inserted sequence (not including preceding base) deletions = [(pos1,del1), (pos2,del2)] posN = start position (preceding base, VCF-style) delN = length of deleted sequence (not including preceding base)
def _operation_speak_as_spell_out(self, content, index, children): children.append(self._create_content_element( content[0:(index + 1)], 'spell-out' )) children.append(self._create_aural_content_element(' ', 'spell-out')) return children
The operation method of _speak_as method for spell-out. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement)
def _operation_speak_as_literal_punctuation( self, content, index, children ): data_property_value = 'literal-punctuation' if index != 0: children.append(self._create_content_element( content[0:index], data_property_value )) children.append(self._create_aural_content_element( ( ' ' + self._get_description_of_symbol(content[index:(index + 1)]) + ' ' ), data_property_value) ) children.append(self._create_visual_content_element( content[index:(index + 1)], data_property_value )) return children
The operation method of _speak_as method for literal-punctuation. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement)
def _operation_speak_as_no_punctuation(self, content, index, children): if index != 0: children.append(self._create_content_element( content[0:index], 'no-punctuation' )) children.append(self._create_visual_content_element( content[index:(index + 1)], 'no-punctuation' )) return children
The operation method of _speak_as method for no-punctuation. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement)
def _operation_speak_as_digits(self, content, index, children): data_property_value = 'digits' if index != 0: children.append(self._create_content_element( content[0:index], data_property_value )) children.append(self._create_aural_content_element( ' ', data_property_value )) children.append(self._create_content_element( content[index:(index + 1)], data_property_value )) return children
The operation method of _speak_as method for digits. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement)
def _set_symbols(self, file_name, configure): self.symbols = [] if file_name is None: file_name = os.path.join(os.path.dirname(os.path.dirname( os.path.dirname(os.path.realpath(__file__)) )), 'symbols.xml') xmldoc = minidom.parse(file_name) symbols_xml = xmldoc.getElementsByTagName( 'symbols' )[0].getElementsByTagName('symbol') for symbol_xml in symbols_xml: self.symbols.append({ 'symbol': symbol_xml.attributes['symbol'].value, 'description': configure.get_parameter( symbol_xml.attributes['description'].value ) })
Load the symbols with configuration. :param file_name: The file path of symbol configuration. :type file_name: str :param configure: The configuration of HaTeMiLe. :type configure: hatemile.util.configure.Configure
def _get_formated_symbol(self, symbol): # pylint: disable=no-self-use old_symbols = [ '\\', '.', '+', '*', '?', '^', '$', '[', ']', '{', '}', '(', ')', '|', '/', ',', '!', '=', ':', '-' ] replace_dict = { '\\': '\\\\', '.': r'\.', '+': r'\+', '*': r'\*', '?': r'\?', '^': r'\^', '$': r'\$', '[': r'\[', ']': r'\]', '{': r'\{', '}': r'\}', '(': r'\(', ')': r'\)', '|': r'\|', '/': r'\/', ',': r'\,', '!': r'\!', '=': r'\=', ':': r'\:', '-': r'\-' } for old in old_symbols: symbol = symbol.replace(old, replace_dict[old]) return symbol
Returns the symbol formatted to be searched by regular expression. :param symbol: The symbol. :type symbol: str :return: The symbol formatted. :rtype: str
def _get_regular_expression_of_symbols(self): regular_expression = None for symbol in self.symbols: formated_symbol = self._get_formated_symbol(symbol['symbol']) if regular_expression is None: regular_expression = '(' + formated_symbol + ')' else: regular_expression = ( regular_expression + '|(' + formated_symbol + ')' ) return regular_expression
Returns the regular expression to search all symbols. :return: The regular expression to search all symbols. :rtype: str
def _is_valid_inherit_element(self, element):
    """Return True when the children of ``element`` may be manipulated to
    apply the CSS properties (tag whitelisted and not marked ignore)."""
    # pylint: disable=no-self-use
    tag = element.get_tag_name()
    if tag not in AccessibleCSSImplementation.VALID_INHERIT_TAGS:
        return False
    return not element.has_attribute(CommonFunctions.DATA_IGNORE)
Check that the children of element can be manipulated to apply the CSS properties. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the children of element can be manipulated to apply the CSS properties or False if the children of element cannot be manipulated to apply the CSS properties. :rtype: bool
def _isolate_text_node(self, element):
    """Wrap each direct text-node child of a valid element in an isolator
    <span>, recursing through child elements.

    :param element: The element.
    """
    # BUG FIX: the original checked _is_valid_element twice in a row
    # (once in the compound condition, once again immediately inside);
    # a single check is sufficient and avoids a redundant call.
    if element.has_children_elements() and self._is_valid_element(element):
        for child_node in element.get_children():
            if isinstance(child_node, HTMLDOMTextNode):
                span = self.html_parser.create_element('span')
                span.set_attribute(
                    AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT,
                    'true'
                )
                span.append_text(child_node.get_text_content())
                child_node.replace_node(span)
        for child in element.get_children_elements():
            self._isolate_text_node(child)
Isolate text nodes of element nodes. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _replace_element_by_own_content(self, element): # pylint: disable=no-self-use if element.has_children_elements(): children = element.get_children_elements() for child in children: element.insert_before(child) element.remove_node() elif element.has_children(): element.replace_node(element.get_first_node_child())
Replace the element by own text content. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _visit(self, element, operation): if self._is_valid_inherit_element(element): if element.has_children_elements(): children = element.get_children_elements() for child in children: self._visit(child, operation) elif self._is_valid_element(element): operation(element)
Visit and execute an operation on the element and its descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param operation: The operation to be executed. :type operation: function
def _create_content_element(self, content, data_property_value):
    """Create an isolator <span> carrying ``content``, tagged with the
    given speak-as property value."""
    span = self.html_parser.create_element('span')
    span.set_attribute(
        AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT, 'true')
    span.set_attribute(
        AccessibleCSSImplementation.DATA_SPEAK_AS, data_property_value)
    span.append_text(content)
    return span
Create a element to show the content. :param content: The text content of element. :type content: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :return: The element to show the content. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
def _create_aural_content_element(self, content, data_property_value): content_element = self._create_content_element( content, data_property_value ) content_element.set_attribute('unselectable', 'on') content_element.set_attribute('class', 'screen-reader-only') return content_element
Create a element to show the content, only to aural displays. :param content: The text content of element. :type content: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :return: The element to show the content. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
def _create_visual_content_element(self, content, data_property_value): content_element = self._create_content_element( content, data_property_value ) content_element.set_attribute('aria-hidden', 'true') content_element.set_attribute('role', 'presentation') return content_element
Create a element to show the content, only to visual displays. :param content: The text content of element. :type content: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :return: The element to show the content. :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_normal(self, element):
    """Restore normal speaking of ``element`` only (undo a speak fix)."""
    if not element.has_attribute(AccessibleCSSImplementation.DATA_SPEAK):
        return
    speak_value = element.get_attribute(
        AccessibleCSSImplementation.DATA_SPEAK)
    is_isolator = element.has_attribute(
        AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT)
    if speak_value == 'none' and not is_isolator:
        # Undo a speak:none fix applied directly to this element.
        element.remove_attribute('role')
        element.remove_attribute('aria-hidden')
        element.remove_attribute(AccessibleCSSImplementation.DATA_SPEAK)
    else:
        # Isolator spans are unwrapped back into their parent.
        self._replace_element_by_own_content(element)
Speak the content of element only. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_normal_inherit(self, element):
    """Restore normal speaking for ``element`` and its descendants, then
    merge adjacent text nodes.

    :param element: The element.
    """
    self._visit(element, self._speak_normal)
    element.normalize()
Speak the content of element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_none(self, element):
    """Hide the content of ``element`` only from aural displays."""
    # pylint: disable=no-self-use
    for name, value in (
        ('role', 'presentation'),
        ('aria-hidden', 'true'),
        (AccessibleCSSImplementation.DATA_SPEAK, 'none'),
    ):
        element.set_attribute(name, value)
Do not speak any content of the element only. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_none_inherit(self, element):
    """Hide the content of ``element`` and its descendants from aural
    displays.

    :param element: The element.
    """
    # Text nodes are first wrapped in isolator spans so they can carry
    # the speak:none attributes individually.
    self._isolate_text_node(element)
    self._visit(element, self._speak_none)
Do not speak any content of the element and its descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_as(
        self,
        element,
        regular_expression,
        data_property_value,
        operation
):
    """Execute ``operation`` for every match of ``regular_expression`` in
    the text of ``element`` (element only, not descendants), replacing the
    element's children with the generated fragments.

    :param element: The element.
    :param regular_expression: The regular expression.
    :param data_property_value: The value of the custom attribute used to
        identify the fix.
    :param operation: Callable ``(content, index, children) -> children``.
    """
    children = []
    pattern = re.compile(regular_expression)
    content = element.get_text_content()
    while content:
        matches = pattern.search(content)
        if matches is not None:
            index = matches.start()
            children = operation(content, index, children)
            # Drop everything up to and including the matched character;
            # the operation has already emitted fragments for it.
            new_index = index + 1
            content = content[new_index:]
        else:
            break
    if children:
        if content:
            # Trailing text after the last match.
            children.append(self._create_content_element(
                content,
                data_property_value
            ))
        # Swap the original child nodes for the generated fragments.
        while element.has_children():
            element.get_first_node_child().remove_node()
        for child in children:
            element.append_element(child)
Execute an operation by regular expression for the element only. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param regular_expression: The regular expression. :type regular_expression: str :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str :param operation: The operation to be executed. :type operation: function
def _reverse_speak_as(self, element, data_property_value):
    """Revert changes of a speak-as fix for ``element`` and descendants.

    :param element: The element.
    :param data_property_value: The value of the custom attribute used to
        identify the fix.
    """
    data_property = (
        '['
        + AccessibleCSSImplementation.DATA_SPEAK_AS
        + '="'
        + data_property_value
        + '"]'
    )
    # NOTE(review): both queries below use the same selector, so the
    # second finds nothing once the first has removed every match; the
    # auxiliary query should probably be narrower (e.g. aural-only
    # spans) -- confirm against upstream HaTeMiLe.
    auxiliar_elements = self.html_parser.find(element).find_descendants(
        data_property
    ).list_results()
    for auxiliar_element in auxiliar_elements:
        auxiliar_element.remove_node()
    content_elements = self.html_parser.find(element).find_descendants(
        data_property
    ).list_results()
    for content_element in content_elements:
        # BUG FIX: the original tested ``element`` (not the matched
        # descendant) and compared has_attribute's bool result to the
        # string 'true', which could never hold; check the content
        # element's attribute value instead.
        if (
                content_element.has_attribute(
                    AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT
                )
                and content_element.get_attribute(
                    AccessibleCSSImplementation.DATA_ISOLATOR_ELEMENT
                ) == 'true'
        ):
            self._replace_element_by_own_content(content_element)
    element.normalize()
Revert changes of a speak_as method for element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param data_property_value: The value of custom attribute used to identify the fix. :type data_property_value: str
def _speak_as_normal(self, element): self._reverse_speak_as(element, 'spell-out') self._reverse_speak_as(element, 'literal-punctuation') self._reverse_speak_as(element, 'no-punctuation') self._reverse_speak_as(element, 'digits')
Use the default speak configuration of user agent for element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_as_spell_out_inherit(self, element): self._reverse_speak_as(element, 'spell-out') self._isolate_text_node(element) self._visit(element, self._speak_as_spell_out)
Speak one letter at a time for each word for elements and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_as_literal_punctuation(self, element): self._speak_as( element, self._get_regular_expression_of_symbols(), 'literal-punctuation', self._operation_speak_as_literal_punctuation )
Speak the punctuation for elements only. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_as_literal_punctuation_inherit(self, element): self._reverse_speak_as(element, 'literal-punctuation') self._reverse_speak_as(element, 'no-punctuation') self._isolate_text_node(element) self._visit(element, self._speak_as_literal_punctuation)
Speak the punctuation for elements and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_as_no_punctuation_inherit(self, element): self._reverse_speak_as(element, 'literal-punctuation') self._reverse_speak_as(element, 'no-punctuation') self._isolate_text_node(element) self._visit(element, self._speak_as_no_punctuation)
Do not speak the punctuation for the element and its descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_as_digits_inherit(self, element): self._reverse_speak_as(element, 'digits') self._isolate_text_node(element) self._visit(element, self._speak_as_digits)
Speak the digit at a time for each number for element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def _speak_header_always_inherit(self, element):
    """Make the cell headers spoken for every data cell, for ``element``
    and its descendants."""
    self._speak_header_once_inherit(element)
    display = AccessibleDisplayImplementation(
        self.html_parser, self.configure)
    cells = self.html_parser.find(element).find_descendants(
        'td[headers],th[headers]'
    ).list_results()
    for cell in cells:
        display.display_cell_header(cell)
The cells headers will be spoken for every data cell for element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement