def _filehandle(self): # if file is opened and it has been rolled we need to close the file # and then to reopen it if self._fh and self._has_file_rolled(): try: self._fh.close() except Exception: pass self._fh = None # if the file is closed (or has been closed right now), open it if not self._fh: self._open_file(self.filename) if not self.opened_before: self.opened_before = True self._fh.seek(0, os.SEEK_END) return self._fh
Return a filehandle to the file being tailed
def pre_save(self, model_instance, add): value = socket.gethostname() setattr(model_instance, self.attname, value) return value
Updates socket.gethostname() on each save.
def get_class(class_string): split_string = class_string.encode('ascii').split('.') import_path = '.'.join(split_string[:-1]) class_name = split_string[-1] if class_name: try: if import_path: mod = __import__(import_path, globals(), {}, [class_name]) cls = getattr(mod, class_name) else: cls = __import__(class_name, globals(), {}) if cls: return cls except (ImportError, AttributeError): pass return None
Get a class from a dotted string
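A minimal usage sketch (the dotted path below is illustrative; any importable class path works):

    cls = get_class('collections.OrderedDict')
    if cls is not None:
        d = cls()  # instantiate the resolved class
        d['a'] = 1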
def _register_handler(event, fun, external=False): registry = core.HANDLER_REGISTRY if external: registry = core.EXTERNAL_HANDLER_REGISTRY if not isinstance(event, basestring): # If not basestring, it is a BaseEvent subclass. # This occurs when class methods are registered as handlers event = core.parse_event_to_name(event) if event in registry: registry[event].append(fun) else: registry[event] = [fun] return fun
Register a function to be an event handler
def handler(param): if isinstance(param, basestring): return lambda f: _register_handler(param, f) else: core.HANDLER_METHOD_REGISTRY.append(param) return param
Decorator that associates a handler with an event class. This decorator works for both methods and functions, since it only registers the callable object and returns it without evaluating it. The name param should be given in dotted notation and should contain two pieces of information: the Django app name and the class name. Just like this: >>> @handler('deal.ActionLog') ... def blah(data): ... sys.stdout.write('I love python!\n') You can also use this same decorator to mark class methods as handlers. Just notice that the class *must* inherit from `BaseEvent`. >>> class MyEvent(BaseEvent): ... @handler('deal.ActionLog') ... def another_blah(data): ... sys.stdout.write('Stuff!\n')
def log(name, data=None): data = data or {} data.update(core.get_default_values(data)) # InvalidEventNameError, EventNotFoundError event_cls = core.find_event(name) event = event_cls(name, data) event.validate() # ValidationError data = core.filter_data_values(data) data = ejson.dumps(data) # TypeError # We don't use celery when developing if conf.getsetting('DEBUG'): core.process(name, data) else: tasks.process_task.delay(name, data)
Entry point for the event lib that starts the logging process This function uses the `name` param to find the event class that will be processed to log stuff. This name must provide two pieces of information separated by a dot: the app name and the event class name. Like this: >>> name = 'deal.ActionLog' The "ActionLog" is a class declared inside the 'deal.events' module and this function will raise an `EventNotFoundError` error if it's not possible to import the right event class. The `data` param *must* be a dictionary, otherwise a `TypeError` will be raised. All keys *must* be strings and all values *must* be serializable by the `json.dumps` function. If you need to pass any unsupported object, you will have to register a serializer function. Consult the RFC-00003-serialize-registry for more information.
def validate_keys(self, *keys): current_keys = set(self.data.keys()) needed_keys = set(keys) if not needed_keys.issubset(current_keys): raise ValidationError( 'The following keys are missing from the ' 'event\'s data: {}'.format( ', '.join(needed_keys.difference(current_keys))) ) return True
Validation helper to ensure that keys are present in data This method makes sure that all of the keys received here are present in the data received from the caller. It is better to call this method in the `validate()` method of your event, not in the `clean()` one, since the first will be called locally, making it easier to debug things and find problems.
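A sketch of how an event might use this helper, assuming a `BaseEvent` base class with a `validate()` hook as described above (the event name and keys are hypothetical):

    class ActionLog(BaseEvent):
        def validate(self):
            # raises ValidationError naming any of these keys absent from self.data
            self.validate_keys('user_id', 'action')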
def addProject(gh_link): ''' Adds a github project to the data folder, unzips it, and deletes the zip file. Returns the project name and the path to the project folder. ''' name = os.path.basename(gh_link) zipurl = gh_link+"/archive/master.zip" outzip = os.path.join('temp_data',name+'.zip') if not os.path.exists('temp_data'): os.makedirs('temp_data') downloadFile(zipurl,outzip) zip = zipfile.ZipFile(outzip,mode='r') outpath = os.path.join('temp_data',name) zip.extractall(outpath) zip.close() os.remove(outzip) return name,outpath
Adds a github project to the data folder, unzips it, and deletes the zip file. Returns the project name and the path to the project folder.
def downloadFile(url,outfile=None): ''' Copied from http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py ''' if not outfile: outfile = url.split('/')[-1] r = requests.get(url, stream=True) with open(outfile, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: f.write(chunk) return outfile
Copied from http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
def cleanDir(self): ''' Remove existing json datafiles in the target directory. ''' if os.path.isdir(self.outdir): baddies = ['tout.json','nout.json','hout.json'] for file in baddies: filepath = os.path.join(self.outdir,file) if os.path.isfile(filepath): os.remove(filepath)
Remove existing json datafiles in the target directory.
def makeHTML(self,mustachepath,htmlpath): '''Write an html file by applying this ideogram's attributes to a mustache template. ''' subs = dict() if self.title: subs["title"]=self.title subs["has_title"]=True else: subs["has_title"]=False subs["font_size"] = self.font_size subs["font_family"] = self.font_family subs["colorscheme"] = self.colorscheme subs["title_color"] = self.title_color subs["bgcolor"] = self.bgcolor with open(mustachepath,'r') as infile: mustache_text = pystache.render(infile.read(), subs) with open(htmlpath,'w+') as outfile: outfile.write(mustache_text)
Write an html file by applying this ideogram's attributes to a mustache template.
def get_age(self, **kwargs): try: units = kwargs.get('units', None) if units is None: return self._age units = units.lower() if units == "days": z = self._age elif units == "hours": z = self._age * 24. elif units == "minutes": z = self._age * 24. * 60. elif units == "seconds": z = self._age * 24. * 60. * 60. else: raise ValueError('Unknown units: %s' % units) return round(z,8) except StandardError: raise KeyError("Could not return age of particle")
Returns the particle's age (how long it has been forced) in a variety of units. Rounded to 8 decimal places. Parameters: units (optional) = 'days' (default), 'hours', 'minutes', or 'seconds'
def age(self, **kwargs): if kwargs.get('days', None) is not None: self._age += kwargs.get('days') return if kwargs.get('hours', None) is not None: self._age += kwargs.get('hours') / 24. return if kwargs.get('minutes', None) is not None: self._age += kwargs.get('minutes') / 24. / 60. return if kwargs.get('seconds', None) is not None: self._age += kwargs.get('seconds') / 24. / 60. / 60. return raise KeyError("Could not age particle, please specify 'days', 'hours', 'minutes', or 'seconds' parameter")
Age this particle. parameters (optional, only one allowed): days (default) hours minutes seconds
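A short sketch combining `age()` and `get_age()`, assuming a particle instance whose age starts at zero (the constructor is hypothetical):

    p = Particle()
    p.age(hours=12)                  # advance half a day
    p.age(days=1)
    print(p.get_age())               # 1.5 (days, the default unit)
    print(p.get_age(units='hours'))  # 36.0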
def normalized_indexes(self, model_timesteps): # Clean up locations # If duplicate time instances, remove the lower index clean_locs = [] for i,loc in enumerate(self.locations): try: if loc.time == self.locations[i+1].time: continue else: clean_locs.append(loc) except StandardError: clean_locs.append(loc) if len(clean_locs) == len(model_timesteps): return [ind for ind,loc in enumerate(self.locations) if loc in clean_locs] elif len(model_timesteps) < len(clean_locs): # We have at least one internal timestep for this particle # Pull out the matching location indexes indexes = [ind for ind,loc in enumerate(self.locations) if loc in clean_locs] if len(model_timesteps) == len(indexes): return indexes raise ValueError("Can't normalize") elif len(model_timesteps) > len(clean_locs): # The particle stopped before forcing for all of the model timesteps raise ValueError("Particle has less locations than model timesteps")
This function will normalize the particle's locations to the timestep of the model that was run. This is used in output, as we should only be outputting the model timestep that was chosen to be run. In most cases, the length of the model_timesteps and the particle's locations will be the same (unless it hits shore). If they are not the same length, pull out of the locations the timesteps that are closest to the model_timesteps
def is_satisfied_by(self, candidate: Any, **kwds: Any) -> bool: candidate_name = self._candidate_name context = self._context if context: if candidate_name in kwds: raise ValueError(f"Candidate name '{candidate_name}' must " "not be given as keyword.") context.update(kwds) context[candidate_name] = candidate try: code = self._code except AttributeError: self._code = code = compile(self._ast_expr, '<str>', mode='eval') return eval(code, context)
Return True if `candidate` satisfies the specification.
def add_edge(self, fr, to): fr = self.add_vertex(fr) to = self.add_vertex(to) self.adjacency[fr].children.add(to) self.adjacency[to].parents.add(fr)
Add an edge to the graph. Multiple edges between the same vertices will quietly be ignored. N-partite graphs can be used to permit multiple edges by partitioning the graph into vertices and edges. :param fr: The name of the origin vertex. :param to: The name of the destination vertex. :return:
def add_vertex(self, v): if v not in self.adjacency: self.adjacency[v] = Vertex(v) return self.adjacency[v].vertex
Add a vertex to the graph. The vertex must implement __hash__ and __eq__ as it will be stored in a set. :param v: vertex :return: graph owned vertex
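A minimal sketch of building a graph with these two methods (assuming a `Graph` class exposing them):

    g = Graph()
    g.add_edge('a', 'b')  # both vertices are created on demand
    g.add_edge('a', 'b')  # duplicate edge, quietly ignored
    g.add_vertex('c')     # isolated vertex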
def clone(self, source_id, backup_id, size, volume_id=None, source_host=None): volume_id = volume_id or str(uuid.uuid4()) return self.http_put('/volumes/%s' % volume_id, params=self.unused({ 'source_host': source_host, 'source_volume_id': source_id, 'backup_id': backup_id, 'size': size }))
create a volume then clone the contents of the backup into the new volume
def create(self, volume_id, backup_id=None, timestamp=None): backup_id = backup_id or str(uuid.uuid4()) timestamp = timestamp or int(time()) return self.http_put('/volumes/%s/backups/%s' % (volume_id, backup_id), params={'timestamp': timestamp})
create a backup of a volume
def create(self, volume_id, ip=None): if ip: return self.http_put('/volumes/%s/export?ip=%s' % (volume_id, ip)) return self.http_put('/volumes/%s/export' % volume_id)
create an export for a volume
def boards(hwpack='arduino'): bunch = read_properties(boards_txt(hwpack)) bunch_items = list(bunch.items()) # remove invalid boards for bid, board in bunch_items: if 'build' not in board.keys() or 'name' not in board.keys(): log.debug('invalid board found: %s', bid) del bunch[bid] return bunch
read boards from boards.txt. :param hwpack: 'all', 'arduino', ...
def board_names(hwpack='arduino'): ls = list(boards(hwpack).keys()) ls.sort() return ls
return installed board names.
def print_boards(hwpack='arduino', verbose=False): if verbose: pp(boards(hwpack)) else: print('\n'.join(board_names(hwpack)))
print boards from boards.txt.
def move_examples(root, lib_dir): all_pde = files_multi_pattern(root, INO_PATTERNS) lib_pde = files_multi_pattern(lib_dir, INO_PATTERNS) stray_pde = all_pde.difference(lib_pde) if len(stray_pde) and not len(lib_pde): log.debug( 'examples found outside lib dir, moving them: %s', stray_pde) examples = lib_dir / EXAMPLES examples.makedirs() for x in stray_pde: d = examples / x.namebase d.makedirs() x.move(d)
find examples not under lib dir, and move into ``examples``
def fix_examples_dir(lib_dir): for x in lib_dir.dirs(): if x.name == EXAMPLES: return for x in lib_dir.dirs(): if x.name.lower() == EXAMPLES: _fix_dir(x) return for x in lib_dir.dirs(): if 'example' in x.name.lower(): _fix_dir(x) return for x in lib_dir.dirs(): if len(files_multi_pattern(x, INO_PATTERNS)): _fix_dir(x) return
rename examples dir to ``examples``
def install_lib(url, replace_existing=False, fix_wprogram=True): d = tmpdir(tmpdir()) f = download(url) Archive(f).extractall(d) clean_dir(d) d, src_dlib = find_lib_dir(d) move_examples(d, src_dlib) fix_examples_dir(src_dlib) if fix_wprogram: fix_wprogram_in_files(src_dlib) targ_dlib = libraries_dir() / src_dlib.name if targ_dlib.exists(): log.debug('library already exists: %s', targ_dlib) if replace_existing: log.debug('remove %s', targ_dlib) targ_dlib.rmtree() else: raise ConfduinoError('library already exists:' + targ_dlib) log.debug('move %s -> %s', src_dlib, targ_dlib) src_dlib.move(targ_dlib) libraries_dir().copymode(targ_dlib) for x in targ_dlib.walk(): libraries_dir().copymode(x) return targ_dlib.name
install library from web or local file system. :param url: web address or file path :param replace_existing: bool :rtype: str (name of the installed library)
def get_config(variable, default=None): if variable in CONFIG: return CONFIG[variable] if hasattr(settings, variable): return getattr(settings, variable) if variable in os.environ: return os.environ[variable] return default
Get configuration variable for strudel.* packages Args: variable (str): name of the config variable default: value to use if the config variable is not set Returns: variable value Order of search: 1. stutils.CONFIG 2. settings.py of the current folder 3. environment variable Known config vars so far: strudel.utils ST_FS_CACHE_DURATION - duration of filesystem cache in seconds ST_FS_CACHE_PATH - path to the folder to store filesystem cache strudel.ecosystems PYPI_SAVE_PATH - place to store downloaded PyPI packages PYPI_TIMEOUT - network timeout for PyPI API strudel.scraper GITHUB_API_TOKENS - comma separated list of GitHub tokens GITLAB_API_TOKENS - same for GitLab API
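A usage sketch of the lookup order, assuming `CONFIG` is the module-level dict the function consults first (values are illustrative):

    import os

    CONFIG['ST_FS_CACHE_PATH'] = '/tmp/st_cache'
    assert get_config('ST_FS_CACHE_PATH') == '/tmp/st_cache'  # CONFIG wins

    os.environ['PYPI_TIMEOUT'] = '30'
    assert get_config('PYPI_TIMEOUT', default='10') == '30'   # env fallback

    assert get_config('NO_SUCH_VAR', default='x') == 'x'      # default comes last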
def _retry(n, f, *args, **kwargs): '''Try to call f(*args, **kwargs) "n" times before giving up. Wait 2**attempt seconds between retries.''' for i in range(n): try: return f(*args, **kwargs) except Exception as exc: if i == n - 1: log.error( '%s permanently failed with %r', f.__name__, exc) raise else: log.warning( '%s attempt #%d failed with %r', f.__name__, i, exc) time.sleep(2 ** i) raise RuntimeError('Should never get here!')
Try to call f(*args, **kwargs) "n" times before giving up. Wait 2**attempt seconds between retries (exponential backoff).
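A sketch of wrapping a flaky network call (the URL and the `requests` dependency are illustrative):

    import requests

    # up to 3 attempts, sleeping 1s then 2s between failures
    response = _retry(3, requests.get, 'https://example.com/api', timeout=5)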
def _init_supervisor_rpc(self, rpc_or_port): '''Initialize supervisor RPC. Allow passing in an RPC connection, or a port number for making one. ''' if isinstance(rpc_or_port, int): if self.username: leader = 'http://{self.username}:{self.password}@' else: leader = 'http://' tmpl = leader + '{self.name}:{port}' url = tmpl.format(self=self, port=rpc_or_port) self.rpc = xmlrpc_client.ServerProxy( url, transport=TimeoutTransport()) else: self.rpc = rpc_or_port self.supervisor = self.rpc.supervisor
Initialize supervisor RPC. Allow passing in an RPC connection, or a port number for making one.
def _init_redis(redis_spec): if not redis_spec: return if isinstance(redis_spec, six.string_types): return redis.StrictRedis.from_url(redis_spec) # assume any other value is a valid instance return redis_spec
Return a StrictRedis instance or None based on redis_spec. redis_spec may be None, a Redis URL, or a StrictRedis instance
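A sketch of the three accepted inputs (the URL is illustrative):

    import redis

    assert _init_redis(None) is None              # no redis configured
    r1 = _init_redis('redis://localhost:6379/0')  # built from a URL
    r2 = _init_redis(redis.StrictRedis())         # instance passed through unchanged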
def _get_base(): try: name, _aliaslist, _addresslist = socket.gethostbyname_ex('deploy') except socket.gaierror: name = 'deploy' fallback = 'https://{name}/'.format(name=name) return os.environ.get('VELOCIRAPTOR_URL', fallback)
if 'deploy' resolves in this environment, use the hostname for which that name resolves. Override with 'VELOCIRAPTOR_URL'
def load_all(cls, vr, params=None): ob_docs = vr.query(cls.base, params) return [cls(vr, ob) for ob in ob_docs]
Create instances of all objects found
def dispatch(self, **changes): self.patch(**changes) trigger_url = self._vr._build_url(self.resource_uri, 'swarm/') resp = self._vr.session.post(trigger_url) resp.raise_for_status() try: return resp.json() except ValueError: return None
Patch the swarm with changes and then trigger the swarm.
def assemble(self): if not self.created: self.create() # trigger the build url = self._vr._build_url(self.resource_uri, 'build/') resp = self._vr.session.post(url) resp.raise_for_status()
Assemble a build
def _get_token(self): ''' requests a communication token from Grooveshark ''' self.session.token = self.request( 'getCommunicationToken', {'secretKey': self.session.secret}, {'uuid': self.session.user, 'session': self.session.session, 'clientRevision': grooveshark.const.CLIENTS['htmlshark']['version'], 'country': self.session.country, 'privacy': 0, 'client': 'htmlshark'})[1] self.session.time = time.time()
requests a communication token from Grooveshark
def _request_token(self, method, client): ''' generates a request token ''' if time.time() - self.session.time > grooveshark.const.TOKEN_TIMEOUT: self._get_token() random_value = self._random_hex() return random_value + hashlib.sha1((method + ':' + self.session.token + ':' + grooveshark.const.CLIENTS[client]['token'] + ':' + random_value).encode('utf-8')).hexdigest()
generates a request token
def request(self, method, parameters, header): ''' Grooveshark API request ''' data = json.dumps({ 'parameters': parameters, 'method': method, 'header': header}) request = urllib.Request( 'https://grooveshark.com/more.php?%s' % (method), data=data.encode('utf-8'), headers=self._json_request_header()) with contextlib.closing(self.urlopen(request)) as response: result = json.loads(response.read().decode('utf-8')) if 'result' in result: return response.info(), result['result'] elif 'fault' in result: raise RequestError(result['fault']['message'], result['fault']['code']) else: raise UnknownError(result)
Grooveshark API request
def header(self, method, client='htmlshark'): ''' generates a Grooveshark API JSON header ''' return {'token': self._request_token(method, client), 'privacy': 0, 'uuid': self.session.user, 'clientRevision': grooveshark.const.CLIENTS[client]['version'], 'session': self.session.session, 'client': client, 'country': self.session.country}
generates a Grooveshark API JSON header
def _parse_album(self, album): ''' Parse search json-data and create an :class:`Album` object. ''' if album['CoverArtFilename']: cover_url = '%sm%s' % (grooveshark.const.ALBUM_COVER_URL, album['CoverArtFilename']) else: cover_url = None return Album( album['AlbumID'], album['Name'], album['ArtistID'], album['ArtistName'], cover_url, self.connection)
Parse search json-data and create an :class:`Album` object.
def _parse_playlist(self, playlist): ''' Parse search json-data and create a :class:`Playlist` object. ''' if playlist['Picture']: cover_url = '%s70_%s' % (grooveshark.const.PLAYLIST_COVER_URL, playlist['Picture']) else: cover_url = None return Playlist( playlist['PlaylistID'], playlist['Name'], cover_url, self.connection)
Parse search json-data and create a :class:`Playlist` object.
def popular(self, period=DAILY): ''' Get popular songs. :param period: time period :rtype: a generator that generates :class:`Song` objects Time periods: +---------------------------------+-----------------------------------+ | Constant | Meaning | +=================================+===================================+ | :const:`Client.DAILY` | Popular songs of this day | +---------------------------------+-----------------------------------+ | :const:`Client.MONTHLY` | Popular songs of this month | +---------------------------------+-----------------------------------+ ''' songs = self.connection.request( 'popularGetSongs', {'type': period}, self.connection.header('popularGetSongs'))[1]['Songs'] return (Song.from_response(song, self.connection) for song in songs)
Get popular songs. :param period: time period :rtype: a generator that generates :class:`Song` objects Time periods: +---------------------------------+-----------------------------------+ | Constant | Meaning | +=================================+===================================+ | :const:`Client.DAILY` | Popular songs of this day | +---------------------------------+-----------------------------------+ | :const:`Client.MONTHLY` | Popular songs of this month | +---------------------------------+-----------------------------------+
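A sketch of iterating the generator, assuming the library's usual `Client().init()` bootstrap:

    from grooveshark import Client

    client = Client()
    client.init()  # assumed session setup, as in the library's examples
    for song in client.popular(period=Client.DAILY):
        print(song)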
def playlist(self, playlist_id): ''' Get a playlist from its ID :param playlist_id: ID of the playlist :rtype: a :class:`Playlist` object ''' playlist = self.connection.request( 'getPlaylistByID', {'playlistID': playlist_id}, self.connection.header('getPlaylistByID'))[1] return self._parse_playlist(playlist)
Get a playlist from its ID :param playlist_id: ID of the playlist :rtype: a :class:`Playlist` object
def collection(self, user_id): # TODO further evaluation of the page param, I don't know where the # limit is. dct = {'userID': user_id, 'page': 0} r = 'userGetSongsInLibrary' result = self.connection.request(r, dct, self.connection.header(r)) songs = result[1]['Songs'] return [Song.from_response(song, self.connection) for song in songs]
Get the song collection of a user. :param user_id: ID of a user. :rtype: list of :class:`Song`
def hwpack_names(): ls = hwpack_dir().listdir() ls = [x.name for x in ls] ls = [x for x in ls if x != 'tools'] arduino_included = 'arduino' in ls ls = [x for x in ls if x != 'arduino'] ls.sort() if arduino_included: ls = ['arduino'] + ls # move to 1st pos return ls
return installed hardware package names.
def _create_parser(self, html_parser, current_url): css_code = '' elements = html_parser.find( 'style,link[rel="stylesheet"]' ).list_results() for element in elements: if element.get_tag_name() == 'STYLE': css_code = css_code + element.get_text_content() else: css_code = css_code + requests.get( urljoin(current_url, element.get_attribute('href')) ).text self.stylesheet = tinycss.make_parser().parse_stylesheet(css_code)
Create the tinycss stylesheet. :param html_parser: The HTML parser. :type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser :param current_url: The current URL of page. :type current_url: str
def partition(thelist, n): try: n = int(n) thelist = list(thelist) except (ValueError, TypeError): return [thelist] p = len(thelist) // n return [thelist[p*i:p*(i+1)] for i in range(n - 1)] + [thelist[p*(i+1):]]
Break a list into ``n`` pieces. The last list may be larger than the rest if the list doesn't break cleanly. That is:: >>> l = range(10) >>> partition(l, 2) [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] >>> partition(l, 3) [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] >>> partition(l, 4) [[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]] >>> partition(l, 5) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
def partition_horizontal(thelist, n): try: n = int(n) thelist = list(thelist) except (ValueError, TypeError): return [thelist] newlists = [list() for i in range(int(ceil(len(thelist) / float(n))))] for i, val in enumerate(thelist): newlists[i//n].append(val) return newlists
Break a list into ``n`` pieces, but "horizontally." That is, ``partition_horizontal(range(10), 3)`` gives:: [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] Clear as mud?
def partition_horizontal_twice(thelist, numbers): n, n2 = numbers.split(',') try: n = int(n) n2 = int(n2) thelist = list(thelist) except (ValueError, TypeError): return [thelist] newlists = [] while thelist: newlists.append(thelist[:n]) thelist = thelist[n:] newlists.append(thelist[:n2]) thelist = thelist[n2:] return newlists
numbers is split on a comma to n and n2. Break a list into pieces, each piece alternating between n and n2 items long ``partition_horizontal_twice(range(14), "3,4")`` gives:: [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9], [10, 11, 12, 13]] Clear as mud?
def history_search_forward(self, e): # () u'''Search forward through the history for the string of characters between the start of the current line and the point. This is a non-incremental search. By default, this command is unbound.''' self.l_buffer=self._history.history_search_forward(self.l_buffer)
Search forward through the history for the string of characters between the start of the current line and the point. This is a non-incremental search. By default, this command is unbound.
def history_search_backward(self, e): # () u'''Search backward through the history for the string of characters between the start of the current line and the point. This is a non-incremental search. By default, this command is unbound.''' self.l_buffer=self._history.history_search_backward(self.l_buffer)
Search backward through the history for the string of characters between the start of the current line and the point. This is a non-incremental search. By default, this command is unbound.
def quoted_insert(self, e): # (C-q or C-v) u'''Add the next character typed to the line verbatim. This is how to insert key sequences like C-q, for example.''' e = self.console.getkeypress() self.insert_text(e.char)
Add the next character typed to the line verbatim. This is how to insert key sequences like C-q, for example.
def paste(self,e): u'''Paste windows clipboard''' if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert(False) self.insert_text(txt)
Paste windows clipboard
def paste_mulitline_code(self,e): u'''Paste windows clipboard''' reg=re.compile(u"\r?\n") if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert(False) t=reg.split(txt) t=[row for row in t if row.strip()!=u""] #remove empty lines if t!=[u""]: self.insert_text(t[0]) self.add_history(self.l_buffer.copy()) self.paste_line_buffer=t[1:] log(u"multi: %s"%self.paste_line_buffer) return True else: return False
Paste windows clipboard
def ipython_paste(self,e): u'''Paste windows clipboard. If enable_ipython_paste_list_of_lists is True then try to convert tab-separated data to repr of list of lists or repr of array''' if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert( self.enable_ipython_paste_list_of_lists) if self.enable_ipython_paste_for_paths: if len(txt)<300 and (u"\t" not in txt) and (u"\n" not in txt): txt=txt.replace(u"\\", u"/").replace(u" ", ur"\ ") self.insert_text(txt)
Paste windows clipboard. If enable_ipython_paste_list_of_lists is True then try to convert tab-separated data to repr of list of lists or repr of array
def main(cls, args=None): if args is None: args = sys.argv[1:] try: o = cls() o.parseOptions(args) except usage.UsageError as e: print(o.getSynopsis()) print(o.getUsage()) print(str(e)) return 1 except CLIError as ce: print(str(ce)) return ce.returnCode return 0
Fill in command-line arguments from argv
def compose_schemas(*schemas): key = 'jsonattrs:compose:' + ','.join([str(s.pk) for s in schemas]) cached = caches['jsonattrs'].get(key) if cached: s_attrs, required_attrs, default_attrs = cached # Deserialize attrs when retrieving from cache attrs = OrderedDict((k, Attribute(**v)) for k, v in s_attrs.items()) return attrs, required_attrs, default_attrs # Extract schema attributes, names of required attributes and # names of attributes with defaults, composing schemas. schema_attrs = [ s.attributes.select_related('attr_type').all() for s in schemas] attrs = OrderedDict() required_attrs = set() default_attrs = set() for attributes in schema_attrs: for attr in attributes: if attr.omit: if attr.name in attrs: del attrs[attr.name] else: attrs[attr.name] = attr required_attrs = {n for n, a in attrs.items() if a.required} default_attrs = {n for n, a in attrs.items() if a.default is not None and a.default != ''} # Serialize attrs to make it smaller in cache s_attrs = OrderedDict((k, v.to_dict()) for k, v in attrs.items()) caches['jsonattrs'].set(key, (s_attrs, required_attrs, default_attrs)) return attrs, required_attrs, default_attrs
Returns a single three-ple of the following for all provided schemas: - a map of attribute names to attributes for all related schema attributes - a set of names of all related required schema attributes - a set of names of all related default schema attributes For the sake of performance, values are written-to and returned-from the jsonattrs cache.
def _index_files(path): with zipfile.ZipFile(path) as zf: names = sorted(zf.namelist()) names = [nn for nn in names if nn.endswith(".tif")] names = [nn for nn in names if nn.startswith("SID PHA")] phasefiles = [] for name in names: with zf.open(name) as pt: fd = io.BytesIO(pt.read()) if SingleTifPhasics.verify(fd): phasefiles.append(name) return phasefiles
Search zip file for SID PHA files
def files(self): if self._files is None: self._files = SeriesZipTifPhasics._index_files(self.path) return self._files
List of Phasics tif file names in the input zip file
def verify(path): valid = False try: zf = zipfile.ZipFile(path) except (zipfile.BadZipfile, IsADirectoryError): pass else: names = sorted(zf.namelist()) names = [nn for nn in names if nn.endswith(".tif")] names = [nn for nn in names if nn.startswith("SID PHA")] for name in names: with zf.open(name) as pt: fd = io.BytesIO(pt.read()) if SingleTifPhasics.verify(fd): valid = True break zf.close() return valid
Verify that `path` is a zip file with Phasics TIFF files
def write_logfile(): # type: () -> None command = os.path.basename(os.path.realpath(os.path.abspath(sys.argv[0]))) now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f') filename = '{}-{}.log'.format(command, now) with open(filename, 'w') as logfile: if six.PY3: logfile.write(_LOGFILE_STREAM.getvalue()) else: logfile.write(_LOGFILE_STREAM.getvalue().decode( # type: ignore errors='replace'))
Write a DEBUG log file COMMAND-YYYYMMDD-HHMMSS.ffffff.log.
def excepthook(type, value, traceback): # pylint: disable=unused-argument try: six.reraise(type, value, traceback) except type: _LOGGER.exception(str(value)) if isinstance(value, KeyboardInterrupt): message = "Cancelling at the user's request." else: message = handle_unexpected_exception(value) print(message, file=sys.stderr)
Log exceptions instead of printing a traceback to stderr.
def handle_unexpected_exception(exc): # type: (BaseException) -> str try: write_logfile() addendum = 'Please see the log file for more information.' except IOError: addendum = 'Unable to write log file.' try: message = str(exc) return '{}{}{}'.format(message, '\n' if message else '', addendum) except Exception: # pylint: disable=broad-except return str(exc)
Return an error message and write a log file if logging was not enabled. Args: exc: The unexpected exception. Returns: A message to display to the user concerning the unexpected exception.
def enable_logging(log_level): # type: (typing.Union[None, int]) -> None root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) logfile_handler = logging.StreamHandler(_LOGFILE_STREAM) logfile_handler.setLevel(logging.DEBUG) logfile_handler.setFormatter(logging.Formatter( '%(levelname)s [%(asctime)s][%(name)s] %(message)s')) root_logger.addHandler(logfile_handler) if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL: signal.signal(signal.SIGTERM, _logfile_sigterm_handler) if log_level: handler = logging.StreamHandler() handler.setFormatter(_LogColorFormatter()) root_logger.setLevel(log_level) root_logger.addHandler(handler)
Configure the root logger and a logfile handler. Args: log_level: The logging level to set the logger handler.
def get_log_level(args): # type: (typing.Dict[str, typing.Any]) -> int index = -1 log_level = None if '<command>' in args and args['<command>']: index = sys.argv.index(args['<command>']) if args.get('--debug'): log_level = 'DEBUG' if '--debug' in sys.argv and sys.argv.index('--debug') < index: sys.argv.remove('--debug') elif '-d' in sys.argv and sys.argv.index('-d') < index: sys.argv.remove('-d') elif args.get('--verbose'): log_level = 'INFO' if '--verbose' in sys.argv and sys.argv.index('--verbose') < index: sys.argv.remove('--verbose') elif '-v' in sys.argv and sys.argv.index('-v') < index: sys.argv.remove('-v') elif args.get('--log-level'): log_level = args['--log-level'] sys.argv.remove('--log-level') sys.argv.remove(log_level) if log_level not in (None, 'DEBUG', 'INFO', 'WARN', 'ERROR'): raise exceptions.InvalidLogLevelError(log_level) return getattr(logging, log_level) if log_level else None
Get the log level from the CLI arguments. Removes logging arguments from sys.argv. Args: args: The parsed docopt arguments to be used to determine the logging level. Returns: The correct log level based on the three CLI arguments given. Raises: ValueError: Raised if the given log level is not in the acceptable list of values.
def _logfile_sigterm_handler(*_): # type: (...) -> None logging.error('Received SIGTERM.') write_logfile() print('Received signal. Please see the log file for more information.', file=sys.stderr) sys.exit(signal.SIGTERM)
Handle exit signals and write out a log file. Raises: SystemExit: Contains the signal as the return code.
def format(self, record): # type: (logging.LogRecord) -> str if record.levelno >= logging.ERROR: color = colorama.Fore.RED elif record.levelno >= logging.WARNING: color = colorama.Fore.YELLOW elif record.levelno >= logging.INFO: color = colorama.Fore.RESET else: color = colorama.Fore.CYAN format_template = ( '{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s') if sys.stdout.isatty(): self._fmt = format_template.format( colorama.Style.BRIGHT, color, colorama.Fore.RESET, colorama.Style.RESET_ALL ) else: self._fmt = format_template.format(*[''] * 4) if six.PY3: self._style._fmt = self._fmt # pylint: disable=protected-access return super(_LogColorFormatter, self).format(record)
Format the log record with timestamps and level based colors. Args: record: The log record to format. Returns: The formatted log record.
def oauth_url(client_id, permissions=None, server=None, redirect_uri=None): url = 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id) if permissions is not None: url = url + '&permissions=' + str(permissions.value) if server is not None: url = url + "&guild_id=" + server.id if redirect_uri is not None: from urllib.parse import urlencode url = url + "&response_type=code&" + urlencode({'redirect_uri': redirect_uri}) return url
A helper function that returns the OAuth2 URL for inviting the bot into servers. Parameters ----------- client_id : str The client ID for your bot. permissions : :class:`Permissions` The permissions you're requesting. If not given then you won't be requesting any permissions. server : :class:`Server` The server to pre-select in the authorization screen, if available. redirect_uri : str An optional valid redirect URI.
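A usage sketch (the client ID is a placeholder; `Permissions.text()` is one of discord.py's preset permission sets):

    import discord

    url = oauth_url('123456789012345678')  # bare invite, no permissions requested
    url = oauth_url('123456789012345678', permissions=discord.Permissions.text())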
def ask_question(self, question_text, question=None): if question is not None: q = question.to_dict() else: q = WatsonQuestion(question_text).to_dict() r = requests.post(self.url + '/question', json={'question': q}, headers={ 'Accept': 'application/json', 'X-SyncTimeout': 30 }, auth=(self.username, self.password)) try: response_json = r.json() except ValueError: raise Exception('Failed to parse response JSON') return WatsonAnswer(response_json)
Ask Watson a question via the Question and Answer API :param question_text: question to ask Watson :type question_text: str :param question: if question_text is not provided, a Question object representing the question to ask Watson :type question: WatsonQuestion :return: Answer
def process_queue(queue=None, **kwargs): while True: item = queue.get() if item is None: queue.task_done() logger.info(f"{queue}: exiting process queue.") break filename = os.path.basename(item) try: queue.next_task(item, **kwargs) except Exception as e: queue.task_done() logger.warning(f"{queue}: item={filename}. {e}\n") logger.exception(e) sys.stdout.write( style.ERROR( f"{queue}. item={filename}. {e}. Exception has been logged.\n" ) ) sys.stdout.flush() break else: logger.info(f"{queue}: Successfully processed {filename}.\n") queue.task_done()
Loops and waits on queue calling queue's `next_task` method. If an exception occurs, log the error, log the exception, and break.
def fs_cache(app_name='', cache_type='', idx=1, expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher): def decorator(func): return helper_class(func, cache_dir, app_name, cache_type, idx, expires) return decorator
A decorator to cache results of functions returning pd.DataFrame or pd.Series objects under: <cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv; missing parts, like app_name and cache_type, will be omitted. If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used. If 'ST_FS_CACHE_PATH' is not configured, a temporary directory will be created. :param app_name: if present, cache files for this application will be stored in a separate folder :param idx: number of columns to use as an index :param cache_type: if present, cache files within app directory will be separated into different folders by their cache_type. :param expires: cache duration in seconds :param cache_dir: set custom file cache path
def typed_fs_cache(app_name, *args, **kwargs): return functools.partial(fs_cache, app_name, *args, **kwargs)
Convenience method to simplify declaration of multiple @fs_cache e.g., >>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30) >>> @my_fs_cache('first_method') ... def some_method(*args, **kwargs): ... pass >>> @my_fs_cache('second_method') ... def some_other_method(*args, **kwargs): ... pass
def memoize(func): cache = {} @functools.wraps(func) def wrapper(*args): key = "__".join(str(arg) for arg in args) if key not in cache: cache[key] = func(*args) return cache[key] return wrapper
Classic memoize decorator for non-class methods
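A classic usage sketch:

    @memoize
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(100)  # each fib(k) is computed once, then served from the cache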
def cached_method(func): @functools.wraps(func) def wrapper(self, *args): if not hasattr(self, "_cache"): self._cache = {} key = _argstring((func.__name__,) + args) if key not in self._cache: self._cache[key] = func(self, *args) return self._cache[key] return wrapper
Memoize for class methods
def guard(func): semaphore = threading.Lock() @functools.wraps(func) def wrapper(*args, **kwargs): semaphore.acquire() try: return func(*args, **kwargs) finally: semaphore.release() return wrapper
Prevents the decorated function from parallel execution. Internally, this decorator creates a Lock object and transparently obtains/releases it when calling the function.
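A sketch of serializing access to shared state from several threads:

    import threading

    counter = {'value': 0}

    @guard
    def increment():
        counter['value'] += 1  # only one thread runs this at a time

    threads = [threading.Thread(target=increment) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert counter['value'] == 10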
def threadpool(num_workers=None): def decorator(func): @functools.wraps(func) def wrapper(data): return mapreduce.map(func, data, num_workers) return wrapper return decorator
Apply stutils.mapreduce.map to the given function
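A sketch, assuming `stutils.mapreduce.map` takes (func, iterable, num_workers) as used above:

    @threadpool(num_workers=4)
    def square(x):
        return x * x

    results = square([1, 2, 3, 4])  # items mapped across the worker pool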
def invalidate_all(self): for fname in os.listdir(self.cache_path): if fname.startswith(self.func.__name__ + "."): os.remove(os.path.join(self.cache_path, fname))
Remove all files caching this function
def key(self): return [part.strip() for part in self.href.split("/") if part.strip()][ -1]
Example:: /browse/homes/ca/ -> ca /browse/homes/ca/los-angeles-county/ -> los-angeles-county /browse/homes/ca/los-angeles-county/91001/ -> 91001 /browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895 :return:
def handle(self, object, *args, **kw): ''' Calls each plugin in this PluginSet with the specified object, arguments, and keywords in the standard group plugin order. The return value from each successive invoked plugin is passed as the first parameter to the next plugin. The final return value is the object returned from the last plugin. If this plugin set is empty (i.e. no plugins exist or matched the spec), then a ValueError exception is thrown. ''' if not bool(self): if not self.spec or self.spec == SPEC_ALL: raise ValueError('No plugins available in group %r' % (self.group,)) raise ValueError( 'No plugins in group %r matched %r' % (self.group, self.spec)) for plugin in self.plugins: object = plugin.handle(object, *args, **kw) return object
Calls each plugin in this PluginSet with the specified object, arguments, and keywords in the standard group plugin order. The return value from each successive invoked plugin is passed as the first parameter to the next plugin. The final return value is the object returned from the last plugin. If this plugin set is empty (i.e. no plugins exist or matched the spec), then a ValueError exception is thrown.
def filter(self, object, *args, **kw): ''' Identical to `PluginSet.handle`, except: #. If this plugin set is empty, `object` is returned as-is. #. If any plugin returns ``None``, it is returned without calling any further plugins. ''' for plugin in self.plugins: object = plugin.handle(object, *args, **kw) if object is None: return object return object
Identical to `PluginSet.handle`, except: #. If this plugin set is empty, `object` is returned as-is. #. If any plugin returns ``None``, it is returned without calling any further plugins.
def select(self, name): ''' Returns a new PluginSet that has only the plugins in this set that are named `name`. ''' return PluginSet(self.group, name, [ plug for plug in self.plugins if plug.name == name])
Returns a new PluginSet that has only the plugins in this set that are named `name`.
def browse_home_listpage_url(self, state=None, county=None, zipcode=None, street=None, **kwargs): url = self.domain_browse_homes for item in [state, county, zipcode, street]: if item: url = url + "/%s" % item url = url + "/" return url
Construct an url of home list page by state, county, zipcode, street. Example: - https://www.zillow.com/browse/homes/ca/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/ - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
def hamming(s, t): if len(s) != len(t): raise ValueError('Hamming distance needs strings of equal length.') return sum(s_ != t_ for s_, t_ in zip(s, t))
Calculate the Hamming distance between two strings of equal length. :param s: string 1 :type s: str :param t: string 2 :type t: str :return: Hamming distance :rtype: int
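A quick worked example (the classic Wikipedia pair):

    assert hamming('karolin', 'kathrin') == 3  # differs at positions 2, 3 and 4

    try:
        hamming('abc', 'abcd')
    except ValueError:
        pass  # unequal lengths are rejected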
def _render_bundle(bundle_name): try: bundle = get_bundles()[bundle_name] except KeyError: raise ImproperlyConfigured("Bundle '%s' is not defined" % bundle_name) if bundle.use_bundle: return _render_file(bundle.bundle_type, bundle.get_url(), attrs=({'media':bundle.media} if bundle.media else {})) # Render files individually bundle_files = [] for bundle_file in bundle.files: if bundle_file.precompile_in_debug: bundle_files.append(_render_file(bundle_file.bundle_type, bundle_file.precompile_url, attrs=({'media':bundle_file.media} if bundle_file.media else {}))) else: bundle_files.append(_render_file(bundle_file.file_type, bundle_file.file_url, attrs=({'media':bundle_file.media} if bundle_file.media else {}))) return '\n'.join(bundle_files)
Renders the HTML for a bundle in place - one HTML tag or many depending on settings.USE_BUNDLES
def from_string(self, string_representation, resource=None): stream = NativeIO(string_representation) return self.from_stream(stream, resource=resource)
Extracts resource data from the given string and converts them to a new resource or updates the given resource from it.
def from_bytes(self, bytes_representation, resource=None): text = bytes_representation.decode(self.encoding) return self.from_string(text, resource=resource)
Extracts resource data from the given bytes representation and calls :method:`from_string` with the resulting text representation.
def to_string(self, obj): stream = NativeIO() self.to_stream(obj, stream) return text_(stream.getvalue(), encoding=self.encoding)
Converts the given resource to a string representation and returns it.
def to_bytes(self, obj, encoding=None): if encoding is None: encoding = self.encoding text = self.to_string(obj) return bytes_(text, encoding=encoding)
Converts the given resource to bytes representation in the encoding specified by :param:`encoding` and returns it.
def data_from_bytes(self, byte_representation): text = byte_representation.decode(self.encoding) return self.data_from_string(text)
Converts the given bytes representation to resource data.
def data_to_string(self, data_element): stream = NativeIO() self.data_to_stream(data_element, stream) return stream.getvalue()
Converts the given data element into a string representation. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` :returns: string representation (using the MIME content type configured for this representer)
def data_to_bytes(self, data_element, encoding=None): if encoding is None: encoding = self.encoding text = self.data_to_string(data_element) return bytes_(text, encoding=encoding)
Converts the given data element into a string representation using the :method:`data_to_string` method and encodes the resulting text with the given encoding.
def create_from_resource_class(cls, resource_class): mp_reg = get_mapping_registry(cls.content_type) mp = mp_reg.find_or_create_mapping(resource_class) return cls(resource_class, mp)
Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class.
def data_from_stream(self, stream): parser = self._make_representation_parser(stream, self.resource_class, self._mapping) return parser.run()
Creates a data element reading a representation from the given stream. :returns: object implementing :class:`everest.representers.interfaces.IExplicitDataElement`
def data_to_stream(self, data_element, stream): generator = \ self._make_representation_generator(stream, self.resource_class, self._mapping) generator.run(data_element)
Writes the given data element to the given stream.
def resource_from_data(self, data_element, resource=None): return self._mapping.map_to_resource(data_element, resource=resource)
Converts the given data element to a resource. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement`
def configure(self, options=None, attribute_options=None): # pylint: disable=W0221 self._mapping.update(options=options, attribute_options=attribute_options)
Configures the options and attribute options of the mapping associated with this representer with the given dictionaries. :param dict options: configuration options for the mapping associated with this representer. :param dict attribute_options: attribute options for the mapping associated with this representer.
def with_updated_configuration(self, options=None, attribute_options=None): return self._mapping.with_updated_configuration(options=options, attribute_options= attribute_options)
Returns a context in which this representer is updated with the given options and attribute options.
def jsPath(path): '''Returns a relative path without \, -, and . so that the string will play nicely with javascript.''' shortPath=path.replace( "C:\\Users\\scheinerbock\\Desktop\\"+ "ideogram\\scrapeSource\\test\\","") noDash = shortPath.replace("-","_dash_") jsPath=noDash.replace("\\","_slash_").replace(".","_dot_") return jsPath
Returns a relative path without \, -, and . so that the string will play nicely with javascript.
def jsName(path,name): '''Returns a name string without \, -, and . so that the string will play nicely with javascript.''' shortPath=path.replace( "C:\\Users\\scheinerbock\\Desktop\\"+ "ideogram\\scrapeSource\\test\\","") noDash = shortPath.replace("-","_dash_") jsPath=noDash.replace("\\","_slash_").replace(".","_dot_") jsName=jsPath+'_slash_'+name return jsName
Returns a name string without \, -, and . so that the string will play nicely with javascript.
def getStartNodes(fdefs,calls): '''Return a list of nodes in fdefs that have no inbound edges''' s=[] for source in fdefs: for fn in fdefs[source]: inboundEdges=False for call in calls: if call.target==fn: inboundEdges=True if not inboundEdges: s.append(fn) return s
Return a list of nodes in fdefs that have no inbound edges
def getChildren(current,calls,blacklist=[]): ''' Return a list of the children of current that are not in the blacklist. ''' return [c.target for c in calls if c.source==current and c.target not in blacklist]
Return a list of the children of current that are not in the blacklist.