code | docs
---|---
def _ensure_module_folder_exists():
if not os.path.isdir(MODULES_FOLDER_PATH):
try:
os.mkdir(MODULES_FOLDER_PATH)
except OSError as e:
if "file already exists" in str(e):
raise RuntimeError("Could not create modules folder: file exists with the same name") | Checks to see if the module folder exists. If it does not, create it.
If there is an existing file with the same name, we raise a RuntimeError. |
def find_vm(self, name):
try:
domain = self.hyper.lookupByName(name)
VM = VirtualMachine(domain, self)
except libvirtError:
VM = None
return VM | Try to find a VM by name
:param name: Name of the VM
:type name: str |
def fast_clone(self, VM, clone_name, mem=None):
disks = VM.get_disks()
ints = VM.get_interfaces()
count = 0
new_disks = []
for disk in disks:
pool = disk.pool
new_disk_name = '{0}-disk{1}'.format(clone_name, count)
count += 1
new_disk = pool.create_backed_vol(new_disk_name, disk)
new_disks.append(new_disk)
for inter in ints:
inter.mac = None
# if the mac is set to None we don't include it in the xml
# and libvirt will autogen one for us
return self.create_vm(
VM.domain_type,
clone_name,
VM.num_cpus,
mem or VM.current_memory,
mem or VM.max_memory,
new_disks,
ints) | Create a 'fast' clone of a VM. This means we make
a snapshot of the disk, copy some of the settings,
and then create a new VM based on the snapshot and settings.
The VM is transient, so when it is shut down it deletes itself.
:param VM: The VM to base this clone on
:type VM: sham.machine.VirtualMachine
:param clone_name: The name for this clone
:type clone_name: str |
def get_configuration_from_settings(self, setting_info):
settings = self.get_settings()
return dict([(name, settings.get(key))
for (name, key) in setting_info
if settings.get(key, None) is not None]) | Returns a dictionary with configuration names as keys and setting
values extracted from this configurator's settings as values.
:param setting_info: Sequence of 2-tuples containing the configuration
name as the first and the setting name as the second element. |
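A minimal usage sketch of the (configuration name, settings key) contract described above; the setting names and the `configurator` instance are hypothetical.
# Hypothetical mapping of configuration names to settings keys.
setting_info = [('db_string', 'db_connection_string'),
                ('db_echo', 'db_echo_sql')]
# If get_settings() returned {'db_connection_string': 'sqlite://'}, the call
# below would yield {'db_string': 'sqlite://'}; 'db_echo' is dropped because
# 'db_echo_sql' is absent from the settings.
config = configurator.get_configuration_from_settings(setting_info)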
def add_repository(self, name, repository_type, repository_class,
aggregate_class, make_default, configuration):
repo_mgr = self.get_registered_utility(IRepositoryManager)
if name is None:
# If no name was given, this is assumed to be the ROOT repository
# for the given repository type.
name = REPOSITORY_DOMAINS.ROOT
repo = repo_mgr.new(repository_type, name=name,
make_default=make_default,
repository_class=repository_class,
aggregate_class=aggregate_class,
configuration=configuration)
repo_mgr.set(repo) | Generic method for adding a repository. |
def encrypt_email(email):
aes = SimpleAES(flask.current_app.config["AES_KEY"])
return aes.encrypt(email) | The default encryption function for storing emails in the database. This
uses AES and the encryption key defined in the application's configuration.
:param email:
The email address. |
def decrypt_email(enc_email):
aes = SimpleAES(flask.current_app.config["AES_KEY"])
return aes.decrypt(enc_email) | The inverse of :func:`encrypt_email`.
:param enc_email:
The encrypted email address. |
def shortlink_scanned(self, data):
# Inform log that we received an event
self.logger.info("Received shortlink_scanned event")
data = json.loads(data)
customer_token = str(data['object']['id'])
response = self.mapiclient.create_payment_request(
customer=customer_token,
currency="NOK",
amount="20.00",
allow_credit=True,
pos_id=self._pos_id,
pos_tid=str(uuid.uuid4()),
action='auth',
expires_in=90,
callback_uri="pusher:m-winterwarming-pos_callback_chan",
text='Have some hot chocolate!')
self._tid = response['id']
print(str(self._tid)) | Called when a shortlink_scanned event is received |
def pusher_connected(self, data):
# Inform user that pusher is done connecting
self.logger.info("Pusherclient connected")
# Bind the events we want to listen to
self.callback_client.bind("payment_authorized",
self.payment_authorized)
self.callback_client.bind("shortlink_scanned",
self.shortlink_scanned) | Called when the pusherclient is connected |
def get(self, *args, **kwargs):
if self.query.where:
# If there is any other ``where`` filter on this QuerySet just call
# super. There will be a where clause if this QuerySet has already
# been filtered/cloned.
return super(CachingQuerySet, self).get(*args, **kwargs)
# Punt on anything more complicated than get by pk/id only...
if len(kwargs) == 1:
k = list(kwargs)[0]
if k in ('pk', 'pk__exact', '%s' % self.model._meta.pk.attname,
'%s__exact' % self.model._meta.pk.attname):
obj = cache.get(self.model._cache_key(pk=list(kwargs.values())[0]))
if obj is not None:
obj.from_cache = True
return obj
# Calls self.iterator to fetch objects, storing object in cache.
return super(CachingQuerySet, self).get(*args, **kwargs) | Checks the cache to see if there's a cached entry for this pk. If not, fetches
using super then stores the result in cache.
Most of the logic here was gathered from a careful reading of
``django.db.models.sql.query.add_filter`` |
def find_route_by_view_name(self, view_name):
if not view_name:
return (None, None)
for uri, route in self.routes_all.items():
if route.name == view_name:
return uri, route
return (None, None) | Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route) |
def _get(self, url, method, host):
url = host + url
# Check against known static routes
route = self.routes_static.get(url)
method_not_supported = self._invalid_usage(
'Method {} not allowed for URL {}'.format(
method, url), status_code=405)
if route:
if route.methods and method not in route.methods:
raise method_not_supported
match = route.pattern.match(url)
else:
route_found = False
# Move on to testing all regex routes
for route in self.routes_dynamic[url_hash(url)]:
match = route.pattern.match(url)
route_found |= match is not None
# Do early method checking
if match and method in route.methods:
break
else:
# Lastly, check against all regex routes that cannot be hashed
for route in self.routes_always_check:
match = route.pattern.match(url)
route_found |= match is not None
# Do early method checking
if match and method in route.methods:
break
else:
# Route was found but the methods didn't match
if route_found:
raise method_not_supported
raise self._not_found(
'Requested URL {} not found'.format(url))
kwargs = {p.name: p.cast(value)
for value, p
in zip(match.groups(1), route.parameters)}
route_handler = route.handler
if hasattr(route_handler, 'handlers'):
route_handler = route_handler.handlers[method]
return route_handler, [], kwargs, route.uri | Get a request handler based on the URL of the request, or raises an
error. Internal method for caching.
:param url: request URL
:param method: request method
:return: handler, arguments, keyword arguments |
def is_stream_handler(self, request):
handler = self.get(request)[0]
if (hasattr(handler, 'view_class') and
hasattr(handler.view_class, request.method.lower())):
handler = getattr(handler.view_class, request.method.lower())
return hasattr(handler, 'is_stream') | Check whether the handler for this request is a stream handler.
:param request: Request object
:return: bool |
def add_vertex(self, v):
self.graph.add_vertex(v)
self.vs.add(v) | Add a vertex to the graph
:param v: The vertex name. |
def add_outward_edge(self, v, e):
self.add_vertex(v)
self.graph.add_vertex(e)
self.es.add(e)
self.graph.add_edge(e, v) | Add an outward edge to a vertex
:param v: The source vertex.
:param e: The name of the outward edge. |
def fetch_path(self, name):
with codecs.open(self.lookup_path(name), encoding='utf-8') as fd:
return fd.read() | Fetch contents from the path retrieved via lookup_path.
No caching will be done. |
def execute(self, mold_id, data, wrapper_tag='div'):
template = self.load_mold(mold_id)
kwargs = {}
kwargs.update(data)
kwargs['_nunja_data_'] = 'data-nunja="%s"' % mold_id
kwargs['_template_'] = template
kwargs['_wrapper_tag_'] = wrapper_tag
return self._core_template_.render(**kwargs) | Execute a mold `mold_id` by rendering through ``env``.
This is done using its default template, with data provided as
dict.
This returns the wrapped content, which contains the bits that
the client-side on-load script trigger will execute the index.js
defined for this mold; if this is not desired, simply call the
render method instead. |
def render(self, mold_id, data):
template = self.load_mold(mold_id)
return template.render(**data) | Render a mold `mold_id`. No wrappers are applied as only the
default template defined for the mold is rendered. |
def _get_model_table(self, part):
rows = self.parser.find(part).find_children('tr').list_results()
table = []
for row in rows:
table.append(self._get_model_row(self.parser.find(
row
).find_children('td,th').list_results()))
return self._get_valid_model_table(table) | Returns a list that represents the table.
:param part: The table header, table footer or table body.
:type part: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The list that represents the table.
:rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) |
def _get_valid_model_table(self, ros):
# pylint: disable=no-self-use
new_table = []
if bool(ros):
length_table = len(ros)
for row_index in range(0, length_table):
cells_added = 0
original_row = [] + ros[row_index]
if len(new_table) <= row_index:
new_table.append([])
length_row = len(original_row)
for cell_index in range(0, length_row):
cell = original_row[cell_index]
new_cell_index = cell_index + cells_added
new_row = new_table[row_index]
while True:
if len(new_row) <= new_cell_index:
new_row.append(None)
break
elif new_row[new_cell_index] is None:
break
else:
cells_added += 1
new_cell_index = cell_index + cells_added
new_row[new_cell_index] = cell
if cell.has_attribute('rowspan'):
rowspan = int(cell.get_attribute('rowspan'))
if rowspan > 1:
for rowspan_index in range(1, rowspan):
new_row_index = row_index + rowspan_index
if len(new_table) <= new_row_index:
new_table.append([])
while (
len(new_table[new_row_index])
< new_cell_index
):
new_table[new_row_index].append(None)
new_table[new_row_index].append(cell)
return new_table | Returns a list that represents the table with the rowspans.
:param ros: The list that represents the table without the rowspans.
:type ros: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:return: The list that represents the table with the rowspans.
:rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) |
def _get_model_row(self, row):
# pylint: disable=no-self-use
new_row = [] + row
size = len(row)
for i in range(0, size):
cell = row[i]
if cell.has_attribute('colspan'):
colspan = int(cell.get_attribute('colspan'))
if colspan > 1:
for j in range(1, colspan):
new_row.insert(i + j, cell)
return new_row | Returns a list that represents the line of table with the colspans.
:param row: The list that represents the line of table without the
colspans.
:type row: list(hatemile.util.html.htmldomelement.HTMLDOMElement)
:return: The list that represents the line of table with the colspans.
:rtype: list(hatemile.util.html.htmldomelement.HTMLDOMElement) |
def _validate_header(self, hed):
# pylint: disable=no-self-use
if not bool(hed):
return False
length = -1
for row in hed:
if not bool(row):
return False
elif length == -1:
length = len(row)
elif len(row) != length:
return False
return True | Validate the list that represents the table header.
:param hed: The list that represents the table header.
:type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:return: True if the table header is valid or False if the table header
is not valid.
:rtype: bool |
def _get_cells_headers_ids(self, hed, index):
# pylint: disable=no-self-use
ids = []
for row in hed:
if row[index].get_tag_name() == 'TH':
ids.append(row[index].get_attribute('id'))
return ids | Returns a list with ids of rows of same column.
:param hed: The list that represents the table header.
:type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:param index: The index of columns.
:type index: int
:return: The list with ids of rows of same column.
:rtype: list(str) |
def _associate_data_cells_with_header_cells_of_row(self, element):
table = self._get_model_table(element)
for row in table:
headers_ids = []
for cell in row:
if cell.get_tag_name() == 'TH':
self.id_generator.generate_id(cell)
headers_ids.append(cell.get_attribute('id'))
cell.set_attribute('scope', 'row')
if bool(headers_ids):
for cell in row:
if cell.get_tag_name() == 'TD':
headers = cell.get_attribute('headers')
for header_id in headers_ids:
headers = CommonFunctions.increase_in_list(
headers,
header_id
)
cell.set_attribute('headers', headers) | Associate the data cell with header cell of row.
:param element: The table body or table footer.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement |
def _prepare_header_cells(self, table_header):
cells = self.parser.find(table_header).find_children(
'tr'
).find_children('th').list_results()
for cell in cells:
self.id_generator.generate_id(cell)
cell.set_attribute('scope', 'col') | Set the scope of header cells of table header.
:param table_header: The table header.
:type table_header: hatemile.util.html.htmldomelement.HTMLDOMElement |
def white(self, kelvin):
whiteness = int(((kelvin - MIN_KELVIN) * 255)/(MAX_KELVIN-MIN_KELVIN))
whiteness = max(min(whiteness,255),0)
data = [CODE_COLOUR, self._id, 255 - whiteness, whiteness]
self._mode = MODE_WHITE
self._white = kelvin
self._effect = ''
self._send(data) | Supports between 2700K and 6500K white
:type kelvin: int |
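For reference, the mapping above is linear between the limits; a short worked example, assuming MIN_KELVIN = 2700 and MAX_KELVIN = 6500 as stated in the docstring:
# Worked example of the whiteness mapping (assumed constants).
MIN_KELVIN, MAX_KELVIN = 2700, 6500
kelvin = 4600
whiteness = int(((kelvin - MIN_KELVIN) * 255) / (MAX_KELVIN - MIN_KELVIN))  # 127
payload_tail = [255 - whiteness, whiteness]  # [128, 127]: warm vs. cold channel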
def encrypt(clear_text) -> str:
if not isinstance(clear_text, bytes):
clear_text = str.encode(clear_text)
cipher = Fernet(current_app.config['KEY'])
return cipher.encrypt(clear_text).decode("utf-8") | Use config.json key to encrypt |
def decrypt(crypt_text) -> str:
cipher = Fernet(current_app.config['KEY'])
if not isinstance(crypt_text, bytes):
crypt_text = str.encode(crypt_text)
return cipher.decrypt(crypt_text).decode("utf-8") | Use config.json key to decrypt |
def get_volume(self, id):
# If the id is actually a path
if exists(id):
with open(id) as file:
size = os.lseek(file.fileno(), 0, os.SEEK_END)
return {'path': id, 'size': size}
return self.volume.get(id) | return volume information if the argument is an id or a path |
def randomize(self, device=None, percent=100, silent=False):
volume = self.get_volume(device)
# The number of blocks in the volume
blocks = int(volume['size'] / BLOCK_SIZE)
# How many writes should be to the device
# (based on the percentage requested)
num_writes = int(blocks * percent * 0.01)
# Build a list of offsets we write to
offsets = sorted(random.sample(range(blocks), num_writes))
total = 0
if not silent:
print('Writing urandom to %s bytes in %s' % (volume['size'],
volume['path']))
with open(volume['path'], 'wb') as file:
for offset in offsets:
if not silent:
self.dot()
file.seek(offset * BLOCK_SIZE)
# Create a random string 32k long then duplicate
# the randomized string 128 times (32768 * 128 = 4MB)
data = os.urandom(32768) * 128
total += len(data)
# write out the 4MB block of randomized data
file.write(data)
print("\nWrote: %s" % total) | Writes random data to the beginning of each 4MB block on a block device
this is useful when performance testing the backup process
(Without any optional arguments will randomize the first 32k of each
4MB block on 100 percent of the device) |
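A quick worked example of the block arithmetic above, assuming BLOCK_SIZE is the 4MB mentioned in the docstring and a hypothetical 1 GiB volume:
BLOCK_SIZE = 4 * 1024 * 1024            # assumed 4 MiB block size
size = 1024 * 1024 * 1024               # hypothetical 1 GiB volume
blocks = size // BLOCK_SIZE             # 256 blocks
num_writes = int(blocks * 25 * 0.01)    # percent=25 -> 64 random offsets written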
def read(self, device=None, offset=0, bs=None, count=1):
volume = self.get_volume(device)
block_size = bs or BLOCK_SIZE
offset = int(offset) * block_size
count = int(count)
print("Offset: ", offset)
total = 0
with directio.open(volume['path'], buffered=block_size) as file:
file.seek(offset)
for i in range(0, count):
total += os.write(sys.stdout.fileno(), file.read(block_size))
os.write(sys.stdout.fileno(), ("\nRead: %d Bytes\n" % total).encode()) | Using O_DIRECT, read from the block device specified to stdout
(Without any optional arguments will read the first 4k from the device) |
def write(self, device=None, char=0, bs=None, count=None):
volume = self.get_volume(device)
block_size = bs or BLOCK_SIZE
# Calculate the number of blocks that are in the volume
count = count or (volume['size'] // block_size)
data = "".join([chr(int(char)) for i in range(0, block_size)])
print("Writing: '%c'" % data[0])
total = 0
with directio.open(volume['path'], buffered=block_size) as file:
for i in range(0, count):
self.dot()
total += file.write(data)
print("\nWrote: ", total)
return 0 | Using O_DIRECT, write a character in 4k chunks to a specified block
device (Without any optional arguments will write NULL's to the
entire device) |
def backup(self, id=None, src=None, timestamp=None):
# Set basic Logging
logging.basicConfig()
# Get the lunr logger
log = logger.get_logger()
# Output Debug level info
log.logger.setLevel(logging.DEBUG)
# Load the local storage configuration
conf = LunrConfig.from_storage_conf()
# If no time provided, use current time
timestamp = timestamp or time()
# Init our helpers
volume = VolumeHelper(conf)
backup = BackupHelper(conf)
try:
# Create the snapshot
snapshot = volume.create_snapshot(src, id, timestamp)
# For testing non-snapshot speeds
# snapshot = volume.get(src)
# snapshot['backup_id'] = id
# snapshot['origin'] = src
# snapshot['timestamp'] = 1338410885.0
# del snapshot['volume']
print("Created snap-shot: ", pprint(snapshot))
with self.timeit(snapshot['size']):
# Backup the snapshot
print("Starting Backup")
backup.save(snapshot, id)
finally:
# Delete the snapshot if it was created
if 'snapshot' in locals():
self._remove_volume(snapshot['path']) | This runs a backup job outside of the storage api,
which is useful for performance testing backups |
def clone(self, id=None, src=None, backup=None, size=None):
# Set basic Logging
logging.basicConfig()
# Get the lunr logger
log = logger.get_logger()
# Output Debug level info
log.logger.setLevel(logging.DEBUG)
# Load the local storage configuration
conf = LunrConfig.from_storage_conf()
# Init the volume helper
volume = VolumeHelper(conf)
# Attempt to figure out the original volume size
size = size or str(volume.get(src)['size'] // 1073741824)
# Size is in gigs
if not size.endswith('G'):
size = size + 'G'
# Create a tag to apply to the lvm volume
tag = encode_tag(source_volume_id=src, backup_id=backup)
# Create the volume
execute('lvcreate', volume.volume_group,
name=id, size=size, addtag=tag)
# Get info for the newly created volume
new = volume.get(id)
with self.timeit():
print("Starting Backup")
# Restore volume from the backup
volume.clone(new, src, backup) | This runs a clone job outside of the storage api,
which is useful for performance testing backup restores
(Example: storage tools clone volume-clone
--backup volume-backup --src volume-original) |
def get_ip(request):
if getsetting('LOCAL_GEOLOCATION_IP'):
return getsetting('LOCAL_GEOLOCATION_IP')
forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if not forwarded_for:
return UNKNOWN_IP
for ip in forwarded_for.split(','):
ip = ip.strip()
if not ip.startswith('10.') and not ip == '127.0.0.1':
return ip
return UNKNOWN_IP | Return the IP address inside the HTTP_X_FORWARDED_FOR var inside
the `request` object.
The return value of this function can be overridden by the
`LOCAL_GEOLOCATION_IP` variable in the `conf` module.
This function will skip local IPs (starting with 10. and equals to
127.0.0.1). |
def get_connection(self):
if self.conn:
return self.conn
redis_configs = getsetting('REDIS_CONNECTIONS')
if redis_configs:
config_name = getsetting('EVENTLIB_REDIS_CONFIG_NAME', 'default')
config = redis_configs[config_name]
host = config['HOST']
port = config['PORT']
self.conn = redis.StrictRedis(host=host, port=port)
else:
self.conn = None
return self.conn | Return a valid redis connection based on the following settings
* `REDIS_CONNECTIONS`
* `EVENTLIB_REDIS_CONFIG_NAME`
The first one is a dictionary in the following format:
>>> {
... 'server1': {'HOST': 'redis-server-1', 'PORT': 9001},
... 'server2': {'HOST': 'redis-server-2', 'PORT': 9002},
... }
The second one is the name of the entry present in the above
dict, like `server1` or `server2`. |
def _run_setup_py(self, args, echo=True, echo2=True, ff=''):
python = self.python
if ff:
setup_py = '-c"%s"' % (RUN_SETUP % locals())
else:
setup_py = 'setup.py %s' % ' '.join(args)
rc, lines = self.process.popen(
'"%(python)s" %(setup_py)s' % locals(), echo=echo, echo2=echo2)
return rc, lines | Run setup.py with monkey-patched setuptools.
The patch forces setuptools to use the file-finder 'ff'.
If 'ff' is the empty string, the patch is not applied.
'args' is the list of arguments that should be passed to
setup.py. |
def app_factory(global_settings, **local_settings): # pylint: disable=W0613
config = Configurator()
config.setup_registry(settings=local_settings,
root_factory=RootFactory())
if 'configure_zcml' in local_settings:
config.load_zcml(local_settings['configure_zcml'])
app = config.make_wsgi_app()
# In the absence of an application name in the settings, we have to
# extract the main app's name from the ini file, which unfortunately
# means parsing it again.
app_name = app_name_from_ini_file(global_settings['__file__'])
ep_group = "%s.plugins" % app_name
plugin_mgr = config.get_registered_utility(IPluginManager)
plugin_mgr.load_all(ep_group)
return app | Default factory for creating a WSGI application using the everest
configurator and root factory.
:param dict global_settings: Global settings extracted from an ini file.
Not used in this default app factory.
:param dict local_settings: App settings extracted from an ini file. |
def factory(attr_type, data):
constructors = {
MFT_ATTR_STANDARD_INFORMATION: MftAttrStandardInformation,
MFT_ATTR_ATTRIBUTE_LIST: MftAttrAttributeList,
MFT_ATTR_FILENAME: MftAttrFilename,
MFT_ATTR_OBJECT_ID: MftAttrObjectId,
MFT_ATTR_SECURITY_DESCRIPTOR: MftAttrSecurityDescriptor,
MFT_ATTR_VOLUME_NAME: MftAttrVolumeName,
MFT_ATTR_VOLUME_INFO: MftAttrVolumeInfo,
MFT_ATTR_DATA: MftAttrData,
MFT_ATTR_INDEX_ROOT: MftAttrIndexRoot,
MFT_ATTR_INDEX_ALLOCATION: MftAttrIndexAllocation,
MFT_ATTR_BITMAP: MftAttrBitmap,
MFT_ATTR_REPARSE_POINT: MftAttrReparsePoint,
MFT_ATTR_LOGGED_TOOLSTREAM: MftAttrLoggedToolstream,
}
if attr_type not in constructors:
return None
return constructors[attr_type](data) | Returns an initialized attribute object based on attr_type \
(eg. :class:`MftAttrStandardInformation`)
Args:
attr_type (uint): Attribute type number (eg. 0x10 - \
$STANDARD_INFORMATION)
data (byte array): Data to initialize attribute object with. |
def bulk_get_or_create(self, data_list):
items_to_create = dict()
for record_key, record_config in data_list.items():
if record_key not in items_to_create:
record = self.get_instance(record_key)
if not record:
items_to_create[record_key] = self.model_cls(**record_config)
if items_to_create:
"""
TODO. I think we can optimize this. Switch to values, get the primary id
Query set is just select the model with that ID. Return the model object without running the full queryset again. Should be a lot faster.
"""
self.model_cls.objects.bulk_create(items_to_create.values())
self.set_record_lookup(True)
return self.record_lookup | data_list is the data to get or create.
We generate the query and set all the record keys based on the passed-in queryset.
Then we loop over each item in data_list, which already carries its key, so there is no need to generate them; this should save a lot of time.
Using values instead of the whole object is much faster.
Args:
data_list:
Returns: |
async def fetch_page(session, host):
await asyncio.sleep(random.randint(0, 25) * 0.1)
start = time.time()
logger.info('Fetch from {}'.format(host))
try:
response = await session.get(host, allow_redirects=False)
except aiohttp.ClientResponseError as err:
# likely a 404 implying HTTP but no page
# likely a 401 implying HTTP but no access
# FIXME: for instance, a gateway
# headers are available via err.headers()
# https://multidict.readthedocs.io/en/stable/multidict.html#multidict.CIMultiDict
results_tuple = (host, 'no page', err)
except aiohttp.ClientConnectorError as err:
# likely device at IP but no HTTP server
results_tuple = (host, 'no http', err)
except aiohttp.ServerConnectionError as err:
# likely ServerTimeoutError implying no device at IP
results_tuple = (host, 'no dev', err)
except aiohttp.InvalidURL as err:
# likely a malformed URL
results_tuple = (host, 'no URL', err)
# except Exception as err:
# # Generic trap for debug
# results_tuple = (host, 'unknown', err)
else:
try:
text_response = await response.text()
except aiohttp.ClientPayloadError as err:
# trouble reading page TODO: anyway to recover?
results_tuple = (host, 'no read', err)
else:
results_tuple = (host, 'found', text_response)
response.close()
logger.info('Recvd from {} after {:.2f}s'.format(host, time.time() - start))
return results_tuple | Perform the page fetch from an individual host.
`session` - An aiohttp
[client session](http://aiohttp.readthedocs.io/en/stable/client_reference.html#client-session)
`host` - URL to fetch
`return` tuple with the following:
* The host parameter
* A vague status string
* Text response or an exception depending on status above |
async def asynchronous(urls=None, re_filter=None):
class _URLBase(str):
""" Convenient access to hostname (ip) portion of the URL """
@property
def hostname(self):
return urlsplit(self).hostname
http_devices = {}
qualified_devices = []
connection = aiohttp.TCPConnector(limit=0)
async with aiohttp.ClientSession(connector=connection,
conn_timeout=5, raise_for_status=True) as session:
futures = [fetch_page(session, url) for url in urls]
for future in asyncio.as_completed(futures):
response = await future
if 'found' in response[1]:
http_devices[response[0]] = response[2]
logger.debug('Processed %s', response[0])
if re_filter.search(response[2]):
qualified_devices.append(_URLBase(response[0]))
# print('The following responded to HTTP:')
# for x in http_devices.keys():
# print(x)
return qualified_devices | Asynchronous request manager for session. Returns list of responses that
match the filter.
`urls` - tuple of URLs to request
`re_filter` - a compiled regular expression
[object](https://docs.python.org/3/library/re.html#re-objects) |
def url_generator(network=None, path=''):
network_object = ipaddress.ip_network(network)
if network_object.num_addresses > 256:
# will need to batch process this case otherwise we run out of selectors
logger.error('Scan limited to 256 addresses, requested %d.', network_object.num_addresses)
raise NotImplementedError
elif network_object.num_addresses > 1:
# async request upto 256 hosts
network_hosts = network_object.hosts()
else:
# assume user intent was a single IP address
network_hosts = [network_object.network_address]
return (urlunsplit(('http', str(ip), path, '', '')) for ip in network_hosts) | Return a generator of URLs with path, one for each host on network
`network` - IP address and subnet mask compatible with
[ipaddress library](https://docs.python.org/3/library/ipaddress.html#ipaddress.ip_network)
`path` - Path portion of a URL as defined by
[url(un)split](https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlsplit) |
def survey(network=None, path='', pattern='', log=False):
if log:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.CRITICAL)
network_scan = asyncio.ensure_future(asynchronous(
urls=url_generator(network=network, path=path),
re_filter=re.compile(pattern))
)
ioloop = asyncio.get_event_loop()
ioloop.run_until_complete(network_scan)
# Zero-sleep to allow underlying connections to close
# http://aiohttp.readthedocs.io/en/stable/client_advanced.html#graceful-shutdown
ioloop.run_until_complete(asyncio.sleep(0))
# ioloop.close() # don't close the loop, so it's available for re-use
# https://stackoverflow.com/questions/45010178/how-to-use-asyncio-event-loop-in-library-function
return sorted(network_scan.result(), key=lambda x: ipaddress.ip_address(x.hostname)) | Search network for hosts with a response to path that matches pattern
`network` - IP address and subnet mask compatible with
[ipaddress library](https://docs.python.org/3/library/ipaddress.html#ipaddress.ip_network)
`path` - Path portion of a URL as defined by
[url(un)split](https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlsplit)
`pattern` - A regular expression pattern compatible with
[re.compile](https://docs.python.org/3/library/re.html#re.compile)
`log` - boolean to control logging level |
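An illustrative call of `survey`; the network, path and pattern values below are made up.
# Hypothetical scan: hosts on the /24 whose /status page mentions "uptime".
matches = survey(network='192.168.0.0/24', path='status', pattern='uptime')
for url in matches:
    print(url.hostname)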
def cli():
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(
'%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
))
logger.addHandler(ch)
import argparse
parser = argparse.ArgumentParser(description="Search 'network' for hosts with a \
response to 'path' that matches 'filter'")
parser.add_argument('network', help='IP address with optional mask, e.g. 192.168.0.0/24')
parser.add_argument('-p', '--path', help='URL path at host, e.g. index.html',
default='')
parser.add_argument('-f', '--filter', help='Regular expression pattern for filter',
dest='pattern', default='')
parser.add_argument('-l', '--log', help='Enable logging', action='store_true')
args = parser.parse_args()
print('Scanning, please wait ...')
result = survey(**vars(args))
print('Found {} match{}{}{} on {}'.format(len(result), 'es' if len(result)!=1 else '',
' for ' if args.pattern else '', args.pattern, args.network))
for x in result:
print(x.hostname) | Command line interface |
def hidden_cursor():
if sys.stdout.isatty():
_LOGGER.debug('Hiding cursor.')
print('\x1B[?25l', end='')
sys.stdout.flush()
try:
yield
finally:
if sys.stdout.isatty():
_LOGGER.debug('Showing cursor.')
print('\n\x1B[?25h', end='')
sys.stdout.flush() | Temporarily hide the terminal cursor. |
def display_status():
def print_status(msg, color):
"""Print the status message.
Args:
msg: The message to display (e.g. OK or FAILED).
color: The ANSI color code to use in displaying the message.
"""
print('\r' if sys.stdout.isatty() else '\t', end='')
print('{}{}[{color}{msg}{}]{}'.format(
Cursor.FORWARD(_ncols() - 8),
Style.BRIGHT,
Fore.RESET,
Style.RESET_ALL,
color=color,
msg=msg[:6].upper().center(6)
))
sys.stdout.flush()
try:
yield
except Status as e:
_LOGGER.debug(e)
print_status(e.msg, e.color)
if e.exc:
raise e.exc # pylint: disable=raising-bad-type
except (KeyboardInterrupt, EOFError):
raise
except Exception:
print_status('FAILED', Fore.RED)
raise
else:
print_status('OK', Fore.GREEN) | Display an OK or FAILED message for the context block. |
def timed_display(msg):
def print_header(msg, newline=True):
"""Print a header line.
Args:
msg: A message to be printed in the center of the header line.
newline: Whether or not to print a newline at the end of the
header. This can be convenient for allowing the line to
overwrite another.
"""
if sys.stdout.isatty():
print('\r', end=Style.BRIGHT + Fore.BLUE)
print(' {} '.format(msg).center(_ncols(), '='),
end='\n{}'.format(Style.RESET_ALL)
if newline else Style.RESET_ALL)
sys.stdout.flush()
def print_message(msg):
"""Print a task title message.
Args:
msg: The message to display before running the task.
"""
if sys.stdout.isatty():
print('\r', end='')
msg = msg.ljust(_ncols())
print(msg, end='')
sys.stdout.flush()
start = time.time()
print_header(msg)
with hidden_cursor():
try:
yield print_message
finally:
delta = time.time() - start
print_header('completed in {:.2f}s'.format(delta), False) | A timed block to run tasks with titles and success/failure messages.
Args:
msg: The header message to print at the beginning of the timed block. |
def run_tasks(header, tasks):
tasks = list(tasks)
with timed_display(header) as print_message:
with tqdm(tasks, position=1, desc='Progress', disable=None,
bar_format='{desc}{percentage:3.0f}% |{bar}|',
total=sum(t[2] if len(t) > 2 else 1 for t in tasks),
dynamic_ncols=True) as pbar:
for task in tasks:
print_message(task[0])
with display_status():
try:
task[1]()
finally:
pbar.update(task[2] if len(task) > 2 else 1) | Run a group of tasks with a header, footer and success/failure messages.
Args:
header: A message to print in the header bar before the tasks are run.
tasks: A list of tuples containing a task title, a task, and a weight.
If the tuple only contains two values, the weight is assumed to be
one. |
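A minimal usage sketch of the (title, task, weight) tuples described above; the task names and callables are made up.
import time

tasks = [
    ('Compiling sources', lambda: time.sleep(0.5), 3),
    ('Running unit tests', lambda: time.sleep(1.0), 5),
    ('Packaging artifacts', lambda: time.sleep(0.2)),  # weight defaults to 1
]
run_tasks('Release build', tasks)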
def _pusher_connect_handler(self, data):
self.channel = self.pusher.subscribe(self.pos_callback_chan)
for listener in self.pusher_connected_listeners:
listener(data) | Event handler for the connection_established event. Subscribes to the
POS callback channel and notifies the registered connection listeners. |
def _runForever(self, stop_event):
while(not stop_event.is_set()):
state = self.pusher.connection.state
if (state != "connecting" and
state != "connected"):
self.logger.warning(
"Pusher seems to be disconnected, trying to reconnect")
self.pusher.connect()
stop_event.wait(0.5) | Runs the main loop
Arguments:
stop_event: threading.Event() as a stop signal |
def stop(self):
self.pusherthread_stop.set()
self.pusher.disconnect()
# wait until pusher is down
while self.pusher.connection.state == "connected":
sleep(0.1)
logging.info("shutting down pusher connector thread") | Stops the pusherclient cleanly |
def load(self, filename, bs=512):
with open(filename, 'rb') as f:
f.seek(GPT_HEADER_OFFSET + 0x0C)
header_size = struct.unpack("<I", f.read(4))[0]
f.seek(GPT_HEADER_OFFSET)
header_data = f.read(header_size)
self.header = GPT_HEADER(header_data)
if (self.header.signature != GPT_SIGNATURE):
raise Exception("Invalid GPT signature")
self.__load_partition_entries(f, bs) | Loads GPT partition table.
Args:
filename (str): path to file or device to open for reading
bs (uint): Block size of the volume, default: 512
Raises:
IOError: If file does not exist or not readable |
def __load_partition_entries(self, fd, bs):
fd.seek(self.header.part_lba * bs)
for p in range(0, self.header.num_partitions):
data = fd.read(self.header.part_size)
entry = GptPartitionEntry(data)
if entry.type_guid != uuid.UUID(
'{00000000-0000-0000-0000-000000000000}'
):
self.__partition_entries.append(entry)
else:
# stop loading on empty partition entry
break | Loads the list of :class:`GptPartition` partition entries
Args:
bs (uint): Block size of the volume |
def slice_local_to_global(self, index_slice, axis=0):
local_start = self.int_local_to_global_start(index_slice.start, axis)
local_stop = self.int_local_to_global_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step) | Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated |
def local_to_global(self, index):
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_local_to_global(index)
elif type(index) is slice:
return self.slice_local_to_global(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_local_to_global(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_local_to_global(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type') | Calculate global index from local index
:param index: input index
:return: global index for data |
def slice_global_to_local(self, index_slice, axis=0):
if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:
return None
local_start = self.int_global_to_local_start(index_slice.start, axis)
local_stop = self.int_global_to_local_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step) | Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated |
def global_to_local(self, index):
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_global_to_local(index)
elif type(index) is slice:
return self.slice_global_to_local(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
#if len(index) is not len(self.__mask):
# raise IndexError('check length of parameter index')
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_global_to_local(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_global_to_local(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type') | Calculate local index from global index
:param index: input index
:return: local index for data |
def int_global_to_local_start(self, index, axis=0):
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start | Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return: local start index, or None if out of range |
def int_global_to_local_stop(self, index, axis=0):
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start | Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return: local stop index, or None if out of range |
def int_global_to_local(self, index, axis=0):
# Why >= at this point? Actually > should be sufficient -- test!
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
return index-self.__mask[axis].start | Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return: local index, or None if out of range |
def int_out_of_bounds(self, index, axis=0):
#if index >= self._global_shape[axis]:
if index > self._global_shape[axis]:
raise IndexError('index is larger than the upper bound')
# wrap around index if negative like in python
if index < 0:
index += self._global_shape[axis]
#warnings.warn('wrap around may occur')
# check for invalid wrap around
if index < 0:
raise IndexError('index is smaller than the lower bound')
return index | Check if index is out of local processing bounds.
This function performs the check for an index of type integer.
:param index: global index to check, as type int
:param axis: current axis to check
:return: return input or raise error |
def out_of_bounds(self, index):
if type(index) is int:
return self.int_out_of_bounds(index)
elif type(index) is slice:
return self.slice_out_of_bounds(index)
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if type(item) is slice:
temp_index = self.slice_out_of_bounds(item, k)
elif type(item) is int:
temp_index = self.int_out_of_bounds(item, k)
# FIXME: will fail if item is no int or slice!
if temp_index is None:
return temp_index
local_index.append(temp_index)
return tuple(local_index) | Check index for out of bounds
:param index: index as integer, tuple or slice
:return: local index as tuple |
def reservoir(iterator, k):
sample = list(itertools.islice(iterator, 0, k))
for i, item in enumerate(iterator):
replace = random.randint(0, i + k)
if replace < k:
sample[replace] = item
return sample | Performs reservoir sampling of k items in iterator. Make sure that the iterator is a once-only iterator
(ie. not created using the "range" function).
:param iterator: set of items to sample from
:param k: sample k items
:return: list of sampled items |
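A quick usage sketch; the data below is illustrative, and the iterator is consumed exactly once as the docstring requires.
# Sample 5 items from a one-shot iterator (e.g. a generator over a large stream).
stream = iter(range(1000000))
picked = reservoir(stream, 5)
print(picked)  # 5 items, each kept with equal probability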
def get_qpimage(self, idx):
if self._bgdata:
# The user has explicitly chosen different background data
# using `get_qpimage_raw`.
qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx)
else:
# We can use the background data stored in the qpimage hdf5 file
with self._qpseries() as qps:
qpi = qps.get_qpimage(index=idx).copy()
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return background-corrected QPImage of data at index `idx` |
def get_qpimage_raw(self, idx):
with self._qpseries() as qps:
qpi = qps.get_qpimage(index=idx).copy()
# Remove previously performed background correction
qpi.set_bg_data(None)
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return QPImage without background correction |
def verify(path):
valid = False
try:
h5 = h5py.File(path, mode="r")
qpi0 = h5["qpi_0"]
except (OSError, KeyError):
pass
else:
if ("qpimage version" in qpi0.attrs and
"phase" in qpi0 and
"amplitude" in qpi0 and
"bg_data" in qpi0["phase"] and
"bg_data" in qpi0["amplitude"]):
valid = True
return valid | Verify that `path` has the qpimage series file format |
def generate_requirements(output_path=None):
from django.conf import settings
reqs = set()
for app in settings.INSTALLED_APPS:
if app in mapping.keys():
reqs |= set(mapping[app])
if output_path is None:
print "--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/"
for item in reqs:
print item
else:
try:
out_file = open(output_path, 'w')
out_file.write("--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/\n")
for item in reqs:
out_file.write("%s\n" % item)
finally:
out_file.close() | Loop through the INSTALLED_APPS and create a set of requirements for pip.
if output_path is ``None`` then write to standard out, otherwise write
to the path. |
def register_mbr_plugin(self, fs_id, plugin):
self.logger.debug('MBR: {}, FS ID: {}'
.format(self.__get_plugin_name(plugin), fs_id))
self.__mbr_plugins[fs_id].append(plugin) | Used in a plugin's registration routine
to associate its detection method with the given filesystem id.
Args:
fs_id: filesystem id that is read from MBR partition entry
plugin: plugin that supports this filesystem |
def register_gpt_plugin(self, fs_guid, plugin):
key = uuid.UUID(fs_guid.lower())
self.logger.debug('GPT: {}, GUID: {}'
.format(self.__get_plugin_name(plugin), fs_guid))
self.__gpt_plugins[key].append(plugin) | Used in a plugin's registration routine
to associate its detection method with the given filesystem guid.
Args:
fs_guid: filesystem guid that is read from GPT partition entry
plugin: plugin that supports this filesystem |
def detect_mbr(self, filename, offset, fs_id):
self.logger.debug('Detecting MBR partition type')
if fs_id not in self.__mbr_plugins:
return None
else:
plugins = self.__mbr_plugins.get(fs_id)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None | Used by rawdisk.session.Session to match mbr partitions against
filesystem plugins.
Args:
filename: device or file that it will read in order to detect
the filesystem
fs_id: filesystem id to match (ex. 0x07)
offset: offset for the filesystem that is being matched
Returns:
Volume object supplied by matched plugin.
If there is no match, None is returned |
def detect_gpt(self, filename, offset, fs_guid):
self.logger.debug('Detecting GPT partition type')
if fs_guid not in self.__gpt_plugins:
return None
else:
plugins = self.__gpt_plugins.get(fs_guid)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None | Used by rawdisk.session.Session to match gpt partitions against
filesystem plugins.
Args:
filename: device or file that it will read in order to detect the
filesystem
fs_id: filesystem guid to match
(ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7})
offset: offset for the filesystem that is being matched
Returns:
Volume object supplied by matched plugin.
If there is no match, None is returned |
def inject_documentation(**options):
import cog
loader = ConfigLoader(**options)
cog.out("\n" + loader.documentation + "\n\n") | Generate configuration documentation in reStructuredText_ syntax.
:param options: Any keyword arguments are passed on to the
:class:`ConfigLoader` initializer.
This method injects the generated documentation into the output generated
by cog_.
.. _cog: https://pypi.python.org/pypi/cogapp |
def new_checksum(self):
if self.context.is_file(self.filename):
friendly_name = format_path(self.filename)
logger.debug("Calculating SHA1 of %s ..", friendly_name)
context = hashlib.sha1()
context.update(self.context.read_file(self.filename))
checksum = context.hexdigest()
logger.debug("The SHA1 digest of %s is %s.", friendly_name, checksum)
return checksum | Get the SHA1 digest of the contents of :attr:`filename` (a string). |
def old_checksum(self):
if self.context.is_file(self.checksum_file):
logger.debug("Reading saved checksum from %s ..", format_path(self.checksum_file))
checksum = self.context.read_file(self.checksum_file).decode('ascii')
logger.debug("Saved checksum is %s.", checksum)
return checksum | Get the checksum stored in :attr:`checksum_file` (a string or :data:`None`). |
def read_file(self, filename):
logger.info("Reading file: %s", format_path(filename))
contents = self.context.read_file(filename)
num_lines = len(contents.splitlines())
logger.debug("Read %s from %s.",
pluralize(num_lines, 'line'),
format_path(filename))
return contents.rstrip() | Read a text file and provide feedback to the user.
:param filename: The pathname of the file to read (a string).
:returns: The contents of the file (a string). |
def execute_file(self, filename):
logger.info("Executing file: %s", format_path(filename))
contents = self.context.execute(filename, capture=True).stdout
num_lines = len(contents.splitlines())
logger.debug("Execution of %s yielded % of output.",
format_path(filename),
pluralize(num_lines, 'line'))
return contents.rstrip() | Execute a file and provide feedback to the user.
:param filename: The pathname of the file to execute (a string).
:returns: Whatever the executed file returns on stdout (a string). |
def write_file(self, filename, contents):
logger.info("Writing file: %s", format_path(filename))
contents = contents.rstrip() + b"\n"
self.context.write_file(filename, contents)
logger.debug("Wrote %s to %s.",
pluralize(len(contents.splitlines()), "line"),
format_path(filename)) | Write a text file and provide feedback to the user.
:param filename: The pathname of the file to write (a string).
:param contents: The new contents of the file (a string). |
def available_files(self):
matches = []
for pattern in self.filename_patterns:
logger.debug("Matching filename pattern: %s", pattern)
matches.extend(natsort(glob.glob(parse_path(pattern))))
return matches | The filenames of the available configuration files (a list of strings).
The value of :attr:`available_files` is computed the first time it's
needed by searching for available configuration files that match
:attr:`filename_patterns` using :func:`~glob.glob()`. If you set
:attr:`available_files` this effectively disables searching for
configuration files. |
def documentation(self):
from humanfriendly.tables import format_rst_table
formatted_table = format_rst_table([
(directory,
self.get_main_pattern(directory).replace('*', r'\*'),
self.get_modular_pattern(directory).replace('*', r'\*'))
for directory in self.base_directories
], [
"Directory",
"Main configuration file",
"Modular configuration files",
])
return format(DOCUMENTATION_TEMPLATE, table=formatted_table).strip() | Configuration documentation in reStructuredText_ syntax (a string).
The purpose of the :attr:`documentation` property is to provide
documentation on the integration of :class:`ConfigLoader` into other
projects without denormalizing the required knowledge via copy/paste.
.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText |
def filename_patterns(self):
patterns = []
for directory in self.base_directories:
patterns.append(self.get_main_pattern(directory))
patterns.append(self.get_modular_pattern(directory))
return patterns | Filename patterns to search for available configuration files (a list of strings).
The value of :attr:`filename_patterns` is computed the first time it is
needed. Each of the :attr:`base_directories` generates two patterns:
1. A pattern generated by :func:`get_main_pattern()`.
2. A pattern generated by :func:`get_modular_pattern()`.
Here's an example:
>>> from update_dotdee import ConfigLoader
>>> loader = ConfigLoader(program_name='update-dotdee')
>>> loader.filename_patterns
['/etc/update-dotdee.ini',
'/etc/update-dotdee.d/*.ini',
'~/.update-dotdee.ini',
'~/.update-dotdee.d/*.ini',
'~/.config/update-dotdee.ini',
'~/.config/update-dotdee.d/*.ini'] |
def parser(self):
parser = configparser.RawConfigParser()
for filename in self.available_files:
friendly_name = format_path(filename)
logger.debug("Loading configuration file: %s", friendly_name)
loaded_files = parser.read(filename)
if len(loaded_files) == 0:
self.report_issue("Failed to load configuration file! (%s)", friendly_name)
logger.debug("Loaded %s from %s.",
pluralize(len(parser.sections()), "section"),
pluralize(len(self.available_files), "configuration file"))
return parser | A :class:`configparser.RawConfigParser` object with :attr:`available_files` loaded. |
def get_main_pattern(self, directory):
return os.path.join(directory, format(
'{prefix}{program_name}.{extension}',
extension=self.filename_extension.lstrip('.'),
program_name=self.program_name,
prefix=self.get_prefix(directory),
)) | Get the :func:`~glob.glob()` pattern to find the main configuration file.
:param directory: The pathname of a base directory (a string).
:returns: A filename pattern (a string).
This method generates a pattern that matches a filename based on
:attr:`program_name` with the suffix :attr:`filename_extension` in the
given base `directory`. Here's an example:
>>> from update_dotdee import ConfigLoader
>>> loader = ConfigLoader(program_name='update-dotdee')
>>> [loader.get_main_pattern(d) for d in loader.base_directories]
['/etc/update-dotdee.ini',
'~/.update-dotdee.ini',
'~/.config/update-dotdee.ini'] |
def report_issue(self, message, *args, **kw):
if self.strict:
raise ValueError(format(message, *args, **kw))
else:
logger.warning(format(message, *args, **kw)) | Handle a problem by raising an exception or logging a warning (depending on :attr:`strict`). |
def validate_input(function):
@wraps(function)
def wrapper(*args, **kwargs):
try:
name = function.__name__ + '_validator' # find validator name
globals()[name](kwargs) # call validation function
return function(*args, **kwargs)
except KeyError:
raise Exception("Could not find validation schema for the"
" function " + function.__name__)
return wrapper | Decorator that validates the kwargs of the function passed to it. |
def getImportFromObjects(node):
'''Returns a list of objects referenced by import from node'''
somenames = [x.asname for x in node.names if x.asname]
othernames = [x.name for x in node.names if not x.asname]
return somenames + othernames | Returns a list of objects referenced by import from node |
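A brief usage sketch with the standard ast module, showing how `asname` and `name` are picked up:
import ast

tree = ast.parse("from os import path as p, sep")
node = tree.body[0]                  # the ImportFrom node
print(getImportFromObjects(node))    # ['p', 'sep'] -- the alias wins when present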
def mft_record_size(self):
if self.extended_bpb.clusters_per_mft < 0:
return 2 ** abs(self.extended_bpb.clusters_per_mft)
else:
return self.clusters_per_mft * self.sectors_per_cluster * \
self.bytes_per_sector | Returns:
int: MFT record size in bytes |
def mft_offset(self):
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_cluster | Returns:
int: MFT Table offset from the beginning of the partition in bytes |
def mft_mirror_offset(self):
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_mirror_cluster | Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes |
def clear_mappers():
# Remove our hybrid property constructs.
for mpr, is_primary in _mapper_registry.items():
if is_primary:
for attr_name in ('id', 'slug'):
try:
attr = object.__getattribute__(mpr.class_, attr_name)
if isinstance(attr, hybrid_property):
if attr_name == 'id':
delattr(mpr.class_, attr_name)
else:
setattr(mpr.class_, attr_name, attr.descriptor)
except AttributeError:
pass
sa_clear_mappers() | Clears all mappers set up by SA and also clears all custom "id" and
"slug" attributes inserted by the :func:`mapper` function in this module.
This should only ever be needed in a testing context. |
def as_slug_expression(attr):
slug_expr = sa_func.replace(attr, ' ', '-')
slug_expr = sa_func.replace(slug_expr, '_', '-')
slug_expr = sa_func.lower(slug_expr)
return slug_expr | Converts the given instrumented string attribute into an SQL expression
that can be used as a slug.
Slugs are identifiers for members in a collection that can be used in an
URL. We create slug columns by replacing non-URL characters with dashes
and lower casing the result. We need this at the ORM level so that we can
use the slug in a query expression. |
def synonym(name):
return hybrid_property(lambda inst: getattr(inst, name),
lambda inst, value: setattr(inst, name, value),
expr=lambda cls: getattr(cls, name)) | Utility function mimicking the behavior of the old SA synonym function
with the new hybrid property semantics. |
def map_system_entities(engine, metadata, reset):
# Map the user message system entity.
msg_tbl = Table('_user_messages', metadata,
Column('guid', String, nullable=False, primary_key=True),
Column('text', String, nullable=False),
Column('time_stamp', DateTime(timezone=True),
nullable=False, default=sa_func.now()),
)
mapper(UserMessage, msg_tbl, id_attribute='guid')
if reset:
metadata.drop_all(bind=engine, tables=[msg_tbl])
metadata.create_all(bind=engine, tables=[msg_tbl]) | Maps all system entities. |
def inspect(orm_class, attribute_name):
key = (orm_class, attribute_name)
elems = OrmAttributeInspector.__cache.get(key)
if elems is None:
elems = OrmAttributeInspector.__inspect(key)
OrmAttributeInspector.__cache[key] = elems
return elems | :param attribute_name: name of the mapped attribute to inspect.
:returns: list of 2-tuples containing information about the inspected
attribute (first element: mapped entity attribute kind; second
element: mapped entity attribute) |
def trang(self, outfn=None, ext='.rng'):
from . import JARS
java = os.environ.get('java') or 'java'
trang_jar = os.path.join(JARS, 'trang.jar')
outfn = outfn or os.path.splitext(self.fn)[0] + ext
stderr = tempfile.NamedTemporaryFile()
try:
result = subprocess.check_call(
[java, "-jar", trang_jar, self.fn, outfn],
universal_newlines=True,
stderr=stderr)
except subprocess.CalledProcessError as e:
f = open(stderr.name, 'r+b')
output = f.read(); f.close()
raise RuntimeError(str(output, 'utf-8')).with_traceback(sys.exc_info()[2]) from None
if result==0:
return outfn | use trang to convert the Schema to the given output filename or to the given extension
SIDE EFFECT: creates a new file on the filesystem. |
def schematron(self, fn=None, outfn=None, ext='.sch'):
from .xslt import XSLT
from . import PATH, XML, etree
fn = fn or self.fn
if os.path.splitext(fn)[-1].lower()==ext:
return fn
elif os.path.splitext(fn)[-1].lower()!='.rng':
fn = Schema(fn=fn).trang(ext='.rng')
rng2sch = XSLT(fn=os.path.join(PATH, 'xslts', 'rng2sch.xslt'))
rng = XML(fn=fn)
outfn = outfn or os.path.splitext(fn)[0]+ext
sch = XML(fn=outfn, root=rng2sch.saxon9(rng.root).getroot())
sch.write()
return sch.fn | convert the Schema to schematron and save at the given output filename or with the given extension. |
def from_tag(cls, tag, schemas, ext='.rnc'):
return cls(fn=cls.filename(tag, schemas, ext=ext)) | load a schema using an element's tag. schemas can be a string or a list of strings |
def filename(cls, tag, schemas, ext='.rnc'):
if type(schemas)==str:
schemas = re.split("\s*,\s*", schemas)
for schema in schemas:
fn = os.path.join(schema, cls.dirname(tag), cls.basename(tag, ext=ext))
if os.path.exists(fn):
return fn | given a tag and a list of schemas, return the filename of the schema.
If schemas is a string, treat it as a comma-separated list. |
def dirname(cls, namespace):
md = re.match("^\{?(?:[^:]+:/{0,2})?([^\}]+)\}?", namespace)
if md is not None:
dirname = re.sub("[/:]", '_', md.group(1))
else:
dirname = ''
return dirname | convert a namespace url to a directory name.
Also accepts an Element 'tag' with namespace prepended in {braces}. |
def errors_as_text(self):
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors) | only available to Django 1.7+ |