code | docs
---|---|
def export(self, filename, offset=0, length=None):
    self.__validate_offset(filename=filename, offset=offset, length=length)
    with open(filename, 'w') as f:
        if length is None:
            length = len(self.data) - offset
        # Slice out exactly `length` bytes starting at `offset`
        output = self.data[offset:offset + length]
        f.write(output) | Exports byte array to specified destination
Args:
filename (str): destination to output file
offset (int): byte offset (default: 0)
length (int): number of bytes to export; defaults to everything after offset |
def tmpdir():
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True) | Create a tempdir context for the cwd and remove it after. |
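Since _tmpdir_extant() is not shown here, a minimal self-contained sketch of the same pattern using only the standard library might look like this (the helper name is illustrative, not the module's actual API):

import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def tmpdir_sketch():
    # Create a temp dir under the current working directory and clean it up afterwards.
    target = tempfile.mkdtemp(dir='.')
    try:
        yield target
    finally:
        shutil.rmtree(target, ignore_errors=True)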
def run(command, verbose=False):
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code) | Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in. |
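A possible call site for run() above, assuming the CommandResult it returns exposes its three constructor arguments (command, output, status_code) as attributes:

result = run("echo hello; echo oops >&2", verbose=True)
if result.status_code != 0:
    print("command failed")
print(result.output)  # stdout and stderr captured as one stream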
def parse_redis_url(url):
warnings.warn(
"Use redis.StrictRedis.from_url instead", DeprecationWarning,
stacklevel=2)
parsed = urllib.parse.urlsplit(url)
return {
'host': parsed.hostname,
'port': parsed.port,
'db': int(parsed.path.replace('/', '')),
} | Given a url like redis://localhost:6379/0, return a dict with host, port,
and db members. |
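For example (although, as the deprecation warning says, redis.StrictRedis.from_url is the preferred route):

conf = parse_redis_url('redis://localhost:6379/0')
# conf == {'host': 'localhost', 'port': 6379, 'db': 0}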
def lock_file(f, block=False):
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise | If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue. |
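A sketch of guarding a critical section with lock_file(); the lock-file path and do_work() are illustrative only:

with open('/tmp/myjob.lock', 'w') as lockf:
    lock_file(lockf)  # exits with an error message if another process holds the lock
    do_work()         # hypothetical critical section
    # the flock is released when the file is closed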
def which(name, flags=os.X_OK):
    result = []
    # Materialize PATHEXT so it can be iterated once per PATH entry
    exts = list(filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)))
    path = os.environ.get('PATH', None)
    if path is None:
        return []
    for p in path.split(os.pathsep):
        p = os.path.join(p, name)
        if os.access(p, flags):
            result.append(p)
        for e in exts:
            pext = p + e
            if os.access(pext, flags):
                result.append(pext)
    return result | Search PATH for executable files with the given name.
Taken from Twisted. |
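Usage is straightforward; which() returns every match on PATH, so the first entry mirrors what a shell would normally pick:

matches = which('python3')
if matches:
    print("found interpreter at", matches[0])
else:
    print("python3 not on PATH")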
def get_lxc_version():
runner = functools.partial(
subprocess.check_output,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
# Old LXC had an lxc-version executable, and prefixed its result with
# "lxc version: "
try:
result = runner(['lxc-version']).rstrip()
return parse_version(result.replace("lxc version: ", ""))
except (OSError, subprocess.CalledProcessError):
pass
# New LXC instead has a --version option on most installed executables.
return parse_version(runner(['lxc-start', '--version']).rstrip()) | Asks the current host what version of LXC it has. Returns the parsed
version. If LXC is not installed, raises subprocess.CalledProcessError |
def get_qpimage(self, idx=0):
if self._bgdata:
# The user has explicitly chosen different background data
# using `get_qpimage_raw`.
qpi = super(SingleHdf5Qpimage, self).get_qpimage()
else:
# We can use the background data stored in the qpimage hdf5 file
qpi = qpimage.QPImage(h5file=self.path,
h5mode="r",
h5dtype=self.as_type,
).copy()
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return background-corrected QPImage |
def get_qpimage_raw(self, idx=0):
qpi = qpimage.QPImage(h5file=self.path,
h5mode="r",
h5dtype=self.as_type,
).copy()
# Remove previously performed background correction
qpi.set_bg_data(None)
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return QPImage without background correction |
def simulate(s0, transmat, steps=1):
# Single-Step simulation
if steps == 1:
return np.dot(s0, transmat)
# Multi-Step simulation
out = np.zeros(shape=(steps + 1, len(s0)), order='C')
out[0, :] = s0
for i in range(1, steps + 1):
out[i, :] = np.dot(out[i - 1, :], transmat)
return out | Simulate the next state
Parameters
----------
s0 : ndarray
Vector with state variables at t=0
transmat : ndarray
The estimated transition/stochastic matrix.
steps : int
(Default: 1) The number of steps to simulate model outputs ahead.
If steps > 1, a multi-step simulation is triggered.
Returns
-------
out : ndarray
(steps=1) Vector with the simulated state variables.
(steps>1) Matrix with one row per step, out[step, :], from a
multi-step simulation. The first row is the initial state
vector out[0, :] = s0 for algorithmic reasons. |
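A small worked example with a two-state Markov chain, matching the row-per-step layout produced by the code above:

import numpy as np

s0 = np.array([1.0, 0.0])              # start fully in state 0
transmat = np.array([[0.9, 0.1],
                     [0.5, 0.5]])

print(simulate(s0, transmat))           # one step: [0.9, 0.1]
out = simulate(s0, transmat, steps=3)   # shape (4, 2); out[0] == s0
print(out[-1])                          # distribution after three steps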
def storage_type(self):
nf = np.load(str(self.path), mmap_mode="c", allow_pickle=False)
if np.iscomplexobj(nf):
st = "field"
else:
st = "phase"
return st | Depending on input data type, the storage type is either
"field" (complex) or "phase" (real). |
def get_qpimage_raw(self, idx=0):
# Load experimental data
nf = np.load(str(self.path), mmap_mode="c", allow_pickle=False)
meta_data = copy.copy(self.meta_data)
qpi = qpimage.QPImage(data=nf,
which_data=self.storage_type,
meta_data=meta_data,
h5dtype=self.as_type)
# get identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return QPImage without background correction |
def verify(path):
path = pathlib.Path(path)
valid = False
if path.suffix == ".npy":
try:
nf = np.load(str(path), mmap_mode="r", allow_pickle=False)
except (OSError, ValueError, IsADirectoryError):
pass
else:
if len(nf.shape) == 2:
valid = True
return valid | Verify that `path` has a supported numpy file format |
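For instance, a freshly written 2-D .npy file passes the check, while a missing file does not (the /tmp paths below are illustrative):

import numpy as np

np.save('/tmp/example_field.npy', np.zeros((8, 8)))
print(verify('/tmp/example_field.npy'))   # True: .npy suffix and a 2-D array
print(verify('/tmp/does_not_exist.npy'))  # False: np.load raises, so valid stays False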
def _get_stem(self):
filename = os.path.basename(self.src_path)
stem, ext = os.path.splitext(filename)
return "index" if stem in ("index", "README", "__init__") else stem | Return the name of the file without it's extension. |
def parse_setup(filepath):
    '''Get the kwargs from the setup function in setup.py'''
    # TODO: Need to parse setup.cfg and merge with the data from below
    # Monkey patch setuptools.setup to capture keyword arguments
    setup_kwargs = {}
    def setup_interceptor(**kwargs):
        setup_kwargs.update(kwargs)
    import setuptools
    setuptools_setup = setuptools.setup
    setuptools.setup = setup_interceptor
    # Manually compile setup.py
    with open(filepath, 'r') as f:
        code = compile(f.read(), '', 'exec')
    setup = ModuleType('setup')
    exec(code, setup.__dict__)
    # Remove monkey patch
    setuptools.setup = setuptools_setup
    return setup_kwargs | Get the kwargs from the setup function in setup.py |
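Assuming ModuleType has been imported from types at module level (as the code above requires), usage might look like this; the setup.py path is illustrative:

setup_data = parse_setup('path/to/setup.py')
print(setup_data.get('name'), setup_data.get('version'))
print(setup_data.get('install_requires', []))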
def get_console_scripts(setup_data):
    '''Parse and return a list of console_scripts from setup_data'''
    # TODO: support ini format of entry_points
    # TODO: support setup.cfg entry_points as available in pbr
    if 'entry_points' not in setup_data:
        return []
    console_scripts = setup_data['entry_points'].get('console_scripts', [])
    return [script.split('=')[0].strip() for script in console_scripts] | Parse and return a list of console_scripts from setup_data |
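For example, feeding it the kind of dict produced by parse_setup() above:

setup_data = {
    'entry_points': {
        'console_scripts': ['mytool = mypkg.cli:main'],
    }
}
print(get_console_scripts(setup_data))   # ['mytool']
print(get_console_scripts({}))           # [] when no entry_points are declared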
def build_plugin(cls, class_name, config):
mod_path, class_name = class_name.rsplit('.', 1)
plugin_cls = getattr(importlib.import_module(mod_path), class_name)
return plugin_cls(config) | Create an instance of the named plugin and return it
:param class_name: fully qualified name of class
:type class_name: str
:param config: the supporting configuration for plugin
:type config: PluginConfig
:rtype: AbstractPlugin
:return: an instance of a concrete implementation of AbstractPlugin |
def build_plugins(cls, plugins_conf):
plugins = {}
for alias, params_dict in plugins_conf.items():
plugin_config = PluginConfig(**(params_dict.get('config') or {}))
plugins[alias] = cls.build_plugin(class_name=params_dict['class_name'], config=plugin_config)
return plugins | Create an instance of the named plugin and return it
:param plugins_conf: dict of {alias: dict(plugin builder params) }
:type plugins_conf: dict
:rtype: dict[str, AbstractPlugin]
:return: dict of alias: plugin instance |
def install_programmer(programmer_id, programmer_options, replace_existing=False):
doaction = 0
if programmer_id in programmers().keys():
log.debug('programmer already exists: %s', programmer_id)
if replace_existing:
log.debug('remove programmer: %s', programmer_id)
remove_programmer(programmer_id)
doaction = 1
else:
doaction = 1
if doaction:
lines = bunch2properties(programmer_id, programmer_options)
programmers_txt().write_lines([''] + lines, append=1) | install programmer in programmers.txt.
:param programmer_id: string identifier
:param programmer_options: dict like
:param replace_existing: bool
:rtype: None |
def filter_tag(arg):
try:
strip_len = len('Key=')
key, value = arg[strip_len:].split(',Value=', 1)
return key, value
except:
msg = 'Invalid --filter-tag argument: {}'
raise argparse.ArgumentTypeError(msg.format(arg)) | Parses a --filter-tag argument |
def file_arg(arg):
prefix = 'file://'
if arg.startswith(prefix):
return os.path.abspath(arg[len(prefix):])
else:
msg = 'Invalid file argument "{}", does not begin with "file://"'
raise argparse.ArgumentTypeError(msg.format(arg)) | Parses a file argument, i.e. starts with file:// |
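Both parsers above are meant to be used as argparse type= callables; a minimal sketch with illustrative argument values:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--filter-tag', type=filter_tag)
parser.add_argument('--template', type=file_arg)

ns = parser.parse_args(['--filter-tag', 'Key=env,Value=prod',
                        '--template', 'file://stack.yml'])
# ns.filter_tag == ('env', 'prod')
# ns.template is the absolute path to stack.yml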
def schedule(self, job, when):
pjob = pickle.dumps(job)
self._redis.zadd('ss:scheduled', when, pjob) | Schedule job to run at when nanoseconds since the UNIX epoch. |
def schedule_in(self, job, timedelta):
now = long(self._now() * 1e6)
when = now + timedelta.total_seconds() * 1e6
self.schedule(job, when) | Schedule job to run at datetime.timedelta from now. |
def schedule_now(self, job):
now = long(self._now() * 1e6)
self.schedule(job, now) | Schedule job to run as soon as possible. |
def _get_aria_autocomplete(self, field):
tag_name = field.get_tag_name()
input_type = None
if field.has_attribute('type'):
input_type = field.get_attribute('type').lower()
if (
(tag_name == 'TEXTAREA')
or (
(tag_name == 'INPUT')
and (not (
(input_type == 'button')
or (input_type == 'submit')
or (input_type == 'reset')
or (input_type == 'image')
or (input_type == 'file')
or (input_type == 'checkbox')
or (input_type == 'radio')
or (input_type == 'hidden')
))
)
):
value = None
if field.has_attribute('autocomplete'):
value = field.get_attribute('autocomplete').lower()
else:
form = self.parser.find(field).find_ancestors(
'form'
).first_result()
if (form is None) and (field.has_attribute('form')):
form = self.parser.find(
'#' + field.get_attribute('form')
).first_result()
if (form is not None) and (form.has_attribute('autocomplete')):
value = form.get_attribute('autocomplete').lower()
if value == 'on':
return 'both'
elif (
(field.has_attribute('list'))
and (self.parser.find(
'datalist[id="' + field.get_attribute('list') + '"]'
).first_result() is not None)
):
return 'list'
elif value == 'off':
return 'none'
return None | Returns the appropriate value for attribute aria-autocomplete of field.
:param field: The field.
:type field: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The ARIA value of field.
:rtype: str |
def _validate(self, field, list_attribute):
if not self.scripts_added:
self._generate_validation_scripts()
self.id_generator.generate_id(field)
self.script_list_fields_with_validation.append_text(
'hatemileValidationList.'
+ list_attribute
+ '.push("'
+ field.get_attribute('id')
+ '");'
) | Validate the field when its value change.
:param field: The field.
:param list_attribute: The list attribute of field with validation. |
def remove(item):
if os.path.isdir(item):
shutil.rmtree(item)
else:
# Assume it's a file. error if not.
os.remove(item) | Delete item, whether it's a file, a folder, or a folder
full of other files and folders. |
def remove_pattern(root, pat, verbose=True):
print("removing pattern", root, pat)
combined = root + pat
print('combined', combined)
items = glob.glob(combined)
print('items', items)
for item in items:
print('item', item)
if is_inside(root, item):
remove(item)
elif verbose:
print("{item} is not inside {root}! Skipping.".format(**vars())) | Given a directory, and a pattern of files like "garbage.txt" or
"*pyc" inside it, remove them.
Try not to delete the whole OS while you're at it. |
def get_slugignores(root, fname='.slugignore'):
try:
with open(os.path.join(root, fname)) as f:
return [l.rstrip('\n') for l in f]
except IOError:
return [] | Given a root path, read any .slugignore file inside and return a list of
patterns that should be removed prior to slug compilation.
Return empty list if file does not exist. |
def clean_slug_dir(root):
if not root.endswith('/'):
root += '/'
for pattern in get_slugignores(root):
print("pattern", pattern)
remove_pattern(root, pattern) | Given a path, delete anything specified in .slugignore. |
def export(self, folder_path, format=None):
if format is None:
raise ValueError("Must export to a specific format, no format specified.")
format = format.lower()
if format == "trackline" or format[-4:] == "trkl":
ex.Trackline.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes)
elif format == "shape" or format == "shapefile" or format[-3:] == "shp":
ex.GDALShapefile.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes)
elif format == "netcdf" or format[-2:] == "nc":
ex.NetCDF.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes, summary=str(self))
elif format == "pickle" or format[-3:] == "pkl" or format[-6:] == "pickle":
ex.Pickle.export(folder=folder_path, particles=self.particles, datetimes=self.datetimes) | General purpose export method, gets file type
from filepath extension
Valid output formats currently are:
Trackline: trackline or trkl or *.trkl
Shapefile: shapefile or shape or shp or *.shp
NetCDF: netcdf or nc or *.nc |
def _parse(args):
ordered = []
opt_full = dict()
opt_abbrev = dict()
args = args + [''] # Avoid out of range
i = 0
while i < len(args) - 1:
arg = args[i]
arg_next = args[i+1]
if arg.startswith('--'):
if arg_next.startswith('-'):
raise ValueError('{} lacks value'.format(arg))
else:
opt_full[arg[2:]] = arg_next
i += 2
elif arg.startswith('-'):
if arg_next.startswith('-'):
raise ValueError('{} lacks value'.format(arg))
else:
opt_abbrev[arg[1:]] = arg_next
i += 2
else:
ordered.append(arg)
i += 1
return ordered, opt_full, opt_abbrev | Parse passed arguments from shell. |
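For example, a hypothetical argument list mixing positional arguments with long and abbreviated options:

ordered, opt_full, opt_abbrev = _parse(['build', '--target', 'x86', '-j', '4'])
# ordered    == ['build']
# opt_full   == {'target': 'x86'}
# opt_abbrev == {'j': '4'}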
def _construct_optional(params):
args = []
filtered = {key: arg.default for key, arg in params.items() if arg.default != inspect._empty}
for key, default in filtered.items():
arg = OptionalArg(full=key, abbrev=key[0].lower(), default=default)
args.append(arg)
args_full, args_abbrev = dict(), dict()
# Resolve conflicts
known_count = defaultdict(int)
for arg in args:
args_full[arg.full] = arg
if known_count[arg.abbrev] == 0:
args_abbrev[arg.abbrev] = arg
elif known_count[arg.abbrev] == 1:
new_abbrev = arg.abbrev.upper()
args_full[arg.full] = OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
else:
new_abbrev = arg.abbrev.upper() + str(known_count[arg.abbrev])
args_full[arg.full] = OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
known_count[arg.abbrev] += 1
return args_full, args_abbrev | Construct optional args' key and abbreviated key from signature. |
def _keyboard_access(self, element):
# pylint: disable=no-self-use
if not element.has_attribute('tabindex'):
tag = element.get_tag_name()
if (tag == 'A') and (not element.has_attribute('href')):
element.set_attribute('tabindex', '0')
elif (
(tag != 'A')
and (tag != 'INPUT')
and (tag != 'BUTTON')
and (tag != 'SELECT')
and (tag != 'TEXTAREA')
):
element.set_attribute('tabindex', '0') | Provide keyboard access for element, if it not has.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement |
def _add_event_in_element(self, element, event):
if not self.main_script_added:
self._generate_main_scripts()
if self.script_list is not None:
self.id_generator.generate_id(element)
self.script_list.append_text(
event
+ "Elements.push('"
+ element.get_attribute('id')
+ "');"
) | Add a type of event in element.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param event: The type of event.
:type event: str |
def tee(process, filter):
lines = []
while True:
line = process.stdout.readline()
if line:
if sys.version_info[0] >= 3:
line = decode(line)
stripped_line = line.rstrip()
if filter(stripped_line):
sys.stdout.write(line)
lines.append(stripped_line)
elif process.poll() is not None:
process.stdout.close()
break
return lines | Read lines from process.stdout and echo them to sys.stdout.
Returns a list of lines read. Lines are not newline terminated.
The 'filter' is a callable which is invoked for every line,
receiving the line as argument. If the filter returns True, the
line is echoed to sys.stdout. |
def tee2(process, filter):
while True:
line = process.stderr.readline()
if line:
if sys.version_info[0] >= 3:
line = decode(line)
stripped_line = line.rstrip()
if filter(stripped_line):
sys.stderr.write(line)
elif process.returncode is not None:
process.stderr.close()
break | Read lines from process.stderr and echo them to sys.stderr.
The 'filter' is a callable which is invoked for every line,
receiving the line as argument. If the filter returns True, the
line is echoed to sys.stderr. |
def run(args, echo=True, echo2=True, shell=False, cwd=None, env=None):
if not callable(echo):
echo = On() if echo else Off()
if not callable(echo2):
echo2 = On() if echo2 else Off()
process = Popen(
args,
stdout=PIPE,
stderr=PIPE,
shell=shell,
cwd=cwd,
env=env
)
with background_thread(tee2, (process, echo2)):
lines = tee(process, echo)
return process.returncode, lines | Run 'args' and return a two-tuple of exit code and lines read.
If 'echo' is True, the stdout stream is echoed to sys.stdout.
If 'echo2' is True, the stderr stream is echoed to sys.stderr.
The 'echo' and 'echo2' arguments may be callables, in which
case they are used as tee filters.
If 'shell' is True, args are executed via the shell.
The 'cwd' argument causes the child process to be executed in cwd.
The 'env' argument allows to pass a dict replacing os.environ. |
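A sketch of calling this run() with a callable echo filter so that only matching stdout lines are echoed (the predicate is illustrative; all lines are still captured and returned):

returncode, lines = run(['ls', '-l'], echo=lambda line: line.endswith('.py'))
if returncode != 0:
    print('command failed')
print(len(lines), 'lines captured in total')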
def register_representer_class(self, representer_class):
if representer_class in self.__rpr_classes.values():
raise ValueError('The representer class "%s" has already been '
'registered.' % representer_class)
self.__rpr_classes[representer_class.content_type] = representer_class
if issubclass(representer_class, MappingResourceRepresenter):
# Create and hold a mapping registry for the registered resource
# representer class.
mp_reg = representer_class.make_mapping_registry()
self.__mp_regs[representer_class.content_type] = mp_reg | Registers the given representer class with this registry, using
its MIME content type as the key. |
def create(self, resource_class, content_type):
rpr_fac = self.__find_representer_factory(resource_class,
content_type)
if rpr_fac is None:
# Register a representer with default configuration on the fly
# and look again.
self.register(resource_class, content_type)
rpr_fac = self.__find_representer_factory(resource_class,
content_type)
return rpr_fac(resource_class) | Creates a representer for the given combination of resource and
content type. This will also find representer factories that were
registered for a base class of the given resource. |
def make_gpg_home(appname, config_dir=None):
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
if not os.path.exists(path):
os.makedirs( path, 0700 )
else:
os.chmod( path, 0700 )
return path | Make GPG keyring dir for a particular application.
Return the path. |
def get_gpg_home( appname, config_dir=None ):
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
return path | Get the GPG keyring directory for a particular application.
Return the path. |
def make_gpg_tmphome( prefix=None, config_dir=None ):
if prefix is None:
prefix = "tmp"
config_dir = get_config_dir( config_dir )
tmppath = os.path.join( config_dir, "tmp" )
if not os.path.exists( tmppath ):
os.makedirs( tmppath, 0700 )
tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
return tmpdir | Make a temporary directory to hold GPG keys that are not
going to be stored to the application's keyring. |
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
assert is_valid_appname(appname)
key_bin = str(key_bin)
assert len(key_bin) > 0
if gpghome is None:
config_dir = get_config_dir( config_dir )
keydir = make_gpg_home( appname, config_dir=config_dir )
else:
keydir = gpghome
gpg = gnupg.GPG( homedir=keydir )
res = gpg.import_keys( key_bin )
try:
assert res.count == 1, "Failed to store key (%s)" % res
except AssertionError, e:
log.exception(e)
log.error("Failed to store key to %s" % keydir)
log.debug("res: %s" % res.__dict__)
log.debug("(%s)\n%s" % (len(key_bin), key_bin))
return None
return res.fingerprints[0] | Store a key locally to our app keyring.
Does NOT put it into a blockchain ID
Return the key ID on success
Return None on error |
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
assert is_valid_appname(appname)
if gpghome is None:
config_dir = get_config_dir( config_dir )
keydir = get_gpg_home( appname, config_dir=config_dir )
else:
keydir = gpghome
gpg = gnupg.GPG( homedir=keydir )
res = gpg.delete_keys( [key_id] )
if res.status == 'Must delete secret key first':
# this is a private key
res = gpg.delete_keys( [key_id], secret=True )
try:
assert res.status == 'ok', "Failed to delete key (%s)" % res
except AssertionError, e:
log.exception(e)
log.error("Failed to delete key '%s'" % key_id)
log.debug("res: %s" % res.__dict__)
return False
return True | Remove a public key locally from our local app keyring
Return True on success
Return False on error |
def gpg_download_key( key_id, key_server, config_dir=None ):
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
recvdat = gpg.recv_keys( key_server, key_id )
fingerprint = None
try:
assert recvdat.count == 1
assert len(recvdat.fingerprints) == 1
fingerprint = recvdat.fingerprints[0]
except AssertionError, e:
log.exception(e)
log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
shutil.rmtree( tmpdir )
return None
keydat = gpg.export_keys( [fingerprint] )
shutil.rmtree( tmpdir )
return str(keydat) | Download a GPG key from a key server.
Do not import it into any keyrings.
Return the ASCII-armored key |
def gpg_key_fingerprint( key_data, config_dir=None ):
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.import_keys( key_data )
try:
assert res.count == 1, "Failed to import key"
assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
fingerprint = res.fingerprints[0]
shutil.rmtree(tmpdir)
return fingerprint
except AssertionError, e:
log.exception(e)
shutil.rmtree(tmpdir)
return None | Get the key ID of a given serialized key
Return the fingerprint on success
Return None on error |
def gpg_verify_key( key_id, key_data, config_dir=None ):
key_data = str(key_data)
config_dir = get_config_dir( config_dir )
sanitized_key_id = "".join( key_id.upper().split(" ") )
if len(sanitized_key_id) < 16:
log.debug("Fingerprint is too short to be secure")
return False
fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
if fingerprint is None:
log.debug("Failed to fingerprint key")
return False
if sanitized_key_id != fingerprint and not fingerprint.endswith( sanitized_key_id ):
log.debug("Imported key does not match the given ID")
return False
else:
return True | Verify that a given serialized key, when imported, has the given key ID.
Return True on success
Return False on error |
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
keydir = get_gpg_home( appname, config_dir=config_dir )
gpg = gnupg.GPG( homedir=keydir )
keydat = gpg.export_keys( [key_id], secret=include_private )
if not keydat:
log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
assert keydat
return keydat | Get the ASCII-armored key, given the ID |
def gpg_list_profile_keys( blockchain_id, proxy=None, wallet_keys=None, config_dir=None ):
config_dir = get_config_dir( config_dir )
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
if proxy is None:
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
if 'error' in accounts:
return accounts
accounts = accounts.pop('accounts')
# extract
ret = []
for account in accounts:
if account['service'] != 'pgp':
continue
info = {
"identifier": account['identifier'],
"contentUrl": account['contentUrl']
}
if 'keyName' in account.keys():
info['keyName'] = account['keyName']
ret.append(info)
return ret | List all GPG keys in a user profile:
Return a list of {'identifier': key ID, 'contentUrl': URL to the key data} on success
Raise on error
Return {'error': ...} on failure |
def gpg_list_app_keys( blockchain_id, appname, proxy=None, wallet_keys=None, config_dir=None ):
raise Exception("BROKEN; depends on list_mutable_data")
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME )
if proxy is None:
proxy = blockstack_client.get_default_proxy( config_path=client_config_path )
key_info = []
key_prefix = "gpg.%s." % appname
# immutable data key listing (look for keys that start with 'appname:')
immutable_listing = list_immutable_data( blockchain_id, proxy=proxy )
if 'error' in immutable_listing:
raise Exception("Blockstack error: %s" % immutable_listing['error'])
for immutable in immutable_listing['data']:
name = immutable['data_id']
data_hash = immutable['hash']
if name.startswith( key_prefix ):
key_info.append( {
'keyName': name[len(key_prefix):],
'contentUrl': make_immutable_data_url( blockchain_id, name, data_hash )
})
# mutable data key listing (look for keys that start with 'appname:')
# TODO: use 'accounts'
mutable_listing = list_mutable_data( blockchain_id, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in mutable_listing:
raise Exception("Blockstack error: %s" % mutable_listing['error'])
for mutable in mutable_listing['data']:
name = mutable['data_id']
version = mutable['version']
if name.startswith( key_prefix ):
key_info.append( {
'keyName': name[len(key_prefix):],
'contentUrl': make_mutable_data_url( blockchain_id, name, version )
})
return key_info | List the set of available GPG keys tagged for a given application.
Return list of {'keyName': key name, 'contentUrl': URL to key data}
Raise on error |
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ):
res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
return res
removed_accounts = res['removed']
errors = []
# blow away all state
for account in removed_accounts:
if not account.has_key('contentUrl'):
continue
key_url = account['contentUrl']
if key_url.startswith("blockstack://"):
# delete
try:
res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys )
if 'error' in res:
errors.append({'key_url': key_url, 'message': res['error']})
except AssertionError, e:
log.exception(e)
log.error("Failed to delete '%s'" % key_url)
raise
except Exception, e:
log.exception(e)
log.error("Failed to delete '%s'" % key_url)
continue
ret = {'status': True}
if len(errors) > 0:
ret['delete_errors'] = errors
return ret | Remove a GPG from a blockchain ID's global account.
Do NOT remove it from the local keyring.
Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed.
Return {'error': ...} on error |
def gpg_profile_create_key( blockchain_id, keyname, immutable=True, proxy=None, wallet_keys=None, config_dir=None, gpghome=None, use_key_server=True, key_server=None ):
assert is_valid_keyname(keyname)
if config_dir is None:
config_dir = get_config_dir()
if gpghome is None:
gpghome = get_default_gpg_home()
keydir = make_gpg_tmphome( "create-account-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=keydir )
log.debug("Generating GPG key (this may take a while)")
key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id, key_length=4096, name_real=keyname )
key_res = gpg.gen_key( key_input )
assert key_res
key_id = key_res.fingerprint
key_data = gpg.export_keys( [key_id] )
assert key_data
# save the key itself, to the global keyring
rc = gpg_stash_key( keyname, key_data, gpghome=gpghome )
assert rc, "Failed to store key '%s' (%s)" % (keyname, key_id)
shutil.rmtree(keydir)
# propagate to blockstack
add_res = gpg_profile_put_key( blockchain_id, key_id, immutable=immutable, use_key_server=use_key_server, key_server=key_server, key_name=keyname, proxy=proxy, wallet_keys=wallet_keys, gpghome=gpghome )
return add_res | Create a new account key.
Select good default GPG values (4096-bit, RSA/RSA)
Note that without rngd running, this may take a while.
Add the new key to the user's account.
Return {'status': True, 'key_url': ..., 'key_id': ..., } on success
Return {'error': ...} on error |
def gpg_profile_get_key( blockchain_id, keyname, key_id=None, proxy=None, wallet_keys=None, config_dir=None, gpghome=None ):
assert is_valid_keyname( keyname )
if config_dir is None:
config_dir = get_config_dir()
if gpghome is None:
gpghome = get_default_gpg_home()
accounts = blockstack_client.list_accounts( blockchain_id, proxy=proxy )
if 'error' in accounts:
return accounts
accounts = accounts.pop('accounts')
if len(accounts) == 0:
return {'error': 'No accounts in this profile'}
all_gpg_accounts = filter( lambda a: a['service'] == 'pgp', accounts )
if len(all_gpg_accounts) == 0:
return {'error': 'No GPG accounts in this profile'}
# find the one with this key name
gpg_accounts = filter( lambda ga: (ga.has_key('keyName') and ga['keyName'] == keyname) or (key_id is not None and ga['identifier'] == key_id), all_gpg_accounts )
if len(gpg_accounts) == 0:
return {'error': 'No such GPG key found'}
if len(gpg_accounts) > 1:
return {'error': 'Multiple keys with that name'}
key_url = gpg_accounts[0].get('contentUrl', DEFAULT_KEY_SERVER)
# go get the key
key_data = gpg_fetch_key( key_url, key_id=gpg_accounts[0]['identifier'], config_dir=config_dir )
if key_data is None:
return {'error': 'Failed to download and verify key'}
ret = {
'status': True,
'key_id': gpg_accounts[0]['identifier'],
'key_data': key_data
}
return ret | Get the profile key
Return {'status': True, 'key_data': ..., 'key_id': ...} on success
Return {'error': ...} on error |
def gpg_app_create_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
if config_dir is None:
config_dir = get_config_dir()
client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
if proxy is None:
proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
keydir = make_gpg_tmphome( "create-app-", config_dir=config_dir )
gpg = gnupg.GPG( homedir=keydir )
log.debug("Generating GPG key (this may take a while)")
key_input = gpg.gen_key_input( key_type="RSA", name_email=blockchain_id + "/" + appname, key_length=4096, name_real=keyname )
key_res = gpg.gen_key( key_input )
key_id = key_res.fingerprint
key_data = gpg.export_keys( [key_id], secret=True )
shutil.rmtree(keydir)
# propagate to blockstack
add_res = gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=txid, immutable=immutable, proxy=proxy, wallet_keys=wallet_keys, config_dir=config_dir )
return add_res | Create a new application GPG key.
Use good defaults (RSA-4096)
Stash it to the app-specific keyring locally.
Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success |
def gpg_app_get_key( blockchain_id, appname, keyname, immutable=False, key_id=None, key_hash=None, key_version=None, proxy=None, config_dir=None ):
assert is_valid_appname(appname)
assert is_valid_keyname(keyname)
if config_dir is None:
config_dir = get_config_dir()
fq_key_name = "gpg.%s.%s" % (appname, keyname)
key_url = None
if immutable:
# try immutable
key_url = blockstack_client.make_immutable_data_url( blockchain_id, fq_key_name, key_hash )
else:
# try mutable
key_url = blockstack_client.make_mutable_data_url( blockchain_id, fq_key_name, key_version )
log.debug("fetch '%s'" % key_url)
key_data = gpg_fetch_key( key_url, key_id, config_dir=config_dir )
if key_data is None:
return {'error': 'Failed to fetch key'}
if key_id is None:
key_id = gpg_key_fingerprint( key_data, config_dir=config_dir )
ret = {
'status': True,
'key_id': key_id,
'key_data': key_data,
'app_name': appname
}
return ret | Get an app-specific GPG key.
Return {'status': True, 'key_id': ..., 'key': ..., 'app_name': ...} on success
return {'error': ...} on error |
def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ):
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir )
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
# do the signature
gpg = gnupg.GPG( homedir=tmpdir )
res = None
with open(path_to_sign, "r") as fd_in:
res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True )
shutil.rmtree(tmpdir)
if not res:
log.debug("sign_file error: %s" % res.__dict__)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to sign data'}
return {'status': True, 'sig': res.data } | Sign a file on disk.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'error': ...} on error |
def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ):
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir )
res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
# stash detached signature
fd, path = tempfile.mkstemp( prefix=".sig-verify-" )
f = os.fdopen(fd, "w")
f.write( sigdata )
f.flush()
os.fsync(f.fileno())
f.close()
# verify
gpg = gnupg.GPG( homedir=tmpdir )
with open(path, "r") as fd_in:
res = gpg.verify_file( fd_in, data_filename=path_to_verify )
shutil.rmtree(tmpdir)
try:
os.unlink(path)
except:
pass
if not res:
log.debug("verify_file error: %s" % res.__dict__)
return {'error': 'Failed to decrypt data'}
log.debug("verification succeeded from keys in %s" % config_dir)
return {'status': True} | Verify a file on disk was signed by the given sender.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name'; ...
}
Return {'status': True} on success
Return {'error': ...} on error |
def get_primary_command_usage(message=''):
# type: (str) -> str
if not settings.merge_primary_command and None in settings.subcommands:
return format_usage(settings.subcommands[None].__doc__)
if not message:
message = '\n{}\n'.format(settings.message) if settings.message else ''
doc = _DEFAULT_DOC.format(message=message)
if None in settings.subcommands:
return _merge_doc(doc, settings.subcommands[None].__doc__)
return format_usage(doc) | Return the usage string for the primary command. |
def get_help_usage(command):
# type: (str) -> None
if not command:
doc = get_primary_command_usage()
elif command in ('-a', '--all'):
subcommands = [k for k in settings.subcommands if k is not None]
available_commands = subcommands + ['help']
command_doc = '\nAvailable commands:\n{}\n'.format(
'\n'.join(' {}'.format(c) for c in sorted(available_commands)))
doc = get_primary_command_usage(command_doc)
elif command.startswith('-'):
raise ValueError("Unrecognized option '{}'.".format(command))
elif command in settings.subcommands:
subcommand = settings.subcommands[command]
doc = format_usage(subcommand.__doc__)
docopt.docopt(doc, argv=('--help',)) | Print out a help message and exit the program.
Args:
command: If a command value is supplied then print the help message for
the command module if available. If the command is '-a' or '--all',
then print the standard help message but with a full list of
available commands.
Raises:
ValueError: Raised if the help message is requested for an invalid
command or an unrecognized option is passed to help. |
def format_usage(doc, width=None):
# type: (str, Optional[int]) -> str
sections = doc.replace('\r', '').split('\n\n')
width = width or get_terminal_size().columns or 80
return '\n\n'.join(_wrap_section(s.strip(), width) for s in sections) | Format the docstring for display to the user.
Args:
doc: The docstring to reformat for display.
Returns:
The docstring formatted to parse and display to the user. This includes
dedenting, rewrapping, and translating the docstring if necessary. |
def parse_commands(docstring):
# type: (str) -> Generator[Tuple[List[str], List[str]], None, None]
try:
docopt.docopt(docstring, argv=())
except (TypeError, docopt.DocoptLanguageError):
return
except docopt.DocoptExit:
pass
for command in _parse_section('usage', docstring):
args = command.split()
commands = []
i = 0
for i, arg in enumerate(args):
if arg[0].isalpha() and not arg[0].isupper():
commands.append(arg)
else:
break
yield commands, args[i:] | Parse a docopt-style string for commands and subcommands.
Args:
docstring: A docopt-style string to parse. If the string is not a valid
docopt-style string, it will not yield any values.
Yields:
All tuples of commands and subcommands found in the docopt docstring. |
def _merge_doc(original, to_merge):
# type: (str, str) -> str
if not original:
return to_merge or ''
if not to_merge:
return original or ''
sections = []
for name in ('usage', 'arguments', 'options'):
sections.append(_merge_section(
_get_section(name, original),
_get_section(name, to_merge)
))
return format_usage('\n\n'.join(s for s in sections).rstrip()) | Merge two usage strings together.
Args:
original: The source of headers and initial section lines.
to_merge: The source for the additional section lines to append.
Returns:
A new usage string that contains information from both usage strings. |
def _merge_section(original, to_merge):
# type: (str, str) -> str
if not original:
return to_merge or ''
if not to_merge:
return original or ''
try:
index = original.index(':') + 1
except ValueError:
index = original.index('\n')
name = original[:index].strip()
section = '\n '.join(
(original[index + 1:].lstrip(), to_merge[index + 1:].lstrip())
).rstrip()
return '{name}\n {section}'.format(name=name, section=section) | Merge two sections together.
Args:
original: The source of header and initial section lines.
to_merge: The source for the additional section lines to append.
Returns:
A new section string that uses the header of the original argument and
the section lines from both. |
def _get_section(name, source):
# type: (str, str) -> Optional[str]
pattern = re.compile(
'^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
re.IGNORECASE | re.MULTILINE)
usage = None
for section in pattern.findall(source):
usage = _merge_section(usage, section.strip())
return usage | Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
multiple times, each instance will be merged into a single section. |
def _wrap_section(source, width):
# type: (str, int) -> str
if _get_section('usage', source):
return _wrap_usage_section(source, width)
if _is_definition_section(source):
return _wrap_definition_section(source, width)
lines = inspect.cleandoc(source).splitlines()
paragraphs = (textwrap.wrap(line, width, replace_whitespace=False)
for line in lines)
return '\n'.join(line for paragraph in paragraphs for line in paragraph) | Wrap the given section string to the current terminal size.
Intelligently wraps the section string to the given width. When wrapping
section lines, it auto-adjusts the spacing between terms and definitions.
It also adjusts commands the fit the correct length for the arguments.
Args:
source: The section string to wrap.
Returns:
The wrapped section string. |
def _is_definition_section(source):
try:
definitions = textwrap.dedent(source).split('\n', 1)[1].splitlines()
return all(
re.match(r'\s\s+((?!\s\s).+)\s\s+.+', s) for s in definitions)
except IndexError:
return False | Determine if the source is a definition section.
Args:
source: The usage string source that may be a section.
Returns:
True if the source describes a definition section; otherwise, False. |
def _wrap_usage_section(source, width):
# type: (str, int) -> str
if not any(len(line) > width for line in source.splitlines()):
return source
section_header = source[:source.index(':') + 1].strip()
lines = [section_header]
for commands, args in parse_commands(source):
command = ' {} '.format(' '.join(commands))
max_len = width - len(command)
sep = '\n' + ' ' * len(command)
wrapped_args = sep.join(textwrap.wrap(' '.join(args), max_len))
full_command = command + wrapped_args
lines += full_command.splitlines()
return '\n'.join(lines) | Wrap the given usage section string to the current terminal size.
Note:
Commands arguments are wrapped to the column that the arguments began
on the first line of the command.
Args:
source: The section string to wrap.
Returns:
The wrapped section string. |
def _wrap_definition_section(source, width):
# type: (str, int) -> str
index = source.index('\n') + 1
definitions, max_len = _get_definitions(source[index:])
sep = '\n' + ' ' * (max_len + 4)
lines = [source[:index].strip()]
for arg, desc in six.iteritems(definitions):
wrapped_desc = sep.join(textwrap.wrap(desc, width - max_len - 4))
lines.append(' {arg:{size}} {desc}'.format(
arg=arg,
size=str(max_len),
desc=wrapped_desc
))
return '\n'.join(lines) | Wrap the given definition section string to the current terminal size.
Note:
Auto-adjusts the spacing between terms and definitions.
Args:
source: The section string to wrap.
Returns:
The wrapped section string. |
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r'\s\s+', line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len | Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument. |
def _parse_section(name, source):
# type: (str, str) -> List[str]
section = textwrap.dedent(_get_section(name, source)[7:])
commands = [] # type: List[str]
for line in section.splitlines():
if not commands or line[:1].isalpha() and line[:1].islower():
commands.append(line)
else:
commands[-1] = '{} {}'.format(commands[-1].strip(), line.strip())
return commands | Yield each section line.
Note:
Depending on how it is wrapped, a section line can take up more than
one physical line.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A list containing each line, de-wrapped by whitespace from the source
code.
If the section is defined multiple times in the source code, all lines
from all sections with that name will be returned. |
def move(self, particle, u, v, w, modelTimestep, **kwargs):
# Kill the particle if it isn't settled and isn't already dead.
if not particle.settled and not particle.dead:
particle.die()
# Still save the temperature and salinity for the model output
temp = kwargs.get('temperature', None)
if temp is not None and math.isnan(temp):
temp = None
particle.temp = temp
salt = kwargs.get('salinity', None)
if salt is not None and math.isnan(salt):
salt = None
particle.salt = salt
u = 0
v = 0
w = 0
# Do the calculation to determine the new location
result = AsaTransport.distance_from_location_using_u_v_w(u=u, v=v, w=w, timestep=modelTimestep, location=particle.location)
result['u'] = u
result['v'] = v
result['w'] = w
return result | I'm dead, so no behaviors should act on me |
def get_current_history_length(self):
    u'''Return the number of lines currently in the history.
    (This is different from get_history_length(), which returns
    the maximum number of lines that will be written to a history file.)'''
    value = len(self.history)
    log(u"get_current_history_length:%d" % value)
    return value | Return the number of lines currently in the history.
(This is different from get_history_length(), which returns
the maximum number of lines that will be written to a history file.) |
def get_history_item(self, index):
    u'''Return the current contents of history item at index (starts with index 1).'''
    item = self.history[index - 1]
    log(u"get_history_item: index:%d item:%r" % (index, item))
    return item.get_line_text() | Return the current contents of history item at index (starts with index 1). |
def read_history_file(self, filename=None):
    u'''Load a readline history file.'''
    if filename is None:
        filename = self.history_filename
    try:
        for line in open(filename, u'r'):
            self.add_history(lineobj.ReadLineTextBuffer(ensure_unicode(line.rstrip())))
    except IOError:
        self.history = []
        self.history_cursor = 0 | Load a readline history file. |
def write_history_file(self, filename=None):
    u'''Save a readline history file.'''
    if filename is None:
        filename = self.history_filename
    fp = open(filename, u'wb')
    for line in self.history[-self.history_length:]:
        fp.write(ensure_str(line.get_line_text()))
        fp.write(u'\n')
    fp.close() | Save a readline history file. |
def add_history(self, line):
    u'''Append a line to the history buffer, as if it was the last line typed.'''
    if not hasattr(line, "get_line_text"):
        line = lineobj.ReadLineTextBuffer(line)
    if not line.get_line_text():
        pass
    elif len(self.history) > 0 and self.history[-1].get_line_text() == line.get_line_text():
        pass
    else:
        self.history.append(line)
    self.history_cursor = len(self.history) | Append a line to the history buffer, as if it was the last line typed. |
def previous_history(self, current):  # (C-p)
    u'''Move back through the history list, fetching the previous command. '''
    if self.history_cursor == len(self.history):
        self.history.append(current.copy())  # do not use add_history since we do not want to increment cursor
    if self.history_cursor > 0:
        self.history_cursor -= 1
        current.set_line(self.history[self.history_cursor].get_line_text())
        current.point = lineobj.EndOfLine | Move back through the history list, fetching the previous command. |
def next_history(self, current):  # (C-n)
    u'''Move forward through the history list, fetching the next command. '''
    if self.history_cursor < len(self.history) - 1:
        self.history_cursor += 1
        current.set_line(self.history[self.history_cursor].get_line_text()) | Move forward through the history list, fetching the next command. |
def beginning_of_history(self):  # (M-<)
    u'''Move to the first line in the history.'''
    self.history_cursor = 0
    if len(self.history) > 0:
        self.l_buffer = self.history[0] | Move to the first line in the history. |
def end_of_history(self, current):  # (M->)
    u'''Move to the end of the input history, i.e., the line currently
    being entered.'''
    self.history_cursor = len(self.history)
    current.set_line(self.history[-1].get_line_text()) | Move to the end of the input history, i.e., the line currently
being entered. |
def get_time_objects_from_model_timesteps(cls, times, start):
modelTimestep = []
newtimes = []
for i in xrange(0, len(times)):
try:
modelTimestep.append(times[i+1] - times[i])
except StandardError:
modelTimestep.append(times[i] - times[i-1])
newtimes.append(start + timedelta(seconds=times[i]))
return (modelTimestep, newtimes) | Calculate the datetimes of the model timesteps
times should start at 0 and be in seconds |
def fill_polygon_with_points(cls, goal=None, polygon=None):
if goal is None:
raise ValueError("Must specify the number of points (goal) to fill the polygon with")
if polygon is None or (not isinstance(polygon, Polygon) and not isinstance(polygon, MultiPolygon)):
raise ValueError("Must specify a polygon to fill points with")
minx = polygon.bounds[0]
maxx = polygon.bounds[2]
miny = polygon.bounds[1]
maxy = polygon.bounds[3]
points = []
now = time.time()
while len(points) < goal:
random_x = random.uniform(minx, maxx)
random_y = random.uniform(miny, maxy)
p = Point(random_x, random_y)
if p.within(polygon):
points.append(p)
logger.info("Filling polygon with points took %f seconds" % (time.time() - now))
return points | Fill a shapely polygon with X number of points |
def shutdown(self):
# cleanup
self.started = False
try:
# nice way of doing things - let's wait until all items
# in the queue are processed
for t in self._threads:
t.join()
finally:
# Emergency brake - if a KeyboardInterrupt is raised,
# threads will finish processing current task and exit
self.stopped = True | Wait for all threads to complete |
def _unpack_bytes(bytes):
if bytes == b'':
return 0
int_length = 4
len_diff = int_length - len(bytes)
bytes = bytes + len_diff * b'\x00'
return struct.unpack("<L", bytes)[0] | Unpack a set of bytes into an integer. First pads to 4 bytes.
Little endian. |
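A few concrete values, relying on the little-endian padding described above:

assert _unpack_bytes(b'') == 0
assert _unpack_bytes(b'\x01') == 1
assert _unpack_bytes(b'\x00\x01') == 256            # second byte is the 2**8 place
assert _unpack_bytes(b'\xff\xff\xff\xff') == 4294967295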
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
total_time = 0
for item in costs:
total_time += item['time']
costs.append({
'id': new_id,
'task': 'Overhead, Bugfixes & Iterations',
'time': total_time * overhead_percentage, },
)
return costs | Adds 15% overhead costs to the list of costs.
Usage::
from rapid_prototyping.context.utils import append_overhead_costs
costs = [
....
]
costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])
:param costs: Your final list of costs.
:param new_id: The id that this new item should get. |
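A concrete example of the overhead row being appended (ids and task names are illustrative):

costs = [
    {'id': 1, 'task': 'Design', 'time': 8},
    {'id': 2, 'task': 'Implementation', 'time': 12},
]
costs = append_overhead_costs(costs, new_id=3)
# appends a row with id 3 and time 20 * 0.15 == 3.0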
def arduino_default_path():
if sys.platform == 'darwin':
s = path('/Applications/Arduino.app/Contents/Resources/Java')
elif sys.platform == 'win32':
s = None
else:
s = path('/usr/share/arduino/')
return s | platform specific default root path. |
def arduino_path():
x = _ARDUINO_PATH
if not x:
x = os.environ.get('ARDUINO_HOME')
if not x:
x = arduino_default_path()
assert x, str(x)
x = path(x).expand().abspath()
assert x.exists(), 'arduino path not found:' + str(x)
return x | expanded root path, ARDUINO_HOME env var or arduino_default_path() |
def download_file_powershell(url, target, headers={}):
target = os.path.abspath(target)
powershell_cmd = "$request = (new-object System.Net.WebClient);"
for k, v in headers.items():
powershell_cmd += "$request.headers['%s'] = '%s';" % (k, v)
powershell_cmd += "$request.DownloadFile(%(url)r, %(target)r)" % vars()
cmd = [
'powershell',
'-Command',
powershell_cmd,
]
_clean_check(cmd, target) | Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete. |
def download_file_insecure_to_io(url, target_file=None, headers={}):
src = None
try:
req = Request(
url,
data=None,
headers=headers
)
src = urlopen(req)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
target_file.write(data)
finally:
if src:
src.close() | Use Python to download the file, even though it cannot authenticate the
connection. |
def compute_probability_settle(trajectory_files, bbox=None,
nx=1000, ny=1000, method='overall'):
prob = compute_probability(trajectory_files,
bbox,
nx, ny,
method,
parameter='settlement',
)
return prob | This function creates a probability (stochastic) grid
for trajectory model data based on settlement location,
normalized by run.
probability_grid = compute_probability_settle([myfile1.nc, myfile2.nc],
bbox = [-75, 23, -60, 45],
nx = 1000, ny = 1000,
method='overall') |
def export_probability(outputname, **kwargs):
bbox = kwargs.get('bbox', None)
nx, ny = kwargs.get('nx', None), kwargs.get('ny', None)
if bbox == None:
raise ValueError('Must supply bbox keyword argument.')
if nx == None or ny == None:
raise ValueError('Must supply nx and ny keyword arguments.')
prob = compute_probability(**kwargs)
export_grid(outputname, prob, bbox, nx, ny) | Calculate probability and export to gis raster/grid
format.
export_probability(prob_out,
trajectory_files = [myfiles1.nc, myfiles2.nc],
bbox = [-75, 23, -60, 45],
nx = 1000, ny = 1000,
method = 'overall') |
def install_metaboard(
replace_existing=False,
):
metaboard = AutoBunch()
metaboard.name = 'Metaboard'
metaboard.upload.protocol = 'usbasp'
metaboard.upload.maximum_size = '14336'
metaboard.upload.speed = '19200'
metaboard.build.mcu = 'atmega168'
metaboard.build.f_cpu = '16000000L'
metaboard.build.core = 'arduino'
metaboard.upload.disable_flushing = 'true'
board_id = 'metaboard'
install_board(board_id, metaboard, replace_existing=replace_existing) | install metaboard.
http://metalab.at/wiki/Metaboard |
def __total_pages(self) -> int:
row_count = self.model.query.count()
if isinstance(row_count, int):
return int(row_count / self.limit)
return None | Return max pages created by limit |
def __filter_query(self) -> str:
filter_query = 'WHERE %s'
bind_values = {}
if not self.__filters:
return None
for filter in self.__filters:
bind = {
'name': Security.random_string(5),
'value': filter['value']}
filter_str = '%s %s :%s' % \
(filter['column'], filter['operator'], bind['name'])
bind_values[bind['name']] = bind['value']
filter_query = filter_query % (filter_str + ' AND %s')
return {
'query': filter_query.replace(' AND %s', ''),
'binds': bind_values} | Generate a WHERE/AND string for SQL |
def page(self, page_number=0) -> list:
# workaround flask-sqlalchemy/issues/516
offset = page_number * self.limit
sql = 'SELECT * FROM %s {} LIMIT :li OFFSET :o' \
% (self.model.__tablename__)
filter_query = self.__filter_query()
if filter_query is None:
sql = text(sql.format(''))
result = self.model.db.engine.execute(
sql, li=self.limit, o=offset)
else:
filter_query['binds']['li'] = self.limit
filter_query['binds']['o'] = offset
sql = text(sql.format(filter_query['query']))
result = self.model.db.engine.execute(
sql, **filter_query['binds'])
result_keys = result.keys()
result_models = []
for row in result:
model = self.model()
key_count = 0
for key in result_keys:
setattr(model, key, row[key_count])
key_count = key_count + 1
result_models.append(model)
return result_models | Return [models] by page_number based on limit |
def links(self, base_link, current_page) -> dict:
max_pages = self.max_pages - 1 if \
self.max_pages > 0 else self.max_pages
base_link = '/%s' % (base_link.strip("/"))
self_page = current_page
prev = current_page - 1 if current_page is not 0 else None
prev_link = '%s/page/%s/%s' % (base_link, prev, self.limit) if \
prev is not None else None
next = current_page + 1 if current_page < max_pages else None
next_link = '%s/page/%s/%s' % (base_link, next, self.limit) if \
next is not None else None
first = 0
last = max_pages
return {
'self': '%s/page/%s/%s' % (base_link, self_page, self.limit),
'prev': prev_link,
'next': next_link,
'first': '%s/page/%s/%s' % (base_link, first, self.limit),
'last': '%s/page/%s/%s' % (base_link, last, self.limit),
} | Return JSON paginate links |
def json_paginate(self, base_url, page_number):
data = self.page(page_number)
first_id = None
last_id = None
if data:
first_id = data[0].id
last_id = data[-1].id
return {
'meta': {
'total_pages': self.max_pages,
'first_id': first_id,
'last_id': last_id,
'current_page': page_number
},
'data': self.page(page_number),
'links': self.links(base_url, page_number)
} | Return a dict for a JSON paginate |
def log_level(level_string):
if level_string.isdigit():
return int(level_string)
return getattr(logging, level_string.upper()) | Return a log level for a string
>>> log_level('DEBUG') == logging.DEBUG
True
>>> log_level('30') == logging.WARNING
True |
def add_arguments(parser, default_level=logging.INFO):
adder = (
getattr(parser, 'add_argument', None)
or getattr(parser, 'add_option')
)
adder(
'-l', '--log-level', default=default_level, type=log_level,
help="Set log level (DEBUG, INFO, WARNING, ERROR)") | Add arguments to an ArgumentParser or OptionParser for purposes of
grabbing a logging level. |
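A sketch of wiring it into an ArgumentParser and applying the parsed level:

import argparse
import logging

parser = argparse.ArgumentParser()
add_arguments(parser)
ns = parser.parse_args(['--log-level', 'DEBUG'])
logging.basicConfig(level=ns.log_level)   # ns.log_level == logging.DEBUG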