code | docstring
---|---|
def process_json_file(file_name):
    with open(file_name, 'rt') as fh:
        pybel_graph = pybel.from_json_file(fh, False)
    return process_pybel_graph(pybel_graph)
|
Return a PybelProcessor by processing a Node-Link JSON file.
For more information on this format, see:
http://pybel.readthedocs.io/en/latest/io.html#node-link-json
Parameters
----------
file_name : str
The path to a Node-Link JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
|
def _float(value):
    if "[" in value:
        value, sep, unit = value.partition("[")
        unit = sep + unit
        if unit in ("[km]", "[km/s]"):
            multiplier = 1000
        elif unit == "[s]":
            multiplier = 1
        else:
            raise ValueError("Unknown unit for this field", unit)
    else:
        multiplier = 1000
    return float(value) * multiplier
|
Conversion of state vector field, with automatic unit handling
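A couple of illustrative calls (hedged: per the else branch above, bare numbers are assumed to be kilometres and multiplied by 1000):
>>> _float('12 [km]')
12000.0
>>> _float('12 [s]')
12.0
>>> _float('12')
12000.0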
|
def _install_eslint(self, bootstrap_dir):
    with pushd(bootstrap_dir):
        result, install_command = self.install_module(
            package_manager=self.node_distribution.get_package_manager(package_manager=PACKAGE_MANAGER_YARNPKG),
            workunit_name=self.INSTALL_JAVASCRIPTSTYLE_TARGET_NAME,
            workunit_labels=[WorkUnitLabel.PREP])
        if result != 0:
            raise TaskError('Failed to install ESLint\n'
                            '\t{} failed with exit code {}'.format(install_command, result))
    self.context.log.debug('Successfully installed ESLint to {}'.format(bootstrap_dir))
    return bootstrap_dir
|
Install the ESLint distribution.
:rtype: string
|
def _expand(template, seq):
    if is_text(template):
        return _simple_expand(template, seq)
    elif is_data(template):
        template = wrap(template)
        assert template["from"], "Expecting template to have 'from' attribute"
        assert template.template, "Expecting template to have 'template' attribute"
        data = seq[-1][template["from"]]
        output = []
        for d in data:
            s = seq + (d,)
            output.append(_expand(template.template, s))
        return coalesce(template.separator, "").join(output)
    elif is_list(template):
        return "".join(_expand(t, seq) for t in template)
    else:
        if not _Log:
            _late_import()
        _Log.error("can not handle")
|
seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE
|
def reducer_metro(self, metro, values):
    lookup = CachedLookup(precision=POI_GEOHASH_PRECISION)
    for i, value in enumerate(values):
        type_tag, lonlat, data = value
        if type_tag == 1:
            lookup.insert(i, dict(
                geometry=dict(type='Point', coordinates=project(lonlat)),
                properties=dict(tags=data)
            ))
        else:
            if not lookup.data_store:
                return
            poi_names = []
            kwargs = dict(buffer_size=POI_DISTANCE, multiple=True)
            for poi in lookup.get(lonlat, **kwargs):
                has_tag = [tag in poi['tags'] for tag in POI_TAGS]
                if any(has_tag) and 'name' in poi['tags']:
                    poi_names.append(poi['tags']['name'])
            for poi in set(poi_names):
                yield (metro, poi), 1
|
Output tags of POI locations nearby tweet locations
Values will be sorted coming into reducer.
First element in each value tuple will be either 1 (osm POI) or 2 (geotweet).
Build a spatial index with POI records.
For each tweet lookup nearby POI, and emit tag values for predefined tags.
|
def name(self):
    if not hasattr(self, "_name"):
        self._name = "{}_hub_module_embedding".format(self.key)
    return self._name
|
Returns string. Used for variable_scope and naming.
|
def command(self, regexp):
    def decorator(fn):
        self.add_command(regexp, fn)
        return fn
    return decorator
|
Register a new command
:param str regexp: Regular expression matching the command to register
:Example:
>>> @bot.command(r"/echo (.+)")
>>> def echo(chat, match):
>>> return chat.reply(match.group(1))
|
def convert_roipooling(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)
    pooled_shape = convert_string_to_list(attrs.get('pooled_size'))
    scale = float(attrs.get("spatial_scale"))
    node = onnx.helper.make_node(
        'MaxRoiPool',
        input_nodes,
        [name],
        pooled_shape=pooled_shape,
        spatial_scale=scale,
        name=name
    )
    return [node]
|
Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool
operator and return the created node.
|
def com_google_fonts_check_metadata_nameid_family_and_full_names(ttFont, font_metadata):
    from fontbakery.utils import get_name_entry_strings
    font_familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
    if font_familynames:
        font_familyname = font_familynames[0]
    else:
        font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)[0]
    font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)[0]
    if font_fullname != font_metadata.full_name:
        yield FAIL, Message("fullname-mismatch",
                            ("METADATA.pb: Fullname (\"{}\")"
                             " does not match name table"
                             " entry \"{}\" !").format(font_metadata.full_name,
                                                       font_fullname))
    elif font_familyname != font_metadata.name:
        yield FAIL, Message("familyname-mismatch",
                            ("METADATA.pb: Family name (\"{}\")"
                             " does not match name table"
                             " entry \"{}\" !").format(font_metadata.name,
                                                       font_familyname))
    else:
        yield PASS, ("METADATA.pb familyname and fullName fields"
                     " match corresponding name table entries.")
|
METADATA.pb font.name and font.full_name fields match
the values declared on the name table?
|
def generate_http_basic_token(username, password):
    token = base64.b64encode('{}:{}'.format(username, password).encode('utf-8')).decode('utf-8')
    return token
|
Generate an HTTP basic auth token from username and password.
Returns a token string (not bytes).
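A minimal usage sketch (assumes `base64` is imported at module level, as the function requires):
>>> generate_http_basic_token('user', 'pass')
'dXNlcjpwYXNz'
The result is the base64 encoding of "user:pass", decoded back to str, ready for an `Authorization: Basic <token>` header.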
|
def user_id_partition_keygen(request_envelope):
    try:
        user_id = request_envelope.context.system.user.user_id
        return user_id
    except AttributeError:
        raise PersistenceException("Couldn't retrieve user id from request "
                                   "envelope, for partition key use")
|
Retrieve user id from request envelope, to use as partition key.
:param request_envelope: Request Envelope passed during skill
invocation
:type request_envelope: ask_sdk_model.RequestEnvelope
:return: User Id retrieved from request envelope
:rtype: str
:raises: :py:class:`ask_sdk_core.exceptions.PersistenceException`
|
def list_runners(*args):
    run_ = salt.runner.Runner(__opts__)
    runners = set()
    if not args:
        for func in run_.functions:
            runners.add(func.split('.')[0])
        return sorted(runners)
    for module in args:
        if '*' in module:
            for func in fnmatch.filter(run_.functions, module):
                runners.add(func.split('.')[0])
        else:
            for func in run_.functions:
                mod_test = func.split('.')[0]
                if mod_test == module:
                    runners.add(mod_test)
    return sorted(runners)
|
List the runners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runners
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runners 'm*'
|
def listen(timeout=6.0, port=BOOT_PORT):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('0.0.0.0', port))
    s.settimeout(timeout)
    try:
        message, (ipaddr, port) = s.recvfrom(512)
        return ipaddr
    except socket.timeout:
        return None
|
Listen for a 'ping' broadcast message from an unbooted SpiNNaker board.
Unbooted SpiNNaker boards send out a UDP broadcast message every 4-ish
seconds on port 54321. This function listens for such messages and reports
the IP address that it came from.
Parameters
----------
timeout : float
Number of seconds to wait for a message to arrive.
port : int
The port number to listen on.
Returns
-------
str or None
The IP address of the SpiNNaker board from which a ping was received or
None if no ping was observed.
|
def parallel(processes, threads):
    pool = multithread(threads)
    pool.map(run_process, processes)
    pool.close()
    pool.join()
|
execute jobs in processes using N threads
|
def ems(self, value: int) -> 'Gap':
    raise_not_number(value)
    self.gap = '{}em'.format(value)
    return self
|
Set the margin in ems.
|
def weld_iloc_indices_with_missing(array, weld_type, indices):
    weld_obj = create_empty_weld_object()
    weld_obj_id_array = get_weld_obj_id(weld_obj, array)
    weld_obj_id_indices = get_weld_obj_id(weld_obj, indices)
    missing_literal = default_missing_data_literal(weld_type)
    if weld_type == WeldVec(WeldChar()):
        missing_literal = get_weld_obj_id(weld_obj, missing_literal)
    weld_template = ...  # the Weld IR template string is elided in this snippet
    weld_obj.weld_code = weld_template.format(array=weld_obj_id_array,
                                              indices=weld_obj_id_indices,
                                              type=weld_type,
                                              missing=missing_literal)
    return weld_obj
|
Retrieve the values at indices. Indices greater than array length get replaced with
a corresponding-type missing value literal.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data. Assumed to be bool data.
weld_type : WeldType
The WeldType of the array data.
indices : numpy.ndarray or WeldObject
The indices to lookup.
Returns
-------
WeldObject
Representation of this computation.
|
def reverse(self, points, **kwargs):
    if isinstance(points, list):
        return self.batch_reverse(points, **kwargs)
    if self.order == "lat":
        x, y = points
    else:
        y, x = points
    return self.reverse_point(x, y, **kwargs)
|
General method for reversing addresses, either a single address or
multiple.
``points`` should either be a longitude/latitude pair or a list of
such pairs::
>>> multiple_locations = reverse([(40, -19), (43, 112)])
>>> single_location = reverse((40, -19))
|
def import_env(*envs):
    'import environment variables from host'
    for env in envs:
        parts = env.split(':', 1)
        if len(parts) == 1:
            export_as = env
        else:
            env, export_as = parts
        env_val = os.environ.get(env)
        if env_val is not None:
            yield '{}={}'.format(export_as, shlex.quote(env_val))
|
import environment variables from host
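A hedged usage sketch (assumes `os` and `shlex` are imported at module level; `MY_TOKEN` is a made-up variable name):
>>> os.environ['MY_TOKEN'] = 'abc123'
>>> list(import_env('MY_TOKEN', 'MY_TOKEN:REMOTE_TOKEN'))
['MY_TOKEN=abc123', 'REMOTE_TOKEN=abc123']
The `NAME:EXPORT_AS` form re-exports a host variable under a different name.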
|
def skip(roman_numeral, skip=1):
    i = numerals.index(roman_numeral) + skip
    return numerals[i % 7]
|
Skip the given places to the next roman numeral.
Examples:
>>> skip('I')
'II'
>>> skip('VII')
'I'
>>> skip('I', 2)
'III'
|
def _construct_from_json(self, rec):
    self.delete()
    for required_key in ['dagobah_id', 'created_jobs']:
        setattr(self, required_key, rec[required_key])
    for job_json in rec.get('jobs', []):
        self._add_job_from_spec(job_json)
    self.commit(cascade=True)
|
Construct this Dagobah instance from a JSON document.
|
def bootstrap(self, mc_bit=0x10, seed=None):
    if seed is not None: np.random.seed(seed)
    data = copy.deepcopy(self.data)
    idx = np.random.randint(0, len(data), len(data))
    data[self.config['catalog']['mag_1_field']][:] = self.mag_1[idx]
    data[self.config['catalog']['mag_err_1_field']][:] = self.mag_err_1[idx]
    data[self.config['catalog']['mag_2_field']][:] = self.mag_2[idx]
    data[self.config['catalog']['mag_err_2_field']][:] = self.mag_err_2[idx]
    data[self.config['catalog']['mc_source_id_field']][:] |= mc_bit
    return Catalog(self.config, data=data)
|
Return a random catalog by bootstrapping the colors of the objects in the current catalog.
|
def _keys(expr):
    if isinstance(expr, SequenceExpr):
        dtype = expr.data_type
    else:
        dtype = expr.value_type
    return composite_op(expr, DictKeys, df_types.List(dtype.key_type))
|
Retrieve keys of a dict
:param expr: dict sequence / scalar
:return:
|
def delete_by_id(self, webhook, params={}, **options):
    path = "/webhooks/%s" % (webhook)
    return self.client.delete(path, params, **options)
|
This method permanently removes a webhook. Note that it may be possible
to receive a request that was already in flight after deleting the
webhook, but no further requests will be issued.
Parameters
----------
webhook : {Id} The webhook to delete.
|
def as_dot(self) -> str:
    return nx.drawing.nx_pydot.to_pydot(self._graph).to_string()
|
Return as a string the dot version of the graph.
|
def _extract(self):
    self.log.debug("Extracting emails from text content")
    for item in self.data:
        emails = extract_emails(item, self.domain, self.fuzzy)
        self.results.extend(emails)
    self.log.debug("Email extraction completed")
    return list(set(self.results))
|
Extract email addresses from results.
Text content from all crawled pages is run through a simple email
extractor. Data is cleaned prior to running pattern expressions.
|
def dfs_back_edges(graph, start_node):
    visited = set()
    finished = set()

    def _dfs_back_edges_core(node):
        visited.add(node)
        for child in iter(graph[node]):
            if child not in finished:
                if child in visited:
                    yield node, child
                else:
                    for s, t in _dfs_back_edges_core(child):
                        yield s, t
        finished.add(node)

    for s, t in _dfs_back_edges_core(start_node):
        yield s, t
|
Do a DFS traversal of the graph, and return the back edges.
Note: This is just a naive recursive implementation, feel free to replace it.
I couldn't find anything in networkx with this functionality; although the
name suggests it, `dfs_labeled_edges` does something different.
:param graph: The graph to traverse.
:param start_node: The node where to start the traversal
:returns: An iterator of 'backward' edges
|
def remove_object(self, bucket_name, object_name):
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    self._url_open('DELETE', bucket_name=bucket_name,
                   object_name=object_name)
|
Remove an object from the bucket.
:param bucket_name: Bucket of object to remove
:param object_name: Name of object to remove
:return: None
|
def load(self):
    hdf_filename = os.path.join(self._dump_dirname, 'result.h5')
    if os.path.isfile(hdf_filename):
        store = pd.HDFStore(hdf_filename, mode='r')
        keys = store.keys()
        if keys == ['/df']:
            self.result = store['df']
        else:
            if set(keys) == set(map(lambda i: '/%s' % i, range(len(keys)))):
                self.result = [store[str(k)] for k in range(len(keys))]
            else:
                self.result = {k[1:]: store[k] for k in keys}
    else:
        self.result = joblib.load(
            os.path.join(self._output_dirname, 'dump', 'result.pkl'))
|
Load this step's result from its dump directory
|
def save(self, fname):
    try:
        with open(fname, "w") as f:
            f.write(str(self))
    except Exception as ex:
        print('ERROR = cant save grid results to ' + fname + str(ex))
|
saves a grid to file as ASCII text
|
def ProcessMessages(self, msgs=None, token=None):
    if not data_store.AFF4Enabled():
        return
    filestore_fd = aff4.FACTORY.Create(
        legacy_filestore.FileStore.PATH,
        legacy_filestore.FileStore,
        mode="w",
        token=token)
    for vfs_urn in msgs:
        with aff4.FACTORY.Open(vfs_urn, mode="rw", token=token) as vfs_fd:
            try:
                filestore_fd.AddFile(vfs_fd)
            except Exception as e:
                logging.exception("Exception while adding file to filestore: %s", e)
|
Process the new file and add to the file store.
|
def baseglob(pat, base):
    return [f for f in glob(pat) if f.startswith(base)]
|
Given a pattern and a base, return files that match the glob pattern
and whose paths start with the base.
|
def patch_stdout_context(self, raw=False, patch_stdout=True, patch_stderr=True):
    return _PatchStdoutContext(
        self.stdout_proxy(raw=raw),
        patch_stdout=patch_stdout, patch_stderr=patch_stderr)
|
Return a context manager that will replace ``sys.stdout`` with a proxy
that makes sure that all printed text will appear above the prompt, and
that it doesn't destroy the output from the renderer.
:param patch_stdout: Replace `sys.stdout`.
:param patch_stderr: Replace `sys.stderr`.
|
def _get_redis_server(opts=None):
    global REDIS_SERVER
    if REDIS_SERVER:
        return REDIS_SERVER
    if not opts:
        opts = _get_redis_cache_opts()
    if opts['cluster_mode']:
        REDIS_SERVER = StrictRedisCluster(startup_nodes=opts['startup_nodes'],
                                          skip_full_coverage_check=opts['skip_full_coverage_check'])
    else:
        REDIS_SERVER = redis.StrictRedis(opts['host'],
                                         opts['port'],
                                         unix_socket_path=opts['unix_socket_path'],
                                         db=opts['db'],
                                         password=opts['password'])
    return REDIS_SERVER
|
Return the Redis server instance.
The object instance is cached.
|
def get_institutes_trend_graph_urls(start, end):
    graph_list = []
    for institute in Institute.objects.all():
        urls = get_institute_trend_graph_url(institute, start, end)
        urls['institute'] = institute
        graph_list.append(urls)
    return graph_list
|
Get all institute trend graphs.
|
def _chance(solution, pdf):
    return _prod([1.0 - abs(bit - p) for bit, p in zip(solution, pdf)])
|
Return the chance of obtaining a solution from a pdf.
The probability of many independent weighted "coin flips" (one for each bit).
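A small worked example (hedged: assumes `_prod` is a simple product helper, as the name suggests). For solution [1, 0, 1] and pdf [0.9, 0.2, 0.5] the factors are 0.9, 0.8 and 0.5:
>>> round(_chance([1, 0, 1], [0.9, 0.2, 0.5]), 2)
0.36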
|
def _get_stddevs(self, C, stddev_types, num_sites):
    sigma_inter = C['tau'] + np.zeros(num_sites)
    sigma_intra = C['sigma'] + np.zeros(num_sites)
    std = []
    for stddev_type in stddev_types:
        if stddev_type == const.StdDev.TOTAL:
            std += [np.sqrt(sigma_intra**2 + sigma_inter**2)]
        elif stddev_type == const.StdDev.INTRA_EVENT:
            std.append(sigma_intra)
        elif stddev_type == const.StdDev.INTER_EVENT:
            std.append(sigma_inter)
    return std
|
Return total standard deviation as described in paragraph 5.2, page 200.
|
def mean_pressure_weighted(pressure, *args, **kwargs):
    heights = kwargs.pop('heights', None)
    bottom = kwargs.pop('bottom', None)
    depth = kwargs.pop('depth', None)
    ret = []
    layer_arg = get_layer(pressure, *args, heights=heights,
                          bottom=bottom, depth=depth)
    layer_p = layer_arg[0]
    layer_arg = layer_arg[1:]
    pres_int = 0.5 * (layer_p[-1].magnitude**2 - layer_p[0].magnitude**2)
    for i, datavar in enumerate(args):
        arg_mean = np.trapz(layer_arg[i] * layer_p, x=layer_p) / pres_int
        ret.append(arg_mean * datavar.units)
    return ret
|
r"""Calculate pressure-weighted mean of an arbitrary variable through a layer.
Layer top and bottom specified in height or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
*args : `pint.Quantity`
Parameters for which the pressure-weighted mean is to be calculated.
heights : `pint.Quantity`, optional
Heights from sounding. Standard atmosphere heights assumed (if needed)
if no heights are given.
bottom: `pint.Quantity`, optional
The bottom of the layer in either the provided height coordinate
or in pressure. Don't provide in meters AGL unless the provided
height coordinate is meters AGL. Default is the first observation,
assumed to be the surface.
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa.
Returns
-------
`pint.Quantity`
u_mean: u-component of layer mean wind.
`pint.Quantity`
v_mean: v-component of layer mean wind.
|
def settings(**kwargs):
    from pyemma import config
    old_settings = {}
    try:
        for k, v in kwargs.items():
            old_settings[k] = getattr(config, k)
            setattr(config, k, v)
        yield
    finally:
        for k, v in old_settings.items():
            setattr(config, k, v)
|
apply given PyEMMA config values temporarily within the given context.
|
def register(g):
    assert isinstance(g, Generator)
    id = g.id()
    __generators[id] = g
    for t in sequence.unique(g.target_types()):
        __type_to_generators.setdefault(t, []).append(g)
    base = id.split('.', 100)[0]
    __generators_for_toolset.setdefault(base, []).append(g)
    invalidate_extendable_viable_source_target_type_cache()
|
Registers new generator instance 'g'.
|
def mdaZeros(shap, dtype=numpy.float, mask=None):
    res = MaskedDistArray(shap, dtype)
    res[:] = 0
    res.mask = mask
    return res
|
Zero constructor for masked distributed array
@param shap the shape of the array
@param dtype the numpy data type
@param mask mask array (or None if all data elements are valid)
|
def netHours(self):
    if self.specifiedHours is not None:
        return self.specifiedHours
    elif self.category in [getConstant('general__eventStaffCategoryAssistant'), getConstant('general__eventStaffCategoryInstructor')]:
        return self.event.duration - sum([sub.netHours for sub in self.replacementFor.all()])
    else:
        return sum([x.duration for x in self.occurrences.filter(cancelled=False)])
|
For regular event staff, this is the net hours worked for financial purposes.
For Instructors, netHours is calculated net of any substitutes.
|
def We(self):
    We = trapz_loglog(self._gam * self._nelec, self._gam * mec2)
    return We
|
Total energy in electrons used for the radiative calculation
|
def make_pre_build_hook(extra_compiler_config_params):
    def pre_build_hook(build_context, target):
        target.compiler_config = CompilerConfig(
            build_context, target, extra_compiler_config_params)
        target.props._internal_dict_['compiler_config'] = (
            target.compiler_config.as_dict())
    return pre_build_hook
|
Return a pre-build hook function for C++ builders.
When called, during graph build, it computes and stores the compiler-config
object on the target, as well as adding it to the internal_dict prop for
hashing purposes.
|
def calculate(self, order, transaction):
    cost_per_share = transaction.price * self.cost_per_dollar
    return abs(transaction.amount) * cost_per_share
|
Pay commission based on dollar value of shares.
|
def tuple(self, r):
    m, n, t = self.args
    r, k = divmod(r, t)
    r, u = divmod(r, 2)
    i, j = divmod(r, n)
    return i, j, u, k
|
Converts the linear_index `r` into a chimera_index
Parameters
----------
r : int
The linear_index node label
Returns
-------
q : tuple
The chimera_index node label corresponding to r
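A standalone sketch of the same divmod decomposition, with (m, n, t) passed explicitly instead of read from `self.args` (the `chimera_tuple` helper is hypothetical, for illustration only):
def chimera_tuple(r, m, n, t):
    # peel off the in-shore index k, the side u, then the (row, column) pair
    r, k = divmod(r, t)
    r, u = divmod(r, 2)
    i, j = divmod(r, n)
    return i, j, u, k
For example, chimera_tuple(10, 2, 2, 4) == (0, 1, 0, 2).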
|
def read(self, num_bytes):
    self.check_pyb()
    try:
        return self.pyb.serial.read(num_bytes)
    except (serial.serialutil.SerialException, TypeError):
        self.close()
        raise DeviceError('serial port %s closed' % self.dev_name_short)
|
Reads data from the pyboard over the serial port.
|
def callback(self, timestamp, event_type, payload):
    try:
        data = (event_type, payload)
        LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, '
                  'payload: %(payload)s\n', (
                      {'event': event_type, 'payload': payload}))
        if 'create' in event_type:
            pri = self._create_pri
        elif 'delete' in event_type:
            pri = self._delete_pri
        elif 'update' in event_type:
            pri = self._update_pri
        else:
            pri = self._delete_pri
        self._pq.put((pri, timestamp, data))
    except Exception as exc:
        LOG.exception('Error: %(err)s for event %(event)s',
                      {'err': str(exc), 'event': event_type})
|
Callback method for processing events in notification queue.
:param timestamp: time the message is received.
:param event_type: event type in the notification queue such as
identity.project.created, identity.project.deleted.
:param payload: Contains information of an event
|
def Pop(self, index=0):
    if index < 0:
        index += len(self)
    if index == 0:
        result = self.__class__()
        result.SetRawData(self.GetRawData())
        self.SetRawData(self.nested_path.GetRawData())
    else:
        previous = self[index - 1]
        result = previous.nested_path
        previous.nested_path = result.nested_path
        result.nested_path = None
    return result
|
Removes and returns the pathspec at the specified index.
|
def writeint2dnorm(filename, Intensity, Error=None):
    whattosave = {'Intensity': Intensity}
    if Error is not None:
        whattosave['Error'] = Error
    if filename.upper().endswith('.NPZ'):
        np.savez(filename, **whattosave)
    elif filename.upper().endswith('.MAT'):
        scipy.io.savemat(filename, whattosave)
    else:
        np.savetxt(filename, Intensity)
        if Error is not None:
            name, ext = os.path.splitext(filename)
            np.savetxt(name + '_error' + ext, Error)
|
Save the intensity and error matrices to a file
Inputs
------
filename: string
the name of the file
Intensity: np.ndarray
the intensity matrix
Error: np.ndarray, optional
the error matrix (can be ``None``, if no error matrix is to be saved)
Output
------
None
|
def tokenize_by_number(s):
    r = find_number(s)
    if r == None:
        return [s]
    else:
        tokens = []
        if r[0] > 0:
            tokens.append(s[0:r[0]])
        tokens.append(float(s[r[0]:r[1]]))
        if r[1] < len(s):
            tokens.extend(tokenize_by_number(s[r[1]:]))
        return tokens
    assert False
|
Splits a string into a list of tokens,
each of which is either a string containing no numbers
or a float.
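An illustrative call (hedged: assumes `find_number` returns the (start, end) span of the first number in the string, as the slicing above implies):
>>> tokenize_by_number('abc123def45')
['abc', 123.0, 'def', 45.0]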
|
def reset(self):
    "Close the current failed connection and prepare for a new one"
    log.info("resetting client")
    rpc_client = self._rpc_client
    self._addrs.append(self._peer.addr)
    self.__init__(self._addrs)
    self._rpc_client = rpc_client
    self._dispatcher.rpc_client = rpc_client
    rpc_client._client = weakref.ref(self)
|
Close the current failed connection and prepare for a new one
|
def config_delete(args):
    r = fapi.delete_workspace_config(args.project, args.workspace,
                                     args.namespace, args.config)
    fapi._check_response_code(r, [200, 204])
    return r.text if r.text else None
|
Remove a method config from a workspace
|
def remove(coll, value):
    coll_class = coll.__class__
    return coll_class(x for x in coll if x != value)
|
Remove all the occurrences of a given value
:param coll: a collection
:param value: the value to remove
:returns: a list
>>> data = ('NA', 0, 1, 'NA', 1, 2, 3, 'NA', 5)
>>> remove(data, 'NA')
(0, 1, 1, 2, 3, 5)
|
def deactivate(self, node_id):
    node = self.node_list[node_id]
    self.node_list[node_id] = node._replace(active=False)
|
Deactivate the node identified by node_id.
Deactivates the node corresponding to node_id, which means that
it can never be the output of a nearest_point query.
Note:
The node is not removed from the tree; its data is still available.
Args:
node_id (int): The node identifier (given to the user after
its insertion).
|
def __set_document_signals(self):
    self.document().contentsChanged.connect(self.contents_changed.emit)
    self.document().contentsChanged.connect(self.__document__contents_changed)
    self.document().modificationChanged.connect(self.modification_changed.emit)
    self.document().modificationChanged.connect(self.__document__modification_changed)
|
Connects the editor document signals.
|
def root(reference_labels, estimated_labels):
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots = encode_many(estimated_labels, False)[0]
    comparison_scores = (ref_roots == est_roots).astype(np.float)
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
|
Compare chords according to roots.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut.
|
def is_deletion(self):
    return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)
|
Does this variant represent the deletion of nucleotides from the
reference genome?
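A free-function restatement for illustration (assumes `self.ref` and `self.alt` are plain allele strings):
def is_deletion(ref, alt):
    # True when alt is a strict prefix of ref, i.e. trailing bases were deleted
    return (len(ref) > len(alt)) and ref.startswith(alt)
Here is_deletion('ATG', 'A') is True (the trailing 'TG' was deleted), while is_deletion('ATG', 'TG') is False because 'ATG' does not start with 'TG'.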
|
def query_by_account(self, account_id, end_time=None, start_time=None):
    path = {}
    data = {}
    params = {}
    path["account_id"] = account_id
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug("GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/authentication/accounts/{account_id}".format(**path), data=data, params=params, no_data=True)
|
Query by account.
List authentication events for a given account.
|
def clear_build_directory(self):
    stat = os.stat(self.build_directory)
    shutil.rmtree(self.build_directory)
    os.makedirs(self.build_directory, stat.st_mode)
|
Clear the build directory where pip unpacks the source distribution archives.
|
def _merge(self, value):
    if value is not None and not isinstance(value, dict):
        return value
    if not self._pairs:
        return {}
    collected = {}
    for k_validator, v_validator in self._pairs:
        k_default = k_validator.get_default_for(None)
        if k_default is None:
            continue
        if value:
            v_for_this_k = value.get(k_default)
        else:
            v_for_this_k = None
        v_default = v_validator.get_default_for(v_for_this_k)
        collected.update({k_default: v_default})
    if value:
        for k, v in value.items():
            if k not in collected:
                collected[k] = v
    return collected
|
Returns a dictionary based on `value` with each value recursively
merged with `spec`.
|
def etcd(url=DEFAULT_URL, mock=False, **kwargs):
    if mock:
        from etc.adapters.mock import MockAdapter
        adapter_class = MockAdapter
    else:
        from etc.adapters.etcd import EtcdAdapter
        adapter_class = EtcdAdapter
    return Client(adapter_class(url, **kwargs))
|
Creates an etcd client.
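A usage sketch (the endpoint URL is only an example):
>>> client = etcd('http://127.0.0.1:2379')   # real EtcdAdapter
>>> test_client = etcd(mock=True)            # in-memory MockAdapter, handy in tests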
|
def get_by_name(opname, operators):
    ret_op_classes = [op for op in operators if op.__name__ == opname]
    if len(ret_op_classes) == 0:
        raise TypeError('Cannot find operator {} in operator dictionary'.format(opname))
    elif len(ret_op_classes) > 1:
        raise ValueError(
            'Found duplicate operators {} in operator dictionary. Please check '
            'your dictionary file.'.format(opname)
        )
    ret_op_class = ret_op_classes[0]
    return ret_op_class
|
Return operator class instance by name.
Parameters
----------
opname: str
Name of the sklearn class that belongs to a TPOT operator
operators: list
List of operator classes from operator library
Returns
-------
ret_op_class: class
An operator class
|
def is_iterable(maybe_iter, unless=(string_types, dict)):
    try:
        iter(maybe_iter)
    except TypeError:
        return False
    return not isinstance(maybe_iter, unless)
|
Return whether ``maybe_iter`` is an iterable, unless it's an instance of one
of the base class, or tuple of base classes, given in ``unless``.
Example::
>>> is_iterable('foo')
False
>>> is_iterable(['foo'])
True
>>> is_iterable(['foo'], unless=list)
False
>>> is_iterable(xrange(5))
True
|
def turn_on_nightlight(self):
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    body['mode'] = 'auto'
    response, _ = helpers.call_api(
        '/15a/v1/device/nightlightstatus',
        'put',
        headers=helpers.req_headers(self.manager),
        json=body
    )
    return helpers.check_response(response, '15a_ntlight')
|
Turn on nightlight
|
def get_assignable_repository_ids(self, repository_id):
    mgr = self._get_provider_manager('REPOSITORY', local=True)
    lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
    repositories = lookup_session.get_repositories()
    id_list = []
    for repository in repositories:
        id_list.append(repository.get_id())
    return IdList(id_list)
|
Gets a list of repositories including and under the given repository node in which any asset can be assigned.
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
return: (osid.id.IdList) - list of assignable repository ``Ids``
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
|
def _populate_comptparms(self, img_array):
    if img_array.dtype == np.uint8:
        comp_prec = 8
    else:
        comp_prec = 16
    numrows, numcols, num_comps = img_array.shape
    if version.openjpeg_version_tuple[0] == 1:
        comptparms = (opj.ImageComptParmType * num_comps)()
    else:
        comptparms = (opj2.ImageComptParmType * num_comps)()
    for j in range(num_comps):
        comptparms[j].dx = self._cparams.subsampling_dx
        comptparms[j].dy = self._cparams.subsampling_dy
        comptparms[j].w = numcols
        comptparms[j].h = numrows
        comptparms[j].x0 = self._cparams.image_offset_x0
        comptparms[j].y0 = self._cparams.image_offset_y0
        comptparms[j].prec = comp_prec
        comptparms[j].bpp = comp_prec
        comptparms[j].sgnd = 0
    self._comptparms = comptparms
|
Instantiate and populate comptparms structure.
This structure defines the image components.
Parameters
----------
img_array : ndarray
Image data to be written to file.
|
def _parse_xml(self, xml):
    vms("Parsing <static> XML child tag.", 2)
    for child in xml:
        if "path" in child.attrib and "target" in child.attrib:
            if child.tag == "file":
                self.files.append({"source": child.attrib["path"],
                                   "target": child.attrib["target"]})
            elif child.tag == "folder":
                self.folders.append({"source": child.attrib["path"],
                                     "target": child.attrib["target"]})
|
Extracts objects representing and interacting with the settings in the
xml tag.
|
def execute_scenario(scenario):
    for action in scenario.sequence:
        execute_subcommand(scenario.config, action)
    if 'destroy' in scenario.sequence:
        scenario.prune()
|
Execute each command in the given scenario's configured sequence.
:param scenario: The scenario to execute.
:returns: None
|
def pick_tile_size(self, seg_size, data_lengths, valid_chunks, valid_lengths):
    if len(valid_lengths) == 1:
        return data_lengths[0], valid_chunks[0], valid_lengths[0]
    else:
        target_size = seg_size / 3
        pick, pick_diff = 0, abs(valid_lengths[0] - target_size)
        for i, size in enumerate(valid_lengths):
            if abs(size - target_size) < pick_diff:
                pick, pick_diff = i, abs(size - target_size)
        return data_lengths[pick], valid_chunks[pick], valid_lengths[pick]
|
Choose job tile size based on science segment length.
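A worked example: with seg_size = 300 the target size is 100, so for valid_lengths = [64, 128, 256] the loop picks index 1 (|128 - 100| = 28 is the smallest difference) and returns data_lengths[1], valid_chunks[1], valid_lengths[1].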
|
def is_refreshable_url(self, request):
    backend_session = request.session.get(BACKEND_SESSION_KEY)
    is_oidc_enabled = True
    if backend_session:
        auth_backend = import_string(backend_session)
        is_oidc_enabled = issubclass(auth_backend, OIDCAuthenticationBackend)
    return (
        request.method == 'GET' and
        is_authenticated(request.user) and
        is_oidc_enabled and
        request.path not in self.exempt_urls
    )
|
Takes a request and returns whether it triggers a refresh examination
:arg HttpRequest request:
:returns: boolean
|
def broadcast(self, channel, event, data):
    payload = self._server.serialize_event(event, data)
    for socket_id in self.subscriptions.get(channel, ()):
        rv = self._server.sockets.get(socket_id)
        if rv is not None:
            rv.socket.send(payload)
|
Broadcasts an event to all sockets listening on a channel.
|
def configure(cls, **kwargs):
    attrs = {}
    for key in ('prefix', 'handle_uniqueness', 'key'):
        if key in kwargs:
            attrs[key] = kwargs.pop(key)
    if 'transform' in kwargs:
        attrs['transform'] = staticmethod(kwargs.pop('transform'))
    name = kwargs.pop('name', None)
    if kwargs:
        raise TypeError('%s.configure only accepts these named arguments: %s' % (
            cls.__name__,
            ', '.join(('prefix', 'transform', 'handle_uniqueness', 'key', 'name')),
        ))
    return type((str if PY3 else oldstr)(name or cls.__name__), (cls, ), attrs)
|
Create a new index class with the given info
This allows avoiding the creation of a new class when only a few
changes are to be made
Parameters
----------
kwargs: dict
prefix: str
The string part to use in the collection, before the normal suffix.
For example `foo` to filter on `myfield__foo__eq=`
This prefix will also be used by the indexes to store the data at
a different place than the same index without prefix.
transform: callable
A function that will transform the value to be used as the reference
for the index, before the call to `normalize_value`.
It can be extraction of a date, or any computation.
The filter in the collection will then have to use a transformed value,
for example `birth_date__year=1976` if the transform takes a date and
transforms it to a year.
handle_uniqueness: bool
Whether or not the index should handle uniqueness
key: str
To override the key used by the index. Two indexes for the same field of
the same type must not have the same key or data will be saved at the same place.
Note that the default key is None for `EqualIndex`, `text-range` for
`TextRangeIndex` and `number-range` for `NumberRangeIndex`
name: str
The name of the new multi-index class. If not set, it will be the same
as the current class
Returns
-------
type
A new class based on `cls`, with the new attributes set
|
def _forbidden(self, path, value):
    if path[0] == '/':
        path = path[1:]
    for rule in reversed(self.rules):
        if isinstance(rule[1], six.string_types):
            if fnmatch(path, rule[1]):
                return not rule[0]
        elif rule[1](path, value):
            return not rule[0]
    return True
|
Is a stat forbidden? Goes through the rules to find one that
applies. Chronologically newer rules are higher-precedence than
older ones. If no rule applies, the stat is forbidden by default.
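A sketch of a rule list in the shape the indexing above implies (a list of (allowed, pattern-or-callable) pairs; the patterns are made up):
rules = [
    (True,  'gauges.*'),                     # allow all gauge stats...
    (False, 'gauges.debug.*'),               # ...but a later rule forbids the debug ones
    (True,  lambda path, value: value > 0),  # callables receive (path, value)
]
Rules are scanned newest-first and the first match decides; with no match the stat is forbidden.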
|
def parse_attribute_map(filenames):
    forward = {}
    backward = {}
    for filename in filenames:
        with open(filename) as fp:
            for line in fp:
                (name, friendly_name, name_format) = line.strip().split()
                forward[(name, name_format)] = friendly_name
                backward[friendly_name] = (name, name_format)
    return forward, backward
|
Expects a file with each line composed of the OID for the attribute,
exactly one space, a user-friendly name of the attribute, and then
the type specification of the name.
:param filenames: List of filenames on mapfiles.
:return: A 2-tuple, one dictionary with the oid as keys and the friendly
names as values, the other one the other way around.
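A sketch of two map-file lines in that format (standard SAML attribute OIDs, shown only as an illustration):
urn:oid:2.5.4.4 sn urn:oasis:names:tc:SAML:2.0:attrname-format:uri
urn:oid:2.5.4.42 givenName urn:oasis:names:tc:SAML:2.0:attrname-format:uri
Here `forward[('urn:oid:2.5.4.4', 'urn:oasis:names:tc:SAML:2.0:attrname-format:uri')]` would be `'sn'`, and `backward['sn']` the corresponding (name, name_format) pair.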
|
def matches(self, query):
    thread_query = 'thread:{tid} AND {subquery}'.format(tid=self._id,
                                                        subquery=query)
    num_matches = self._dbman.count_messages(thread_query)
    return num_matches > 0
|
Check if this thread matches the given notmuch query.
:param query: The query to check against
:type query: string
:returns: True if this thread matches the given query, False otherwise
:rtype: bool
|
def setup(app):
    import sphinxcontrib_django.docstrings
    import sphinxcontrib_django.roles
    sphinxcontrib_django.docstrings.setup(app)
    sphinxcontrib_django.roles.setup(app)
|
Allow this module to be used as sphinx extension.
This attaches the Sphinx hooks.
:type app: sphinx.application.Sphinx
|
def from_dict(cls, dictionary):
    cookbooks = set()
    sources = set()
    other = set()
    groups = [sources, cookbooks, other]
    for key, val in dictionary.items():
        if key == 'cookbook':
            cookbooks.update({cls.cookbook_statement(cbn, meta)
                              for cbn, meta in val.items()})
        elif key == 'source':
            sources.update({"source '%s'" % src for src in val})
        elif key == 'metadata':
            other.add('metadata')
    body = ''
    for group in groups:
        if group:
            body += '\n'
            body += '\n'.join(group)
    return cls.from_string(body)
|
Create a Berksfile instance from a dict.
|
def many(self):
    for i in self.tc_requests.many(self.api_type, None, self.api_entity):
        yield i
|
Gets all of the owners available.
Args:
|
def create_mixin(self):
    _builder = self

    class CustomModelMixin(object):
        @cached_property
        def _content_type(self):
            return ContentType.objects.get_for_model(self)

        @classmethod
        def get_model_custom_fields(cls):
            return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))

        def get_custom_fields(self):
            return _builder.fields_model_class.objects.filter(content_type=self._content_type)

        def get_custom_value(self, field):
            return _builder.values_model_class.objects.get(custom_field=field,
                                                           content_type=self._content_type,
                                                           object_id=self.pk)

        def set_custom_value(self, field, value):
            custom_value, created = \
                _builder.values_model_class.objects.get_or_create(custom_field=field,
                                                                  content_type=self._content_type,
                                                                  object_id=self.pk)
            custom_value.value = value
            custom_value.full_clean()
            custom_value.save()
            return custom_value

    return CustomModelMixin
|
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
|
def undobutton_action(self):
    if len(self.history) > 1:
        old = self.history.pop(-1)
        self.selection_array = old
        self.mask.set_data(old)
        self.fig.canvas.draw_idle()
|
when undo is clicked, revert the thematic map to the previous state
|
def create(cls, repo, path, ref='HEAD', message=None, force=False, **kwargs):
    args = (path, ref)
    if message:
        kwargs['m'] = message
    if force:
        kwargs['f'] = True
    repo.git.tag(*args, **kwargs)
    return TagReference(repo, "%s/%s" % (cls._common_path_default, path))
|
Create a new tag reference.
:param path:
The name of the tag, i.e. 1.0 or releases/1.0.
The prefix refs/tags is implied
:param ref:
A reference to the object you want to tag. It can be a commit, tree or
blob.
:param message:
If not None, the message will be used in your tag object. This will also
create an additional tag object that allows to obtain that information, i.e.::
tagref.tag.message
:param force:
If True, force creation of the tag even if a tag with that name already exists.
:param kwargs:
Additional keyword arguments to be passed to git-tag
:return: A new TagReference
|
def from_filename(self, filename):
    i = len(self.base_directory)
    if filename[:i] != self.base_directory:
        raise ValueError('Filename needs to start with "%s";\nyou passed "%s".' % (self.base_directory, filename))
    if filename.endswith(self.extension):
        if len(self.extension) > 0:
            j = -len(self.extension)
        else:
            j = None
    return self.key_transformer.from_path(tuple(filename[i:j].strip('/').split('/')))
|
Convert an absolute filename into key.
|
def configure(self, options, conf):
    super(LeakDetectorPlugin, self).configure(options, conf)
    if options.leak_detector_level:
        self.reporting_level = int(options.leak_detector_level)
    self.report_delta = options.leak_detector_report_delta
    self.patch_mock = options.leak_detector_patch_mock
    self.ignore_patterns = options.leak_detector_ignore_patterns
    self.save_traceback = options.leak_detector_save_traceback
    self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False))
|
Configure plugin.
|
def get_submit_args(args):
    submit_args = dict(
        testrun_id=args.testrun_id,
        user=args.user,
        password=args.password,
        no_verify=args.no_verify,
        verify_timeout=args.verify_timeout,
        log_file=args.job_log,
        dry_run=args.dry_run,
    )
    submit_args = {k: v for k, v in submit_args.items() if v is not None}
    return Box(submit_args, frozen_box=True, default_box=True)
|
Gets arguments for the `submit_and_verify` method.
|
def serialize_job(job):
    d = dict(
        id=job.get_id(),
        uri=url_for('jobs.get_job', job_id=job.get_id(), _external=True),
        status=job.get_status(),
        result=job.result
    )
    return d
|
Return a dictionary representing the job.
|
def _process_terminal_state(self, job_record):
    msg = 'Job {0} for {1}@{2} is in the terminal state {3}, ' \
          'and is no longer governed by the State Machine {4}' \
          .format(job_record.db_id, job_record.process_name, job_record.timeperiod, job_record.state, self.name)
    self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg)
|
Logs a warning message notifying that the job is no longer governed by this state machine.
|
def _startup(cls):
    for endpoint_name in sorted(hookenv.relation_types()):
        relf = relation_factory(endpoint_name)
        if not relf or not issubclass(relf, cls):
            continue
        rids = sorted(hookenv.relation_ids(endpoint_name))
        rids = ['{}:{}'.format(endpoint_name, rid) if ':' not in rid
                else rid for rid in rids]
        endpoint = relf(endpoint_name, rids)
        cls._endpoints[endpoint_name] = endpoint
        endpoint.register_triggers()
        endpoint._manage_departed()
        endpoint._manage_flags()
        for relation in endpoint.relations:
            hookenv.atexit(relation._flush_data)
|
Create Endpoint instances and manage automatic flags.
|
def get_storage(path=None, options=None):
    path = path or settings.STORAGE
    options = options or settings.STORAGE_OPTIONS
    if not path:
        raise ImproperlyConfigured('You must specify a storage class using '
                                   'DBBACKUP_STORAGE settings.')
    return Storage(path, **options)
|
Get the specified storage configured with options.
:param path: Path in Python dot style to module containing the storage
class. If empty settings.DBBACKUP_STORAGE will be used.
:type path: ``str``
:param options: Parameters for configure the storage, if empty
settings.DBBACKUP_STORAGE_OPTIONS will be used.
:type options: ``dict``
:return: Storage configured
:rtype: :class:`.Storage`
|
def _reset(self):
    self._in_declare = False
    self._is_create = False
    self._begin_depth = 0
    self.consume_ws = False
    self.tokens = []
    self.level = 0
|
Set the filter attributes to their default values
|
def validate(self):
    for key, val in self.grammar.items():
        try:
            setattr(self, key, val)
        except ValueError as e:
            raise ValidationError('invalid contents: ' + e.args[0])
|
Validate the contents of the object.
This calls ``setattr`` for each of the class's grammar properties. It
will catch ``ValueError``s raised by the grammar property's setters
and re-raise them as :class:`ValidationError`.
|
def get_assessment_part_items(self, assessment_part_id):
    mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True)
    lookup_session = mgr.get_assessment_part_lookup_session(proxy=self._proxy)
    if self._catalog_view == ISOLATED:
        lookup_session.use_isolated_bank_view()
    else:
        lookup_session.use_federated_bank_view()
    item_ids = lookup_session.get_assessment_part(assessment_part_id).get_item_ids()
    mgr = self._get_provider_manager('ASSESSMENT')
    lookup_session = mgr.get_item_lookup_session(proxy=self._proxy)
    lookup_session.use_federated_bank_view()
    return lookup_session.get_items_by_ids(item_ids)
|
Gets the list of items mapped to the given ``AssessmentPart``.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those items that are accessible through this session.
arg: assessment_part_id (osid.id.Id): ``Id`` of the
``AssessmentPart``
return: (osid.assessment.ItemList) - list of items
raise: NotFound - ``assessment_part_id`` not found
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Extracts metadata from a Fuel-converted HDF5 file.')
    parser.add_argument("filename", help="HDF5 file to analyze")
    args = parser.parse_args(args)
    with h5py.File(args.filename, 'r') as h5file:
        interface_version = h5file.attrs.get('h5py_interface_version', 'N/A')
        fuel_convert_version = h5file.attrs.get('fuel_convert_version', 'N/A')
        fuel_convert_command = h5file.attrs.get('fuel_convert_command', 'N/A')
    message_prefix = message_prefix_template.format(
        os.path.basename(args.filename))
    message_body = message_body_template.format(
        fuel_convert_command, interface_version, fuel_convert_version)
    message = ''.join(['\n', message_prefix, '\n', '=' * len(message_prefix),
                       message_body])
    print(message)
|
Entry point for `fuel-info` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's information
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
|
def deepcopy_strip(item):
    if isinstance(item, MutableMapping):
        return {k: deepcopy_strip(v) for k, v in iteritems(item)}
    if isinstance(item, MutableSequence):
        return [deepcopy_strip(k) for k in item]
    return item
|
Make a deep copy of list and dict objects.
Intentionally do not copy attributes. This is to discard CommentedMap and
CommentedSeq metadata which is very expensive with regular copy.deepcopy.
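An illustrative call (plain dicts and lists pass through as equal plain copies; CommentedMap/CommentedSeq inputs come back as their plain equivalents):
>>> deepcopy_strip({'a': [1, {'b': 2}], 'c': 'x'})
{'a': [1, {'b': 2}], 'c': 'x'}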
|
def predict(self, data):
    with log_start_finish('_FakeRegressionResults prediction', logger):
        model_design = dmatrix(
            self._rhs, data=data, return_type='dataframe')
        return model_design.dot(self.params).values
|
Predict new values by running data through the fit model.
Parameters
----------
data : pandas.DataFrame
Table with columns corresponding to the RHS of `model_expression`.
Returns
-------
predicted : ndarray
Array of predicted values.
|
def generate_dict_schema(size, valid):
    schema = {}
    generator_items = []
    for i in range(0, size):
        while True:
            key_schema, key_generator = generate_random_schema(valid)
            if key_schema not in schema:
                break
        value_schema, value_generator = generate_random_schema(valid)
        schema[key_schema] = value_schema
        generator_items.append((key_generator, value_generator))
    generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count())
    return schema, generator
|
Generate a schema dict of size `size`.
In addition, it returns a samples generator.
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns
|
def get_mapping(version=1, exported_at=None, app_name=None):
    if exported_at is None:
        exported_at = timezone.now()
    app_name = app_name or settings.HEROKU_CONNECT_APP_NAME
    return {
        'version': version,
        'connection': {
            'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID,
            'app_name': app_name,
            'exported_at': exported_at.isoformat(),
        },
        'mappings': [
            model.get_heroku_connect_mapping()
            for model in get_heroku_connect_models()
        ]
    }
|
Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of Heroku application associated with Heroku Connect the add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``.
|
def aggregate(self, other=None):
    if not self.status:
        return self
    if not other:
        return self
    if not other.status:
        return other
    return Value(True, other.index, self.value + other.value, None)
|
collect the furthest failure from self and other.
|
def require_Gtk(min_version=2):
    if not _in_X:
        raise RuntimeError('Not in X session.')
    if _has_Gtk < min_version:
        raise RuntimeError('Module gi.repository.Gtk not available!')
    if _has_Gtk == 2:
        logging.getLogger(__name__).warn(
            _("Missing runtime dependency GTK 3. Falling back to GTK 2 "
              "for password prompt"))
    from gi.repository import Gtk
    if not Gtk.init_check(None)[0]:
        raise RuntimeError(_("X server not connected!"))
    return Gtk
|
Make sure Gtk is properly initialized.
:raises RuntimeError: if Gtk can not be properly initialized
|
def create(self, *args, **kwargs):
    try:
        return super(CloudBlockStorageManager, self).create(*args,
                                                            **kwargs)
    except exc.BadRequest as e:
        msg = e.message
        if "Clones currently must be >= original volume size" in msg:
            raise exc.VolumeCloneTooSmall(msg)
        else:
            raise
|
Catches errors that may be returned, and raises more informational
exceptions.
|
def initialize(log_file, project_dir=None, debug=False):
    print_splash()
    log.setup_logging(log_file, print_log_location=False, debug=debug)
    logger = log.get_logger('pipeline')
    if project_dir is not None:
        make_dir(os.path.normpath(project_dir))
        logger.info('PROJECT DIRECTORY: {}'.format(project_dir))
        logger.info('')
    logger.info('LOG LOCATION: {}'.format(log_file))
    print('')
    return logger
|
Initializes an AbTools pipeline.
Initialization includes printing the AbTools splash, setting up logging,
creating the project directory, and logging both the project directory
and the log location.
Args:
log_file (str): Path to the log file. Required.
project_dir (str): Path to the project directory. If not provided,
the project directory won't be created and the location won't be logged.
debug (bool): If ``True``, the logging level will be set to ``logging.DEBUG``.
Default is ``False``, which logs at ``logging.INFO``.
Returns:
logger
|