code | docstring
---|---|
def rgb_to_xterm(r, g, b):
if r < 5 and g < 5 and b < 5:
return 16
best_match = 0
smallest_distance = 10000000000
for c in range(16, 256):
d = (COLOR_TABLE[c][0] - r) ** 2 + \
(COLOR_TABLE[c][1] - g) ** 2 + \
(COLOR_TABLE[c][2] - b) ** 2
if d < smallest_distance:
smallest_distance = d
best_match = c
return best_match
|
Quantize RGB values to an xterm 256-color ID
This works by envisioning the RGB values for all 256 xterm colors
as points in 3D Euclidean space and brute-force searching for the nearest
neighbor.
This is very slow. If you're very lucky, :func:`compile_speedup`
will replace this function automatically with routines in
`_xterm256.c`.
|
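The docstring above describes a brute-force nearest-neighbour search in RGB space. A minimal sketch of that idea, using a small hypothetical palette in place of the real 256-entry `COLOR_TABLE`:

```python
# Hypothetical stand-in for the xterm COLOR_TABLE (color ID -> RGB triple).
PALETTE = {
    16: (0, 0, 0),
    21: (0, 0, 255),
    46: (0, 255, 0),
    196: (255, 0, 0),
    231: (255, 255, 255),
}

def nearest_color(r, g, b):
    # Squared Euclidean distance is enough for ranking; no sqrt is needed.
    return min(PALETTE, key=lambda c: (PALETTE[c][0] - r) ** 2
                                    + (PALETTE[c][1] - g) ** 2
                                    + (PALETTE[c][2] - b) ** 2)

print(nearest_color(250, 10, 10))  # -> 196, the entry closest to pure red
```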
def delete(self, accountId):
acct = BaseAccount.get(accountId)
if not acct:
raise Exception('No such account found')
acct.delete()
auditlog(event='account.delete', actor=session['user'].username, data={'accountId': accountId})
return self.make_response('Account deleted')
|
Delete an account
|
def write_info_file(tensorboard_info):
payload = "%s\n" % _info_to_string(tensorboard_info)
with open(_get_info_file_path(), "w") as outfile:
outfile.write(payload)
|
Write TensorBoardInfo to the current process's info file.
This should be called by `main` once the server is ready. When the
server shuts down, `remove_info_file` should be called.
Args:
tensorboard_info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `tensorboard_info` is not of the correct type.
|
def debugArgsToDict(a):
s = a.replace('+', ' ')
s = s.replace('=', ':')
s = re.sub(r'([A-Z][A-Z_]+)', r"'\1'", s)
return ast.literal_eval('{ ' + s + ' }')
|
Converts a string representation of debug arguments to a dictionary.
The string can be of the form
IDENTIFIER1=val1,IDENTIFIER2=val2
:param a: the argument string
:return: the dictionary
|
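A self-contained restatement of the transformation described above, so the example runs on its own; values are assumed to be Python literals such as numbers:

```python
import ast
import re

def debug_args_to_dict(a):
    # '+' stands in for spaces and '=' becomes the dict key/value separator.
    s = a.replace('+', ' ').replace('=', ':')
    # Quote bare UPPER_CASE identifiers so literal_eval accepts them as keys.
    s = re.sub(r'([A-Z][A-Z_]+)', r"'\1'", s)
    return ast.literal_eval('{ ' + s + ' }')

print(debug_args_to_dict('LEVEL=2,TIMEOUT=30'))
# -> {'LEVEL': 2, 'TIMEOUT': 30}
```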
def printLn(self, respType, respString):
if 'E' in respType:
respString = '(Error) ' + respString
if 'W' in respType:
respString = '(Warning) ' + respString
if 'S' in respType:
self.printSysLog(respString)
self.results['response'] = (self.results['response'] +
respString.splitlines())
return
|
Add one or more lines of output to the response list.
Input:
Response type: One or more characters indicate type of response.
E - Error message
N - Normal message
S - Output should be logged
W - Warning message
|
def flat(self, obj, mask=0):
s = self.base
if self.leng and self.item > 0:
s += self.leng(obj) * self.item
if _getsizeof:
s = _getsizeof(obj, s)
if mask:
s = (s + mask) & ~mask
return s
|
Return the aligned flat size.
|
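The only non-obvious step in `flat` is the bit trick `(s + mask) & ~mask`, which rounds a size up to the next multiple of `mask + 1` when `mask` is one less than a power-of-two alignment. A small illustration:

```python
def align_up(size, mask):
    # mask must be alignment - 1 with the alignment a power of two,
    # e.g. mask=7 rounds up to 8-byte boundaries.
    return (size + mask) & ~mask

for size in (13, 16, 17):
    print(size, '->', align_up(size, 7))
# 13 -> 16
# 16 -> 16
# 17 -> 24
```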
def process_messages_loop_internal(self):
while self.receiving_messages:
self.work_request = None
self.connection.receive_loop_with_callback(self.queue_name, self.save_work_request_and_close)
if self.work_request:
self.process_work_request()
|
Busy loop that processes incoming WorkRequest messages via functions specified by add_command.
Disconnects while servicing a message, and reconnects once finished processing it.
Terminates if a command runs the shutdown method.
|
def getsize(self, key=None):
if key is None:
return os.path.getsize(self.filename)
return hdf5.ByteCounter.get_nbytes(
h5py.File.__getitem__(self.hdf5, key))
|
Return the size in bytes of the output associated with the given key.
If no key is given, returns the total size of all files.
|
def _is_number_matching_desc(national_number, number_desc):
if number_desc is None:
return False
actual_length = len(national_number)
possible_lengths = number_desc.possible_length
if len(possible_lengths) > 0 and actual_length not in possible_lengths:
return False
return _match_national_number(national_number, number_desc, False)
|
Determine if the number matches the given PhoneNumberDesc
|
def target_id(self):
if self._target_id:
return self._target_id
if self._existing:
self._target_id = self._existing.get("target_id")
return self._target_id
|
Returns the id of the target to which this post has to be syndicated.
:returns: string
|
def set(self, dict_name, key, value, priority=None):
if priority is not None:
priorities = {key: priority}
else:
priorities = None
self.update(dict_name, {key: value}, priorities=priorities)
|
Set a single value for a single key.
This requires a session lock.
:param str dict_name: name of the dictionary to update
:param str key: key to update
:param str value: value to assign to `key`
:param int priority: priority score for the value (if any)
|
def _build_graph_run(self, run_args):
with tf.Graph().as_default() as g:
input_ = run_args.input
placeholder = tf.compat.v1.placeholder(
dtype=input_.dtype, shape=input_.shape)
output = run_args.fct(placeholder)
return GraphRun(
session=raw_nogpu_session(g),
graph=g,
placeholder=placeholder,
output=output,
)
|
Create a new graph for the given args.
|
def query_events(resource_root, query_str=None):
params = None
if query_str:
params = dict(query=query_str)
return call(resource_root.get, EVENTS_PATH, ApiEventQueryResult,
params=params)
|
Search for events.
@param query_str: Query string.
@return: A list of ApiEvent.
|
def setError(self, msg=None, title=None):
if msg is not None:
self.messageLabel.setText(msg)
if title is not None:
self.titleLabel.setText(title)
|
Shows an error message.
|
def FQP_point_to_FQ2_point(pt: Tuple[FQP, FQP, FQP]) -> Tuple[FQ2, FQ2, FQ2]:
return (
FQ2(pt[0].coeffs),
FQ2(pt[1].coeffs),
FQ2(pt[2].coeffs),
)
|
Transform FQP to FQ2 for type hinting.
|
def draw_rect(self, rect):
check_int_err(lib.SDL_RenderDrawRect(self._ptr, rect._ptr))
|
Draw a rectangle on the current rendering target.
Args:
rect (Rect): The destination rectangle, or None to outline the entire rendering target.
Raises:
SDLError: If an error is encountered.
|
def raw(mime='application/octet-stream'):
def dfn(fn):
tags = getattr(fn, 'tags', set())
tags.add('raw')
fn.tags = tags
fn.mime = getattr(fn, 'mime', mime)
return fn
return dfn
|
Constructs a decorator that marks the function
as having a raw response format.
|
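A small usage sketch of the decorator factory above, restated so it runs standalone; `handler` and its return value are hypothetical, the point is only that a tag set and a MIME type end up attached to the function object:

```python
def raw(mime='application/octet-stream'):
    def dfn(fn):
        tags = getattr(fn, 'tags', set())
        tags.add('raw')                      # mark the function as a raw response
        fn.tags = tags
        fn.mime = getattr(fn, 'mime', mime)  # keep an already-set MIME type
        return fn
    return dfn

@raw(mime='image/png')
def handler():                               # hypothetical view function
    return b'\x89PNG...'

print(handler.tags)   # {'raw'}
print(handler.mime)   # image/png
```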
def verify_integrity(self):
if not self.__integrity_check:
if not self.__appid:
raise Exception('U2F_APPID was not defined! Please define it in configuration file.')
if self.__facets_enabled and not len(self.__facets_list):
raise Exception(
)
undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!'
if not self.__get_u2f_devices:
raise Exception(undefined_message.format(name='Read', method='@u2f.read'))
if not self.__save_u2f_devices:
raise Exception(undefined_message.format(name='Save', method='@u2f.save'))
if not self.__call_success_enroll:
raise Exception(undefined_message.format(name='enroll onSuccess', method='@u2f.enroll_on_success'))
if not self.__call_success_sign:
raise Exception(undefined_message.format(name='sign onSuccess', method='@u2f.sign_on_success'))
self.__integrity_check = True
return True
|
Verifies that all required functions have been injected.
|
def _read_data_type_src(self, length):
_resv = self._read_fileng(4)
_addr = list()
for _ in range((length - 4) // 16):
_addr.append(ipaddress.ip_address(self._read_fileng(16)))
data = dict(
ip=tuple(_addr),
)
return data
|
Read IPv6-Route Source Route data.
Structure of IPv6-Route Source Route data [RFC 5095]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len | Routing Type=0| Segments Left |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Address[1] +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Address[2] +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
. . .
. . .
. . .
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Address[n] +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extension Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 - Reserved
8 64 route.ip Address
............
|
def v_grammar_unique_defs(ctx, stmt):
defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]
if stmt.parent is None:
defs.extend(
[('feature', 'FEATURE_ALREADY_DEFINED', stmt.i_features),
('identity', 'IDENTITY_ALREADY_DEFINED', stmt.i_identities),
('extension', 'EXTENSION_ALREADY_DEFINED', stmt.i_extensions)])
for (keyword, errcode, dict) in defs:
for definition in stmt.search(keyword):
if definition.arg in dict:
other = dict[definition.arg]
err_add(ctx.errors, definition.pos,
errcode, (definition.arg, other.pos))
else:
dict[definition.arg] = definition
|
Verify that all typedefs and groupings are unique.
Called for every statement.
Stores all typedefs in stmt.i_typedefs, groupings in stmt.i_groupings
|
def run(self):
while True:
try:
cursor = JSON_CLIENT.json_client['local']['oplog.rs'].find(
{'ts': {'$gt': self.last_timestamp}})
except TypeError:
pass
else:
cursor.add_option(2)
cursor.add_option(8)
cursor.add_option(32)
self._retry()
for doc in cursor:
self.last_timestamp = doc['ts']
if doc['ns'] in self.receivers:
self._run_namespace(doc)
time.sleep(1)
|
Main control loop for the thread.
|
def macro_state(self, micro_state):
assert len(micro_state) == len(self.micro_indices)
reindexed = self.reindex()
micro_state = np.array(micro_state)
return tuple(0 if sum(micro_state[list(reindexed.partition[i])])
in self.grouping[i][0] else 1
for i in self.macro_indices)
|
Translate a micro state to a macro state
Args:
micro_state (tuple[int]): The state of the micro nodes in this
coarse-graining.
Returns:
tuple[int]: The state of the macro system, translated as specified
by this coarse-graining.
Example:
>>> coarse_grain = CoarseGrain(((1, 2),), (((0,), (1, 2)),))
>>> coarse_grain.macro_state((0, 0))
(0,)
>>> coarse_grain.macro_state((1, 0))
(1,)
>>> coarse_grain.macro_state((1, 1))
(1,)
|
def from_config(cls, pyvlx, item):
name = item['name']
ident = item['id']
subtype = item['subtype']
typeid = item['typeId']
return cls(pyvlx, ident, name, subtype, typeid)
|
Read roller shutter from config.
|
def primary_from_id(self, tax_id):
s = select([self.names.c.tax_name],
and_(self.names.c.tax_id == tax_id,
self.names.c.is_primary))
res = s.execute()
output = res.fetchone()
if not output:
msg = 'value "{}" not found in names.tax_id'.format(tax_id)
raise ValueError(msg)
else:
return output[0]
|
Returns primary taxonomic name associated with tax_id
|
def is_compression_coordinate(ds, variable):
if not is_coordinate_variable(ds, variable):
return False
compress = getattr(ds.variables[variable], 'compress', None)
if not isinstance(compress, basestring):
return False
if not compress:
return False
if variable in compress:
return False
for dim in compress.split():
if dim not in ds.dimensions:
return False
return True
|
Returns True if the variable is a coordinate variable that defines a
compression scheme.
:param netCDF4.Dataset ds: An open netCDF dataset
:param str variable: Variable name
|
def enable() -> None:
if not isinstance(sys.stdout, DebugPrint):
sys.stdout = DebugPrint(sys.stdout)
|
Patch ``sys.stdout`` to use ``DebugPrint``.
|
def _check_kets(*ops, same_space=False, disjunct_space=False):
if not all([(isinstance(o, State) and o.isket) for o in ops]):
raise TypeError("All operands must be Kets")
if same_space:
if not len({o.space for o in ops if o is not ZeroKet}) == 1:
raise UnequalSpaces(str(ops))
if disjunct_space:
spc = TrivialSpace
for o in ops:
if o.space & spc > TrivialSpace:
raise OverlappingSpaces(str(ops))
spc *= o.space
|
Check that all operands are Kets from the same Hilbert space.
|
def karbasa(self, result):
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
|
Find whether the class probabilities are close to each other:
the ratio of the distance between the 1st and 2nd class
to the distance between the 1st and last class.
:param result: The dict returned by LM.calculate()
|
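A worked example of the ratio described above. Note that the probabilities are sorted ascending first, so the numerator is the gap between the two smallest values and the denominator is the full spread:

```python
all_probs = [0.70, 0.20, 0.08, 0.02]
probs = sorted(all_probs)                 # [0.02, 0.08, 0.20, 0.70]
ratio = (probs[1] - probs[0]) / (probs[-1] - probs[0])
print(round(ratio, 4))                    # 0.0882
```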
def install_supervisor(self, update=False):
script = supervisor.Recipe(
self.buildout,
self.name,
{'user': self.options.get('user'),
'program': self.options.get('program'),
'command': templ_cmd.render(config=self.conf_filename, prefix=self.prefix),
'stopwaitsecs': '30',
'killasgroup': 'true',
})
return script.install(update)
|
Install a supervisor config for redis.
|
def docker_container():
if SETUP_SPLASH:
dm = DockerManager()
dm.start_container()
try:
requests.post(f'{SPLASH_URL}/_gc')
except requests.exceptions.RequestException:
pass
yield
|
Start the Splash server on a Docker container.
If the container doesn't exist, it is created and named 'splash-detectem'.
|
def _from_dict(cls, _dict):
args = {}
xtra = _dict.copy()
if 'id' in _dict:
args['id'] = _dict.get('id')
del xtra['id']
if 'metadata' in _dict:
args['metadata'] = _dict.get('metadata')
del xtra['metadata']
if 'collection_id' in _dict:
args['collection_id'] = _dict.get('collection_id')
del xtra['collection_id']
if 'result_metadata' in _dict:
args['result_metadata'] = QueryResultMetadata._from_dict(
_dict.get('result_metadata'))
del xtra['result_metadata']
if 'title' in _dict:
args['title'] = _dict.get('title')
del xtra['title']
args.update(xtra)
return cls(**args)
|
Initialize a QueryResult object from a json dictionary.
|
def bill(self, year=None, month=None):
endpoint = '/'.join((self.server_url, '_api', 'v2', 'bill'))
return self._usage_endpoint(endpoint, year, month)
|
Retrieves Cloudant billing data, optionally for a given year and month.
:param int year: Year to query against, for example 2014.
Optional parameter. Defaults to None. If used, it must be
accompanied by ``month``.
:param int month: Month to query against that must be an integer
between 1 and 12. Optional parameter. Defaults to None.
If used, it must be accompanied by ``year``.
:returns: Billing data in JSON format
|
def run_process(*args, **kwargs):
warnings.warn(
"procrunner.run_process() is deprecated and has been renamed to run()",
DeprecationWarning,
stacklevel=2,
)
return run(*args, **kwargs)
|
API used up to version 0.2.0.
|
def nanoFTPProxy(host, port, user, passwd, type):
libxml2mod.xmlNanoFTPProxy(host, port, user, passwd, type)
|
Set up the FTP proxy information. This can also be done by
using the ftp_proxy, ftp_proxy_user and ftp_proxy_password
environment variables.
|
def pprofile(line, cell=None):
if cell is None:
return run(line)
return _main(
['%%pprofile', '-m', '-'] + shlex.split(line),
io.StringIO(cell),
)
|
Profile line execution.
|
def Query(self, query):
cursor = self._database.cursor()
cursor.execute(query)
return cursor
|
Queries the database.
Args:
query (str): SQL query.
Returns:
sqlite3.Cursor: results.
Raises:
sqlite3.DatabaseError: if querying the database fails.
|
def chosen_inline_handler(self, *custom_filters, state=None, run_task=None, **kwargs):
def decorator(callback):
self.register_chosen_inline_handler(callback, *custom_filters, state=state, run_task=run_task, **kwargs)
return callback
return decorator
|
Decorator for chosen inline query handler
Example:
.. code-block:: python3
@dp.chosen_inline_handler(lambda chosen_inline_query: True)
async def some_chosen_inline_handler(chosen_inline_query: types.ChosenInlineResult)
:param state:
:param custom_filters:
:param run_task: run callback in task (no wait results)
:param kwargs:
:return:
|
def assign_candidate(self, verse: Verse, candidate: str) -> Verse:
verse.scansion = candidate
verse.valid = True
verse.accented = self.formatter.merge_line_scansion(
verse.original, verse.scansion)
return verse
|
Helper method; make sure that the verse object is properly packaged.
:param verse:
:param candidate:
:return:
|
def increment(method):
if not hasattr(method, '__context'):
raise ContextException("Method does not have context!")
ctxt = getattr(method, '__context')
ctxt.enter()
return ctxt
|
Static method used to increment the depth of a context belonging to 'method'
:param function method: A method with a context
:rtype: caliendo.hooks.Context
:returns: The context instance for the method.
|
def delete(name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
url = conn.get_queue_url(QueueName=name)['QueueUrl']
conn.delete_queue(QueueUrl=url)
except botocore.exceptions.ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True}
|
Delete an SQS queue.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.delete myqueue region=us-east-1
|
def sorted(collection):
if len(collection) < 1:
return collection
if isinstance(collection, dict):
return sorted(collection.items(), key=lambda x: x[0])
if isinstance(list(collection)[0], Operation):
key = lambda x: x.operation_id
elif isinstance(list(collection)[0], str):
key = lambda x: SchemaObjects.get(x).name
else:
raise TypeError(type(collection[0]))
return sorted(collection, key=key)
|
Sort a dict by key,
a schema collection by schema name,
and operations by id.
|
def make_cube_slice(map_in, loge_bounds):
axis = map_in.geom.axes[0]
i0 = utils.val_to_edge(axis.edges, 10**loge_bounds[0])[0]
i1 = utils.val_to_edge(axis.edges, 10**loge_bounds[1])[0]
new_axis = map_in.geom.axes[0].slice(slice(i0, i1))
geom = map_in.geom.to_image()
geom = geom.to_cube([new_axis])
map_out = WcsNDMap(geom, map_in.data[slice(i0, i1), ...].copy())
return map_out
|
Extract a slice from a map cube object.
|
def prettify_xml(xml_str):
parsed_xml = parseString(get_string(xml_str))
pretty_xml = '\n'.join(
[line for line in parsed_xml.toprettyxml(indent=' ' * 2)
.split('\n') if line.strip()])
if not pretty_xml.endswith('\n'):
pretty_xml += '\n'
return pretty_xml
|
returns prettified XML without blank lines
based on http://stackoverflow.com/questions/14479656/
:param xml_str: the XML to be prettified
:type xml_str: str
:return: the prettified XML
:rtype: str
|
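A self-contained sketch of the same approach using only the standard library; `get_string` from the original is assumed to be a bytes/str normalisation helper and is omitted here:

```python
from xml.dom.minidom import parseString

def prettify(xml_str):
    parsed = parseString(xml_str)
    # toprettyxml() emits blank lines for whitespace-only nodes;
    # filtering on line.strip() removes them again.
    lines = [line for line in parsed.toprettyxml(indent='  ').split('\n')
             if line.strip()]
    return '\n'.join(lines) + '\n'

print(prettify('<root><a>1</a><b/></root>'))
# <?xml version="1.0" ?>
# <root>
#   <a>1</a>
#   <b/>
# </root>
```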
def c_source(self):
relocs = Relocs(
''.join(self.c_self_relocs()), *self.c_module_relocs()
)
return Source(
''.join(self.c_typedefs()),
'' if self.opts.no_structs else self.c_struct(),
''.join(self.c_hashes()),
''.join(self.c_var_decls()),
relocs,
self.c_loadlib() + ''.join(self.c_getprocs())
)
|
Return strings.
|
def resolve_request_path(requested_uri):
from settings import PATH_ALIASES
for key, val in PATH_ALIASES.items():
if re.match(key, requested_uri):
return re.sub(key, val, requested_uri)
return requested_uri
|
Check for any aliases and alter the path accordingly.
Returns resolved_uri
|
def as_index(keys, axis=semantics.axis_default, base=False, stable=True, lex_as_struct=False):
if isinstance(keys, Index):
if type(keys) is BaseIndex and base==False:
keys = keys.keys
else:
return keys
if isinstance(keys, tuple):
if lex_as_struct:
keys = as_struct_array(*keys)
else:
return LexIndex(keys, stable)
try:
keys = np.asarray(keys)
except:
raise TypeError('Given object does not form a valid set of keys')
if axis is None:
keys = keys.flatten()
if keys.ndim==1:
if base:
return BaseIndex(keys)
else:
return Index(keys, stable=stable)
else:
return ObjectIndex(keys, axis, stable=stable)
|
Casting rules for a keys object to an index object.
The preferred semantics is that keys is a sequence of key objects,
except when keys is an instance of tuple,
in which case the zipped elements of the tuple are the key objects.
The axis keyword specifies the axis which enumerates the keys:
if axis is None, the keys array is flattened;
if axis is 0, the first axis enumerates the keys.
Which of the two is the default depends on whether backwards_compatible == True.
If base == True, the most basic index possible is constructed;
this avoids an indirect sort, so if the extra functionality isn't required, it has better performance.
|
def autocomplete(self):
params = self.set_lay_params()
logging.info("PARAMS="+str(params))
results = self.solr.search(**params)
logging.info("Docs found: {}".format(results.hits))
return self._process_layperson_results(results)
|
Execute solr query for autocomplete
|
def multi_reciprocal_extra(xs, ys, noise=False):
ns = np.linspace(0.5, 6.0, num=56)
best = ['', np.inf]
fit_results = {}
weights = get_weights(xs, ys)
for n in ns:
popt = extrapolate_reciprocal(xs, ys, n, noise)
m = measure(reciprocal, xs, ys, popt, weights)
pcov = []
fit_results.update({n: {'measure': m, 'popt': popt, 'pcov': pcov}})
for n in fit_results:
if fit_results[n]['measure'] <= best[1]:
best = reciprocal, fit_results[n]['measure'], n
return fit_results[best[2]]['popt'], fit_results[best[2]]['pcov'], best
|
For a series of powers ns, calculates the parameters for which the last two points lie on the curve.
With these parameters, measures how well the other data points fit.
Returns the best fit.
|
def init_distance_ttable(wordlist, distance_function):
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r]
else:
t_table[r, c] = distance_function(w, v) + 0.001
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
|
Initialize pair-wise rhyme strengths according to the given word distance function.
|
def force_log(self, logType, message, data=None, tback=None, stdout=True, file=True):
log = self._format_message(logType=logType, message=message, data=data, tback=tback)
if stdout:
self.__log_to_stdout(self.__logTypeFormat[logType][0] + log + self.__logTypeFormat[logType][1] + "\n")
try:
self.__stdout.flush()
except:
pass
try:
os.fsync(self.__stdout.fileno())
except:
pass
if file:
self.__log_to_file(log)
self.__log_to_file("\n")
try:
self.__logFileStream.flush()
except:
pass
try:
os.fsync(self.__logFileStream.fileno())
except:
pass
self.__lastLogged[logType] = log
self.__lastLogged[-1] = log
|
Force logging of a message of a certain log type, whether or not that log type's level is allowed.
:Parameters:
#. logType (string): A defined logging type.
#. message (string): Any message to log.
#. tback (None, str, list): Stack traceback to print and/or write to
log file. In general, this should be traceback.extract_stack
#. stdout (boolean): Whether to force logging to standard output.
#. file (boolean): Whether to force logging to file.
|
def fetch_all_records(self):
api = self.doapi_manager
return map(self._record, api.paginate(self.record_url, 'domain_records'))
|
r"""
Returns a generator that yields all of the DNS records for the domain
:rtype: generator of `DomainRecord`\ s
:raises DOAPIError: if the API endpoint replies with an error
|
def ccmodmsk_class_label_lookup(label):
clsmod = {'ism': admm_ccmod.ConvCnstrMODMaskDcpl_IterSM,
'cg': admm_ccmod.ConvCnstrMODMaskDcpl_CG,
'cns': admm_ccmod.ConvCnstrMODMaskDcpl_Consensus,
'fista': fista_ccmod.ConvCnstrMODMask}
if label in clsmod:
return clsmod[label]
else:
raise ValueError('Unknown ConvCnstrMODMask solver method %s' % label)
|
Get a ConvCnstrMODMask class from a label string.
|
def add_property(self, prop, objects=()):
self._properties.add(prop)
self._objects |= objects
self._pairs.update((o, prop) for o in objects)
|
Add a property to the definition and add ``objects`` as related.
|
def save(self, filename=None):
if self.__fname is None and filename is None:
raise ValueError('Config loaded from string, no filename specified')
conf = self.__config
cpa = dict_to_cp(conf)
with open(self.__fname if filename is None else filename, 'w') as f:
cpa.write(f)
|
Write config to file.
|
def add(self, organization, domain=None, is_top_domain=False, overwrite=False):
if not organization:
return CMD_SUCCESS
if not domain:
try:
api.add_organization(self.db, organization)
except InvalidValueError as e:
raise RuntimeError(str(e))
except AlreadyExistsError as e:
msg = "organization '%s' already exists in the registry" % organization
self.error(msg)
return e.code
else:
try:
api.add_domain(self.db, organization, domain,
is_top_domain=is_top_domain,
overwrite=overwrite)
except InvalidValueError as e:
raise RuntimeError(str(e))
except AlreadyExistsError as e:
msg = "domain '%s' already exists in the registry" % domain
self.error(msg)
return e.code
except NotFoundError as e:
self.error(str(e))
return e.code
return CMD_SUCCESS
|
Add organizations and domains to the registry.
This method adds the given 'organization' or 'domain' to the registry,
but not both at the same time.
When 'organization' is the only parameter given, it will be added to
the registry. When 'domain' parameter is also given, the function will
assign it to 'organization'. In this case, 'organization' must exist in
the registry prior to adding the domain.
A domain can only be assigned to one company. If the given domain is already
in the registry, the method will fail. Set 'overwrite' to 'True' to create
the new relationship. In this case, previous relationships will be removed.
The new domain can be also set as a top domain. That is useful to avoid
the insertion of sub-domains that belong to the same organization (e.g.
eu.example.com, us.example.com). Take into account that when 'overwrite' is set,
it will update the 'is_top_domain' flag too.
:param organization: name of the organization to add
:param domain: domain to add to the registry
:param is_top_domain: set the domain as a top domain
:param overwrite: force to reassign the domain to the given company
|
def get_nice_to_pegasus_fn(*args, **kwargs):
if args or kwargs:
warnings.warn("Deprecation warning: get_pegasus_to_nice_fn does not need / use parameters anymore")
def c2p0(y, x, u, k): return (u, y+1 if u else x, 4+k if u else 4+k, x if u else y)
def c2p1(y, x, u, k): return (u, y+1 if u else x, k if u else 8+k, x if u else y)
def c2p2(y, x, u, k): return (u, y if u else x + 1, 8+k if u else k, x if u else y)
def n2p(t, y, x, u, k): return [c2p0, c2p1, c2p2][t](y, x, u, k)
return n2p
|
Returns a coordinate translation function from the 5-term "nice"
coordinates to the 4-term pegasus_index coordinates.
Details on the returned function, nice_to_pegasus(t, y, x, u, k)
Inputs are 5-tuples of ints, return is a 4-tuple of ints. See
pegasus_graph for description of the pegasus_index and "nice"
coordinate systems.
Returns
-------
nice_to_pegasus_fn(nice_coordinates): a function
A function that accepts augmented chimera ("nice") coordinates and returns the
corresponding Pegasus coordinates
|
def __thread_started(self):
if self.__task is None:
raise RuntimeError('Unable to start thread without "start" method call')
self.__task.start()
self.__task.start_event().wait(self.__scheduled_task_startup_timeout__)
|
Start a scheduled task
:return: None
|
def calcMhFromMz(mz, charge):
mh = (mz * charge) - (maspy.constants.atomicMassProton * (charge-1) )
return mh
|
Calculate the MH+ value from mz and charge.
:param mz: float, mass to charge ratio (Dalton / charge)
:param charge: int, charge state
:returns: mass to charge ratio of the mono protonated ion (charge = 1)
|
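A worked example of the formula above, with the proton mass written out rather than taken from `maspy.constants`:

```python
PROTON_MASS = 1.00727646688  # Da

def calc_mh_from_mz(mz, charge):
    # Remove (charge - 1) protons so that exactly one proton remains on the ion.
    return (mz * charge) - PROTON_MASS * (charge - 1)

# A doubly charged ion observed at m/z 500.5:
print(calc_mh_from_mz(500.5, 2))  # -> ~999.993 Da for the MH+ ion
```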
def using_config(_func=None):
def decorator(func):
@wraps(func)
def inner_dec(*args, **kwargs):
g = func.__globals__
var_name = 'config'
sentinel = object()
oldvalue = g.get(var_name, sentinel)
g[var_name] = apps.get_app_config('django_summernote').config
try:
res = func(*args, **kwargs)
finally:
if oldvalue is sentinel:
del g[var_name]
else:
g[var_name] = oldvalue
return res
return inner_dec
if _func is None:
return decorator
else:
return decorator(_func)
|
This allows a function to use Summernote configuration
as a global variable, temporarily.
|
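The decorator above works by temporarily injecting a value into the wrapped function's module globals and restoring the previous binding afterwards. A framework-free sketch of that pattern, with a plain dict standing in for the Summernote app config:

```python
from functools import wraps

def with_temporary_global(name, value):
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            g = func.__globals__
            sentinel = object()
            old = g.get(name, sentinel)
            g[name] = value                   # inject the temporary global
            try:
                return func(*args, **kwargs)
            finally:                          # always restore the previous binding
                if old is sentinel:
                    del g[name]
                else:
                    g[name] = old
        return inner
    return decorator

@with_temporary_global('config', {'theme': 'dark'})
def show_theme():
    return config['theme']                    # resolved via the injected global

print(show_theme())            # dark
print('config' in globals())   # False -- the name is gone again
```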
def generate(env):
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f90'
env['FORTRAN'] = fcomp
env['F90'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF90'] = '$F90'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
|
Add Builders and construction variables for sun f90 compiler to an
Environment.
|
def get_uid(brain_or_object):
if is_portal(brain_or_object):
return '0'
if is_brain(brain_or_object) and base_hasattr(brain_or_object, "UID"):
return brain_or_object.UID
return get_object(brain_or_object).UID()
|
Get the Plone UID for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Plone UID
:rtype: string
|
def read(self):
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
data = to_str(self.data)
obj = self._get_object_parser(
self._combine_lines(data.split('\n'))
)
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
|
Read the whole JSON input into a pandas object.
|
def search_next(self, obj):
if 'meta' in obj and 'next' in obj['meta'] and obj['meta']['next'] != None:
uri = self.api_url % obj['meta']['next']
header, content = self._http_uri_request(uri)
resp = json.loads(content)
if not self._is_http_response_ok(header):
error = resp.get('error_message', 'Unknown Error')
raise HttpException(header.status, header.reason, error)
return resp
return {}
|
Takes the dictionary that is returned by 'search' or 'search_next' function and gets the next batch of results
Args:
obj: dictionary returned by the 'search' or 'search_next' function
Returns:
A dictionary with the data returned by the server
Raises:
HttpException with the error message from the server
|
def click(self, x: int, y: int) -> None:
self._execute('-s', self.device_sn, 'shell',
'input', 'tap', str(x), str(y))
|
Simulate finger click.
|
def get_intersection(self, division):
try:
return IntersectRelationship.objects.get(
from_division=self, to_division=division
).intersection
except ObjectDoesNotExist:
raise Exception("No intersecting relationship with that division.")
|
Get intersection percentage of intersecting divisions.
|
def filter_data(df, filter_name, verbose=False):
"Filter certain entries with given name."
df = df[df.stop_name.apply(
lambda cell: filter_name.encode('utf-8') in cell)]
if verbose:
msg = '- Filtered down to %d entries containing "%s".'
print(msg % (len(df), filter_name))
return df
|
Filter certain entries with given name.
|
def random_output(self, max=100):
output = []
item1 = item2 = MarkovChain.START
for i in range(max-3):
item3 = self[(item1, item2)].roll()
if item3 is MarkovChain.END:
break
output.append(item3)
item1 = item2
item2 = item3
return output
|
Generate a list of elements from the markov chain.
The `max` value is in place in order to prevent excessive iteration.
|
def _get_bounds(mapper, values):
array = np.array([mapper.get(x) for x in values])
return array[:, 0], array[:, 1]
|
Extract first and second value from tuples of mapped bins.
|
def close(self):
self._outfile.write(struct.pack('>2h', 4, 0x0400))
if self._close:
self._outfile.close()
|
Finalize the GDSII stream library.
|
async def set_topic(self, channel, topic):
if not self.is_channel(channel):
raise ValueError('Not a channel: {}'.format(channel))
elif not self.in_channel(channel):
raise NotInChannel(channel)
await self.rawmsg('TOPIC', channel, topic)
|
Set topic on channel.
Users should only rely on the topic actually being changed when receiving an on_topic_change callback.
|
def replace_grist (features, new_grist):
assert is_iterable_typed(features, basestring) or isinstance(features, basestring)
assert isinstance(new_grist, basestring)
single_item = False
if isinstance(features, str):
features = [features]
single_item = True
result = []
for feature in features:
grist, split, value = feature.partition('>')
if not value and not split:
value = grist
result.append(new_grist + value)
if single_item:
return result[0]
return result
|
Replaces the grist of a string by a new one.
Returns the string with the new grist.
|
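A simplified, Python-3-only sketch of the behaviour described above; in Boost.Build terminology, the 'grist' is the `<...>` prefix attached to a feature value:

```python
def replace_grist(features, new_grist):
    single = isinstance(features, str)
    if single:
        features = [features]
    result = []
    for feature in features:
        grist, sep, value = feature.partition('>')
        if not value and not sep:
            value = grist                 # no grist present: keep the bare value
        result.append(new_grist + value)
    return result[0] if single else result

print(replace_grist('<toolset>gcc', '<variant>'))      # <variant>gcc
print(replace_grist(['gcc', '<link>shared'], '<x>'))   # ['<x>gcc', '<x>shared']
```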
def get_build_container_dir(self, arch):
dir_name = self.get_dir_name()
return join(self.ctx.build_dir, 'other_builds',
dir_name, '{}__ndk_target_{}'.format(arch, self.ctx.ndk_api))
|
Given the arch name, returns the directory where it will be
built.
This returns a different directory depending on what
alternative or optional dependencies are being built.
|
def spare_disk(self, disk_xml=None):
spare_disk = {}
disk_types = set()
for filer_disk in disk_xml:
disk_types.add(filer_disk.find('effective-disk-type').text)
if not filer_disk.find('raid-state').text == 'spare':
continue
disk_type = filer_disk.find('effective-disk-type').text
if disk_type in spare_disk:
spare_disk[disk_type] += 1
else:
spare_disk[disk_type] = 1
for disk_type in disk_types:
if disk_type in spare_disk:
self.push('spare_' + disk_type, 'disk', spare_disk[disk_type])
else:
self.push('spare_' + disk_type, 'disk', 0)
|
Number of spare disks per type.
For example: storage.ontap.filer201.disk.SATA
|
def save(self, fp, mode='wb'):
if hasattr(fp, 'write'):
self._record.write(fp)
else:
with open(fp, mode) as f:
self._record.write(f)
|
Save the PSD file.
:param fp: filename or file-like object.
:param mode: file open mode, default 'wb'.
|
def sync_fork(gh_token, github_repo_id, repo, push=True):
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
|
Sync the current branch in this fork against the direct parent on Github
|
def select_entry(self, *arguments):
matches = self.smart_search(*arguments)
if len(matches) > 1:
logger.info("More than one match, prompting for choice ..")
labels = [entry.name for entry in matches]
return matches[labels.index(prompt_for_choice(labels))]
else:
logger.info("Matched one entry: %s", matches[0].name)
return matches[0]
|
Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`).
|
def shutdown_executors(wait=True):
return {k: shutdown_executor(k, wait) for k in list(_EXECUTORS.keys())}
|
Clean-up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:return:
Shutdown pool executor.
:rtype: dict[str,dict]
|
def intersect(self, r):
if not len(r) == 4:
raise ValueError("bad sequ. length")
self.x0, self.y0, self.x1, self.y1 = TOOLS._intersect_rect(self, r)
return self
|
Restrict self to common area with rectangle r.
|
def names(self):
ret = set()
for arr in self:
if isinstance(arr, InteractiveList):
ret.update(arr.names)
else:
ret.add(arr.name)
return ret
|
Set of the variable names in this list
|
def render(self, name, value, attrs=None, **kwargs):
output = super(Select2Mixin, self).render(
name, value, attrs=attrs, **kwargs)
id_ = attrs['id']
output += self.render_js_code(
id_, name, value, attrs=attrs, **kwargs)
return mark_safe(output)
|
Extend base class's `render` method by appending
javascript inline text to html output.
|
def make_sentence(list_words):
lw_len = len(list_words)
if lw_len > 6:
list_words.insert(lw_len // 2 + random.choice(range(-2, 2)), ',')
sentence = ' '.join(list_words).replace(' ,', ',')
return sentence.capitalize() + '.'
|
Return a sentence from list of words.
:param list list_words: list of words
:returns: sentence
:rtype: str
|
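A quick illustration, restated so it runs standalone; the comma position is randomised, so the exact output varies between runs:

```python
import random

def make_sentence(list_words):
    lw_len = len(list_words)
    if lw_len > 6:
        # Drop a comma roughly in the middle of longer sentences.
        list_words.insert(lw_len // 2 + random.choice(range(-2, 2)), ',')
    sentence = ' '.join(list_words).replace(' ,', ',')
    return sentence.capitalize() + '.'

words = ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
print(make_sentence(words))
# e.g. "The quick brown, fox jumps over the lazy dog."
```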
def get_environment_vars(filename):
if sys.platform == "linux" or sys.platform == "linux2":
return {
'LD_PRELOAD': path.join(LIBFAKETIME_DIR, "libfaketime.so.1"),
'FAKETIME_SKIP_CMDS': 'nodejs',
'FAKETIME_TIMESTAMP_FILE': filename,
}
elif sys.platform == "darwin":
return {
'DYLD_INSERT_LIBRARIES': path.join(LIBFAKETIME_DIR, "libfaketime.1.dylib"),
'DYLD_FORCE_FLAT_NAMESPACE': '1',
'FAKETIME_TIMESTAMP_FILE': filename,
}
else:
raise RuntimeError("libfaketime does not support '{}' platform".format(sys.platform))
|
Return a dict of environment variables required to run a service under faketime.
|
def get_default_name(self):
long_names = [name for name in self.name if name.startswith("--")]
short_names = [name for name in self.name if not name.startswith("--")]
if long_names:
return to_snake_case(long_names[0].lstrip("-"))
return to_snake_case(short_names[0].lstrip("-"))
|
Return the default generated name to store value on the parser for this option.
eg. An option *['-s', '--use-ssl']* will generate the *use_ssl* name
Returns:
str: the default name of the option
|
def split_number_and_unit(s):
if not s:
raise ValueError('empty value')
s = s.strip()
pos = len(s)
while pos and not s[pos-1].isdigit():
pos -= 1
number = int(s[:pos])
unit = s[pos:].strip()
return (number, unit)
|
Parse a string that consists of an integer number and an optional unit.
@param s a non-empty string that starts with an int and is followed by some letters
@return a pair of the number (as int) and the unit (as a string)
|
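A condensed restatement with a few example inputs; the empty-string guard from the original is dropped for brevity:

```python
def split_number_and_unit(s):
    s = s.strip()
    pos = len(s)
    # Walk back over the trailing letters to find where the digits end.
    while pos and not s[pos - 1].isdigit():
        pos -= 1
    return int(s[:pos]), s[pos:].strip()

print(split_number_and_unit('800 MB'))   # (800, 'MB')
print(split_number_and_unit('16s'))      # (16, 's')
print(split_number_and_unit('42'))       # (42, '')
```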
def suggestions(self, word):
suggestions = set(self._misspelling_dict.get(word, [])).union(
set(self._misspelling_dict.get(word.lower(), [])))
return sorted([same_case(source=word, destination=w)
for w in suggestions])
|
Returns a list of suggestions for a misspelled word.
Args:
word: The word to check.
Returns:
List of zero or more suggested replacements for word.
|
def stream_text(text, chunk_size=default_chunk_size):
if isgenerator(text):
def binary_stream():
for item in text:
if six.PY2 and isinstance(item, six.binary_type):
yield item
else:
yield item.encode("utf-8")
data = binary_stream()
elif six.PY2 and isinstance(text, six.binary_type):
data = text
else:
data = text.encode("utf-8")
return stream_bytes(data, chunk_size)
|
Gets a buffered generator for streaming text.
Returns a buffered generator which encodes a string as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
text : str
The text to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
|
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
maxfigsize = 120
calcfigsize = dfr.shape[0] * 1.1
figsize = min(max(8, calcfigsize), maxfigsize)
if figsize == maxfigsize:
scale = maxfigsize / calcfigsize
sns.set_context("notebook", font_scale=scale)
if params.classes is None:
col_cb = None
else:
col_cb = get_seaborn_colorbar(dfr, params.classes)
params.labels = get_safe_seaborn_labels(dfr, params.labels)
params.colorbar = col_cb
params.figsize = figsize
params.linewidths = 0.25
fig = get_seaborn_clustermap(dfr, params, title=title)
if outfilename:
fig.savefig(outfilename)
return fig
|
Returns seaborn heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
|
def length2mesh(length, lattice, rotations=None):
rec_lattice = np.linalg.inv(lattice)
rec_lat_lengths = np.sqrt(np.diagonal(np.dot(rec_lattice.T, rec_lattice)))
mesh_numbers = np.rint(rec_lat_lengths * length).astype(int)
if rotations is not None:
reclat_equiv = get_lattice_vector_equivalence(
[r.T for r in np.array(rotations)])
m = mesh_numbers
mesh_equiv = [m[1] == m[2], m[2] == m[0], m[0] == m[1]]
for i, pair in enumerate(([1, 2], [2, 0], [0, 1])):
if reclat_equiv[i] and not mesh_equiv[i]:
m[pair] = max(m[pair])
return np.maximum(mesh_numbers, [1, 1, 1])
|
Convert length to mesh for q-point sampling
This conversion for each reciprocal axis follows VASP convention by
N = max(1, int(l * |a|^* + 0.5))
'int' means rounding down, not rounding to nearest integer.
Parameters
----------
length : float
Length having the unit of direct space length.
lattice : array_like
Basis vectors of primitive cell in row vectors.
dtype='double', shape=(3, 3)
rotations: array_like, optional
Rotation matrices in real space. When given, mesh numbers that are
symmetrically reasonable are returned. Default is None.
dtype='intc', shape=(rotations, 3, 3)
Returns
-------
array_like
dtype=int, shape=(3,)
|
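A worked example of the conversion for a simple cubic cell, using the same `inv(lattice)` reciprocal-lattice convention as the function (no 2*pi factor):

```python
import numpy as np

lattice = np.diag([4.0, 4.0, 4.0])    # simple cubic cell, a = 4 Angstrom
length = 20.0                          # requested sampling length

rec_lattice = np.linalg.inv(lattice)   # reciprocal basis vectors (no 2*pi)
rec_lengths = np.sqrt(np.diagonal(rec_lattice.T @ rec_lattice))
mesh = np.maximum(np.rint(rec_lengths * length).astype(int), [1, 1, 1])
print(mesh)                            # [5 5 5] -> a 5 x 5 x 5 q-point mesh
```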
def run_export_db(filename=None):
if not filename:
filename = settings.DB_DUMP_FILENAME
with cd(settings.FAB_SETTING('SERVER_PROJECT_ROOT')):
run_workon('fab export_db:remote=True,filename={}'.format(filename))
|
Exports the database on the server.
Usage::
fab prod run_export_db
fab prod run_export_db:filename=foobar.dump
|
def class_name_for_data_type(data_type, ns=None):
assert is_user_defined_type(data_type) or is_alias(data_type), \
'Expected composite type, got %r' % type(data_type)
name = fmt_class(data_type.name)
if ns:
return prefix_with_ns_if_necessary(name, data_type.namespace, ns)
return name
|
Returns the name of the Python class that maps to a user-defined type.
The name is identical to the name in the spec.
If ``ns`` is set to a Namespace and the namespace of `data_type` does
not match, then a namespace prefix is added to the returned name.
For example, ``foreign_ns.TypeName``.
|
def _validate_applications(self, apps):
for application_id, application_config in apps.items():
self._validate_config(application_id, application_config)
application_config["APPLICATION_ID"] = application_id
|
Validate the application collection
|
def showMonitors(cls):
Debug.info("*** monitor configuration [ {} Screen(s)] ***".format(cls.getNumberScreens()))
Debug.info("*** Primary is Screen {}".format(cls.primaryScreen))
for index, screen in enumerate(PlatformManager.getScreenDetails()):
Debug.info("Screen {}: ({}, {}, {}, {})".format(index, *screen["rect"]))
Debug.info("*** end monitor configuration ***")
|
Prints debug information about currently detected screens
|
def get_street(self, **kwargs):
params = {
'description': kwargs.get('street_name'),
'streetNumber': kwargs.get('street_number'),
'Radius': kwargs.get('radius'),
'Stops': kwargs.get('stops'),
'cultureInfo': util.language_code(kwargs.get('lang'))
}
result = self.make_request('geo', 'get_street', **params)
if not util.check_result(result, 'site'):
return False, 'UNKNOWN ERROR'
values = util.response_list(result, 'site')
return True, [emtype.Site(**a) for a in values]
|
Obtain a list of nodes related to a location within a given radius.
Not sure of its use, but...
Args:
street_name (str): Name of the street to search.
street_number (int): Street number to search.
radius (int): Radius (in meters) of the search.
stops (int): Number of the stop to search.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Site]), or message string
in case of error.
|
def restore_collection(backup):
for k, v in six.iteritems(backup):
del tf.get_collection_ref(k)[:]
tf.get_collection_ref(k).extend(v)
|
Restore from a collection backup.
Args:
backup (dict):
|
def _build_command_ids(issued_commands):
if isinstance(issued_commands, IssuedCommand):
entry = issued_commands._proto.commandQueueEntry
return [entry.cmdId]
else:
return [issued_command._proto.commandQueueEntry.cmdId
for issued_command in issued_commands]
|
Builds a list of CommandId.
|
def _insert_text_buf(self, line, idx):
self._bytes_012[idx] = 0
self._bytes_345[idx] = 0
I = np.array([ord(c) - 32 for c in line[:self._n_cols]])
I = np.clip(I, 0, len(__font_6x8__)-1)
if len(I) > 0:
b = __font_6x8__[I]
self._bytes_012[idx, :len(I)] = b[:, :3]
self._bytes_345[idx, :len(I)] = b[:, 3:]
|
Insert text into bytes buffers
|
def get_accent_char(char):
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE
|
Get the accent of a single char, if any.
|
def _verify_student_input(self, student_input, locked):
guesses = [student_input]
try:
guesses.append(repr(ast.literal_eval(student_input)))
except Exception:
pass
if student_input.title() in self.SPECIAL_INPUTS:
guesses.append(student_input.title())
for guess in guesses:
if self._verify(guess, locked):
return guess
|
If the student's answer is correct, returns the normalized answer.
Otherwise, returns None.
|
def item_at_line(root_item, line):
previous_item = root_item
item = root_item
for item in get_item_children(root_item):
if item.line > line:
return previous_item
previous_item = item
else:
return item
|
Find and return the item of the outline explorer under which
the specified 'line' of the editor is located.
|
def update(self, validate=False):
rs = self.connection.get_all_snapshots([self.id])
if len(rs) > 0:
self._update(rs[0])
elif validate:
raise ValueError('%s is not a valid Snapshot ID' % self.id)
return self.progress
|
Update the data associated with this snapshot by querying EC2.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
snapshot the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
|