def get_moderation(request):
"""Return the list of publications that need moderation."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, created, publisher, publication_message,
(select array_agg(row_to_json(pd))
from pending_documents as pd
where pd.publication_id = p.id) AS models
FROM publications AS p
WHERE state = 'Waiting for moderation') AS combined_rows""")
moderations = [x[0] for x in cursor.fetchall()]
return moderations
def _change_precision(self, val, base=0):
"""
Check and normalise the value of precision (must be positive integer).
Args:
val (INT): must be positive integer
base (INT): Description
Returns:
VAL (INT): Description
"""
if not isinstance(val, int):
raise TypeError('The first argument must be an integer.')
val = round(abs(val))
val = (lambda num: base if is_num(num) else num)(val)
return val
def _add_file(self, key, path):
"""Copy a file into the reference package."""
filename = os.path.basename(path)
base, ext = os.path.splitext(filename)
if os.path.exists(self.file_path(filename)):
with tempfile.NamedTemporaryFile(
dir=self.path, prefix=base, suffix=ext) as tf:
filename = os.path.basename(tf.name)
shutil.copyfile(path, self.file_path(filename))
self.contents['files'][key] = filename
def Open(self):
"""Opens the process for reading."""
self.h_process = kernel32.OpenProcess(
PROCESS_VM_READ | PROCESS_QUERY_INFORMATION, 0, self.pid)
if not self.h_process:
raise process_error.ProcessError(
"Failed to open process (pid %d)." % self.pid)
if self.Is64bit():
si = self.GetNativeSystemInfo()
self.max_addr = si.lpMaximumApplicationAddress
else:
si = self.GetSystemInfo()
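# The hard-coded value below is 0x7FFEFFFF, the default upper bound of the
# user address space reported for 32-bit processes.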
self.max_addr = 2147418111
self.min_addr = si.lpMinimumApplicationAddress
def _parse_fields_http(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('IPASN._parse_fields_http() has been deprecated and will be '
'removed. You should now use IPASN.parse_fields_http().')
return self.parse_fields_http(*args, **kwargs)
def enable_pretty_logging(options: Any = None, logger: logging.Logger = None) -> None:
"""Turns on formatted logging output as configured.
This is called automatically by `tornado.options.parse_command_line`
and `tornado.options.parse_config_file`.
"""
if options is None:
import tornado.options
options = tornado.options.options
if options.logging is None or options.logging.lower() == "none":
return
if logger is None:
logger = logging.getLogger()
logger.setLevel(getattr(logging, options.logging.upper()))
if options.log_file_prefix:
rotate_mode = options.log_rotate_mode
if rotate_mode == "size":
channel = logging.handlers.RotatingFileHandler(
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups,
encoding="utf-8",
) # type: logging.Handler
elif rotate_mode == "time":
channel = logging.handlers.TimedRotatingFileHandler(
filename=options.log_file_prefix,
when=options.log_rotate_when,
interval=options.log_rotate_interval,
backupCount=options.log_file_num_backups,
encoding="utf-8",
)
else:
error_message = (
"The value of log_rotate_mode option should be "
+ '"size" or "time", not "%s".' % rotate_mode
)
raise ValueError(error_message)
channel.setFormatter(LogFormatter(color=False))
logger.addHandler(channel)
if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers):
# Set up color if we are in a tty and curses is installed
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger.addHandler(channel)
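# A brief usage sketch (assuming Tornado's global options object): per the
# docstring above, parse_command_line() invokes enable_pretty_logging() for you.
# import logging
# import tornado.options
# tornado.options.parse_command_line()
# logging.getLogger().info("formatted logging is now configured")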
def interleave(*arrays,**kwargs):
'''
arr1 = [1,2,3,4]
arr2 = ['a','b','c','d']
arr3 = ['@','#','%','*']
interleave(arr1,arr2,arr3)
'''
anum = arrays.__len__()
rslt = []
length = arrays[0].__len__()
for j in range(0,length):
for i in range(0,anum):
array = arrays[i]
rslt.append(array[j])
return(rslt)
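# Usage sketch (not part of the original snippet): elements are taken
# round-robin, one index position at a time across the input arrays.
example = interleave([1, 2, 3, 4], ['a', 'b', 'c', 'd'], ['@', '#', '%', '*'])
# example == [1, 'a', '@', 2, 'b', '#', 3, 'c', '%', 4, 'd', '*']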
def add_or_update_records(cls, tables: I2B2Tables, records: List["ObservationFact"]) -> Tuple[int, int]:
"""
Add or update the observation_fact table as needed to reflect the contents of records
:param tables: i2b2 sql connection
:param records: records to apply
:return: number of records added / modified
"""
return cls._add_or_update_records(tables.crc_connection, tables.observation_fact, records)
def get(self, remote_file, local_file):
"""
Download a file from the remote host via SFTP.
:param remote_file:
:param local_file:
:return:
"""
sftp = self.get_sftp()
try:
sftp.get(remote_file, local_file)
except Exception as e:
logger.error('Failed to download file')
logger.error('remote: %s, local: %s' % (remote_file, local_file))
logger.error(e)
def com_google_fonts_check_family_equal_font_versions(ttFonts):
"""Make sure all font files have the same version value."""
all_detected_versions = []
fontfile_versions = {}
for ttFont in ttFonts:
v = ttFont['head'].fontRevision
fontfile_versions[ttFont] = v
if v not in all_detected_versions:
all_detected_versions.append(v)
if len(all_detected_versions) != 1:
versions_list = ""
for v in fontfile_versions.keys():
versions_list += "* {}: {}\n".format(v.reader.file.name,
fontfile_versions[v])
yield WARN, ("version info differs among font"
" files of the same font project.\n"
"These were the version values found:\n"
"{}").format(versions_list)
else:
yield PASS, "All font files have the same version."
def grow(self, *args):
""" Creates a region around the given point Valid arguments:
* ``grow(wh)`` - Creates a region centered on this point with a width and height of ``wh``.
* ``grow(w, h)`` - Creates a region centered on this point with a width of ``w`` and height
of ``h``.
* ``grow(Region.CREATE_X_DIRECTION, Region.CREATE_Y_DIRECTION, w, h)`` - Creates a region
with this point as one corner, expanding in the specified direction
"""
if len(args) == 1:
return Region.grow(self.x, self.y, args[0], args[0])
elif len(args) == 2:
return Region(self.x, self.y, args[0], args[1])
elif len(args) == 4:
return Region.create(self, *args)
else:
raise ValueError("Unrecognized arguments for grow") | Creates a region around the given point Valid arguments:
* ``grow(wh)`` - Creates a region centered on this point with a width and height of ``wh``.
* ``grow(w, h)`` - Creates a region centered on this point with a width of ``w`` and height
of ``h``.
* ``grow(Region.CREATE_X_DIRECTION, Region.CREATE_Y_DIRECTION, w, h)`` - Creates a region
with this point as one corner, expanding in the specified direction |
def locate(pattern, root=os.curdir):
"""Locate all files matching supplied filename pattern recursively."""
for path, dummy, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
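# Usage sketch (hypothetical directory name): lazily walk a tree and print
# every file whose name matches a glob pattern.
# for path in locate('*.py', root='src'):
#     print(path)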
def _create_checkable_action(self, text, conf_name, editorstack_method):
"""Helper function to create a checkable action.
Args:
text (str): Text to be displayed in the action.
conf_name (str): configuration setting associated with the action
editorstack_method (str): name of EditorStack class that will be
used to update the changes in each editorstack.
"""
def toggle(checked):
self.switch_to_plugin()
self._toggle_checkable_action(checked, editorstack_method,
conf_name)
action = create_action(self, text, toggled=toggle)
action.setChecked(CONF.get('editor', conf_name))
return action
def start(self):
""" Starts services. """
cert_path = os.path.join(self.work_dir, 'certificates')
public_keys_dir = os.path.join(cert_path, 'public_keys')
private_keys_dir = os.path.join(cert_path, 'private_keys')
client_secret_file = os.path.join(private_keys_dir, "client.key")
client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
server_public_file = os.path.join(public_keys_dir, "server.key")
server_public, _ = zmq.auth.load_certificate(server_public_file)
self.outgoing_msg_greenlet = gevent.spawn(self.outgoing_server_comms, server_public,
client_public, client_secret)
self.outgoing_msg_greenlet.link_exception(self.on_exception)
self.incoming_msg_greenlet = gevent.spawn(self.incoming_server_comms, server_public,
client_public, client_secret)
self.incoming_msg_greenlet.link_exception(self.on_exception)
logger.info('Waiting for detailed configuration from Beeswarm server.')
gevent.joinall([self.outgoing_msg_greenlet])
def cast_pars_dict(pars_dict):
"""Cast the bool and float elements of a parameters dict to
the appropriate python types.
"""
o = {}
for pname, pdict in pars_dict.items():
o[pname] = {}
for k, v in pdict.items():
if k == 'free':
o[pname][k] = bool(int(v))
elif k == 'name':
o[pname][k] = v
else:
o[pname][k] = float(v)
return o
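# Illustrative round trip (hypothetical parameter dict): 'free' becomes a bool,
# 'name' stays a string, and every other value is cast to float.
_pars = {'norm': {'free': '1', 'name': 'norm', 'value': '2.3'}}
assert cast_pars_dict(_pars) == {'norm': {'free': True, 'name': 'norm', 'value': 2.3}}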
def parse_na(txt: str) -> (MetarData, Units): # type: ignore
"""
Parser for the North American METAR variant
"""
units = Units(**NA_UNITS) # type: ignore
clean = core.sanitize_report_string(txt)
wxresp = {'raw': txt, 'sanitized': clean}
wxdata, wxresp['remarks'] = core.get_remarks(clean)
wxdata, wxresp['runway_visibility'], _ = core.sanitize_report_list(wxdata)
wxdata, wxresp['station'], wxresp['time'] = core.get_station_and_time(wxdata)
wxdata, wxresp['clouds'] = core.get_clouds(wxdata)
wxdata, wxresp['wind_direction'], wxresp['wind_speed'], \
wxresp['wind_gust'], wxresp['wind_variable_direction'] = core.get_wind(wxdata, units)
wxdata, wxresp['altimeter'] = core.get_altimeter(wxdata, units, 'NA')
wxdata, wxresp['visibility'] = core.get_visibility(wxdata, units)
wxresp['other'], wxresp['temperature'], wxresp['dewpoint'] = core.get_temp_and_dew(wxdata)
condition = core.get_flight_rules(wxresp['visibility'], core.get_ceiling(wxresp['clouds'])) # type: ignore
wxresp['flight_rules'] = FLIGHT_RULES[condition]
wxresp['remarks_info'] = remarks.parse(wxresp['remarks']) # type: ignore
wxresp['time'] = core.make_timestamp(wxresp['time']) # type: ignore
return MetarData(**wxresp), units
def multiply_slow(x, y, prim=0x11b):
'''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''
### Define bitwise carry-less operations as inner functions ###
def cl_mult(x,y):
'''Bitwise carry-less multiplication on integers'''
z = 0
i = 0
while (y>>i) > 0:
if y & (1<<i):
z ^= x<<i
i += 1
return z
def bit_length(n):
'''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
bits = 0
while n >> bits: bits += 1
return bits
def cl_div(dividend, divisor=None):
'''Bitwise carry-less long division on integers and returns the remainder'''
# Compute the position of the most significant bit for each integers
dl1 = bit_length(dividend)
dl2 = bit_length(divisor)
# If the dividend is smaller than the divisor, just exit
if dl1 < dl2:
return dividend
# Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
for i in _range(dl1-dl2,-1,-1):
# Check that the dividend is divisible (useless for the first iteration but important for the next ones)
if dividend & (1 << i+dl2-1):
# If divisible, then shift the divisor to align the most significant bits and XOR (carry-less subtraction)
dividend ^= divisor << i
return dividend
### Main GF multiplication routine ###
# Multiply the gf numbers
result = cl_mult(x,y)
# Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
if prim > 0:
result = cl_div(result, prim)
return result
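# Two quick checks (assuming `_range` is simply `range`): in GF(2^8) with the
# AES polynomial 0x11b, 2 * 3 = 6 needs no reduction, while 0x53 * 0xCA reduces
# to 0x01 -- the multiplicative-inverse pair quoted in the AES specification.
assert multiply_slow(2, 3) == 6
assert multiply_slow(0x53, 0xCA) == 0x01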
def start_receive(self, fd, data=None):
"""
Cause :meth:`poll` to yield `data` when `fd` is readable.
"""
self._rfds[fd] = (data or fd, self._generation)
self._update(fd)
def measure_old_norse_syllable(syllable: list) -> Union[Length, None]:
"""
Old Norse syllables are considered as:
- short if they contain a short vowel followed by at most one simple consonant
- long if they contain a long vowel, or a short vowel followed by a geminate or more than one consonant
- overlong if they contain a long vowel followed by a geminate or more than one consonant
>>> measure_old_norse_syllable([m, a.lengthen(), l]).name
'long'
>>> measure_old_norse_syllable([a, l]).name
'short'
>>> measure_old_norse_syllable([s, t, ee, r, k, r]).name
'long'
>>> measure_old_norse_syllable([m, o.lengthen()]).name
'long'
:param syllable: list of Vowel and Consonant instances
:return: instance of Length (short, long or overlong)
"""
index = 0
while index < len(syllable) and not isinstance(syllable[index], Vowel):
index += 1
if index == len(syllable):
return None
else:
long_vowel_number = 0
short_vowel_number = 0
geminated_consonant_number = 0
simple_consonant_number = 0
for c in syllable[index:]:
if isinstance(c, Vowel):
if c.length == Length.long:
long_vowel_number += 1
elif c.length == Length.short:
short_vowel_number += 1
elif isinstance(c, Consonant):
if c.geminate:
geminated_consonant_number += 1
else:
simple_consonant_number += 1
if long_vowel_number == 0 and short_vowel_number == 1 and simple_consonant_number <= 1 and\
geminated_consonant_number == 0:
return Length.short
elif (short_vowel_number == 1 and (simple_consonant_number > 1 or geminated_consonant_number > 0)) or \
long_vowel_number > 0 and simple_consonant_number <= 1 and geminated_consonant_number == 0:
return Length.long
elif long_vowel_number > 0 and (simple_consonant_number > 1 or geminated_consonant_number > 0):
return Length.overlong
def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
port_type=DEVICE_OWNER_ROUTER_INTF):
"""Creates port used specially for HA purposes."""
port = {'port': {
'tenant_id': '', # intentionally not set
'network_id': network_id,
'mac_address': ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': device_id,
'device_owner': port_type,
'admin_state_up': True,
'name': ''}}
if extensions.is_extension_supported(self._core_plugin,
"dns-integration"):
port['port'].update(dns_name='')
core_plugin = bc.get_plugin()
return core_plugin.create_port(context, port)
def polygen(*coefficients):
'''Polynomial generating function'''
if not coefficients:
return lambda i: 0
else:
c0 = coefficients[0]
coefficients = coefficients[1:]
def _(i):
v = c0
for c in coefficients:
v += c*i
i *= i
return v
return _
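# Usage sketch: polygen(1, 2, 3) evaluates 1 + 2*i + 3*i**2 for a given i
# (note that `i` is squared after each term, so coefficients beyond the
# quadratic one are applied to powers 4, 8, ... rather than 3, 4, ...).
poly = polygen(1, 2, 3)
assert poly(2) == 17  # 1 + 2*2 + 3*4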
def stopping_function(results, args=None, rstate=None, M=None,
return_vals=False):
"""
The default stopping function utilized by :class:`DynamicSampler`.
Zipped parameters are passed to the function via :data:`args`.
Assigns the run a stopping value based on a weighted average of the
stopping values for the posterior and evidence::
stop = pfrac * stop_post + (1.- pfrac) * stop_evid
The evidence stopping value is based on the estimated evidence error
(i.e. standard deviation) relative to a given threshold::
stop_evid = evid_std / evid_thresh
The posterior stopping value is based on the fractional error (i.e.
standard deviation / mean) in the Kullback-Leibler (KL) divergence
relative to a given threshold::
stop_post = (kld_std / kld_mean) / post_thresh
Estimates of the mean and standard deviation are computed using `n_mc`
realizations of the input using a provided `'error'` keyword (either
`'jitter'` or `'simulate'`, which call related functions :meth:`jitter_run`
and :meth:`simulate_run` in :mod:`dynesty.utils`, respectively, or
`'sim_approx'`, which boosts `'jitter'` by a factor of two).
Returns the boolean `stop <= 1`. If `True`, the :class:`DynamicSampler`
will stop adding new samples to our results.
Parameters
----------
results : :class:`Results` instance
:class:`Results` instance.
args : dictionary of keyword arguments, optional
Arguments used to set the stopping values. Default values are
`pfrac = 1.0`, `evid_thresh = 0.1`, `post_thresh = 0.02`,
`n_mc = 128`, `error = 'sim_approx'`, and `approx = True`.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
M : `map` function, optional
An alias to a `map`-like function. This allows users to pass
functions from pools (e.g., `pool.map`) to compute realizations in
parallel. By default the standard `map` function is used.
return_vals : bool, optional
Whether to return the stopping value (and its components). Default
is `False`.
Returns
-------
stop_flag : bool
Boolean flag indicating whether we have passed the desired stopping
criteria.
stop_vals : tuple of shape (3,), optional
The individual stopping values `(stop_post, stop_evid, stop)` used
to determine the stopping criteria.
"""
# Initialize values.
if args is None:
args = dict({})
if rstate is None:
rstate = np.random
if M is None:
M = map
# Initialize hyperparameters.
pfrac = args.get('pfrac', 1.0)
if not 0. <= pfrac <= 1.:
raise ValueError("The provided `pfrac` {0} is not between 0. and 1."
.format(pfrac))
evid_thresh = args.get('evid_thresh', 0.1)
if pfrac < 1. and evid_thresh < 0.:
raise ValueError("The provided `evid_thresh` {0} is not non-negative "
"even though `1. - pfrac` is {1}."
.format(evid_thresh, 1. - pfrac))
post_thresh = args.get('post_thresh', 0.02)
if pfrac > 0. and post_thresh < 0.:
raise ValueError("The provided `post_thresh` {0} is not non-negative "
"even though `pfrac` is {1}."
.format(post_thresh, pfrac))
n_mc = args.get('n_mc', 128)
if n_mc <= 1:
raise ValueError("The number of realizations {0} must be greater "
"than 1.".format(n_mc))
elif n_mc < 20:
warnings.warn("Using a small number of realizations might result in "
"excessively noisy stopping value estimates.")
error = args.get('error', 'sim_approx')
if error not in {'jitter', 'simulate', 'sim_approx'}:
raise ValueError("The chosen `'error'` option {0} is not valid."
.format(error))
if error == 'sim_approx':
error = 'jitter'
boost = 2.
else:
boost = 1.
approx = args.get('approx', True)
# Compute realizations of ln(evidence) and the KL divergence.
rlist = [results for i in range(n_mc)]
error_list = [error for i in range(n_mc)]
approx_list = [approx for i in range(n_mc)]
args = zip(rlist, error_list, approx_list)
outputs = list(M(_kld_error, args))
kld_arr, lnz_arr = np.array([(kld[-1], res.logz[-1])
for kld, res in outputs]).T
# Evidence stopping value.
lnz_std = np.std(lnz_arr)
stop_evid = np.sqrt(boost) * lnz_std / evid_thresh
# Posterior stopping value.
kld_mean, kld_std = np.mean(kld_arr), np.std(kld_arr)
stop_post = boost * (kld_std / kld_mean) / post_thresh
# Effective stopping value.
stop = pfrac * stop_post + (1. - pfrac) * stop_evid
if return_vals:
return stop <= 1., (stop_post, stop_evid, stop)
else:
return stop <= 1.
def _rewind(self):
'''rewind to start of log'''
DFReader._rewind(self)
self.line = 0
# find the first valid line
while self.line < len(self.lines):
if self.lines[self.line].startswith("FMT, "):
break
self.line += 1
def convertforoutput(self,outputfile):
"""Convert from one of the source formats into target format. Relevant if converters are used in OutputTemplates. Outputfile is a CLAMOutputFile instance."""
super(CharEncodingConverter,self).convertforoutput(outputfile)
return withheaders( flask.make_response( ( line.encode(self.charset) for line in outputfile ) ) , 'text/plain; charset=' + self.charset)
def set_port_profile_created(self, vlan_id, profile_name, device_id):
"""Sets created_on_ucs flag to True."""
with self.session.begin(subtransactions=True):
port_profile = self.session.query(
ucsm_model.PortProfile).filter_by(
vlan_id=vlan_id, profile_id=profile_name,
device_id=device_id).first()
if port_profile:
port_profile.created_on_ucs = True
self.session.merge(port_profile)
else:
new_profile = ucsm_model.PortProfile(profile_id=profile_name,
vlan_id=vlan_id,
device_id=device_id,
created_on_ucs=True)
self.session.add(new_profile)
def get_learning_objective_ids_metadata(self):
"""get the metadata for learning objective"""
metadata = dict(self._learning_objective_ids_metadata)
metadata.update({'existing_id_values': self.my_osid_object_form._my_map['learningObjectiveIds'][0]})
return Metadata(**metadata)
def remove_col_label(self, event=None, col=None):
"""
check to see if column is required
if it is not, delete it from grid
"""
if event:
col = event.GetCol()
if not col:
return
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
elif '^^' in label:
label = label.strip('^^')
if label in self.reqd_headers:
pw.simple_warning("That header is required, and cannot be removed")
return False
else:
print('That header is not required:', label)
# remove column from wxPython grid
self.grid.remove_col(col)
# remove column from DataFrame if present
if self.grid_type in self.contribution.tables:
if label in self.contribution.tables[self.grid_type].df.columns:
del self.contribution.tables[self.grid_type].df[label]
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self)
def preorder(self):
""" iterator for nodes: root, left, right """
if not self:
return
yield self
if self.left:
for x in self.left.preorder():
yield x
if self.right:
for x in self.right.preorder():
yield x
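# A minimal, self-contained sketch (hypothetical Node class, not from the
# original source) showing how such a preorder generator is typically consumed.
class _Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right
    def preorder(self):
        yield self
        if self.left:
            for x in self.left.preorder():
                yield x
        if self.right:
            for x in self.right.preorder():
                yield x
_tree = _Node(1, _Node(2), _Node(3))
assert [n.value for n in _tree.preorder()] == [1, 2, 3]  # root, left, right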
def _get_system_volume(vm_):
'''
Construct VM system volume list from cloud profile config
'''
# Override system volume size if 'disk_size' is defined in cloud profile
disk_size = get_size(vm_)['disk']
if 'disk_size' in vm_:
disk_size = vm_['disk_size']
# Construct the system volume
volume = Volume(
name='{0} Storage'.format(vm_['name']),
size=disk_size,
disk_type=get_disk_type(vm_)
)
if 'image_password' in vm_:
image_password = vm_['image_password']
volume.image_password = image_password
# Retrieve list of SSH public keys
ssh_keys = get_public_keys(vm_)
volume.ssh_keys = ssh_keys
if 'image_alias' in vm_.keys():
volume.image_alias = vm_['image_alias']
else:
volume.image = get_image(vm_)['id']
# Set volume availability zone if defined in the cloud profile
if 'disk_availability_zone' in vm_:
volume.availability_zone = vm_['disk_availability_zone']
return volume
def TRUE(classical_reg):
"""
Produce a TRUE instruction.
:param classical_reg: A classical register to modify.
:return: An instruction object representing the equivalent MOVE.
"""
warn("`TRUE a` has been deprecated. Use `MOVE a 1` instead.")
if isinstance(classical_reg, int):
classical_reg = Addr(classical_reg)
return MOVE(classical_reg, 1)
def __get_strut_token(self):
"""
Move the staged loan notes to the order stage and get the struts token
from the place order HTML.
The order will not be placed until calling _confirm_order()
Returns
-------
dict
A dict with the token name and value
"""
try:
# Move to the place order page and get the struts token
response = self.lc.session.get('/portfolio/placeOrder.action')
soup = BeautifulSoup(response.text, "html5lib")
# Example HTML with the stuts token:
"""
<input type="hidden" name="struts.token.name" value="token" />
<input type="hidden" name="token" value="C4MJZP39Q86KDX8KN8SBTVCP0WSFBXEL" />
"""
# 'struts.token.name' defines the field name with the token value
strut_tag = None
strut_token_name = soup.find('input', {'name': 'struts.token.name'})
if strut_token_name and strut_token_name['value'].strip():
# Get form around the strut.token.name element
form = soup.form # assumed
for parent in strut_token_name.parents:
if parent and parent.name == 'form':
form = parent
break
# Get strut token value
strut_token_name = strut_token_name['value']
strut_tag = soup.find('input', {'name': strut_token_name})
if strut_tag and strut_tag['value'].strip():
return {'name': strut_token_name, 'value': strut_tag['value'].strip()}
# No strut token found
self.__log('No struts token! HTML: {0}'.format(response.text))
raise LendingClubError('No struts token. Please report this error.', response)
except Exception as e:
self.__log('Could not get struts token. Error message: {0}'.format(str(e)))
raise LendingClubError('Could not get struts token. Error message: {0}'.format(str(e)))
def script(self, s):
"""
Parse a script by compiling it.
Return a :class:`Contract` or None.
"""
try:
script = self._network.script.compile(s)
script_info = self._network.contract.info_for_script(script)
return Contract(script_info, self._network)
except Exception:
return None
def key_wait():
"""Waits until the user presses a key.
Then returns a :any:`KeyDown` event.
Key events will repeat if held down.
A click to close the window will be converted into an Alt+F4 KeyDown event.
Returns:
tdl.event.KeyDown: The pressed key.
"""
while 1:
for event in get():
if event.type == 'KEYDOWN':
return event
if event.type == 'QUIT':
# convert QUIT into alt+F4
return KeyDown('F4', '', True, False, True, False, False)
_time.sleep(.001)
def capture(board):
"""Try to solve the board described by board_string.
Return sequence of summaries that describe how to get to the solution.
"""
game = Game()
v = (0, 0)
stub_actor = base.Actor('capture', v, v, v, v, v, v, v, v, v)
root = base.State(board, stub_actor, stub_actor,
turn=1, actions_remaining=1)
solution_node = None
for eot in game.all_ends_of_turn(root):
# check for a solution
if eot.is_mana_drain: # quick check before checking whole board
if eot.parent.board.is_empty():
solution_node = eot
break
# if solution found, build the list of swaps
solution_sequence = list() # empty sequence (no solution) by default
if solution_node:
node = solution_node
while node:
# record each swap in the path to the root
if not isinstance(node, base.Swap):
node = node.parent
continue
summary = base.Summary(node.parent.board, node.position_pair,
None, None, None)
solution_sequence.append(summary)
node = node.parent
return tuple(reversed(solution_sequence))
def _read_descriptions(self, password):
"""
Read and evaluate the igddesc.xml file
and the tr64desc.xml file if a password is given.
"""
descfiles = [FRITZ_IGD_DESC_FILE]
if password:
descfiles.append(FRITZ_TR64_DESC_FILE)
for descfile in descfiles:
parser = FritzDescParser(self.address, self.port, descfile)
if not self.modelname:
self.modelname = parser.get_modelname()
services = parser.get_services()
self._read_services(services)
def getlanguage(self, language=None, windowsversion=None):
"""
Get and return the manifest's language as string.
Can be either language-culture e.g. 'en-us' or a string indicating
language neutrality, e.g. 'x-ww' on Windows XP or 'none' on Vista
and later.
"""
if not language:
language = self.language
if language in (None, "", "*", "neutral"):
return (LANGUAGE_NEUTRAL_NT5,
LANGUAGE_NEUTRAL_NT6)[(windowsversion or
sys.getwindowsversion()) >= (6, )]
return language
def _get_account_number(self, token, uuid):
"""Get fido account number."""
# Data
data = {"accessToken": token,
"uuid": uuid}
# Http request
try:
raw_res = yield from self._session.post(ACCOUNT_URL,
data=data,
headers=self._headers,
timeout=self._timeout)
except OSError:
raise PyFidoError("Can not get account number")
# Load answer as json
try:
json_content = yield from raw_res.json()
account_number = json_content\
.get('getCustomerAccounts', {})\
.get('accounts', [{}])[0]\
.get('accountNumber')
except (OSError, ValueError):
raise PyFidoError("Bad json getting account number")
# Check collected data
if account_number is None:
raise PyFidoError("Can not get account number")
return account_number
def call(method, *args, **kwargs):
'''
Calls an arbitrary pyeapi method.
'''
kwargs = clean_kwargs(**kwargs)
return getattr(pyeapi_device['connection'], method)(*args, **kwargs)
def run_band_structure(self,
paths,
with_eigenvectors=False,
with_group_velocities=False,
is_band_connection=False,
path_connections=None,
labels=None,
is_legacy_plot=False):
"""Run phonon band structure calculation.
Parameters
----------
paths : List of array_like
Sets of qpoints that can be passed to phonopy.set_band_structure().
Numbers of qpoints can be different.
shape of each array_like : (qpoints, 3)
with_eigenvectors : bool, optional
Flag whether eigenvectors are calculated or not. Default is False.
with_group_velocities : bool, optional
Flag whether group velocities are calculated or not. Default is
False.
is_band_connection : bool, optional
Flag whether each band is connected or not. This is achieved by
comparing similarity of eigenvectors of neighboring points. Sometimes
this fails. Default is False.
path_connections : List of bool, optional
This is only used in graphical plot of band structure and gives
whether each path is connected to the next path or not,
i.e., if False, there is a jump of q-points. Number of elements is
the same at that of paths. Default is None.
labels : List of str, optional
This is only used in graphical plot of band structure and gives
labels of end points of each path. The number of labels is equal
to (2 - np.array(path_connections)).sum().
is_legacy_plot: bool, optional
This makes the old style band structure plot. Default is False.
"""
if self._dynamical_matrix is None:
msg = ("Dynamical matrix has not yet built.")
raise RuntimeError(msg)
if with_group_velocities:
if self._group_velocity is None:
self._set_group_velocity()
group_velocity = self._group_velocity
else:
group_velocity = None
self._band_structure = BandStructure(
paths,
self._dynamical_matrix,
with_eigenvectors=with_eigenvectors,
is_band_connection=is_band_connection,
group_velocity=group_velocity,
path_connections=path_connections,
labels=labels,
is_legacy_plot=is_legacy_plot,
factor=self._factor)
def save_features(self, train_features, test_features, feature_names, feature_list_id):
"""
Save features for the training and test sets to disk, along with their metadata.
Args:
train_features: A NumPy array of features for the training set.
test_features: A NumPy array of features for the test set.
feature_names: A list containing the names of the feature columns.
feature_list_id: The name for this feature list.
"""
self.save_feature_names(feature_names, feature_list_id)
self.save_feature_list(train_features, 'train', feature_list_id)
self.save_feature_list(test_features, 'test', feature_list_id)
def restart(self, container, instances=None, map_name=None, **kwargs):
"""
Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs)
def _format_conditions_and_actions(self, raw_data):
"""
This function gets a set of actions and conditionswith the following
format:
{'action-0': 'repeat',
'action-1': 'repeat',
'analysisservice-0': '30cd952b0bb04a05ac27b70ada7feab2',
'analysisservice-1': '30cd952b0bb04a05ac27b70ada7feab2',
'and_or-0': 'and',
'and_or-1': 'no',
'range0-0': '12',
'range0-1': '31',
'range1-0': '12',
'range1-1': '33',
'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf',
'setresulton-0': 'original',
'setresulton-1': 'original',
'trigger': 'submit',
'value': '',
'an_result_id-0':'rep-1',
'an_result_id-1':'rep-2'}
and returns a formatted set with the conditions and actions sorted
like this one:
{
'conditions':[{
'range1': 'X', 'range0': 'X',
'cond_row_idx':'X'
'and_or': 'and',
'analysisservice': '<as_uid>',
},
{
'range1': 'X', 'range0': 'X',
'cond_row_idx':'X'
'and_or': 'and',
'analysisservice': '<as_uid>',
}, {...}],
'trigger': 'xxx',
'actions':[
{'action':'duplicate', 'act_row_idx':'0',
'otherWS': to_another, 'analyst': 'sussan1',
'setresultdiscrete': '1', 'setresultvalue': '2',
'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf',
'setresulton': 'original','an_result_id-0':'rep-1'},
{'action':'repeat', 'act_row_idx':'1',
'otherWS': current, 'analyst': '', ...},
]
}
"""
keys = raw_data.keys()
# 'formatted_action_row' is the dict which will be added in the
# 'value' list
formatted_set = {}
# Filling the dict with the values that aren't actions or conditions
formatted_set['trigger'] = raw_data.get('trigger', '')
# Adding the conditions list to the final dictionary
formatted_set['conditions'] = self._get_sorted_conditions_list(
raw_data)
# Adding the actions list to the final dictionary
formatted_set['actions'] = self._get_sorted_actions_list(raw_data)
return formatted_set
def parse_genemap2(lines):
"""Parse the omim source file called genemap2.txt
Explanation of Phenotype field:
Brackets, "[ ]", indicate "nondiseases," mainly genetic variations that
lead to apparently abnormal laboratory test values.
Braces, "{ }", indicate mutations that contribute to susceptibility to
multifactorial disorders (e.g., diabetes, asthma) or to susceptibility
to infection (e.g., malaria).
A question mark, "?", before the phenotype name indicates that the
relationship between the phenotype and gene is provisional.
More details about this relationship are provided in the comment
field of the map and in the gene and phenotype OMIM entries.
The number in parentheses after the name of each disorder indicates
the following:
(1) the disorder was positioned by mapping of the wildtype gene;
(2) the disease phenotype itself was mapped;
(3) the molecular basis of the disorder is known;
(4) the disorder is a chromosome deletion or duplication syndrome.
Args:
lines(iterable(str))
Yields:
parsed_entry(dict)
"""
LOG.info("Parsing the omim genemap2")
header = []
for i,line in enumerate(lines):
line = line.rstrip()
if line.startswith('#'):
if i < 10:
if line.startswith('# Chromosome'):
header = line[2:].split('\t')
continue
if len(line) < 5:
continue
parsed_entry = parse_omim_line(line, header)
parsed_entry['mim_number'] = int(parsed_entry['Mim Number'])
parsed_entry['raw'] = line
# This is the approved symbol for the entry
hgnc_symbol = parsed_entry.get("Approved Symbol")
# If no approved symbol could be found choose the first of
# the gene symbols
gene_symbols = []
if parsed_entry.get('Gene Symbols'):
gene_symbols = [symbol.strip() for symbol in parsed_entry['Gene Symbols'].split(',')]
parsed_entry['hgnc_symbols'] = gene_symbols
if not hgnc_symbol and gene_symbols:
hgnc_symbol = gene_symbols[0]
parsed_entry['hgnc_symbol'] = hgnc_symbol
# Gene inheritance is a construct. It is the union of all inheritance
# patterns found in the associated phenotypes
gene_inheritance = set()
parsed_phenotypes = []
# Information about the related phenotypes
# Each related phenotype is separated by ';'
for phenotype_info in parsed_entry.get('Phenotypes', '').split(';'):
if not phenotype_info:
continue
phenotype_info = phenotype_info.lstrip()
# First symbol in description indicates phenotype status
# If no special symbol is used the phenotype is 'established'
phenotype_status = OMIM_STATUS_MAP.get(phenotype_info[0], 'established')
# Skip phenotype entries that not associated to disease
if phenotype_status == 'nondisease':
continue
phenotype_description = ""
# We will try to save the description
splitted_info = phenotype_info.split(',')
for i, text in enumerate(splitted_info):
# Everything before ([1,2,3])
# We check if we are in the part where the mim number exists
match = entry_pattern.search(text)
if not match:
phenotype_description += text
else:
# If we find the end of the entry
mimnr_match = mimnr_pattern.search(phenotype_info)
# Then if the entry have a mim number we choose that
if mimnr_match:
phenotype_mim = int(mimnr_match.group())
else:
phenotype_mim = parsed_entry['mim_number']
phenotype_description += text[:-4]
break
# Find the inheritance
inheritance = set()
inheritance_text = ','.join(splitted_info[i:])
for term in mim_inheritance_terms:
if term in inheritance_text:
inheritance.add(TERMS_MAPPER[term])
gene_inheritance.add(TERMS_MAPPER[term])
parsed_phenotypes.append(
{
'mim_number':phenotype_mim,
'inheritance': inheritance,
'description': phenotype_description.strip('?\{\}'),
'status': phenotype_status,
}
)
parsed_entry['phenotypes'] = parsed_phenotypes
parsed_entry['inheritance'] = gene_inheritance
yield parsed_entry
def unicorn_edit(path, **kwargs):
"""Edit Unicorn node interactively.
"""
ctx = Context(**kwargs)
ctx.timeout = None
ctx.execute_action('unicorn:edit', **{
'unicorn': ctx.repo.create_secure_service('unicorn'),
'path': path,
})
def _EvaluateExpressions(self, frame):
"""Evaluates watched expressions into a string form.
If expression evaluation fails, the error message is used as evaluated
expression string.
Args:
frame: Python stack frame of breakpoint hit.
Returns:
Array of strings where each string corresponds to the breakpoint
expression with the same index.
"""
return [self._FormatExpression(frame, expression) for expression in
self._definition.get('expressions') or []]
def runInactiveDeviceCleanup(self):
"""
Runs both the deleteInactiveDevicesByAge and the deleteInactiveDevicesByQuota
methods with the configuration that was set when calling create.
"""
yield self.deleteInactiveDevicesByQuota(
self.__inactive_per_jid_max,
self.__inactive_global_max
)
yield self.deleteInactiveDevicesByAge(self.__inactive_max_age)
def unlink(self, request, uuid=None):
"""
Unlink all related resources, service project link and service itself.
"""
service = self.get_object()
service.unlink_descendants()
self.perform_destroy(service)
return Response(status=status.HTTP_204_NO_CONTENT)
def product(*arrays):
""" Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
out = np.empty_like(ix, dtype=dtype)
for n, _ in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
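# Example (consistent with the docstring): the cartesian product of two 1-D
# arrays, one row per combination.
assert product([1, 2], [3, 4]).tolist() == [[1, 3], [1, 4], [2, 3], [2, 4]]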
def _delete_unwanted_caracters(self, chain):
"""Remove not wanted char from chain
unwanted char are illegal_macro_output_chars attribute
:param chain: chain to remove char from
:type chain: str
:return: chain cleaned
:rtype: str
"""
try:
chain = chain.decode('utf8', 'replace')
except UnicodeEncodeError:
# If it is still encoded correctly, ignore...
pass
except AttributeError:
# Python 3 will raise an exception because the line is still unicode
pass
for char in self.illegal_macro_output_chars:
chain = chain.replace(char, '')
return chain
def transform_txn_for_ledger(txn):
'''
Makes sure that we have integers as keys after possible deserialization from JSON
:param txn: txn to be transformed
:return: transformed txn
'''
txn_data = get_payload_data(txn)
txn_data[AUDIT_TXN_LEDGERS_SIZE] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGERS_SIZE].items()}
txn_data[AUDIT_TXN_LEDGER_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGER_ROOT].items()}
txn_data[AUDIT_TXN_STATE_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_STATE_ROOT].items()}
return txn
def paintEvent(self, event):
"""
Overloads the paint event to support rendering of hints if there are
no items in the tree.
:param event | <QPaintEvent>
"""
super(XTextEdit, self).paintEvent(event)
if self.document().isEmpty() and self.hint():
text = self.hint()
rect = self.rect()
# modify the padding on the rect
rect.setX(4)
rect.setY(4)
align = int(Qt.AlignLeft | Qt.AlignTop)
# setup the coloring options
clr = self.hintColor()
# paint the hint
with XPainter(self.viewport()) as painter:
painter.setPen(clr)
painter.drawText(rect, align | Qt.TextWordWrap, text)
def head(draw=True, show=True, max_shape=256):
"""Show a volumetric rendering of a human male head."""
# inspired by http://graphicsrunner.blogspot.com/2009/01/volume-rendering-102-transfer-functions.html
import ipyvolume as ipv
from scipy.interpolate import interp1d
# First part is a simpler version of setting up the transfer function. Interpolation with higher order
# splines does not work well, the original must do sth different
colors = [[0.91, 0.7, 0.61, 0.0], [0.91, 0.7, 0.61, 80.0], [1.0, 1.0, 0.85, 82.0], [1.0, 1.0, 0.85, 256]]
x = np.array([k[-1] for k in colors])
rgb = np.array([k[:3] for k in colors])
N = 256
xnew = np.linspace(0, 256, N)
tf_data = np.zeros((N, 4))
kind = 'linear'
for channel in range(3):
f = interp1d(x, rgb[:, channel], kind=kind)
ynew = f(xnew)
tf_data[:, channel] = ynew
alphas = [[0, 0], [0, 40], [0.2, 60], [0.05, 63], [0, 80], [0.9, 82], [1.0, 256]]
x = np.array([k[1] * 1.0 for k in alphas])
y = np.array([k[0] * 1.0 for k in alphas])
f = interp1d(x, y, kind=kind)
ynew = f(xnew)
tf_data[:, 3] = ynew
tf = ipv.TransferFunction(rgba=tf_data.astype(np.float32))
head_data = ipv.datasets.head.fetch().data
if draw:
vol = ipv.volshow(head_data, tf=tf, max_shape=max_shape)
if show:
ipv.show()
return vol
else:
return head_data
def add_concept(self, concept_obj):
''' Add a concept to current concept list '''
if concept_obj is None:
raise Exception("Concept object cannot be None")
elif concept_obj in self.__concepts:
raise Exception("Concept object is already inside")
elif concept_obj.cidx in self.__concept_map:
raise Exception("Duplicated concept ID ({})".format(concept_obj.cidx))
self.__concepts.append(concept_obj)
self.__concept_map[concept_obj.cidx] = concept_obj
concept_obj.sent = self
return concept_obj
def delete(cls, resources, background=False, force=False):
"""Delete an ip by deleting the iface"""
if not isinstance(resources, (list, tuple)):
resources = [resources]
ifaces = []
for item in resources:
try:
ip_ = cls.info(item)
except UsageError:
cls.error("Can't find this ip %s" % item)
iface = Iface.info(ip_['iface_id'])
ifaces.append(iface['id'])
return Iface.delete(ifaces, background)
def _weld_unary(array, weld_type, operation):
"""Apply operation on each element in the array.
As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h
Parameters
----------
array : numpy.ndarray or WeldObject
Data
weld_type : WeldType
Of the data
operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'}
Which unary operation to apply.
Returns
-------
WeldObject
Representation of this computation.
"""
if weld_type not in {WeldFloat(), WeldDouble()}:
raise TypeError('Unary operation supported only on scalar f32 or f64')
obj_id, weld_obj = create_weld_object(array)
weld_template = 'map({array}, |e: {type}| {op}(e))'
weld_obj.weld_code = weld_template.format(array=obj_id, type=weld_type, op=operation)
return weld_obj
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
"""add_datepart converts a column of df from a datetime64 to many columns containing
the information from the date. This applies changes inplace.
Parameters:
-----------
df: A pandas data frame. df gain several new columns.
fldname: A string that is the name of the date column you wish to expand.
If it is not a datetime64 series, it will be converted to one with pd.to_datetime.
drop: If true then the original date column will be removed.
time: If true time features: Hour, Minute, Second will be added.
Examples:
---------
>>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })
>>> df
A
0 2000-03-11
1 2000-03-12
2 2000-03-13
>>> add_datepart(df, 'A')
>>> df
AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed
0 2000 3 10 11 5 71 False False False False False False 952732800
1 2000 3 10 12 6 72 False False False False False False 952819200
2 2000 3 11 13 0 73 False False False False False False 952905600
"""
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
def h_v_t(header, key):
"""
get header value by key, with a title-cased fallback
try the key as given, then its title-cased form, since header names may differ in case
e.g. header['x-log-abc'] or header['X-Log-Abc']
:param header:
:param key:
:return:
"""
if key not in header:
key = key.title()
if key not in header:
raise ValueError("Unexpected header in response, missing: " + key + " headers:\n" + str(header))
return header[key] | get header value by key, with a title-cased fallback
try the key as given, then its title-cased form, since header names may differ in case
e.g. header['x-log-abc'] or header['X-Log-Abc']
:param header:
:param key:
:return: |
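A small doctest-style sketch with a hypothetical header dict; both spellings resolve to the same entry because of the title-cased fallback:
>>> h_v_t({'X-Log-Requestid': 'abc123'}, 'X-Log-Requestid')
'abc123'
>>> h_v_t({'X-Log-Requestid': 'abc123'}, 'x-log-requestid')
'abc123'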
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: DocumentContext for this DocumentInstance
:rtype: twilio.rest.preview.sync.service.document.DocumentContext
"""
if self._context is None:
self._context = DocumentContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: DocumentContext for this DocumentInstance
:rtype: twilio.rest.preview.sync.service.document.DocumentContext |
def printMe(self, selfKey, selfValue):
'''Parse the single and its value and return the parsed str.
Args:
selfKey (str): The key. Normally just ``self.key``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
'''
text = '<key>{keyName}</key>\n'.format(keyName=selfKey)
if len(selfValue) == 0:
return ''
else:
valueText = ''
for element in selfValue:
if singleOrPair(element) == 'Single':
valueText += element.printMe(element.tag, element.value)
elif singleOrPair(element) == 'Pair':
valueText += element.printMe(element.key, element.value)
# maybe an else statement for elements that are neither Single nor Pair?
text += valueText
return text | Parse the single and its value and return the parsed str.
Args:
selfKey (str): The key. Normally just ``self.key``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text |
def p_declare_list(p):
'''declare_list : STRING EQUALS static_scalar
| declare_list COMMA STRING EQUALS static_scalar'''
if len(p) == 4:
p[0] = [ast.Directive(p[1], p[3], lineno=p.lineno(1))]
else:
p[0] = p[1] + [ast.Directive(p[3], p[5], lineno=p.lineno(2))] | declare_list : STRING EQUALS static_scalar
| declare_list COMMA STRING EQUALS static_scalar |
def get_ssh_keys(sshdir):
"""Get SSH keys"""
keys = Queue()
for root, _, files in os.walk(os.path.abspath(sshdir)):
if not files:
continue
for filename in files:
fullname = os.path.join(root, filename)
if (os.path.isfile(fullname) and
(fullname.endswith('_rsa') or fullname.endswith('_dsa'))):
keys.put(fullname)
return keys | Get SSH keys |
def drop_layer(self, layer):
"""Removes the named layer and the value associated with it from the node.
Parameters
----------
layer : str
Name of the layer to drop.
Raises
------
TypeError
If the node is frozen
KeyError
If the named layer does not exist
"""
if self._frozen:
raise TypeError('Frozen ConfigTree does not support modification')
for child in self._children.values():
child.drop_layer(layer)
self._layers.remove(layer) | Removes the named layer and the value associated with it from the node.
Parameters
----------
layer : str
Name of the layer to drop.
Raises
------
TypeError
If the node is frozen
KeyError
If the named layer does not exist |
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If, before the try...finally block, we find a parent whose type is
in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not an in try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent | check that a node is not inside a finally clause of a
try...finally statement.
If, before the try...finally block, we find a parent whose type is
in breaker_classes, we skip the whole check. |
def remove_csv_from_json(d):
"""
Remove all CSV data 'values' entries from paleoData table in the JSON structure.
:param dict d: JSON data - old structure
:return dict: Metadata dictionary without CSV values
"""
logger_jsons.info("enter remove_csv_from_json")
# Check both sections
if "paleoData" in d:
d = _remove_csv_from_section(d, "paleoData")
if "chronData" in d:
d = _remove_csv_from_section(d, "chronData")
logger_jsons.info("exit remove_csv_from_json")
return d | Remove all CSV data 'values' entries from paleoData table in the JSON structure.
:param dict d: JSON data - old structure
:return dict: Metadata dictionary without CSV values |
def _check_filepath(changes):
'''
Ensure all changes are fully qualified and affect only one file.
This ensures that the diff output works and a state change is not
incorrectly reported.
'''
filename = None
for change_ in changes:
try:
cmd, arg = change_.split(' ', 1)
if cmd not in METHOD_MAP:
error = 'Command {0} is not supported (yet)'.format(cmd)
raise ValueError(error)
method = METHOD_MAP[cmd]
parts = salt.utils.args.shlex_split(arg)
if method in ['set', 'setm', 'move', 'remove']:
filename_ = parts[0]
else:
_, _, filename_ = parts
if not filename_.startswith('/files'):
error = 'Changes should be prefixed with ' \
'/files if no context is provided,' \
' change: {0}'.format(change_)
raise ValueError(error)
filename_ = re.sub('^/files|/$', '', filename_)
if filename is not None:
if filename != filename_:
error = 'Changes should be made to one ' \
'file at a time, detected changes ' \
'to {0} and {1}'.format(filename, filename_)
raise ValueError(error)
filename = filename_
except (ValueError, IndexError) as err:
log.error(err)
if 'error' not in locals():
error = 'Invalid formatted command, ' \
'see debug log for details: {0}' \
.format(change_)
else:
error = six.text_type(err)
raise ValueError(error)
filename = _workout_filename(filename)
return filename | Ensure all changes are fully qualified and affect only one file.
This ensures that the diff output works and a state change is not
incorrectly reported. |
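A hedged sketch of the kind of change list this helper accepts; the Augeas paths are hypothetical:
changes = [
    'set /files/etc/hosts/1/canonical localhost',
    'set /files/etc/hosts/1/alias localhost.localdomain',
]
filename = _check_filepath(changes)
# Both changes live under the same /files/etc/hosts prefix, so the check passes
# (the final path is resolved by _workout_filename); mixing changes that target
# two different files raises ValueError.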
def tostring(self, inject):
"""Get the entire text content as str"""
return inject(self, '\n'.join(document.tostring(inject) for document in self.documents)) | Get the entire text content as str |
def define_parser(self):
""" Defines xdot grammar.
@see: http://graphviz.org/doc/info/output.html#d:xdot """
# Common constructs.
point = Group(integer.setResultsName("x") +
integer.setResultsName("y"))
n_points = (integer.setResultsName("n") +
OneOrMore(point).setResultsName("points"))
n_bytes = Suppress(integer) + Suppress(minus) + \
Word(printables).setResultsName("b")
justify = ToInteger(
Literal("-1") | Literal("0") | Literal("1")
).setResultsName("j")
# Attributes ----------------------------------------------------------
# Set fill color. The color value consists of the n bytes following
# the '-'.
fill = (Literal("C").suppress() + Suppress(integer) + Suppress(minus) +
colour.setResultsName("color")).setResultsName("fill")
# Set pen color. The color value consists of the n bytes following '-'.
stroke = (Literal("c").suppress() + Suppress(integer) +
Suppress(minus) + colour.setResultsName("color")
).setResultsName("stroke")
# Set font. The font size is s points. The font name consists of the
# n bytes following '-'.
font = (Literal("F").suppress() + real.setResultsName("s") +
n_bytes).setResultsName("font")
# Set style attribute. The style value consists of the n bytes
# following '-'. The syntax of the value is the same as specified for
# a styleItem in style.
style = (Literal("S").suppress() + n_bytes).setResultsName("style")
# Shapes --------------------------------------------------------------
# Filled ellipse ((x-x0)/w)^2 + ((y-y0)/h)^2 = 1
filled_ellipse = (Literal("E").suppress() +
integer.setResultsName("x0") + integer.setResultsName("y0") +
integer.setResultsName("w") + integer.setResultsName("h")
).setResultsName("filled_ellipse")
# Unfilled ellipse ((x-x0)/w)^2 + ((y-y0)/h)^2 = 1
ellipse = (Literal("e").suppress() +
integer.setResultsName("x0") + integer.setResultsName("y0") +
integer.setResultsName("w") + integer.setResultsName("h")
).setResultsName("ellipse")
# Filled polygon using the given n points.
filled_polygon = (Literal("P").suppress() +
n_points).setResultsName("filled_polygon")
# Unfilled polygon using the given n points.
polygon = (Literal("p").suppress() +
n_points).setResultsName("polygon")
# Polyline using the given n points.
polyline = (Literal("L").suppress() +
n_points).setResultsName("polyline")
# B-spline using the given n control points.
bspline = (Literal("B").suppress() +
n_points).setResultsName("bspline")
# Filled B-spline using the given n control points.
filled_bspline = (Literal("b").suppress() +
n_points).setResultsName("filled_bspline")
# Text drawn using the baseline point (x,y). The text consists of the
# n bytes following '-'. The text should be left-aligned (centered,
# right-aligned) on the point if j is -1 (0, 1), respectively. The
# value w gives the width of the text as computed by the library.
text = (Literal("T").suppress() + integer.setResultsName("x") +
integer.setResultsName("y") + justify +
integer.setResultsName("w") + n_bytes).setResultsName("text")
# Externally-specified image drawn in the box with lower left corner
# (x,y) and upper right corner (x+w,y+h). The name of the image
# consists of the n bytes following '-'. This is usually a bitmap
# image. Note that the image size, even when converted from pixels to
# points, might be different from the required size (w,h). It is
# assumed the renderer will perform the necessary scaling.
image = (Literal("I").suppress() + integer.setResultsName("x") +
integer.setResultsName("y") + integer.setResultsName("w") +
integer.setResultsName("h") + n_bytes).setResultsName("image")
# The value of the drawing attributes consists of the concatenation of
# some (multi-)set of the 13 rendering or attribute operations.
value = (Optional(quote).suppress() + OneOrMore(filled_ellipse |
ellipse | filled_polygon | polygon | polyline | bspline |
filled_bspline | text | fill | stroke | font | style | image) +
Optional(quote).suppress()).setResultsName("value")
# Drawing operation.
# draw_ = Literal("_draw_") + Suppress(equals) + value
# # Label drawing.
# ldraw_ = Literal("_ldraw_") + Suppress(equals) + value
# # Edge head arrowhead drawing.
# hdraw_ = Literal("_hdraw_") + Suppress(equals) + value
# # Edge tail arrowhead drawing.
# tdraw_ = Literal("_tdraw_") + Suppress(equals) + value
# # Edge head label drawing.
# hldraw_ = Literal("_hldraw_") + Suppress(equals) + value
# # Edge tail label drawing.
# tldraw_ = Literal("_tldraw_") + Suppress(equals) + value
# Parse actions.
# n_points.setParseAction(self.proc_points)
# Attribute parse actions.
fill.setParseAction(self.proc_fill_color)
stroke.setParseAction(self.proc_stroke_color)
font.setParseAction(self.proc_font)
style.setParseAction(self.proc_style)
# Shape parse actions.
filled_ellipse.setParseAction(self.proc_filled_ellipse)
ellipse.setParseAction(self.proc_unfilled_ellipse)
filled_polygon.setParseAction(self.proc_filled_polygon)
polygon.setParseAction(self.proc_unfilled_polygon)
polyline.setParseAction(self.proc_polyline)
bspline.setParseAction(self.proc_unfilled_bspline)
filled_bspline.setParseAction(self.proc_filled_bspline)
text.setParseAction(self.proc_text)
image.setParseAction(self.proc_image)
return value | Defines xdot grammar.
@see: http://graphviz.org/doc/info/output.html#d:xdot |
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func)) | Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)]) |
def extract_args(self, data):
"""
It extracts irc msg arguments.
"""
args = []
data = data.strip(' ')
if ':' in data:
lhs, rhs = data.split(':', 1)
if lhs: args.extend(lhs.rstrip(' ').split(' '))
args.append(rhs)
else:
args.extend(data.split(' '))
return tuple(args) | It extracts irc msg arguments. |
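A doctest-style sketch, assuming obj is an instance of the enclosing class; everything after ':' is kept as a single trailing argument:
>>> obj.extract_args('#chan :Hello world')
('#chan', 'Hello world')
>>> obj.extract_args('nick user host')
('nick', 'user', 'host')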
def set_pin_retries(ctx, pw_attempts, admin_pin, force):
"""
Manage pin-retries.
Sets the number of attempts available before locking for each PIN.
PW_ATTEMPTS should be three integer values corresponding to the number of
attempts for the PIN, Reset Code, and Admin PIN, respectively.
"""
controller = ctx.obj['controller']
resets_pins = controller.version < (4, 0, 0)
if resets_pins:
click.echo('WARNING: Setting PIN retries will reset the values for all '
'3 PINs!')
force or click.confirm('Set PIN retry counters to: {} {} {}?'.format(
*pw_attempts), abort=True, err=True)
controller.set_pin_retries(*(pw_attempts + (admin_pin.encode('utf8'),)))
click.echo('PIN retries successfully set.')
if resets_pins:
click.echo('Default PINs are set.')
echo_default_pins() | Manage pin-retries.
Sets the number of attempts available before locking for each PIN.
PW_ATTEMPTS should be three integer values corresponding to the number of
attempts for the PIN, Reset Code, and Admin PIN, respectively. |
def dt_weekofyear(x):
"""Returns the week ordinal of the year.
:returns: an expression containing the week ordinal of the year, extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.weekofyear
Expression = dt_weekofyear(date)
Length: 3 dtype: int64 (expression)
-----------------------------------
0 42
1 6
2 46
"""
import pandas as pd
return pd.Series(x).dt.weekofyear.values | Returns the week ordinal of the year.
:returns: an expression containing the week ordinal of the year, extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.weekofyear
Expression = dt_weekofyear(date)
Length: 3 dtype: int64 (expression)
-----------------------------------
0 42
1 6
2 46 |
def close(self):
""" Closes the connection to the serial port and ensure no pending
operatoin are left """
self._serial.write(b"@c")
self._serial.read()
self._serial.close() | Closes the connection to the serial port and ensures no pending
operations are left |
def launch_batch_workflow(self, batch_workflow):
"""Launches GBDX batch workflow.
Args:
batch_workflow (dict): Dictionary specifying batch workflow tasks.
Returns:
Batch Workflow id (str).
"""
# hit workflow api
url = '%(base_url)s/batch_workflows' % {
'base_url': self.base_url
}
try:
r = self.gbdx_connection.post(url, json=batch_workflow)
batch_workflow_id = r.json()['batch_workflow_id']
return batch_workflow_id
except TypeError as e:
self.logger.debug('Batch Workflow not launched, reason: {0}'.format(e)) | Launches GBDX batch workflow.
Args:
batch_workflow (dict): Dictionary specifying batch workflow tasks.
Returns:
Batch Workflow id (str). |
def first_consumed_mesh(self):
"""The first consumed mesh.
:return: the first consumed mesh
:rtype: knittingpattern.Mesh.Mesh
:raises IndexError: if no mesh is consumed
.. seealso:: :attr:`number_of_consumed_meshes`
"""
for instruction in self.instructions:
if instruction.consumes_meshes():
return instruction.first_consumed_mesh
raise IndexError("{} consumes no meshes".format(self)) | The first consumed mesh.
:return: the first consumed mesh
:rtype: knittingpattern.Mesh.Mesh
:raises IndexError: if no mesh is consumed
.. seealso:: :attr:`number_of_consumed_meshes` |
def configure(self, options, conf):
""" Get the options. """
super(S3Logging, self).configure(options, conf)
self.options = options | Get the options. |
def detect_ts(df, max_anoms=0.10, direction='pos',
alpha=0.05, only_last=None, threshold=None,
e_value=False, longterm=False,
piecewise_median_period_weeks=2, plot=False,
y_log=False, xlabel = '', ylabel = 'count',
title=None, verbose=False):
"""
Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of <timestamp, value> pairs.
Args:
x: Time series as a two column data frame where the first column consists of the
timestamps and the second column consists of the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
only_last: Find and report anomalies only within the last day or hr in the time series. Options: (None | 'day' | 'hr')
threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99')
e_value: Add an additional column to the anoms output containing the expected value.
longterm: Increase anom detection efficacy for time series that are greater than a month.
See Details below.
piecewise_median_period_weeks: The piecewise median time window as described in Vallis, Hochenbaum, and Kejariwal (2014). Defaults to 2.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing timestamps, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series
"""
if not isinstance(df, DataFrame):
raise ValueError("data must be a single data frame.")
else:
if len(df.columns) != 2 or not df.iloc[:,1].map(np.isreal).all():
raise ValueError(("data must be a 2 column data.frame, with the"
"first column being a set of timestamps, and "
"the second coloumn being numeric values."))
if (not (df.dtypes[0].type is np.datetime64)
and not (df.dtypes[0].type is np.int64)):
df = format_timestamp(df)
if list(df.columns.values) != ["timestamp", "value"]:
df.columns = ["timestamp", "value"]
# Sanity check all input parameters
if max_anoms > 0.49:
length = len(df.value)
raise ValueError(
("max_anoms must be less than 50% of "
"the data points (max_anoms =%f data_points =%s).")
% (round(max_anoms * length, 0), length))
if not direction in ['pos', 'neg', 'both']:
raise ValueError("direction options are: pos | neg | both.")
if not (0.01 <= alpha <= 0.1):
if verbose:
import warnings
warnings.warn(("alpha is the statistical signifigance, "
"and is usually between 0.01 and 0.1"))
if only_last and not only_last in ['day', 'hr']:
raise ValueError("only_last must be either 'day' or 'hr'")
if not threshold in [None,'med_max','p95','p99']:
raise ValueError("threshold options are: None | med_max | p95 | p99")
if not isinstance(e_value, bool):
raise ValueError("e_value must be a boolean")
if not isinstance(longterm, bool):
raise ValueError("longterm must be a boolean")
if piecewise_median_period_weeks < 2:
raise ValueError(
"piecewise_median_period_weeks must be at greater than 2 weeks")
if not isinstance(plot, bool):
raise ValueError("plot must be a boolean")
if not isinstance(y_log, bool):
raise ValueError("y_log must be a boolean")
if not isinstance(xlabel, string_types):
raise ValueError("xlabel must be a string")
if not isinstance(ylabel, string_types):
raise ValueError("ylabel must be a string")
if title and not isinstance(title, string_types):
raise ValueError("title must be a string")
if not title:
title = ''
else:
title = title + " : "
gran = get_gran(df)
if gran == "day":
num_days_per_line = 7
if isinstance(only_last, string_types) and only_last == 'hr':
only_last = 'day'
else:
num_days_per_line = 1
if gran == 'sec':
df.timestamp = date_format(df.timestamp, "%Y-%m-%d %H:%M:00")
df = format_timestamp(df.groupby('timestamp').aggregate(np.sum))
# if the data is daily, then we need to bump
# the period to weekly to get multiple examples
gran_period = {
'min': 1440,
'hr': 24,
'day': 7
}
period = gran_period.get(gran)
if not period:
raise ValueError('%s granularity detected. This is currently not supported.' % gran)
num_obs = len(df.value)
clamp = (1 / float(num_obs))
if max_anoms < clamp:
max_anoms = clamp
if longterm:
if gran == "day":
num_obs_in_period = period * piecewise_median_period_weeks + 1
num_days_in_period = 7 * piecewise_median_period_weeks + 1
else:
num_obs_in_period = period * 7 * piecewise_median_period_weeks
num_days_in_period = 7 * piecewise_median_period_weeks
last_date = df.timestamp.iloc[-1]
all_data = []
for j in range(0, len(df.timestamp), num_obs_in_period):
start_date = df.timestamp.iloc[j]
end_date = min(start_date
+ datetime.timedelta(days=num_days_in_period),
df.timestamp.iloc[-1])
# if there is at least 14 days left, subset it,
# otherwise subset last_date - 14days
if (end_date - start_date).days == num_days_in_period:
sub_df = df[(df.timestamp >= start_date)
& (df.timestamp < end_date)]
else:
sub_df = df[(df.timestamp >
(last_date - datetime.timedelta(days=num_days_in_period)))
& (df.timestamp <= last_date)]
all_data.append(sub_df)
else:
all_data = [df]
all_anoms = DataFrame(columns=['timestamp', 'value'])
seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])
# Detect anomalies on all data (either entire data in one-pass,
# or in 2 week blocks if longterm=TRUE)
for i in range(len(all_data)):
directions = {
'pos': Direction(True, True),
'neg': Direction(True, False),
'both': Direction(False, True)
}
anomaly_direction = directions[direction]
# detect_anoms actually performs the anomaly detection and
# returns the results in a list containing the anomalies
# as well as the decomposed components of the time series
# for further analysis.
s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms, alpha=alpha,
num_obs_per_period=period,
use_decomp=True,
one_tail=anomaly_direction.one_tail,
upper_tail=anomaly_direction.upper_tail,
verbose=verbose)
# store decomposed components in local variable and overwrite
# s_h_esd_timestamps to contain only the anom timestamps
data_decomp = s_h_esd_timestamps['stl']
s_h_esd_timestamps = s_h_esd_timestamps['anoms']
# -- Step 3: Use detected anomaly timestamps to extract the actual
# anomalies (timestamp and value) from the data
if s_h_esd_timestamps:
anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
else:
anoms = DataFrame(columns=['timestamp', 'value'])
# Filter the anomalies using one of the thresholding functions if applicable
if threshold:
# Calculate daily max values
periodic_maxes = df.groupby(
df.timestamp.map(Timestamp.date)).aggregate(np.max).value
# Calculate the threshold set by the user
if threshold == 'med_max':
thresh = periodic_maxes.median()
elif threshold == 'p95':
thresh = periodic_maxes.quantile(.95)
elif threshold == 'p99':
thresh = periodic_maxes.quantile(.99)
# Remove any anoms below the threshold
anoms = anoms[anoms.value >= thresh]
all_anoms = all_anoms.append(anoms)
seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)
# Cleanup potential duplicates
try:
all_anoms.drop_duplicates(subset=['timestamp'], inplace=True)
seasonal_plus_trend.drop_duplicates(subset=['timestamp'], inplace=True)
except TypeError:
all_anoms.drop_duplicates(cols=['timestamp'], inplace=True)
seasonal_plus_trend.drop_duplicates(cols=['timestamp'], inplace=True)
# -- If only_last was set by the user,
# create subset of the data that represent the most recent day
if only_last:
start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=7)
start_anoms = df.timestamp.iloc[-1] - datetime.timedelta(days=1)
if gran is "day":
breaks = 3 * 12
num_days_per_line = 7
else:
if only_last == 'day':
breaks = 12
else:
start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=2)
# truncate to days
start_date = datetime.date(start_date.year,
start_date.month, start_date.day)
start_anoms = (df.timestamp.iloc[-1]
- datetime.timedelta(hours=1))
breaks = 3
# subset the last days worth of data
x_subset_single_day = df[df.timestamp > start_anoms]
# When plotting anoms for the last day only
# we only show the previous weeks data
x_subset_week = df[(df.timestamp <= start_anoms)
& (df.timestamp > start_date)]
if len(all_anoms) > 0:
all_anoms = all_anoms[all_anoms.timestamp >=
x_subset_single_day.timestamp.iloc[0]]
num_obs = len(x_subset_single_day.value)
# Calculate number of anomalies as a percentage
anom_pct = (len(df.value) / float(num_obs)) * 100
if anom_pct == 0:
return {
"anoms": None,
"plot": None
}
# The original R implementation handles plotting here.
# Plotting is currently not implemented in this version.
# if plot:
# plot_something()
all_anoms.index = all_anoms.timestamp
if e_value:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value,
'expected_value': seasonal_plus_trend[
seasonal_plus_trend.timestamp.isin(
all_anoms.timestamp)].value
}
else:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value
}
anoms = DataFrame(d, index=d['timestamp'].index)
return {
'anoms': anoms,
'plot': None
} | Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of <timestamp, value> pairs.
Args:
x: Time series as a two column data frame where the first column consists of the
timestamps and the second column consists of the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
only_last: Find and report anomalies only within the last day or hr in the time series. Options: (None | 'day' | 'hr')
threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99')
e_value: Add an additional column to the anoms output containing the expected value.
longterm: Increase anom detection efficacy for time series that are greater than a month.
See Details below.
piecewise_median_period_weeks: The piecewise median time window as described in Vallis, Hochenbaum, and Kejariwal (2014). Defaults to 2.
plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
indicated by circles, should also be returned.
y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data.
xlabel: X-axis label to be added to the output plot.
ylabel: Y-axis label to be added to the output plot.
Details
'longterm' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
'title' Title for the output plot.
'verbose' Enable debug messages
The returned value is a dictionary with the following components:
anoms: Data frame containing timestamps, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series |
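A minimal usage sketch, assuming minute-granularity data in a two-column pandas DataFrame; with purely random values the returned anoms frame may be empty or None:
>>> import numpy as np, pandas as pd
>>> ts = pd.date_range('2018-01-01', periods=14 * 1440, freq='T')
>>> df = pd.DataFrame({'timestamp': ts, 'value': np.random.randn(len(ts)) + 100})
>>> results = detect_ts(df, max_anoms=0.02, direction='both')
>>> results['anoms']  # DataFrame of anomaly timestamps and values, or None if nothing was flagged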
def total(self):
"""Return the total number of records"""
if self._result_cache:
return self._result_cache.total
return self.all().total | Return the total number of records |
def masked_local_attention_2d(q,
k,
v,
query_shape=(8, 16),
memory_flange=(8, 16),
name=None):
"""Strided block local self-attention.
Each position in a query block can attend to all the generated queries in
the query block, which are generated in raster scan, and positions that are
generated to the left and top. The shapes are specified by query shape and
memory flange. Note that if you're using this function, you do not need to
right shift. Right shifting happens inside this function separately for each
block.
Args:
q: a Tensor with shape [batch, heads, h, w, depth_k]
k: a Tensor with shape [batch, heads, h, w, depth_k]
v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
implementation, depth_v must be equal to depth_k.
query_shape: a tuple indicating the height and width of each query block.
query_shape = block_shape
memory_flange: an integer indicating how much to look in height and width
from each query block.
memory shape = query_shape + (block_flange[0], 2*block_flange[1])
name: an optional string
Returns:
a Tensor of shape [batch, heads, h, w, depth_v]
"""
with tf.variable_scope(
name, default_name="local_masked_self_attention_2d", values=[q, k, v]):
v_shape = common_layers.shape_list(v)
# Pad query to ensure multiple of corresponding lengths.
q = pad_to_multiple_2d(q, query_shape)
# Set up query blocks.
q_indices = gather_indices_2d(q, query_shape, query_shape)
q_new = gather_blocks_2d(q, q_indices)
# Set up key and value blocks.
k_flange, k_center = get_memory_region(k, query_shape, memory_flange,
q_indices)
v_flange, v_center = get_memory_region(v, query_shape, memory_flange,
q_indices)
if k_flange is not None:
k_new = tf.concat([k_flange, k_center], axis=3)
v_new = tf.concat([v_flange, v_center], axis=3)
else:
k_new = k_center
v_new = v_center
# Set up the masks.
query_elements = np.prod(query_shape)
padding_mask = None
if k_flange is not None:
padding_mask = tf.expand_dims(
embedding_to_padding(k_flange) * -1e9, axis=-2)
padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1])
center_attention_bias = attention_bias_lower_triangle(
np.prod(query_elements))
center_attention_bias = tf.reshape(
center_attention_bias, [1, 1, 1, query_elements, query_elements])
v_center_shape = common_layers.shape_list(v_center)
center_attention_bias = tf.tile(
center_attention_bias,
[v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1])
if padding_mask is not None:
# Combine the mask for padding and visible region.
attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4)
else:
attention_bias = center_attention_bias
output = dot_product_attention(
q_new,
k_new,
v_new,
attention_bias,
dropout_rate=0.,
name="masked_local_2d",
make_image_summary=False)
# Put representations back into original shapes.
padded_q_shape = common_layers.shape_list(q)
output = scatter_blocks_2d(output, q_indices, padded_q_shape)
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0, 0],
[-1, -1, v_shape[2], v_shape[3], -1])
return output | Strided block local self-attention.
Each position in a query block can attend to all the generated queries in
the query block, which are generated in raster scan, and positions that are
generated to the left and top. The shapes are specified by query shape and
memory flange. Note that if you're using this function, you do not need to
right shift. Right shifting happens inside this function separately for each
block.
Args:
q: a Tensor with shape [batch, heads, h, w, depth_k]
k: a Tensor with shape [batch, heads, h, w, depth_k]
v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
implementation, depth_v must be equal to depth_k.
query_shape: a tuple indicating the height and width of each query block.
query_shape = block_shape
memory_flange: an integer indicating how much to look in height and width
from each query block.
memory shape = query_shape + (block_flange[0], 2*block_flange[1])
name: an optional string
Returns:
a Tensor of shape [batch, heads, h, w, depth_v] |
def dataframe_setup(self):
"""
Set-up a report to store the desired header: sanitized string combinations
"""
# Initialise a dictionary to store the sanitized headers and strings
genesippr_dict = dict()
# Try to open all the reports - use pandas to extract the results from any report that exists
try:
sippr_matrix = pd.read_csv(os.path.join(self.reportpath, 'genesippr.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
sippr_matrix = dict()
try:
conf_matrix = pd.read_csv(os.path.join(self.reportpath, 'confindr_report.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
conf_matrix = dict()
try:
gdcs_matrix = pd.read_csv(os.path.join(self.reportpath, 'GDCS.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
gdcs_matrix = dict()
# Populate the header:sanitized string dictionary with results from all strains
for sample in self.metadata:
genesippr_dict[sample.name] = dict()
try:
genesippr_dict[sample.name]['eae'] = self.data_sanitise(sippr_matrix[sample.name]['eae'])
except KeyError:
genesippr_dict[sample.name]['eae'] = 0
try:
genesippr_dict[sample.name]['hlyAEc'] = self.data_sanitise(sippr_matrix[sample.name]['hlyAEc'])
except KeyError:
genesippr_dict[sample.name]['hlyAEc'] = 0
try:
genesippr_dict[sample.name]['VT1'] = self.data_sanitise(sippr_matrix[sample.name]['VT1'])
except KeyError:
genesippr_dict[sample.name]['VT1'] = 0
try:
genesippr_dict[sample.name]['VT2'] = self.data_sanitise(sippr_matrix[sample.name]['VT2'])
except KeyError:
genesippr_dict[sample.name]['VT2'] = 0
try:
genesippr_dict[sample.name]['hlyALm'] = self.data_sanitise(sippr_matrix[sample.name]['hlyALm'])
except KeyError:
genesippr_dict[sample.name]['hlyALm'] = 0
try:
genesippr_dict[sample.name]['IGS'] = self.data_sanitise(sippr_matrix[sample.name]['IGS'])
except KeyError:
genesippr_dict[sample.name]['IGS'] = 0
try:
genesippr_dict[sample.name]['inlJ'] = self.data_sanitise(sippr_matrix[sample.name]['inlJ'])
except KeyError:
genesippr_dict[sample.name]['inlJ'] = 0
try:
genesippr_dict[sample.name]['invA'] = self.data_sanitise(sippr_matrix[sample.name]['invA'])
except KeyError:
genesippr_dict[sample.name]['invA'] = 0
try:
genesippr_dict[sample.name]['stn'] = self.data_sanitise(sippr_matrix[sample.name]['stn'])
except KeyError:
genesippr_dict[sample.name]['stn'] = 0
try:
genesippr_dict[sample.name]['GDCS'] = self.data_sanitise(gdcs_matrix[sample.name]['Pass/Fail'],
header='Pass/Fail')
except KeyError:
genesippr_dict[sample.name]['GDCS'] = 0
try:
genesippr_dict[sample.name]['Contamination'] = self.data_sanitise(
conf_matrix[sample.name]['ContamStatus'], header='ContamStatus')
except KeyError:
genesippr_dict[sample.name]['Contamination'] = 0
try:
genesippr_dict[sample.name]['Coverage'] = self.data_sanitise(
gdcs_matrix[sample.name]['MeanCoverage'], header='MeanCoverage')
except KeyError:
genesippr_dict[sample.name]['Coverage'] = 0
# Create a report from the header: sanitized string dictionary to be used in the creation of the report image
with open(self.image_report, 'w') as csv:
data = '{}\n'.format(','.join(self.header_list))
for strain in sorted(genesippr_dict):
data += '{str},'.format(str=strain)
for header in self.header_list[1:]:
data += '{value},'.format(value=genesippr_dict[strain][header])
data = data.rstrip(',')
data += '\n'
csv.write(data) | Set-up a report to store the desired header: sanitized string combinations |
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
"""This is the core model logic.
Unrolls a Bayesian RNN over the given sequence.
Args:
data_ops: A `sequence_data.SequenceDataOps` namedtuple.
embed_layer: A `snt.Embed` instance.
rnn_core: A `snt.RNNCore` instance.
output_linear: A `snt.Linear` instance.
name_prefix: A string to use to prefix local variable names.
Returns:
A 3D time-major tensor representing the model's logits for a sequence of
predictions. Shape `[time_steps, batch_size, vocab_size]`.
"""
# Embed the input index sequence.
embedded_input_seq = snt.BatchApply(
embed_layer, name="input_embed_seq")(data_ops.sparse_obs)
# Construct variables for holding the RNN state.
initial_rnn_state = nest.map_structure(
lambda t: tf.get_local_variable( # pylint: disable long lambda warning
"{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t),
rnn_core.initial_state(FLAGS.batch_size))
assign_zero_rnn_state = nest.map_structure(
lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))
# Unroll the RNN core over the sequence.
rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
cell=rnn_core,
inputs=embedded_input_seq,
initial_state=initial_rnn_state,
time_major=True)
# Persist the RNN state for the next unroll.
update_rnn_state = nest.map_structure(
tf.assign, initial_rnn_state, rnn_final_state)
with tf.control_dependencies(nest.flatten(update_rnn_state)):
rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
output_logits = snt.BatchApply(
output_linear, name="output_embed_seq")(rnn_output_seq)
return output_logits, assign_zero_rnn_state | This is the core model logic.
Unrolls a Bayesian RNN over the given sequence.
Args:
data_ops: A `sequence_data.SequenceDataOps` namedtuple.
embed_layer: A `snt.Embed` instance.
rnn_core: A `snt.RNNCore` instance.
output_linear: A `snt.Linear` instance.
name_prefix: A string to use to prefix local variable names.
Returns:
A 3D time-major tensor representing the model's logits for a sequence of
predictions. Shape `[time_steps, batch_size, vocab_size]`. |
def get_locations(self, locations, columns=None, **kwargs):
"""
For list of locations and list of columns return a DataFrame of the values.
:param locations: list of index locations
:param columns: list of column names
:param kwargs: will pass along these parameters to the get() method
:return: DataFrame
"""
indexes = [self._index[x] for x in locations]
return self.get(indexes, columns, **kwargs) | For list of locations and list of columns return a DataFrame of the values.
:param locations: list of index locations
:param columns: list of column names
:param kwargs: will pass along these parameters to the get() method
:return: DataFrame |
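A short hedged sketch, assuming df is an instance of the enclosing DataFrame class and 'a' and 'b' are hypothetical column names:
>>> df.get_locations([0, 2], columns=['a', 'b'])
# returns a DataFrame built from the index values found at positions 0 and 2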
def distinct_letters(string_matrix: List[List[str]]) -> Set[str]:
"""
Diagnostic function
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
>>> sorted(dl)
['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w']
"""
return set([letter
for sentence in string_matrix
for word in sentence
for letter in word]) | Diagnostic function
:param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']])
>>> sorted(dl)
['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w'] |
def batch_(self, rpc_calls):
"""Batch RPC call.
Pass array of arrays: [ [ "method", params... ], ... ]
Returns array of results.
"""
batch_data = []
for rpc_call in rpc_calls:
AuthServiceProxy.__id_count += 1
m = rpc_call.pop(0)
batch_data.append({"jsonrpc":"2.0", "method":m, "params":rpc_call, "id":AuthServiceProxy.__id_count})
postdata = json.dumps(batch_data, default=EncodeDecimal)
log.debug("--> "+postdata)
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
results = []
responses = self._get_response()
for response in responses:
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
results.append(response['result'])
return results | Batch RPC call.
Pass array of arrays: [ [ "method", params... ], ... ]
Returns array of results. |
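A hedged usage sketch, assuming proxy is an AuthServiceProxy already connected to a bitcoind node; the actual results depend on the node:
>>> calls = [["getblockhash", 0], ["getblockcount"]]
>>> proxy.batch_(calls)  # e.g. ['<hash of block 0>', <current block height>]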
def new(cls, ns_path, script, campaign_dir, runner_type='Auto',
overwrite=False, optimized=True, check_repo=True):
"""
Create a new campaign from an ns-3 installation and a campaign
directory.
This method will create a DatabaseManager, which will install a
database in the specified campaign_dir. If a database is already
available at the ns_path described in the specified campaign_dir and
its configuration matches config, this instance is used instead. If the
overwrite argument is set to True instead, the specified directory is
wiped and a new campaign is created in its place.
Furthermore, this method will initialize a SimulationRunner, of type
specified by the runner_type parameter, which will be locked on the
ns-3 installation at ns_path and set up to run the desired script.
Finally, note that creation of a campaign requires a git repository to
be initialized at the specified ns_path. This will allow SEM to save
the commit at which the simulations are run, enforce reproducibility
and avoid mixing results coming from different versions of ns-3 and its
libraries.
Args:
ns_path (str): path to the ns-3 installation to employ in this
campaign.
script (str): ns-3 script that will be executed to run simulations.
campaign_dir (str): path to the directory in which to save the
simulation campaign database.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). Use Auto to
automatically pick the best runner.
overwrite (bool): whether to overwrite already existing
campaign_dir folders. This deletes the directory if and only if
it only contains files that were detected to be created by sem.
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build.
"""
# Convert paths to be absolute
ns_path = os.path.abspath(ns_path)
campaign_dir = os.path.abspath(campaign_dir)
# Verify if the specified campaign is already available
if Path(campaign_dir).exists() and not overwrite:
# Try loading
manager = CampaignManager.load(campaign_dir, ns_path,
runner_type=runner_type,
optimized=optimized,
check_repo=check_repo)
if manager.db.get_script() == script:
return manager
else:
del manager
# Initialize runner
runner = CampaignManager.create_runner(ns_path, script,
runner_type=runner_type,
optimized=optimized)
# Get list of parameters to save in the DB
params = runner.get_available_parameters()
# Get current commit
commit = ""
if check_repo:
from git import Repo, exc
commit = Repo(ns_path).head.commit.hexsha
# Create a database manager from the configuration
db = DatabaseManager.new(script=script,
params=params,
commit=commit,
campaign_dir=campaign_dir,
overwrite=overwrite)
return cls(db, runner, check_repo) | Create a new campaign from an ns-3 installation and a campaign
directory.
This method will create a DatabaseManager, which will install a
database in the specified campaign_dir. If a database is already
available at the ns_path described in the specified campaign_dir and
its configuration matches config, this instance is used instead. If the
overwrite argument is set to True instead, the specified directory is
wiped and a new campaign is created in its place.
Furthermore, this method will initialize a SimulationRunner, of type
specified by the runner_type parameter, which will be locked on the
ns-3 installation at ns_path and set up to run the desired script.
Finally, note that creation of a campaign requires a git repository to
be initialized at the specified ns_path. This will allow SEM to save
the commit at which the simulations are run, enforce reproducibility
and avoid mixing results coming from different versions of ns-3 and its
libraries.
Args:
ns_path (str): path to the ns-3 installation to employ in this
campaign.
script (str): ns-3 script that will be executed to run simulations.
campaign_dir (str): path to the directory in which to save the
simulation campaign database.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). Use Auto to
automatically pick the best runner.
overwrite (bool): whether to overwrite already existing
campaign_dir folders. This deletes the directory if and only if
it only contains files that were detected to be created by sem.
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build. |
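A minimal usage sketch with hypothetical paths and script name; ParallelRunner is one of the runner_type values listed above:
campaign = CampaignManager.new('/home/user/ns-3-dev', 'wifi-example',
                               '/home/user/wifi-campaign',
                               runner_type='ParallelRunner', overwrite=False)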
def delete_floatingip(self, floatingip_id):
'''
Deletes the specified floatingip
'''
ret = self.network_conn.delete_floatingip(floatingip_id)
return ret if ret else True | Deletes the specified floatingip |
def _emit_no_set_found(environment_name, product_name):
"""
writes to std out and logs if no connection string is found for deployment
:param environment_name:
:param product_name:
:return:
"""
sys.stdout.write(colorama.Fore.YELLOW + 'No connections found in global config file '
'in environment: {0} for product: {1}'
.format(environment_name, product_name) +
colorama.Fore.RESET)
sys.stdout.write('\n')
logger.warning('No connections found in environment: {0} for product: {1}'
.format(environment_name, product_name)) | writes to std out and logs if no connection string is found for deployment
:param environment_name:
:param product_name:
:return: |
def radius_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def get_wind_url(self):
"""Get wind arrow url."""
wind_direction = self.f_d.get('wind_direction', None)
if wind_direction is not None:
rounded = int(5 * round(float(wind_direction)/5))
return WIND_ARROW_URL.format(rounded) | Get wind arrow url. |
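A worked example of the rounding step (WIND_ARROW_URL is assumed to be a format string with a single placeholder):
# wind_direction = 37  ->  int(5 * round(37 / 5)) = 35
# so the method returns WIND_ARROW_URL.format(35)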
def execute(self):
"""
self.params = {
"ActionScriptType" : "None",
"ExecutableEntityId" : "01pd0000001yXtYAAU",
"IsDumpingHeap" : True,
"Iteration" : 1,
"Line" : 3,
"ScopeId" : "005d0000000xxzsAAA"
}
"""
config.logger.debug('logging self')
config.logger.debug(self.params )
if 'project_name' in self.params:
self.params.pop('project_name', None)
if 'settings' in self.params:
self.params.pop('settings', None)
create_result = config.sfdc_client.create_apex_checkpoint(self.params)
if type(create_result) is list:
create_result = create_result[0]
IndexApexOverlaysCommand(params=self.params).execute()
if type(create_result) is not str and type(create_result) is not unicode:
return json.dumps(create_result)
else:
return create_result | self.params = {
"ActionScriptType" : "None",
"ExecutableEntityId" : "01pd0000001yXtYAAU",
"IsDumpingHeap" : True,
"Iteration" : 1,
"Line" : 3,
"ScopeId" : "005d0000000xxzsAAA"
} |
def _complete_last_byte(self, packet):
"""Pad until the packet length is a multiple of 8 (bytes)."""
padded_size = self.get_size()
padding_bytes = padded_size - len(packet)
if padding_bytes > 0:
packet += Pad(padding_bytes).pack()
return packet | Pad until the packet length is a multiple of 8 (bytes). |
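A worked example of the padding arithmetic, assuming get_size() rounds the message length up to a multiple of 8:
# len(packet) == 13  ->  padded_size == 16, padding_bytes == 3, so Pad(3) is appended
# len(packet) == 16  ->  padded_size == 16, padding_bytes == 0, packet is returned unchanged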
def Division(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Divides one vertex by another
:param left: the vertex to be divided
:param right: the vertex to divide
"""
return Double(context.jvm_view().DivisionVertex, label, cast_to_double_vertex(left), cast_to_double_vertex(right)) | Divides one vertex by another
:param left: the vertex to be divided
:param right: the vertex to divide |
def _unparse_entry_record(self, entry):
"""
:type entry: Dict[string, List[string]]
:param entry: Dictionary holding an entry
"""
for attr_type in sorted(entry.keys()):
for attr_value in entry[attr_type]:
self._unparse_attr(attr_type, attr_value) | :type entry: Dict[string, List[string]]
:param entry: Dictionary holding an entry |
def example_load_data(self):
"""
Load the data
"""
# Feature vector
self.x = constant([[0.7, 0.9]])
# Weight vectors: w1 is the first layer of the neural network, w2 is the second layer
self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))
self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1)) | Load the data |
def get_github_hostname_user_repo_from_url(url):
"""Return hostname, user and repository to fork from.
:param url: The URL to parse
:return: hostname, user, repository
"""
parsed = parse.urlparse(url)
if parsed.netloc == '':
# Probably ssh
host, sep, path = parsed.path.partition(":")
if "@" in host:
username, sep, host = host.partition("@")
else:
path = parsed.path[1:].rstrip('/')
host = parsed.netloc
user, repo = path.split("/", 1)
return host, user, repo[:-4] if repo.endswith('.git') else repo | Return hostname, user and repository to fork from.
:param url: The URL to parse
:return: hostname, user, repository |
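Two doctest-style examples covering the HTTPS and SSH URL forms (user and repository names are hypothetical):
>>> get_github_hostname_user_repo_from_url('https://github.com/someuser/someproject.git')
('github.com', 'someuser', 'someproject')
>>> get_github_hostname_user_repo_from_url('git@github.com:someuser/someproject.git')
('github.com', 'someuser', 'someproject')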
def add_device_not_active_callback(self, callback):
"""Register callback to be invoked when a device is not responding."""
_LOGGER.debug('Added new callback %s ', callback)
self._cb_device_not_active.append(callback) | Register callback to be invoked when a device is not responding. |
def get_as_type_with_default(self, index, value_type, default_value):
"""
Converts array element into a value defined by specified typecode.
If conversion is not possible it returns default value.
:param index: an index of element to get.
:param value_type: the TypeCode that defined the type of the result
:param default_value: the default value
:return: element value defined by the typecode or default value if conversion is not supported.
"""
value = self[index]
return TypeConverter.to_type_with_default(value_type, value, default_value) | Converts array element into a value defined by specified typecode.
If conversion is not possible it returns default value.
:param index: an index of element to get.
:param value_type: the TypeCode that defined the type of the result
:param default_value: the default value
:return: element value defined by the typecode or default value if conversion is not supported. |
def get_ip_prefixes_from_bird(filename):
"""Build a list of IP prefixes found in Bird configuration.
Arguments:
filename (str): The absolute path of the Bird configuration file.
Notes:
It can only parse a file with the following format
define ACAST_PS_ADVERTISE =
[
10.189.200.155/32,
10.189.200.255/32
];
Returns:
A list of IP prefixes.
"""
prefixes = []
with open(filename, 'r') as bird_conf:
lines = bird_conf.read()
for line in lines.splitlines():
line = line.strip(', ')
if valid_ip_prefix(line):
prefixes.append(line)
return prefixes | Build a list of IP prefixes found in Bird configuration.
Arguments:
filename (str): The absolute path of the Bird configuration file.
Notes:
It can only parse a file with the following format
define ACAST_PS_ADVERTISE =
[
10.189.200.155/32,
10.189.200.255/32
];
Returns:
A list of IP prefixes. |
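A hedged usage sketch; the path is hypothetical and the file is expected to follow the format shown in the docstring:
prefixes = get_ip_prefixes_from_bird('/etc/bird.d/anycast-prefixes.conf')
# -> ['10.189.200.155/32', '10.189.200.255/32'] for the example file above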
def create_access_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an unauthorized request:
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_access_token_request(
request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(
request.client_key,
request.resource_owner_key,
request)
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code | Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an an unauthorized request:
>>> b
''
>>> s
401 |
def chmod(self, mode):
"""
Change the mode (permissions) of this file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param mode: new permissions
@type mode: int
"""
self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode))
attr = SFTPAttributes()
attr.st_mode = mode
self.sftp._request(CMD_FSETSTAT, self.handle, attr) | Change the mode (permissions) of this file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param mode: new permissions
@type mode: int |
def compute_all_sg_permutations(positions, # scaled positions
rotations, # scaled
translations, # scaled
lattice, # column vectors
symprec):
"""Compute a permutation for every space group operation.
See 'compute_permutation_for_rotation' for more info.
Output has shape (num_rot, num_pos)
"""
out = [] # Finally the shape is fixed as (num_sym, num_pos_of_supercell).
for (sym, t) in zip(rotations, translations):
rotated_positions = np.dot(positions, sym.T) + t
out.append(compute_permutation_for_rotation(positions,
rotated_positions,
lattice,
symprec))
return np.array(out, dtype='intc', order='C') | Compute a permutation for every space group operation.
See 'compute_permutation_for_rotation' for more info.
Output has shape (num_rot, num_pos) |
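A minimal sketch with a single identity operation on a hypothetical two-atom cell; since the identity maps every atom onto itself, the expected permutation row is [0, 1]:
import numpy as np
positions = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])   # scaled positions
rotations = np.array([np.eye(3, dtype='intc')])            # one identity rotation
translations = np.zeros((1, 3))                            # no translation
lattice = np.eye(3)                                        # cubic lattice, column vectors
perms = compute_all_sg_permutations(positions, rotations, translations, lattice, 1e-5)
# expected: array([[0, 1]], dtype=intc)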