code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k)
---|---|
def to_string(x):
"""
Utf8 conversion
:param x:
:return:
"""
if isinstance(x, bytes):
return x.decode('utf-8')
if isinstance(x, basestring):
return x | Utf8 conversion
:param x:
:return: |
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = os.path.split(path)
base_filename, sep, _ = tail.partition('.')
if not hasattr(sys, 'implementation'):
# Python <= 3.2
raise NotImplementedError('No sys.implementation')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([base_filename, sep, tag, suffixes[0]])
return os.path.join(head, _PYCACHE, filename) | Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised. |
def Parse(self,url,song_name,flag):
'''
It will return the resource URL if the song is found,
Otherwise it will return the list of songs that can be downloaded
'''
file_download=FileDownload()
html=file_download.get_html_response(url)
if flag == False:
soup=BeautifulSoup(html)
a_list=soup.findAll('a','touch')
#print a_list
text=[str(x) for x in a_list]
text=''.join(text)
text=text.lower()
string1='download in 48 kbps'
string2='download in 128 kbps'
string3='download in 320 kbps'
href=''
if string3 in text:
print 'Downloading in 320 kbps'
href=a_list[2].get('href')
elif string2 in text:
print 'Downloading in 128 kbps'
href=a_list[1].get('href')
elif string1 in text:
print 'Downloading in 48 kbps'
href=a_list[0].get('href')
else:
self.missing_schema(html,song_name)
quit()
return href
else:
x,href=self.check_if_song_name(html)
links = []
if x==True:
links=self.list_of_all_href(html)
else:
file_download=FileDownload()
file_download.file_download_cross_platform(href)
quit()
return links | It will return the resource URL if the song is found,
Otherwise it will return the list of songs that can be downloaded |
def get_file_link(self, file_key):
'''Gets link to file
Args:
file_key key for the file
return (status code, ?)
'''
#does not work
self._raise_unimplemented_error()
uri = '/'.join([self.api_uri,
self.files_suffix,
file_key,
self.file_link_suffix,
])
return self._req('get', uri) | Gets link to file
Args:
file_key key for the file
return (status code, ?) |
def check_exc_info(self, node):
"""
Reports a violation if exc_info keyword is used with logging.error or logging.exception.
"""
if self.current_logging_level not in ('error', 'exception'):
return
for kw in node.keywords:
if kw.arg == 'exc_info':
if self.current_logging_level == 'error':
violation = ERROR_EXC_INFO_VIOLATION
else:
violation = REDUNDANT_EXC_INFO_VIOLATION
self.violations.append((node, violation)) | Reports a violation if exc_info keyword is used with logging.error or logging.exception. |
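A hypothetical illustration of the two patterns this check targets (the violation constants themselves are defined elsewhere in the linter):
import logging

try:
    1 / 0
except ZeroDivisionError:
    logging.error('division failed', exc_info=True)       # flagged: prefer logging.exception over error(..., exc_info=True)
    logging.exception('division failed', exc_info=True)    # flagged: exc_info is redundant, exception() already records it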
def rm_parameter(self, name):
"""
Removes a parameter from the existing Datamat.
Fails if parameter doesn't exist.
"""
if name not in self._parameters:
raise ValueError("no '%s' parameter found" % (name))
del self._parameters[name]
del self.__dict__[name] | Removes a parameter from the existing Datamat.
Fails if parameter doesn't exist. |
def scale_subplots(subplots=None, xlim='auto', ylim='auto'):
"""Set the x and y axis limits for a collection of subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
xlim : None | 'auto' | (xmin, xmax)
'auto' : sets the limits according to the most
extreme values of data encountered.
ylim : None | 'auto' | (ymin, ymax)
"""
auto_axis = ''
if xlim == 'auto':
auto_axis += 'x'
if ylim == 'auto':
auto_axis += 'y'
autoscale_subplots(subplots, auto_axis)
for loc, ax in numpy.ndenumerate(subplots):
if 'x' not in auto_axis:
ax.set_xlim(xlim)
if 'y' not in auto_axis:
ax.set_ylim(ylim) | Set the x and y axis limits for a collection of subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
xlim : None | 'auto' | (xmin, xmax)
'auto' : sets the limits according to the most
extreme values of data encountered.
ylim : None | 'auto' | (ymin, ymax) |
def build_modules(is_training, vocab_size):
"""Construct the modules used in the graph."""
# Construct the custom getter which implements Bayes by Backprop.
if is_training:
estimator_mode = tf.constant(bbb.EstimatorModes.sample)
else:
estimator_mode = tf.constant(bbb.EstimatorModes.mean)
lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
posterior_builder=lstm_posterior_builder,
prior_builder=custom_scale_mixture_prior_builder,
kl_builder=bbb.stochastic_kl_builder,
sampling_mode_tensor=estimator_mode)
non_lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
posterior_builder=non_lstm_posterior_builder,
prior_builder=custom_scale_mixture_prior_builder,
kl_builder=bbb.stochastic_kl_builder,
sampling_mode_tensor=estimator_mode)
embed_layer = snt.Embed(
vocab_size=vocab_size,
embed_dim=FLAGS.embedding_size,
custom_getter=non_lstm_bbb_custom_getter,
name="input_embedding")
cores = []
for i in range(FLAGS.n_layers):
cores.append(
snt.LSTM(FLAGS.hidden_size,
custom_getter=lstm_bbb_custom_getter,
forget_bias=0.0,
name="lstm_layer_{}".format(i)))
rnn_core = snt.DeepRNN(
cores,
skip_connections=False,
name="deep_lstm_core")
# Do BBB on weights but not biases of output layer.
output_linear = snt.Linear(
vocab_size, custom_getter={"w": non_lstm_bbb_custom_getter})
return embed_layer, rnn_core, output_linear | Construct the modules used in the graph. |
def visitArrayExpr(self, ctx: jsgParser.ArrayExprContext):
""" arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET; """
from pyjsg.parser_impl.jsg_ebnf_parser import JSGEbnf
from pyjsg.parser_impl.jsg_valuetype_parser import JSGValueType
self._types = [JSGValueType(self._context, vt) for vt in ctx.valueType()]
if ctx.ebnfSuffix():
self._ebnf = JSGEbnf(self._context, ctx.ebnfSuffix()) | arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET; |
def send_s3_xsd(self, url_xsd): # pragma: no cover
"""This method will not be re-run always, only locally and when xsd
are regenerated, read the test_008_force_s3_creation on test folder
"""
if self.check_s3(self.domain, urlparse(url_xsd).path[1:]):
return url_xsd
response = urllib2.urlopen(url_xsd)
content = response.read()
cached = NamedTemporaryFile(delete=False)
named = cached.name
# Find all urls in the main xslt file.
urls = re.findall(r'href=[\'"]?([^\'" >]+)', content)
# mapping in the main file the url's
for original_url in urls:
content = content.replace(
original_url, self.s3_url(original_url))
with cached as cache:
cache.write(content)
created_url = self.cache_s3(url_xsd, named)
print('Created Url Ok!: %s' % created_url)
# Mapping all internal url in the file to s3 cached env.
for original_url in urls:
# Expecting 1 level of deepest links in xsd if more, refactor this.
response = urllib2.urlopen(original_url)
content = response.read()
# Find all urls in the main xslt file.
in_urls = re.findall(r'href=[\'"]?([^\'" >]+)', content)
# mapping in the main file the url's
for orig_url in in_urls:
content = content.replace(
orig_url, self.s3_url(orig_url))
cached = NamedTemporaryFile(delete=False)
with cached as cache:
cache.write(content)
named = cached.name
new_url = self.cache_s3(original_url, named)
print('Created Url Ok!: %s' % new_url)
return created_url | This method is not always re-run; it runs only locally and when the XSDs
are regenerated. See test_008_force_s3_creation in the test folder. |
def _add_references(self, rec):
""" Adds the reference to the record """
for ref in self.document.getElementsByTagName('ref'):
for ref_type, doi, authors, collaboration, journal, volume, page, year,\
label, arxiv, publisher, institution, unstructured_text,\
external_link, report_no, editors in self._get_reference(ref):
subfields = []
if doi:
subfields.append(('a', doi))
for author in authors:
subfields.append(('h', author))
for editor in editors:
subfields.append(('e', editor))
if year:
subfields.append(('y', year))
if unstructured_text:
if page:
subfields.append(('m', unstructured_text + ', ' + page))
else:
subfields.append(('m', unstructured_text))
if collaboration:
subfields.append(('c', collaboration))
if institution:
subfields.append(('m', institution))
if publisher:
subfields.append(('p', publisher))
if arxiv:
subfields.append(('r', arxiv))
if report_no:
subfields.append(('r', report_no))
if external_link:
subfields.append(('u', external_link))
if label:
subfields.append(('o', label))
if ref_type == 'book':
if journal:
subfields.append(('t', journal))
if volume:
subfields.append(('m', volume))
elif page and not unstructured_text:
subfields.append(('m', page))
else:
if volume and page:
subfields.append(('s', journal + "," + volume + "," + page))
elif journal:
subfields.append(('t', journal))
if ref_type:
subfields.append(('d', ref_type))
if not subfields:
#misc-type references
try:
r = ref.getElementsByTagName('mixed-citation')[0]
text = xml_to_text(r)
label = text.split()[0]
text = " ".join(text.split()[1:])
subfields.append(('s', text))
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
except IndexError:
#references without 'mixed-citation' tag
try:
r = ref.getElementsByTagName('note')[0]
subfields.append(('s', xml_to_text(r)))
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
except IndexError:
#references without 'note' tag
subfields.append(('s', xml_to_text(ref)))
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
else:
record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields) | Adds the reference to the record |
def log_loss(oracle, test_seq, ab=[], m_order=None, verbose=False):
""" Evaluate the average log-loss of a sequence given an oracle """
if not ab:
ab = oracle.get_alphabet()
if verbose:
print(' ')
logP = 0.0
context = []
increment = np.floor((len(test_seq) - 1) / 100)
bar_count = -1
maxContextLength = 0
avgContext = 0
for i, t in enumerate(test_seq):
p, c = predict(oracle, context, ab, verbose=False)
if len(c) < len(context):
context = context[-len(c):]
logP -= np.log2(p[ab[t]])
context.append(t)
if m_order is not None:
if len(context) > m_order:
context = context[-m_order:]
avgContext += float(len(context)) / len(test_seq)
if verbose:
percentage = np.mod(i, increment)
if percentage == 0:
bar_count += 1
if len(context) > maxContextLength:
maxContextLength = len(context)
sys.stdout.write('\r')
sys.stdout.write("\r[" + "=" * bar_count +
" " * (100 - bar_count) + "] " +
str(bar_count) + "% " +
str(i) + "/" + str(len(test_seq) - 1) + " Current max length: " + str(
maxContextLength))
sys.stdout.flush()
return logP / len(test_seq), avgContext | Evaluate the average log-loss of a sequence given an oracle |
def _sort_resources_per_hosting_device(resources):
"""This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
}
"""
hosting_devices = {}
for key in resources.keys():
for r in resources.get(key) or []:
if r.get('hosting_device') is None:
continue
hd_id = r['hosting_device']['id']
hosting_devices.setdefault(hd_id, {})
hosting_devices[hd_id].setdefault(key, []).append(r)
return hosting_devices | This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
} |
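A small, self-contained illustration of the grouping (the router ids and dict shapes here are made up for the example):
resources = {
    'routers': [
        {'id': 'r1', 'hosting_device': {'id': 'hd1'}},
        {'id': 'r2', 'hosting_device': {'id': 'hd2'}},
        {'id': 'r3', 'hosting_device': None},            # skipped: no hosting device
    ],
    'removed_routers': [{'id': 'r4', 'hosting_device': {'id': 'hd1'}}],
}
hosting_devices = {}
for key, items in resources.items():
    for r in items or []:
        if r.get('hosting_device') is None:
            continue
        hd_id = r['hosting_device']['id']
        hosting_devices.setdefault(hd_id, {}).setdefault(key, []).append(r)
# hosting_devices == {'hd1': {'routers': [r1], 'removed_routers': [r4]},
#                     'hd2': {'routers': [r2]}}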
def stop(self):
"""Return the last day of the period as an Instant instance.
>>> period('year', 2014).stop
Instant((2014, 12, 31))
>>> period('month', 2014).stop
Instant((2014, 12, 31))
>>> period('day', 2014).stop
Instant((2014, 12, 31))
>>> period('year', '2012-2-29').stop
Instant((2013, 2, 28))
>>> period('month', '2012-2-29').stop
Instant((2012, 3, 28))
>>> period('day', '2012-2-29').stop
Instant((2012, 2, 29))
>>> period('year', '2012-2-29', 2).stop
Instant((2014, 2, 28))
>>> period('month', '2012-2-29', 2).stop
Instant((2012, 4, 28))
>>> period('day', '2012-2-29', 2).stop
Instant((2012, 3, 1))
"""
unit, start_instant, size = self
year, month, day = start_instant
if unit == ETERNITY:
return Instant((float("inf"), float("inf"), float("inf")))
if unit == 'day':
if size > 1:
day += size - 1
month_last_day = calendar.monthrange(year, month)[1]
while day > month_last_day:
month += 1
if month == 13:
year += 1
month = 1
day -= month_last_day
month_last_day = calendar.monthrange(year, month)[1]
else:
if unit == 'month':
month += size
while month > 12:
year += 1
month -= 12
else:
assert unit == 'year', 'Invalid unit: {} of type {}'.format(unit, type(unit))
year += size
day -= 1
if day < 1:
month -= 1
if month == 0:
year -= 1
month = 12
day += calendar.monthrange(year, month)[1]
else:
month_last_day = calendar.monthrange(year, month)[1]
if day > month_last_day:
month += 1
if month == 13:
year += 1
month = 1
day -= month_last_day
return Instant((year, month, day)) | Return the last day of the period as an Instant instance.
>>> period('year', 2014).stop
Instant((2014, 12, 31))
>>> period('month', 2014).stop
Instant((2014, 12, 31))
>>> period('day', 2014).stop
Instant((2014, 12, 31))
>>> period('year', '2012-2-29').stop
Instant((2013, 2, 28))
>>> period('month', '2012-2-29').stop
Instant((2012, 3, 28))
>>> period('day', '2012-2-29').stop
Instant((2012, 2, 29))
>>> period('year', '2012-2-29', 2).stop
Instant((2014, 2, 28))
>>> period('month', '2012-2-29', 2).stop
Instant((2012, 4, 28))
>>> period('day', '2012-2-29', 2).stop
Instant((2012, 3, 1)) |
def brightness(im):
'''
Return the brightness of an image
Args:
im(numpy): image
Returns:
float, average brightness of an image
'''
im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(im_hsv)
height, width = v.shape[:2]
total_bright = 0
for i in v:
total_bright = total_bright+sum(i)
return float(total_bright)/(height*width) | Return the brightness of an image
Args:
im(numpy): image
Returns:
float, average brightness of an image |
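The per-row loop above is equivalent to taking the mean of the V channel; a vectorized sketch (assuming OpenCV and NumPy, as in the original):
import cv2
import numpy as np

def brightness_fast(im):
    # Mean of the HSV value channel; matches the summed-loop result above.
    v = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)[:, :, 2]
    return float(np.mean(v))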
def complete_extra(self, args):
"Completions for the 'extra' command."
# treat the last arg as a path and complete it
if len(args) == 0:
return self._listdir('./')
return self._complete_path(args[-1]) | Completions for the 'extra' command. |
def setEditorData( self, editor, value ):
"""
Sets the value for the given editor to the inputted value.
:param editor | <QWidget>
value | <variant>
"""
# set the data for a multitagedit
if ( isinstance(editor, XMultiTagEdit) ):
if ( not isinstance(value, list) ):
value = [nativestring(value)]
else:
value = map(nativestring, value)
editor.setTags(value)
editor.setCurrentItem(editor.createItem())
# set the data for a combobox
elif ( isinstance(editor, QComboBox) ):
i = editor.findText(nativestring(value))
editor.setCurrentIndex(i)
editor.lineEdit().selectAll()
# set the data for a line edit
elif ( isinstance(editor, QLineEdit) ):
editor.setText(nativestring(value))
editor.selectAll() | Sets the value for the given editor to the inputted value.
:param editor | <QWidget>
value | <variant> |
def strfdelta(tdelta: Union[datetime.timedelta, int, float, str],
fmt='{D:02}d {H:02}h {M:02}m {S:02}s',
inputtype='timedelta'):
"""
Convert a ``datetime.timedelta`` object or a regular number to a custom-
formatted string, just like the ``strftime()`` method does for
``datetime.datetime`` objects.
The ``fmt`` argument allows custom formatting to be specified. Fields can
include ``seconds``, ``minutes``, ``hours``, ``days``, and ``weeks``. Each
field is optional.
Some examples:
.. code-block:: none
'{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)
'{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02'
'{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02'
'{H}h {S}s' --> '72h 800s'
The ``inputtype`` argument allows ``tdelta`` to be a regular number,
instead of the default behaviour of treating it as a ``datetime.timedelta``
object. Valid ``inputtype`` strings:
.. code-block:: none
'timedelta', # treats input as a datetime.timedelta
's', 'seconds',
'm', 'minutes',
'h', 'hours',
'd', 'days',
'w', 'weeks'
Modified from
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string
""" # noqa
# Convert tdelta to integer seconds.
if inputtype == 'timedelta':
remainder = int(tdelta.total_seconds())
elif inputtype in ['s', 'seconds']:
remainder = int(tdelta)
elif inputtype in ['m', 'minutes']:
remainder = int(tdelta) * 60
elif inputtype in ['h', 'hours']:
remainder = int(tdelta) * 3600
elif inputtype in ['d', 'days']:
remainder = int(tdelta) * 86400
elif inputtype in ['w', 'weeks']:
remainder = int(tdelta) * 604800
else:
raise ValueError("Bad inputtype: {}".format(inputtype))
f = Formatter()
desired_fields = [field_tuple[1] for field_tuple in f.parse(fmt)]
possible_fields = ('W', 'D', 'H', 'M', 'S')
constants = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
values = {}
for field in possible_fields:
if field in desired_fields and field in constants:
values[field], remainder = divmod(remainder, constants[field])
return f.format(fmt, **values) | Convert a ``datetime.timedelta`` object or a regular number to a custom-
formatted string, just like the ``strftime()`` method does for
``datetime.datetime`` objects.
The ``fmt`` argument allows custom formatting to be specified. Fields can
include ``seconds``, ``minutes``, ``hours``, ``days``, and ``weeks``. Each
field is optional.
Some examples:
.. code-block:: none
'{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)
'{W}w {D}d {H}:{M:02}:{S:02}' --> '4w 5d 8:04:02'
'{D:2}d {H:2}:{M:02}:{S:02}' --> ' 5d 8:04:02'
'{H}h {S}s' --> '72h 800s'
The ``inputtype`` argument allows ``tdelta`` to be a regular number,
instead of the default behaviour of treating it as a ``datetime.timedelta``
object. Valid ``inputtype`` strings:
.. code-block:: none
'timedelta', # treats input as a datetime.timedelta
's', 'seconds',
'm', 'minutes',
'h', 'hours',
'd', 'days',
'w', 'weeks'
Modified from
https://stackoverflow.com/questions/538666/python-format-timedelta-to-string |
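Assuming the function above is importable, a few illustrative calls:
import datetime

delta = datetime.timedelta(days=5, hours=8, minutes=4, seconds=2)
print(strfdelta(delta))                                  # '05d 08h 04m 02s'
print(strfdelta(delta, fmt='{D}d {H}:{M:02}:{S:02}'))    # '5d 8:04:02'
print(strfdelta(90, fmt='{M}m {S:02}s', inputtype='s'))  # '1m 30s'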
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype('int64')
return result | Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame |
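With a recent pandas, the behaviour can be seen directly on a TimedeltaIndex (the presence of NaT forces the float path described above; printed layout shown approximately):
import pandas as pd

tdi = pd.to_timedelta(['1 days 02:03:04', pd.NaT])
print(tdi.components)
# Roughly:
#    days  hours  minutes  seconds  milliseconds  microseconds  nanoseconds
# 0   1.0    2.0      3.0      4.0           0.0           0.0          0.0
# 1   NaN    NaN      NaN      NaN           NaN           NaN          NaN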
def _structure_frozenset(self, obj, cl):
"""Convert an iterable into a potentially generic frozenset."""
if is_bare(cl) or cl.__args__[0] is Any:
return frozenset(obj)
else:
elem_type = cl.__args__[0]
dispatch = self._structure_func.dispatch
return frozenset(dispatch(elem_type)(e, elem_type) for e in obj) | Convert an iterable into a potentially generic frozenset. |
def _handle_msg(self, msg):
"""When a BGP message is received, send it to peer.
Open messages are validated here. Peer handler is called to handle each
message except for *Open* and *Notification* message. On receiving
*Notification* message we close connection with peer.
"""
LOG.debug('Received msg from %s << %s', self._remotename, msg)
# If we receive open message we try to bind to protocol
if msg.type == BGP_MSG_OPEN:
if self.state == BGP_FSM_OPEN_SENT:
# Validate open message.
self._validate_open_msg(msg)
self.recv_open_msg = msg
self.state = BGP_FSM_OPEN_CONFIRM
self._peer.state.bgp_state = self.state
# Try to bind this protocol to peer.
self._is_bound = self._peer.bind_protocol(self)
# If this protocol failed to bind to peer.
if not self._is_bound:
# Failure to bind to peer indicates connection collision
# resolution choose different instance of protocol and this
# instance has to close. Before closing it sends
# appropriate notification msg. to peer.
raise bgp.CollisionResolution()
# If peer sends Hold Time as zero, then according to RFC we do
# not set Hold Time and Keep Alive timer.
if msg.hold_time == 0:
LOG.info('The Hold Time sent by the peer is zero, hence '
'not setting any Hold Time and Keep Alive'
' timers.')
else:
# Start Keep Alive timer considering Hold Time preference
# of the peer.
self._start_timers(msg.hold_time)
self._send_keepalive()
# Peer does not see open message.
return
else:
# If we receive a Open message out of order
LOG.error('Open message received when current state is not '
'OpenSent')
# Received out-of-order open message
# We raise Finite state machine error
raise bgp.FiniteStateMachineError()
elif msg.type == BGP_MSG_NOTIFICATION:
if self._peer:
self._signal_bus.bgp_notification_received(self._peer, msg)
# If we receive notification message
LOG.error('Received notification message, hence closing '
'connection %s', msg)
self._socket.close()
return
# If we receive keepalive or update message, we reset expire timer.
if (msg.type == BGP_MSG_KEEPALIVE or
msg.type == BGP_MSG_UPDATE):
if self._expiry:
self._expiry.reset()
# Call peer message handler for appropriate messages.
if (msg.type in
(BGP_MSG_UPDATE, BGP_MSG_KEEPALIVE, BGP_MSG_ROUTE_REFRESH)):
self._peer.handle_msg(msg)
# We give chance to other threads to run.
self.pause(0) | When a BGP message is received, send it to peer.
Open messages are validated here. Peer handler is called to handle each
message except for *Open* and *Notification* message. On receiving
*Notification* message we close connection with peer. |
def add_fs(self, name, fs, write=False, priority=0):
# type: (Text, FS, bool, int) -> None
"""Add a filesystem to the MultiFS.
Arguments:
name (str): A unique name to refer to the filesystem being
added.
fs (FS or str): The filesystem (instance or URL) to add.
write (bool): If this value is True, then the ``fs`` will
be used as the writeable FS (defaults to False).
priority (int): An integer that denotes the priority of the
filesystem being added. Filesystems will be searched in
descending priority order and then by the reverse order
they were added. So by default, the most recently added
filesystem will be looked at first.
"""
if isinstance(fs, text_type):
fs = open_fs(fs)
if not isinstance(fs, FS):
raise TypeError("fs argument should be an FS object or FS URL")
self._filesystems[name] = _PrioritizedFS(
priority=(priority, self._sort_index), fs=fs
)
self._sort_index += 1
self._resort()
if write:
self.write_fs = fs
self._write_fs_name = name | Add a filesystem to the MultiFS.
Arguments:
name (str): A unique name to refer to the filesystem being
added.
fs (FS or str): The filesystem (instance or URL) to add.
write (bool): If this value is True, then the ``fs`` will
be used as the writeable FS (defaults to False).
priority (int): An integer that denotes the priority of the
filesystem being added. Filesystems will be searched in
descending priority order and then by the reverse order
they were added. So by default, the most recently added
filesystem will be looked at first. |
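A minimal sketch using the pyfilesystem2 package (`fs`); the filesystem names and priorities here are arbitrary:
from fs.memoryfs import MemoryFS
from fs.multifs import MultiFS

multi = MultiFS()
multi.add_fs('overrides', MemoryFS(), write=True, priority=1)
multi.add_fs('defaults', MemoryFS(), priority=0)
multi.writetext('/config.txt', 'hello')   # writes go to the writeable FS ('overrides')
print(multi.readtext('/config.txt'))      # reads search filesystems in priority order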
def remove_ip(enode, portlbl, addr, shell=None):
"""
Remove an IP address from an interface.
All parameters left as ``None`` are ignored and thus no configuration
action is taken for that parameter (left "as-is").
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str portlbl: Port label to configure. Port label will be mapped to
real port automatically.
:param str addr: IPv4 or IPv6 address to remove from the interface:
- IPv4 address to remove from the interface in the form
``'192.168.20.20'`` or ``'192.168.20.20/24'``.
- IPv6 address to remove from the interface in the form
``'2001::1'`` or ``'2001::1/120'``.
:param str shell: Shell name to execute commands.
If ``None``, use the Engine Node default shell.
"""
assert portlbl
assert ip_interface(addr)
port = enode.ports[portlbl]
cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)
response = enode(cmd, shell=shell)
assert not response | Remove an IP address from an interface.
All parameters left as ``None`` are ignored and thus no configuration
action is taken for that parameter (left "as-is").
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str portlbl: Port label to configure. Port label will be mapped to
real port automatically.
:param str addr: IPv4 or IPv6 address to remove from the interface:
- IPv4 address to remove from the interface in the form
``'192.168.20.20'`` or ``'192.168.20.20/24'``.
- IPv6 address to remove from the interface in the form
``'2001::1'`` or ``'2001::1/120'``.
:param str shell: Shell name to execute commands.
If ``None``, use the Engine Node default shell. |
def send(self, target, nick, msg, msgtype, ignore_length=False, filters=None):
"""Send a message.
Records the message in the log.
"""
if not isinstance(msg, str):
raise Exception("Trying to send a %s to irc, only strings allowed." % type(msg).__name__)
if filters is None:
filters = self.outputfilter[target]
for i in filters:
if target != self.config['core']['ctrlchan']:
msg = i(msg)
# Avoid spam from commands that produce excessive output.
if not ignore_length:
# Ignore everything after the first 800 chars.
msg = misc.truncate_msg(msg, 800)
# We can't send messages > 512 bytes to irc.
max_len = misc.get_max_length(target, msgtype)
msgs = self.build_split_msg(msg, max_len)
for i in msgs:
self.do_log(target, nick, i, msgtype)
if msgtype == 'action':
self.rate_limited_send('action', target, i)
else:
self.rate_limited_send('privmsg', target, i) | Send a message.
Records the message in the log. |
def policy_present(name, rules):
'''
Ensure a Vault policy with the given name and rules is present.
name
The name of the policy
rules
Rules formatted as in-line HCL
.. code-block:: yaml
demo-policy:
vault.policy_present:
- name: foo/bar
- rules: |
path "secret/top-secret/*" {
policy = "deny"
}
path "secret/not-very-secret/*" {
policy = "write"
}
'''
url = "v1/sys/policy/{0}".format(name)
response = __utils__['vault.make_request']('GET', url)
try:
if response.status_code == 200:
return _handle_existing_policy(name, rules, response.json()['rules'])
elif response.status_code == 404:
return _create_new_policy(name, rules)
else:
response.raise_for_status()
except Exception as e:
return {
'name': name,
'changes': {},
'result': False,
'comment': 'Failed to get policy: {0}'.format(e)
} | Ensure a Vault policy with the given name and rules is present.
name
The name of the policy
rules
Rules formatted as in-line HCL
.. code-block:: yaml
demo-policy:
vault.policy_present:
- name: foo/bar
- rules: |
path "secret/top-secret/*" {
policy = "deny"
}
path "secret/not-very-secret/*" {
policy = "write"
} |
def plot_di(fignum, DIblock):
global globals
"""
plots directions on equal area net
Parameters
_________
fignum : matplotlib figure number
DIblock : nested list of dec, inc pairs
"""
X_down, X_up, Y_down, Y_up = [], [], [], [] # initialize some variables
plt.figure(num=fignum)
#
# plot the data - separate upper and lower hemispheres
#
for rec in DIblock:
Up, Down = 0, 0
XY = pmag.dimap(rec[0], rec[1])
if rec[1] >= 0:
X_down.append(XY[0])
Y_down.append(XY[1])
else:
X_up.append(XY[0])
Y_up.append(XY[1])
#
if len(X_down) > 0:
# plt.scatter(X_down,Y_down,marker='s',c='r')
plt.scatter(X_down, Y_down, marker='o', c='blue')
if globals != 0:
globals.DIlist = X_down
globals.DIlisty = Y_down
if len(X_up) > 0:
# plt.scatter(X_up,Y_up,marker='s',facecolor='none',edgecolor='black')
plt.scatter(X_up, Y_up, marker='o',
facecolor='white', edgecolor='blue')
if globals != 0:
globals.DIlist = X_up
globals.DIlisty = Y_up | plots directions on equal area net
Parameters
_________
fignum : matplotlib figure number
DIblock : nested list of dec, inc pairs |
def get_current_span():
"""
Access current request context and extract current Span from it.
:return:
Return current span associated with the current request context.
If no request context is present in thread local, or the context
has no span, return None.
"""
# Check against the old, ScopeManager-less implementation,
# for backwards compatibility.
context = RequestContextManager.current_context()
if context is not None:
return context.span
active = opentracing.tracer.scope_manager.active
return active.span if active else None | Access current request context and extract current Span from it.
:return:
Return current span associated with the current request context.
If no request context is present in thread local, or the context
has no span, return None. |
def emotes(self, emotes):
"""Set the emotes
:param emotes: the key of the emotes tag
:type emotes: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
if emotes is None:
self._emotes = []
return
es = []
for estr in emotes.split('/'):
es.append(Emote.from_str(estr))
self._emotes = es | Set the emotes
:param emotes: the key of the emotes tag
:type emotes: :class:`str`
:returns: None
:rtype: None
:raises: None |
def name(self) -> str:
"""OpenSSL uses a different naming convention than the corresponding RFCs.
"""
return OPENSSL_TO_RFC_NAMES_MAPPING[self.ssl_version].get(self.openssl_name, self.openssl_name) | OpenSSL uses a different naming convention than the corresponding RFCs. |
def show_user(self, user):
"""Get the info for a specific user.
Returns a delegate that will receive the user in a callback."""
url = '/users/show/%s.xml' % (user)
d = defer.Deferred()
self.__downloadPage(url, txml.Users(lambda u: d.callback(u))) \
.addErrback(lambda e: d.errback(e))
return d | Get the info for a specific user.
Returns a delegate that will receive the user in a callback. |
def get_i_name(self, num, is_oai=None):
"""
This method is used mainly internally, but it can be handy if you work
with with raw MARC XML object and not using getters.
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
str: current name of ``i1``/``ind1`` parameter based on \
:attr:`oai_marc` property.
"""
if num not in (1, 2):
raise ValueError("`num` parameter have to be 1 or 2!")
if is_oai is None:
is_oai = self.oai_marc
i_name = "ind" if not is_oai else "i"
return i_name + str(num) | This method is used mainly internally, but it can be handy if you work
with the raw MARC XML object and are not using getters.
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
str: current name of ``i1``/``ind1`` parameter based on \
:attr:`oai_marc` property. |
def scan(repos, options):
"""Given a repository list [(path, vcsname), ...], scan each of them."""
ignore_set = set()
repos = repos[::-1] # Create a queue we can push and pop from
while repos:
directory, dotdir = repos.pop()
ignore_this = any(pat in directory for pat in options.ignore_patterns)
if ignore_this:
if options.verbose:
output(b'Ignoring repo: %s' % directory)
output(b'')
continue
vcsname, get_status = SYSTEMS[dotdir]
lines, subrepos = get_status(directory, ignore_set, options)
# We want to tackle subrepos immediately after their repository,
# so we put them at the front of the queue.
subrepos = [(os.path.join(directory, r), dotdir) for r in subrepos]
repos.extend(reversed(subrepos))
if lines is None: # signal that we should ignore this one
continue
if lines or options.verbose:
output(b'%s - %s' % (directory, vcsname))
for line in lines:
output(line)
output(b'') | Given a repository list [(path, vcsname), ...], scan each of them. |
def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
'''
Train a network from a specific graph.
'''
global sess
with tf.Graph().as_default():
train_model = GAG(cfg, embed, p_graph)
train_model.build_net(is_training=True)
tf.get_variable_scope().reuse_variables()
dev_model = GAG(cfg, embed, p_graph)
dev_model.build_net(is_training=False)
with tf.Session() as sess:
if restore_path is not None:
restore_mapping = dict(zip(restore_shared, restore_shared))
logger.debug('init shared variables from {}, restore_scopes: {}'.format(restore_path, restore_shared))
init_from_checkpoint(restore_path, restore_mapping)
logger.debug('init variables')
logger.debug(sess.run(tf.report_uninitialized_variables()))
init = tf.global_variables_initializer()
sess.run(init)
# writer = tf.summary.FileWriter('%s/graph/'%execution_path, sess.graph)
logger.debug('assign to graph')
saver = tf.train.Saver()
train_loss = None
bestacc = 0
patience = 5
patience_increase = 2
improvement_threshold = 0.995
for epoch in range(max_epoch):
logger.debug('begin to train')
train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) +
' loss: ' + str(train_loss))
dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch(
dev_batches, dev_model, False)
answers = generate_predict_json(
position1, position2, ids, contexts)
if save_path is not None:
logger.info('save prediction file to {}'.format(save_path))
with open(os.path.join(save_path, 'epoch%d.prediction' % epoch), 'w') as file:
json.dump(answers, file)
else:
answers = json.dumps(answers)
answers = json.loads(answers)
iter = epoch + 1
acc = evaluate.evaluate_with_predictions(
args.dev_file, answers)
logger.debug('Send intermediate acc: %s', str(acc))
nni.report_intermediate_result(acc)
logger.debug('Send intermediate result done.')
if acc > bestacc:
if acc * improvement_threshold > bestacc:
patience = max(patience, iter * patience_increase)
bestacc = acc
if save_path is not None:
logger.info('save model & prediction to {}'.format(save_path))
saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch))
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump(
(position1, position2, ids, contexts), file)
logger.debug('epoch %d acc %g bestacc %g' %
(epoch, acc, bestacc))
if patience <= iter:
break
logger.debug('save done.')
return train_loss, bestacc | Train a network from a specific graph. |
def construct_routes(self):
""" Gets modules routes.py and converts to module imports """
modules = self.evernode_app.get_modules()
for module_name in modules:
with self.app.app_context():
module = importlib.import_module(
'modules.%s.routes' % (module_name))
for route in module.routes:
self.routes.append(self.make_route(route))
if self.app.config['DEBUG']:
print('--- Loaded Modules ---')
print("Loaded Modules: " + str(modules)) | Gets modules routes.py and converts to module imports |
def object_clean(self):
"""
Remove large attributes from the metadata objects
"""
for sample in self.metadata:
try:
delattr(sample[self.analysistype], 'aaidentity')
delattr(sample[self.analysistype], 'aaalign')
delattr(sample[self.analysistype], 'aaindex')
delattr(sample[self.analysistype], 'ntalign')
delattr(sample[self.analysistype], 'ntindex')
delattr(sample[self.analysistype], 'dnaseq')
delattr(sample[self.analysistype], 'blastresults')
except AttributeError:
pass | Remove large attributes from the metadata objects |
def get_key(dotenv_path, key_to_get, verbose=False):
"""
Gets the value of a given key from the given .env
If the given .env path doesn't exist, returns None (and warns when verbose)
:param dotenv_path: path
:param key_to_get: key
:param verbose: verbosity flag, raise warning if path does not exist
:return: value of variable from environment file or None
"""
key_to_get = str(key_to_get)
if not os.path.exists(dotenv_path):
if verbose:
warnings.warn(f"Can't read {dotenv_path}, it doesn't exist.")
return None
dotenv_as_dict = dotenv_values(dotenv_path)
if key_to_get in dotenv_as_dict:
return dotenv_as_dict[key_to_get]
else:
if verbose:
warnings.warn(f"key {key_to_get} not found in {dotenv_path}.")
return None | Gets the value of a given key from the given .env
If the given .env path doesn't exist, returns None (and warns when verbose)
:param dotenv_path: path
:param key_to_get: key
:param verbose: verbosity flag, raise warning if path does not exist
:return: value of variable from environment file or None |
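Assuming the helper above is in scope (it relies on python-dotenv's dotenv_values), a quick check with a throwaway .env file:
with open('.env', 'w') as fh:
    fh.write('API_KEY=abc123\n')

print(get_key('.env', 'API_KEY'))                # 'abc123'
print(get_key('.env', 'MISSING', verbose=True))  # None, with a warning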
def create_from_xml(resultFile, resultElem, columns=None,
all_columns=False, columns_relevant_for_diff=set()):
'''
This function extracts everything necessary for creating a RunSetResult object
from the "result" XML tag of a benchmark result file.
It returns a RunSetResult object, which is not yet fully initialized.
To finish initializing the object, call collect_data()
before using it for anything else
(this is to separate the possibly costly collect_data() call from object instantiation).
'''
attributes = RunSetResult._extract_attributes_from_result(resultFile, resultElem)
if not columns:
columns = RunSetResult._extract_existing_columns_from_result(resultFile, resultElem, all_columns)
summary = RunSetResult._extract_summary_from_result(resultElem, columns)
return RunSetResult([(result, resultFile) for result in _get_run_tags_from_xml(resultElem)],
attributes, columns, summary, columns_relevant_for_diff) | This function extracts everything necessary for creating a RunSetResult object
from the "result" XML tag of a benchmark result file.
It returns a RunSetResult object, which is not yet fully initialized.
To finish initializing the object, call collect_data()
before using it for anything else
(this is to separate the possibly costly collect_data() call from object instantiation). |
def plot2dhist(xdata,ydata,cmap='binary',interpolation='nearest',
fig=None,logscale=True,xbins=None,ybins=None,
nbins=50,pts_only=False,**kwargs):
"""Plots a 2d density histogram of provided data
:param xdata,ydata: (array-like)
Data to plot.
:param cmap: (optional)
Colormap to use for density plot.
:param interpolation: (optional)
Interpolation scheme for display (passed to ``plt.imshow``).
:param fig: (optional)
Argument passed to :func:`setfig`.
:param logscale: (optional)
If ``True`` then the colormap will be based on a logarithmic
scale, rather than linear.
:param xbins,ybins: (optional)
Bin edges to use (if ``None``, then use ``np.histogram2d`` to
find bins automatically).
:param nbins: (optional)
Number of bins to use (if ``None``, then use ``np.histogram2d`` to
find bins automatically).
:param pts_only: (optional)
If ``True``, then just a scatter plot of the points is made,
rather than the density plot.
:param **kwargs:
Keyword arguments passed either to ``plt.plot`` or ``plt.imshow``
depending upon whether ``pts_only`` is set to ``True`` or not.
"""
setfig(fig)
if pts_only:
plt.plot(xdata,ydata,**kwargs)
return
ok = (~np.isnan(xdata) & ~np.isnan(ydata) &
~np.isinf(xdata) & ~np.isinf(ydata))
if (~ok).sum() > 0:
logging.warning('{} x values and {} y values are nan'.format(np.isnan(xdata).sum(),
np.isnan(ydata).sum()))
logging.warning('{} x values and {} y values are inf'.format(np.isinf(xdata).sum(),
np.isinf(ydata).sum()))
if xbins is not None and ybins is not None:
H,xs,ys = np.histogram2d(xdata[ok],ydata[ok],bins=(xbins,ybins))
else:
H,xs,ys = np.histogram2d(xdata[ok],ydata[ok],bins=nbins)
H = H.T
if logscale:
H = np.log(H)
extent = [xs[0],xs[-1],ys[0],ys[-1]]
plt.imshow(H,extent=extent,interpolation=interpolation,
aspect='auto',cmap=cmap,origin='lower',**kwargs) | Plots a 2d density histogram of provided data
:param xdata,ydata: (array-like)
Data to plot.
:param cmap: (optional)
Colormap to use for density plot.
:param interpolation: (optional)
Interpolation scheme for display (passed to ``plt.imshow``).
:param fig: (optional)
Argument passed to :func:`setfig`.
:param logscale: (optional)
If ``True`` then the colormap will be based on a logarithmic
scale, rather than linear.
:param xbins,ybins: (optional)
Bin edges to use (if ``None``, then use ``np.histogram2d`` to
find bins automatically).
:param nbins: (optional)
Number of bins to use (if ``None``, then use ``np.histogram2d`` to
find bins automatically).
:param pts_only: (optional)
If ``True``, then just a scatter plot of the points is made,
rather than the density plot.
:param **kwargs:
Keyword arguments passed either to ``plt.plot`` or ``plt.imshow``
depending upon whether ``pts_only`` is set to ``True`` or not. |
def update_dns_server(self, service_name, deployment_name, dns_server_name, address):
'''
Updates the ip address of a DNS server.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Specifies the name of the DNS server.
address:
Specifies the IP address of the DNS server.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('dns_server_name', dns_server_name)
_validate_not_none('address', address)
return self._perform_put(
self._get_dns_server_path(service_name,
deployment_name,
dns_server_name),
_XmlSerializer.dns_server_to_xml(dns_server_name, address),
as_async=True) | Updates the ip address of a DNS server.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Specifies the name of the DNS server.
address:
Specifies the IP address of the DNS server. |
def _get_curvature(nodes, tangent_vec, s):
r"""Compute the signed curvature of a curve at :math:`s`.
Computed via
.. math::
\frac{B'(s) \times B''(s)}{\left\lVert B'(s) \right\rVert_2^3}
.. image:: ../images/get_curvature.png
:align: center
.. testsetup:: get-curvature
import numpy as np
import bezier
from bezier._curve_helpers import evaluate_hodograph
from bezier._curve_helpers import get_curvature
.. doctest:: get-curvature
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [1.0, 0.75, 0.5, 0.25, 0.0],
... [0.0, 2.0 , -2.0, 2.0 , 0.0],
... ])
>>> s = 0.5
>>> tangent_vec = evaluate_hodograph(s, nodes)
>>> tangent_vec
array([[-1.],
[ 0.]])
>>> curvature = get_curvature(nodes, tangent_vec, s)
>>> curvature
-12.0
.. testcleanup:: get-curvature
import make_images
make_images.get_curvature(nodes, s, tangent_vec, curvature)
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes of a curve.
tangent_vec (numpy.ndarray): The already computed value of
:math:`B'(s)`
s (float): The parameter value along the curve.
Returns:
float: The signed curvature.
"""
_, num_nodes = np.shape(nodes)
if num_nodes == 2: # Lines have no curvature.
return 0.0
# NOTE: We somewhat replicate code in ``evaluate_hodograph()`` here.
first_deriv = nodes[:, 1:] - nodes[:, :-1]
second_deriv = first_deriv[:, 1:] - first_deriv[:, :-1]
concavity = (
(num_nodes - 1)
* (num_nodes - 2)
* evaluate_multi(second_deriv, np.asfortranarray([s]))
)
curvature = _helpers.cross_product(
tangent_vec.ravel(order="F"), concavity.ravel(order="F")
)
# NOTE: We convert to 1D to make sure NumPy uses vector norm.
curvature /= np.linalg.norm(tangent_vec[:, 0], ord=2) ** 3
return curvature | r"""Compute the signed curvature of a curve at :math:`s`.
Computed via
.. math::
\frac{B'(s) \times B''(s)}{\left\lVert B'(s) \right\rVert_2^3}
.. image:: ../images/get_curvature.png
:align: center
.. testsetup:: get-curvature
import numpy as np
import bezier
from bezier._curve_helpers import evaluate_hodograph
from bezier._curve_helpers import get_curvature
.. doctest:: get-curvature
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [1.0, 0.75, 0.5, 0.25, 0.0],
... [0.0, 2.0 , -2.0, 2.0 , 0.0],
... ])
>>> s = 0.5
>>> tangent_vec = evaluate_hodograph(s, nodes)
>>> tangent_vec
array([[-1.],
[ 0.]])
>>> curvature = get_curvature(nodes, tangent_vec, s)
>>> curvature
-12.0
.. testcleanup:: get-curvature
import make_images
make_images.get_curvature(nodes, s, tangent_vec, curvature)
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes of a curve.
tangent_vec (numpy.ndarray): The already computed value of
:math:`B'(s)`
s (float): The parameter value along the curve.
Returns:
float: The signed curvature. |
def _getTransformation(self):
"""_getTransformation(self) -> PyObject *"""
CheckParent(self)
val = _fitz.Page__getTransformation(self)
val = Matrix(val)
return val | _getTransformation(self) -> PyObject * |
def from_mask(cls, dh_mask, lwin, nwin=None, weights=None):
"""
Construct localization windows that are optimally concentrated within
the region specified by a mask.
Usage
-----
x = SHWindow.from_mask(dh_mask, lwin, [nwin, weights])
Returns
-------
x : SHWindow class instance
Parameters
----------
dh_mask :ndarray, shape (nlat, nlon)
A Driscoll and Healy (1994) sampled grid describing the
concentration region R. All elements should either be 1 (for inside
the concentration region) or 0 (for outside the concentration
region). The grid must have dimensions nlon=nlat or nlon=2*nlat,
where nlat is even.
lwin : int
The spherical harmonic bandwidth of the localization windows.
nwin : int, optional, default = (lwin+1)**2
The number of best concentrated eigenvalues and eigenfunctions to
return.
weights : ndarray, optional, default = None
Taper weights used with the multitaper spectral analyses.
"""
if nwin is None:
nwin = (lwin + 1)**2
else:
if nwin > (lwin + 1)**2:
raise ValueError('nwin must be less than or equal to ' +
'(lwin + 1)**2. lwin = {:d} and nwin = {:d}'
.format(lwin, nwin))
if dh_mask.shape[0] % 2 != 0:
raise ValueError('The number of latitude bands in dh_mask ' +
'must be even. nlat = {:d}'
.format(dh_mask.shape[0]))
if dh_mask.shape[1] == dh_mask.shape[0]:
_sampling = 1
elif dh_mask.shape[1] == 2 * dh_mask.shape[0]:
_sampling = 2
else:
raise ValueError('dh_mask must be dimensioned as (n, n) or ' +
'(n, 2 * n). Input shape is ({:d}, {:d})'
.format(dh_mask.shape[0], dh_mask.shape[1]))
mask_lm = _shtools.SHExpandDH(dh_mask, sampling=_sampling, lmax_calc=0)
area = mask_lm[0, 0, 0] * 4 * _np.pi
tapers, eigenvalues = _shtools.SHReturnTapersMap(dh_mask, lwin,
ntapers=nwin)
return SHWindowMask(tapers, eigenvalues, weights, area, copy=False) | Construct localization windows that are optimally concentrated within
the region specified by a mask.
Usage
-----
x = SHWindow.from_mask(dh_mask, lwin, [nwin, weights])
Returns
-------
x : SHWindow class instance
Parameters
----------
dh_mask :ndarray, shape (nlat, nlon)
A Driscoll and Healy (1994) sampled grid describing the
concentration region R. All elements should either be 1 (for inside
the concentration region) or 0 (for outside the concentration
region). The grid must have dimensions nlon=nlat or nlon=2*nlat,
where nlat is even.
lwin : int
The spherical harmonic bandwidth of the localization windows.
nwin : int, optional, default = (lwin+1)**2
The number of best concentrated eigenvalues and eigenfunctions to
return.
weights : ndarray, optional, default = None
Taper weights used with the multitaper spectral analyses. |
def server_sends_binary(self, message, name=None, connection=None, label=None):
"""Send raw binary `message`.
If server `name` is not given, uses the latest server. Optional message
`label` is shown on logs.
Examples:
| Server sends binary | Hello! |
| Server sends binary | ${some binary} | Server1 | label=DebugMessage |
| Server sends binary | ${some binary} | connection=my_connection |
"""
server, name = self._servers.get_with_name(name)
server.send(message, alias=connection)
self._register_send(server, label, name, connection=connection) | Send raw binary `message`.
If server `name` is not given, uses the latest server. Optional message
`label` is shown on logs.
Examples:
| Server sends binary | Hello! |
| Server sends binary | ${some binary} | Server1 | label=DebugMessage |
| Server sends binary | ${some binary} | connection=my_connection | |
async def sinter(self, keys, *args):
"Return the intersection of sets specified by ``keys``"
args = list_or_args(keys, args)
return await self.execute_command('SINTER', *args) | Return the intersection of sets specified by ``keys`` |
def leaders_in(self, leaderboard_name, current_page, **options):
'''
Retrieve a page of leaders from the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param current_page [int] Page to retrieve from the named leaderboard.
@param options [Hash] Options to be used when retrieving the page from the named leaderboard.
@return a page of leaders from the named leaderboard.
'''
if current_page < 1:
current_page = 1
page_size = options.get('page_size', self.page_size)
index_for_redis = current_page - 1
starting_offset = (index_for_redis * page_size)
if starting_offset < 0:
starting_offset = 0
ending_offset = (starting_offset + page_size) - 1
raw_leader_data = self._range_method(
self.redis_connection,
leaderboard_name,
int(starting_offset),
int(ending_offset),
withscores=False)
return self._parse_raw_members(
leaderboard_name, raw_leader_data, **options) | Retrieve a page of leaders from the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param current_page [int] Page to retrieve from the named leaderboard.
@param options [Hash] Options to be used when retrieving the page from the named leaderboard.
@return a page of leaders from the named leaderboard. |
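The page-to-offset arithmetic is easy to sanity-check in isolation (no Redis needed); a standalone sketch mirroring the code above:
def page_offsets(current_page, page_size=25):
    # Pages are 1-indexed; Redis range offsets are 0-indexed and inclusive.
    current_page = max(current_page, 1)
    starting_offset = (current_page - 1) * page_size
    return starting_offset, starting_offset + page_size - 1

print(page_offsets(3))   # (50, 74)
print(page_offsets(0))   # (0, 24) -- page numbers below 1 are clamped to page 1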
def load_json(file):
"""Load JSON file at app start"""
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, file)) as jfile:
data = json.load(jfile)
return data | Load JSON file at app start |
def _clean_record(self, record):
"""Remove all fields with `None` values"""
for k, v in dict(record).items():
if isinstance(v, dict):
v = self._clean_record(v)
if v is None:
record.pop(k)
return record | Remove all fields with `None` values |
def getAllAnnotationSets(self):
"""
Returns all variant annotation sets on the server.
"""
for variantSet in self.getAllVariantSets():
iterator = self._client.search_variant_annotation_sets(
variant_set_id=variantSet.id)
for variantAnnotationSet in iterator:
yield variantAnnotationSet | Returns all variant annotation sets on the server. |
def config(self, averaging=1, datarate=15, mode=MODE_NORMAL):
"""
Set the base config for sensor
:param averaging: Sets the number of samples that are internally averaged
:param datarate: Datarate in hertz
:param mode: one of the MODE_* constants
"""
averaging_conf = {
1: 0,
2: 1,
4: 2,
8: 3
}
if averaging not in averaging_conf.keys():
raise Exception('Averaging should be one of: 1,2,4,8')
datarates = {
0.75: 0,
1.5: 1,
3: 2,
7.5: 4,
15: 5,
30: 6,
75: 7
}
if datarate not in datarates.keys():
raise Exception(
'Datarate of {} Hz is not supported, choose one of: {}'.format(datarate, ', '.join(str(d) for d in datarates)))
config_a = 0
# Compose the register by OR-ing the shifted bit fields (AND-ing into zero would always give zero).
config_a |= averaging_conf[averaging] << 5
config_a |= datarates[datarate] << 2
config_a |= mode
self.i2c_write_register(0x00, config_a) | Set the base config for sensor
:param averaging: Sets the number of samples that are internally averaged
:param datarate: Datarate in hertz
:param mode: one of the MODE_* constants |
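The register value is packed by shifting each field into place; a standalone illustration of the arithmetic, with field meanings taken from the code above rather than a datasheet (the mode bits are a placeholder, since the MODE_* constants are not shown):
averaging_bits = 1   # 2-sample averaging, per the mapping above
datarate_bits = 5    # 15 Hz, per the mapping above
mode_bits = 0        # placeholder value for illustration only
config_a = (averaging_bits << 5) | (datarate_bits << 2) | mode_bits
print(bin(config_a))  # 0b110100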
def show_top_losses(self, k:int, max_len:int=70)->None:
"""
Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of
actual class. `max_len` is the maximum number of tokens displayed.
"""
from IPython.display import display, HTML
items = []
tl_val,tl_idx = self.top_losses()
for i,idx in enumerate(tl_idx):
if k <= 0: break
k -= 1
tx,cl = self.data.dl(self.ds_type).dataset[idx]
cl = cl.data
classes = self.data.classes
txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text
tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}', f'{self.losses[idx]:.2f}',
f'{self.probs[idx][cl]:.2f}']
items.append(tmp)
items = np.array(items)
names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability']
df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
with pd.option_context('display.max_colwidth', -1):
display(HTML(df.to_html(index=False))) | Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of
actual class. `max_len` is the maximum number of tokens displayed. |
def _edges_replaced(self, object, name, old, new):
""" Handles a list of edges being set.
"""
self._delete_edges(old)
self._add_edges(new) | Handles a list of edges being set. |
def add_data(self, id, key, value):
"""Add new data item.
:param str id: Entry id within ``SDfile``.
:param str key: Data item key.
:param str value: Data item value.
:return: None.
:rtype: :py:obj:`None`.
"""
self[str(id)]['data'].setdefault(key, [])
self[str(id)]['data'][key].append(value) | Add new data item.
:param str id: Entry id within ``SDfile``.
:param str key: Data item key.
:param str value: Data item value.
:return: None.
:rtype: :py:obj:`None`. |
def most_even_chunk(string, group):
"""Divide a string into a list of strings as even as possible."""
counts = [0] + most_even(len(string), group)
indices = accumulate(counts)
slices = window(indices, 2)
return [string[slice(*one)] for one in slices] | Divide a string into a list of strings as even as possible. |
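The helpers it relies on (most_even, accumulate, window) are not shown here; a self-contained sketch of the intended behaviour (whether the longer pieces come first depends on most_even):
def most_even_chunk_sketch(string, group):
    # Split `string` into `group` pieces whose lengths differ by at most one.
    q, r = divmod(len(string), group)
    sizes = [q + 1] * r + [q] * (group - r)
    out, pos = [], 0
    for size in sizes:
        out.append(string[pos:pos + size])
        pos += size
    return out

print(most_even_chunk_sketch('abcdefgh', 3))   # ['abc', 'def', 'gh']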
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
return all([
domain.in_domain(array)
for domain, array in
zip(self._domains, separate_struct_array(points, self._dtypes))
]) | Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool` |
def vertical_gradient(self, x0, y0, x1, y1, start, end):
"""Draw a vertical gradient"""
x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
grad = gradient_list(start, end, y1 - y0)
for x in range(x0, x1 + 1):
for y in range(y0, y1 + 1):
self.point(x, y, grad[y - y0]) | Draw a vertical gradient |
def shuffle_step(entries, step):
'''
Shuffle the step
'''
answer = []
for i in range(0, len(entries), step):
sub = entries[i:i+step]
shuffle(sub)
answer += sub
return answer | Shuffle the step |
def paste_buffer(pymux, variables):
"""
Paste clipboard content into buffer.
"""
pane = pymux.arrangement.get_active_pane()
pane.process.write_input(get_app().clipboard.get_data().text, paste=True) | Paste clipboard content into buffer. |
def _default_return_columns(self):
"""
Return a list of the model elements that does not include lookup functions
or other functions that take parameters.
"""
return_columns = []
parsed_expr = []
for key, value in self.components._namespace.items():
if hasattr(self.components, value):
sig = signature(getattr(self.components, value))
# The `*args` reference handles the py2.7 decorator.
if len(set(sig.parameters) - {'args'}) == 0:
expr = self.components._namespace[key]
if not expr in parsed_expr:
return_columns.append(key)
parsed_expr.append(expr)
return return_columns | Return a list of the model elements that does not include lookup functions
or other functions that take parameters. |
def update_record(self, name, new_data, condition, update_only=False,
debug=False):
"""
Find the first row in self.df with index == name
and condition == True.
Update that record with new_data, then delete any
additional records where index == name and condition == True.
Change is inplace
"""
# add numeric index column temporarily
self.df['num'] = list(range(len(self.df)))
df_data = self.df
condition2 = (df_data.index == name)
# edit first of existing data that meets condition
if len(df_data[condition & condition2]) > 0: #we have one or more records to update or delete
# list of all rows where condition is true and index == name
inds = df_data[condition & condition2]['num']
#inds = df_data[condition]['num'] # list of all rows where condition is true
existing_data = dict(df_data.iloc[inds.iloc[0]]) # get first record of existing_data from dataframe
existing_data.update(new_data) # update existing data with new interpretations
# update row
self.update_row(inds.iloc[0], existing_data)
# now remove all the remaining records of same condition
if len(inds) > 1:
for ind in inds[1:]:
print("deleting redundant records for:", name)
df_data = self.delete_row(ind)
else:
if update_only:
print("no record found for that condition, not updating ", name)
else:
print('no record found - creating new one for ', name)
# add new row
df_data = self.add_row(name, new_data)
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data | Find the first row in self.df with index == name
and condition == True.
Update that record with new_data, then delete any
additional records where index == name and condition == True.
Change is inplace |
def pipe():
"""create an inter-process communication pipe
:returns:
a pair of :class:`File` objects ``(read, write)`` for the two ends of
the pipe
"""
r, w = os.pipe()
return File.fromfd(r, 'rb'), File.fromfd(w, 'wb') | create an inter-process communication pipe
:returns:
a pair of :class:`File` objects ``(read, write)`` for the two ends of
the pipe |
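The File wrapper is specific to the library above; a minimal sketch of the same idea with plain file objects (an assumption, not the library's API) looks like this.

import os

r_fd, w_fd = os.pipe()
read_end, write_end = os.fdopen(r_fd, 'rb'), os.fdopen(w_fd, 'wb')
write_end.write(b'hello')
write_end.flush()
print(read_end.read(5))  # b'hello'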
def start_session(self):
"""
Starts a Salesforce session and determines which SF instance to use for future requests.
"""
if self.has_active_session():
raise Exception("Session already in progress.")
response = requests.post(self._get_login_url(),
headers=self._get_login_headers(),
data=self._get_login_xml())
response.raise_for_status()
root = ET.fromstring(response.text)
for e in root.iter("%ssessionId" % self.SOAP_NS):
if self.session_id:
raise Exception("Invalid login attempt. Multiple session ids found.")
self.session_id = e.text
for e in root.iter("%sserverUrl" % self.SOAP_NS):
if self.server_url:
raise Exception("Invalid login attempt. Multiple server urls found.")
self.server_url = e.text
if not self.has_active_session():
raise Exception("Invalid login attempt resulted in null sessionId [%s] and/or serverUrl [%s]." %
(self.session_id, self.server_url))
self.hostname = urlsplit(self.server_url).hostname | Starts a Salesforce session and determines which SF instance to use for future requests. |
def get_sql_type(self, instance, counter_name):
'''
Return the type of the performance counter so that we can report it to
Datadog correctly.
If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
PERF_AVERAGE_BULK), the name of the base counter will also be returned
'''
with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor:
cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
(sql_type,) = cursor.fetchone()
if sql_type == PERF_LARGE_RAW_BASE:
self.log.warning("Metric {} is of type Base and shouldn't be reported this way".format(counter_name))
base_name = None
if sql_type in [PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION]:
# This is an ugly hack. For certain types of metric (PERF_RAW_LARGE_FRACTION
# and PERF_AVERAGE_BULK), we need two metrics: the metrics specified and
# a base metrics to get the ratio. There is no unique schema so we generate
# the possible candidates and we look at which ones exist in the db.
candidates = (
counter_name + " base",
counter_name.replace("(ms)", "base"),
counter_name.replace("Avg ", "") + " base",
)
try:
cursor.execute(BASE_NAME_QUERY, candidates)
base_name = cursor.fetchone().counter_name.strip()
self.log.debug("Got base metric: {} for metric: {}".format(base_name, counter_name))
except Exception as e:
self.log.warning("Could not get counter_name of base for metric: {}".format(e))
return sql_type, base_name | Return the type of the performance counter so that we can report it to
Datadog correctly.
If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
PERF_AVERAGE_BULK), the name of the base counter will also be returned |
def route(**kwargs):
"""
Route a request to different views based on http verb.
Kwargs should be 'GET', 'POST', 'PUT', 'DELETE' or 'ELSE',
where the first four map to a view to route to for that type of
request method/verb, and 'ELSE' maps to a view to pass the request
to if the given request method/verb was not specified.
"""
def routed(request, *args2, **kwargs2):
method = request.method
if method in kwargs:
req_method = kwargs[method]
return req_method(request, *args2, **kwargs2)
elif 'ELSE' in kwargs:
return kwargs['ELSE'](request, *args2, **kwargs2)
else:
raise Http404()
return routed | Route a request to different views based on http verb.
Kwargs should be 'GET', 'POST', 'PUT', 'DELETE' or 'ELSE',
where the first four map to a view to route to for that type of
request method/verb, and 'ELSE' maps to a view to pass the request
to if the given request method/verb was not specified. |
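A hedged usage sketch in a Django URLconf; the views and URL pattern are hypothetical.

from django.http import HttpResponse
from django.urls import path

def list_items(request):       # hypothetical view
    return HttpResponse("list")

def create_item(request):      # hypothetical view
    return HttpResponse("created")

urlpatterns = [
    # GET renders the list, POST creates an item, anything else raises Http404
    path('items/', route(GET=list_items, POST=create_item)),
]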
def sample_initial(self, nlive=500, update_interval=None,
first_update=None, maxiter=None, maxcall=None,
logl_max=np.inf, dlogz=0.01, live_points=None):
"""
Generate a series of initial samples from a nested sampling
run using a fixed number of live points using an internal
sampler from :mod:`~dynesty.nestedsamplers`. Instantiates a
generator that will be called by the user.
Parameters
----------
nlive : int, optional
The number of live points to use for the baseline nested
sampling run. Default is `500`.
update_interval : int or float, optional
If an integer is passed, only update the bounding distribution
every `update_interval`-th likelihood call. If a float is passed,
update the bound after every `round(update_interval * nlive)`-th
likelihood call. Larger update intervals can be more efficient
when the likelihood function is quick to evaluate. If no value is
provided, defaults to the value passed during initialization.
first_update : dict, optional
A dictionary containing parameters governing when the sampler will
first update the bounding distribution from the unit cube
(`'none'`) to the one specified by `sample`.
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop
earlier if termination condition is reached. Default is
`sys.maxsize` (no limit).
dlogz : float, optional
Iteration will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. The default is
`0.01`.
logl_max : float, optional
Iteration will stop when the sampled ln(likelihood) exceeds the
threshold set by `logl_max`. Default is no bound (`np.inf`).
live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim)
A set of live points used to initialize the nested sampling run.
Contains `live_u`, the coordinates on the unit cube, `live_v`, the
transformed variables, and `live_logl`, the associated
loglikelihoods. By default, if these are not provided the initial
set of live points will be drawn from the unit `npdim`-cube.
**WARNING: It is crucial that the initial set of live points have
been sampled from the prior. Failure to provide a set of valid
live points will lead to incorrect results.**
Returns
-------
worst : int
Index of the live point with the worst likelihood. This is our
new dead point sample.
ustar : `~numpy.ndarray` with shape (npdim,)
Position of the sample.
vstar : `~numpy.ndarray` with shape (ndim,)
Transformed position of the sample.
loglstar : float
Ln(likelihood) of the sample.
logvol : float
Ln(prior volume) within the sample.
logwt : float
Ln(weight) of the sample.
logz : float
Cumulative ln(evidence) up to the sample (inclusive).
logzvar : float
Estimated cumulative variance on `logz` (inclusive).
h : float
Cumulative information up to the sample (inclusive).
nc : int
Number of likelihood calls performed before the new
live point was accepted.
worst_it : int
Iteration when the live (now dead) point was originally proposed.
boundidx : int
Index of the bound the dead point was originally drawn from.
bounditer : int
Index of the bound being used at the current iteration.
eff : float
The cumulative sampling efficiency (in percent).
delta_logz : float
The estimated remaining evidence expressed as the ln(ratio) of the
current evidence.
"""
# Initialize inputs.
if maxcall is None:
maxcall = sys.maxsize
if maxiter is None:
maxiter = sys.maxsize
if nlive <= 2 * self.npdim:
warnings.warn("Beware: `nlive_init <= 2 * ndim`!")
# Reset saved results to avoid any possible conflicts.
self.reset()
# Initialize the first set of live points.
if live_points is None:
self.nlive_init = nlive
self.live_u = self.rstate.rand(self.nlive_init, self.npdim)
if self.use_pool_ptform:
self.live_v = np.array(list(self.M(self.prior_transform,
np.array(self.live_u))))
else:
self.live_v = np.array(list(map(self.prior_transform,
np.array(self.live_u))))
if self.use_pool_logl:
self.live_logl = np.array(list(self.M(self.loglikelihood,
np.array(self.live_v))))
else:
self.live_logl = np.array(list(map(self.loglikelihood,
np.array(self.live_v))))
else:
self.live_u, self.live_v, self.live_logl = live_points
self.nlive_init = len(self.live_u)
# Convert all `-np.inf` log-likelihoods to finite large numbers.
# Necessary to keep estimators in our sampler from breaking.
for i, logl in enumerate(self.live_logl):
if not np.isfinite(logl):
if np.sign(logl) < 0:
self.live_logl[i] = -1e300
else:
raise ValueError("The log-likelihood ({0}) of live "
"point {1} located at u={2} v={3} "
" is invalid."
.format(logl, i, self.live_u[i],
self.live_v[i]))
# (Re-)bundle live points.
live_points = [self.live_u, self.live_v, self.live_logl]
self.live_init = [np.array(l) for l in live_points]
self.ncall += self.nlive_init
self.live_bound = np.zeros(self.nlive_init, dtype='int')
self.live_it = np.zeros(self.nlive_init, dtype='int')
# Initialize the internal `sampler` object.
if update_interval is None:
update_interval = self.update_interval
if isinstance(update_interval, float):
update_interval = int(round(self.update_interval * nlive))
bounding = self.bounding
if bounding == 'none':
update_interval = np.inf # no need to update with no bounds
if first_update is None:
first_update = self.first_update
self.sampler = _SAMPLERS[bounding](self.loglikelihood,
self.prior_transform,
self.npdim, self.live_init,
self.method, update_interval,
first_update,
self.rstate, self.queue_size,
self.pool, self.use_pool,
self.kwargs)
self.bound = self.sampler.bound
# Run the sampler internally as a generator.
for i in range(1):
for it, results in enumerate(self.sampler.sample(maxiter=maxiter,
save_samples=False,
maxcall=maxcall, dlogz=dlogz)):
# Grab results.
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
# Save our base run (which we will use later).
self.base_id.append(worst)
self.base_u.append(ustar)
self.base_v.append(vstar)
self.base_logl.append(loglstar)
self.base_logvol.append(logvol)
self.base_logwt.append(logwt)
self.base_logz.append(logz)
self.base_logzvar.append(logzvar)
self.base_h.append(h)
self.base_nc.append(nc)
self.base_it.append(worst_it)
self.base_n.append(self.nlive_init)
self.base_boundidx.append(boundidx)
self.base_bounditer.append(bounditer)
self.base_scale.append(self.sampler.scale)
# Save a copy of the results.
self.saved_id.append(worst)
self.saved_u.append(ustar)
self.saved_v.append(vstar)
self.saved_logl.append(loglstar)
self.saved_logvol.append(logvol)
self.saved_logwt.append(logwt)
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_nc.append(nc)
self.saved_it.append(worst_it)
self.saved_n.append(self.nlive_init)
self.saved_boundidx.append(boundidx)
self.saved_bounditer.append(bounditer)
self.saved_scale.append(self.sampler.scale)
# Increment relevant counters.
self.ncall += nc
self.eff = 100. * self.it / self.ncall
self.it += 1
yield (worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
self.eff, delta_logz)
for it, results in enumerate(self.sampler.add_live_points()):
# Grab results.
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
# Save our base run (which we will use later).
self.base_id.append(worst)
self.base_u.append(ustar)
self.base_v.append(vstar)
self.base_logl.append(loglstar)
self.base_logvol.append(logvol)
self.base_logwt.append(logwt)
self.base_logz.append(logz)
self.base_logzvar.append(logzvar)
self.base_h.append(h)
self.base_nc.append(nc)
self.base_it.append(worst_it)
self.base_n.append(self.nlive_init - it)
self.base_boundidx.append(boundidx)
self.base_bounditer.append(bounditer)
self.base_scale.append(self.sampler.scale)
# Save a copy of the results.
self.saved_id.append(worst)
self.saved_u.append(ustar)
self.saved_v.append(vstar)
self.saved_logl.append(loglstar)
self.saved_logvol.append(logvol)
self.saved_logwt.append(logwt)
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_nc.append(nc)
self.saved_it.append(worst_it)
self.saved_n.append(self.nlive_init - it)
self.saved_boundidx.append(boundidx)
self.saved_bounditer.append(bounditer)
self.saved_scale.append(self.sampler.scale)
# Increment relevant counters.
self.eff = 100. * self.it / self.ncall
self.it += 1
yield (worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
self.eff, delta_logz)
self.base = True # baseline run complete
self.saved_batch = np.zeros(len(self.saved_id), dtype='int') # batch
self.saved_batch_nlive.append(self.nlive_init) # initial nlive
self.saved_batch_bounds.append((-np.inf, np.inf)) | Generate a series of initial samples from a nested sampling
run using a fixed number of live points using an internal
sampler from :mod:`~dynesty.nestedsamplers`. Instantiates a
generator that will be called by the user.
Parameters
----------
nlive : int, optional
The number of live points to use for the baseline nested
sampling run. Default is `500`.
update_interval : int or float, optional
If an integer is passed, only update the bounding distribution
every `update_interval`-th likelihood call. If a float is passed,
update the bound after every `round(update_interval * nlive)`-th
likelihood call. Larger update intervals can be more efficient
when the likelihood function is quick to evaluate. If no value is
provided, defaults to the value passed during initialization.
first_update : dict, optional
A dictionary containing parameters governing when the sampler will
first update the bounding distribution from the unit cube
(`'none'`) to the one specified by `sample`.
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop
earlier if termination condition is reached. Default is
`sys.maxsize` (no limit).
dlogz : float, optional
Iteration will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. The default is
`0.01`.
logl_max : float, optional
Iteration will stop when the sampled ln(likelihood) exceeds the
threshold set by `logl_max`. Default is no bound (`np.inf`).
live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim)
A set of live points used to initialize the nested sampling run.
Contains `live_u`, the coordinates on the unit cube, `live_v`, the
transformed variables, and `live_logl`, the associated
loglikelihoods. By default, if these are not provided the initial
set of live points will be drawn from the unit `npdim`-cube.
**WARNING: It is crucial that the initial set of live points have
been sampled from the prior. Failure to provide a set of valid
live points will lead to incorrect results.**
Returns
-------
worst : int
Index of the live point with the worst likelihood. This is our
new dead point sample.
ustar : `~numpy.ndarray` with shape (npdim,)
Position of the sample.
vstar : `~numpy.ndarray` with shape (ndim,)
Transformed position of the sample.
loglstar : float
Ln(likelihood) of the sample.
logvol : float
Ln(prior volume) within the sample.
logwt : float
Ln(weight) of the sample.
logz : float
Cumulative ln(evidence) up to the sample (inclusive).
logzvar : float
Estimated cumulative variance on `logz` (inclusive).
h : float
Cumulative information up to the sample (inclusive).
nc : int
Number of likelihood calls performed before the new
live point was accepted.
worst_it : int
Iteration when the live (now dead) point was originally proposed.
boundidx : int
Index of the bound the dead point was originally drawn from.
bounditer : int
Index of the bound being used at the current iteration.
eff : float
The cumulative sampling efficiency (in percent).
delta_logz : float
The estimated remaining evidence expressed as the ln(ratio) of the
current evidence. |
def fromid(self, item_id):
"""
Initializes an instance of Story for given item_id.
It is assumed that the story referenced by item_id is valid
and does not raise any HTTP errors.
item_id is an int.
"""
if not item_id:
raise Exception('Need an item_id for a story')
# get details about a particular story
soup = get_item_soup(item_id)
# this post has not been scraped, so we explicitly get all info
story_id = item_id
rank = -1
# to extract meta information about the post
info_table = soup.findChildren('table')[2]
# [0] = title, domain, [1] = points, user, time, comments
info_rows = info_table.findChildren('tr')
# title, domain
title_row = info_rows[0].findChildren('td')[1]
title = title_row.find('a').text
try:
domain = title_row.find('span').string[2:-2]
# domain found
is_self = False
link = title_row.find('a').get('href')
except AttributeError:
# self post
domain = BASE_URL
is_self = True
link = '%s/item?id=%s' % (BASE_URL, item_id)
# points, user, time, comments
meta_row = info_rows[1].findChildren('td')[1].contents
# [<span id="score_7024626">789 points</span>, u' by ', <a href="user?id=endianswap">endianswap</a>,
# u' 8 hours ago | ', <a href="item?id=7024626">238 comments</a>]
points = int(re.match(r'^(\d+)\spoint.*', meta_row[0].text).groups()[0])
submitter = meta_row[2].text
submitter_profile = '%s/%s' % (BASE_URL, meta_row[2].get('href'))
published_time = ' '.join(meta_row[3].strip().split()[:3])
comments_link = '%s/item?id=%s' % (BASE_URL, item_id)
try:
num_comments = int(re.match(r'(\d+)\s.*', meta_row[
4].text).groups()[0])
except AttributeError:
num_comments = 0
story = Story(rank, story_id, title, link, domain, points, submitter,
published_time, submitter_profile, num_comments,
comments_link, is_self)
return story | Initializes an instance of Story for given item_id.
It is assumed that the story referenced by item_id is valid
and does not raise any HTTP errors.
item_id is an int. |
def main(arguments=None):
"""
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="DEBUG",
options_first=False,
projectName="tastic"
)
arguments, settings, log, dbConn = su.setup()
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if varname == "import":
varname = "iimport"
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
if init:
from os.path import expanduser
home = expanduser("~")
filepath = home + "/.config/tastic/tastic.yaml"
try:
cmd = """open %(filepath)s""" % locals()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
except:
pass
try:
cmd = """start %(filepath)s""" % locals()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
except:
pass
# CALL FUNCTIONS/OBJECTS
if sort or archive:
ws = workspace(
log=log,
settings=settings,
fileOrWorkspacePath=pathToFileOrWorkspace
)
if sort:
ws.sort()
if archive:
ws.archive_done()
if sync:
tp = syncc(
log=log,
settings=settings,
workspaceRoot=pathToWorkspace,
workspaceName=workspaceName,
syncFolder=pathToSyncFolder,
editorialRootPath=editorialRootPath,
includeFileTags=fileTagsFlag
)
tp.sync()
if reminders:
r = reminderss(
log=log,
settings=settings
)
r.import_list(
listName=listName,
pathToTaskpaperDoc=pathToTaskpaperDoc
)
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return | *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* |
def intervalAdd(self, a, b, val):
"""Variant, adds val to t[a], to t[a + 1] ... and to t[b]
:param int a b: with 1 <= a <= b
"""
self.add(a, +val)
self.add(b + 1, -val) | Variant, adds val to t[a], to t[a + 1] ... and to t[b]
:param int a b: with 1 <= a <= b |
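A minimal Fenwick-tree sketch of how this variant is typically used; the surrounding class is an assumption, needing only a point add and a prefix-sum query, so that after intervalAdd(a, b, val) the prefix sum at i recovers t[i].

class FenwickSketch:
    # assumed 1-based Fenwick tree; the original class may differ
    def __init__(self, size):
        self.t = [0] * (size + 2)

    def add(self, i, val):               # point update
        while i < len(self.t):
            self.t[i] += val
            i += i & (-i)

    def get(self, i):                    # prefix sum == value at i for this variant
        total = 0
        while i > 0:
            total += self.t[i]
            i -= i & (-i)
        return total

    def intervalAdd(self, a, b, val):
        self.add(a, +val)
        self.add(b + 1, -val)

f = FenwickSketch(10)
f.intervalAdd(3, 7, 5)
print(f.get(2), f.get(3), f.get(7), f.get(8))  # 0 5 5 0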
def check_can_approve(self, request, application, roles):
""" Check the person's authorization. """
try:
authorised_persons = self.get_authorised_persons(application)
authorised_persons.get(pk=request.user.pk)
return True
except Person.DoesNotExist:
return False | Check the person's authorization. |
def get_bundle(self, bundle_id=None):
# type: (Union[Bundle, int]) -> Bundle
"""
Retrieves the :class:`~pelix.framework.Bundle` object for the bundle
matching the given ID (int). If no ID is given (None), the bundle
associated to this context is returned.
:param bundle_id: A bundle ID (optional)
:return: The requested :class:`~pelix.framework.Bundle` object
:raise BundleException: The given ID doesn't exist or is invalid
"""
if bundle_id is None:
# Current bundle
return self.__bundle
elif isinstance(bundle_id, Bundle):
# Got a bundle (compatibility with older install_bundle())
bundle_id = bundle_id.get_bundle_id()
return self.__framework.get_bundle_by_id(bundle_id) | Retrieves the :class:`~pelix.framework.Bundle` object for the bundle
matching the given ID (int). If no ID is given (None), the bundle
associated to this context is returned.
:param bundle_id: A bundle ID (optional)
:return: The requested :class:`~pelix.framework.Bundle` object
:raise BundleException: The given ID doesn't exist or is invalid |
def disable(self, name=None):
"""Disable one or all actions."""
if name is None:
for name in self._actions_dict:
self.disable(name)
return
self._actions_dict[name].qaction.setEnabled(False) | Disable one or all actions. |
def _get_results(self, page):
"""Find every div tag containing torrent details on given page,
then parse the results into a list of Torrents and return them"""
soup = _get_soup(page)
details = soup.find_all("tr", class_="odd")
even = soup.find_all("tr", class_="even")
# Join the results
for i in range(len(even)):
details.insert((i * 2)+1, even[i])
return self._parse_details(details) | Find every div tag containing torrent details on given page,
then parse the results into a list of Torrents and return them |
def rowsBeforeRow(self, rowObject, count):
"""
Wrapper around L{rowsBeforeItem} which accepts the web ID for an item
instead of the item itself.
@param rowObject: a dictionary mapping strings to column values, sent
from the client. One of those column values must be C{__id__} to
uniquely identify a row.
@param count: an integer, the number of rows to return.
"""
webID = rowObject['__id__']
return self.rowsBeforeItem(
self.webTranslator.fromWebID(webID),
count) | Wrapper around L{rowsBeforeItem} which accepts the web ID for an item
instead of the item itself.
@param rowObject: a dictionary mapping strings to column values, sent
from the client. One of those column values must be C{__id__} to
uniquely identify a row.
@param count: an integer, the number of rows to return. |
def request(schema):
"""
Decorate a function with a request schema.
"""
def wrapper(func):
setattr(func, REQUEST, schema)
return func
return wrapper | Decorate a function with a request schema. |
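A brief usage sketch, assuming REQUEST is the module-level attribute name used by the decorator above; the schema and view are hypothetical.

@request({"type": "object", "required": ["name"]})   # hypothetical schema
def create_user(payload):
    return payload

assert getattr(create_user, REQUEST) == {"type": "object", "required": ["name"]}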
def _discarded_reads2_out_file_name(self):
"""Checks if file name is set for discarded reads2 output.
Returns absolute path."""
if self.Parameters['-4'].isOn():
discarded_reads2 = self._absolute(str(self.Parameters['-4'].Value))
else:
raise ValueError(
"No discarded-reads2 (flag -4) output path specified")
return discarded_reads2 | Checks if file name is set for discarded reads2 output.
Returns absolute path. |
def paste(location):
"""paste a file or directory that has been previously copied"""
copyData = settings.getDataFile()
if not location:
location = "."
try:
data = pickle.load(open(copyData, "rb"))
speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
except:
speech.fail("It doesn't look like you've copied anything yet.")
speech.fail("Type 'hallie copy <file>' to copy a file or folder.")
return
process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
if "denied" in process:
speech.fail("Unable to paste your file successfully. This is most likely due to a permission issue. You can try to run me as sudo!") | paste a file or directory that has been previously copied |
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"]) | Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard. |
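A self-contained sketch of the same wildcard matching for fully specified "x.y.z" strings; the _parse helper is a stand-in for the original _str_to_version and ignores the optional-component handling.

def _parse(version_str):
    # stand-in parser: '*' stays a wildcard, everything else becomes an int
    return tuple(p if p == "*" else int(p) for p in version_str.split("."))

def matches(self_version, other_version):
    major, minor, patch = _parse(other_version)
    s_major, s_minor, s_patch = _parse(self_version)
    return (major in [s_major, "*"] and minor in [s_minor, "*"]
            and patch in [s_patch, "*"])

print(matches("1.2.3", "1.*.*"))   # True
print(matches("1.2.3", "1.2.4"))   # False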
def _compute_video_hash(videofile):
""" compute videofile's hash
reference: https://docs.google.com/document/d/1w5MCBO61rKQ6hI5m9laJLWse__yTYdRugpVyz4RzrmM/preview
"""
seek_positions = [None] * 4
hash_result = []
with open(videofile, 'rb') as fp:
total_size = os.fstat(fp.fileno()).st_size
if total_size < 8192 + 4096:
raise exceptions.InvalidFileError(
'the video[{}] is too small'.format(os.path.basename(videofile)))
seek_positions[0] = 4096
seek_positions[1] = total_size // 3 * 2
seek_positions[2] = total_size // 3
seek_positions[3] = total_size - 8192
for pos in seek_positions:
fp.seek(pos, 0)
data = fp.read(4096)
m = hashlib.md5(data)
hash_result.append(m.hexdigest())
return ';'.join(hash_result) | compute videofile's hash
reference: https://docs.google.com/document/d/1w5MCBO61rKQ6hI5m9laJLWse__yTYdRugpVyz4RzrmM/preview |
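A brief usage sketch; the file name is hypothetical and the file must be larger than 12 KiB, per the size check above.

try:
    print(_compute_video_hash('movie.mkv'))   # four md5 hex digests joined by ';'
except Exception as exc:                      # e.g. the library's InvalidFileError
    print(exc)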
def list_theme():
"""List all available Engineer themes."""
from engineer.themes import ThemeManager
themes = ThemeManager.themes()
col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()]))
themes = ThemeManager.themes_by_finder()
for finder in sorted(themes.iterkeys()):
if len(themes[finder]) > 0:
puts("%s: " % finder)
for theme in sorted(themes[finder], key=lambda _: _.id):
with indent(4):
puts(
columns(
[colored.cyan("%s:" % theme.id), col1],
[colored.white(theme.root_path, bold=True), col2]
)
) | List all available Engineer themes. |
def random_density(qubits: Union[int, Qubits]) -> Density:
"""
Returns: A randomly sampled Density from the Hilbert–Schmidt
ensemble of quantum states
Ref: "Induced measures in the space of mixed quantum states" Karol
Zyczkowski, Hans-Juergen Sommers, J. Phys. A34, 7111-7125 (2001)
https://arxiv.org/abs/quant-ph/0012101
"""
N, qubits = qubits_count_tuple(qubits)
size = (2**N, 2**N)
ginibre_ensemble = (np.random.normal(size=size) +
1j * np.random.normal(size=size)) / np.sqrt(2.0)
matrix = ginibre_ensemble @ np.transpose(np.conjugate(ginibre_ensemble))
matrix /= np.trace(matrix)
return Density(matrix, qubits=qubits) | Returns: A randomly sampled Density from the Hilbert–Schmidt
ensemble of quantum states
Ref: "Induced measures in the space of mixed quantum states" Karol
Zyczkowski, Hans-Juergen Sommers, J. Phys. A34, 7111-7125 (2001)
https://arxiv.org/abs/quant-ph/0012101 |
def handle_device_json(self, data):
"""Manage the device json list."""
self._device_json.insert(0, data)
self._device_json.pop() | Manage the device json list. |
def extract_token_and_qualifier(text, line=0, column=0):
'''
Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).
:param unicode text:
:param int line: 0-based
:param int column: 0-based
'''
# Note: not using the tokenize module because text should be unicode and
# line/column refer to the unicode text (otherwise we'd have to know
# those ranges after converted to bytes).
if line < 0:
line = 0
if column < 0:
column = 0
if isinstance(text, bytes):
text = text.decode('utf-8')
lines = text.splitlines()
try:
text = lines[line]
except IndexError:
return TokenAndQualifier(u'', u'')
if column >= len(text):
column = len(text)
text = text[:column]
token = u''
qualifier = u''
temp_token = []
for i in range(column - 1, -1, -1):
c = text[i]
if c in identifier_part or isidentifier(c) or c == u'.':
temp_token.append(c)
else:
break
temp_token = u''.join(reversed(temp_token))
if u'.' in temp_token:
temp_token = temp_token.split(u'.')
token = u'.'.join(temp_token[:-1])
qualifier = temp_token[-1]
else:
qualifier = temp_token
return TokenAndQualifier(token, qualifier) | Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).
:param unicode text:
:param int line: 0-based
:param int column: 0-based |
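A brief usage sketch; the TokenAndQualifier namedtuple and the identifier helpers are assumed to come from the surrounding module.

# cursor placed at the end of "x = self.foo.ba" (line 0, column 15)
tok = extract_token_and_qualifier(u'x = self.foo.ba', line=0, column=15)
print(tok)  # expected: token == u'self.foo', qualifier == u'ba'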
def plot(self):
"""
Plot posterior from simple nonparametric regression.
"""
figure()
plot_envelope(self.M, self.C, self.xplot)
for i in range(3):
f = Realization(self.M, self.C)
plot(self.xplot,f(self.xplot))
plot(self.abundance, self.frye, 'k.', markersize=4)
xlabel('Female abundance')
ylabel('Frye density')
title(self.name)
axis('tight') | Plot posterior from simple nonparametric regression.
def prepend_to_file(path, data, bufsize=1<<15):
"""TODO:
* Add a random string to the backup file.
* Restore permissions after copy.
"""
# Backup the file #
backupname = path + os.extsep + 'bak'
# Remove previous backup if it exists #
try: os.unlink(backupname)
except OSError: pass
os.rename(path, backupname)
# Open input/output files, note: outputfile's permissions lost #
with open(backupname) as inputfile:
with open(path, 'w') as outputfile:
outputfile.write(data)
buf = inputfile.read(bufsize)
while buf:
outputfile.write(buf)
buf = inputfile.read(bufsize)
# Remove backup on success #
os.remove(backupname) | TODO:
* Add a random string to the backup file.
* Restore permissions after copy. |
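A brief usage sketch with a hypothetical file; the data ends up before the original contents and the temporary .bak copy is removed on success.

with open('notes.txt', 'w') as f:      # hypothetical file
    f.write('existing body\n')

prepend_to_file('notes.txt', 'new header\n')

with open('notes.txt') as f:
    print(f.read())   # 'new header' followed by 'existing body'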
def ai(board, who='x'):
"""
Returns best next board
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> ai(b)
< Board |xo.xo.x..| >
"""
return sorted(board.possible(), key=lambda b: value(b, who))[-1] | Returns best next board
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> ai(b)
< Board |xo.xo.x..| > |
def read_namespaced_network_policy(self, name, namespace, **kwargs):
"""
read the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_network_policy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
return data | read the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_network_policy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1NetworkPolicy
If the method is called asynchronously,
returns the request thread. |
def get_preview_url(self, data_type='L1C'):
"""Returns url location of full resolution L1C preview
:return:
"""
if self.data_source is DataSource.SENTINEL2_L1C or self.safe_type is EsaSafeType.OLD_TYPE:
return self.get_url(AwsConstants.PREVIEW_JP2)
return self.get_qi_url('{}_PVI.jp2'.format(data_type)) | Returns url location of full resolution L1C preview
:return: |
def load_vcf(
path,
genome=None,
reference_vcf_key="reference",
only_passing=True,
allow_extended_nucleotides=False,
include_info=True,
chunk_size=10 ** 5,
max_variants=None,
sort_key=variant_ascending_position_sort_key,
distinct=True):
"""
Load reference name and Variant objects from the given VCF filename.
Currently only local files are supported by this function (no http). If you
call this on an HTTP URL, it will fall back to `load_vcf`.
Parameters
----------
path : str
Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).
genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
Optionally pass in a PyEnsembl Genome object, name of reference, or
PyEnsembl release version to specify the reference associated with a
VCF (otherwise infer reference from VCF using reference_vcf_key)
reference_vcf_key : str, optional
Name of metadata field which contains path to reference FASTA
file (default = 'reference')
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
allow_extended_nucleotides : boolean, default False
Allow characters other than A,C,T,G in the ref and alt strings.
include_info : boolean, default True
Whether to parse the INFO and per-sample columns. If you don't need
these, set to False for faster parsing.
chunk_size: int, optional
Number of records to load in memory at once.
max_variants : int, optional
If specified, return only the first max_variants variants.
sort_key : fn
Function which maps each element to a sorting criterion.
Set to None to not to sort the variants.
distinct : boolean, default True
Don't keep repeated variants
"""
require_string(path, "Path or URL to VCF")
parsed_path = parse_url_or_path(path)
if parsed_path.scheme and parsed_path.scheme.lower() != "file":
# pandas.read_table nominally supports HTTP, but it tends to crash on
# large files and does not support gzip. Switching to the python-based
# implementation of read_table (with engine="python") helps with some
# issues but introduces a new set of problems (e.g. the dtype parameter
# is not accepted). For these reasons, we're currently not attempting
# to load VCFs over HTTP with pandas directly, and instead download it
# to a temporary file and open that.
(filename, headers) = urllib.request.urlretrieve(path)
try:
# The downloaded file has no file extension, which confuses pyvcf
# for gziped files in Python 3. We rename it to have the correct
# file extension.
new_filename = "%s.%s" % (
filename, parsed_path.path.split(".")[-1])
os.rename(filename, new_filename)
filename = new_filename
return load_vcf(
filename,
genome=genome,
reference_vcf_key=reference_vcf_key,
only_passing=only_passing,
allow_extended_nucleotides=allow_extended_nucleotides,
include_info=include_info,
chunk_size=chunk_size,
max_variants=max_variants,
sort_key=sort_key,
distinct=distinct)
finally:
logger.info("Removing temporary file: %s", filename)
os.unlink(filename)
# Loading a local file.
# The file will be opened twice: first to parse the header with pyvcf, then
# by pandas to read the data.
# PyVCF reads the metadata immediately and stops at the first line with
# data. We can close the file after that.
handle = PyVCFReaderFromPathOrURL(path)
handle.close()
genome = infer_genome_from_vcf(
genome,
handle.vcf_reader,
reference_vcf_key)
df_iterator = read_vcf_into_dataframe(
path,
include_info=include_info,
sample_names=handle.vcf_reader.samples if include_info else None,
chunk_size=chunk_size)
if include_info:
def sample_info_parser(unparsed_sample_info_strings, format_string):
"""
Given a format string like "GT:AD:ADP:DP:FS"
and a list of sample info strings where each entry is like
"0/1:3,22:T=3,G=22:25:33", return a dict that maps:
sample name -> field name -> value. Uses pyvcf to parse the fields.
"""
return pyvcf_calls_to_sample_info_list(
handle.vcf_reader._parse_samples(
unparsed_sample_info_strings, format_string, None))
else:
sample_info_parser = None
return dataframes_to_variant_collection(
df_iterator,
source_path=path,
info_parser=handle.vcf_reader._parse_info if include_info else None,
only_passing=only_passing,
max_variants=max_variants,
sample_names=handle.vcf_reader.samples if include_info else None,
sample_info_parser=sample_info_parser,
variant_kwargs={
'ensembl': genome,
'allow_extended_nucleotides': allow_extended_nucleotides},
variant_collection_kwargs={
'sort_key': sort_key,
'distinct': distinct}) | Load reference name and Variant objects from the given VCF filename.
Currently only local files are supported by this function (no http). If you
call this on an HTTP URL, it will fall back to `load_vcf`.
Parameters
----------
path : str
Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).
genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
Optionally pass in a PyEnsembl Genome object, name of reference, or
PyEnsembl release version to specify the reference associated with a
VCF (otherwise infer reference from VCF using reference_vcf_key)
reference_vcf_key : str, optional
Name of metadata field which contains path to reference FASTA
file (default = 'reference')
only_passing : boolean, optional
If true, any entries whose FILTER field is not one of "." or "PASS" is
dropped.
allow_extended_nucleotides : boolean, default False
Allow characters other than A,C,T,G in the ref and alt strings.
include_info : boolean, default True
Whether to parse the INFO and per-sample columns. If you don't need
these, set to False for faster parsing.
chunk_size: int, optional
Number of records to load in memory at once.
max_variants : int, optional
If specified, return only the first max_variants variants.
sort_key : fn
Function which maps each element to a sorting criterion.
Set to None to not to sort the variants.
distinct : boolean, default True
Don't keep repeated variants |
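A brief usage sketch with a hypothetical local path, using only parameters documented above.

variants = load_vcf("sample.vcf.gz", only_passing=True, include_info=False)
print(len(variants))          # number of distinct, sorted variants loaded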
def mine_block(self, *args: Any, **kwargs: Any) -> BaseBlock:
"""
Mine the current block. Proxies to self.pack_block method.
"""
packed_block = self.pack_block(self.block, *args, **kwargs)
final_block = self.finalize_block(packed_block)
# Perform validation
self.validate_block(final_block)
return final_block | Mine the current block. Proxies to self.pack_block method. |
def fetch(self, resource_class):
"""Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance.
"""
if issubclass(resource_class, Entry):
params = None
content_type = getattr(resource_class, '__content_type__', None)
if content_type is not None:
params = {'content_type': resource_class.__content_type__}
return RequestArray(self.dispatcher, utils.path_for_class(resource_class), self.config.resolve_links,
params=params)
else:
remote_path = utils.path_for_class(resource_class)
if remote_path is None:
raise Exception('Invalid resource type \"{0}\".'.format(resource_class))
return RequestArray(self.dispatcher, remote_path, self.config.resolve_links) | Construct a :class:`.Request` for the given resource type.
Provided an :class:`.Entry` subclass, the Content Type ID will be inferred and requested explicitly.
Examples::
client.fetch(Asset)
client.fetch(Entry)
client.fetch(ContentType)
client.fetch(CustomEntryClass)
:param resource_class: The type of resource to be fetched.
:return: :class:`.Request` instance. |
def real_time_scheduling(self, availability, oauth, event, target_calendars=()):
"""Generates an real time scheduling link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:available_periods - A dict stating the available periods for the event
:start_interval - A Integer representing the start_interval of the event
:buffer - A dict representing the buffer for the event
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:state - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - A list of dicts stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference.
"""
args = {
'oauth': oauth,
'event': event,
'target_calendars': target_calendars
}
if availability:
options = {}
options['participants'] = self.map_availability_participants(availability.get('participants', None))
options['required_duration'] = self.map_availability_required_duration(availability.get('required_duration', None))
options['start_interval'] = self.map_availability_required_duration(availability.get('start_interval', None))
options['buffer'] = self.map_availability_buffer(availability.get('buffer', None))
self.translate_available_periods(availability['available_periods'])
options['available_periods'] = availability['available_periods']
args['availability'] = options
return self.request_handler.post(endpoint='real_time_scheduling', data=args, use_api_key=True).json() | Generates a real time scheduling link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:participants - A dict stating who is required for the availability
call
:required_duration - A dict stating the length of time the event will
last for
:available_periods - A dict stating the available periods for the event
:start_interval - A Integer representing the start_interval of the event
:buffer - A dict representing the buffer for the event
:param dict oauth: - A dict describing the OAuth flow required:
:scope - A String representing the scopes to ask for
within the OAuth flow
:redirect_uri - A String containing a url to redirect the
user to after completing the OAuth flow.
:state - A String representing additional state to
be passed within the OAuth flow.
:param dict event: - A dict describing the event
:param list target_calendars: - A list of dicts stating into which calendars
to insert the created event
See http://www.cronofy.com/developers/api#upsert-event for reference. |
def end_span(self, *args, **kwargs):
"""End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span.
"""
cur_span = self.current_span()
if cur_span is None and self._spans_list:
cur_span = self._spans_list[-1]
if cur_span is None:
logging.warning('No active span, cannot do end_span.')
return
cur_span.finish()
self.span_context.span_id = cur_span.parent_span.span_id if \
cur_span.parent_span else None
if isinstance(cur_span.parent_span, trace_span.Span):
execution_context.set_current_span(cur_span.parent_span)
else:
execution_context.set_current_span(None)
with self._spans_list_condition:
if cur_span in self._spans_list:
span_datas = self.get_span_datas(cur_span)
self.exporter.export(span_datas)
self._spans_list.remove(cur_span)
return cur_span | End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span. |
def handle(self, *args, **kwargs):
"""
Command handler for the "metrics" command.
"""
frequency = kwargs['frequency']
frequencies = settings.STATISTIC_FREQUENCY_ALL if frequency == 'a' else (frequency.split(',') if ',' in frequency else [frequency])
if kwargs['list']:
maintenance.list_statistics()
# if we're supposed to calculate the latest statistics
elif kwargs['calculate']:
maintenance.calculate_statistics(maintenance.get_statistic_by_name(kwargs['calculate']), frequencies)
# pure reset of statistic(s)
elif kwargs['reset']:
maintenance.reset_statistics(maintenance.get_statistic_by_name(kwargs['reset']), frequencies, kwargs['reset_cumulative'])
# recalculation of statistic(s)
elif kwargs['recalculate']:
maintenance.reset_statistics(maintenance.get_statistic_by_name(kwargs['recalculate']), frequencies, kwargs['reset_cumulative'], True) | Command handler for the "metrics" command. |
def isclose(a, b, rtol=1e-5, atol=1e-8):
"""This is essentially np.isclose, but slightly faster."""
return abs(a - b) < (atol + rtol * abs(b)) | This is essentially np.isclose, but slightly faster. |
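A brief usage sketch comparing against numpy's version, which uses the same default tolerances (rtol=1e-5, atol=1e-8).

import numpy as np

print(isclose(1.0, 1.0 + 1e-9))       # True
print(isclose(1.0, 1.1))              # False
print(np.isclose(1.0, 1.0 + 1e-9))    # True, matching result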
def GetArchiveInfo(self):
'''Obtains information on CHM archive.
This function checks the /#SYSTEM file inside the CHM archive to
obtain the index, home page, topics, encoding and title. It is called
from LoadCHM.
'''
self.searchable = extra.is_searchable(self.file)
self.lcid = None
result, ui = chmlib.chm_resolve_object(self.file, '/#SYSTEM')
if (result != chmlib.CHM_RESOLVE_SUCCESS):
sys.stderr.write('GetArchiveInfo: #SYSTEM does not exist\n')
return 0
size, text = chmlib.chm_retrieve_object(self.file, ui, 4l, ui.length)
if (size == 0):
sys.stderr.write('GetArchiveInfo: file size = 0\n')
return 0
buff = array.array('B', text)
index = 0
while (index < size):
cursor = buff[index] + (buff[index+1] * 256)
if (cursor == 0):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.topics = '/' + text[index:index+cursor-1]
elif (cursor == 1):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.index = '/' + text[index:index+cursor-1]
elif (cursor == 2):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.home = '/' + text[index:index+cursor-1]
elif (cursor == 3):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.title = text[index:index+cursor-1]
elif (cursor == 4):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.lcid = buff[index] + (buff[index+1] * 256)
elif (cursor == 6):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
tmp = text[index:index+cursor-1]
if not self.topics:
tmp1 = '/' + tmp + '.hhc'
tmp2 = '/' + tmp + '.hhk'
res1, ui1 = chmlib.chm_resolve_object(self.file, tmp1)
res2, ui2 = chmlib.chm_resolve_object(self.file, tmp2)
if not self.topics and res1 == chmlib.CHM_RESOLVE_SUCCESS:
self.topics = '/' + tmp + '.hhc'
if not self.index and res2 == chmlib.CHM_RESOLVE_SUCCESS:
self.index = '/' + tmp + '.hhk'
elif (cursor == 16):
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
self.encoding = text[index:index+cursor-1]
else:
index += 2
cursor = buff[index] + (buff[index+1] * 256)
index += 2
index += cursor
self.GetWindowsInfo()
if not self.lcid:
self.lcid = extra.get_lcid(self.file)
return 1 | Obtains information on CHM archive.
This function checks the /#SYSTEM file inside the CHM archive to
obtain the index, home page, topics, encoding and title. It is called
from LoadCHM. |
def get_all_fields(obj):
"""Returns a list of all field names on the instance."""
fields = []
for f in obj._meta.fields:
fname = f.name
get_choice = "get_" + fname + "_display"
if hasattr(obj, get_choice):
value = getattr(obj, get_choice)()
else:
try:
value = getattr(obj, fname)
except Exception:
value = None
if isinstance(value, list):
value = ",".join(str(v) for v in value)
if f.editable and value and f.name:
fields.append(
{"label": f.verbose_name, "name": f.name, "value": value}
)
return fields | Returns a list of all field names on the instance. |
def copy(self):
""" Copy the currently selected text to the clipboard, removing prompts.
"""
if self._page_control is not None and self._page_control.hasFocus():
self._page_control.copy()
elif self._control.hasFocus():
text = self._control.textCursor().selection().toPlainText()
if text:
lines = map(self._transform_prompt, text.splitlines())
text = '\n'.join(lines)
QtGui.QApplication.clipboard().setText(text)
else:
self.log.debug("frontend widget : unknown copy target") | Copy the currently selected text to the clipboard, removing prompts. |
def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object
"""
root_node = ET.Element("no2index")
reference_time_node = ET.SubElement(root_node, "reference_time")
reference_time_node.text = str(self._reference_time)
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = str(self._interval)
no2_samples_node = ET.SubElement(root_node, "no2_samples")
for smpl in self._no2_samples:
s = smpl.copy()
# turn values to 12 decimal digits-formatted strings
s['label'] = s['label']
s['value'] = '{:.12e}'.format(s['value'])
s['precision'] = '{:.12e}'.format(s['precision'])
xmlutils.create_DOM_node_from_dict(s, "no2_sample",
no2_samples_node)
root_node.append(self._location._to_DOM())
return root_node | Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object |
def WaitProcessing(obj, eng, callbacks, exc_info):
"""Take actions when WaitProcessing is raised.
.. note::
We're essentially doing HaltProcessing, plus `obj.set_action` and
object status `WAITING` instead of `HALTED`.
This is not present in TransitionActions so that's why it is not
calling super in this case.
"""
e = exc_info[1]
obj.set_action(e.action, e.message)
obj.save(status=eng.object_status.WAITING,
callback_pos=eng.state.callback_pos,
id_workflow=eng.uuid)
eng.save(WorkflowStatus.HALTED)
eng.log.warning("Workflow '%s' waiting at task %s with message: %s",
eng.name, eng.current_taskname or "Unknown", e.message)
db.session.commit()
# Call super which will reraise
TransitionActions.HaltProcessing(
obj, eng, callbacks, exc_info
) | Take actions when WaitProcessing is raised.
.. note::
We're essentially doing HaltProcessing, plus `obj.set_action` and
object status `WAITING` instead of `HALTED`.
This is not present in TransitionActions so that's why it is not
calling super in this case. |
def check_spot_requests(self, requests, tags=None):
"""Check status of one or more EC2 spot instance requests.
:param requests: List of EC2 spot instance request IDs.
:type requests: list
:param tags:
:type tags: dict
:return: List of boto.ec2.instance.Instance's created, order corresponding to requests param (None if request
still open, boto.ec2.instance.Reservation if request is no longer open)
:rtype: list
"""
instances = [None] * len(requests)
ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
for req in ec2_requests:
if req.instance_id:
instance = self.retry_on_ec2_error(self.ec2.get_only_instances, req.instance_id)[0]
if not instance:
raise EC2ManagerException('Failed to get instance with id %s for %s request %s'
% (req.instance_id, req.status.code, req.id))
instances[requests.index(req.id)] = instance
self.retry_on_ec2_error(self.ec2.create_tags, [instance.id], tags or {})
logger.info('Request %s is %s and %s.',
req.id,
req.status.code,
req.state)
logger.info('%s is %s at %s (%s)',
instance.id,
instance.state,
instance.public_dns_name,
instance.ip_address)
elif req.state != "open":
# return the request so we don't try again
instances[requests.index(req.id)] = req
return instances | Check status of one or more EC2 spot instance requests.
:param requests: List of EC2 spot instance request IDs.
:type requests: list
:param tags:
:type tags: dict
:return: List of boto.ec2.instance.Instance's created, order corresponding to requests param (None if request
still open, boto.ec2.instance.Reservation if request is no longer open)
:rtype: list |
def mkdir(name, path):
'''Create an empty directory in the virtual folder.
\b
NAME: Name of a virtual folder.
PATH: The name or path of directory. Parent directories are created automatically
if they do not exist.
'''
with Session() as session:
try:
session.VFolder(name).mkdir(path)
print_done('Done.')
except Exception as e:
print_error(e)
sys.exit(1) | Create an empty directory in the virtual folder.
\b
NAME: Name of a virtual folder.
PATH: The name or path of directory. Parent directories are created automatically
if they do not exist. |