def hull(self):
"""
Bounding polygon as a convex hull.
"""
from scipy.spatial import ConvexHull
if len(self.coordinates) >= 4:
inds = ConvexHull(self.coordinates).vertices
return self.coordinates[inds]
else:
return self.coordinates
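A standalone sketch of the same scipy pattern, assuming numpy and scipy are installed; the points below are invented for illustration.
import numpy as np
from scipy.spatial import ConvexHull

# Four corners of a square plus one interior point; with >= 4 points the
# hull's vertex indices select back into the original coordinate array,
# mirroring the method above.
points = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0], [1.0, 1.0]])
inds = ConvexHull(points).vertices
print(points[inds])  # the interior point [1, 1] is dropped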
def lagcrp_helper(egg, match='exact', distance='euclidean',
ts=None, features=None):
"""
Computes probabilities for each transition distance (probability that a word
recalled will be a given distance--in presentation order--from the previous
recalled word).
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prec : numpy array
Each float is the probability of a transition of that distance (distances
indexed by position, from -(n-1) to (n-1), excluding zero).
"""
def lagcrp(rec, lstlen):
"""Computes lag-crp for a given recall list"""
def check_pair(a, b):
if (a>0 and b>0) and (a!=b):
return True
else:
return False
def compute_actual(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in range(0,len(rec)-1):
a=rec[trial]
b=rec[trial+1]
if check_pair(a, b) and (a not in recalled) and (b not in recalled):
arr[b-a]+=1
recalled.append(a)
return arr
def compute_possible(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in rec:
if np.isnan(trial):
pass
else:
lbound=int(1-trial)
ubound=int(lstlen-trial)
chances=list(range(lbound,0))+list(range(1,ubound+1))
for each in recalled:
if each-trial in chances:
chances.remove(each-trial)
arr[chances]+=1
recalled.append(trial)
return arr
actual = compute_actual(rec, lstlen)
possible = compute_possible(rec, lstlen)
crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]
crp.insert(int(len(crp) / 2), np.nan)
return crp
def nlagcrp(distmat, ts=None):
def lagcrp_model(s):
idx = list(range(0, -s, -1))
return np.array([list(range(i, i+s)) for i in idx])
# remove nan columns
distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T
model = lagcrp_model(distmat.shape[1])
lagcrp = np.zeros(ts * 2)
for rdx in range(len(distmat)-1):
item = distmat[rdx, :]
next_item = distmat[rdx+1, :]
if not np.isnan(item).any() and not np.isnan(next_item).any():
outer = np.outer(item, next_item)
lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts))))
lagcrp /= ts
lagcrp = list(lagcrp)
lagcrp.insert(int(len(lagcrp) / 2), np.nan)
return np.array(lagcrp)
def _format(p, r):
p = np.matrix([np.array(i) for i in p])
if p.shape[0]==1:
p=p.T
r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r)
r = np.matrix([np.array(i) for i in r])
if r.shape[0]==1:
r=r.T
return p, r
opts = dict(match=match, distance=distance, features=features)
if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if not ts:
ts = egg.pres.shape[1]
if match in ['exact', 'best']:
lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat]
elif match == 'smooth':
lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0))
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(lagcrp, axis=0)
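The nested helpers are easiest to follow on a toy case. Below is a self-contained sketch (not quail itself; the recall sequence is invented) that counts the observed and the available transition lags for a single recall order and takes their ratio, which is the per-lag probability the function averages over lists.
import numpy as np
import pandas as pd

lstlen, rec = 4, [1, 2, 4]            # list length 4; positions recalled in this order
lags = list(range(-lstlen, 0)) + list(range(1, lstlen + 1))
actual = pd.Series(0.0, index=lags)
possible = pd.Series(0.0, index=lags)

# Observed lags between successive recalls.
for a, b in zip(rec, rec[1:]):
    actual.loc[b - a] += 1

# Lags that were still available at each recall (no repetitions allowed).
recalled = []
for item in rec:
    chances = [lag for lag in range(1 - item, lstlen - item + 1)
               if lag != 0 and (item + lag) not in recalled]
    possible.loc[chances] += 1
    recalled.append(item)

crp = actual / possible.replace(0.0, np.nan)
print(crp)  # lags +1 and +2 each come out at 0.5 for this sequence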
def import_teamocil(sconf):
"""Return tmuxp config from a `teamocil`_ yaml config.
.. _teamocil: https://github.com/remiprev/teamocil
Parameters
----------
sconf : dict
python dict for session configuration
Notes
-----
Todos:
- change 'root' to a cd or start_directory
- width in pane -> main-pane-width
- with_env_var
- clear
- cmd_separator
"""
tmuxp_config = {}
if 'session' in sconf:
sconf = sconf['session']
if 'name' in sconf:
tmuxp_config['session_name'] = sconf['name']
else:
tmuxp_config['session_name'] = None
if 'root' in sconf:
tmuxp_config['start_directory'] = sconf.pop('root')
tmuxp_config['windows'] = []
for w in sconf['windows']:
windowdict = {'window_name': w['name']}
if 'clear' in w:
windowdict['clear'] = w['clear']
if 'filters' in w:
if 'before' in w['filters']:
windowdict['shell_command_before'] = w['filters']['before']
if 'after' in w['filters']:
windowdict['shell_command_after'] = w['filters']['after']
if 'root' in w:
windowdict['start_directory'] = w.pop('root')
if 'splits' in w:
w['panes'] = w.pop('splits')
if 'panes' in w:
for p in w['panes']:
if 'cmd' in p:
p['shell_command'] = p.pop('cmd')
if 'width' in p:
# todo support for height/width
p.pop('width')
windowdict['panes'] = w['panes']
if 'layout' in w:
windowdict['layout'] = w['layout']
tmuxp_config['windows'].append(windowdict)
return tmuxp_config
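A usage sketch, assuming the import_teamocil function above is in scope; the teamocil-style dict is invented to show how session/root/splits/cmd/width are remapped.
teamocil_session = {
    'session': {
        'name': 'demo',
        'root': '~/project',
        'windows': [
            {'name': 'editor',
             'splits': [{'cmd': 'vim'}, {'cmd': 'git status', 'width': 50}]},
        ],
    },
}

converted = import_teamocil(teamocil_session)
print(converted['session_name'])         # 'demo'
print(converted['start_directory'])      # '~/project'
print(converted['windows'][0]['panes'])  # [{'shell_command': 'vim'}, {'shell_command': 'git status'}]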
def _swap_optimizer_allows(self, p1, p2):
"""Identify easily discarded meaningless swaps.
This is motivated by the cost of millions of swaps being simulated.
"""
# setup local shortcuts
a = self._array
tile1 = a[p1]
tile2 = a[p2]
# 1) disallow same tiles
if tile1 == tile2:
return False
# 2) disallow matches unless a wildcard is involved
if tile1.matches(tile2) and not any(t.is_wildcard()
for t in (tile1, tile2)):
return False
# 3) disallow when both tiles (post-swap) are surrounded by non-matches
center_other_pairs = ((p1, p2), (p2, p1))
class MatchedTiles(Exception):
pass
try:
for center_p, other_p in center_other_pairs:
up_down_left_right = ((center_p[0] - 1, center_p[1]),
(center_p[0] + 1, center_p[1]),
(center_p[0], center_p[1] - 1),
(center_p[0], center_p[1] + 1))
post_swap_center_tile = a[other_p]
for surrounding_p in up_down_left_right:
# ignore out of bounds positions
# and ignore the inner swap which is handled elsewhere
if any((not (0 <= surrounding_p[0] <= 7), # out of bounds
not (0 <= surrounding_p[1] <= 7), # out of bounds
surrounding_p == other_p)): # inner swap
continue
surrounding_tile = a[surrounding_p]
if post_swap_center_tile.matches(surrounding_tile):
raise MatchedTiles()
except MatchedTiles:
pass # if any match found, stop checking and pass this filter
else:
return False # if no match is found, then this can be filtered
return True
def order_by(self, key):
"""
Returns new Enumerable sorted in ascending order by given key
:param key: key to sort by as lambda expression
:return: new Enumerable object
"""
if key is None:
raise NullArgumentError(u"No key for sorting given")
kf = [OrderingDirection(key, reverse=False)]
return SortedEnumerable(key_funcs=kf, data=self._data)
def thread_safe(method):
""" wraps method with lock acquire/release cycle
decorator requires class instance to have field self.lock of type threading.Lock or threading.RLock """
@functools.wraps(method)
def _locker(self, *args, **kwargs):
assert hasattr(self, 'lock'), \
'thread_safe decorator applied to method {0}.{1}: missing required field {0}.lock'\
.format(self.__class__.__name__, method.__name__)
try:
self.lock.acquire()
return method(self, *args, **kwargs)
finally:
try:
self.lock.release()
except Exception:
sys.stderr.write('Exception on releasing lock at method {0}'.format(method.__name__))
traceback.print_exc(file=sys.stderr)
return _locker
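A minimal usage sketch, assuming the thread_safe decorator above (and the functools/sys/traceback imports of its module) is in scope; the Counter class is invented and shows the self.lock field the decorator asserts on.
import threading

class Counter(object):
    def __init__(self):
        self.lock = threading.RLock()  # required attribute name
        self.value = 0

    @thread_safe
    def increment(self):
        self.value += 1
        return self.value

c = Counter()
print(c.increment())  # 1, with the lock held for the duration of the call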
def save_task(task, broker):
"""
Saves the task package to Django or the cache
"""
# SAVE LIMIT < 0 : Don't save success
if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
return
# enqueues next in a chain
if task.get('chain', None):
django_q.tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
# SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
db.close_old_connections()
try:
if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
Success.objects.last().delete()
# check if this task has previous results
if Task.objects.filter(id=task['id'], name=task['name']).exists():
existing_task = Task.objects.get(id=task['id'], name=task['name'])
# only update the result if it hasn't succeeded yet
if not existing_task.success:
existing_task.stopped = task['stopped']
existing_task.result = task['result']
existing_task.success = task['success']
existing_task.save()
else:
Task.objects.create(id=task['id'],
name=task['name'],
func=task['func'],
hook=task.get('hook'),
args=task['args'],
kwargs=task['kwargs'],
started=task['started'],
stopped=task['stopped'],
result=task['result'],
group=task.get('group'),
success=task['success']
)
except Exception as e:
logger.error(e)
def save(self, **kwargs):
"""
Create a ``FormEntry`` instance and related ``FieldEntry``
instances for each form field.
"""
entry = super(FormForForm, self).save(commit=False)
entry.form = self.form
entry.entry_time = now()
entry.save()
entry_fields = entry.fields.values_list("field_id", flat=True)
new_entry_fields = []
for field in self.form_fields:
field_key = "field_%s" % field.id
value = self.cleaned_data[field_key]
if value and self.fields[field_key].widget.needs_multipart_form:
value = fs.save(join("forms", str(uuid4()), value.name), value)
if isinstance(value, list):
value = ", ".join([v.strip() for v in value])
if field.id in entry_fields:
field_entry = entry.fields.get(field_id=field.id)
field_entry.value = value
field_entry.save()
else:
new = {"entry": entry, "field_id": field.id, "value": value}
new_entry_fields.append(FieldEntry(**new))
if new_entry_fields:
FieldEntry.objects.bulk_create(new_entry_fields)
return entry
def add_subtract(st, max_iter=7, max_npart='calc', max_mem=2e8,
always_check_remove=False, **kwargs):
"""
Automatically adds and subtracts missing & extra particles.
Operates by removing bad particles then adding missing particles on
repeat, until either no particles are added/removed or after `max_iter`
attempts.
Parameters
----------
st: :class:`peri.states.State`
The state to add and subtract particles to.
max_iter : Int, optional
The maximum number of add-subtract loops to use. Default is 7.
Terminates after either max_iter loops or when nothing has changed.
max_npart : Int or 'calc', optional
The maximum number of particles to add before optimizing the non-psf
globals. Default is ``'calc'``, which uses 5% of the initial number
of particles.
max_mem : Int, optional
The maximum memory to use for optimization after adding max_npart
particles. Default is 2e8.
always_check_remove : Bool, optional
Set to True to always check whether to remove particles. If ``False``,
only checks for removal while particles were removed on the previous
attempt. Default is False.
Other Parameters
----------------
invert : Bool, optional
``True`` if the particles are dark on a bright background, ``False``
if they are bright on a dark background. Default is ``True``.
min_rad : Float, optional
Particles with radius below ``min_rad`` are automatically deleted.
Default is ``'calc'`` = median rad - 25* radius std.
max_rad : Float, optional
Particles with radius above ``max_rad`` are automatically deleted.
Default is ``'calc'`` = median rad + 15* radius std, but you should
change this for your particle sizes.
min_edge_dist : Float, optional
Particles closer to the edge of the padded image than this are
automatically deleted. Default is 2.0.
check_rad_cutoff : 2-element float list.
Particles with ``radii < check_rad_cutoff[0]`` or ``> check...[1]``
are checked if they should be deleted (not automatic). Default is
``[3.5, 15]``.
check_outside_im : Bool, optional
Set to True to check whether to delete particles whose positions are
outside the un-padded image.
rad : Float, optional
The initial radius for added particles; added particles radii are
not fit until the end of ``add_subtract``. Default is ``'calc'``,
which uses the median radii of active particles.
tries : Int, optional
The number of particles to attempt to remove or add, per iteration.
Default is 50.
im_change_frac : Float, optional
How good the change in error needs to be relative to the change in
the difference image. Default is 0.2; i.e. if the error does not
decrease by 20% of the change in the difference image, do not add
the particle.
min_derr : Float, optional
The minimum change in the state's error to keep a particle in the
image. Default is ``'3sig'`` which uses ``3*st.sigma``.
do_opt : Bool, optional
Set to False to avoid optimizing particle positions after adding.
minmass : Float, optional
The minimum mass for a particle to be identified as a feature,
as used by trackpy. Defaults to a decent guess.
use_tp : Bool, optional
Set to True to use trackpy to find missing particles inside the
image. Not recommended since trackpy deliberately cuts out particles
at the edge of the image. Default is ``False``.
Returns
-------
total_changed : Int
The total number of adds and subtracts done on the data. Not the
same as ``changed_inds.size`` since the same particle or particle
index can be added/subtracted multiple times.
added_positions : [N_added,3] numpy.ndarray
The positions of particles that have been added at any point in the
add-subtract cycle.
removed_positions : [N_added,3] numpy.ndarray
The positions of particles that have been removed at any point in
the add-subtract cycle.
Notes
------
Occasionally after the initial featuring a cluster of particles is
featured as 1 big particle. To fix these mistakes, it helps to set
max_rad to a physical value. This removes the big particle and allows
it to be re-featured by (several passes of) the adds.
The added/removed positions returned are whether or not the position
has been added or removed ever. It's possible that a position is
added, then removed during a later iteration.
"""
if max_npart == 'calc':
max_npart = 0.05 * st.obj_get_positions().shape[0]
total_changed = 0
_change_since_opt = 0
removed_poses = []
added_poses0 = []
added_poses = []
nr = 1 # Check removal on the first loop
for _ in range(max_iter):
if (nr != 0) or (always_check_remove):
nr, rposes = remove_bad_particles(st, **kwargs)
na, aposes = add_missing_particles(st, **kwargs)
current_changed = na + nr
removed_poses.extend(rposes)
added_poses0.extend(aposes)
total_changed += current_changed
_change_since_opt += current_changed
if current_changed == 0:
break
elif _change_since_opt > max_npart:
_change_since_opt *= 0
CLOG.info('Start add_subtract optimization.')
opt.do_levmarq(st, opt.name_globals(st, remove_params=st.get(
'psf').params), max_iter=1, run_length=4, num_eig_dirs=3,
max_mem=max_mem, eig_update_frequency=2, rz_order=0,
use_accel=True)
CLOG.info('After optimization:\t{:.6}'.format(st.error))
# Optimize the added particles' radii:
for p in added_poses0:
i = st.obj_closest_particle(p)
opt.do_levmarq_particles(st, np.array([i]), max_iter=2, damping=0.3)
added_poses.append(st.obj_get_positions()[i])
return total_changed, np.array(removed_poses), np.array(added_poses)
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
_check_kwargs(self.y, tfms, **kwargs)
self.tfm_y=True
if tfms is None:
self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms)))
self.tfmargs_y = {**self.tfmargs, **kwargs}
else:
tfms = list(filter(lambda t: t.use_on_y, tfms))
self.tfms_y,self.tfmargs_y = tfms,kwargs
return self
def write(self, str):
'''Write string str to the underlying file.
Note that due to buffering, flush() or close() may be needed before
the file on disk reflects the data written.'''
if self.closed: raise ValueError('File closed')
if self._mode in _allowed_read:
raise Exception('File opened for read only')
if self._valid is not None:
raise Exception('file already finalized')
if not self._done_header:
self._write_header()
# Encrypt and write the data
encrypted = self._crypto.encrypt(str)
self._checksumer.update(encrypted)
self._fp.write(encrypted)
def exists(self):
""":type: bool
True when the object actually exists (and can be accessed by
the current user) in Fedora
"""
# If we made the object under the pretext that it doesn't exist in
# fedora yet, then assume it doesn't exist in fedora yet.
if self._create:
return False
# If we can get a valid object profile, regardless of its contents,
# then this object exists. If not, then it doesn't.
try:
self.getProfile()
return True
except RequestFailed:
return False
def fetch(self):
"""
Fetch a ChallengeInstance
:returns: Fetched ChallengeInstance
:rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ChallengeInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
factor_sid=self._solution['factor_sid'],
sid=self._solution['sid'],
)
def add_row(self, label, row_data, columns=""):
"""
Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace
"""
# use provided column order, making sure you don't lose any values
# from self.df.columns
if len(columns):
if sorted(self.df.columns) == sorted(columns):
self.df.columns = columns
else:
new_columns = []
new_columns.extend(columns)
for col in self.df.columns:
if col not in new_columns:
new_columns.append(col)
# makes sure all columns have data or None
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None
# (make sure you are working with strings)
self.df.index = self.df.index.astype(str)
label = str(label)
# create a new row with suffix "new"
# (this ensures that you get a unique, new row,
# instead of adding on to an existing row with the same label)
self.df.loc[label + "new"] = pd.Series(row_data)
# rename it to be correct
self.df.rename(index={label + "new": label}, inplace=True)
# use next line to sort index inplace
#self.df.sort_index(inplace=True)
return self.df
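The temporary-label trick is easier to see in isolation. A standalone pandas sketch (frame, labels and values invented) of appending under label + "new" and renaming back, which yields a second row with the same label instead of overwriting the existing one:
import pandas as pd

df = pd.DataFrame({'age': [2.5]}, index=['site1'])
row_data = {'age': 3.0, 'lat': 45.0}

for key in row_data:                 # add any new columns first
    if key not in df.columns:
        df[key] = None

df.loc['site1' + 'new'] = pd.Series(row_data)
df.rename(index={'site1new': 'site1'}, inplace=True)
print(df)                            # two rows labelled 'site1'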
def set_cell(self, index, value):
"""
Sets the value of a single cell. If the index is not in the current index then a new index will be created.
:param index: index value
:param value: value to set
:return: nothing
"""
if self._sort:
exists, i = sorted_exists(self._index, index)
if not exists:
self._insert_row(i, index)
else:
try:
i = self._index.index(index)
except ValueError:
i = len(self._index)
self._add_row(index)
self._data[i] = value
def returner(ret):
'''
Signal a Django server that a return is available
'''
signaled = dispatch.Signal(providing_args=['ret']).send(sender='returner', ret=ret)
for signal in signaled:
log.debug(
'Django returner function \'returner\' signaled %s '
'which responded with %s', signal[0], signal[1]
)
def norm_package_version(version):
"""Normalize a version by removing extra spaces and parentheses."""
if version:
version = ','.join(v.strip() for v in version.split(',')).strip()
if version.startswith('(') and version.endswith(')'):
version = version[1:-1]
version = ''.join(v for v in version if v.strip())
else:
version = ''
return version
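A few illustrative calls, assuming the function above is in scope; the version strings are invented.
print(norm_package_version('(>=1.0, <2.0)'))  # '>=1.0,<2.0'
print(norm_package_version('  2.7.1  '))      # '2.7.1'
print(norm_package_version(None))             # ''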
def dict_array_bytes(ary, template):
"""
Return the number of bytes required by an array
Arguments
---------------
ary : dict
Dictionary representation of an array
template : dict
A dictionary of key-values, used to replace any
string values in the array with concrete integral
values
Returns
-----------
The number of bytes required to represent
the array.
"""
shape = shape_from_str_tuple(ary['shape'], template)
dtype = dtype_from_str(ary['dtype'], template)
return array_bytes(shape, dtype)
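A standalone sketch of the computation the two helpers presumably perform (resolve string dimensions against the template, then multiply by the dtype item size); the dictionary keys and values below are illustrative, not the library's API.
import numpy as np

ary = {'shape': ('ntime', 'nchan'), 'dtype': 'complex64'}
template = {'ntime': 100, 'nchan': 64}

shape = tuple(template[d] if isinstance(d, str) else d for d in ary['shape'])
nbytes = np.prod(shape) * np.dtype(ary['dtype']).itemsize
print(nbytes)  # 100 * 64 * 8 = 51200 bytes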
def check(a, b):
"""
Checks to see if the two values are equal to each other.
:param a | <str>
b | <str>
:return <bool>
"""
aencrypt = encrypt(a)
bencrypt = encrypt(b)
return a == b or a == bencrypt or aencrypt == b
def from_shapely(polygon_shapely, label=None):
"""
Create a polygon from a Shapely polygon.
Note: This will remove any holes in the Shapely polygon.
Parameters
----------
polygon_shapely : shapely.geometry.Polygon
The shapely polygon.
label : None or str, optional
The label of the new polygon.
Returns
-------
imgaug.Polygon
A polygon with the same exterior as the Shapely polygon.
"""
# load shapely lazily, which makes the dependency more optional
import shapely.geometry
ia.do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
# polygon_shapely.exterior can be None if the polygon was instantiated without points
if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:
return Polygon([], label=label)
exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])
return Polygon(exterior, label=label)
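A small sketch of just the coordinate extraction the function relies on, assuming shapely is installed; the triangle is invented. Note that shapely's exterior ring repeats the first vertex as its closing point.
import numpy as np
import shapely.geometry

shp = shapely.geometry.Polygon([(0, 0), (1, 0), (1, 1)])
exterior = np.float32([[x, y] for (x, y) in shp.exterior.coords])
print(exterior.shape)  # (4, 2): three vertices plus the closing point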
def delete(self, *keys):
"""Removes the specified keys. A key is ignored if it does not exist.
Returns :data:`True` if all keys are removed.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of keys that
will be removed. When a key to remove holds a value other than a
string, the individual complexity for this key is ``O(M)`` where
``M`` is the number of elements in the list, set, sorted set or
hash. Removing a single key that holds a string value is ``O(1)``.
:param keys: One or more keys to remove
:type keys: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'DEL'] + list(keys), len(keys))
def _handle_ping(client, topic, dct):
"""Internal method that will be called when receiving ping message."""
if dct['type'] == 'request':
resp = {
'type': 'answer',
'name': client.name,
'source': dct
}
client.publish('ping', resp)
def __system_multiCall(calls, **kwargs):
"""
Call multiple RPC methods at once.
:param calls: An array of struct like {"methodName": string, "params": array }
:param kwargs: Internal data
:type calls: list
:type kwargs: dict
:return:
"""
if not isinstance(calls, list):
raise RPCInvalidParams('system.multicall first argument should be a list, {} given.'.format(type(calls)))
handler = kwargs.get(HANDLER_KEY)
results = []
for call in calls:
try:
result = handler.execute_procedure(call['methodName'], args=call.get('params'))
# From https://mirrors.talideon.com/articles/multicall.html:
# "Notice that regular return values are always nested inside a one-element array. This allows you to
# return structs from functions without confusing them with faults."
results.append([result])
except RPCException as e:
results.append({
'faultCode': e.code,
'faultString': e.message,
})
except Exception as e:
results.append({
'faultCode': RPC_INTERNAL_ERROR,
'faultString': str(e),
})
return results
def serialize(self, obj, method='json', beautify=False, raise_exception=False):
"""Alias of helper.string.serialization.serialize"""
return self.helper.string.serialization.serialize(
obj=obj, method=method, beautify=beautify, raise_exception=raise_exception)
def cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask
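Sample calls, assuming the function above is in scope:
print(cidr_to_ipv4_netmask(24))    # '255.255.255.0'
print(cidr_to_ipv4_netmask(20))    # '255.255.240.0'
print(cidr_to_ipv4_netmask('33'))  # '' (out of range)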
def _parse_rd(self, config):
""" _parse_rd scans the provided configuration block and extracts
the vrf rd. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vrf configuration block from the node's running
configuration
Returns:
dict: resource dict attribute
"""
match = RD_RE.search(config)
if match:
value = match.group('value')
else:
value = match
return dict(rd=value)
def SkyCoord(self,*args,**kwargs):
"""
NAME:
SkyCoord
PURPOSE:
return the position as an astropy SkyCoord
INPUT:
t - (optional) time at which to get the position
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
SkyCoord(t)
HISTORY:
2015-06-02 - Written - Bovy (IAS)
"""
kwargs.pop('quantity',None) # rm useless keyword to no conflict later
_check_roSet(self,kwargs,'SkyCoord')
radec= self._radec(*args,**kwargs)
tdist= self.dist(quantity=False,*args,**kwargs)
if not _APY3: # pragma: no cover
return coordinates.SkyCoord(radec[:,0]*units.degree,
radec[:,1]*units.degree,
distance=tdist*units.kpc,
frame='icrs')
pmrapmdec= self._pmrapmdec(*args,**kwargs)
vlos= self._lbdvrpmllpmbb(*args,**kwargs)[:,3]
# Also return the Galactocentric frame used
v_sun= coordinates.CartesianDifferential(\
nu.array([-self._solarmotion[0],
self._solarmotion[1]+self._vo,
self._solarmotion[2]])*units.km/units.s)
return coordinates.SkyCoord(radec[:,0]*units.degree,
radec[:,1]*units.degree,
distance=tdist*units.kpc,
pm_ra_cosdec=pmrapmdec[:,0]\
*units.mas/units.yr,
pm_dec=pmrapmdec[:,1]*units.mas/units.yr,
radial_velocity=vlos*units.km/units.s,
frame='icrs',
galcen_distance=\
nu.sqrt(self._ro**2.+self._zo**2.)\
*units.kpc,
z_sun=self._zo*units.kpc,
galcen_v_sun=v_sun)
def d3logpdf_df3(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the third derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\\frac{d^{3}\\log p(y|\\lambda(f))}{df^{3}} = \\frac{d^{3}\\log p(y|\\lambda(f)}{d\\lambda(f)^{3}}\\left(\\frac{d\\lambda(f)}{df}\\right)^{3} + 3\\frac{d^{2}\\log p(y|\\lambda(f)}{d\\lambda(f)^{2}}\\frac{d\\lambda(f)}{df}\\frac{d^{2}\\lambda(f)}{df^{2}} + \\frac{d\\log p(y|\\lambda(f)}{d\\lambda(f)}\\frac{d^{3}\\lambda(f)}{df^{3}}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution - not used
:returns: third derivative of log likelihood evaluated for this point
:rtype: float
"""
if isinstance(self.gp_link, link_functions.Identity):
d3logpdf_df3 = self.d3logpdf_dlink3(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
d2link_df2 = self.gp_link.d2transf_df2(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
d3link_df3 = self.gp_link.d3transf_df3(f)
d3logpdf_df3 = chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3)
return d3logpdf_df3
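A hedged sketch of what chain_3 presumably computes, written directly from the Faa di Bruno expression in the docstring, followed by a numeric spot-check with a toy likelihood and link (both invented).
def chain_3(d3lp_dl3, dl_df, d2lp_dl2, d2l_df2, dlp_dl, d3l_df3):
    # d^3 log p(lambda(f)) / df^3 assembled from derivatives w.r.t. the link.
    return d3lp_dl3 * dl_df**3 + 3.0 * d2lp_dl2 * dl_df * d2l_df2 + dlp_dl * d3l_df3

# Toy check: log p(l) = -l**2/2 with link lambda(f) = f**2 gives -f**4/2,
# whose third derivative is -12*f, i.e. -24 at f = 2.
f = 2.0
print(chain_3(0.0, 2 * f, -1.0, 2.0, -(f ** 2), 0.0))  # -24.0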
def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:returns: str base URL for services on the current service unit.
"""
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def setVersion(self, date_issued, version_id=None):
"""
Legacy function...
should use the other set_* for version and date
as of 2016-10-20 used in:
dipper/sources/HPOAnnotations.py 139:
dipper/sources/CTD.py 99:
dipper/sources/BioGrid.py 100:
dipper/sources/MGI.py 255:
dipper/sources/EOM.py 93:
dipper/sources/Coriell.py 200:
dipper/sources/MMRRC.py 77:
# TODO set as deprecated
:param date_issued:
:param version_id:
:return:
"""
if date_issued is not None:
self.set_date_issued(date_issued)
elif version_id is not None:
self.set_version_by_num(version_id)
else:
LOG.error("date or version not set!")
# TODO throw error
return
if version_id is not None:
self.set_version_by_num(version_id)
else:
LOG.info("set version to %s", self.version)
self.set_version_by_date(date_issued)
LOG.info("set version to %s", self.version)
return
def _dict_rpartition(
in_dict,
keys,
delimiter=DEFAULT_TARGET_DELIM,
ordered_dict=False):
'''
Helper function to:
- Ensure all but the last key in `keys` exist recursively in `in_dict`.
- Return the dict at the one-to-last key, and the last key
:param dict in_dict: The dict to work with.
:param str keys: The delimited string with one or more keys.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return tuple(dict, str)
'''
if delimiter in keys:
all_but_last_keys, _, last_key = keys.rpartition(delimiter)
ensure_dict_key(in_dict,
all_but_last_keys,
delimiter=delimiter,
ordered_dict=ordered_dict)
dict_pointer = salt.utils.data.traverse_dict(in_dict,
all_but_last_keys,
default=None,
delimiter=delimiter)
else:
dict_pointer = in_dict
last_key = keys
return dict_pointer, last_key
def plane_intersection(strike1, dip1, strike2, dip2):
"""
Finds the intersection of two planes. Returns a plunge/bearing of the linear
intersection of the two planes.
Also accepts sequences of strike1s, dip1s, strike2s, dip2s.
Parameters
----------
strike1, dip1 : numbers or sequences of numbers
The strike and dip (in degrees, following the right-hand-rule) of the
first plane(s).
strike2, dip2 : numbers or sequences of numbers
The strike and dip (in degrees, following the right-hand-rule) of the
second plane(s).
Returns
-------
plunge, bearing : arrays
The plunge and bearing(s) (in degrees) of the line representing the
intersection of the two planes.
"""
norm1 = sph2cart(*pole(strike1, dip1))
norm2 = sph2cart(*pole(strike2, dip2))
norm1, norm2 = np.array(norm1), np.array(norm2)
lon, lat = cart2sph(*np.cross(norm1, norm2, axis=0))
return geographic2plunge_bearing(lon, lat)
def _openResources(self):
""" Opens the root Dataset.
"""
logger.info("Opening: {}".format(self._fileName))
self._ncGroup = Dataset(self._fileName)
def uuid_from_time(time_arg, node=None, clock_seq=None):
"""
Converts a datetime or timestamp to a type 1 :class:`uuid.UUID`.
:param time_arg:
The time to use for the timestamp portion of the UUID.
This can either be a :class:`datetime` object or a timestamp
in seconds (as returned from :meth:`time.time()`).
:type time_arg: :class:`datetime` or timestamp
:param node:
Node integer for the UUID (up to 48 bits). If not specified, this
field is randomized.
:type node: long
:param clock_seq:
Clock sequence field for the UUID (up to 14 bits). If not specified,
a random sequence is generated.
:type clock_seq: int
:rtype: :class:`uuid.UUID`
"""
if hasattr(time_arg, 'utctimetuple'):
seconds = int(calendar.timegm(time_arg.utctimetuple()))
microseconds = (seconds * 1e6) + time_arg.time().microsecond
else:
microseconds = int(time_arg * 1e6)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
intervals = int(microseconds * 10) + 0x01b21dd213814000
time_low = intervals & 0xffffffff
time_mid = (intervals >> 32) & 0xffff
time_hi_version = (intervals >> 48) & 0x0fff
if clock_seq is None:
clock_seq = random.getrandbits(14)
else:
if clock_seq > 0x3fff:
raise ValueError('clock_seq is out of range (need a 14-bit value)')
clock_seq_low = clock_seq & 0xff
clock_seq_hi_variant = 0x80 | ((clock_seq >> 8) & 0x3f)
if node is None:
node = random.getrandbits(48)
return uuid.UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
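A usage sketch, assuming the function above is in scope; it also recovers the timestamp from the UUID's time field using the same 100-nanosecond epoch offset.
import datetime

dt = datetime.datetime(2020, 1, 1, 12, 0, 0)
u = uuid_from_time(dt)
print(u.version)  # 1

seconds = round((u.time - 0x01b21dd213814000) / 1e7)
print(datetime.datetime.fromtimestamp(seconds, datetime.timezone.utc))
# 2020-01-01 12:00:00+00:00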
def Or(*xs, simplify=True):
"""Expression disjunction (sum, OR) operator
If *simplify* is ``True``, return a simplified expression.
"""
xs = [Expression.box(x).node for x in xs]
y = exprnode.or_(*xs)
if simplify:
y = y.simplify()
return _expr(y)
def delete(self, path):
"""
Ensure that roots of our managers can't be deleted. This should be
enforced by https://github.com/ipython/ipython/pull/8168, but rogue
implementations might override this behavior.
"""
path = normalize_api_path(path)
if path in self.managers:
raise HTTPError(
400, "Can't delete root of %s" % self.managers[path]
)
return self.__delete(path)
def _sign_block(self, block):
""" The block should be complete and the final
signature from the publishing validator (this validator) needs to
be added.
"""
block_header = block.block_header
header_bytes = block_header.SerializeToString()
signature = self._identity_signer.sign(header_bytes)
block.set_signature(signature)
return block
def get_contents_static(self, block_alias, context):
"""Returns contents of a static block."""
if 'request' not in context:
# No use in further actions as we won't ever know current URL.
return ''
current_url = context['request'].path
# Resolve current view name to support view names as block URLs.
try:
resolver_match = resolve(current_url)
namespace = ''
if resolver_match.namespaces:
# More than one namespace, really? Hmm.
namespace = resolver_match.namespaces[0]
resolved_view_name = ':%s:%s' % (namespace, resolver_match.url_name)
except Resolver404:
resolved_view_name = None
self._cache_init()
cache_entry_name = cache_get_key(block_alias)
siteblocks_static = self._cache_get(cache_entry_name)
if not siteblocks_static:
blocks = Block.objects.filter(alias=block_alias, hidden=False).only('url', 'contents')
siteblocks_static = [defaultdict(list), defaultdict(list)]
for block in blocks:
if block.url == '*':
url_re = block.url
elif block.url.startswith(':'):
url_re = block.url
# Normalize URL name to include namespace.
if url_re.count(':') == 1:
url_re = ':%s' % url_re
else:
url_re = re.compile(r'%s' % block.url)
if block.access_guest:
siteblocks_static[self.IDX_GUEST][url_re].append(block.contents)
elif block.access_loggedin:
siteblocks_static[self.IDX_AUTH][url_re].append(block.contents)
else:
siteblocks_static[self.IDX_GUEST][url_re].append(block.contents)
siteblocks_static[self.IDX_AUTH][url_re].append(block.contents)
self._cache_set(cache_entry_name, siteblocks_static)
self._cache_save()
user = getattr(context['request'], 'user', None)
is_authenticated = getattr(user, 'is_authenticated', False)
if not DJANGO_2:
is_authenticated = is_authenticated()
if is_authenticated:
lookup_area = siteblocks_static[self.IDX_AUTH]
else:
lookup_area = siteblocks_static[self.IDX_GUEST]
static_block_contents = ''
if '*' in lookup_area:
static_block_contents = choice(lookup_area['*'])
elif resolved_view_name in lookup_area:
static_block_contents = choice(lookup_area[resolved_view_name])
else:
for url, contents in lookup_area.items():
if url.match(current_url):
static_block_contents = choice(contents)
break
return static_block_contents
def messages(self):
""" Messages generated by server, see http://legacy.python.org/dev/peps/pep-0249/#cursor-messages
"""
if self._session:
result = []
for msg in self._session.messages:
ex = _create_exception_by_message(msg)
result.append((type(ex), ex))
return result
else:
return None
def lowwrap(self, fname):
"""
Wraps the fname method when the C code expects a different kind of
callback than we have in the fusepy API. (The wrapper is usually for
performing some checks or transformations which could be done in C but
is simpler if done in Python.)
Currently `open` and `create` are wrapped: a boolean flag is added
which indicates if the result is to be kept during the opened file's
lifetime or can be thrown away. Namely, it's considered disposable
if it's an instance of FuseFileInfo.
"""
fun = getattr(self, fname)
if fname in ('open', 'create'):
def wrap(*a, **kw):
res = fun(*a, **kw)
if not res or type(res) == type(0):
return res
else:
return (res, type(res) != FuseFileInfo)
elif fname == 'utimens':
def wrap(path, acc_sec, acc_nsec, mod_sec, mod_nsec):
ts_acc = Timespec(tv_sec = acc_sec, tv_nsec = acc_nsec)
ts_mod = Timespec(tv_sec = mod_sec, tv_nsec = mod_nsec)
return fun(path, ts_acc, ts_mod)
else:
wrap = fun
return wrap
def order_target_value(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent`
"""
if not self._can_order_asset(asset):
return None
target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def geojson_handler(geojson, hType='map'):
"""Restructure a GeoJSON object in preparation to be added directly by add_map_data or add_data_set methods.
The geojson will be broken down to fit a specific Highcharts (highmaps) type, either map, mapline or mappoint.
Meta data in GeoJSON's properties object will be copied directly over to object['properties']
1. geojson is the map data (GeoJSON) to be converted
2. hType is the type of highmap types. "map" will return GeoJSON polygons and multipolygons.
"mapline" will return GeoJSON linestrings and multilinestrings.
"mappoint" will return GeoJSON points and multipoints.
default: "map"
"""
hType_dict = {
'map': ['polygon', 'multipolygon'],
'mapline': ['linestring', 'multilinestring'],
'mappoint': ['point', 'multipoint'],
}
oldlist = [x for x in geojson['features'] if x['geometry']['type'].lower() in hType_dict[hType]]
newlist = []
for each_dict in oldlist:
geojson_type = each_dict['geometry']['type'].lower()
if hType == 'mapline':
newlist.append(
{'name': each_dict['properties'].get('name', None),
'path': _coordinates_to_path(each_dict['geometry']['coordinates'], hType, geojson_type),
'properties': each_dict['properties'],
}
)
elif hType == 'map':
newlist.append(
{'name': each_dict['properties']['name'],
'path': _coordinates_to_path(each_dict['geometry']['coordinates'], hType, geojson_type),
'properties': each_dict['properties'],
}
)
elif hType == 'mappoint':
newlist.append(
{'name': each_dict['properties']['name'],
'x': each_dict['geometry']['coordinates'][0],
'y': -each_dict['geometry']['coordinates'][1],
'properties': each_dict['properties'],
}
)
return newlist
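A usage sketch for the 'mappoint' branch (the one branch that does not go through the _coordinates_to_path helper), assuming the function above is in scope; the GeoJSON feature is invented.
geojson = {
    'features': [
        {'type': 'Feature',
         'geometry': {'type': 'Point', 'coordinates': [13.4, 52.5]},
         'properties': {'name': 'Berlin'}},
    ]
}
print(geojson_handler(geojson, hType='mappoint'))
# [{'name': 'Berlin', 'x': 13.4, 'y': -52.5, 'properties': {'name': 'Berlin'}}]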
def list_unique(cls):
'''Return all unique namespaces
:returns: a list of all predicates
:rtype: list of ckan.model.semantictag.Predicate objects
'''
query = meta.Session.query(Predicate).distinct(Predicate.namespace)
return query.all()
def list_security_groups(call=None):
'''
Lists all security groups available to the user and the user's groups.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt-cloud -f list_security_groups opennebula
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_security_groups function must be called with -f or --function.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
secgroup_pool = server.one.secgrouppool.info(auth, -2, -1, -1)[1]
groups = {}
for group in _get_xml(secgroup_pool):
groups[group.find('NAME').text] = _xml_to_dict(group)
return groups
def __to_file(self, message_no):
""" Write a single message to file """
filename = self.__create_file_name(message_no)
try:
with codecs.open(filename, mode='w',
encoding=self.messages[message_no].encoding)\
as file__:
file__.write(self.messages[message_no].output)
except IOError as excep:
print('Unable to open the file \'{0}\' for writing. The '
'following exception was raised:'.format(filename))
print(excep)
print('Exiting!')
sys.exit(2)
return filename
def absolute_path(path=None, base_dir=None):
"""
Return absolute path if path is local.
Parameters:
-----------
path : path to file
base_dir : base directory used for absolute path
Returns:
--------
absolute path
"""
if path_is_remote(path):
return path
else:
if os.path.isabs(path):
return path
else:
if base_dir is None or not os.path.isabs(base_dir):
raise TypeError("base_dir must be an absolute path.")
return os.path.abspath(os.path.join(base_dir, path)) | Return absolute path if path is local.
Parameters:
-----------
path : path to file
base_dir : base directory used for absolute path
Returns:
--------
absolute path |
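A self-contained sketch of the same resolution rule using only os.path, skipping the remote-path branch (paths are illustrative):

import os

def resolve_local(path, base_dir):
    # Mirrors the local branch above: absolute paths pass through unchanged,
    # relative paths are joined onto an absolute base_dir.
    if os.path.isabs(path):
        return path
    if base_dir is None or not os.path.isabs(base_dir):
        raise TypeError("base_dir must be an absolute path.")
    return os.path.abspath(os.path.join(base_dir, path))

print(resolve_local("tiles/z10.tif", "/data/project"))  # /data/project/tiles/z10.tif
print(resolve_local("/tmp/file.tif", "/data/project"))  # /tmp/file.tif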
def start(self):
"""Starts the advertise loop.
Returns the result of the first ad request.
"""
if self.running:
raise Exception('Advertiser is already running')
if self.io_loop is None:
self.io_loop = tornado.ioloop.IOLoop.current()
self.running = True
answer = tornado.gen.Future()
self._schedule_ad(0, answer)
return answer | Starts the advertise loop.
Returns the result of the first ad request. |
def apply_handler_to_all_logs(handler: logging.Handler,
remove_existing: bool = False) -> None:
"""
Applies a handler to all logs, optionally removing existing handlers.
Should ONLY be called from the ``if __name__ == 'main'`` script;
see https://docs.python.org/3.4/howto/logging.html#library-config.
Generally MORE SENSIBLE just to apply a handler to the root logger.
Args:
handler: the handler to apply
remove_existing: remove existing handlers from logger first?
"""
# noinspection PyUnresolvedReferences
for name, obj in logging.Logger.manager.loggerDict.items():
if remove_existing:
obj.handlers = [] # http://stackoverflow.com/questions/7484454
obj.addHandler(handler) | Applies a handler to all logs, optionally removing existing handlers.
Should ONLY be called from the ``if __name__ == 'main'`` script;
see https://docs.python.org/3.4/howto/logging.html#library-config.
Generally MORE SENSIBLE just to apply a handler to the root logger.
Args:
handler: the handler to apply
remove_existing: remove existing handlers from logger first? |
def _save_or_update(self):
"""Save or update the private state needed by the cloud provider.
"""
with self._resource_lock:
if not self._config or not self._config._storage_path:
raise Exception("self._config._storage path is undefined")
if not self._config._base_name:
raise Exception("self._config._base_name is undefined")
if not os.path.exists(self._config._storage_path):
os.makedirs(self._config._storage_path)
path = self._get_cloud_provider_storage_path()
with open(path, 'wb') as storage:
pickle.dump(self._config, storage, pickle.HIGHEST_PROTOCOL)
pickle.dump(self._subscriptions, storage,
pickle.HIGHEST_PROTOCOL) | Save or update the private state needed by the cloud provider. |
def by_image_seq(blocks, image_seq):
"""Filter blocks to return only those associated with the provided image_seq number.
    Arguments:
List:blocks -- List of block objects to sort.
Int:image_seq -- image_seq number found in ec_hdr.
Returns:
List -- List of block indexes matching image_seq number.
"""
return list(filter(lambda block: blocks[block].ec_hdr.image_seq == image_seq, blocks)) | Filter blocks to return only those associated with the provided image_seq number.
    Arguments:
List:blocks -- List of block objects to sort.
Int:image_seq -- image_seq number found in ec_hdr.
Returns:
List -- List of block indexes matching image_seq number. |
def subscribe_to_events(config, subscriber, events, model=None):
""" Helper function to subscribe to group of events.
:param config: Pyramid contig instance.
:param subscriber: Event subscriber function.
:param events: Sequence of events to subscribe to.
:param model: Model predicate value.
"""
kwargs = {}
if model is not None:
kwargs['model'] = model
for evt in events:
config.add_subscriber(subscriber, evt, **kwargs) | Helper function to subscribe to group of events.
    :param config: Pyramid config instance.
:param subscriber: Event subscriber function.
:param events: Sequence of events to subscribe to.
:param model: Model predicate value. |
def ssh_cmd(self, name, ssh_command):
"""
        SSH into given container and execute command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
            ssh.launch_shell('root', ip, 'password') | SSH into given container and execute command if given
def open_bucket(bucket_name,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None):
"""Open an S3 Bucket resource.
Parameters
----------
bucket_name : `str`
Name of the S3 bucket.
aws_access_key_id : `str`, optional
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`, optional
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Returns
-------
bucket : Boto3 S3 Bucket instance
The S3 bucket as a Boto3 instance.
"""
session = boto3.session.Session(
profile_name=aws_profile,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
return bucket | Open an S3 Bucket resource.
Parameters
----------
bucket_name : `str`
Name of the S3 bucket.
aws_access_key_id : `str`, optional
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`, optional
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Returns
-------
bucket : Boto3 S3 Bucket instance
The S3 bucket as a Boto3 instance. |
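A hedged usage sketch for the bucket returned above: listing a few object keys under a prefix. The bucket name, prefix, and profile are placeholders, and valid AWS credentials are assumed.

import boto3

session = boto3.session.Session(profile_name="default")  # or pass explicit keys
bucket = session.resource("s3").Bucket("example-bucket")  # placeholder bucket name

# Iterate over object summaries under a prefix; objects.filter/limit are
# standard boto3 collection methods.
for obj in bucket.objects.filter(Prefix="logs/").limit(10):
    print(obj.key, obj.size)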
def detrend(arr, x=None, deg=5, tol=1e-3, maxloop=10):
"""Compute a baseline trend of a signal"""
xx = numpy.arange(len(arr)) if x is None else x
base = arr.copy()
trend = base
pol = numpy.ones((deg + 1,))
for _ in range(maxloop):
pol_new = numpy.polyfit(xx, base, deg)
pol_norm = numpy.linalg.norm(pol)
diff_pol_norm = numpy.linalg.norm(pol - pol_new)
if diff_pol_norm / pol_norm < tol:
break
pol = pol_new
trend = numpy.polyval(pol, xx)
base = numpy.minimum(base, trend)
return trend | Compute a baseline trend of a signal |
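A self-contained illustration of the clip-and-refit baseline loop above on synthetic data; the signal shape, polynomial degree, and tolerance are made up for the example.

import numpy

# Synthetic signal: a slow linear baseline plus two narrow positive peaks.
x = numpy.linspace(0.0, 10.0, 500)
baseline = 0.5 * x + 2.0
signal = baseline + numpy.exp(-((x - 3.0) ** 2) / 0.05) + numpy.exp(-((x - 7.0) ** 2) / 0.05)

# Same idea as detrend(): repeatedly fit a polynomial, then clip the working
# copy down to the fit so the peaks stop pulling the baseline upward.
base = signal.copy()
trend = base
pol = numpy.ones(6)  # deg=5 -> 6 coefficients
for _ in range(10):
    pol_new = numpy.polyfit(x, base, 5)
    if numpy.linalg.norm(pol - pol_new) / numpy.linalg.norm(pol) < 1e-3:
        break
    pol = pol_new
    trend = numpy.polyval(pol, x)
    base = numpy.minimum(base, trend)

# Distance between the fitted trend and the true baseline.
print(numpy.max(numpy.abs(trend - baseline)))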
def refresh(self):
"""
Refresh this model from the server.
Updates attributes with the server-defined values. This is useful where the Model
instance came from a partial response (eg. a list query) and additional details
are required.
Existing attribute values will be overwritten.
"""
r = self._client.request('GET', self.url)
return self._deserialize(r.json(), self._manager) | Refresh this model from the server.
Updates attributes with the server-defined values. This is useful where the Model
instance came from a partial response (eg. a list query) and additional details
are required.
Existing attribute values will be overwritten. |
def update_host_datetime(host, username, password, protocol=None, port=None, host_names=None):
'''
Update the date/time on the given host or list of host_names. This function should be
used with caution since network delays and execution delays can result in time skews.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts should update their date/time.
If host_names is not provided, the date/time will be updated for the ``host``
location instead. This is useful for when service instance connection
information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.update_date_time my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.update_date_time my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
date_time_manager = _get_date_time_mgr(host_ref)
try:
date_time_manager.UpdateDateTime(datetime.datetime.utcnow())
except vim.fault.HostConfigFault as err:
msg = '\'vsphere.update_date_time\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
ret.update({host_name: {'Datetime Updated': True}})
return ret | Update the date/time on the given host or list of host_names. This function should be
used with caution since network delays and execution delays can result in time skews.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts should update their date/time.
If host_names is not provided, the date/time will be updated for the ``host``
location instead. This is useful for when service instance connection
information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.update_date_time my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.update_date_time my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]' |
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
        :param fp: The file pointer to upload. The file pointer must point
                   at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the file
transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
                     from the file pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
if chunked_transfer and not self.base64md5:
# MD5 for the stream has to be calculated on the fly.
m = md5()
else:
m = None
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 3 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 3:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024)/self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
if m:
m.update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
self.size = data_len
if chunked_transfer:
http_conn.send('0\r\n')
if m:
# Use the chunked trailer for the digest
hd = m.hexdigest()
self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
response = http_conn.getresponse()
body = response.read()
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return response
else:
raise provider.storage_response_error(
response.status, response.reason, body)
if not headers:
headers = {}
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
        if 'Content-Encoding' in headers:
self.content_encoding = headers['Content-Encoding']
        if 'Content-Type' in headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if headers['Content-Type']:
self.content_type = headers['Content-Type']
else:
# Delete null Content-Type value to skip sending that header.
del headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
            if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers,
sender=sender,
query_args=query_args)
self.handle_version_headers(resp, force=True) | Upload a file to a key into a bucket on S3.
:type fp: file
        :param fp: The file pointer to upload. The file pointer must point
                   at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the file
transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
                     from the file pointer. Fewer bytes may be available.
def _list_metric_descriptors(args, _):
"""Lists the metric descriptors in the project."""
project_id = args['project']
pattern = args['type'] or '*'
descriptors = gcm.MetricDescriptors(project_id=project_id)
dataframe = descriptors.as_dataframe(pattern=pattern)
return _render_dataframe(dataframe) | Lists the metric descriptors in the project. |
def bootstrap_paginate(parser, token):
"""
Renders a Page object as a Twitter Bootstrap styled pagination bar.
Compatible with Bootstrap 3.x and 4.x only.
Example::
{% bootstrap_paginate page_obj range=10 %}
Named Parameters::
range - The size of the pagination bar (ie, if set to 10 then, at most,
10 page numbers will display at any given time) Defaults to
None, which shows all pages.
size - Accepts "small", and "large". Defaults to
None which is the standard size.
show_prev_next - Accepts "true" or "false". Determines whether or not
to show the previous and next page links. Defaults to
"true"
show_first_last - Accepts "true" or "false". Determines whether or not
to show the first and last page links. Defaults to
"false"
previous_label - The text to display for the previous page link.
Defaults to "←"
next_label - The text to display for the next page link. Defaults to
"→"
first_label - The text to display for the first page link. Defaults to
"«"
last_label - The text to display for the last page link. Defaults to
"»"
url_view_name - The named URL to use. Defaults to None. If None, then the
default template simply appends the url parameter as a
relative URL link, eg: <a href="?page=1">1</a>
url_param_name - The name of the parameter to use in the URL. If
url_view_name is set to None, this string is used as the
parameter name in the relative URL path. If a URL
name is specified, this string is used as the
parameter name passed into the reverse() method for
the URL.
url_extra_args - This is used only in conjunction with url_view_name.
When referencing a URL, additional arguments may be
passed in as a list.
url_extra_kwargs - This is used only in conjunction with url_view_name.
When referencing a URL, additional named arguments
may be passed in as a dictionary.
url_get_params - The other get parameters to pass, only the page
number will be overwritten. Use this to preserve
filters.
url_anchor - The anchor to use in URLs. Defaults to None.
extra_pagination_classes - A space separated list of CSS class names
that will be added to the top level <ul>
HTML element. In particular, this can be
                               utilized in Bootstrap 4 installations to
add the appropriate alignment classes from
                               Flexbox utilities, eg: justify-content-center
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (Page object reference)" % bits[0])
page = parser.compile_filter(bits[1])
kwargs = {}
bits = bits[2:]
kwarg_re = re.compile(r'(\w+)=(.+)')
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to bootstrap_pagination paginate tag")
name, value = match.groups()
kwargs[name] = parser.compile_filter(value)
return BootstrapPaginationNode(page, kwargs) | Renders a Page object as a Twitter Bootstrap styled pagination bar.
Compatible with Bootstrap 3.x and 4.x only.
Example::
{% bootstrap_paginate page_obj range=10 %}
Named Parameters::
range - The size of the pagination bar (ie, if set to 10 then, at most,
10 page numbers will display at any given time) Defaults to
None, which shows all pages.
size - Accepts "small", and "large". Defaults to
None which is the standard size.
show_prev_next - Accepts "true" or "false". Determines whether or not
to show the previous and next page links. Defaults to
"true"
show_first_last - Accepts "true" or "false". Determines whether or not
to show the first and last page links. Defaults to
"false"
previous_label - The text to display for the previous page link.
Defaults to "←"
next_label - The text to display for the next page link. Defaults to
"→"
first_label - The text to display for the first page link. Defaults to
"«"
last_label - The text to display for the last page link. Defaults to
"»"
url_view_name - The named URL to use. Defaults to None. If None, then the
default template simply appends the url parameter as a
relative URL link, eg: <a href="?page=1">1</a>
url_param_name - The name of the parameter to use in the URL. If
url_view_name is set to None, this string is used as the
parameter name in the relative URL path. If a URL
name is specified, this string is used as the
parameter name passed into the reverse() method for
the URL.
url_extra_args - This is used only in conjunction with url_view_name.
When referencing a URL, additional arguments may be
passed in as a list.
url_extra_kwargs - This is used only in conjunction with url_view_name.
When referencing a URL, additional named arguments
may be passed in as a dictionary.
url_get_params - The other get parameters to pass, only the page
number will be overwritten. Use this to preserve
filters.
url_anchor - The anchor to use in URLs. Defaults to None.
extra_pagination_classes - A space separated list of CSS class names
that will be added to the top level <ul>
HTML element. In particular, this can be
                               utilized in Bootstrap 4 installations to
add the appropriate alignment classes from
                               Flexbox utilities, eg: justify-content-center
def get_countries(is_legacy_xml=False):
"""
The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Args:
is_legacy_xml (:obj:`bool`): Whether to use the older country code
list (iso_3166-1_list_en.xml).
Returns:
dict: A mapping of country codes as the keys to the country names as
the values.
"""
# Initialize the countries dictionary.
countries = {}
# Set the data directory based on if the script is a frozen executable.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
data_dir = path.dirname(sys.executable) # pragma: no cover
else:
data_dir = path.dirname(__file__)
if is_legacy_xml:
log.debug('Opening country code legacy XML: {0}'.format(
str(data_dir) + '/data/iso_3166-1_list_en.xml'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
encoding='ISO-8859-1')
# Read the file.
data = f.read()
# Check if there is data.
if not data: # pragma: no cover
return {}
# Parse the data to get the DOM.
dom = parseString(data)
# Retrieve the country entries.
entries = dom.getElementsByTagName('ISO_3166-1_Entry')
# Iterate through the entries and add to the countries dictionary.
for entry in entries:
# Retrieve the country code and name from the DOM.
code = entry.getElementsByTagName(
'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
name = entry.getElementsByTagName(
'ISO_3166-1_Country_name')[0].firstChild.data
# Add to the countries dictionary.
countries[code] = name.title()
else:
        log.debug('Opening country code CSV: {0}'.format(
            str(data_dir) + '/data/iso_3166-1.csv'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
encoding='utf-8')
# Create csv reader object.
csv_reader = csv.reader(f, delimiter=',', quotechar='"')
# Iterate through the rows and add to the countries dictionary.
for row in csv_reader:
# Retrieve the country code and name columns.
code = row[0]
name = row[1]
# Add to the countries dictionary.
countries[code] = name
return countries | The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Args:
is_legacy_xml (:obj:`bool`): Whether to use the older country code
list (iso_3166-1_list_en.xml).
Returns:
dict: A mapping of country codes as the keys to the country names as
the values. |
def get_romfile_path(game, inttype=Integrations.DEFAULT):
"""
Return the path to a given game's romfile
"""
for extension in EMU_EXTENSIONS.keys():
possible_path = get_file_path(game, "rom" + extension, inttype)
if possible_path:
return possible_path
raise FileNotFoundError("No romfiles found for game: %s" % game) | Return the path to a given game's romfile |
def mac_address_table_static_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
static = ET.SubElement(mac_address_table, "static")
forward_key = ET.SubElement(static, "forward")
forward_key.text = kwargs.pop('forward')
interface_type_key = ET.SubElement(static, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(static, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
vlan_key = ET.SubElement(static, "vlan")
vlan_key.text = kwargs.pop('vlan')
vlanid_key = ET.SubElement(static, "vlanid")
vlanid_key.text = kwargs.pop('vlanid')
mac_address = ET.SubElement(static, "mac-address")
mac_address.text = kwargs.pop('mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def send_wrapped(self, text):
"""
Send text padded and wrapped to the user's screen width.
"""
lines = word_wrap(text, self.columns)
for line in lines:
self.send_cc(line + '\n') | Send text padded and wrapped to the user's screen width. |
def date_this_century(self, before_today=True, after_today=False):
"""
Gets a Date object for the current century.
:param before_today: include days in current century before today
:param after_today: include days in current century after today
:example Date('2012-04-04')
:return Date
"""
today = date.today()
this_century_start = date(today.year - (today.year % 100), 1, 1)
next_century_start = date(this_century_start.year + 100, 1, 1)
if before_today and after_today:
return self.date_between_dates(
this_century_start, next_century_start)
elif not before_today and after_today:
return self.date_between_dates(today, next_century_start)
elif not after_today and before_today:
return self.date_between_dates(this_century_start, today)
else:
return today | Gets a Date object for the current century.
:param before_today: include days in current century before today
:param after_today: include days in current century after today
:example Date('2012-04-04')
:return Date |
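The century-boundary arithmetic used above, isolated as a tiny sketch (no random sampling, just the start and end dates):

from datetime import date

today = date.today()
this_century_start = date(today.year - (today.year % 100), 1, 1)
next_century_start = date(this_century_start.year + 100, 1, 1)

# For any date in 2000-2099 this prints 2000-01-01 and 2100-01-01.
print(this_century_start, next_century_start)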
def char(self, c: str) -> None:
"""Parse the specified character.
Args:
c: One-character string.
Raises:
EndOfInput: If past the end of `self.input`.
UnexpectedInput: If the next character is different from `c`.
"""
if self.peek() == c:
self.offset += 1
else:
raise UnexpectedInput(self, f"char '{c}'") | Parse the specified character.
Args:
c: One-character string.
Raises:
EndOfInput: If past the end of `self.input`.
UnexpectedInput: If the next character is different from `c`. |
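A stripped-down sketch of the parser state this method assumes, with stand-in classes (the real parser also raises EndOfInput from peek() when the offset runs past the input):

class UnexpectedInput(Exception):
    pass

class TinyParser:
    def __init__(self, text: str) -> None:
        self.input = text
        self.offset = 0

    def peek(self) -> str:
        return self.input[self.offset]

    def char(self, c: str) -> None:
        # Consume one expected character, exactly as in the method above.
        if self.peek() == c:
            self.offset += 1
        else:
            raise UnexpectedInput(f"char '{c}'")

p = TinyParser("abc")
p.char("a")
p.char("b")
print(p.offset)  # 2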
def import_locations(self, gpx_file):
"""Import GPX data files.
``import_locations()`` returns a list with :class:`~gpx.Waypoint`
objects.
It expects data files in GPX format, as specified in `GPX 1.1 Schema
Documentation`_, which is XML such as::
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<gpx version="1.1" creator="PocketGPSWorld.com"
xmlns="http://www.topografix.com/GPX/1/1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd">
<wpt lat="52.015" lon="-0.221">
<name>Home</name>
<desc>My place</desc>
</wpt>
<wpt lat="52.167" lon="0.390">
<name>MSR</name>
<desc>Microsoft Research, Cambridge</desc>
</wpt>
</gpx>
The reader uses the :mod:`ElementTree` module, so should be very fast
when importing data. The above file processed by
``import_locations()`` will return the following ``list`` object::
[Waypoint(52.015, -0.221, "Home", "My place"),
Waypoint(52.167, 0.390, "MSR", "Microsoft Research, Cambridge")]
Args:
gpx_file (iter): GPX data to read
Returns:
list: Locations with optional comments
.. _GPX 1.1 Schema Documentation: http://www.topografix.com/GPX/1/1/
"""
self._gpx_file = gpx_file
data = utils.prepare_xml_read(gpx_file, objectify=True)
try:
self.metadata.import_metadata(data.metadata)
except AttributeError:
pass
for waypoint in data.wpt:
latitude = waypoint.get('lat')
longitude = waypoint.get('lon')
try:
name = waypoint.name.text
except AttributeError:
name = None
try:
description = waypoint.desc.text
except AttributeError:
description = None
try:
elevation = float(waypoint.ele.text)
except AttributeError:
elevation = None
try:
time = utils.Timestamp.parse_isoformat(waypoint.time.text)
except AttributeError:
time = None
self.append(Waypoint(latitude, longitude, name, description,
elevation, time)) | Import GPX data files.
``import_locations()`` returns a list with :class:`~gpx.Waypoint`
objects.
It expects data files in GPX format, as specified in `GPX 1.1 Schema
Documentation`_, which is XML such as::
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<gpx version="1.1" creator="PocketGPSWorld.com"
xmlns="http://www.topografix.com/GPX/1/1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd">
<wpt lat="52.015" lon="-0.221">
<name>Home</name>
<desc>My place</desc>
</wpt>
<wpt lat="52.167" lon="0.390">
<name>MSR</name>
<desc>Microsoft Research, Cambridge</desc>
</wpt>
</gpx>
The reader uses the :mod:`ElementTree` module, so should be very fast
when importing data. The above file processed by
``import_locations()`` will return the following ``list`` object::
[Waypoint(52.015, -0.221, "Home", "My place"),
Waypoint(52.167, 0.390, "MSR", "Microsoft Research, Cambridge")]
Args:
gpx_file (iter): GPX data to read
Returns:
list: Locations with optional comments
.. _GPX 1.1 Schema Documentation: http://www.topografix.com/GPX/1/1/ |
def is_reversible(P):
""" Returns if P is reversible on its weakly connected sets """
import msmtools.analysis as msmana
# treat each weakly connected set separately
sets = connected_sets(P, strong=False)
for s in sets:
Ps = P[s, :][:, s]
if not msmana.is_transition_matrix(Ps):
return False # isn't even a transition matrix!
pi = msmana.stationary_distribution(Ps)
X = pi[:, None] * Ps
if not np.allclose(X, X.T):
return False
# survived.
return True | Returns if P is reversible on its weakly connected sets |
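The detailed-balance test at the core of the function, shown on a small hand-made transition matrix; the stationary distribution is computed from the leading left eigenvector instead of msmtools:

import numpy as np

# A two-state reversible chain.
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])

# Stationary distribution: left eigenvector of P for eigenvalue 1, normalized.
w, V = np.linalg.eig(P.T)
pi = np.real(V[:, np.argmax(np.real(w))])
pi = pi / pi.sum()

# Detailed balance: pi_i * P_ij == pi_j * P_ji, i.e. diag(pi) @ P is symmetric.
X = pi[:, None] * P
print(pi)                    # approximately [2/3, 1/3]
print(np.allclose(X, X.T))   # True for this chain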
def get_operation_pattern(server_url, request_url_pattern):
"""Return an updated request URL pattern with the server URL removed."""
if server_url[-1] == "/":
# operations have to start with a slash, so do not remove it
server_url = server_url[:-1]
if is_absolute(server_url):
return request_url_pattern.replace(server_url, "", 1)
return path_qs(request_url_pattern).replace(server_url, "", 1) | Return an updated request URL pattern with the server URL removed. |
def create_mon_path(path, uid=-1, gid=-1):
"""create the mon path if it does not exist"""
if not os.path.exists(path):
os.makedirs(path)
        os.chown(path, uid, gid)
def get_first_comments_or_remarks(recID=-1,
ln=CFG_SITE_LANG,
nb_comments='all',
nb_reviews='all',
voted=-1,
reported=-1,
user_info=None,
show_reviews=False):
"""
Gets nb number comments/reviews or remarks.
In the case of comments, will get both comments and reviews
Comments and remarks sorted by most recent date, reviews sorted by highest helpful score
:param recID: record id
:param ln: language
:param nb_comments: number of comment or remarks to get
:param nb_reviews: number of reviews or remarks to get
:param voted: 1 if user has voted for a remark
:param reported: 1 if user has reported a comment or review
:return: if comment, tuple (comments, reviews) both being html of first nb comments/reviews
             if remark, tuple (remarks, None)
"""
_ = gettext_set_language(ln)
warnings = []
voted = wash_url_argument(voted, 'int')
reported = wash_url_argument(reported, 'int')
# check recID argument
if not isinstance(recID, int):
return ()
# comment or review. NB: suppressed reference to basket (handled in
# webbasket)
if recID >= 1:
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
res_reviews = query_retrieve_comments_or_remarks(
recID=recID,
display_order="hh",
ranking=1,
limit=nb_comments,
user_info=user_info)
nb_res_reviews = len(res_reviews)
# check nb argument
if isinstance(nb_reviews, int) and nb_reviews < len(res_reviews):
first_res_reviews = res_reviews[:nb_reviews]
else:
first_res_reviews = res_reviews
if CFG_WEBCOMMENT_ALLOW_COMMENTS:
res_comments = query_retrieve_comments_or_remarks(
recID=recID,
display_order="od",
ranking=0,
limit=nb_reviews,
user_info=user_info)
nb_res_comments = len(res_comments)
# check nb argument
if isinstance(nb_comments, int) and nb_comments < len(
res_comments):
first_res_comments = res_comments[:nb_comments]
else:
first_res_comments = res_comments
else: # error
try:
raise InvenioWebCommentError(
_('%(recid)s is an invalid record ID', recid=recID))
except InvenioWebCommentError as exc:
register_exception()
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
# errors.append(('ERR_WEBCOMMENT_RECID_INVALID', recID)) #!FIXME dont
# return error anywhere since search page
# comment
if recID >= 1:
comments = reviews = ""
if reported > 0:
try:
raise InvenioWebCommentWarning(
_('Your feedback has been recorded, many thanks.'))
except InvenioWebCommentWarning as exc:
register_exception(stream='warning')
warnings.append((exc.message, 'green'))
# warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED_GREEN_TEXT',))
elif reported == 0:
try:
raise InvenioWebCommentWarning(
_('Your feedback could not be recorded, please try again.'))
except InvenioWebCommentWarning as exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
# warnings.append(('WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED_RED_TEXT',))
if CFG_WEBCOMMENT_ALLOW_COMMENTS: # normal comments
grouped_comments = group_comments_by_round(
first_res_comments,
ranking=0)
comments = webcomment_templates.tmpl_get_first_comments_without_ranking(
recID,
ln,
grouped_comments,
nb_res_comments,
warnings)
if show_reviews:
if CFG_WEBCOMMENT_ALLOW_REVIEWS: # ranked comments
# calculate average score
avg_score = calculate_avg_score(res_reviews)
if voted > 0:
try:
raise InvenioWebCommentWarning(
_('Your feedback has been recorded, many thanks.'))
except InvenioWebCommentWarning as exc:
register_exception(stream='warning')
warnings.append((exc.message, 'green'))
# warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED_GREEN_TEXT',))
elif voted == 0:
try:
raise InvenioWebCommentWarning(
_('Your feedback could not be recorded, please try again.'))
except InvenioWebCommentWarning as exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
# warnings.append(('WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED_RED_TEXT',))
grouped_reviews = group_comments_by_round(
first_res_reviews,
ranking=0)
reviews = webcomment_templates.tmpl_get_first_comments_with_ranking(
recID,
ln,
grouped_reviews,
nb_res_reviews,
avg_score,
warnings)
return (comments, reviews)
# remark
else:
return(webcomment_templates.tmpl_get_first_remarks(first_res_comments, ln, nb_res_comments), None) | Gets nb number comments/reviews or remarks.
In the case of comments, will get both comments and reviews
Comments and remarks sorted by most recent date, reviews sorted by highest helpful score
:param recID: record id
:param ln: language
:param nb_comments: number of comment or remarks to get
:param nb_reviews: number of reviews or remarks to get
:param voted: 1 if user has voted for a remark
:param reported: 1 if user has reported a comment or review
:return: if comment, tuple (comments, reviews) both being html of first nb comments/reviews
             if remark, tuple (remarks, None)
async def set_failover_mode(mode):
    """Example of setting the failover mode."""
jar = aiohttp.CookieJar(unsafe=True)
websession = aiohttp.ClientSession(cookie_jar=jar)
try:
modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
await modem.login(password=sys.argv[2])
await modem.set_failover_mode(mode)
await modem.logout()
except eternalegypt.Error:
print("Could not login")
    await websession.close() | Example of setting the failover mode.
def seektime(self, disk):
"""
        Gives seek latency on a disk, which is a very good indication of the `type` of the disk.
        It's a very good way to verify whether the underlying disk type is SSD or HDD.
        :param disk: disk path or name (/dev/sda, or sda)
        :return: a dict as follows {'device': '<device-path>', 'elapsed': <seek-time in us>, 'type': '<SSD or HDD>'}
"""
args = {
'disk': disk,
}
self._seektime_chk.check(args)
        return self._client.json("disk.seektime", args) | Gives seek latency on a disk, which is a very good indication of the `type` of the disk.
        It's a very good way to verify whether the underlying disk type is SSD or HDD.
        :param disk: disk path or name (/dev/sda, or sda)
        :return: a dict as follows {'device': '<device-path>', 'elapsed': <seek-time in us>, 'type': '<SSD or HDD>'}
def bootstrapping_dtrajs(dtrajs, lag, N_full, nbs=10000, active_set=None):
"""
Perform trajectory based re-sampling.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
lag time
N_full : int
Number of states in discrete trajectories.
nbs : int, optional
Number of bootstrapping samples
active_set : ndarray
Indices of active set, all count matrices will be restricted
to active set.
Returns
-------
smean : ndarray(N,)
mean values of singular values
sdev : ndarray(N,)
standard deviations of singular values
"""
# Get the number of simulations:
Q = len(dtrajs)
# Get the number of states in the active set:
if active_set is not None:
N = active_set.size
else:
N = N_full
# Build up a matrix of count matrices for each simulation. Size is Q*N^2:
traj_ind = []
state1 = []
state2 = []
q = 0
for traj in dtrajs:
traj_ind.append(q*np.ones(traj[:-lag].size))
state1.append(traj[:-lag])
state2.append(traj[lag:])
q += 1
traj_inds = np.concatenate(traj_ind)
pairs = N_full * np.concatenate(state1) + np.concatenate(state2)
data = np.ones(pairs.size)
Ct_traj = scipy.sparse.coo_matrix((data, (traj_inds, pairs)), shape=(Q, N_full*N_full))
Ct_traj = Ct_traj.tocsr()
# Perform re-sampling:
svals = np.zeros((nbs, N))
for s in range(nbs):
# Choose selection:
sel = np.random.choice(Q, Q, replace=True)
# Compute count matrix for selection:
Ct_sel = Ct_traj[sel, :].sum(axis=0)
Ct_sel = np.asarray(Ct_sel).reshape((N_full, N_full))
if active_set is not None:
from pyemma.util.linalg import submatrix
Ct_sel = submatrix(Ct_sel, active_set)
svals[s, :] = scl.svdvals(Ct_sel)
# Compute mean and uncertainties:
smean = np.mean(svals, axis=0)
sdev = np.std(svals, axis=0)
return smean, sdev | Perform trajectory based re-sampling.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
lag time
N_full : int
Number of states in discrete trajectories.
nbs : int, optional
Number of bootstrapping samples
active_set : ndarray
Indices of active set, all count matrices will be restricted
to active set.
Returns
-------
smean : ndarray(N,)
mean values of singular values
sdev : ndarray(N,)
standard deviations of singular values |
def file_md5(self, resource):
"""Deprecated alias for *resource_md5*."""
warnings.warn(
"file_md5 is deprecated; use resource_md5 instead",
DeprecationWarning, stacklevel=2)
return self.resource_md5(resource) | Deprecated alias for *resource_md5*. |
def enterEvent(self, event):
"""
Mark the hovered state as being true.
:param event | <QtCore.QEnterEvent>
"""
super(XViewPanelItem, self).enterEvent(event)
# store the hover state and mark for a repaint
self._hovered = True
self.update() | Mark the hovered state as being true.
:param event | <QtCore.QEnterEvent> |
def reboot(name, call=None):
'''
reboot a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
'''
datacenter_id = get_datacenter_id()
conn = get_conn()
node = get_node(conn, name)
conn.reboot_server(datacenter_id=datacenter_id, server_id=node['id'])
return True | reboot a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name |
def _get(self, uri, params=None, headers=None):
"""
Simple GET request for a given path.
"""
if not headers:
headers = self._get_headers()
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
response = self.session.get(uri, headers=headers, params=params)
logging.debug("STATUS=" + str(response.status_code))
if response.status_code == 200:
return response.json()
else:
logging.error(b"ERROR=" + response.content)
response.raise_for_status() | Simple GET request for a given path. |
def create_install_template_skin(self):
"""
Create an example ckan extension for this environment and install it
"""
ckan_extension_template(self.name, self.target)
self.install_package_develop('ckanext-' + self.name + 'theme') | Create an example ckan extension for this environment and install it |
def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`CreatePassiveOffer`.
"""
selling = self.selling.to_xdr_object()
buying = self.buying.to_xdr_object()
price = Operation.to_xdr_price(self.price)
price = Xdr.types.Price(price['n'], price['d'])
amount = Operation.to_xdr_amount(self.amount)
create_passive_offer_op = Xdr.types.CreatePassiveOfferOp(
selling, buying, amount, price)
self.body.type = Xdr.const.CREATE_PASSIVE_OFFER
self.body.createPassiveOfferOp = create_passive_offer_op
return super(CreatePassiveOffer, self).to_xdr_object() | Creates an XDR Operation object that represents this
:class:`CreatePassiveOffer`. |
def email_domain_disposable(value):
"""
Confirms that the email address is not using a disposable service.
@param {str} value
@returns {None}
    @raises ValidationError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in disposable_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) | Confirms that the email address is not using a disposable service.
@param {str} value
@returns {None}
    @raises ValidationError
def get_tags(self):
"""
::
GET /:login/machines/:id/tags
:Returns: complete set of tags for this machine
:rtype: :py:class:`dict`
A local copy is not kept because these are essentially search keys.
"""
j, _ = self.datacenter.request('GET', self.path + '/tags')
return j | ::
GET /:login/machines/:id/tags
:Returns: complete set of tags for this machine
:rtype: :py:class:`dict`
A local copy is not kept because these are essentially search keys. |
def delete_location(self, location_name):
"""
Remove location with name location_name from self.locations.
If the location had any sites, change site.location to "".
"""
location = self.find_by_name(location_name, self.locations)
if not location:
return False
sites = location.sites
self.locations.remove(location)
for site in sites:
if site:
site.location = ''
del location
return sites | Remove location with name location_name from self.locations.
If the location had any sites, change site.location to "". |
def get(self, remote, local=None):
""" Gets the file from FTP server
local can be:
a file: opened for writing, left open
a string: path to output file
None: contents are returned
"""
if isinstance(local, file_type): # open file, leave open
local_file = local
elif local is None: # return string
local_file = buffer_type()
else: # path to file, open, write/close return None
local_file = open(local, 'wb')
self.conn.retrbinary("RETR %s" % remote, local_file.write)
if isinstance(local, file_type):
pass
elif local is None:
contents = local_file.getvalue()
local_file.close()
return contents
else:
local_file.close()
return None | Gets the file from FTP server
local can be:
a file: opened for writing, left open
a string: path to output file
None: contents are returned |
def expiry_time(ns, cavs):
''' Returns the minimum time of any time-before caveats found
in the given list or None if no such caveats were found.
The ns parameter is
:param ns: used to determine the standard namespace prefix - if
the standard namespace is not found, the empty prefix is assumed.
:param cavs: a list of pymacaroons.Caveat
:return: datetime.DateTime or None.
'''
prefix = ns.resolve(STD_NAMESPACE)
time_before_cond = condition_with_prefix(
prefix, COND_TIME_BEFORE)
t = None
for cav in cavs:
if not cav.first_party():
continue
cav = cav.caveat_id_bytes.decode('utf-8')
name, rest = parse_caveat(cav)
if name != time_before_cond:
continue
try:
et = pyrfc3339.parse(rest, utc=True).replace(tzinfo=None)
if t is None or et < t:
t = et
except ValueError:
continue
return t | Returns the minimum time of any time-before caveats found
in the given list or None if no such caveats were found.
    :param ns: used to determine the standard namespace prefix - if
the standard namespace is not found, the empty prefix is assumed.
:param cavs: a list of pymacaroons.Caveat
:return: datetime.DateTime or None. |
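A simplified sketch of the scan: pick the earliest 'time-before' timestamp from a list of first-party caveat strings. The bare condition name and the fixed timestamp format are simplifications; the real code resolves the namespace prefix and parses RFC 3339 with pyrfc3339.

from datetime import datetime

def min_time_before(caveats):
    t = None
    for cav in caveats:
        name, _, rest = cav.partition(" ")
        if name != "time-before":
            continue
        try:
            # Assumes plain 'YYYY-MM-DDTHH:MM:SSZ' timestamps for illustration.
            et = datetime.strptime(rest, "%Y-%m-%dT%H:%M:%SZ")
        except ValueError:
            continue
        if t is None or et < t:
            t = et
    return t

print(min_time_before([
    "time-before 2030-01-01T00:00:00Z",
    "time-before 2025-06-01T12:00:00Z",
    "declared username bob",
]))  # 2025-06-01 12:00:00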
def Enumerate():
"""See base class."""
hid_guid = GUID()
hid.HidD_GetHidGuid(ctypes.byref(hid_guid))
devices = setupapi.SetupDiGetClassDevsA(
ctypes.byref(hid_guid), None, None, 0x12)
index = 0
interface_info = DeviceInterfaceData()
interface_info.cbSize = ctypes.sizeof(DeviceInterfaceData) # pylint: disable=invalid-name
out = []
while True:
result = setupapi.SetupDiEnumDeviceInterfaces(
devices, 0, ctypes.byref(hid_guid), index,
ctypes.byref(interface_info))
index += 1
if not result:
break
detail_len = wintypes.DWORD()
result = setupapi.SetupDiGetDeviceInterfaceDetailA(
devices, ctypes.byref(interface_info), None, 0,
ctypes.byref(detail_len), None)
detail_len = detail_len.value
if detail_len == 0:
# skip this device, some kind of error
continue
buf = ctypes.create_string_buffer(detail_len)
interface_detail = DeviceInterfaceDetailData.from_buffer(buf)
interface_detail.cbSize = ctypes.sizeof(DeviceInterfaceDetailData)
result = setupapi.SetupDiGetDeviceInterfaceDetailA(
devices, ctypes.byref(interface_info),
ctypes.byref(interface_detail), detail_len, None, None)
if not result:
raise ctypes.WinError()
descriptor = base.DeviceDescriptor()
# This is a bit of a hack to work around a limitation of ctypes and
# "header" structures that are common in windows. DevicePath is a
# ctypes array of length 1, but it is backed with a buffer that is much
# longer and contains a null terminated string. So, we read the null
# terminated string off DevicePath here. Per the comment above, the
# alignment of this struct varies depending on architecture, but
# in all cases the path string starts 1 DWORD into the structure.
#
# The path length is:
# length of detail buffer - header length (1 DWORD)
path_len = detail_len - ctypes.sizeof(wintypes.DWORD)
descriptor.path = ctypes.string_at(
ctypes.addressof(interface_detail.DevicePath), path_len)
device = None
try:
device = OpenDevice(descriptor.path, True)
except WindowsError as e: # pylint: disable=undefined-variable
if e.winerror == ERROR_ACCESS_DENIED: # Access Denied, e.g. a keyboard
continue
else:
raise e
try:
FillDeviceAttributes(device, descriptor)
FillDeviceCapabilities(device, descriptor)
out.append(descriptor.ToPublicDict())
finally:
kernel32.CloseHandle(device)
return out | See base class. |
def _get_socket_addresses(self):
"""Get Socket address information.
:rtype: list
"""
family = socket.AF_UNSPEC
if not socket.has_ipv6:
family = socket.AF_INET
try:
addresses = socket.getaddrinfo(self._parameters['hostname'],
self._parameters['port'], family,
socket.SOCK_STREAM)
except socket.gaierror as why:
raise AMQPConnectionError(why)
return addresses | Get Socket address information.
:rtype: list |
def _compose_chapters(self):
"""
        Creates chapters
        and appends them to the list
"""
for count in range(self.chapter_count):
chapter_num = count + 1
c = Chapter(self.markov, chapter_num)
            self.chapters.append(c) | Creates chapters
        and appends them to the list
def iter_markers(self):
"""
Generate a (marker_code, segment_offset) 2-tuple for each marker in
the JPEG *stream*, in the order they occur in the stream.
"""
marker_finder = _MarkerFinder.from_stream(self._stream)
start = 0
marker_code = None
while marker_code != JPEG_MARKER_CODE.EOI:
marker_code, segment_offset = marker_finder.next(start)
marker = _MarkerFactory(
marker_code, self._stream, segment_offset
)
yield marker
start = segment_offset + marker.segment_length | Generate a (marker_code, segment_offset) 2-tuple for each marker in
the JPEG *stream*, in the order they occur in the stream. |
def _deep_value(*args, **kwargs):
""" Drills down into tree using the keys
"""
node, keys = args[0], args[1:]
for key in keys:
node = node.get(key, {})
default = kwargs.get('default', {})
if node in ({}, [], None):
node = default
return node | Drills down into tree using the keys |
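A standalone copy of the same drilling idea, since the original is a private helper; the function name and sample tree here are illustrative.

def deep_value(node, *keys, default=None):
    # Walk down the tree one key at a time; any missing key yields {} and
    # therefore the default at the end.
    for key in keys:
        node = node.get(key, {})
    return default if node in ({}, [], None) else node

tree = {"a": {"b": {"c": 42}}}
print(deep_value(tree, "a", "b", "c"))             # 42
print(deep_value(tree, "a", "x", "c", default=0))  # 0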
def row_contributions(self, X):
"""Returns the row contributions towards each principal component.
Each row contribution towards each principal component is equivalent to the amount of
inertia it contributes. This is calculated by dividing the squared row coordinates by the
eigenvalue associated to each principal component.
"""
utils.validation.check_is_fitted(self, 's_')
return np.square(self.row_coordinates(X)).div(self.eigenvalues_, axis='columns') | Returns the row contributions towards each principal component.
Each row contribution towards each principal component is equivalent to the amount of
inertia it contributes. This is calculated by dividing the squared row coordinates by the
eigenvalue associated to each principal component. |
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zones implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf] | Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zones implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade. |
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv', 'json'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, will try to infer the format from filename given. If file
name ends with 'csv' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
See export_csv for more csv saving options.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
"""
if format is None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
elif filename.endswith(('.json')):
format = 'json'
else:
format = 'binary'
else:
            if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
            elif format != 'binary' and format != 'json':
raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary' and 'json'".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
            if format == 'binary':
self.__proxy__.save(url)
            elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
            elif format == 'json':
self.export_json(url)
else:
raise ValueError("Unsupported format: {}".format(format)) | Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv', 'json'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, will try to infer the format from filename given. If file
name ends with 'csv' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
See export_csv for more csv saving options.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv') |
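The suffix-based format inference described in the docstring above, isolated as a tiny sketch:

def infer_format(filename):
    # Mirrors the docstring: .csv/.csv.gz -> 'csv', .json -> 'json', otherwise 'binary'.
    if filename.endswith(('.csv', '.csv.gz')):
        return 'csv'
    if filename.endswith('.json'):
        return 'json'
    return 'binary'

for name in ('training.csv', 'training.csv.gz', 'data.json', 'frame_dir'):
    print(name, '->', infer_format(name))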
def add_identity_parser(subparsers, parent_parser):
"""Creates the arg parsers needed for the identity command and
its subcommands.
"""
# identity
parser = subparsers.add_parser(
'identity',
help='Works with optional roles, policies, and permissions',
description='Provides subcommands to work with roles and policies.')
identity_parsers = parser.add_subparsers(
title="subcommands",
dest="subcommand")
identity_parsers.required = True
# policy
policy_parser = identity_parsers.add_parser(
'policy',
help='Provides subcommands to display existing policies and create '
'new policies',
description='Provides subcommands to list the current policies '
'stored in state and to create new policies.')
policy_parsers = policy_parser.add_subparsers(
title='policy',
dest='policy_cmd')
policy_parsers.required = True
# policy create
create_parser = policy_parsers.add_parser(
'create',
help='Creates batches of sawtooth-identity transactions for setting a '
'policy',
description='Creates a policy that can be set to a role or changes a '
'policy without resetting the role.')
create_parser.add_argument(
'-k', '--key',
type=str,
help='specify the signing key for the resulting batches')
create_target_group = create_parser.add_mutually_exclusive_group()
create_target_group.add_argument(
'-o', '--output',
type=str,
help='specify the output filename for the resulting batches')
create_target_group.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
create_parser.add_argument(
'--wait',
type=int,
default=15,
help="set time, in seconds, to wait for the policy to commit when "
"submitting to the REST API.")
create_parser.add_argument(
'name',
type=str,
help='name of the new policy')
create_parser.add_argument(
'rule',
type=str,
nargs="+",
        help='rule with the format "PERMIT_KEY <key>" or "DENY_KEY <key>" '
        '(multiple "rule" arguments can be specified)')
# policy list
list_parser = policy_parsers.add_parser(
'list',
help='Lists the current policies',
description='Lists the policies that are currently set in state.')
list_parser.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
list_parser.add_argument(
'--format',
default='default',
choices=['default', 'csv', 'json', 'yaml'],
help='choose the output format')
# role
role_parser = identity_parsers.add_parser(
'role',
help='Provides subcommands to display existing roles and create '
'new roles',
description='Provides subcommands to list the current roles '
'stored in state and to create new roles.')
role_parsers = role_parser.add_subparsers(
title='role',
dest='role_cmd')
role_parsers.required = True
# role create
create_parser = role_parsers.add_parser(
'create',
help='Creates a new role that can be used to enforce permissions',
description='Creates a new role that can be used to enforce '
'permissions.')
create_parser.add_argument(
'-k', '--key',
type=str,
help='specify the signing key for the resulting batches')
create_parser.add_argument(
'--wait',
type=int,
default=15,
help='set time, in seconds, to wait for a role to commit '
'when submitting to the REST API.')
create_target_group = create_parser.add_mutually_exclusive_group()
create_target_group.add_argument(
'-o', '--output',
type=str,
help='specify the output filename for the resulting batches')
create_target_group.add_argument(
'--url',
type=str,
help="the URL of a validator's REST API",
default='http://localhost:8008')
create_parser.add_argument(
'name',
type=str,
help='name of the role')
create_parser.add_argument(
'policy',
type=str,
help='identify policy that role will be restricted to')
# role list
list_parser = role_parsers.add_parser(
'list',
help='Lists the current keys and values of roles',
description='Displays the roles that are currently set in state.')
list_parser.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
list_parser.add_argument(
'--format',
default='default',
choices=['default', 'csv', 'json', 'yaml'],
help='choose the output format') | Creates the arg parsers needed for the identity command and
its subcommands. |
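A minimal sketch of how this parser could be wired into a top-level argparse CLI; the parent parser and the sample argv below are assumptions for illustration, not the actual sawtooth entry point.

import argparse

parent_parser = argparse.ArgumentParser(add_help=False)
parser = argparse.ArgumentParser(description='sawtooth CLI (sketch)')
subparsers = parser.add_subparsers(title='subcommands', dest='command')
subparsers.required = True

add_identity_parser(subparsers, parent_parser)

args = parser.parse_args(['identity', 'policy', 'list', '--format', 'json'])
print(args.command, args.subcommand, args.policy_cmd, args.format)  # identity policy list json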
def _isinstance(expr, classname):
"""Check whether `expr` is an instance of the class with name
`classname`
    This is like the builtin `isinstance`, but it takes the `classname` as a
    string instead of the class itself. This is useful when we don't want to
    import the class we are checking against (also, remember that printers
    choose the rendering method based on the class name, so this is fine).
"""
for cls in type(expr).__mro__:
if cls.__name__ == classname:
return True
return False | Check whether `expr` is an instance of the class with name
`classname`
    This is like the builtin `isinstance`, but it takes the `classname` as a
    string instead of the class itself. This is useful when we don't want to
    import the class we are checking against (also, remember that printers
    choose the rendering method based on the class name, so this is fine). |
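A short usage sketch; the classes below are made up for illustration, and the point is that the target class never has to be imported.

class Symbol:
    pass

class MatrixSymbol(Symbol):
    pass

expr = MatrixSymbol()
print(_isinstance(expr, 'Symbol'))    # True, found while walking the MRO
print(_isinstance(expr, 'Operator'))  # False, no class in the MRO has that name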
def _watchdog_queue(self):
"""
        Pull characters off the queue and execute the corresponding command.
"""
while not self.quit:
k = self.queue.get()
            if k == 'q':  # quit
self.quit = True
                self.switch_queue.put('main') | Pull characters off the queue and execute the corresponding command. |
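A standalone sketch of the same pattern, with the surrounding class assumed to expose `queue`, `switch_queue`, and `quit` exactly as used above; pushing 'q' onto the key queue stops the watchdog and hands control back to a 'main' view.

from queue import Queue
import threading

class Watchdog:
    def __init__(self):
        self.quit = False
        self.queue = Queue()          # incoming keystrokes
        self.switch_queue = Queue()   # view-switch requests

    def _watchdog_queue(self):
        while not self.quit:
            k = self.queue.get()
            if k == 'q':  # quit
                self.quit = True
                self.switch_queue.put('main')

w = Watchdog()
t = threading.Thread(target=w._watchdog_queue)
t.start()
w.queue.put('q')             # simulate the user pressing 'q'
t.join()
print(w.switch_queue.get())  # -> main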
def _SGraphFromJsonTree(json_str):
"""
Convert the Json Tree to SGraph
"""
g = json.loads(json_str)
vertices = [_Vertex(x['id'],
dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'id']))
for x in g['vertices']]
edges = [_Edge(x['src'], x['dst'],
dict([(str(k), v) for k, v in _six.iteritems(x) if k != 'src' and k != 'dst']))
for x in g['edges']]
sg = _SGraph().add_vertices(vertices)
if len(edges) > 0:
sg = sg.add_edges(edges)
return sg | Convert the Json Tree to SGraph |
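The JSON shape this helper expects can be read off the parsing above; the sample below is illustrative, and the call is left commented because _SGraph, _Vertex and _Edge are library internals.

json_str = '''
{
  "vertices": [{"id": 0, "name": "root"}, {"id": 1, "name": "child"}],
  "edges":    [{"src": 0, "dst": 1, "weight": 1.0}]
}
'''
# sg = _SGraphFromJsonTree(json_str)   # an SGraph with 2 vertices and 1 edge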
def generate_payload(self, config=None, context=None):
"""
        Generate the payload by iterating over the registered plugins and
        returning the payload from the first plugin that supports the given
        configuration and context.
:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload.
"""
for name, plugin in iteritems(self._registered):
if plugin.supports(config, context):
logger.debug('Returning payload from plugin %s' % name)
return plugin.generate_payload(config, context)
logger.debug('No active plugin to generate payload')
return {
'context': context
        } | Generate the payload by iterating over the registered plugins and returning the payload from the first plugin that supports the given configuration and context.
:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload. |
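The only plugin contract visible here is supports(config, context) and generate_payload(config, context); the plugin below is a hypothetical example of that interface, and how instances end up in self._registered is not shown.

class RequestPlugin:
    """Hypothetical plugin that only handles contexts carrying a request id."""

    def supports(self, config, context):
        return context is not None and 'request_id' in context

    def generate_payload(self, config, context):
        return {'context': context, 'request_id': context['request_id']}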
def __make_id(receiver):
"""Generate an identifier for a callable signal receiver.
This is used when disconnecting receivers, where we need to correctly
establish equivalence between the input receiver and the receivers assigned
to a signal.
Args:
receiver: A callable object.
Returns:
An identifier for the receiver.
"""
if __is_bound_method(receiver):
return (id(receiver.__func__), id(receiver.__self__))
return id(receiver) | Generate an identifier for a callable signal receiver.
This is used when disconnecting receivers, where we need to correctly
establish equivalence between the input receiver and the receivers assigned
to a signal.
Args:
receiver: A callable object.
Returns:
An identifier for the receiver. |
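A quick illustration of why bound methods need the two-part identifier: every attribute access builds a new bound-method object, so id() alone would not be stable (the class and names below are illustrative).

class Handler:
    def on_event(self):
        pass

h = Handler()
a = h.on_event
b = h.on_event
print(a == b)   # True: the same function bound to the same instance
print(a is b)   # False: each access creates a fresh bound-method object
# __make_id(a) and __make_id(b) both evaluate to (id(Handler.on_event), id(h)),
# so the receiver can be matched again when disconnecting.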
def do_authorization(self, transactionid, amt):
"""Shortcut for the DoAuthorization method.
Use the TRANSACTIONID from DoExpressCheckoutPayment for the
``transactionid``. The latest version of the API does not support the
creation of an Order from `DoDirectPayment`.
The `amt` should be the same as passed to `DoExpressCheckoutPayment`.
Flow for a payment involving a `DoAuthorization` call::
            1. One or many calls to `SetExpressCheckout` with pertinent order
               details, returns `TOKEN`
            2. `DoExpressCheckoutPayment` with `TOKEN`, `PAYMENTACTION` set to
               Order, `AMT` set to the amount of the transaction, returns
               `TRANSACTIONID`
            3. `DoAuthorization` with `TRANSACTIONID` and `AMT` set to the
               amount of the transaction.
            4. `DoCapture` with the `AUTHORIZATIONID` (the `TRANSACTIONID`
               returned by `DoAuthorization`)
"""
args = self._sanitize_locals(locals())
return self._call('DoAuthorization', **args) | Shortcut for the DoAuthorization method.
Use the TRANSACTIONID from DoExpressCheckoutPayment for the
``transactionid``. The latest version of the API does not support the
creation of an Order from `DoDirectPayment`.
The `amt` should be the same as passed to `DoExpressCheckoutPayment`.
Flow for a payment involving a `DoAuthorization` call::
            1. One or many calls to `SetExpressCheckout` with pertinent order
               details, returns `TOKEN`
            2. `DoExpressCheckoutPayment` with `TOKEN`, `PAYMENTACTION` set to
               Order, `AMT` set to the amount of the transaction, returns
               `TRANSACTIONID`
            3. `DoAuthorization` with `TRANSACTIONID` and `AMT` set to the
               amount of the transaction.
            4. `DoCapture` with the `AUTHORIZATIONID` (the `TRANSACTIONID`
               returned by `DoAuthorization`) |
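A hedged sketch of that flow; apart from do_authorization itself, the snake_case wrapper names and field accesses below are assumptions about the rest of this interface, so every call is left commented.

# token = api.set_express_checkout(amt='10.00', returnurl=..., cancelurl=...)['TOKEN']
# txn   = api.do_express_checkout_payment(token=token, paymentaction='Order',
#                                         amt='10.00', payerid=...)['TRANSACTIONID']
# auth  = api.do_authorization(txn, '10.00')['TRANSACTIONID']
# api.do_capture(authorizationid=auth, amt='10.00', completetype='Complete')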
def __detect_os_identity_api_version(self):
"""
Return preferred OpenStack Identity API version (either one of the two strings ``'2'`` or ``'3'``) or ``None``.
The following auto-detection strategies are tried (in this order):
#. Read the environmental variable `OS_IDENTITY_API_VERSION` and check if its value is one of the two strings ``'2'`` or ``'3'``;
#. Check if a version tag like ``/v3`` or ``/v2.0`` ends the OpenStack auth URL.
If none of the above worked, return ``None``.
For more information on ``OS_IDENTITY_API_VERSION``, please see
`<https://docs.openstack.org/developer/python-openstackclient/authentication.html>`_.
"""
ver = os.getenv('OS_IDENTITY_API_VERSION', '')
if ver == '3':
log.debug(
"Using OpenStack Identity API v3"
" because of environmental variable setting `OS_IDENTITY_API_VERSION=3`")
return '3'
elif ver == '2' or ver.startswith('2.'):
log.debug(
"Using OpenStack Identity API v2"
" because of environmental variable setting `OS_IDENTITY_API_VERSION=2`")
return '2'
elif self._os_auth_url.endswith('/v3'):
log.debug(
"Using OpenStack Identity API v3 because of `/v3` ending in auth URL;"
" set environmental variable OS_IDENTITY_API_VERSION to force use of Identity API v2 instead.")
return '3'
elif self._os_auth_url.endswith('/v2.0'):
log.debug(
"Using OpenStack Identity API v2 because of `/v2.0` ending in auth URL;"
" set environmental variable OS_IDENTITY_API_VERSION to force use of Identity API v3 instead.")
return '2'
else:
# auto-detection failed, need to probe
return None | Return preferred OpenStack Identity API version (either one of the two strings ``'2'`` or ``'3'``) or ``None``.
The following auto-detection strategies are tried (in this order):
#. Read the environmental variable `OS_IDENTITY_API_VERSION` and check if its value is one of the two strings ``'2'`` or ``'3'``;
#. Check if a version tag like ``/v3`` or ``/v2.0`` ends the OpenStack auth URL.
If none of the above worked, return ``None``.
For more information on ``OS_IDENTITY_API_VERSION``, please see
`<https://docs.openstack.org/developer/python-openstackclient/authentication.html>`_. |
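An illustrative standalone version of the same detection order (environment variable first, then URL suffix); the function name and sample URLs are made up, and the expected output assumes OS_IDENTITY_API_VERSION is unset.

import os

def detect_identity_api_version(auth_url):
    ver = os.getenv('OS_IDENTITY_API_VERSION', '')
    if ver == '3':
        return '3'
    if ver == '2' or ver.startswith('2.'):
        return '2'
    if auth_url.endswith('/v3'):
        return '3'
    if auth_url.endswith('/v2.0'):
        return '2'
    return None

print(detect_identity_api_version('https://keystone.example.org:5000/v3'))    # '3'
print(detect_identity_api_version('https://keystone.example.org:5000/v2.0'))  # '2'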