Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
377,700 | def do_quality(self, quality):
if (quality == 'grey' or quality == 'gray'):
self.logger.debug("quality: converting to gray")
self.image = self.image.convert('L')
elif (quality == 'bitonal'):
self.logger.debug("quality: converting to bitonal")
self.image = self.image.convert('1')
else:
if (self.image.mode.startswith()):
self.logger.debug("quality: fudged conversion from mode %s to I"
% (self.image.mode))
self.image = self.image.convert()
self.image = self.image.point(lambda i: i * (1.0 / 256.0))
if (self.image.mode not in (, , , )):
self.logger.debug("quality: converting from mode %s to RGB"
% (self.image.mode))
self.image = self.image.convert('RGB')
else:
self.logger.debug("quality: quality (nop)") | Apply value of quality parameter.
For PIL docs see
<http://pillow.readthedocs.org/en/latest/reference/Image.html#PIL.Image.Image.convert> |
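A brief, hedged sketch of the Pillow mode conversions this quality handler relies on (Pillow assumed available; the in-memory image below is only a stand-in):

```python
from PIL import Image

img = Image.new("RGB", (4, 4), (200, 10, 10))   # stand-in for a loaded image
gray = img.convert("L")      # 8-bit grayscale, the usual target for a gray/grey quality
bitonal = img.convert("1")   # 1-bit black and white, the usual target for bitonal
print(gray.mode, bitonal.mode)   # -> L 1
```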
377,701 | def save_models(self, model_path):
for group, condition_model_set in self.condition_models.items():
for model_name, model_obj in condition_model_set.items():
out_filename = model_path + \
"{0}_{1}_condition.pkl".format(group,
model_name.replace(" ", "-"))
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for group, size_model_set in self.size_models.items():
for model_name, model_obj in size_model_set.items():
out_filename = model_path + \
"{0}_{1}_size.pkl".format(group,
model_name.replace(" ", "-"))
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for group, dist_model_set in self.size_distribution_models.items():
for model_type, model_objs in dist_model_set.items():
for model_name, model_obj in model_objs.items():
out_filename = model_path + \
"{0}_{1}_{2}_sizedist.pkl".format(group,
model_name.replace(" ", "-"),
model_type)
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for model_type, track_type_models in self.track_models.items():
for group, track_model_set in track_type_models.items():
for model_name, model_obj in track_model_set.items():
out_filename = model_path + \
"{0}_{1}_{2}_track.pkl".format(group,
model_name.replace(" ", "-"),
model_type)
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
return | Save machine learning models to pickle files. |
377,702 | def find(self, pair, default=None):
pair = normalizers.normalizeKerningKey(pair)
value = self._find(pair, default)
if value != default:
value = normalizers.normalizeKerningValue(value)
return value | Returns the value for the kerning pair.
**pair** is a ``tuple`` of two :ref:`type-string`\s, and the returned
values will either be :ref:`type-int-float` or ``None``
if no pair was found. ::
>>> font.kerning[("A", "V")]
-25 |
377,703 | def get_bucket(self, hash_name, bucket_key):
if hash_name in self.buckets:
if bucket_key in self.buckets[hash_name]:
return self.buckets[hash_name][bucket_key]
return [] | Returns bucket content as list of tuples (vector, data). |
377,704 | def convert_mapper(self, tomap):
frommap = self.crdmap
if frommap == tomap:
return
if hasattr(self, 'radius'):
x0, y0 = frommap.offset_pt((self.x, self.y), (self.radius, 0))
pts = frommap.to_data(((self.x, self.y), (x0, y0)))
pts = tomap.data_to(pts)
self.radius = np.fabs(pts[1][0] - pts[0][0])
elif hasattr(self, 'xradius'):
x0, y0 = frommap.offset_pt((self.x, self.y), (self.xradius,
self.yradius))
pts = frommap.to_data(((self.x, self.y), (x0, y0)))
pts = tomap.data_to(pts)
self.xradius = np.fabs(pts[1][0] - pts[0][0])
self.yradius = np.fabs(pts[1][1] - pts[0][1])
data_pts = self.get_data_points()
self.crdmap = tomap
self.set_data_points(data_pts) | Converts our object from using one coordinate map to another.
NOTE: In some cases this only approximately preserves the
equivalent point values when transforming between coordinate
spaces. |
377,705 | def minimize_t0s(means, weights, combs):
def make_quality_function(means, weights, combs):
def quality_function(t0s):
sq_sum = 0
for mean, comb, weight in zip(means, combs, weights):
sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(means, weights, combs)
t0s = np.random.rand(31)
bounds = [(0, 0)] + [(-10., 10.)] * 30
opt_t0s = optimize.minimize(qfunc, t0s, bounds=bounds)
return opt_t0s | Varies t0s to minimize the deviation of the gaussian means from zero.
Parameters
----------
means: numpy array of means of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_t0s: optimal t0 values for all PMTs |
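A self-contained sketch of the same pattern with illustrative toy data (numpy and scipy assumed): build a weighted least-squares quality function over pairwise t0 differences and hand it to `scipy.optimize.minimize` with per-parameter bounds:

```python
import numpy as np
from scipy import optimize

means = np.array([0.5, -0.3])    # toy Gaussian means for two PMT pairs
weights = np.array([1.0, 2.0])   # weights for the squared sum
combs = [(0, 1), (1, 2)]         # PMT index pairs

def quality(t0s):
    # weighted squared deviation of (t0_j - t0_i) from the measured means
    return sum(((m - (t0s[j] - t0s[i])) * w) ** 2
               for m, (i, j), w in zip(means, combs, weights))

bounds = [(0, 0)] + [(-10.0, 10.0)] * 2   # pin the first t0, bound the rest
opt = optimize.minimize(quality, np.random.rand(3), bounds=bounds)
print(opt.x)
```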
377,706 | def reduce_loss_dict(loss_dict):
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k in sorted(loss_dict.keys()):
loss_names.append(k)
all_losses.append(loss_dict[k])
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
all_losses /= world_size
reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
return reduced_losses | Reduce the loss dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
loss_dict, after reduction. |
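A hedged sketch of the same reduction done standalone with `torch.distributed` (assumes a process group is already initialized and every rank holds the same loss keys):

```python
import torch
import torch.distributed as dist

def average_losses(loss_dict):
    # Stack in a fixed (sorted) key order so every rank reduces the same layout.
    keys = sorted(loss_dict)
    stacked = torch.stack([loss_dict[k].detach() for k in keys])
    dist.reduce(stacked, dst=0)            # sum onto rank 0
    if dist.get_rank() == 0:
        stacked /= dist.get_world_size()   # rank 0 now holds the average
    return dict(zip(keys, stacked))
```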
377,707 | def dispatch_command(function, *args, **kwargs):
parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
set_default_command(parser, function)
dispatch(parser, *args, **kwargs) | A wrapper for :func:`dispatch` that creates a one-command parser.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_command(foo)
...is a shortcut for::
parser = ArgumentParser()
set_default_command(parser, foo)
dispatch(parser)
This function can be also used as a decorator. |
377,708 | def updateHeader(self, wcsname=None, reusename=False):
self.openFile()
verbose_level = 1
if not self.perform_update:
verbose_level = 0
if self.perform_update:
wcscorr.init_wcscorr(self._im.hdu)
extlist = []
wcscorr_extname = self.ext_name
if self.ext_name == "PRIMARY":
extlist = [0]
else:
for ext in range(1,self.nvers+1):
extlist.append((self.ext_name,ext))
if ( not in self._im.hdu[self.ext_name,ext].header and
self._im.hdu.fileinfo(0)[] == ):
self._im.hdu[self.ext_name,ext].header[] =
if not self.identityfit and self.goodmatch and \
self.fit[][0] != np.nan:
updatehdr.updatewcs_with_shift(self._im.hdu, self.refWCS,
wcsname=wcsname, reusename=reusename,
fitgeom=self.fit_pars[],
xsh=self.fit[][0],ysh=self.fit[][1],
rot=self.fit[],scale=self.fit[][0],
fit=self.fit[], verbose=verbose_level,
xrms=self.fit[][],
yrms=self.fit[][])
wnames = altwcs.wcsnames(self._im.hdu,ext=extlist[0])
altkeys = []
for k in wnames:
if wnames[k] == wcsname:
altkeys.append(k)
if len(altkeys) > 1 and in altkeys:
altkeys.remove()
if len(altkeys) == 0:
next_key =
else:
next_key = altkeys[-1]
if self.perform_update:
log.info(%next_key)
self.next_key = next_key
else:
if reusename:
next_key = altwcs.getKeyFromName(self._im.hdu[extlist[0]].header,wcsname)
if next_key is None:
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
else:
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
if self.perform_update:
wnames = altwcs.wcsnames(self._im.hdu,ext=extlist[0])
if len(wnames) == 0:
pri_wcsname = None
else:
if not in wnames:
self._im.hdu[extlist[0]].header[] =
wnames[] =
pri_wcsname = wnames[]
next_pkey = altwcs.getKeyFromName(fits.getheader(self.name, extlist[0], memmap=False),pri_wcsname)
log.info(%next_pkey)
altwcs.archiveWCS(self._im.hdu, extlist,
wcskey=next_pkey, wcsname=pri_wcsname,
reusekey=True)
if reusename:
next_key = altwcs.getKeyFromName(self._im.hdu[extlist[0]].header,wcsname)
if next_key is None:
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
else:
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
for ext in extlist:
self._im.hdu[ext].header[] = wcsname
altwcs.archiveWCS(self._im.hdu, extlist,
wcskey=next_key,wcsname=wcsname, reusekey=reusename)
self.next_key =
wcscorr.update_wcscorr(self._im.hdu, wcs_id=wcsname,
extname=self.ext_name) | Update header of image with shifts computed by *perform_fit()*. |
377,709 | def rootChild_resetPassword(self, req, webViewer):
from xmantissa.ixmantissa import IWebTranslator, IPreferenceAggregator
return URL.fromString(
IWebTranslator(self.store).linkTo(
IPreferenceAggregator(self.store).storeID)) | Redirect authenticated users to their settings page (hopefully they
have one) when they try to reset their password.
This is the wrong way for this functionality to be implemented. See
#2524. |
377,710 | def _get_table_info(self):
self.rowid = None
self.fields = []
self.field_info = {}
self.cursor.execute('DESCRIBE %s' % self.name)
for row in self.cursor.fetchall():
field,typ,null,key,default,extra = row
self.fields.append(field)
self.field_info[field] = {'type': typ, 'null': null, 'key': key,
'default': default, 'extra': extra}
if extra == 'auto_increment':
self.rowid = field | Database-specific method to get field names |
377,711 | def get_cameras_schedule(self):
resource = "schedule"
schedule_event = self.publish_and_get_event(resource)
if schedule_event:
return schedule_event.get()
return None | Return the schedule set for cameras. |
377,712 | def add(self, *nodes):
for node in nodes:
node.set_parent(self)
self.add_sibling(node) | Adds nodes as siblings
:param nodes: GraphNode(s) |
377,713 | def check_webhook_secret(app_configs=None, **kwargs):
from . import settings as djstripe_settings
messages = []
secret = djstripe_settings.WEBHOOK_SECRET
if secret and not secret.startswith("whsec_"):
messages.append(
checks.Warning(
"DJSTRIPE_WEBHOOK_SECRET does not look valid",
hint="It should start with whsec_...",
id="djstripe.W003",
)
)
return messages | Check that DJSTRIPE_WEBHOOK_SECRET looks correct |
377,714 | def iter_actions(self):
ns =
scpd_body = requests.get(self.base_url + self.scpd_url).content
tree = XML.fromstring(scpd_body)
vartypes = {}
srvStateTables = tree.findall(.format(ns))
for srvStateTable in srvStateTables:
statevars = srvStateTable.findall(.format(ns))
for state in statevars:
name = state.findtext(.format(ns))
datatype = state.findtext(.format(ns))
default = state.findtext(.format(ns))
value_list_elt = state.find(.format(ns))
if value_list_elt is None:
value_list_elt = ()
value_list = [item.text for item in value_list_elt] or None
value_range_elt = state.find(.format(ns))
if value_range_elt is None:
value_range_elt = ()
value_range = [item.text for item in value_range_elt] or None
vartypes[name] = Vartype(datatype, default, value_list,
value_range)
actionLists = tree.findall(.format(ns))
for actionList in actionLists:
actions = actionList.findall(.format(ns))
for i in actions:
action_name = i.findtext(.format(ns))
argLists = i.findall(.format(ns))
for argList in argLists:
args_iter = argList.findall(.format(ns))
in_args = []
out_args = []
for arg in args_iter:
arg_name = arg.findtext(.format(ns))
direction = arg.findtext(.format(ns))
related_variable = arg.findtext(
.format(ns))
vartype = vartypes[related_variable]
if direction == "in":
in_args.append(Argument(arg_name, vartype))
else:
out_args.append(Argument(arg_name, vartype))
yield Action(action_name, in_args, out_args) | Yield the service's actions with their arguments.
Yields:
`Action`: the next action.
Each action is an Action namedtuple, consisting of action_name
(a string), in_args (a list of Argument namedtuples consisting of name
and argtype), and out_args (ditto), eg::
Action(
name='SetFormat',
in_args=[
Argument(name='DesiredTimeFormat', vartype=<Vartype>),
Argument(name='DesiredDateFormat', vartype=<Vartype>)],
out_args=[]
) |
377,715 | def login(self, access_token=""):
if access_token:
credentials = argparse.Namespace(token=access_token, refresh_token=None, id_token=None)
else:
scopes = ["openid", "email", "offline_access"]
from google_auth_oauthlib.flow import InstalledAppFlow
flow = InstalledAppFlow.from_client_config(self.application_secrets, scopes=scopes)
msg = "Authentication successful. Please close this tab and run HCA CLI commands in the terminal."
credentials = flow.run_local_server(success_message=msg, audience=self._audience)
self.config.oauth2_token = dict(access_token=credentials.token,
refresh_token=credentials.refresh_token,
id_token=credentials.id_token,
expires_at="-1",
token_type="Bearer")
print("Storing access credentials") | Configure and save {prog} authentication credentials.
This command may open a browser window to ask for your
consent to use web service authentication credentials. |
377,716 | def find_children(self, pattern=r".*", flags=0, candidates=None):
if candidates is None:
candidates = []
for child in self.__children:
if re.search(pattern, child.name, flags):
child not in candidates and candidates.append(child)
child.find_children(pattern, flags, candidates)
return candidates | Finds the children matching the given pattern.
Usage::
>>> node_a = AbstractCompositeNode("MyNodeA")
>>> node_b = AbstractCompositeNode("MyNodeB", node_a)
>>> node_c = AbstractCompositeNode("MyNodeC", node_a)
>>> node_a.find_children("c", re.IGNORECASE)
[<AbstractCompositeNode object at 0x101078040>]
:param pattern: Matching pattern.
:type pattern: unicode
:param flags: Matching regex flags.
:type flags: int
:param candidates: Matching candidates.
:type candidates: list
:return: Matching children.
:rtype: list |
377,717 | def coverage(fn):
fp = TraceFuncCoverage(fn)
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn | Mark `fn` for line coverage analysis.
Results will be printed to sys.stdout on program termination.
Usage::
def fn(...):
...
fn = coverage(fn)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@coverage
def fn(...):
... |
377,718 | def _split_audio_by_duration(self, audio_abs_path,
results_abs_path, duration_seconds):
total_seconds = self._get_audio_duration_seconds(audio_abs_path)
current_segment = 0
while current_segment <= total_seconds // duration_seconds + 1:
if current_segment + duration_seconds > total_seconds:
ending_second = total_seconds
else:
ending_second = current_segment + duration_seconds
self._audio_segment_extractor(
audio_abs_path,
results_abs_path.replace("*", "{:03d}".format(
current_segment)),
starting_second=current_segment, duration=(ending_second -
current_segment))
current_segment += 1 | Calculates the length of each segment and passes it to
self._audio_segment_extractor
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior the the format
decleration i.e. name%03.wav. Here, we've added `*` at staging
step, which we'll replace.
duration_seconds : int |
377,719 | def broken_faces(mesh, color=None):
adjacency = nx.from_edgelist(mesh.face_adjacency)
broken = [k for k, v in dict(adjacency.degree()).items()
if v != 3]
broken = np.array(broken)
if color is not None:
color = np.array(color)
if not (color.shape == (4,) or color.shape == (3,)):
color = [255, 0, 0, 255]
mesh.visual.face_colors[broken] = color
return broken | Return the index of faces in the mesh which break the
watertight status of the mesh.
Parameters
--------------
mesh: Trimesh object
color: (4,) uint8, will set broken faces to this color
None, will not alter mesh colors
Returns
---------------
broken: (n, ) int, indexes of mesh.faces |
377,720 | def write_long_at(self, n, pos, pack_into=Struct().pack_into):
if 0 <= n <= 0xFFFFFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError(, n)
return self | Write an unsigned 32bit value at a specific position in the buffer.
Used for writing tables and frames. |
377,721 | def _multicall_callback(self, values, calls):
result = KojiMultiCallIterator(values)
result.connection = self.connection
result.calls = calls
return result | Fires when we get information back from the XML-RPC server.
This processes the raw results of system.multicall into a usable
iterator of values (and/or Faults).
:param values: list of data txkoji.Connection.call()
:param calls: list of calls we sent in this multicall RPC
:returns: KojiMultiCallIterator with the resulting values from all our
calls. |
377,722 | def get_social_login(self, *args, **kwargs):
social_login = super(SocialConnectMixin, self).get_social_login(*args, **kwargs)
social_login.state['process'] = AuthProcess.CONNECT
return social_login | Set the social login process state to connect rather than login
Refer to the implementation of get_social_login in base class and to the
allauth.socialaccount.helpers module complete_social_login function. |
377,723 | def auth_required(realm, auth_func):
def auth_decorator(func):
def inner(self, *args, **kw):
if self.get_authenticated_user(auth_func, realm):
return func(self, *args, **kw)
return inner
return auth_decorator | Decorator that protect methods with HTTP authentication. |
377,724 | def as_new_format(self, format="ATR"):
first_data = len(self.header)
raw = self.rawdata[first_data:]
data = add_atr_header(raw)
newraw = SegmentData(data)
image = self.__class__(newraw)
return image | Create a new disk image in the specified format |
377,725 | def isInRoom(self, _id):
if SockJSRoomHandler._room.has_key(self._gcls() + _id):
if self in SockJSRoomHandler._room[self._gcls() + _id]:
return True
return False | Check a given user is in given room |
377,726 | def create_snapshot(self, systemId, snapshotSpecificationObject):
self.conn.connection._check_login()
response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/System::", systemId, ), json=snapshotSpecificationObject.__to_dict__())
return response | Create snapshot for list of volumes
:param systemID: Cluster ID
:param snapshotSpecificationObject: Of class SnapshotSpecification
:rtype: SnapshotGroupId |
377,727 | def update(self, read, write, manage):
data = values.of({'Read': read, 'Write': write, 'Manage': manage, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SyncMapPermissionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=self._solution['identity'],
) | Update the SyncMapPermissionInstance
:param bool read: Read access.
:param bool write: Write access.
:param bool manage: Manage access.
:returns: Updated SyncMapPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionInstance |
377,728 | def listTheExtras(self, deleteAlso):
extras = configobj.get_extra_values(self)
expanded = [ (x+ \
( bool(len(x[0])<1 and hasattr(self[x[1]], )), ) \
) for x in extras]
retval = ''
if expanded:
retval = flattened2str(expanded, extra=1)
if deleteAlso:
for tup_to_del in extras:
target = self
location = tup_to_del[0]
for subdict in location: target = target[subdict]
target.pop(tup_to_del[1])
return retval | Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items. |
377,729 | def get_is_group_member(self, grp_name, user):
self.project_service.set_auth(self._token_project)
return self.project_service.get_is_group_member(grp_name, user) | Check if the given user is a member of the named group.
Note that a group maintainer is not considered a member unless the
user is also explicitly added as a member.
Args:
name (string): Name of group.
user_name (string): User of interest.
Returns:
(bool): False if user not a member. |
377,730 | def RemoveClass(self, class_name):
if class_name not in self._class_mapping:
raise problems.NonexistentMapping(class_name)
del self._class_mapping[class_name] | Removes an entry from the list of known classes.
Args:
class_name: A string with the class name that is to be removed.
Raises:
NonexistentMapping if there is no class with the specified class_name. |
377,731 | def MeetsConditions(knowledge_base, source):
source_conditions_met = True
os_conditions = ConvertSupportedOSToConditions(source)
if os_conditions:
source.conditions.append(os_conditions)
for condition in source.conditions:
source_conditions_met &= artifact_utils.CheckCondition(
condition, knowledge_base)
return source_conditions_met | Check conditions on the source. |
377,732 | def safe_listget(list_, index, default=):
if index >= len(list_):
return default
ret = list_[index]
if ret is None:
return default
return ret | deprecate |
377,733 | def multi_to_dict(multi):
return dict(
(key, value[0] if len(value) == 1 else value)
for key, value in multi.to_dict(False).items()
) | Transform a Werkzeug multidictionary into a flat dictionary |
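A short usage sketch (Werkzeug assumed) showing what the flattening does: single-valued keys collapse to a scalar, multi-valued keys stay a list:

```python
from werkzeug.datastructures import MultiDict

multi = MultiDict([("tag", "a"), ("tag", "b"), ("name", "demo")])
flat = {key: value[0] if len(value) == 1 else value
        for key, value in multi.to_dict(flat=False).items()}
print(flat)   # {'tag': ['a', 'b'], 'name': 'demo'}
```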
377,734 | def make_pipeline(context):
primary_share = IsPrimaryShare()
not_wi = ~IEXCompany.symbol.latest.endswith()
not_lp_name = ~IEXCompany.companyName.latest.matches()
have_market_cap = IEXKeyStats.marketcap.latest >= 1
price = USEquityPricing.close.latest
AtLeastPrice = (price >= context.MyLeastPrice)
AtMostPrice = (price <= context.MyMostPrice)
tradeable_stocks = (
primary_share
& not_wi
& not_lp_name
& have_market_cap
& AtLeastPrice
& AtMostPrice
)
LowVar = 6
HighVar = 40
log.info(
%
(context.MaxCandidates, LowVar, HighVar))
base_universe = AverageDollarVolume(
window_length=20,
mask=tradeable_stocks
).percentile_between(LowVar, HighVar)
ShortAvg = SimpleMovingAverage(
inputs=[USEquityPricing.close],
window_length=3,
mask=base_universe
)
LongAvg = SimpleMovingAverage(
inputs=[USEquityPricing.close],
window_length=45,
mask=base_universe
)
percent_difference = (ShortAvg - LongAvg) / LongAvg
stocks_worst = percent_difference.bottom(context.MaxCandidates)
securities_to_trade = (stocks_worst)
return Pipeline(
columns={
: stocks_worst
},
screen=(securities_to_trade),
) | Create our pipeline. |
377,735 | def send_request(user_session, method, request):
if user_session.session:
session = user_session.session
try:
method = method.upper() if method else
if method == GET:
if request.filename:
return file_download(user_session, request)
response = session.get(
request.href,
params=request.params,
headers=request.headers,
timeout=user_session.timeout)
response.encoding =
counters.update(read=1)
if logger.isEnabledFor(logging.DEBUG):
debug(response)
if response.status_code not in (200, 204, 304):
raise SMCOperationFailure(response)
elif method == POST:
if request.files:
return file_upload(user_session, method, request)
response = session.post(
request.href,
data=json.dumps(request.json, cls=CacheEncoder),
headers=request.headers,
params=request.params)
response.encoding =
counters.update(create=1)
if logger.isEnabledFor(logging.DEBUG):
debug(response)
if response.status_code not in (200, 201, 202):
raise SMCOperationFailure(response)
elif method == PUT:
if request.files:
return file_upload(user_session, method, request)
request.headers.update(Etag=request.etag)
response = session.put(
request.href,
data=json.dumps(request.json, cls=CacheEncoder),
params=request.params,
headers=request.headers)
counters.update(update=1)
if logger.isEnabledFor(logging.DEBUG):
debug(response)
if response.status_code != 200:
raise SMCOperationFailure(response)
elif method == DELETE:
response = session.delete(
request.href,
headers=request.headers)
counters.update(delete=1)
if response.status_code in (409,):
req = session.get(request.href)
etag = req.headers.get()
response = session.delete(
request.href,
headers={: etag})
response.encoding =
if logger.isEnabledFor(logging.DEBUG):
debug(response)
if response.status_code not in (200, 204):
raise SMCOperationFailure(response)
else:
return SMCResult(msg= % method,
user_session=user_session)
except SMCOperationFailure as error:
if error.code in (401,):
user_session.refresh()
return send_request(user_session, method, request)
raise error
except requests.exceptions.RequestException as e:
raise SMCConnectionError(
% e)
else:
return SMCResult(response, user_session=user_session)
else:
raise SMCConnectionError() | Send request to SMC
:param Session user_session: session object
:param str method: method for request
:param SMCRequest request: request object
:raises SMCOperationFailure: failure with reason
:rtype: SMCResult |
377,736 | def jensen_shannon(logu, self_normalized=False, name=None):
with tf.compat.v1.name_scope(name, "jensen_shannon", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
npdt = logu.dtype.as_numpy_dtype
y = tf.nn.softplus(logu)
if self_normalized:
y -= np.log(2).astype(npdt)
return tf.exp(logu) * logu - (1. + tf.exp(logu)) * y | The Jensen-Shannon Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
```none
f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
```
When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, m] + KL[q, m]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
For more information, see:
Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
Inf. Th., 37, 145-151, 1991.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`. |
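A quick numerical check, independent of the TensorFlow code above (plain numpy), that the self-normalized function f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2) is zero with zero slope at u = 1:

```python
import numpy as np

def f(u):
    return u * np.log(u) - (1 + u) * np.log(1 + u) + (u + 1) * np.log(2)

print(np.isclose(f(1.0), 0.0))   # f(1) == 0
eps = 1e-6
print(np.isclose((f(1 + eps) - f(1 - eps)) / (2 * eps), 0.0, atol=1e-5))  # f'(1) == 0
```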
377,737 | def _prefix_from_uri(self, uriname):
uripart, tag = uriname.split('}', maxsplit=1)
uri = uripart.replace('{', '')
return self.REVERSE_NS[uri] + ':' + tag | Given a fully qualified XML name, find a prefix
e.g. {http://ns.adobe.com/pdf/1.3/}Producer -> pdf:Producer |
377,738 | def status(
message: str = None,
progress: float = None,
section_message: str = None,
section_progress: float = None,
):
environ.abort_thread()
step = _cd.project.get_internal_project().current_step
if message is not None:
step.progress_message = message
if progress is not None:
step.progress = max(0.0, min(1.0, progress))
if section_message is not None:
step.sub_progress_message = section_message
if section_progress is not None:
step.sub_progress = section_progress | Updates the status display, which is only visible while a step is running.
This is useful for providing feedback and information during long-running
steps.
A section progress is also available for cases where long running tasks
consist of multiple tasks and you want to display sub-progress messages
within the context of the larger status.
Note: this is only supported when running in the Cauldron desktop
application.
:param message:
The status message you want to display. If left blank the previously
set status message will be retained. Should you desire to remove an
existing message, specify a blank string for this argument.
:param progress:
A number between zero and one that indicates the overall progress for
the current status. If no value is specified, the previously assigned
progress will be retained.
:param section_message:
The status message you want to display for a particular task within a
long-running step. If left blank the previously set section message
will be retained. Should you desire to remove an existing message,
specify a blank string for this argument.
:param section_progress:
A number between zero and one that indicates the progress for the
current section status. If no value is specified, the previously
assigned section progress value will be retained. |
377,739 | def replace_find_selection(self, focus_replace_text=False):
if self.editor is not None:
replace_text = to_text_string(self.replace_text.currentText())
search_text = to_text_string(self.search_text.currentText())
case = self.case_button.isChecked()
words = self.words_button.isChecked()
re_flags = re.MULTILINE if case else re.IGNORECASE|re.MULTILINE
re_pattern = None
if self.re_button.isChecked():
pattern = search_text
else:
pattern = re.escape(search_text)
replace_text = re.escape(replace_text)
if words:
pattern = r'\b{pattern}\b'.format(pattern=pattern)
try:
re_pattern = re.compile(pattern, flags=re_flags)
re_pattern.sub(replace_text, '')
except re.error as e:
return
selected_text = to_text_string(self.editor.get_selected_text())
replacement = re_pattern.sub(replace_text, selected_text)
if replacement != selected_text:
cursor = self.editor.textCursor()
cursor.beginEditBlock()
cursor.removeSelectedText()
if not self.re_button.isChecked():
replacement = re.sub(r, r, replacement)
cursor.insertText(replacement)
cursor.endEditBlock()
if focus_replace_text:
self.replace_text.setFocus()
else:
self.editor.setFocus() | Replace and find in the current selection |
377,740 | def get_permissions(self, token, resource_scopes_tuples=None,
submit_request=False, ticket=None):
headers = {
"Authorization": "Bearer %s" % token,
: ,
}
data = [
(, ),
(, self._client_id),
(, True),
]
if resource_scopes_tuples:
for atuple in resource_scopes_tuples:
data.append((, .join(atuple)))
data.append((, submit_request))
elif ticket:
data.append((, ticket))
authz_info = {}
try:
response = self._realm.client.post(
self.well_known[],
data=urlencode(data),
headers=headers,
)
error = response.get()
if error:
self.logger.warning(
,
error,
response.get()
)
else:
token = response.get()
decoded_token = self._decode_token(token.split()[1])
authz_info = decoded_token.get(, {})
except KeycloakClientError as error:
self.logger.warning(str(error))
return authz_info | Request permissions for user from keycloak server.
https://www.keycloak.org/docs/latest/authorization_services/index
.html#_service_protection_permission_api_papi
:param str token: client access token
:param Iterable[Tuple[str, str]] resource_scopes_tuples:
list of tuples (resource, scope)
:param boolean submit_request: submit request if not allowed to access?
:param str ticket: Permissions ticket
rtype: dict |
377,741 | def add_child(self, child):
if child:
if child.tag in self.contained_children:
self.children.append(child)
else:
raise ETD_MS_StructureException(
%
(child.tag, self.tag)
) | Add a child object to the current one.
Checks the contained_children list to make sure that the object
is allowable, and throws an exception if not. |
377,742 | def select_previous(self):
self.footer.clear_message()
if self.selected == 0:
self.footer.draw_message("Cannot move beyond first toot.", Color.GREEN)
return
old_index = self.selected
new_index = self.selected - 1
self.selected = new_index
self.redraw_after_selection_change(old_index, new_index) | Move to the previous status in the timeline. |
377,743 | def get_orgas(self):
r = self._request()
if not r:
return None
retour = []
for data in r.json()[]:
o = Orga()
o.__dict__.update(data)
o.pk = o.id
retour.append(o)
return retour | Return the list of pk for all orgas |
377,744 | def enable(self):
nwin = self.nwin.value()
for label, xs, ys, nx, ny in \
zip(self.label[:nwin], self.xs[:nwin], self.ys[:nwin],
self.nx[:nwin], self.ny[:nwin]):
label.config(state=)
xs.enable()
ys.enable()
nx.enable()
ny.enable()
for label, xs, ys, nx, ny in \
zip(self.label[nwin:], self.xs[nwin:], self.ys[nwin:],
self.nx[nwin:], self.ny[nwin:]):
label.config(state=)
xs.disable()
ys.disable()
nx.disable()
ny.disable()
self.nwin.enable()
self.xbin.enable()
self.ybin.enable()
self.sbutt.enable() | Enables all settings |
377,745 | def _create_session(self):
self.driver = requests.Session(**self.driver_args)
self.update_headers(self.current_headers)
self.update_cookies(self.current_cookies)
self.set_proxy(self.current_proxy) | Creates a fresh session with the default header (random UA) |
377,746 | def yiq_to_rgb(y, i=None, q=None):
if type(y) in [list,tuple]:
y, i, q = y
r = y + (i * 0.9562) + (q * 0.6210)
g = y - (i * 0.2717) - (q * 0.6485)
b = y - (i * 1.1053) + (q * 1.7020)
return (r, g, b) | Convert the color from YIQ coordinates to RGB.
Parameters:
:y:
The Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '({}, {}, {})'.format(*[round(v, 6) for v in yiq_to_rgb(0.592263, 0.458874, -0.0499818)])
'(1.0, 0.5, 1e-06)' |
377,747 | def write_and_return(
command, ack, serial_connection, timeout=DEFAULT_WRITE_TIMEOUT):
clear_buffer(serial_connection)
with serial_with_temp_timeout(
serial_connection, timeout) as device_connection:
response = _write_to_device_and_return(command, ack, device_connection)
return response | Write a command and return the response |
377,748 | def _parse_args(self, args):
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs) | self.parser->self.parsed_data |
377,749 | def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec):
ast_type = ast.__class__.__name__
trigger_functions = rule.get("trigger_function", [])
trigger_types = rule.get("trigger_type", [])
rule_subject = rule.get("subject")
rule_relation = rule.get("relation")
rule_object = rule.get("object")
log.debug(f"Running {rule_relation} Type: {ast_type}")
if isinstance(ast, Function):
function_name = ast.name
args = ast.args
parent_function = ast.parent_function
if function_name in trigger_functions:
if rule_subject == "trigger_value":
subject = ast
if rule_object == "args":
for arg in args:
log.debug(f"1: {subject} {arg}")
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast)
elif rule_object == "parent_function" and parent_function:
log.debug(f"2: {subject} {parent_function}")
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast)
elif ast_type in trigger_types:
if rule_subject == "trigger_value":
subject = ast
if rule_object == "args":
for arg in args:
log.debug(f"3: {subject} {arg}")
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast)
elif rule_object == "parent_function" and parent_function:
log.debug(f"4: {subject} {parent_function}")
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast)
if isinstance(ast, NSArg):
term = "{}:{}".format(ast.namespace, ast.value)
parent_function = ast.parent_function
if ast_type in trigger_types:
if rule_subject == "trigger_value":
subject = term
if rule_object == "args":
for arg in args:
log.debug(f"5: {subject} {arg}")
edge_ast = BELAst(subject, rule_relation, arg, spec)
edges.append(edge_ast)
elif rule_object == "parent_function" and parent_function:
log.debug(f"6: {subject} {parent_function}")
edge_ast = BELAst(subject, rule_relation, parent_function, spec)
edges.append(edge_ast)
if hasattr(ast, "args"):
for arg in ast.args:
process_rule(edges, arg, rule, spec) | Process computed edge rule
Recursively processes BELAst versus a single computed edge rule
Args:
edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
ast (Function): BEL Function AST
rule (Mapping[str, Any]): computed edge rule |
377,750 | def get_polygon_pattern_rules(declarations, dirs):
property_map = {: , : ,
: , : ,
: , : }
property_names = property_map.keys()
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height \
= values.has_key() \
and post_process_symbolizer_image_file(str(values[].value), dirs) \
or (None, None, None, None)
poly_pattern_width = values.has_key() and values[].value or poly_pattern_width
poly_pattern_height = values.has_key() and values[].value or poly_pattern_height
symbolizer = poly_pattern_file and output.PolygonPatternSymbolizer(poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height)
if symbolizer:
rules.append(make_rule(filter, symbolizer))
return rules | Given a list of declarations, return a list of output.Rule objects.
Optionally provide an output directory for local copies of image files. |
377,751 | def _lines_only(shape):
lines = _explode_lines(shape)
if len(lines) == 1:
return lines[0]
else:
return MultiLineString(lines) | Extract the lines (LineString, MultiLineString) from any geometry. We
expect the input to be mostly lines, such as the result of an intersection
between a line and a polygon. The main idea is to remove points, and any
other geometry which might throw a wrench in the works. |
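A standalone sketch of the idea (shapely assumed; `_explode_lines` is not shown above, so this re-implements the filtering directly): keep only the linear parts of a mixed geometry and drop points:

```python
from shapely.geometry import GeometryCollection, LineString, MultiLineString, Point

def lines_only(shape):
    # Collect LineStrings from whatever an intersection produced; drop everything else.
    if isinstance(shape, LineString):
        lines = [shape]
    elif isinstance(shape, (MultiLineString, GeometryCollection)):
        lines = [g for g in shape.geoms if isinstance(g, LineString)]
    else:
        lines = []
    return lines[0] if len(lines) == 1 else MultiLineString(lines)

mixed = GeometryCollection([Point(0, 0), LineString([(0, 0), (1, 1)])])
print(lines_only(mixed).geom_type)   # LineString
```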
377,752 | def activate(self):
self.open()
a = lvm_lv_activate(self.handle)
self.close()
if a != 0:
raise CommitError("Failed to activate LV.") | Activates the logical volume.
*Raises:*
* HandleError |
377,753 | def get_emerg():
key = "emerg:{}".format(datetime.datetime.now().date())
cached = cache.get(key)
cached = None
if cached:
logger.debug("Returning emergency info from cache")
return cached
else:
result = get_emerg_result()
cache.set(key, result, timeout=settings.CACHE_AGE["emerg"])
return result | Get the cached FCPS emergency page, or check it again.
Timeout defined in settings.CACHE_AGE["emerg"] |
377,754 | def delete(self, membershipId):
check_type(membershipId, basestring, may_be_none=False)
self._session.delete(API_ENDPOINT + '/' + membershipId) | Delete a team membership, by ID.
Args:
membershipId(basestring): The team membership ID.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. |
377,755 | def fetch(self):
tlen = self.end - self.start
dfs = []
lrange = [x for x in (self.start + timedelta(n)
for n in range(tlen.days))]
mrange = []
for dt in lrange:
if datetime(dt.year, dt.month, 1) not in mrange:
mrange.append(datetime(dt.year, dt.month, 1))
lrange = mrange
for date in lrange:
self.curr_date = date
tdf = super(MonthlySummaryReader, self).fetch()
if self.output_format == 'pandas':
if not tdf.empty:
tdf['date'] = date.strftime(self.date_format)
dfs.append(tdf)
if self.output_format == 'pandas':
result = pd.concat(dfs) if len(dfs) > 0 else pd.DataFrame()
return result.set_index('date')
else:
return dfs | Unfortunately, IEX's API can only retrieve data one day or one month
at a time. Rather than specifying a date range, we will have to run
the read function for each date provided.
:return: DataFrame |
377,756 | def set_physical_page_for_file(self, pageId, ocrd_file, order=None, orderlabel=None):
for el_fptr in self._tree.getroot().findall(
%
ocrd_file.ID, namespaces=NS):
el_fptr.getparent().remove(el_fptr)
el_structmap = self._tree.getroot().find(, NS)
if el_structmap is None:
el_structmap = ET.SubElement(self._tree.getroot(), TAG_METS_STRUCTMAP)
el_structmap.set(, )
el_seqdiv = el_structmap.find(, NS)
if el_seqdiv is None:
el_seqdiv = ET.SubElement(el_structmap, TAG_METS_DIV)
el_seqdiv.set(, )
el_pagediv = el_seqdiv.find( % pageId, NS)
if el_pagediv is None:
el_pagediv = ET.SubElement(el_seqdiv, TAG_METS_DIV)
el_pagediv.set(, )
el_pagediv.set(, pageId)
if order:
el_pagediv.set(, order)
if orderlabel:
el_pagediv.set(, orderlabel)
el_fptr = ET.SubElement(el_pagediv, TAG_METS_FPTR)
el_fptr.set(, ocrd_file.ID) | Create a new physical page |
377,757 | def play(self):
if self.state == PygAnimation.PLAYING:
pass
elif self.state == PygAnimation.STOPPED:
self.index = 0
self.elapsed = 0
self.playingStartTime = time.time()
self.elapsedStopTime = self.endTimesList[-1]
self.nextElapsedThreshold = self.endTimesList[0]
self.nIterationsLeft = self.nTimes
elif self.state == PygAnimation.PAUSED:
self.playingStartTime = time.time() - self.elapsedAtPause
self.elapsed = self.elapsedAtPause
self.elapsedStopTime = self.endTimesList[-1]
self.nextElapsedThreshold = self.endTimesList[self.index]
self.state = PygAnimation.PLAYING | Starts an animation playing. |
377,758 | def get_dataset(self, dsid, info):
dsid_name = dsid.name
if dsid_name in self.cache:
logger.debug(, dsid_name)
return self.cache[dsid_name]
if dsid_name in [, ] and dsid_name not in self.nc:
dsid_name = dsid_name +
logger.debug(, dsid_name)
variable = self.nc[dsid_name]
variable = self.remove_timedim(variable)
variable = self.scale_dataset(dsid, variable, info)
if dsid_name.endswith():
self.upsample_geolocation(dsid, info)
return self.cache[dsid.name]
return variable | Load a dataset. |
377,759 | def _get_indices(num_results, sequence_indices, dtype, name=None):
with tf.compat.v1.name_scope(name, ,
[num_results, sequence_indices]):
if sequence_indices is None:
num_results = tf.cast(num_results, dtype=dtype)
sequence_indices = tf.range(num_results, dtype=dtype)
else:
sequence_indices = tf.cast(sequence_indices, dtype)
indices = sequence_indices + 1
return tf.reshape(indices, [-1, 1, 1]) | Generates starting points for the Halton sequence procedure.
The k'th element of the sequence is generated starting from a positive integer
which must be distinct for each `k`. It is conventional to choose the starting
point as `k` itself (or `k+1` if k is zero based). This function generates
the starting integers for the required elements and reshapes the result for
later use.
Args:
num_results: Positive scalar `Tensor` of dtype int32. The number of samples
to generate. If this parameter is supplied, then `sequence_indices`
should be None.
sequence_indices: `Tensor` of dtype int32 and rank 1. The entries
index into the Halton sequence starting with 0 and hence, must be whole
numbers. For example, sequence_indices=[0, 5, 6] will produce the first,
sixth and seventh elements of the sequence. If this parameter is not None
then `n` must be None.
dtype: The dtype of the sample. One of `float32` or `float64`.
Default is `float32`.
name: Python `str` name which describes ops created by this function.
Returns:
indices: `Tensor` of dtype `dtype` and shape = `[n, 1, 1]`. |
377,760 | def add_variable(self, variable, card=0):
if variable not in self.variables:
self.variables.append(variable)
else:
warn('Variable {var} already exists.'.format(var=variable))
self.cardinalities[variable] = card
self.transition_models[variable] = {} | Add a variable to the model.
Parameters:
-----------
variable: any hashable python object
card: int
Representing the cardinality of the variable to be added.
Examples:
---------
>>> from pgmpy.models import MarkovChain as MC
>>> model = MC()
>>> model.add_variable('x', 4) |
377,761 | def isdir(self):
if self.type == RAR_BLOCK_FILE:
return (self.flags & RAR_FILE_DIRECTORY) == RAR_FILE_DIRECTORY
return False | Returns True if entry is a directory. |
377,762 | def _split_classes_by_kind(self, class_name_to_definition):
for class_name in class_name_to_definition:
inheritance_set = self._inheritance_sets[class_name]
is_vertex = ORIENTDB_BASE_VERTEX_CLASS_NAME in inheritance_set
is_edge = ORIENTDB_BASE_EDGE_CLASS_NAME in inheritance_set
if is_vertex and is_edge:
raise AssertionError(u
u.format(class_name, inheritance_set))
elif is_vertex:
self._vertex_class_names.add(class_name)
elif is_edge:
self._edge_class_names.add(class_name)
else:
self._non_graph_class_names.add(class_name)
self._vertex_class_names = frozenset(self._vertex_class_names)
self._edge_class_names = frozenset(self._edge_class_names)
self._non_graph_class_names = frozenset(self._non_graph_class_names) | Assign each class to the vertex, edge or non-graph type sets based on its kind. |
377,763 | def rebuildDay( self ):
scene = self.scene()
if ( not scene ):
return
start_date = self.dateStart()
end_date = self.dateEnd()
min_date = scene.minimumDate()
max_date = scene.maximumDate()
if ( not (min_date <= end_date and start_date <= max_date)):
self.hide()
self.setPath(QPainterPath())
return
if ( start_date < min_date ):
start_date = min_date
start_inrange = False
else:
start_inrange = True
if ( max_date < end_date ):
end_date = max_date
end_inrange = False
else:
end_inrange = True
path = QPainterPath()
self.setPos(0, 0)
pad = 2
offset = 18
height = 16
if ( not self.isAllDay() ):
start_dtime = QDateTime(self.dateStart(), self.timeStart())
end_dtime = QDateTime(self.dateStart(),
self.timeEnd().addSecs(-30*60))
start_rect = scene.dateTimeRect(start_dtime)
end_rect = scene.dateTimeRect(end_dtime)
left = start_rect.left() + pad
top = start_rect.top() + pad
right = start_rect.right() - pad
bottom = end_rect.bottom() - pad
path.moveTo(left, top)
path.lineTo(right, top)
path.lineTo(right, bottom)
path.lineTo(left, bottom)
path.lineTo(left, top)
data = (left + 6,
top + 6,
right - left - 12,
bottom - top - 12,
Qt.AlignTop | Qt.AlignLeft,
% (self.timeStart().toString()[:-1],
self.timeEnd().toString(),
self.title()))
self._textData.append(data)
self.setPath(path)
self.show() | Rebuilds the current item in day mode. |
377,764 | def get_connection(self, command_name, *keys, **options):
self._checkpid()
try:
connection = self._available_connections[self._pattern_idx].pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections[self._pattern_idx].add(connection)
self._next_pattern()
return connection | Get a connection from the pool |
377,765 | def enrich_backend(url, clean, backend_name, backend_params, cfg_section_name,
ocean_index=None,
ocean_index_enrich=None,
db_projects_map=None, json_projects_map=None,
db_sortinghat=None,
no_incremental=False, only_identities=False,
github_token=None, studies=False, only_studies=False,
url_enrich=None, events_enrich=False,
db_user=None, db_password=None, db_host=None,
do_refresh_projects=False, do_refresh_identities=False,
author_id=None, author_uuid=None, filter_raw=None,
filters_raw_prefix=None, jenkins_rename_file=None,
unaffiliated_group=None, pair_programming=False,
node_regex=False, studies_args=None, es_enrich_aliases=None,
last_enrich_date=None, projects_json_repo=None):
backend = None
enrich_index = None
if ocean_index or ocean_index_enrich:
clean = False
elastic_ocean = get_elastic(url, ocean_index, clean, ocean_backend)
ocean_backend.set_elastic(elastic_ocean)
logger.info("Adding enrichment data to %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
if db_sortinghat and enrich_backend.has_identities():
total_ids = load_identities(ocean_backend, enrich_backend)
logger.info("Total identities loaded %i ", total_ids)
if only_identities:
logger.info("Only SH identities added. Enrich not done!")
else:
if not events_enrich:
enrich_count = enrich_items(ocean_backend, enrich_backend)
if enrich_count is not None:
logger.info("Total items enriched %i ", enrich_count)
else:
enrich_count = enrich_items(ocean_backend, enrich_backend, events=True)
if enrich_count is not None:
logger.info("Total events enriched %i ", enrich_count)
if studies:
do_studies(ocean_backend, enrich_backend, studies_args)
except Exception as ex:
if backend:
logger.error("Error enriching ocean from %s (%s): %s",
backend_name, backend.origin, ex, exc_info=True)
else:
logger.error("Error enriching ocean %s", ex, exc_info=True)
logger.info("Done %s ", backend_name) | Enrich Ocean index |
377,766 | def get_sources(self, skydir=None, distance=None, cuts=None,
minmax_ts=None, minmax_npred=None,
exclude=None, square=False, coordsys=,
names=None):
if skydir is None:
skydir = self.skydir
if exclude is None:
exclude = []
rsrc, srcs = self.get_sources_by_position(skydir,
distance,
square=square,
coordsys=coordsys)
o = []
for s in srcs + self.diffuse_sources:
if names and s.name not in names:
continue
if s.name in exclude:
continue
if not s.check_cuts(cuts):
continue
ts = s['ts']
npred = s['npred']
if not utils.apply_minmax_selection(ts, minmax_ts):
continue
if not utils.apply_minmax_selection(npred, minmax_npred):
continue
o.append(s)
return o | Retrieve list of source objects satisfying the following
selections:
* Angular separation from ``skydir`` or ROI center (if
``skydir`` is None) less than ``distance``.
* Cuts on source properties defined in ``cuts`` list.
* TS and Npred in range specified by ``minmax_ts`` and ``minmax_npred``.
* Name matching a value in ``names``
Sources can be excluded from the selection by adding their
name to the ``exclude`` list.
Returns
-------
srcs : list
List of source objects. |
377,767 | def initialize(self, params, qubits):
if isinstance(qubits, QuantumRegister):
qubits = qubits[:]
else:
qubits = _convert_to_bits([qubits], [qbit for qreg in self.qregs for qbit in qreg])[0]
return self.append(Initialize(params), qubits) | Apply initialize to circuit. |
377,768 | def remove_root_bin(self, bin_id):
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=bin_id)
return self._hierarchy_session.remove_root(id_=bin_id) | Removes a root bin.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
raise: NotFound - ``bin_id`` not a root
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
377,769 | def _get_overlaps_tensor(self, L):
n, m = L.shape
LY = np.array([np.where(L == y, 1, 0) for y in range(self.k_0, self.k + 1)])
O = np.einsum("abc,dbe,fbg->cegadf", LY, LY, LY) / n
return torch.from_numpy(O).float() | Transforms the input label matrix to a three-way overlaps tensor.
Args:
L: (np.array) An n x m array of LF output labels, in {0,...,k} if
self.abstains, else in {1,...,k}, generated by m conditionally
independent LFs on n data points
Outputs:
O: (torch.Tensor) A (m, m, m, k, k, k) tensor of the label-specific
empirical overlap rates; that is,
O[i,j,k,y1,y2,y3] = P(\lf_i = y1, \lf_j = y2, \lf_k = y3)
where this quantity is computed empirically by this function, based
on the label matrix L. |
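A minimal numpy sketch of the same trick with illustrative shapes: one-hot encode the label matrix per class, then use einsum to estimate the three-way overlap rates:

```python
import numpy as np

n, m, k = 100, 3, 2                        # data points, labeling functions, classes
L = np.random.randint(1, k + 1, (n, m))    # labels in {1, ..., k}

LY = np.array([(L == y).astype(float) for y in range(1, k + 1)])   # (k, n, m) one-hot
# O[i, j, l, y1, y2, y3] ~= P(lf_i = y1, lf_j = y2, lf_l = y3), estimated from L
O = np.einsum("abc,dbe,fbg->cegadf", LY, LY, LY) / n
print(O.shape)   # (3, 3, 3, 2, 2, 2)
```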
377,770 | def _members(self):
return {
key: value
for key, value in self.__dict__.items()
if not key.startswith("_") and not isinstance(value, Model)
} | Return a dict of non-private members. |
377,771 | def execute_command(self, command):
self.info_log("executing command: %s" % command)
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
k = paramiko.RSAKey.from_private_key_file(
self.browser_config.get()
)
ssh.connect(
self.private_ip,
username=self.browser_config.get(),
pkey=k
)
sleep_time = 0.1
stdout = []
stderr = []
ssh_transport = ssh.get_transport()
channel = ssh_transport.open_session()
channel.setblocking(0)
channel.exec_command(command)
while True:
while channel.recv_ready():
stdout.append(channel.recv(1000))
while channel.recv_stderr_ready():
stderr.append(channel.recv_stderr(1000))
if channel.exit_status_ready():
break
sleep(sleep_time)
ssh_transport.close()
ssh.close()
return b''.join(stdout), b''.join(stderr)
except Exception as e:
msg = "Execute_command exception: %s" % str(e)
self.error_log(msg)
raise Exception(msg) | Execute a command on the node
Args:
command (str) |
377,772 | def spliced_offset(self, position):
assert type(position) == int, \
"Position argument must be an integer, got %s : %s" % (
position, type(position))
if position < self.start or position > self.end:
raise ValueError(
"Invalid position: %d (must be between %d and %d)" % (
position,
self.start,
self.end))
unspliced_offset = self.offset(position)
total_spliced_offset = 0
for exon in self.exons:
exon_unspliced_start, exon_unspliced_end = self.offset_range(
exon.start, exon.end)
if exon_unspliced_start <= unspliced_offset <= exon_unspliced_end:
exon_offset = unspliced_offset - exon_unspliced_start
return total_spliced_offset + exon_offset
else:
exon_length = len(exon)
total_spliced_offset += exon_length
raise ValueError(
"Couldn't find position %d on any exon of %s" % (
position, self.id)) | Convert from an absolute chromosomal position to the offset into
this transcript's spliced mRNA.
Position must be inside some exon (otherwise raise exception). |
377,773 | def _parse_ical_string(ical_string):
start_time = ical_string.splitlines()[0].replace(DTSTART, '')
if "RRULE" in ical_string:
days = ical_string.splitlines()[1].replace(REPEAT, '')
if days == "RRULE:FREQ=DAILY":
days = []
else:
days = days.split(',')
else:
days = None
start_time = start_time.splitlines()[0].split(':')[1]
datetime_object = datetime.strptime(start_time, '%Y%m%dT%H%M%S')
return datetime_object, days | SU,MO,TU,WE,TH,FR,SA
DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=SA
DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=DAILY
DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA
DTSTART;TZID=America/New_York:20180718T174500 |
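A hedged, standard-library-only sketch of parsing one of the DTSTART strings listed above; the `%Y%m%dT%H%M%S` format is inferred from the `20180804T233251` examples:

```python
from datetime import datetime

ical = "DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=MO,TU"
start = ical.splitlines()[0].split(":")[1]                # '20180804T233251'
days = ical.splitlines()[1].split("BYDAY=")[1].split(",") if "BYDAY" in ical else None
print(datetime.strptime(start, "%Y%m%dT%H%M%S"), days)
```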
377,774 | def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs):
if not isinstance(L, Operator):
raise TypeError(
.format(L))
if x not in L.domain:
raise OpDomainError(
.format(x, L.domain))
tau, tau_in = float(tau), tau
if tau <= 0:
raise ValueError(.format(tau_in))
sigma, sigma_in = float(sigma), sigma
if sigma <= 0:
raise ValueError(.format(sigma_in))
niter, niter_in = int(niter), niter
if niter < 0 or niter != niter_in:
raise ValueError(
.format(niter_in))
callback = kwargs.pop(, None)
if callback is not None and not callable(callback):
raise TypeError(.format(callback))
z = L.range.zero()
u = L.range.zero()
tmp_ran = L(x)
tmp_dom = L.domain.element()
prox_tau_f = f.proximal(tau)
prox_sigma_g = g.proximal(sigma)
for _ in range(niter):
tmp_ran += u
tmp_ran -= z
L.adjoint(tmp_ran, out=tmp_dom)
x.lincomb(1, x, -tau / sigma, tmp_dom)
prox_tau_f(x, out=x)
L(x, out=tmp_ran)
prox_sigma_g(tmp_ran + u, out=z)
u += tmp_ran
u -= z
if callback is not None:
callback(x) | Generic linearized ADMM method for convex problems.
ADMM stands for "Alternating Direction Method of Multipliers" and
is a popular convex optimization method. This variant solves problems
of the form ::
min_x [ f(x) + g(Lx) ]
with convex ``f`` and ``g``, and a linear operator ``L``. See Section
4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_
and the Notes for more mathematical details.
Parameters
----------
x : ``L.domain`` element
Starting point of the iteration, updated in-place.
f, g : `Functional`
The functions ``f`` and ``g`` in the problem definition. They
need to implement the ``proximal`` method.
L : linear `Operator`
The linear operator that is composed with ``g`` in the problem
definition. It must fulfill ``L.domain == f.domain`` and
``L.range == g.domain``.
tau, sigma : positive float
Step size parameters for the update of the variables.
niter : non-negative int
Number of iterations.
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
Notes
-----
Given :math:`x^{(0)}` (the provided ``x``) and
:math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following
iteration:
.. math::
x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[
x^{(k)} - \sigma^{-1}\tau L^*\big(
L x^{(k)} - z^{(k)} + u^{(k)}
\big)
\right]
z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left(
L x^{(k+1)} + u^{(k)}
\right)
u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)}
The step size parameters :math:`\tau` and :math:`\sigma` must satisfy
.. math::
0 < \tau < \frac{\sigma}{\|L\|^2}
to guarantee convergence.
The name "linearized ADMM" comes from the fact that in the
minimization subproblem for the :math:`x` variable, this variant
uses a linearization of a quadratic term in the augmented Lagrangian
of the generic ADMM, in order to make the step expressible with
the proximal operator of :math:`f`.
Another name for this algorithm is *split inexact Uzawa method*.
References
----------
[PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and
Trends in Optimization, 1(3) (2014), pp 123-231. |
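A compact numpy sketch of the same iteration on a toy problem, with f(x) = ||x||_1 (prox is soft-thresholding), g(z) = 0.5||z - b||^2, and L a small dense matrix; the step sizes satisfy 0 < tau < sigma / ||L||^2. Names here are illustrative and this is not the ODL API used above:

```python
import numpy as np

rng = np.random.default_rng(0)
L = rng.normal(size=(5, 3))
b = rng.normal(size=5)

sigma = 1.0
tau = 0.9 * sigma / np.linalg.norm(L, 2) ** 2            # 0 < tau < sigma / ||L||^2

prox_f = lambda v, t: np.sign(v) * np.maximum(np.abs(v) - t, 0)   # prox of t * ||.||_1
prox_g = lambda v, s: (v + s * b) / (1 + s)                       # prox of s * 0.5 * ||. - b||^2

x, z, u = np.zeros(3), np.zeros(5), np.zeros(5)
for _ in range(200):
    x = prox_f(x - (tau / sigma) * L.T @ (L @ x - z + u), tau)
    z = prox_g(L @ x + u, sigma)
    u = u + L @ x - z
print(x)
```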
377,775 | def com_google_fonts_check_production_glyphs_similarity(ttFont, api_gfonts_ttFont):
def glyphs_surface_area(ttFont):
from fontTools.pens.areaPen import AreaPen
glyphs = {}
glyph_set = ttFont.getGlyphSet()
area_pen = AreaPen(glyph_set)
for glyph in glyph_set.keys():
glyph_set[glyph].draw(area_pen)
area = area_pen.value
area_pen.value = 0
glyphs[glyph] = area
return glyphs
bad_glyphs = []
these_glyphs = glyphs_surface_area(ttFont)
gfonts_glyphs = glyphs_surface_area(api_gfonts_ttFont)
shared_glyphs = set(these_glyphs) & set(gfonts_glyphs)
this_upm = ttFont['head'].unitsPerEm
gfonts_upm = api_gfonts_ttFont['head'].unitsPerEm
for glyph in shared_glyphs:
this_glyph_area = (these_glyphs[glyph] / this_upm) * gfonts_upm
gfont_glyph_area = (gfonts_glyphs[glyph] / gfonts_upm) * this_upm
if abs(this_glyph_area - gfont_glyph_area) > 7000:
bad_glyphs.append(glyph)
if bad_glyphs:
yield WARN, ("Following glyphs differ greatly from"
" Google Fonts version: [{}]").format(", ".join(bad_glyphs))
else:
yield PASS, ("Glyphs are similar in"
" comparison to the Google Fonts version.") | Glyphs are similiar to Google Fonts version? |
377,776 | def backpropagate_3d_tilted(uSin, angles, res, nm, lD=0,
tilted_axis=[0, 1, 0],
coords=None, weight_angles=True, onlyreal=False,
padding=(True, True), padfac=1.75, padval=None,
intp_order=2, dtype=None,
num_cores=_ncores,
save_memory=False,
copy=True,
count=None, max_count=None,
verbose=0):
r
A = angles.shape[0]
if angles.shape not in [(A,), (A, 1), (A, 3)]:
raise ValueError("`angles` must have shape (A,) or (A,3)!")
if len(uSin.shape) != 3:
raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
if len(uSin) != A:
raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
if len(list(padding)) != 2:
raise ValueError("`padding` must be boolean tuple of length 2!")
if np.array(padding).dtype is not np.dtype(bool):
raise ValueError("Parameter `padding` must be boolean tuple.")
if coords is not None:
raise NotImplementedError("Setting coordinates is not yet supported.")
if num_cores > _ncores:
raise ValueError("`num_cores` must not exceed number "
+ "of physical cores: {}".format(_ncores))
if dtype is None:
dtype = np.float_
dtype = np.dtype(dtype)
if dtype.name not in ["float32", "float64"]:
raise ValueError("dtype must be float32 or float64!")
dtype_complex = np.dtype("complex{}".format(
2 * int(dtype.name.strip("float"))))
if max_count is not None:
max_count.value += A + 2
ne.set_num_threads(num_cores)
uSin = np.array(uSin, copy=copy)
angles = np.array(angles, copy=copy)
angles = np.squeeze(angles)
lny, lnx = uSin.shape[1], uSin.shape[2]
ln = lnx
orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
if padding[0]:
padx = orderx - lnx
else:
padx = 0
if padding[1]:
pady = ordery - lny
else:
pady = 0
padyl = np.int(np.ceil(pady / 2))
padyr = pady - padyl
padxl = np.int(np.ceil(padx / 2))
padxr = padx - padxl
lNx, lNy = lnx + padx, lny + pady
lNz = ln
if verbose > 0:
print("......Image size (x,y): {}x{}, padded: {}x{}".format(
lnx, lny, lNx, lNy))
tilted_axis = norm_vec(tilted_axis)
angz = np.arctan2(tilted_axis[0], tilted_axis[1])
rotmat = np.array([
[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1],
])
tilted_axis_yz = norm_vec(np.dot(rotmat, tilted_axis))
if len(angles.shape) == 1:
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
angles = sphere_points_from_angles_and_tilt(angles, tilted_axis_yz)
else:
if weight_angles:
warnings.warn("3D angular weighting not yet supported!")
weights = 1
for ii in range(angles.shape[0]):
angles[ii] = norm_vec(np.dot(rotmat, angles[ii]))
if weight_angles:
uSin *= weights
km = (2 * np.pi * nm) / res
fx = np.fft.fftfreq(lNx)
fy = np.fft.fftfreq(lNy)
kx = 2 * np.pi * fx
ky = 2 * np.pi * fy
dphi0 = 2 * np.pi / A
kx = kx.reshape(1, -1)
ky = ky.reshape(-1, 1)
filter_klp = (kx**2 + ky**2 < km**2)
M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
u, v, _w = tilted_axis
filterabs = np.abs(kx*v+ky*u) * filter_klp
prefactor *= np.exp(-1j * km * (M-1) * lD)
if count is not None:
count.value += 1
center = lNz / 2.0
z = np.linspace(-center, center, lNz, endpoint=False)
zv = z.reshape(-1, 1, 1)
Mp = M.reshape(lNy, lNx)
f2_exp_fac = 1j * km * (Mp - 1)
if save_memory:
pass
else:
filter2 = ne.evaluate("exp(factor * zv)",
local_dict={"factor": f2_exp_fac,
"zv": zv})
if count is not None:
count.value += 1
if onlyreal:
outarr = np.zeros((ln, lny, lnx), dtype=dtype)
else:
outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
flags=["FFTW_ESTIMATE"], axes=(0, 1))
inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
axes=(0, 1),
direction="FFTW_BACKWARD",
flags=["FFTW_MEASURE"])
filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
angles = rotate_points_to_axis(points=angles, axis=tilted_axis_yz)
for aa in np.arange(A):
if padval is None:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="edge")
else:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="linear_ramp",
end_values=(padval,))
myfftw_plan.execute()
oneslice *= filterabs * prefactor / (lNx * lNy)
for p in range(len(zv)):
if save_memory:
ne.evaluate("exp(factor * zvp) * projectioni",
local_dict={"zvp": zv[p],
"projectioni": oneslice,
"factor": f2_exp_fac},
out=inarr)
else:
np.multiply(filter2[p], oneslice, out=inarr)
myifftw_plan.execute()
filtered_proj[p, :, :] = inarr[padyl:padyl+lny, padxl:padxl+lnx]
fil_p_t = filtered_proj.transpose(2, 1, 0)[:, ::-1, :]
_drot, drotinv = rotation_matrix_from_point_planerot(angles[aa],
plane_angle=angz,
ret_inv=True)
c = 0.5 * np.array(fil_p_t.shape) - .5
offset = c - np.dot(drotinv, c)
outarr.real += scipy.ndimage.interpolation.affine_transform(
fil_p_t.real, drotinv,
offset=offset,
mode="constant",
cval=0,
order=intp_order).transpose(2, 1, 0)[:, ::-1, :]
if not onlyreal:
outarr.imag += scipy.ndimage.interpolation.affine_transform(
fil_p_t.imag, drotinv,
offset=offset,
mode="constant",
cval=0,
order=intp_order).transpose(2, 1, 0)[:, ::-1, :]
if count is not None:
count.value += 1
return outarr | r"""3D backpropagation with a tilted axis of rotation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm with
a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}`
w.r.t. the imaging plane :cite:`Mueller2015tilted`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}`
and a different filter in Fourier space
:math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared
to :func:`backpropagate_3d`.
.. versionadded:: 0.1.2
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: ndarray of shape (A,3) or 1D array of length A
If the shape is (A,3), then `angles` consists of vectors
on the unit sphere that correspond to the direction
of illumination and acquisition (s₀). If the shape is (A,),
then `angles` is a one-dimensional array of angles in radians
that determines the angular position :math:`\phi_j`.
In both cases, `tilted_axis` must be set according to the
tilt of the rotational axis.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
tilted_axis: list of floats
The coordinates [x, y, z] on a unit sphere representing the
tilted axis of rotation. The default is (0,1,0),
which corresponds to a rotation about the y-axis and
follows the behavior of :func:`odtbrain.backpropagate_3d`.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
This currently only works when `angles` has the shape (A,).
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
        size of 256 for an initial size of 144, whereas it will
        lead to a padded size of 512 for an initial size of 150.
        Values greater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
        approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.affine_transform` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
This implementation can deal with projection angles that are not
distributed along a circle about the rotational axis. If there are
slight deviations from this circle, simply pass the 3D rotational
positions instead of the 1D angles to the `angles` argument. In
principle, this should improve the reconstruction. The general
problem here is that the backpropagation algorithm requires a
ramp filter in Fourier space that is oriented perpendicular to the
rotational axis. If the sample does not rotate about a single axis,
then a 1D parametric representation of this rotation must be found
to correctly determine the filter in Fourier space. Such a
parametric representation could e.g. be a spiral between the poles
of the unit sphere (but this kind of rotation is probably difficult
to implement experimentally).
If you have input images with rectangular shape, e.g. Nx!=Ny and
the rotational axis deviates by approximately PI/2 from the axis
(0,1,0), then data might get cropped in the reconstruction volume.
You can avoid that by rotating your input data and the rotational
    axis by PI/2. For instance, change `tilted_axis` from [1,0,0] to
[0,1,0] and `np.rot90` the sinogram images.
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`). |
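A usage sketch, assuming the function is exposed as `odtbrain.backpropagate_3d_tilted`; the sinogram below is a placeholder, and the wavelength/index values are illustrative.
import numpy as np
import odtbrain as odt

A, Ny, Nx = 60, 64, 64
angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
# Placeholder sinogram; in practice this is the measured field divided by u_0(l_D)
uSin = np.ones((A, Ny, Nx), dtype=complex)

f = odt.backpropagate_3d_tilted(uSin, angles, res=3.5, nm=1.333,
                                tilted_axis=[0, 1, 0.1], onlyreal=True)
n = odt.odt_to_ri(f, res=3.5, nm=1.333)   # convert object function to refractive index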
377,777 | def damping_kraus_map(p=0.10):
damping_op = np.sqrt(p) * np.array([[0, 1],
[0, 0]])
residual_kraus = np.diag([1, np.sqrt(1 - p)])
return [residual_kraus, damping_op] | Generate the Kraus operators corresponding to an amplitude damping
noise channel.
:param float p: The one-step damping probability.
:return: A list [k1, k2] of the Kraus operators that parametrize the map.
:rtype: list |
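A quick self-contained check (restating the function from above) that the Kraus operators are complete and that the excited-state population decays by p.
import numpy as np

def damping_kraus_map(p=0.10):
    damping_op = np.sqrt(p) * np.array([[0, 1],
                                        [0, 0]])
    residual_kraus = np.diag([1, np.sqrt(1 - p)])
    return [residual_kraus, damping_op]

ks = damping_kraus_map(p=0.25)
# Trace preservation: sum_k K_k^dagger K_k must equal the identity
assert np.allclose(sum(k.conj().T @ k for k in ks), np.eye(2))

# Acting on |1><1|, the excited population drops from 1 to 1 - p
rho = np.array([[0.0, 0.0], [0.0, 1.0]])
rho_out = sum(k @ rho @ k.conj().T for k in ks)
assert np.allclose(np.diag(rho_out), [0.25, 0.75])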
377,778 | def trimSegments(self, minPermanence=None, minNumSyns=None):
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
totalSegsRemoved, totalSynsRemoved = 0, 0
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self._trimSegmentsInCell(
colIdx=c, cellIdx=i, segList=self.cells[c][i],
minPermanence=minPermanence, minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
if self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
return totalSegsRemoved, totalSynsRemoved | This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
:param minPermanence: (float) Any syn whose permanence is 0 or <
``minPermanence`` will be deleted. If None is passed in, then
``self.connectedPerm`` is used.
:param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses
remaining in it will be deleted. If None is passed in, then
``self.activationThreshold`` is used.
:returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved`` |
377,779 | def to_holvi_dict(self):
self._jsondata["items"] = []
for item in self.items:
self._jsondata["items"].append(item.to_holvi_dict())
self._jsondata["issue_date"] = self.issue_date.isoformat()
self._jsondata["due_date"] = self.due_date.isoformat()
self._jsondata["receiver"] = self.receiver.to_holvi_dict()
return {k: v for (k, v) in self._jsondata.items() if k in self._valid_keys} | Convert our Python object to JSON acceptable to Holvi API |
377,780 | def build_ast(expression, debug = False):
G = DiGraph()
stack = []
for n in expression:
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
                if n.tvalue == ':':
                    if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
                        arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() | build an AST from an Excel formula expression in reverse polish notation |
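The same stack discipline, stripped of the Excel-specific node classes, can be sketched with plain (symbol, arity) tokens; the token format below is an illustrative assumption, and networkx's DiGraph is used as in the snippet above.
from networkx import DiGraph

def rpn_to_graph(tokens):
    # Each token is (symbol, num_args); operands have num_args == 0.
    G = DiGraph()
    stack = []
    for i, (symbol, num_args) in enumerate(tokens):
        node = (i, symbol)                       # unique node even if symbols repeat
        args = [stack.pop() for _ in range(num_args)][::-1]
        for pos, a in enumerate(args):
            G.add_node(a, pos=pos)
            G.add_edge(a, node)
        stack.append(node)
    return G, stack.pop()

# "A1 + SUM(B1, B2)" in reverse polish notation
G, root = rpn_to_graph([("A1", 0), ("B1", 0), ("B2", 0), ("SUM", 2), ("+", 2)])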
377,781 | def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False,
start=None, stop=None, step=None):
    assert sortby is not None, 'no column specified to sort by'
return HDF5SortedView(source, where=where, name=name,
sortby=sortby, checkCSI=checkCSI,
start=start, stop=stop, step=step) | Provides access to an HDF5 table, sorted by an indexed column, e.g.::
>>> import petl as etl
>>> import tables
>>> # set up a new hdf5 table to demonstrate with
... h5file = tables.open_file('example.h5', mode='w', title='Test file')
>>> h5file.create_group('/', 'testgroup', 'Test Group')
/testgroup (Group) 'Test Group'
children := []
>>> class FooBar(tables.IsDescription):
... foo = tables.Int32Col(pos=0)
... bar = tables.StringCol(6, pos=2)
...
>>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table')
>>> # load some data into the table
... table1 = (('foo', 'bar'),
... (3, b'asdfgh'),
... (2, b'qwerty'),
... (1, b'zxcvbn'))
>>> for row in table1[1:]:
... for i, f in enumerate(table1[0]):
... h5table.row[f] = row[i]
... h5table.row.append()
...
>>> h5table.cols.foo.create_csindex() # CS index is required
0
>>> h5file.flush()
>>> h5file.close()
>>> #
... # access the data, sorted by the indexed column
... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable',
... sortby='foo')
>>> table2
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'zxcvbn' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'asdfgh' |
+-----+-----------+ |
377,782 | def argparser(self):
core_parser = self.core_parser
core_parser.add_argument(, , type=str, help="The range to search for use")
return core_parser | Argparser option with search functionality specific for ranges. |
377,783 | def update_service(name, service_map):
if name in service_map:
service = service_map[name]
data = service.update()
if not data:
            logger.warning('no data returned from service: %s', name)
        else:
            data['service_name'] = service.service_name
            CACHE[name] = dict(data=data, updated=datetime.now())
    else:
        logger.warning('service not found: %s', name)
if name in CACHE:
return add_time(CACHE[name])
return {} | Get an update from the specified service.
Arguments:
name (:py:class:`str`): The name of the service.
service_map (:py:class:`dict`): A mapping of service names to
:py:class:`flash.service.core.Service` instances.
Returns:
:py:class:`dict`: The updated data. |
377,784 | def _set_general_compilers(self):
for c, c_info in self.compilers.items():
compiler_cls = c_info["cls"](template=c_info["template"])
c_info["channels"] = []
for p in self.processes:
if not any([isinstance(p, x) for x in self.skip_class]):
if c in p.compiler:
channels = ["{}_{}".format(i, p.pid) for i in
p.compiler[c]]
c_info["channels"].extend(channels)
if c_info["channels"]:
compiler_cls.set_compiler_channels(c_info["channels"],
operator="join")
self.processes.append(compiler_cls) | Adds compiler channels to the :attr:`processes` attribute.
This method will iterate over the pipeline's processes and check
if any process is feeding channels to a compiler process. If so, that
compiler process is added to the pipeline and those channels are
linked to the compiler via some operator. |
377,785 | def github_repos(organization, github_url, github_token):
headers = {"Authorization": "token {}".format(github_token)}
next_cursor = None
while next_cursor is not False:
        params = {'query': query, 'variables': {
            'organization': organization, 'cursor': next_cursor}}
        response = requests.post(github_url, headers=headers, json=params)
        result = response.json()
        if response.status_code != 200 or 'errors' in result:
            raise ValueError("Github api error %s" % (
                response.content.decode(),))
        # response paths reconstructed to match the pagination handling below
        repos = jmespath.search(
            'data.organization.repositories.edges[].node', result)
        for r in repos:
            yield r
        page_info = jmespath.search(
            'data.organization.repositories.pageInfo', result)
        if page_info:
            next_cursor = (page_info['hasNextPage'] and
                           page_info['endCursor'] or False)
else:
next_cursor = False | Return all github repositories in an organization. |
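The `query` global used above is not shown in this snippet. A plausible GitHub GraphQL query matching the pagination logic could look like the following; the exact query in the original source is an assumption, though the field names (`pageInfo`, `hasNextPage`, `endCursor`) follow the public GitHub GraphQL schema.
query = """
query($organization: String!, $cursor: String) {
  organization(login: $organization) {
    repositories(first: 100, after: $cursor) {
      pageInfo { hasNextPage endCursor }
      edges { node { name url } }
    }
  }
}
"""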
377,786 | def cublasZgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
x, incx, beta, y, incy):
status = _libcublas.cublasZgbmv_v2(handle,
trans, m, n, kl, ku,
ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda, int(x), incx,
ctypes.byref(cuda.cuDoubleComplex(beta.real,
beta.imag)),
int(y), incy)
cublasCheckStatus(status) | Matrix-vector product for complex general banded matrix. |
377,787 | def caesar(shift, data, shift_ranges=('az', 'AZ')):
alphabet = dict(
(chr(c), chr((c - s + shift) % (e - s + 1) + s))
for s, e in map(lambda r: (ord(r[0]), ord(r[-1])), shift_ranges)
for c in range(s, e + 1)
)
    return ''.join(alphabet.get(c, c) for c in data) | Apply a caesar cipher to a string.
    The caesar cipher is a substitution cipher where each letter in the given
alphabet is replaced by a letter some fixed number down the alphabet.
If ``shift`` is ``1``, *A* will become *B*, *B* will become *C*, etc...
    You can define the alphabets that will be shifted by specifying one or more
    shift ranges. The characters will then be shifted within the given ranges.
Args:
shift(int): The shift to apply.
data(str): The string to apply the cipher to.
shift_ranges(list of str): Which alphabets to shift.
Returns:
str: The string with the caesar cipher applied.
Examples:
>>> caesar(16, 'Pwnypack')
'Fmdofqsa'
>>> caesar(-16, 'Fmdofqsa')
'Pwnypack'
>>> caesar(16, 'PWNYpack', shift_ranges=('AZ',))
'FMDOpack'
>>> caesar(16, 'PWNYpack', shift_ranges=('Az',))
'`g^iFqsA' |
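With the default ranges, a shift of 13 gives ROT13, which is its own inverse; restating the function from above so the snippet is self-contained:
def caesar(shift, data, shift_ranges=('az', 'AZ')):
    alphabet = dict(
        (chr(c), chr((c - s + shift) % (e - s + 1) + s))
        for s, e in map(lambda r: (ord(r[0]), ord(r[-1])), shift_ranges)
        for c in range(s, e + 1)
    )
    return ''.join(alphabet.get(c, c) for c in data)

# Applying a shift of 13 twice restores the original text
assert caesar(13, caesar(13, "Attack at dawn!")) == "Attack at dawn!"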
377,788 | def check_file_list_cache(opts, form, list_cache, w_lock):
refresh_cache = False
save_cache = True
serial = salt.payload.Serial(opts)
wait_lock(w_lock, list_cache, 5 * 60)
if not os.path.isfile(list_cache) and _lock_cache(w_lock):
refresh_cache = True
else:
attempt = 0
while attempt < 11:
try:
if os.path.exists(w_lock):
wait_lock(w_lock, list_cache, 15 * 60)
if os.path.exists(list_cache):
cache_stat = os.stat(list_cache)
current_time = int(time.time())
file_mtime = int(cache_stat.st_mtime)
if file_mtime > current_time:
                        log.debug(
                            'Modified time of file list cache %s is in the future '
                            '(current time %s, mtime %s); treating cache age as 0',
                            list_cache, current_time, file_mtime
                        )
                        age = 0
                    else:
                        age = current_time - file_mtime
                else:
                    age = opts.get('fileserver_list_cache_time', 20) + 1
                if age < 0:
                    log.warning('Negative file list cache age; this should not happen')
                if 0 <= age < opts.get('fileserver_list_cache_time', 20):
                    with salt.utils.files.fopen(list_cache, 'rb') as fp_:
                        log.debug(
                            "Returning file list from cache: age=%s cache_time=%s %s",
                            age, opts.get('fileserver_list_cache_time', 20), list_cache
)
return salt.utils.data.decode(serial.load(fp_).get(form, [])), False, False
elif _lock_cache(w_lock):
refresh_cache = True
break
except Exception:
time.sleep(0.2)
attempt += 1
continue
if attempt > 10:
save_cache = False
refresh_cache = True
return None, refresh_cache, save_cache | Checks the cache file to see if there is a new enough file list cache, and
returns the match (if found, along with booleans used by the fileserver
backend to determine if the cache needs to be refreshed/written). |
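The freshness test at the heart of this function reduces to comparing the cache file's mtime against a configured time-to-live; a simplified standalone sketch (the 20-second default mirrors the opts.get(..., 20) calls above).
import os
import time

def cache_is_fresh(path, ttl_seconds=20):
    # Fresh = file exists and its age (now - mtime) is non-negative and below the TTL
    try:
        age = time.time() - os.stat(path).st_mtime
    except OSError:
        return False
    return 0 <= age < ttl_seconds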
377,789 | def get_random(self, size=10):
bin_centers_ravel = np.array(np.meshgrid(*self.bin_centers(),
                                             indexing='ij')).reshape(self.dimensions, -1).T
hist_ravel = self.histogram.ravel()
hist_ravel = hist_ravel.astype(np.float) / np.nansum(hist_ravel)
result = bin_centers_ravel[np.random.choice(len(bin_centers_ravel),
p=hist_ravel,
size=size)]
for dim_i in range(self.dimensions):
bin_edges = self.bin_edges[dim_i]
bin_widths = np.diff(bin_edges)
index_of_bin = np.searchsorted(bin_edges, result[:, dim_i]) - 1
result[:, dim_i] += (np.random.rand(size) - 0.5) * bin_widths[index_of_bin]
return result | Returns (size, n_dim) array of random variates from the histogram.
Inside the bins, a uniform distribution is assumed
Note this assumes the histogram is an 'events per bin', not a pdf.
TODO: test more. |
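The same resampling idea in one dimension, independent of the histogram class above: pick bins in proportion to their counts, then jitter uniformly inside each chosen bin.
import numpy as np

rng = np.random.default_rng(42)
counts, edges = np.histogram(rng.normal(size=10_000), bins=50)

p = counts / counts.sum()                        # per-bin probabilities
centers = 0.5 * (edges[:-1] + edges[1:])
idx = rng.choice(len(centers), p=p, size=1000)   # pick bins by weight
widths = np.diff(edges)
samples = centers[idx] + (rng.random(1000) - 0.5) * widths[idx]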
377,790 | def put_attribute(self, id, key, value, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
return self.put_attribute_with_http_info(id, key, value, **kwargs)
else:
(data) = self.put_attribute_with_http_info(id, key, value, **kwargs)
return data | Add attribute to the BuildRecord.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_attribute(id, key, value, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: BuildRecord id (required)
:param str key: Attribute key (required)
:param str value: Attribute value (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
377,791 | def UNIFAC(T, xs, chemgroups, cached=None, subgroup_data=None,
interaction_data=None, modified=False):
cmps = range(len(xs))
if subgroup_data is None:
subgroups = UFSG
else:
subgroups = subgroup_data
if interaction_data is None:
interactions = UFIP
else:
interactions = interaction_data
if not cached:
rs = []
qs = []
for groups in chemgroups:
ri = 0.
qi = 0.
for group, count in groups.items():
ri += subgroups[group].R*count
qi += subgroups[group].Q*count
rs.append(ri)
qs.append(qi)
group_counts = {}
for groups in chemgroups:
for group, count in groups.items():
if group in group_counts:
group_counts[group] += count
else:
group_counts[group] = count
else:
rs, qs, group_counts = cached
group_sum = sum(count*xs[i] for i in cmps for count in chemgroups[i].values())
group_count_xs = {}
for group in group_counts:
tot_numerator = sum(chemgroups[i][group]*xs[i] for i in cmps if group in chemgroups[i])
group_count_xs[group] = tot_numerator/group_sum
rsxs = sum([rs[i]*xs[i] for i in cmps])
Vis = [rs[i]/rsxs for i in cmps]
qsxs = sum([qs[i]*xs[i] for i in cmps])
Fis = [qs[i]/qsxs for i in cmps]
if modified:
rsxs2 = sum([rs[i]**0.75*xs[i] for i in cmps])
Vis2 = [rs[i]**0.75/rsxs2 for i in cmps]
loggammacs = [1. - Vis2[i] + log(Vis2[i]) - 5.*qs[i]*(1. - Vis[i]/Fis[i]
+ log(Vis[i]/Fis[i])) for i in cmps]
else:
loggammacs = [1. - Vis[i] + log(Vis[i]) - 5.*qs[i]*(1. - Vis[i]/Fis[i]
+ log(Vis[i]/Fis[i])) for i in cmps]
Q_sum_term = sum([subgroups[group].Q*group_count_xs[group] for group in group_counts])
area_fractions = {group: subgroups[group].Q*group_count_xs[group]/Q_sum_term
for group in group_counts.keys()}
UNIFAC_psis = {k: {m:(UNIFAC_psi(T, m, k, subgroups, interactions, modified=modified))
for m in group_counts} for k in group_counts}
loggamma_groups = {}
for k in group_counts:
sum1, sum2 = 0., 0.
for m in group_counts:
sum1 += area_fractions[m]*UNIFAC_psis[k][m]
sum3 = sum(area_fractions[n]*UNIFAC_psis[m][n] for n in group_counts)
sum2 -= area_fractions[m]*UNIFAC_psis[m][k]/sum3
loggamma_groups[k] = subgroups[k].Q*(1. - log(sum1) + sum2)
loggammars = []
for groups in chemgroups:
chem_loggamma_groups = {}
chem_group_sum = sum(groups.values())
chem_group_count_xs = {group: count/chem_group_sum for group, count in groups.items()}
Q_sum_term = sum([subgroups[group].Q*chem_group_count_xs[group] for group in groups])
chem_area_fractions = {group: subgroups[group].Q*chem_group_count_xs[group]/Q_sum_term
for group in groups.keys()}
for k in groups:
sum1, sum2 = 0., 0.
for m in groups:
sum1 += chem_area_fractions[m]*UNIFAC_psis[k][m]
sum3 = sum(chem_area_fractions[n]*UNIFAC_psis[m][n] for n in groups)
sum2 -= chem_area_fractions[m]*UNIFAC_psis[m][k]/sum3
chem_loggamma_groups[k] = subgroups[k].Q*(1. - log(sum1) + sum2)
tot = sum([count*(loggamma_groups[group] - chem_loggamma_groups[group])
for group, count in groups.items()])
loggammars.append(tot)
return [exp(loggammacs[i]+loggammars[i]) for i in cmps] | r'''Calculates activity coefficients using the UNIFAC model (optionally
modified), given a mixture's temperature, liquid mole fractions,
and optionally the subgroup data and interaction parameter data of your
choice. The default is to use the original UNIFAC model, with the latest
parameters published by DDBST. The model supports modified forms (Dortmund,
NIST) when the `modified` parameter is True.
Parameters
----------
T : float
Temperature of the system, [K]
xs : list[float]
Mole fractions of all species in the system in the liquid phase, [-]
chemgroups : list[dict]
List of dictionaries of subgroup IDs and their counts for all species
in the mixture, [-]
subgroup_data : dict[UNIFAC_subgroup]
UNIFAC subgroup data; available dictionaries in this module are UFSG
(original), DOUFSG (Dortmund), or NISTUFSG ([4]_).
interaction_data : dict[dict[tuple(a_mn, b_mn, c_mn)]]
UNIFAC interaction parameter data; available dictionaries in this
module are UFIP (original), DOUFIP2006 (Dortmund parameters as
published by 2006), DOUFIP2016 (Dortmund parameters as published by
2016), and NISTUFIP ([4]_).
modified : bool
True if using the modified form and temperature dependence, otherwise
False.
Returns
-------
gammas : list[float]
Activity coefficients of all species in the mixture, [-]
Notes
-----
The actual implementation of UNIFAC is formulated slightly different than
the formulas above for computational efficiency. DDBST switched to using
the more efficient forms in their publication, but the numerical results
are identical.
The model is as follows:
.. math::
\ln \gamma_i = \ln \gamma_i^c + \ln \gamma_i^r
**Combinatorial component**
.. math::
\ln \gamma_i^c = \ln \frac{\phi_i}{x_i} + \frac{z}{2} q_i
\ln\frac{\theta_i}{\phi_i} + L_i - \frac{\phi_i}{x_i}
\sum_{j=1}^{n} x_j L_j
\theta_i = \frac{x_i q_i}{\sum_{j=1}^{n} x_j q_j}
\phi_i = \frac{x_i r_i}{\sum_{j=1}^{n} x_j r_j}
L_i = 5(r_i - q_i)-(r_i-1)
**Residual component**
.. math::
\ln \gamma_i^r = \sum_{k}^n \nu_k^{(i)} \left[ \ln \Gamma_k
- \ln \Gamma_k^{(i)} \right]
\ln \Gamma_k = Q_k \left[1 - \ln \sum_m \Theta_m \Psi_{mk} - \sum_m
\frac{\Theta_m \Psi_{km}}{\sum_n \Theta_n \Psi_{nm}}\right]
\Theta_m = \frac{Q_m X_m}{\sum_{n} Q_n X_n}
X_m = \frac{ \sum_j \nu^j_m x_j}{\sum_j \sum_n \nu_n^j x_j}
**R and Q**
.. math::
r_i = \sum_{k=1}^{n} \nu_k R_k
q_i = \sum_{k=1}^{n}\nu_k Q_k
The newer forms of UNIFAC (Dortmund, NIST) calculate the combinatorial
part slightly differently:
.. math::
\ln \gamma_i^c = 1 - {V'}_i + \ln({V'}_i) - 5q_i \left(1
- \frac{V_i}{F_i}+ \ln\left(\frac{V_i}{F_i}\right)\right)
V'_i = \frac{r_i^{3/4}}{\sum_j r_j^{3/4}x_j}
This is more clear when looking at the full rearranged form as in [3]_.
Examples
--------
>>> UNIFAC(T=333.15, xs=[0.5, 0.5], chemgroups=[{1:2, 2:4}, {1:1, 2:1, 18:1}])
[1.4276025835624173, 1.3646545010104225]
>>> UNIFAC(373.15, [0.2, 0.3, 0.2, 0.2],
... [{9:6}, {78:6}, {1:1, 18:1}, {1:1, 2:1, 14:1}],
... subgroup_data=DOUFSG, interaction_data=DOUFIP2006, modified=True)
[1.186431113706829, 1.440280133911197, 1.204479833499608, 1.9720706090299824]
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
.. [2] Fredenslund, Aage, Russell L. Jones, and John M. Prausnitz. "Group
Contribution Estimation of Activity Coefficients in Nonideal Liquid
Mixtures." AIChE Journal 21, no. 6 (November 1, 1975): 1086-99.
doi:10.1002/aic.690210607.
.. [3] Jakob, Antje, Hans Grensemann, Jürgen Lohmann, and Jürgen Gmehling.
"Further Development of Modified UNIFAC (Dortmund): Revision and
Extension 5." Industrial & Engineering Chemistry Research 45, no. 23
(November 1, 2006): 7924-33. doi:10.1021/ie060355c.
.. [4] Kang, Jeong Won, Vladimir Diky, and Michael Frenkel. "New Modified
UNIFAC Parameters Using Critically Evaluated Phase Equilibrium Data."
Fluid Phase Equilibria 388 (February 25, 2015): 128-41.
doi:10.1016/j.fluid.2014.12.042. |
377,792 | def idmap_get_new(connection, old, tbl):
cursor = connection.cursor()
cursor.execute("SELECT new FROM _idmap_ WHERE old == ?", (old,))
new = cursor.fetchone()
if new is not None:
return ilwd.ilwdchar(new[0])
new = tbl.get_next_id()
cursor.execute("INSERT INTO _idmap_ VALUES (?, ?)", (old, new))
return new | From the old ID string, obtain a replacement ID string by either
grabbing it from the _idmap_ table if one has already been assigned
to the old ID, or by using the current value of the Table
instance's next_id class attribute. In the latter case, the new ID
is recorded in the _idmap_ table, and the class attribute
incremented by 1.
This function is for internal use, it forms part of the code used
to re-map row IDs when merging multiple documents. |
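A self-contained sqlite3 sketch of the same remapping pattern, with a plain string counter standing in for the table's `get_next_id` machinery (the `Counter` class and ID format are illustrative, not part of the original code).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE _idmap_ (old TEXT PRIMARY KEY NOT NULL, new TEXT NOT NULL)")

class Counter:
    # Stand-in for a table object exposing get_next_id()
    def __init__(self):
        self.next_id = 0
    def get_next_id(self):
        new = "row:%d" % self.next_id
        self.next_id += 1
        return new

def idmap_get_new(connection, old, tbl):
    cursor = connection.cursor()
    cursor.execute("SELECT new FROM _idmap_ WHERE old == ?", (old,))
    row = cursor.fetchone()
    if row is not None:
        return row[0]
    new = tbl.get_next_id()
    cursor.execute("INSERT INTO _idmap_ VALUES (?, ?)", (old, new))
    return new

tbl = Counter()
assert idmap_get_new(conn, "doc1:event:5", tbl) == "row:0"
assert idmap_get_new(conn, "doc1:event:5", tbl) == "row:0"   # same old ID maps to same new ID
assert idmap_get_new(conn, "doc2:event:5", tbl) == "row:1"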
377,793 | def _wait_for_ip(name, session):
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
            if status.startswith('169'):  # ignore link-local/APIPA addresses (assumed prefix)
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
            'Waited %s seconds for %s to report an IP address',
delta.seconds, name
)
if delta.seconds > 180:
            log.warning('Timeout waiting for VM to report an IP address')
break
time.sleep(5) | Wait for IP to be available during create() |
377,794 | def get_value(self, key, default={}, nested=True, decrypt=True):
    key = key.lstrip('.')
if key.endswith("."):
key = key[:-1]
if nested:
path = key.split(".")
curr = self.settings
for p in path[:-1]:
curr = curr.get(p, {})
try:
value = curr[path[-1]]
except KeyError:
return default
value = self.decrypt(value, path)
return value
else:
return self.settings.get(key, default) | Retrieve a value from the configuration based on its key. The key
may be nested.
:param str key: A path to the value, with nested levels joined by '.'
:param default: Value to return if the key does not exist (defaults to :code:`dict()`)
:param bool decrypt: If :code:`True`, decrypt an encrypted value before returning
(if encrypted). Defaults to :code:`True`. |
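The nested lookup itself, without encryption handling, boils down to walking a dot-separated path through dictionaries; a simplified sketch.
def get_nested(settings, key, default=None):
    # Walk a dot-separated path through nested dicts
    path = key.strip(".").split(".")
    curr = settings
    for p in path[:-1]:
        curr = curr.get(p, {})
    try:
        return curr[path[-1]]
    except (KeyError, TypeError):
        return default

settings = {"database": {"credentials": {"user": "admin"}}}
assert get_nested(settings, "database.credentials.user") == "admin"
assert get_nested(settings, "database.missing", default="n/a") == "n/a"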
377,795 | def redirect_stdout(new_stdout):
old_stdout, sys.stdout = sys.stdout, new_stdout
try:
yield None
finally:
sys.stdout = old_stdout | Redirect the stdout
Args:
new_stdout (io.StringIO): New stdout to use instead |
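The snippet above is written as a generator-based context manager; the original presumably carries a `contextlib.contextmanager` decorator (not visible here), so this usage sketch restates it with the decorator.
import io
import sys
from contextlib import contextmanager

@contextmanager
def redirect_stdout(new_stdout):
    # Same swap-and-restore pattern as above, usable in a with-block
    old_stdout, sys.stdout = sys.stdout, new_stdout
    try:
        yield None
    finally:
        sys.stdout = old_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print("captured")
assert buf.getvalue() == "captured\n"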
377,796 | def get_datafeeds(self, datafeed_id=None, params=None):
return self.transport.perform_request(
"GET", _make_path("_ml", "datafeeds", datafeed_id), params=params
) | `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_
:arg datafeed_id: The ID of the datafeeds to fetch
:arg allow_no_datafeeds: Whether to ignore if a wildcard expression
matches no datafeeds. (This includes `_all` string or when no
datafeeds have been specified) |
377,797 | def tobinary(self):
entrylen = struct.calcsize(self.ENTRYSTRUCT)
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.data:
nmlen = len(nm) + 1
toclen = nmlen + entrylen
if toclen % 16 == 0:
            pad = '\0'
        else:
            padlen = 16 - (toclen % 16)
            pad = '\0' * padlen
            nmlen = nmlen + padlen
        rslt.append(struct.pack(self.ENTRYSTRUCT+`nmlen`+'s',
            nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+pad))
    return ''.join(rslt) | Return self as a binary string. |
377,798 | def rm_env(user, name):
    lst = list_tab(user)
    ret = 'absent'
    rm_ = None
    for ind in range(len(lst['env'])):
        if name == lst['env'][ind]['name']:
            rm_ = ind
    if rm_ is not None:
        lst['env'].pop(rm_)
        ret = 'removed'
    comdat = _write_cron_lines(user, _render_tab(lst))
    if comdat['retcode']:
        return comdat['stderr']
return ret | Remove cron environment variable for a specified user.
CLI Example:
.. code-block:: bash
salt '*' cron.rm_env root MAILTO |
377,799 | def add_key(self, key):
if key not in self.value:
self.value[key] = ReducedMetric(self.reducer) | Adds a new key to this metric |