Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
385,000 | def add_with_properties(self, model, name=None, update_dict=None, bulk=True, **kwargs):
if self.category != Category.INSTANCE:
raise APIError("Part should be of category INSTANCE")
name = name or model.name
action =
properties_update_dict = dict()
for prop_name_or_id, property_value in update_dict.items():
if is_uuid(prop_name_or_id):
properties_update_dict[prop_name_or_id] = property_value
else:
properties_update_dict[model.property(prop_name_or_id).id] = property_value
if bulk:
r = self._client._request(, self._client._build_url(),
data=dict(
name=name,
model=model.id,
parent=self.id,
properties=json.dumps(properties_update_dict),
**kwargs
),
params=dict(select_action=action))
if r.status_code != requests.codes.created:
raise APIError(.format(str(r), r.content))
return Part(r.json()[][0], client=self._client)
else:
new_part = self.add(model, name=name)
new_part.update(update_dict=update_dict, bulk=bulk)
return new_part | Add a part and update its properties in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as an
additional keyword argument to this method. This improves backend performance, with the
trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param model: model of the part which to add a new instance, should follow the model tree in KE-chain
:type model: :class:`Part`
:param name: (optional) name provided for the new instance as string otherwise use the name of the model
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property_id (from the property models)
and values being property values
:type update_dict: dict or None
:param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the newly created :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises APIError: in case an Error occurs
Examples
--------
>>> bike = client.scope('Bike Project').part('Bike')
>>> wheel_model = client.scope('Bike Project').model('Wheel')
>>> bike.add_with_properties(wheel_model, 'Wooden Wheel', {'Spokes': 11, 'Material': 'Wood'}) |
385,001 | def get_event(self, client, check):
data = self._request(, .format(client, check))
return data.json() | Returns an event for a given client & check name. |
385,002 | def stop_server(self):
if self.rpc_server is not None:
try:
self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
except:
log.warning("Failed to shut down server socket")
self.rpc_server.shutdown() | Stop serving. Also stops the thread. |
385,003 | def _any_bound_condition_fails_criterion(agent, criterion):
bc_agents = [bc.agent for bc in agent.bound_conditions]
for b in bc_agents:
if not criterion(b):
return True
return False | Returns True if any bound condition fails to meet the specified
criterion.
Parameters
----------
agent: Agent
The agent whose bound conditions we evaluate
criterion: function
A function evaluated as criterion(a) for each agent a in the bound conditions;
returns a bool.
Returns
-------
any_meets: bool
True if and only if any of the agents in a bound condition fail to match
the specified criteria |
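A minimal, self-contained sketch of how the check above behaves. `SimpleAgent`, `SimpleBoundCondition`, and `has_name` are stand-in names invented here for illustration (the real function expects INDRA `Agent` objects), and the call assumes `_any_bound_condition_fails_criterion` from the row above is in scope.

```python
from collections import namedtuple

# Stand-in types for illustration only; the real code uses INDRA Agent objects.
SimpleBoundCondition = namedtuple("SimpleBoundCondition", ["agent"])
SimpleAgent = namedtuple("SimpleAgent", ["name", "bound_conditions"])

def has_name(agent):
    # Example criterion: the agent must have a non-empty name.
    return bool(agent.name)

raf = SimpleAgent("RAF", [])
unnamed = SimpleAgent("", [])
mek = SimpleAgent("MAP2K1", [SimpleBoundCondition(raf), SimpleBoundCondition(unnamed)])

# One bound agent has an empty name, so the criterion fails for it -> True.
print(_any_bound_condition_fails_criterion(mek, has_name))  # True
```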
385,004 | def delete(self, request, bot_id, hook_id, id, format=None):
bot = self.get_bot(bot_id, request.user)
hook = self.get_hook(hook_id, bot, request.user)
recipient = self.get_recipient(id, hook, request.user)
recipient.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | Delete an existing telegram recipient
---
responseMessages:
- code: 401
message: Not authenticated |
385,005 | def from_json(cls, service_dict):
sd = service_dict.copy()
service_endpoint = sd.get(cls.SERVICE_ENDPOINT)
if not service_endpoint:
logger.error(
)
raise IndexError
_type = sd.get()
if not _type:
logger.error()
raise IndexError
sd.pop(cls.SERVICE_ENDPOINT)
sd.pop()
return cls(
service_endpoint,
_type,
sd
) | Create a service object from a JSON string. |
385,006 | def clone(self, document):
wrapped = document.wrap()
if '_id' in wrapped:
del wrapped['_id']
return type(document).unwrap(wrapped, session=self) | Serialize a document, remove its _id, and deserialize as a new
object |
385,007 | def tt_qr(X, left_to_right=True):
X = X.round(eps=0)
numDims = X.d
coresX = tt.tensor.to_list(X)
if left_to_right:
for dim in xrange(0, numDims-1):
coresX = cores_orthogonalization_step(
coresX, dim, left_to_right=left_to_right)
last_core = coresX[numDims-1]
r1, n, r2 = last_core.shape
last_core, rr = np.linalg.qr(reshape(last_core, (-1, r2)))
coresX[numDims-1] = reshape(last_core, (r1, n, -1))
else:
for dim in xrange(numDims-1, 0, -1):
coresX = cores_orthogonalization_step(
coresX, dim, left_to_right=left_to_right)
last_core = coresX[0]
r1, n, r2 = last_core.shape
last_core, rr = np.linalg.qr(
np.transpose(reshape(last_core, (r1, -1)))
)
coresX[0] = reshape(
np.transpose(last_core),
(-1, n, r2))
rr = np.transpose(rr)
return tt.tensor.from_list(coresX), rr | Orthogonalizes a TT tensor from left to right or
from right to left.
:param: X - tensor to orthogonalise
:param: left_to_right - direction; True for left-to-right,
False for right-to-left orthogonalization
:return: X_orth, R - orthogonal tensor and right (left)
upper (lower) triangular matrix
>>> import tt, numpy as np
>>> x = tt.rand(np.array([2, 3, 4, 5]), d=4)
>>> x_q, r = tt_qr(x, left_to_right=True)
>>> np.allclose((r[0][0]*x_q).norm(), x.norm())
True
>>> x_u, l = tt_qr(x, left_to_right=False)
>>> np.allclose((l[0][0]*x_u).norm(), x.norm())
True |
385,008 | def remove_breakpoint(self, event_type, bp=None, filter_func=None):
if bp is None and filter_func is None:
raise ValueError()
try:
if bp is not None:
self._breakpoints[event_type].remove(bp)
else:
self._breakpoints[event_type] = [ b for b in self._breakpoints[event_type] if not filter_func(b) ]
except ValueError:
l.error(, bp, event_type) | Removes a breakpoint.
:param bp: The breakpoint to remove.
:param filter_func: A filter function to specify whether each breakpoint should be removed or not. |
385,009 | def add(self, album, objects, object_type=None, **kwds):
return self._add_remove("add", album, objects, object_type,
**kwds) | Endpoint: /album/<id>/<type>/add.json
Add objects (eg. Photos) to an album.
The objects are a list of either IDs or Trovebox objects.
If Trovebox objects are used, the object type is inferred
automatically.
Returns the updated album object. |
385,010 | def docs(root_url, path):
root_url = root_url.rstrip()
path = path.lstrip()
if root_url == OLD_ROOT_URL:
return .format(path)
else:
return .format(root_url, path) | Generate URL for path in the Taskcluster docs. |
385,011 | def normal(self):
d = self.B - self.A
return Line([-d.y, d.x], [d.y, -d.x]) | :return: Line
Returns a Line normal (perpendicular) to this Line. |
385,012 | def write(url, content, **args):
with HTTPResource(url, **args) as resource:
resource.write(content) | Put the object/collection into a file URL. |
385,013 | def resort_client_actions(portal):
sorted_actions = [
"edit",
"contacts",
"view",
"analysisrequests",
"batches",
"samplepoints",
"profiles",
"templates",
"specs",
"orders",
"reports_listing"
]
type_info = portal.portal_types.getTypeInfo("Client")
actions = filter(lambda act: act.id in sorted_actions, type_info._actions)
missing = filter(lambda act: act.id not in sorted_actions, type_info._actions)
actions = sorted(actions, key=lambda act: sorted_actions.index(act.id))
if missing:
actions.extend(missing)
type_info._actions = actions | Resorts client action views |
385,014 | def multiCall(*commands, dependent=True, bundle=False,
print_result=False, print_commands=False):
results = []
dependent_failed = False
for command in commands:
if not dependent_failed:
response = call(command, print_result=print_result,
print_commands=print_commands)
if (response.returncode == 1) and dependent:
dependent_failed = True
else:
response = None
results.append(response)
if bundle:
result = Result()
for response in results:
if not response:
continue
elif response.returncode == 1:
result.returncode = 1
result.extendInformation(response)
processed_response = result
else:
processed_response = results
return processed_response | Calls the function 'call' multiple times, given sets of commands |
385,015 | def ensure_mingw_drive(win32_path):
win32_drive, _path = splitdrive(win32_path)
mingw_drive = '/' + win32_drive[:-1].lower()
mingw_path = mingw_drive + _path
return mingw_path | Replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(result)
/c/Program Files/Foobar |
385,016 | def getFactors(self, aLocation, axisOnly=False, allFactors=False):
deltas = []
aLocation.expand(self.getAxisNames())
limits = getLimits(self._allLocations(), aLocation)
for deltaLocationTuple, (mathItem, deltaName) in sorted(self.items()):
deltaLocation = Location(deltaLocationTuple)
deltaLocation.expand( self.getAxisNames())
factor = self._accumulateFactors(aLocation, deltaLocation, limits, axisOnly)
if not (factor-_EPSILON < 0 < factor+_EPSILON) or allFactors:
deltas.append((factor, mathItem, deltaName))
deltas = sorted(deltas, key=itemgetter(0), reverse=True)
return deltas | Return a list of all factors and math items at aLocation.
factor, mathItem, deltaName
all = True: include factors that are zero or near-zero |
385,017 | def reprString(self, string, length):
if isinstance(string, int):
length = min(length, self.length - string)
string = self.string[string:string + length]
voc = self.voc
res = self.tokSep.join((voc[id] for id in string[:length]))
if self.unit == UNIT_WORD:
res = res.replace(" \n", "\n")
res = res.replace("\n ", "\n")
if self.unit == UNIT_CHARACTER:
res = res.encode(self.encoding)
return res | Output a string of length tokens in the original form.
If string is an integer, it is considered as an offset in the text.
Otherwise string is considered as a sequence of ids (see voc and
tokId).
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString(0, 3)
'mis'
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString([1, 4, 1, 3, 3, 2], 5)
'isipp'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString(0, 3)
'missi ssi ppi'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString([1, 3, 2], 3)
'missi ssi ppi' |
385,018 | def clear_caches(delete_all=False):
global _time_caches
if delete_all:
_time_caches = []
_parser = { "default": CodeParser() }
else:
for tc in _time_caches:
for key, (t, value) in list(tc.items()):
if t < time.time():
del tc[key] | Fortpy caches many things that should be cleared after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing. |
385,019 | def save_model(self, request, obj, form, change):
obj.save()
if notification:
if obj.parent_msg is None:
sender_label =
recipients_label =
else:
sender_label =
recipients_label =
notification.send([obj.sender], sender_label, {: obj,})
if form.cleaned_data[] == :
recipients = User.objects.exclude(pk=obj.recipient.pk)
else:
recipients = []
group = form.cleaned_data[]
if group:
group = Group.objects.get(pk=group)
recipients.extend(
list(group.user_set.exclude(pk=obj.recipient.pk)))
for user in recipients:
obj.pk = None
obj.recipient = user
obj.save()
if notification:
notification.send([user], recipients_label, { : obj,}) | Saves the message for the recipient and looks in the form instance
for other possible recipients. Prevents duplication by excluding the
original recipient from the list of optional recipients.
When changing an existing message and choosing optional recipients,
the message is effectively resent to those users. |
385,020 | def add_requirements(self, metadata_path):
additional = list(self.setupcfg_requirements())
if not additional: return
pkg_info = read_pkg_info(metadata_path)
if in pkg_info or in pkg_info:
warnings.warn()
del pkg_info[]
del pkg_info[]
for k, v in additional:
pkg_info[k] = v
write_pkg_info(metadata_path, pkg_info) | Add additional requirements from setup.cfg to file metadata_path |
385,021 | def copy(self):
new_digest = Digest(self.digest_type)
libcrypto.EVP_MD_CTX_copy(new_digest.ctx, self.ctx)
return new_digest | Creates a copy of the digest CTX so that a digest can be computed
while continuing to hash more data
385,022 | def fill_triangle(setter, x0, y0, x1, y1, x2, y2, color=None, aa=False):
a = b = y = last = 0
if y0 > y1:
y0, y1 = y1, y0
x0, x1 = x1, x0
if y1 > y2:
y2, y1 = y1, y2
x2, x1 = x1, x2
if y0 > y1:
y0, y1 = y1, y0
x0, x1 = x1, x0
if y0 == y2:
a = b = x0
if x1 < a:
a = x1
elif x1 > b:
b = x1
if x2 < a:
a = x2
elif x2 > b:
b = x2
_draw_fast_hline(setter, a, y0, b - a + 1, color, aa)
dx01 = x1 - x0
dy01 = y1 - y0
dx02 = x2 - x0
dy02 = y2 - y0
dx12 = x2 - x1
dy12 = y2 - y1
sa = 0
sb = 0
if y1 == y2:
last = y1
else:
last = y1 - 1
for y in range(y, last + 1):
a = x0 + sa / dy01
b = x0 + sb / dy02
sa += dx01
sb += dx02
if a > b:
a, b = b, a
_draw_fast_hline(setter, a, y, b - a + 1, color, aa)
sa = dx12 * (y - y1)
sb = dx02 * (y - y0)
for y in range(y, y2 + 1):
a = x1 + sa / dy12
b = x0 + sb / dy02
sa += dx12
sb += dx02
if a > b:
a, b = b, a
_draw_fast_hline(setter, a, y, b - a + 1, color, aa) | Draw solid triangle with points x0,y0 - x1,y1 - x2,y2 |
385,023 | def is_all_field_none(self):
if self._id_ is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._type_ is not None:
return False
if self._cvc2 is not None:
return False
if self._status is not None:
return False
if self._expiry_time is not None:
return False
return True | :rtype: bool |
385,024 | def key_81_CosSin_2009():
dlf = DigitalFilter(, )
dlf.base = np.array([
3.354626279025119e-04, 4.097349789797864e-04, 5.004514334406104e-04,
6.112527611295723e-04, 7.465858083766792e-04, 9.118819655545162e-04,
1.113775147844802e-03, 1.360368037547893e-03, 1.661557273173934e-03,
2.029430636295734e-03, 2.478752176666358e-03, 3.027554745375813e-03,
3.697863716482929e-03, 4.516580942612666e-03, 5.516564420760772e-03,
6.737946999085467e-03, 8.229747049020023e-03, 1.005183574463358e-02,
1.227733990306844e-02, 1.499557682047770e-02, 1.831563888873418e-02,
2.237077185616559e-02, 2.732372244729256e-02, 3.337326996032607e-02,
4.076220397836620e-02, 4.978706836786394e-02, 6.081006262521795e-02,
7.427357821433388e-02, 9.071795328941247e-02, 1.108031583623339e-01,
1.353352832366127e-01, 1.652988882215865e-01, 2.018965179946554e-01,
2.465969639416064e-01, 3.011942119122020e-01, 3.678794411714423e-01,
4.493289641172216e-01, 5.488116360940264e-01, 6.703200460356393e-01,
8.187307530779818e-01, 1e0, 1.221402758160170e+00,
1.491824697641270e+00, 1.822118800390509e+00, 2.225540928492468e+00,
2.718281828459046e+00, 3.320116922736548e+00, 4.055199966844675e+00,
4.953032424395115e+00, 6.049647464412947e+00, 7.389056098930650e+00,
9.025013499434122e+00, 1.102317638064160e+01, 1.346373803500169e+01,
1.644464677109706e+01, 2.008553692318767e+01, 2.453253019710935e+01,
2.996410004739703e+01, 3.659823444367799e+01, 4.470118449330084e+01,
5.459815003314424e+01, 6.668633104092515e+01, 8.145086866496814e+01,
9.948431564193386e+01, 1.215104175187350e+02, 1.484131591025766e+02,
1.812722418751512e+02, 2.214064162041872e+02, 2.704264074261528e+02,
3.302995599096489e+02, 4.034287934927351e+02, 4.927490410932563e+02,
6.018450378720822e+02, 7.350951892419732e+02, 8.978472916504184e+02,
1.096633158428459e+03, 1.339430764394418e+03, 1.635984429995927e+03,
1.998195895104119e+03, 2.440601977624501e+03, 2.980957987041728e+03])
dlf.factor = np.array([1.2214027581601701])
dlf.cos = np.array([
1.746412733678043e-02, -7.658725022064888e-02, 1.761673907472465e-01,
-2.840940679113589e-01, 3.680388960144733e-01, -4.115498161707958e-01,
4.181209762362728e-01, -3.967204599348831e-01, 3.608829691008270e-01,
-3.171870084102961e-01, 2.744932842186247e-01, -2.324673650676961e-01,
1.971144816936984e-01, -1.634915360178986e-01, 1.381406405905393e-01,
-1.125728533897677e-01, 9.619580319372194e-02, -7.640431432353632e-02,
6.748891657821673e-02, -5.097864570224415e-02, 4.853609305288441e-02,
-3.293272689265632e-02, 3.677175984620380e-02, -1.969323595300588e-02,
3.053726798991684e-02, -9.301135480582538e-03, 2.895215492109734e-02,
-1.875526095801418e-04, 3.181452657662026e-02, 9.025726238227111e-03,
3.955376604096631e-02, 1.966766645672513e-02, 5.318782805621459e-02,
3.300575875620110e-02, 7.409212944640006e-02, 4.972863917303501e-02,
1.029344264288086e-01, 6.776855697600163e-02, 1.357865756912759e-01,
7.511614666518443e-02, 1.522218287240260e-01, 3.034571997381229e-02,
8.802563675323094e-02, -1.689255322598353e-01, -1.756581788680092e-01,
-6.123863775740898e-01, -5.098359641153184e-01, -6.736869803920745e-01,
4.599561125225532e-01, 8.907010262082216e-01, 1.039153770711999e+00,
-2.178135931072732e+00, 8.040971159674268e-01, 5.659848584656202e-01,
-9.349050336534268e-01, 8.006099486213468e-01, -5.944960111930493e-01,
4.369614304892440e-01, -3.292566347310282e-01, 2.547426420681868e-01,
-2.010899026277397e-01, 1.609467208423519e-01, -1.299975550484158e-01,
1.056082501090365e-01, -8.608337452556068e-02, 7.027252107999236e-02,
-5.735742622053085e-02, 4.673270108060494e-02, -3.793635725863799e-02,
3.060786160620013e-02, -2.446220554726340e-02, 1.927399223200865e-02,
-1.486843016804444e-02, 1.111747692371507e-02, -7.939442960305236e-03,
5.298852472637883e-03, -3.200104589830043e-03, 1.665382777953919e-03,
-6.913074254614758e-04, 1.999065225130592e-04,
-2.955159288961187e-05])
dlf.sin = np.array([
7.478326513505658e-07, -2.572850425065560e-06, 5.225955618519281e-06,
-7.352539610140040e-06, 8.768819961093828e-06, -8.560004370841340e-06,
8.101932279460349e-06, -5.983552716117552e-06, 5.036792825138655e-06,
-1.584355068233649e-06, 1.426050228179462e-06, 3.972863429067356e-06,
-1.903788077376088e-06, 1.144652944379527e-05, -4.327773998196030e-06,
2.297298998355334e-05, -4.391227697686659e-06, 4.291202395830839e-05,
1.760279032167125e-06, 8.017887907026914e-05, 2.364651853689879e-05,
1.535031685829202e-04, 8.375427119939347e-05, 3.030115685600468e-04,
2.339455351760637e-04, 6.157392107422657e-04, 5.921808556382737e-04,
1.281873037121434e-03, 1.424276189020714e-03, 2.718506171172064e-03,
3.324504626808429e-03, 5.839859904586436e-03, 7.608663600764702e-03,
1.263571470998938e-02, 1.714199295539484e-02, 2.735013970005427e-02,
3.794840483226463e-02, 5.858519896601026e-02, 8.166914231915734e-02,
1.215508018998907e-01, 1.658946642767184e-01, 2.324389477118542e-01,
2.938956625118840e-01, 3.572525844816433e-01, 3.479235360502319e-01,
2.294314115090992e-01, -1.250412450354792e-01, -6.340986743027450e-01,
-9.703404081656508e-01, -2.734109755210948e-01, 1.321852608494946e+00,
6.762199721133603e-01, -2.093257651144232e+00, 1.707842350925794e+00,
-8.844618831465598e-01, 3.720792781726873e-01, -1.481509947473694e-01,
6.124339615448667e-02, -2.726194382687923e-02, 1.307668436907975e-02,
-6.682101544475918e-03, 3.599101395415812e-03, -2.030735143712865e-03,
1.197624324158372e-03, -7.382202519234128e-04, 4.756906961407787e-04,
-3.199977708080284e-04, 2.238628518300115e-04, -1.618377502708346e-04,
1.199233854156409e-04, -9.025345928219504e-05, 6.830860296946832e-05,
-5.143409372298764e-05, 3.804574823200909e-05, -2.720604959632104e-05,
1.839913059679674e-05, -1.140157702141663e-05, 6.172802138985788e-06,
-2.706562852604888e-06, 8.403636781016683e-07,
-1.356300450956746e-07])
return dlf | Key 81 pt CosSin filter, as published in [Key09]_.
Taken from file ``FilterModules.f90`` provided with 1DCSEM_.
License: `Apache License, Version 2.0,
<http://www.apache.org/licenses/LICENSE-2.0>`_. |
385,025 | def _close(self, args):
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._x_close_ok()
raise error_for_code(reply_code, reply_text,
(class_id, method_id), ConnectionError) | Request a connection close
This method indicates that the sender wants to close the
connection. This may be due to internal conditions (e.g. a
forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an
exception, the sender provides the class and method id of the
method which caused the exception.
RULE:
After sending this method any received method except the
Close-OK method MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with the Close-OK method.
RULE:
When a server receives the Close method from a client it
MUST delete all server-side resources associated with the
client's context. A client CANNOT reconnect to a context
after sending or receiving a Close method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method. |
385,026 | def add_line(self, line=, *, empty=False):
max_page_size = self.max_size - self._prefix_len - 2
if len(line) > max_page_size:
raise RuntimeError( % (max_page_size))
if self._count + len(line) + 1 > self.max_size:
self.close_page()
self._count += len(line) + 1
self._current_page.append(line)
if empty:
self._current_page.append()
self._count += 1 | Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`. |
385,027 | def _create_arg_dict(self, tenant_id, data, in_sub, out_sub):
in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
in_ip_dict = self.get_in_ip_addr(tenant_id)
out_ip_dict = self.get_out_ip_addr(tenant_id)
excl_list = [in_ip_dict.get(), out_ip_dict.get()]
arg_dict = {: tenant_id,
: data.get(),
: in_seg, : in_vlan,
: out_seg, : out_vlan,
: data.get(),
: in_sub, : out_sub,
: in_ip_dict.get(),
: out_ip_dict.get(),
: excl_list}
return arg_dict | Create the argument dictionary. |
385,028 | def fetch_json(self, uri_path, http_method=, query_params=None,
body=None, headers=None):
query_params = query_params or {}
headers = headers or {}
query_params = self.add_authorisation(query_params)
uri = self.build_uri(uri_path, query_params)
allowed_methods = ("POST", "PUT", "DELETE")
if http_method in allowed_methods and not in headers:
headers[] =
headers[] =
response, content = self.client.request(
uri=uri,
method=http_method,
body=body,
headers=headers
)
self.check_errors(uri, response)
return json.loads(content.decode()) | Make a call to Trello API and capture JSON response. Raises an error
when it fails.
Returns:
dict: Dictionary with the JSON data |
385,029 | def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
was_imported = in sys.modules or in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"caneasy_install -U setuptoolspkg_resources']
return do_download() | Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script. |
385,030 | def infer_format(filename:str) -> str:
_, ext = os.path.splitext(filename)
return ext | Return extension identifying format of given filename |
385,031 | def client_for(service, service_module, thrift_service_name=None):
assert service_module,
service = service or
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit(, 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ =
methods = {
: init,
: async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
return type(thrift_service_name + , (object,), methods) | Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
A Thrift-like class, ready to be instantiated and used
with TChannelSyncClient. |
385,032 | def _bse_cli_list_ref_formats(args):
all_refformats = api.get_reference_formats()
if args.no_description:
liststr = all_refformats.keys()
else:
liststr = format_columns(all_refformats.items())
return .join(liststr) | Handles the list-ref-formats subcommand |
385,033 | async def notifications(dev: Device, notification: str, listen_all: bool):
notifications = await dev.get_notifications()
async def handle_notification(x):
click.echo("got notification: %s" % x)
if listen_all:
if notification is not None:
await dev.services[notification].listen_all_notifications(
handle_notification
)
else:
click.echo("Listening to all possible notifications")
await dev.listen_notifications(fallback_callback=handle_notification)
elif notification:
click.echo("Subscribing to notification %s" % notification)
for notif in notifications:
if notif.name == notification:
await notif.activate(handle_notification)
click.echo("Unable to find notification %s" % notification)
else:
click.echo(click.style("Available notifications", bold=True))
for notification in notifications:
click.echo("* %s" % notification) | List available notifications and listen to them.
Using --listen-all [notification] allows listening to all notifications
from the given subsystem.
If the subsystem is omitted, notifications from all subsystems are
requested. |
385,034 | def get(self, request):
sections_list = self.generate_sections()
p = Paginator(sections_list, 25)
page = request.GET.get()
try:
sections = p.page(page)
except PageNotAnInteger:
sections = p.page(1)
except EmptyPage:
sections = p.page(p.num_pages)
context = {
: sections,
: self.generate_page_title(),
: self.browse_type
}
return render(
request,
self.template_path,
context
) | Handle HTTP GET request.
Returns template and context from generate_page_title and
generate_sections to populate template. |
385,035 | def __check_prefix_conflict(self, existing_ni_or_ns_uri, incoming_prefix):
if incoming_prefix not in self.__prefix_map:
return
prefix_check_ni = self.__prefix_map[incoming_prefix]
if isinstance(existing_ni_or_ns_uri, _NamespaceInfo):
existing_ni = existing_ni_or_ns_uri
if prefix_check_ni is not existing_ni:
raise DuplicatePrefixError(incoming_prefix, prefix_check_ni.uri, existing_ni.uri)
else:
ns_uri = existing_ni_or_ns_uri
assert not self.contains_namespace(ns_uri)
raise DuplicatePrefixError(incoming_prefix, prefix_check_ni.uri, ns_uri) | If existing_ni_or_ns_uri is a _NamespaceInfo object (which must
be in this set), then caller wants to map incoming_prefix to that
namespace. This function verifies that the prefix isn't already mapped
to a different namespace URI. If it is, an exception is raised.
Otherwise, existing_ni_or_ns_uri is treated as a string namespace URI
which must not already exist in this set. Caller wants to map
incoming_prefix to that URI. If incoming_prefix maps to anything
already, that represents a prefix conflict and an exception is raised. |
385,036 | def encode(self, payload):
token = jwt.encode(payload, self.signing_key, algorithm=self.algorithm)
return token.decode() | Returns an encoded token for the given payload dictionary. |
385,037 | def pkg(pkg_path,
pkg_sum,
hash_type,
test=None,
**kwargs):
popts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
if not os.path.isfile(pkg_path):
return {}
if not salt.utils.hashutils.get_hash(pkg_path, hash_type) == pkg_sum:
return {}
root = tempfile.mkdtemp()
s_pkg = tarfile.open(pkg_path, )
members = s_pkg.getmembers()
for member in members:
if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, .format(os.sep))):
return {}
elif .format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
return {}
s_pkg.extractall(root)
s_pkg.close()
lowstate_json = os.path.join(root, )
with salt.utils.files.fopen(lowstate_json, ) as fp_:
lowstate = salt.utils.json.load(fp_)
for chunk in lowstate:
if not isinstance(chunk, dict):
return lowstate
pillar_json = os.path.join(root, )
if os.path.isfile(pillar_json):
with salt.utils.files.fopen(pillar_json, ) as fp_:
pillar_override = salt.utils.json.load(fp_)
else:
pillar_override = None
roster_grains_json = os.path.join(root, )
if os.path.isfile(roster_grains_json):
with salt.utils.files.fopen(roster_grains_json, ) as fp_:
roster_grains = salt.utils.json.load(fp_)
if os.path.isfile(roster_grains_json):
popts[] = roster_grains
popts[] =
popts[] = {}
popts[] = _get_test_value(test, **kwargs)
envs = os.listdir(root)
for fn_ in envs:
full = os.path.join(root, fn_)
if not os.path.isdir(full):
continue
popts[][fn_] = [full]
st_ = salt.state.State(popts, pillar_override=pillar_override)
snapper_pre = _snapper_pre(popts, kwargs.get(, ))
ret = st_.call_chunks(lowstate)
ret = st_.call_listen(lowstate, ret)
try:
shutil.rmtree(root)
except (IOError, OSError):
pass
_set_retcode(ret)
_snapper_post(popts, kwargs.get(, ), snapper_pre)
return ret | Execute a packaged state run, the packaged state run will exist in a
tarball available locally. This packaged state
can be generated using salt-ssh.
CLI Example:
.. code-block:: bash
salt '*' state.pkg /tmp/salt_state.tgz 760a9353810e36f6d81416366fc426dc md5 |
385,038 | def exclude(prop):
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
_excluded.add(prop)
if isinstance(prop, RelationshipProperty):
for local in prop.local_columns:
_excluded.add(local) | Don't replicate property that is normally replicated: ordering column,
many-to-one relation that is marked for replication from other side. |
385,039 | def apply_dict_of_variables_vfunc(
func, *args, signature, join=, fill_value=None
):
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = OrderedDict()
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
return result_vars | Apply a variable level function over dicts of DataArray, DataArray,
Variable and ndarray objects. |
385,040 | def create_transient(self, input_stream, original_name, length=None):
ext = os.path.splitext(original_name)[1]
transient = self.new_transient(ext)
if not os.path.isdir(self.transient_root):
os.makedirs(self.transient_root)
self._copy_file(input_stream, transient.path, length=length)
return transient | Create TransientFile and file on FS from given input stream and
original file name. |
385,041 | def LEA(cpu, dest, src):
dest.write(Operators.EXTRACT(src.address(), 0, dest.size)) | Loads effective address.
Computes the effective address of the second operand (the source operand) and stores it in the first operand
(destination operand). The source operand is a memory address (offset part) specified with one of the processor's
addressing modes; the destination operand is a general-purpose register. The address-size and operand-size
attributes affect the action performed by this instruction. The operand-size
attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the
attribute of the code segment.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand. |
385,042 | def analyze_beam_spot(scan_base, combine_n_readouts=1000, chunk_size=10000000, plot_occupancy_hists=False, output_pdf=None, output_file=None):
time_stamp = []
x = []
y = []
for data_file in scan_base:
with tb.open_file(data_file + , mode="r+") as in_hit_file_h5:
meta_data_array = in_hit_file_h5.root.meta_data[:]
hit_table = in_hit_file_h5.root.Hits
parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array[][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array[][::combine_n_readouts])))
analysis_utils.index_event_number(hit_table)
analyze_data = AnalyzeRawData()
analyze_data.create_tot_hist = False
analyze_data.create_bcid_hist = False
analyze_data.histogram.set_no_scan_parameter()
index = 0
best_chunk_size = chunk_size
progress_bar = progressbar.ProgressBar(widgets=[, progressbar.Percentage(), , progressbar.Bar(marker=, left=, right=), , progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
progress_bar.start()
for parameter_index, parameter_range in enumerate(parameter_ranges):
logging.debug( + str(parameter_range[0]) + + str(parameter_range[2]) + + str(parameter_range[3]) + + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + )
analyze_data.reset()
readout_hit_len = 0
for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size):
analyze_data.analyze_hits(hits)
readout_hit_len += hits.shape[0]
progress_bar.update(index)
best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size
occupancy_array = analyze_data.histogram.get_occupancy()
projection_x = np.sum(occupancy_array, axis=0).ravel()
projection_y = np.sum(occupancy_array, axis=1).ravel()
x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80)))
y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336)))
time_stamp.append(parameter_range[0])
if plot_occupancy_hists:
plotting.plot_occupancy(occupancy_array[:, :, 0], title= + time.strftime(, time.localtime(parameter_range[0])) + + time.strftime(, time.localtime(parameter_range[1])), filename=output_pdf)
progress_bar.finish()
plotting.plot_scatter([i * 250 for i in x], [i * 50 for i in y], title=, x_label=, y_label=, marker_style=, filename=output_pdf)
if output_file:
with tb.open_file(output_file, mode="a") as out_file_h5:
rec_array = np.array(zip(time_stamp, x, y), dtype=[(, float), (, float), (, float)])
try:
beam_spot_table = out_file_h5.create_table(out_file_h5.root, name=, description=rec_array, title=, filters=tb.Filters(complib=, complevel=5, fletcher32=False))
beam_spot_table[:] = rec_array
except tb.exceptions.NodeError:
logging.warning(output_file + )
return time_stamp, x, y | Determines the mean x and y beam spot position as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined
for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um.
Parameters
----------
scan_base: list of str
scan base names (e.g. ['//data//SCC_50_fei4_self_trigger_scan_390', ...])
combine_n_readouts: int
the number of read outs to combine (e.g. 1000)
max_chunk_size: int
the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
output_pdf: PdfPages
PdfPages file object, if none the plot is printed to screen |
385,043 | def update_project(id, **kwargs):
content = update_project_raw(id, **kwargs)
if content:
return utils.format_json(content) | Update an existing Project with new information |
385,044 | def clean(args):
p = OptionParser(clean.__doc__)
opts, args = p.parse_args(args)
for link_name in os.listdir(os.getcwd()):
if not op.islink(link_name):
continue
logging.debug("remove symlink `{0}`".format(link_name))
os.unlink(link_name) | %prog clean
Removes all symlinks from current folder |
385,045 | def mailto_to_envelope(mailto_str):
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers) | Interpret mailto-string into a :class:`alot.db.envelope.Envelope` |
385,046 | def procrustes(anchors, X, scale=True, print_out=False):
def centralize(X):
n = X.shape[0]
ones = np.ones((n, 1))
return X - np.multiply(1 / n * np.dot(ones.T, X), ones)
m = anchors.shape[0]
N, d = X.shape
assert m >= d,
X_m = X[N - m:, :]
ones = np.ones((m, 1))
mux = 1 / m * np.dot(ones.T, X_m)
muy = 1 / m * np.dot(ones.T, anchors)
sigmax = 1 / m * np.linalg.norm(X_m - mux)**2
sigmaxy = 1 / m * np.dot((anchors - muy).T, X_m - mux)
try:
U, D, VT = np.linalg.svd(sigmaxy)
except np.linalg.LinAlgError:
print()
print(sigmaxy)
print(np.linalg.matrix_rank(sigmaxy))
if (scale):
c = np.trace(np.diag(D)) / sigmax
else:
c = np.trace(np.diag(D)) / sigmax
if (print_out):
print(.format(c))
c = 1.0
R = np.dot(U, VT)
t = muy.T - c * np.dot(R, mux.T)
X_transformed = (c * np.dot(R, (X - mux).T) + muy.T).T
return X_transformed, R, t, c | Fit X to anchors by applying optimal translation, rotation and reflection.
Given m >= d anchor nodes (anchors in R^(m x d)), return transformation
of coordinates X (output of EDM algorithm) optimally matching anchors in least squares sense.
:param anchors: Matrix of shape m x d, where m is number of anchors, d is dimension of setup.
:param X: Matrix of shape N x d, where the last m points will be used to find fit with the anchors.
:param scale: set to True if the point set should be scaled to match the anchors.
:return: the transformed vector X, the rotation matrix, translation vector, and scaling factor. |
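A small usage sketch, assuming a working version of the `procrustes` function above is in scope (the code shown has some string literals stripped). The rotation angle, translation, and point coordinates are arbitrary illustration values: the anchors are an exact rigid transform of the last three points, so the fitted coordinates should land on them exactly.

```python
import numpy as np

theta = 0.3  # arbitrary rotation angle
R_true = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
t_true = np.array([2.0, -1.0])  # arbitrary translation

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0],
              [1.0, 1.0], [2.0, 0.5], [0.5, 2.0]])  # last 3 rows act as anchors
anchors = X[-3:].dot(R_true.T) + t_true             # anchor positions after the transform

X_fit, R, t, c = procrustes(anchors, X, scale=False)
print(np.allclose(X_fit[-3:], anchors))  # True: anchor rows are matched exactly
```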
385,047 | def infer_type(self, in_type):
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states. |
385,048 | def animate(self, **kwargs):
super(VisSurface, self).render(**kwargs)
surf_cmaps = kwargs.get(, None)
tri_idxs = []
vert_coords = []
trisurf_params = []
frames = []
frames_tris = []
num_vertices = 0
fig = plt.figure(figsize=self.vconf.figure_size, dpi=self.vconf.figure_dpi)
ax = Axes3D(fig)
surf_count = 0
for plot in self._plots:
if plot[] == and self.vconf.display_evalpts:
verts = plot[][0]
tris = plot[][1]
tri_idxs += [[ti + num_vertices for ti in tri.data] for tri in tris]
vert_coords += [vert.data for vert in verts]
num_vertices = len(vert_coords)
params = {}
if surf_cmaps:
try:
params[] = surf_cmaps[surf_count]
surf_count += 1
except IndexError:
params[] = plot[]
else:
params[] = plot[]
trisurf_params += [params for _ in range(len(tris))]
pts = np.array(vert_coords, dtype=self.vconf.dtype)
for tidx, pidx in zip(tri_idxs, trisurf_params):
frames_tris.append(tidx)
triangulation = mpltri.Triangulation(pts[:, 0], pts[:, 1], triangles=frames_tris)
p3df = ax.plot_trisurf(triangulation, pts[:, 2], alpha=self.vconf.alpha, **pidx)
frames.append([p3df])
ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True, repeat_delay=1000)
if not self.vconf.display_axes:
plt.axis('off')
if self.vconf.axes_equal:
self.vconf.set_axes_equal(ax)
if self.vconf.display_labels:
ax.set_xlabel()
ax.set_ylabel()
ax.set_zlabel()
fig_filename = kwargs.get(, None)
fig_display = kwargs.get(, True)
if fig_display:
plt.show()
else:
fig_filename = self.vconf.figure_image_filename if fig_filename is None else fig_filename
self.vconf.save_figure_as(fig, fig_filename)
return fig | Animates the surface.
This function only animates the triangulated surface. There will be no other elements, such as control points
grid or bounding box.
Keyword arguments:
* ``colormap``: applies colormap to the surface
Colormaps are a visualization feature of Matplotlib. They can be used for several types of surface plots via
the following import statement: ``from matplotlib import cm``
The following link displays the list of Matplotlib colormaps and some examples of colormaps:
https://matplotlib.org/tutorials/colors/colormaps.html |
385,049 | def get_failed_jobs(self, fail_running=False, fail_pending=False):
failed_jobs = {}
for job_key, job_details in self.jobs.items():
if job_details.status == JobStatus.failed:
failed_jobs[job_key] = job_details
elif job_details.status == JobStatus.partial_failed:
failed_jobs[job_key] = job_details
elif fail_running and job_details.status == JobStatus.running:
failed_jobs[job_key] = job_details
elif fail_pending and job_details.status <= JobStatus.pending:
failed_jobs[job_key] = job_details
return failed_jobs | Return a dictionary with the subset of jobs that are marked as failed
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
failed_jobs : dict
Dictionary mapping from job key to `JobDetails` for the failed jobs. |
385,050 | def set_subcommands(func, parser):
if hasattr(func, ) and func.__subcommands__:
sub_parser = parser.add_subparsers(
title=SUBCOMMANDS_LIST_TITLE, dest=,
description=SUBCOMMANDS_LIST_DESCRIPTION.format(
func.__cmd_name__),
help=func.__doc__)
for sub_func in func.__subcommands__.values():
parser = get_parser(sub_func, sub_parser)
for args, kwargs in get_shared(sub_func):
parser.add_argument(*args, **kwargs)
else:
for args, kwargs in get_shared(func):
parser.add_argument(*args, **kwargs) | Set subcommands. |
385,051 | def set_memory(self, total=None, static=None):
if total:
self.params["rem"]["mem_total"] = total
if static:
self.params["rem"]["mem_static"] = static | Set the maxium allowed memory.
Args:
total: The total memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
static: The static memory. Integer. Unit MBytes. If set to None,
this parameterwill be neglected. |
385,052 | def download_sysdig_capture(self, capture_id):
url = .format(
url=self.url, id=capture_id, product=self.product)
res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return False, self.lasterr
return True, res.content | **Description**
Download a sysdig capture by id.
**Arguments**
- **capture_id**: the capture id to download.
**Success Return Value**
The bytes of the scap |
385,053 | def gaussian_prior_model_for_arguments(self, arguments):
return CollectionPriorModel(
{
key: value.gaussian_prior_model_for_arguments(arguments)
if isinstance(value, AbstractPriorModel)
else value
for key, value in self.__dict__.items() if key not in (, , )
}
) | Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors |
385,054 | def _check_seismogenic_depths(self, upper_depth, lower_depth):
if upper_depth:
if upper_depth < 0.:
raise ValueError(
)
else:
self.upper_depth = upper_depth
else:
self.upper_depth = 0.0
if not lower_depth:
raise ValueError(
)
if lower_depth < self.upper_depth:
raise ValueError(
)
self.lower_depth = lower_depth | Checks the seismic depths for physical consistency
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km) |
385,055 | def directionaldiff(f, x0, vec, **options):
x0 = np.asarray(x0)
vec = np.asarray(vec)
if x0.size != vec.size:
raise ValueError()
vec = np.reshape(vec/np.linalg.norm(vec.ravel()), x0.shape)
return Derivative(lambda t: f(x0+t*vec), **options)(0) | Return directional derivative of a function of n variables
Parameters
----------
fun: callable
analytical function to differentiate.
x0: array
vector location at which to differentiate fun. If x0 is an nxm array,
then fun is assumed to be a function of n*m variables.
vec: array
vector defining the line along which to take the derivative. It should
be the same size as x0, but need not be a vector of unit length.
**options:
optional arguments to pass on to Derivative.
Returns
-------
dder: scalar
estimate of the first derivative of fun in the specified direction.
Examples
--------
At the global minimizer (1,1) of the Rosenbrock function,
compute the directional derivative in the direction [1 2]
>>> import numpy as np
>>> import numdifftools as nd
>>> vec = np.r_[1, 2]
>>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2
>>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True)
>>> np.allclose(dd, 0)
True
>>> np.abs(info.error_estimate)<1e-14
True
See also
--------
Derivative,
Gradient |
385,056 | def write_chisq(page, injList, grbtag):
if injList:
th = []+injList + []
else:
th= [,]
injList = []
td = []
plots = [,,, ]
for test in plots:
pTag = test.replace(,).title()
d = [pTag]
for inj in injList + []:
plot = markup.page()
p = "%s/plots_clustered/GRB%s_%s_vs_snr_zoom.png" % (inj, grbtag,
test)
plot.a(href=p, title="%s %s versus SNR" % (inj, pTag))
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
page = write_table(page, th, td)
return page | Write injection chisq plots to markup.page object page |
385,057 | def make_url(*args, **kwargs):
base = "/".join(args)
if kwargs:
return "%s?%s" % (base, urlencode(kwargs))
else:
return base | Makes a URL from component parts |
385,058 | def _check_for_answers(self, pk):
longest_match = ()
if len(self._answer_patterns) > 0:
data = (pk.header,) + tuple(pk.data)
for p in list(self._answer_patterns.keys()):
logger.debug(, p, data)
if len(p) <= len(data):
if p == data[0:len(p)]:
match = data[0:len(p)]
if len(match) >= len(longest_match):
logger.debug(, match)
longest_match = match
if len(longest_match) > 0:
self._answer_patterns[longest_match].cancel()
del self._answer_patterns[longest_match] | Callback called for every packet received to check if we are
waiting for an answer on this port. If so, then cancel the retry
timer. |
385,059 | def _pnorm_default(x, p):
return np.linalg.norm(x.data.ravel(), ord=p) | Default p-norm implementation. |
385,060 | def calculate_md5(filename, length):
assert length >= 0
if length == 0:
return
md5_summer = hashlib.md5()
f = open(filename, )
try:
bytes_read = 0
while bytes_read < length:
chunk_size = min(MD5_CHUNK_SIZE, length - bytes_read)
chunk = f.read(chunk_size)
if not chunk:
break
md5_summer.update(chunk)
bytes_read += len(chunk)
finally:
f.close()
md5 = md5_summer.digest()
return md5 | Calculate the MD5 hash of a file, up to length bytes.
Returns the MD5 in its binary form, as a 16-byte string. Raises IOError
or OSError in case of error. |
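The same chunked-hashing pattern in a self-contained form; `md5_of_prefix` and its defaults are illustration names, not part of the code above. Reading fixed-size chunks keeps memory usage flat regardless of file size, which is the point of the MD5_CHUNK_SIZE loop.

```python
import hashlib

def md5_of_prefix(path, length, chunk_size=64 * 1024):
    """Binary MD5 of the first `length` bytes of `path`, read in chunks."""
    md5 = hashlib.md5()
    remaining = length
    with open(path, "rb") as f:
        while remaining > 0:
            chunk = f.read(min(chunk_size, remaining))
            if not chunk:
                break
            md5.update(chunk)
            remaining -= len(chunk)
    return md5.digest()  # 16 raw bytes; use .hexdigest() for a printable form
```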
385,061 | def get_docker_tag(platform: str, registry: str) -> str:
platform = platform if any(x in platform for x in [, ]) else .format(platform)
if not registry:
registry = "mxnet_local"
return "{0}/{1}".format(registry, platform) | :return: docker tag to be used for the container |
385,062 | async def _get_popular_people_page(self, page=1):
return await self.get_data(self.url_builder(
,
url_params=OrderedDict(page=page),
)) | Get a specific page of popular person data.
Arguments:
page (:py:class:`int`, optional): The page to get.
Returns:
:py:class:`dict`: The page data. |
385,063 | def to_bytes(self, frame, state):
frame = six.binary_type(frame)
return self.encode_length(frame, state) + frame | Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream. |
385,064 | def get_search_result(self, ddoc_id, index_name, **query_params):
ddoc = DesignDocument(self, ddoc_id)
return self._get_search_result(
.join((ddoc.document_url, , index_name)),
**query_params
) | Retrieves the raw JSON content from the remote database based on the
search index on the server, using the query_params provided as query
parameters. A ``query`` parameter containing the Lucene query
syntax is mandatory.
Example for search queries:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve documents where the Lucene field name is 'name' and
# the value is 'julia*'
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
include_docs=True)
for row in resp['rows']:
# Process search index data (in JSON format).
Example if the search query requires grouping by using
the ``group_field`` parameter:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve JSON response content, limiting response to 10 documents
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
group_field='name',
limit=10)
for group in resp['groups']:
for row in group['rows']:
# Process search index data (in JSON format).
:param str ddoc_id: Design document id used to get the search result.
:param str index_name: Name used in part to identify the index.
:param str bookmark: Optional string that enables you to specify which
page of results you require. Only valid for queries that do not
specify the ``group_field`` query parameter.
:param list counts: Optional JSON array of field names for which
counts should be produced. The response will contain counts for each
unique value of this field name among the documents matching the
search query.
Requires the index to have faceting enabled.
:param list drilldown: Optional list of fields that each define a
pair of a field name and a value. This field can be used several
times. The search will only match documents that have the given
value in the field name. It differs from using
``query=fieldname:value`` only in that the values are not analyzed.
:param str group_field: Optional string field by which to group
search matches. Fields containing other data
(numbers, objects, arrays) can not be used.
:param int group_limit: Optional number with the maximum group count.
This field can only be used if ``group_field`` query parameter
is specified.
:param group_sort: Optional JSON field that defines the order of the
groups in a search using ``group_field``. The default sort order
is relevance. This field can have the same values as the sort field,
so single fields as well as arrays of fields are supported.
:param int limit: Optional number to limit the maximum count of the
returned documents. In case of a grouped search, this parameter
limits the number of documents per group.
:param query/q: A Lucene query in the form of ``name:value``.
If name is omitted, the special value ``default`` is used.
The ``query`` parameter can be abbreviated as ``q``.
:param ranges: Optional JSON facet syntax that reuses the standard
Lucene syntax to return counts of results which fit into each
specified category. Inclusive range queries are denoted by brackets.
Exclusive range queries are denoted by curly brackets.
For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an
inclusive range of 0 to 100.
Requires the index to have faceting enabled.
:param sort: Optional JSON string of the form ``fieldname<type>`` for
ascending or ``-fieldname<type>`` for descending sort order.
Fieldname is the name of a string or number field and type is either
number or string or a JSON array of such strings. The type part is
optional and defaults to number.
:param str stale: Optional string to allow the results from a stale
index to be used. This makes the request return immediately, even
if the index has not been completely built yet.
:param list highlight_fields: Optional list of fields which should be
highlighted.
:param str highlight_pre_tag: Optional string inserted before the
highlighted word in the highlights output. Defaults to ``<em>``.
:param str highlight_post_tag: Optional string inserted after the
highlighted word in the highlights output. Defaults to ``</em>``.
:param int highlight_number: Optional number of fragments returned in
highlights. If the search term occurs less often than the number of
fragments specified, longer fragments are returned. Default is 1.
:param int highlight_size: Optional number of characters in each
fragment for highlights. Defaults to 100 characters.
:param list include_fields: Optional list of field names to include in
search results. Any fields included must have been indexed with the
``store:true`` option.
:returns: Search query result data in JSON format |
385,065 | def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
def func_wrapper(ds):
return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
return func_wrapper | Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
``preprocess`` argument of xr.open_mfdataset. It makes sure that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example for why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``. |
385,066 | def update(self, instance):
assert isinstance(instance, UnitOfWork)
if instance.db_id:
query = {: ObjectId(instance.db_id)}
else:
query = {unit_of_work.PROCESS_NAME: instance.process_name,
unit_of_work.TIMEPERIOD: instance.timeperiod,
unit_of_work.START_ID: instance.start_id,
unit_of_work.END_ID: instance.end_id}
self.ds.update(COLLECTION_UNIT_OF_WORK, query, instance)
return instance.db_id | method finds the unit_of_work record and changes its status |
385,067 | def inverse(self):
inv = np.empty(self.size, np.int)
inv[self.sorter] = self.sorted_group_rank_per_key
return inv | return index array that maps unique values back to original space. unique[inverse]==keys |
385,068 | def _check_subject_identifier_matches_requested(self, authentication_request, sub):
# the 'claims', 'id_token', 'userinfo' and 'sub' keys are assumed from the OIDC claims request
# parameter; the error message texts are reconstructions, not the original wording
if 'claims' in authentication_request:
requested_id_token_sub = authentication_request['claims'].get('id_token', {}).get('sub')
requested_userinfo_sub = authentication_request['claims'].get('userinfo', {}).get('sub')
if requested_id_token_sub and requested_userinfo_sub and requested_id_token_sub != requested_userinfo_sub:
raise AuthorizationError(
'mismatching requested subject identifiers: {} != {}'.format(requested_id_token_sub, requested_userinfo_sub))
requested_sub = requested_id_token_sub or requested_userinfo_sub
if requested_sub and sub != requested_sub:
raise AuthorizationError("can't fulfill requested subject identifier '{}'"
.format(requested_sub)) | Verifies the subject identifier against any requested subject identifier using the claims request parameter.
:param authentication_request: authentication request
:param sub: subject identifier
:raise AuthorizationError: if the subject identifier does not match the requested one |
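For orientation, the claims request parameter that this check walks might look roughly like the following; the keys follow the OIDC claims parameter, but the exact nesting of the requested value is an assumption.
# illustrative shape only; whether the requested value sits one level deeper
# (e.g. under a 'value' key) depends on the actual claims handling
authentication_request = {
    'claims': {
        'id_token': {'sub': '248289761001'},
        'userinfo': {'sub': '248289761001'},
    }
}
# a token subject of '248289761001' would pass; any other `sub` raises AuthorizationError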
385,069 | def setupTable_VORG(self):
if "VORG" not in self.tables:
return
self.otf["VORG"] = vorg = newTable("VORG")
vorg.majorVersion = 1
vorg.minorVersion = 0
vorg.VOriginRecords = {}
vorg_count = Counter(_getVerticalOrigin(self.otf, glyph)
for glyph in self.allGlyphs.values())
vorg.defaultVertOriginY = vorg_count.most_common(1)[0][0]
if len(vorg_count) > 1:
for glyphName, glyph in self.allGlyphs.items():
vorg.VOriginRecords[glyphName] = _getVerticalOrigin(
self.otf, glyph)
vorg.numVertOriginYMetrics = len(vorg.VOriginRecords) | Make the VORG table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired. |
385,070 | def logs(self, prefix=''):
logs = []
# the metric key names ('success_rate', 'mean_Q', 'episode') and the '/' separator are assumed, not verbatim
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs | Generates a dictionary that contains all collected statistics. |
385,071 | def get_path(self, dir=None):
if not dir:
dir = self.fs.getcwd()
if self == dir:
return
path_elems = self.get_path_elements()
pathname =
try: i = path_elems.index(dir)
except ValueError:
for p in path_elems[:-1]:
pathname += p.dirname
else:
for p in path_elems[i+1:-1]:
pathname += p.dirname
return pathname + path_elems[-1].name | Return path relative to the current working directory of the
Node.FS.Base object that owns us. |
385,072 | def documentation(self):
newclient = self.__class__(self.session, self.root_url)
return newclient.get_raw() | Get the documentation that the server sends for the API. |
385,073 | def bbduk_trim(forward_in, forward_out, reverse_in=, reverse_out=,
trimq=20, k=25, minlength=50, forcetrimleft=15, hdist=1, returncmd=False, **kwargs):
options = kwargs_to_string(kwargs)
cmd =
try:
subprocess.check_output(cmd.split()).decode()
except subprocess.CalledProcessError:
print()
raise FileNotFoundError
if os.path.isfile(forward_in.replace(, )) and reverse_in == and in forward_in:
reverse_in = forward_in.replace(, )
if reverse_out == :
if in forward_out:
reverse_out = forward_out.replace(, )
else:
raise ValueError()
cmd = \
\
.format(f_in=forward_in,
r_in=reverse_in,
f_out=forward_out,
r_out=reverse_out,
trimq=trimq,
k=k,
minlength=minlength,
forcetrimleft=forcetrimleft,
hdist=hdist,
optn=options)
elif reverse_in == or reverse_in is None:
cmd = \
\
.format(f_in=forward_in,
f_out=forward_out,
trimq=trimq,
k=k,
minlength=minlength,
forcetrimleft=forcetrimleft,
hdist=hdist,
optn=options)
else:
if reverse_out == :
raise ValueError()
cmd = \
\
.format(f_in=forward_in,
r_in=reverse_in,
f_out=forward_out,
r_out=reverse_out,
trimq=trimq,
k=k,
minlength=minlength,
forcetrimleft=forcetrimleft,
hdist=hdist,
optn=options)
out, err = accessoryfunctions.run_subprocess(cmd)
if returncmd:
return out, err, cmd
else:
return out, err | Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can
be overwritten by using keyword parameters.
:param forward_in: Forward reads you want to quality trim.
:param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
:param forward_out: Output forward reads.
:param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used.
:param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used.
:param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list.
:return: out and err: stdout string and stderr string from running bbduk. |
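A minimal usage sketch for the wrapper above; the file names are hypothetical and rely on the _R1/_R2 naming convention described in the docstring.
# the reverse reads and reverse output are inferred from the _R1/_R2 convention
out, err, cmd = bbduk_trim(forward_in='sample_R1.fastq.gz',
                           forward_out='sample_trimmed_R1.fastq.gz',
                           returncmd=True)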
385,074 | def find_application(app_id=None, app_name=None):
LOGGER.debug("ApplicationService.find_application")
if (app_id is None or not app_id) and (app_name is None or not app_name):
raise exceptions.ArianeCallParametersError()
if (app_id is not None and app_id) and (app_name is not None and app_name):
LOGGER.warn(
)
app_name = None
params = None
if app_id is not None and app_id:
params = {: app_id}
elif app_name is not None and app_name:
params = {: app_name}
ret = None
if params is not None:
args = {: , : , : params}
response = ApplicationService.requester.call(args)
if response.rc == 0:
ret = Application.json_2_application(response.response_content)
elif response.rc != 404:
err_msg = + \
str(app_id) + + str(app_name) + + \
+ str(response.response_content) + + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(
err_msg
)
return ret | find the application according to the application id (takes priority) or the application name
:param app_id: the application id
:param app_name: the application name
:return: found application or None if not found |
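A usage sketch; the application name is hypothetical, and the call assumes find_application is reachable as a static method on ApplicationService as suggested by its use of ApplicationService.requester.
app = ApplicationService.find_application(app_name='my_app')
if app is None:
    LOGGER.debug("application 'my_app' not found")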
385,075 | def parse_definition_expr(expr, default_value=None):
try:
define, value = expr.split('=', 1)
try:
value = parse_number_token(value)
except ValueError:
value = parse_bool_token(value)
except ValueError:
if expr:
define, value = expr, default_value
else:
raise ValueError("Invalid definition expression `%s`" % str(expr))
d = define.strip()
if d:
return d, value
else:
raise ValueError("Invalid definition symbol `%s`" % str(define)) | Parses a definition expression and returns a key-value pair
as a tuple.
Each definition expression should be in one of these two formats:
* <variable>=<value>
* <variable>
:param expr:
String expression to be parsed.
:param default_value:
(Default None) When a definition is encountered that has no value, this
will be used as its value.
:return:
A (define, value) tuple
or raises a ``ValueError`` if an invalid
definition expression is provided.
or raises ``AttributeError`` if None is provided for ``expr``.
Usage:
>>> parse_definition_expr('DEBUG=1')
('DEBUG', 1)
>>> parse_definition_expr('FOOBAR=0x40')
('FOOBAR', 64)
>>> parse_definition_expr('FOOBAR=whatever')
('FOOBAR', 'whatever')
>>> parse_definition_expr('FOOBAR=false')
('FOOBAR', False)
>>> parse_definition_expr('FOOBAR=TRUE')
('FOOBAR', True)
>>> parse_definition_expr('FOOBAR', default_value=None)
('FOOBAR', None)
>>> parse_definition_expr('FOOBAR', default_value=1)
('FOOBAR', 1)
>>> parse_definition_expr('FOOBAR=ah=3')
('FOOBAR', 'ah=3')
>>> parse_definition_expr(' FOOBAR=ah=3 ')
('FOOBAR', 'ah=3 ')
>>> parse_definition_expr(' FOOBAR =ah=3 ')
('FOOBAR', 'ah=3 ')
>>> parse_definition_expr(' FOOBAR = ah=3 ')
('FOOBAR', ' ah=3 ')
>>> parse_definition_expr(" ")
Traceback (most recent call last):
...
ValueError: Invalid definition symbol ` `
>>> parse_definition_expr(None)
Traceback (most recent call last):
...
AttributeError: 'NoneType' object has no attribute 'split' |
385,076 | def _calculate(self):
self.logpriors = np.zeros_like(self.rad)
for i in range(self.N-1):
o = np.arange(i+1, self.N)
dist = ((self.zscale*(self.pos[i] - self.pos[o]))**2).sum(axis=-1)
dist0 = (self.rad[i] + self.rad[o])**2
update = self.prior_func(dist - dist0)
self.logpriors[i] += np.sum(update)
self.logpriors[o] += update
| # This is equivalent
for i in range(self.N-1):
for j in range(i+1, self.N):
d = ((self.zscale*(self.pos[i] - self.pos[j]))**2).sum(axis=-1)
r = (self.rad[i] + self.rad[j])**2
cost = self.prior_func(d - r)
self.logpriors[i] += cost
self.logpriors[j] += cost |
385,077 | def nonoverlap(item_a, time_a, item_b, time_b, max_value):
return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value) | Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. |
385,078 | def _compute_anelastic_attenuation_term(self, C, rrup, mag):
r = (rrup**2. + (C[] * np.exp(C[] * mag +
C[] * (8.5 - mag)**2.5))**2.)**.5
f3 = ((C[] + C[] * mag) * np.log(r) +
(C[] + C[] * mag) * r)
return f3 | Compute magnitude-distance scaling term as defined in equation 21,
page 2291 (Tavakoli and Pezeshk, 2005) |
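Written out, the term computed above is f3 = (c1 + c2*M)*ln(r) + (c3 + c4*M)*r with r = sqrt(Rrup**2 + (c5*exp(c6*M + c7*(8.5 - M)**2.5))**2); the coefficient symbols here are placeholders, since the actual coefficient keys are not visible in the code.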
385,079 | def query_fetch_all(self, query, values):
self.cursor.execute(query, values)
retval = self.cursor.fetchall()
self.__close_db()
return retval | Executes a db query, gets all the values, and closes the connection. |
385,080 | def info(self):
hosts = .join(x[] for x in self.members())
mongodb_uri = + hosts + + self.repl_id
result = {"id": self.repl_id,
"auth_key": self.auth_key,
"members": self.members(),
"mongodb_uri": mongodb_uri,
"orchestration": }
if self.login:
uri = (
% (self.mongodb_auth_uri(hosts), self.repl_id))
result[] = uri
return result | return information about replica set |
385,081 | def _parse_routes(iface, opts):
opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
result = {}
if not in opts:
_raise_error_routes(iface, , )
for opt in opts:
result[opt] = opts[opt]
return result | Filters given options and outputs valid settings for
the route settings file. |
385,082 | def discover_settings(conf_base=None):
settings = {
: ,
: False,
: False,
: False,
: {},
: {},
}
if sys.platform.startswith():
settings[] = False
if conf_base:
merge(settings, load_config(, conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings | Discover custom settings for ZMQ path |
385,083 | def Load(self):
for record in super(EventFileLoader, self).Load():
yield event_pb2.Event.FromString(record) | Loads all new events from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All events in the file that have not been yielded yet. |
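A usage sketch; the events-file path is hypothetical, and the loader is assumed to be constructed from that path.
loader = EventFileLoader('events.out.tfevents.1234567890.hostname')
for event in loader.Load():
    print(event.wall_time, event.step)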
385,084 | def setup_panel_params(self, scale_x, scale_y):
def train(scale, limits, trans, name):
if limits is None:
rangee = scale.dimension()
else:
rangee = scale.transform(limits)
out = scale.break_info(rangee)
return out | Compute the range and break information for the panel |
385,085 | def to_capabilities(self):
capabilities = ChromeOptions.to_capabilities(self)
capabilities.update(self._caps)
opera_options = capabilities[self.KEY]
if self.android_package_name:
opera_options["androidPackage"] = self.android_package_name
if self.android_device_socket:
opera_options["androidDeviceSocket"] = self.android_device_socket
if self.android_command_line_file:
opera_options["androidCommandLineFile"] = \
self.android_command_line_file
return capabilities | Creates a capabilities with all the options that have been set and
returns a dictionary with everything |
385,086 | def acquire(self, key):
if _debug: DeviceInfoCache._debug("acquire %r", key)
if isinstance(key, int):
device_info = self.cache.get(key, None)
elif not isinstance(key, Address):
raise TypeError("key must be integer or an address")
elif key.addrType not in (Address.localStationAddr, Address.remoteStationAddr):
raise TypeError("address must be a local or remote station")
else:
device_info = self.cache.get(key, None)
if device_info:
if _debug: DeviceInfoCache._debug(" - reference bump")
device_info._ref_count += 1
if _debug: DeviceInfoCache._debug(" - device_info: %r", device_info)
return device_info | Return the known information about the device and mark the record
as being used by a segmentation state machine. |
385,087 | def on_backward_end(self, iteration:int, **kwargs)->None:
"Callback function that writes backward end appropriate data to Tensorboard."
if iteration == 0: return
self._update_batches_if_needed()
if iteration % self.stats_iters == 0: self.gen_stats_updated, self.crit_stats_updated = False, False
if not (self.gen_stats_updated and self.crit_stats_updated): self._write_model_stats(iteration=iteration) | Callback function that writes backward end appropriate data to Tensorboard. |
385,088 | def _find_all_versions(self, project_name):
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
url_locations = [
link for link in itertools.chain(
(Link(url, trusted=True) for url in index_url_loc),
(Link(url, trusted=True) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug(,
len(url_locations), project_name)
for location in url_locations:
logger.debug(, location)
canonical_name = pkg_resources.safe_name(project_name).lower()
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name.lower(), canonical_name, formats)
find_links_versions = self._package_versions(
(Link(url, , trusted=True) for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug(, page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
,
.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
,
.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
return (
file_versions + find_links_versions + page_versions +
dependency_versions
) | Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted |
385,089 | def _check_stations_csv(self, usr, root):
if path.exists(path.join(usr, 'stations.csv')):
return
else:
copyfile(root, path.join(usr, 'stations.csv')) | Relocate a stations.csv copy in the user home for easy management.
E.g. no need for sudo when you add a new station, etc. |
385,090 | def resize_image(fullfile,fullfile_resized,_megapixels):
logger.debug("%s - Resizing to %s MP"%(fullfile,_megapixels))
img = Image.open(fullfile)
width,height=img.size
current_megapixels=width*height/(2.0**20)
new_width,new_height=resize_compute_width_height(\
fullfile,_megapixels)
if not new_width:
logger.debug("%s - NOT Resizing, scale is > 1"%(fullfile))
return False
logger.info("%s - Resizing image from %0.1f to %0.1f MP (%dx%d) to (%dx%d)"\
%(fullfile,current_megapixels,_megapixels,width,height,new_width,new_height))
imageresize = img.resize((new_width,new_height), Image.ANTIALIAS)
imageresize.save(fullfile_resized, )
if not update_exif_GEXIV2(fullfile,fullfile_resized):
return False
return True | Resizes image (fullfile), saves to fullfile_resized. Image
aspect ratio is conserved, will be scaled to be close to _megapixels in
size. Eg if _megapixels=2, will resize 2560x1920 so each dimension
is scaled by ((2**(20+1*MP))/float(2560*1920))**2 |
385,091 | def _convert_to_ndarray(self, data):
if data.__class__.__name__ != "DataFrame":
raise Exception(f"data is not a DataFrame but {data.__class__.__name__}.")
shape_ndarray = (self._height, self._width, data.shape[1])
data_ndarray = data.values.reshape(shape_ndarray)
return data_ndarray | Converts data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.) |
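A small sketch of the reshape this method relies on, with hypothetical dimensions: one DataFrame row per pixel, columns becoming the third (layer) dimension, row-major order assumed.
import pandas as pd

# hypothetical 2x3 'image' with two layers
df = pd.DataFrame({'band1': range(6), 'band2': range(6, 12)})
arr = df.values.reshape((2, 3, df.shape[1]))
print(arr.shape)  # (2, 3, 2)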
385,092 | def process_bytecode(link_refs: Dict[str, Any], bytecode: bytes) -> str:
all_offsets = [y for x in link_refs.values() for y in x.values()]
validate_link_ref_fns = (
validate_link_ref(ref["start"] * 2, ref["length"] * 2)
for ref in concat(all_offsets)
)
pipe(bytecode, *validate_link_ref_fns)
link_fns = (
replace_link_ref_in_bytecode(ref["start"] * 2, ref["length"] * 2)
for ref in concat(all_offsets)
)
processed_bytecode = pipe(bytecode, *link_fns)
return add_0x_prefix(processed_bytecode) | Replace link_refs in bytecode with 0's. |
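Illustratively, zeroing a single link reference works roughly as below; the placeholder name, offsets, and surrounding bytecode are hypothetical, and this only sketches what the replace_link_ref_in_bytecode step is expected to do.
# hypothetical: one unlinked placeholder of 20 bytes starting at byte offset 2
placeholder = '__MyLibrary' + '_' * 29          # 40 hex characters = 20 bytes
bytecode = '6101' + placeholder + '610222'
start, length = 2 * 2, 20 * 2                   # byte offsets are doubled into hex-character offsets
zeroed = bytecode[:start] + '0' * length + bytecode[start + length:]
# zeroed == '6101' + '0' * 40 + '610222'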
385,093 | def delete_chat_sticker_set(self, chat_id):
result = apihelper.delete_chat_sticker_set(self.token, chat_id)
return result | Use this method to delete a group sticker set from a supergroup. The bot must be an administrator in the chat
for this to work and must have the appropriate admin rights. Use the field can_set_sticker_set
optionally returned in getChat requests to check if the bot can use this method. Returns True on success.
:param chat_id: Unique identifier for the target chat or username of the target supergroup
(in the format @supergroupusername)
:return: |
385,094 | def get(self):
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
return task.taskid | Get a task from queue when bucket available |
385,095 | def format_option(self, ohi):
lines = []
choices = .format(ohi.choices) if ohi.choices else
arg_line = (
.format(args=self._maybe_cyan(.join(ohi.display_args)),
dflt=self._maybe_green(.format(choices, ohi.default))))
lines.append(arg_line)
indent =
lines.extend([.format(indent, s) for s in wrap(ohi.help, 76)])
if ohi.deprecated_message:
lines.append(self._maybe_red(.format(indent, ohi.deprecated_message)))
if ohi.removal_hint:
lines.append(self._maybe_red(.format(indent, ohi.removal_hint)))
return lines | Format the help output for a single option.
:param OptionHelpInfo ohi: Extracted information for option to print
:return: Formatted help text for this option
:rtype: list of string |
385,096 | def make_clean_visible_file(i_chunk, clean_visible_path):
_clean = open(clean_visible_path, )
_clean.write()
_clean.write()
for idx, si in enumerate(i_chunk):
if si.stream_id is None:
stream_id =
else:
stream_id = si.stream_id
doc = lxml.etree.Element("FILENAME", stream_id=stream_id)
if si.body and si.body.clean_visible:
try:
doc.text = si.body.clean_visible.decode()
except ValueError:
doc.text = drop_invalid_and_upper_utf8_chars(
si.body.clean_visible.decode())
except Exception, exc:
logger.critical(traceback.format_exc(exc))
logger.critical(,
si.stream_id)
logger.critical(repr(si.body.clean_visible))
logger.critical(, si.stream_id)
raise
else:
doc.text =
_clean.write(lxml.etree.tostring(doc, encoding=))
_clean.write()
_clean.close()
logger.info(clean_visible_path)
| make a temp file of clean_visible text
385,097 | def add(self, pattern, method=None, call=None, name=None):
if not pattern.endswith('/'):
pattern += '/'
parts = tuple(pattern.split('/')[1:])
node = self._routes
for part in parts:
node = node.setdefault(part, {})
if method is None:
node['GET'] = call
elif isinstance(method, str):
node[method.upper()] = call
else:
for m in method:
node[m.upper()] = call
if name is not None:
self._reverse[name] = pattern | Add a url pattern.
Args:
pattern (:obj:`str`): URL pattern to add. This is usually '/'
separated path. Parts of the URL can be parameterised using
curly braces.
Examples: "/", "/path/to/resource", "/resoures/{param}"
method (:obj:`str`, :obj:`list` of :obj:`str`, optional): HTTP
methods for the path specied. By default, GET method is added.
Value can be either a single method, by passing a string, or
multiple methods, by passing a list of strings.
call (callable): Callable corresponding to the url pattern and the
HTTP method specified.
name (:obj:`str`): Name for the pattern that can be used for
reverse matching
Note:
A trailing '/' is always assumed in the pattern. |
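A usage sketch following the conventions in the docstring; the `router` instance and the handler functions are hypothetical.
def list_users(request): ...
def get_user(request, id): ...

router.add('/users', method='GET', call=list_users)
router.add('/users/{id}', method=['GET', 'DELETE'], call=get_user, name='user-detail')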
385,098 | def start_sctp_server(self, ip, port, name=None, timeout=None, protocol=None, family=):
self._start_server(SCTPServer, ip, port, name, timeout, protocol, family) | Starts a new SCTP server on the given `ip` and `port`.
`family` can be either ipv4 (default) or ipv6.
pysctp (https://github.com/philpraxis/pysctp) needs to be installed on your system.
Server can be given a `name`, default `timeout` and a `protocol`.
Notice that you have to use the `Accept Connection` keyword for the server to
receive connections.
Examples:
| Start SCTP server | 10.10.10.2 | 53 |
| Start SCTP server | 10.10.10.2 | 53 | Server1 |
| Start SCTP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
| Start SCTP server | 10.10.10.2 | 53 | timeout=5 | |
385,099 | def _GetDirectory(self):
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return CPIODirectory(self._file_system, self.path_spec) | Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available. |