content (string, lengths 35 to 762k) | sha1 (string, lengths 40 to 40) | id (int64, 0 to 3.66M)
---|---|---|
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the currently checked language code is not found.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code) | 6e99dc7d280ea28c3240f76a70b57234b9da98d3 | 3,604 |
import numpy as np
def mean_square_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate MSE loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
MSE of given predictions
"""
return (1 / y_true.shape[0]) * (np.sum((y_true - y_pred) ** 2)) | a9dbbd2264cba04618531024ce7eaae0e7c76b8d | 3,605 |
def graduation_threshold(session):
"""get graduation threshold
url : "/user/graduation-threshold"
Args:
        session ([requests.session]): must already be logged in to webap!
Returns:
[requests.models.Response]: requests response
        returns False if any other error occurs
"""
# post it, it will return Aength.kuas.edu.tw cookie
Aength_login = session.post('https://webap.nkust.edu.tw/nkust/fnc.jsp',
data={'fncid': 'AG635'})
# get post data
try:
root = etree.HTML(Aength_login.text)
term_form_xpath = root.xpath('//input[@type="hidden"]')
term_form = {i.values()[1]: i.values()[-1] for i in term_form_xpath}
    except Exception:
return False
# final post
query_url = 'http://Aength.kuas.edu.tw/AUPersonQ.aspx'
res = session.post(url=query_url, data=term_form)
return res | 18a1e3f1389995ee1c41fe49d16f047a3e4d8bf8 | 3,607 |
def q_conjugate(q):
"""
    quaternion conjugate
"""
w, x, y, z = q
return (w, -x, -y, -z) | bb7e28d0318702d7d67616ba2f7dc0e922e27c72 | 3,608 |
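A minimal usage sketch of the conjugate helper above; the (w, x, y, z) tuple layout is the one the function itself unpacks.

q = (0.5, 0.5, 0.5, 0.5)
assert q_conjugate(q) == (0.5, -0.5, -0.5, -0.5)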
def row_r(row, boxsize):
"""Cell labels in 'row' of Sudoku puzzle of dimension 'boxsize'."""
nr = n_rows(boxsize)
return range(nr * (row - 1) + 1, nr * row + 1) | b69e3995475b9ab62d9684c79d0d2473273487c7 | 3,609 |
from typing import Sequence
def get_set_from_word(permutation: Sequence[int], digit: Digit) -> set[int]:
"""
Returns a digit set from a given digit word,
based on the current permutation.
i.e. if:
permutation = [6, 5, 4, 3, 2, 1, 0]
digit = 'abcd'
then output = {6, 5, 4, 3}
"""
return {permutation[ord(char) - ord("a")] for char in digit} | 06058d96e94398f4a26613aefc8b5eeb92dec3e5 | 3,610 |
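A quick check that reproduces the docstring example above (assuming `Digit` is simply an alias for `str`, which is how the argument is used).

assert get_set_from_word([6, 5, 4, 3, 2, 1, 0], "abcd") == {6, 5, 4, 3}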
def get_avg_sentiment(sentiment):
"""
    Compiles and returns the average sentiment
of all titles and bodies of our query
"""
average = {}
for coin in sentiment:
# sum up all compound readings from each title & body associated with the
# coin we detected in keywords
average[coin] = sum([item['compound'] for item in sentiment[coin]])
# get the mean compound sentiment if it's not 0
if average[coin] != 0:
average[coin] = average[coin] / len(sentiment[coin])
return average | 6a79c3d4f28e18a33290ea86a912389a5b48b0f3 | 3,611 |
from urllib.parse import urlparse
def is_valid(url):
"""
Checks whether `url` is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme) | 80e981d4556b0de79a68994666ac56d8dbe9bdd5 | 3,612 |
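A short sanity check of `is_valid`; a bare word has neither scheme nor netloc, so it is rejected.

assert is_valid("https://example.com/page") is True
assert is_valid("not a url") is False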
def data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_total_size_put(uuid, tapi_common_capacity_value=None): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_total_size_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | b03bf60af4b98099a7b07278e72c72cf8b247823 | 3,613 |
import numpy as np
import powerlaw
def statistics_power_law_alpha(A_in):
"""
Compute the power law coefficient of the degree distribution of the input graph
Parameters
----------
A_in: sparse matrix or np.array
The input adjacency matrix.
Returns
-------
Power law coefficient
"""
degrees = A_in.sum(axis=0)
return powerlaw.Fit(degrees, xmin=max(np.min(degrees), 1)).power_law.alpha | 72f1ead0fa1e42752154ef1567ea8c10d407019d | 3,614 |
def common_gnuplot_settings():
""" common gnuplot settings. """
g_plot = Gnuplot.Gnuplot(persist=1)
# The following line is for rigor only. It seems to be assumed for .csv files
g_plot('set datafile separator \",\"')
g_plot('set ytics nomirror')
g_plot('set xtics nomirror')
g_plot('set xtics font ", 10"')
g_plot('set ytics font ", 10"')
g_plot('set tics out scale 1.0')
g_plot('set grid')
g_plot('set key out horiz')
g_plot('set key bot center')
g_plot('set key samplen 2 spacing .8 font ", 9"')
g_plot('set term png size 1200, 600')
g_plot('set title font ", 11"')
g_plot('set ylabel font ", 10"')
g_plot('set xlabel font ", 10"')
g_plot('set xlabel offset 0, 0.5')
g_plot('set xlabel "Elapsed Time (Seconds)"')
return(g_plot) | 0a8149c2fce1d7738b4c85bfb2eb82d32fa3c540 | 3,616 |
def video_feed_cam1():
"""Video streaming route. Put this in the src attribute of an img tag."""
cam = Camera(0)
return Response(gen(cam), mimetype='multipart/x-mixed-replace; boundary=frame') | ca11f40bb603dc45d2709b46719fc11bde526c55 | 3,617 |
def listDatasets(objects = dir()):
"""
Utility function to identify currently loaded datasets.
Function should be called with default parameters,
ie as 'listDatasets()'
"""
datasetlist = []
for item in objects:
try:
if eval(item + '.' + 'has_key("DATA")') == True:
datasetlist.append(item)
except AttributeError:
pass
return datasetlist | 061d7c9287c6166b3e7d55449be49db30400ce56 | 3,618 |
def _(node: FromReference, ctx: AnnotateContext) -> BoxType:
"""Check that the parent node had {node.name} as a valid reference. Raises
an error if not, else copy over the set of references.
"""
t = box_type(node.over)
ft = t.row.fields.get(node.name, None)
if not isinstance(ft, RowType):
raise ErrReference(
ErrType.INVALID_TABLE_REF, name=node.name, path=ctx.get_path(node.over)
)
return BoxType(node.name, ft) | f53c4f47d2027ae0a7fe59fb52f3fa48f463dda3 | 3,619 |
from typing import Mapping
from typing import Dict
def invert(d: Mapping):
"""
    invert a mapping's keys and values
:param d:
:return:
"""
r: Dict = {}
for k, v in d.items():
r[v] = of(r[v], k) if v in r else k
return r | 65a49d107b4277a97035becb7d8be3cc1098544f | 3,620 |
def data_dir() -> str:
"""The directory where result data is written to"""
return '/tmp/bingads/' | 7ace22372ad0043eb6492e028687e31e78d8a85f | 3,621 |
def intensity_weighted_dispersion(data, x0=0.0, dx=1.0, rms=None,
threshold=None, mask_path=None, axis=0):
"""
Returns the intensity weighted velocity dispersion (second moment).
"""
# Calculate the intensity weighted velocity first.
m1 = intensity_weighted_velocity(data=data, x0=x0, dx=dx, rms=rms,
threshold=threshold, mask_path=mask_path,
axis=axis)[0]
# Rearrange the data to what we need.
mask = _read_mask_path(mask_path=mask_path, data=data)
data = np.moveaxis(data, axis, 0)
mask = np.moveaxis(mask, axis, 0)
mask = _threshold_mask(data=data, mask=mask, rms=rms, threshold=threshold)
npix = np.sum(mask, axis=0)
weights = get_intensity_weights(data, mask)
npix_mask = np.where(npix > 1, 1, np.nan)
vpix = dx * np.arange(data.shape[0]) + x0
vpix = vpix[:, None, None] * np.ones(data.shape)
# Intensity weighted dispersion.
m1 = m1[None, :, :] * np.ones(data.shape)
m2 = np.sum(weights * (vpix - m1)**2, axis=0) / np.sum(weights, axis=0)
m2 = np.sqrt(m2)
if rms is None:
return m2 * npix_mask, None
# Calculate the uncertainties.
dm2 = ((vpix - m1)**2 - m2**2) * rms / np.sum(weights, axis=0)
dm2 = np.sqrt(np.sum(dm2**2, axis=0)) / 2. / m2
return m2 * npix_mask, dm2 * npix_mask | dd3539ac2f48a1e9a6ceacc8262dc3a8e3646205 | 3,622 |
import requests
def vrtnws_api_request(service, path, params=None):
"""Sends a request to the VRTNWS API endpoint"""
url = BASE_URL_VRTNWS_API.format(service, path)
try:
res = requests.get(url, params)
try:
return res.json()
except ValueError:
return None
except requests.RequestException as ex:
print("VRTNWS API request '{}' failed:".format(url), ex)
return None | 9dad4e372348ff699762a5eaa42c9c1e7700e18e | 3,623 |
from typing import Type
def test_coreapi_schema(sdk_client_fs: ADCMClient, tested_class: Type[BaseAPIObject]):
"""Test coreapi schema"""
def _get_params(link):
result = {}
for field in link.fields:
result[field.name] = True
return result
schema_obj = sdk_client_fs._api.schema
with allure.step(f'Get {tested_class.__name__} schema objects'):
for path in tested_class.PATH:
assert path in schema_obj.data
schema_obj = schema_obj[path]
params = _get_params(schema_obj.links['list'])
with allure.step(f'Check if filters are acceptable for coreapi {tested_class.__name__}'):
for _filter in tested_class.FILTERS:
expect(
_filter in params,
f"Filter {_filter} should be acceptable for coreapi in class {tested_class.__name__}",
)
assert_expectations() | 8c205a2055ede6941b549112ef0893c37367ad71 | 3,624 |
import numpy as N
def augment_tensor(matrix, ndim=None):
"""
Increase the dimensionality of a tensor,
splicing it into an identity matrix of a higher
dimension. Useful for generalizing
transformation matrices.
"""
s = matrix.shape
if ndim is None:
ndim = s[0]+1
arr = N.identity(ndim)
arr[:s[0],:s[1]] = matrix
return arr | 89d6ea36d016f8648cdc62852e55351a965eae02 | 3,625 |
def ping_redis() -> bool:
"""Call ping on Redis."""
try:
return REDIS.ping()
except (redis.exceptions.ConnectionError, redis.exceptions.ResponseError):
LOGGER.warning('Redis Ping unsuccessful')
return False | 0584109627470629141fccadc87075bfbb82e753 | 3,626 |
def calculate_pool_reward(height: uint32) -> uint64:
"""
Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
is solo farming, they act as the pool, and therefore earn the entire block reward.
These halving events will not be hit at the exact times due to fluctuations in difficulty. They will likely
come early, if the network space and VDF rates increase continuously.
    We start off at 2,199,023,255,552, which is 2^41 (about 2.2 heather), and halve in year 2, then halve again in year 4, then
    halve again in year 8, etc. After 5 halvings we drop to zero, but don't panic, that's year 64.
    A right shift (>>) is used to halve the reward.
"""
if height == 0:
return uint64(int((7 / 8) * (_base_reward << 16)))
elif height < 1 * _blocks_per_year:
return uint64(int((7 / 8) * _base_reward))
elif height < 3 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 1)))
elif height < 7 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 2)))
elif height < 15 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 3)))
elif height < 31 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 4)))
elif height < 63 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 5)))
else:
return uint64(0) | 9493c9b422eb58d429b586d3ea19da4e537a0d71 | 3,627 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
username = request.form.get("username").strip()
password = request.form.get("password")
# Ensure username was submitted
if not username:
return apology("must provide username", 403)
# Ensure password was submitted
elif not password:
return apology("must provide password", 403)
        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = :username", username=username)
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | dcb37d57fc30d399c397c472d619b157575556ec | 3,629 |
def get_string_property(device_t, property):
""" Search the given device for the specified string property
@param device_t Device to search
@param property String to search for.
@return Python string containing the value, or None if not found.
"""
key = cf.CFStringCreateWithCString(
kCFAllocatorDefault,
property.encode("mac_roman"),
kCFStringEncodingMacRoman
)
CFContainer = iokit.IORegistryEntryCreateCFProperty(
device_t,
key,
kCFAllocatorDefault,
0
);
output = None
if CFContainer:
output = cf.CFStringGetCStringPtr(CFContainer, 0)
return output | fb08c31cd0bdbc3198b23a1c7d37d15d932a158f | 3,630 |
from typing import Callable
from typing import Any
def gamma_from_delta(
fn: Callable[..., Tensor], *, create_graph: bool = False, **params: Any
) -> Tensor:
"""Computes and returns gamma of a derivative from the formula of delta.
Note:
The keyword argument ``**params`` should contain at least one of
the following combinations:
- ``spot``
- ``moneyness`` and ``strike``
- ``log_moneyness`` and ``strike``
Args:
fn (callable): Function to calculate delta.
create_graph (bool, default=False): If ``True``,
graph of the derivative will be constructed,
allowing to compute higher order derivative products.
**params: Parameters passed to ``fn``.
Returns:
torch.Tensor
"""
return delta(pricer=fn, create_graph=create_graph, **params) | 508cb5df3cb19c5406ad190dbb0562140eab097a | 3,631 |
import re
def clean_filename(string: str) -> str:
"""
    Strip illegal characters from the filename to avoid errors when saving to the filesystem.
:param string:
:return:
"""
string = string.replace(':', '_').replace('/', '_').replace('\x00', '_')
string = re.sub('[\n\\\*><?\"|\t]', '', string)
return string.strip() | 805023382e30c0d0113715cdf6c7bcbc8b383066 | 3,632 |
def homework(request, id_class):
"""
View for listing the specified class' assignments
"""
cl = Classes.objects.get(pk=id_class)
assm = Assignments.objects.all().filter(a_class=cl)
return render_to_response("assignments.html", {"assignments": assm, "class": cl}, context_instance=RequestContext(request)) | be828d4519b73cdbd6f8f34b0aa10eda1299c0f0 | 3,633 |
def get_digits_from_right_to_left(number):
"""Return digits of an integer excluding the sign."""
number = abs(number)
if number < 10:
return (number, )
lst = list()
while number:
number, digit = divmod(number, 10)
lst.insert(0, digit)
return tuple(lst) | 6b5626ad42313534d207c75d2713d0c9dc97507c | 3,635 |
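A small check of the digit helper above; the name refers to the extraction order, while the returned tuple is ordered most-significant digit first.

assert get_digits_from_right_to_left(-4023) == (4, 0, 2, 3)
assert get_digits_from_right_to_left(7) == (7,)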
def make_diamond(block):
"""
Return a block after rotating counterclockwise 45° to form a diamond
"""
result = []
upper = upper_triangle(block)
upper = [i.rjust(size-1) for i in upper]
upper_form = []
upper_length = len(upper)
for i in range(upper_length):
upper_form.append(diag_line(upper))
upper = upper_triangle(upper)
upper = [k.rjust(size-1-i-1) for k in upper]
upper_form = [' '.join(i) for i in upper_form]
upper_form = upper_form[::-1]
diag = diag_line(block)
diag = ' '.join(diag)
lower = lower_triangle(block)
lower = [i.ljust(size-1) for i in lower]
lower_form = []
lower_length = len(lower)
for i in range(lower_length):
lower_form.append(diag_line(lower))
lower = lower_triangle(lower)
lower = [k.ljust(size-1-i-1) for k in lower]
lower_form = [' '.join(i) for i in lower_form]
max_length = len(diag)
upper_form = [i.center(max_length) for i in upper_form]
lower_form = [i.center(max_length) for i in lower_form]
result += upper_form
result.append(diag)
result += lower_form
return result | e8e2afbdd34465a03e9db53169bf5c6bec1a375a | 3,636 |
def do_sitelist(parser, token):
"""
    Allows a template-level call to get a list of all the active sites.
"""
return SitelistNode() | 208288989469a57e141f64be37d595fe8a1f84d6 | 3,637 |
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result | 6408fe02facd709a0d449cb87a2d963a0d92a007 | 3,638 |
def build_null_stop_time_series(feed, date_label='20010101', freq='5Min',
*, split_directions=False):
"""
Return a stop time series with the same index and hierarchical columns
as output by :func:`compute_stop_time_series_base`,
but fill it full of null values.
"""
start = date_label
end = pd.to_datetime(date_label + ' 23:59:00')
rng = pd.date_range(start, end, freq=freq)
inds = [
'num_trips',
]
sids = feed.stops.stop_id
if split_directions:
product = [inds, sids, [0, 1]]
names = ['indicator', 'stop_id', 'direction_id']
else:
product = [inds, sids]
names = ['indicator', 'stop_id']
cols = pd.MultiIndex.from_product(product, names=names)
return pd.DataFrame([], index=rng, columns=cols).sort_index(
axis=1, sort_remaining=True) | da638448f7f5b88d6e23e487f2ecdbc0e72a6607 | 3,639 |
import numpy as np
from scipy import signal
def yulewalk(order, F, M):
"""Recursive filter design using a least-squares method.
[B,A] = YULEWALK(N,F,M) finds the N-th order recursive filter
coefficients B and A such that the filter:
B(z) b(1) + b(2)z^-1 + .... + b(n)z^-(n-1)
---- = -------------------------------------
A(z) 1 + a(1)z^-1 + .... + a(n)z^-(n-1)
matches the magnitude frequency response given by vectors F and M.
The YULEWALK function performs a least squares fit in the time domain. The
denominator coefficients {a(1),...,a(NA)} are computed by the so called
"modified Yule Walker" equations, using NR correlation coefficients
computed by inverse Fourier transformation of the specified frequency
response H.
The numerator is computed by a four step procedure. First, a numerator
polynomial corresponding to an additive decomposition of the power
frequency response is computed. Next, the complete frequency response
corresponding to the numerator and denominator polynomials is evaluated.
Then a spectral factorization technique is used to obtain the impulse
response of the filter. Finally, the numerator polynomial is obtained by a
least squares fit to this impulse response. For a more detailed explanation
of the algorithm see [1]_.
Parameters
----------
order : int
Filter order.
F : array
Normalised frequency breakpoints for the filter. The frequencies in F
must be between 0.0 and 1.0, with 1.0 corresponding to half the sample
rate. They must be in increasing order and start with 0.0 and end with
1.0.
M : array
Magnitude breakpoints for the filter such that PLOT(F,M) would show a
plot of the desired frequency response.
References
----------
.. [1] B. Friedlander and B. Porat, "The Modified Yule-Walker Method of
ARMA Spectral Estimation," IEEE Transactions on Aerospace Electronic
Systems, Vol. AES-20, No. 2, pp. 158-173, March 1984.
Examples
--------
Design an 8th-order lowpass filter and overplot the desired
frequency response with the actual frequency response:
>>> f = [0, .6, .6, 1] # Frequency breakpoints
>>> m = [1, 1, 0, 0] # Magnitude breakpoints
>>> [b, a] = yulewalk(8, f, m) # Filter design using a least-squares method
"""
F = np.asarray(F)
M = np.asarray(M)
npt = 512
lap = np.fix(npt / 25).astype(int)
mf = F.size
npt = npt + 1 # For [dc 1 2 ... nyquist].
Ht = np.array(np.zeros((1, npt)))
nint = mf - 1
df = np.diff(F)
nb = 0
Ht[0][0] = M[0]
for i in range(nint):
if df[i] == 0:
nb = nb - int(lap / 2)
ne = nb + lap
else:
ne = int(np.fix(F[i + 1] * npt)) - 1
j = np.arange(nb, ne + 1)
if ne == nb:
inc = 0
else:
inc = (j - nb) / (ne - nb)
Ht[0][nb:ne + 1] = np.array(inc * M[i + 1] + (1 - inc) * M[i])
nb = ne + 1
Ht = np.concatenate((Ht, Ht[0][-2:0:-1]), axis=None)
n = Ht.size
n2 = np.fix((n + 1) / 2)
nb = order
nr = 4 * order
nt = np.arange(0, nr)
# compute correlation function of magnitude squared response
R = np.real(np.fft.ifft(Ht * Ht))
R = R[0:nr] * (0.54 + 0.46 * np.cos(np.pi * nt / (nr - 1))) # pick NR correlations # noqa
# Form window to be used in extracting the right "wing" of two-sided
# covariance sequence
Rwindow = np.concatenate(
(1 / 2, np.ones((1, int(n2 - 1))), np.zeros((1, int(n - n2)))),
axis=None)
A = polystab(denf(R, order)) # compute denominator
# compute additive decomposition
Qh = numf(np.concatenate((R[0] / 2, R[1:nr]), axis=None), A, order)
# compute impulse response
_, Ss = 2 * np.real(signal.freqz(Qh, A, worN=n, whole=True))
hh = np.fft.ifft(
        np.exp(np.fft.fft(Rwindow * np.fft.ifft(np.log(Ss, dtype=complex))))
)
B = np.real(numf(hh[0:nr], A, nb))
return B, A | d3e0f709d303c7432854d4975c858b5968245084 | 3,640 |
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
user_obj = await tbot.get_entity(previous_message.sender_id)
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
await event.reply("Pass the user's username, id or reply!")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await tbot.get_entity(user_id)
return user_obj
try:
user_obj = await tbot.get_entity(user)
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return user_obj | 600fc6d1e73f4637f51479d2e2ebabaa93723b34 | 3,641 |
import json
def parse_contest_list(json_file):
"""Parse a list of Contests from a JSON file.
Note:
Template for Contest format in JSON in contest_template.json
"""
with open(json_file, 'r') as json_data:
data = json.load(json_data)
contests = []
for contest in data:
contest_ballots = data[contest]['contest_ballots']
tally = data[contest]['tally']
num_winners = data[contest]['num_winners']
reported_winners = data[contest]['reported_winners']
contest_type = ContestType[data[contest]['contest_type']]
contests.append(Contest(contest_ballots, tally, num_winners, reported_winners, contest_type))
return contests | 637da8b03fe975aa2183d78eaa3704d57d66680d | 3,642 |
def get_image_blob(roidb, mode):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
if mode == 'train' or mode == 'val':
with open(roidb['image'], 'rb') as f:
data = f.read()
data = np.frombuffer(data, dtype='uint8')
img = cv2.imdecode(data, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gt_boxes = roidb['boxes']
gt_label = roidb['gt_classes']
# resize
if mode == 'train':
img, im_scale = _resize(img, target_size=800, max_size=1333)
need_gt_boxes = gt_boxes.copy()
need_gt_boxes[:, :4] *= im_scale
img, need_gt_boxes, need_gt_label = _rotation(
img, need_gt_boxes, gt_label, prob=1.0, gt_margin=1.4)
else:
img, im_scale = _resize(img, target_size=1000, max_size=1778)
need_gt_boxes = gt_boxes
need_gt_label = gt_label
img = img.astype(np.float32, copy=False)
img = img / 255.0
mean = np.array(cfg.pixel_means)[np.newaxis, np.newaxis, :]
std = np.array(cfg.pixel_std)[np.newaxis, np.newaxis, :]
img -= mean
img /= std
img = img.transpose((2, 0, 1))
return img, im_scale, need_gt_boxes, need_gt_label | 35fdea333b8245294c16907e8c26c16164fb4906 | 3,643 |
def rx_weight_fn(edge):
"""A function for returning the weight from the common vertex."""
return float(edge["weight"]) | 4c405ffeae306a3920a6e624c748fb00cc1ee8ac | 3,644 |
def image_inputs(images_and_videos, data_dir, text_tmp_images):
"""Generates a list of input arguments for ffmpeg with the given images."""
include_cmd = []
# adds images as video starting on overlay time and finishing on overlay end
img_formats = ['gif', 'jpg', 'jpeg', 'png']
for ovl in images_and_videos:
filename = ovl['image']
# checks if overlay is image or video
is_img = False
for img_fmt in img_formats:
is_img = filename.lower().endswith(img_fmt)
if is_img:
break
# treats image overlay
if is_img:
duration = str(float(ovl['end_time']) - float(ovl['start_time']))
is_gif = filename.lower().endswith('.gif')
has_fade = (float(ovl.get('fade_in_duration', 0)) +
float(ovl.get('fade_out_duration', 0))) > 0
# A GIF with no fade is treated as an animated GIF should.
# It works even if it is not animated.
# An animated GIF cannot have fade in or out effects.
if is_gif and not has_fade:
include_args = ['-ignore_loop', '0']
else:
include_args = ['-f', 'image2', '-loop', '1']
include_args += ['-itsoffset', str(ovl['start_time']), '-t', duration]
# GIFs should have a special input decoder for FFMPEG.
if is_gif:
include_args += ['-c:v', 'gif']
include_args += ['-i']
include_cmd += include_args + ['%s/assets/%s' % (data_dir,
filename)]
# treats video overlays
else:
duration = str(float(ovl['end_time']) - float(ovl['start_time']))
include_args = ['-itsoffset', str(ovl['start_time']), '-t', duration]
include_args += ['-i']
include_cmd += include_args + ['%s/assets/%s' % (data_dir,
filename)]
# adds texts as video starting and finishing on their overlay timing
for img2 in text_tmp_images:
duration = str(float(img2['end_time']) - float(img2['start_time']))
include_args = ['-f', 'image2', '-loop', '1']
include_args += ['-itsoffset', str(img2['start_time']), '-t', duration]
include_args += ['-i']
include_cmd += include_args + [str(img2['path'])]
return include_cmd | b210687d00edc802cbf362e4394b61e0c0989095 | 3,645 |
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
def generate_graph(data_sets: pd.DataFrame, data_source: str, data_state: str, toggle_new_case: bool, year: int) -> tuple[px.line, px.bar]:
"""Takes in the inputs and returns a graph object. The inputs are the source, data, location and year.
The graph is a prediction of the sentiment from the comments as a function of time. Another trace of cases can be displayed as well.
We can also have graphs directly comparing # of cases with sentiment by having cases on the x and its sentiment on that day on the y.
Depending on the input, a graph that takes into account source, state(how much the model is trained), show cases(toggle on/off), location and year.
The user can choose which type of graph to generate.
Returns a line graph and a bar chart.
"""
main_graph = px.line(
data_sets[data_source],
x="Date",
y="New Cases",
)
if toggle_new_case:
main_graph.add_trace(
go.Line(
x=data_sets[data_source].loc[:, 'Date'],
y=data_sets[data_source].loc[:, 'New Cases']
)
)
stat_data_sets = pd.DataFrame(
index=["Max", "Min", "Mean"],
data={
"Cases": [
data_sets[data_source].loc[:, "New Cases"].max(),
data_sets[data_source].loc[:, "New Cases"].min(),
data_sets[data_source].loc[:, "New Cases"].mean(),
]
},
)
stats_graph = px.bar(
stat_data_sets,
x=["Max", "Min", "Mean"],
y="Cases",
)
return main_graph, stats_graph | 437e34419e187ba7ae86bd50c57844a5e55a4bf7 | 3,646 |
def f_not_null(seq):
"""过滤非空值"""
seq = filter(lambda x: x not in (None, '', {}, [], ()), seq)
return seq | a88eab0a03ef5c1db3ceb4445bb0d84a54157875 | 3,647 |
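A short example of the filter above; note that `filter()` returns a lazy iterator, so it is materialised with `list()` here.

assert list(f_not_null([0, 1, '', None, 'a', [], False])) == [0, 1, 'a', False]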
def flickr(name, args, options, content, lineno,
contentOffset, blockText, state, stateMachine):
""" Restructured text extension for inserting flickr embedded slideshows """
if len(content) == 0:
return
string_vars = {
'flickid': content[0],
'width': 400,
'height': 300,
'extra': ''
}
extra_args = content[1:] # Because content[0] is ID
extra_args = [ea.strip().split("=") for ea in extra_args] # key=value
extra_args = [ea for ea in extra_args if len(ea) == 2] # drop bad lines
extra_args = dict(extra_args)
if 'width' in extra_args:
string_vars['width'] = extra_args.pop('width')
if 'height' in extra_args:
string_vars['height'] = extra_args.pop('height')
if extra_args:
params = [PARAM % (key, extra_args[key]) for key in extra_args]
string_vars['extra'] = "".join(params)
return [nodes.raw('', CODE % (string_vars), format='html')] | 9b41c558dd5f5ef7be1aff1e9567f1c5d5b26f31 | 3,648 |
def serialize(key):
"""
Return serialized version of key name
"""
s = current_app.config['SERIALIZER']
return s.dumps(key) | dba2202e00960420252c00120333b142d3a8f216 | 3,649 |
def calc_disordered_regions(limits, seq):
"""
Returns the sequence of disordered regions given a string of
starts and ends of the regions and the sequence.
Example
-------
limits = 1_5;8_10
seq = AHSEDQNAAANTH...
This will return `AHSED_AAA`
"""
seq = seq.replace(' ', '')
regions = [tuple(region.split('_')) for region
in limits.split(';')]
return '_'.join([seq[int(i)-1:int(j)] for i,j in regions]) | 2c9a487a776a742470deb98e6f471b04b23a0ff7 | 3,650 |
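The docstring example above, written as a runnable assertion.

assert calc_disordered_regions('1_5;8_10', 'AHSEDQNAAANTH') == 'AHSED_AAA'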
import random
def person_attribute_string_factory(sqla):
"""Create a fake person attribute that is enumerated."""
create_multiple_attributes(sqla, 5, 1)
people = sqla.query(Person).all()
if not people:
create_multiple_people(sqla, random.randint(3, 6))
people = sqla.query(Person).all()
current_person = random.choice(people)
nonenumerated_values = sqla.query(Attribute).all()
if not nonenumerated_values:
create_multiple_nonenumerated_values(sqla, random.randint(3, 6))
nonenumerated_values = sqla.query(Attribute).all()
current_nonenumerated_value = random.choice(nonenumerated_values)
person_attribute = {
'personId': current_person.id,
'attributeId': current_nonenumerated_value.id,
'stringValue': rl_fake().first_name()
}
return person_attribute | b2c3f632c0671b41044e143c5aab32abf928a362 | 3,651 |
def forward_pass(log_a, log_b, logprob_s0):
"""Computing the forward pass of Baum-Welch Algorithm.
By employing log-exp-sum trick, values are computed in log space, including
the output. Notation is adopted from https://arxiv.org/abs/1910.09588.
`log_a` is the likelihood of discrete states, `log p(s[t] | s[t-1], x[t-1])`,
`log_b` is the likelihood of observations, `log p(x[t], z[t] | s[t])`,
and `logprob_s0` is the likelihood of initial discrete states, `log p(s[0])`.
Forward pass calculates the filtering likelihood of `log p(s_t | x_1:t)`.
Args:
log_a: a float `Tensor` of size [batch, num_steps, num_categ, num_categ]
stores time dependent transition matrices, `log p(s[t] | s[t-1], x[t-1])`.
`A[i, j]` is the transition probability from `s[t-1]=j` to `s[t]=i`.
log_b: a float `Tensor` of size [batch, num_steps, num_categ] stores time
dependent emission matrices, 'log p(x[t](, z[t])| s[t])`.
logprob_s0: a float `Tensor` of size [num_categ], initial discrete states
probability, `log p(s[0])`.
Returns:
forward_pass: a float 3D `Tensor` of size [batch, num_steps, num_categ]
stores the forward pass probability of `log p(s_t | x_1:t)`, which is
normalized.
normalizer: a float 2D `Tensor` of size [batch, num_steps] stores the
normalizing probability, `log p(x_t | x_1:t-1)`.
"""
num_steps = log_a.get_shape().with_rank_at_least(3).dims[1].value
tas = [tf.TensorArray(tf.float32, num_steps, name=n)
for n in ["forward_prob", "normalizer"]]
# The function will return normalized forward probability and
# normalizing constant as a list, [forward_logprob, normalizer].
init_updates = utils.normalize_logprob(
logprob_s0[tf.newaxis, :] + log_b[:, 0, :], axis=-1)
tas = utils.write_updates_to_tas(tas, 0, init_updates)
prev_prob = init_updates[0]
init_state = (1,
prev_prob,
tas)
def _cond(t, *unused_args):
return t < num_steps
def _steps(t, prev_prob, fwd_tas):
"""One step forward in iterations."""
bi_t = log_b[:, t, :] # log p(x[t+1] | s[t+1])
aij_t = log_a[:, t, :, :] # log p(s[t+1] | s[t], x[t])
current_updates = tf.math.reduce_logsumexp(
bi_t[:, :, tf.newaxis] + aij_t + prev_prob[:, tf.newaxis, :],
axis=-1)
current_updates = utils.normalize_logprob(current_updates, axis=-1)
prev_prob = current_updates[0]
fwd_tas = utils.write_updates_to_tas(fwd_tas, t, current_updates)
return (t+1, prev_prob, fwd_tas)
_, _, tas_final = tf.while_loop(
_cond,
_steps,
init_state
)
# transpose to [batch, step, state]
forward_prob = tf.transpose(tas_final[0].stack(), [1, 0, 2])
normalizer = tf.transpose(tf.squeeze(tas_final[1].stack(), axis=[-1]), [1, 0])
return forward_prob, normalizer | e3c6cc193ea01bd308c821de31c43ab42c9fea69 | 3,652 |
def TonnetzToString(Tonnetz):
"""TonnetzToString: List -> String."""
TonnetzString = getKeyByValue(dictOfTonnetze, Tonnetz)
return TonnetzString | db878b4e0b857d08171653d53802fb41e6ca46a4 | 3,653 |
def get_mc_calibration_coeffs(tel_id):
"""
    Get the calibration coefficients from the MC data file for the
    data. This is a hack (until we have a real data structure for the
    calibrated data); it should move into `ctapipe.io.hessio_event_source`.
returns
-------
(peds,gains) : arrays of the pedestal and pe/dc ratios.
"""
peds = pyhessio.get_pedestal(tel_id)[0]
gains = pyhessio.get_calibration(tel_id)[0]
return peds, gains | 0bd202f608ff062426d8cdce32677c2c2f2583de | 3,654 |
from math import exp, pi, sqrt
def bryc(K):
"""
    Approximate the cumulative normal distribution function using the uniform
    approximation proposed by Bryc (2002). The absolute error is below 1.9e-5.
    :param K: any real value, from negative to positive infinity
    :return: approximation of the cumulative normal distribution integral
"""
X = abs(K)
cnd = 1.-(X*X + 5.575192*X + 12.77436324) * exp(-X*X/2.)/(sqrt(2.*pi)*pow(X, 3) + 14.38718147*pow(X, 2) + 31.53531977*X + 2*12.77436324)
if K < 0:
cnd = 1. - cnd
return cnd | e2feb6fa7f806294cef60bb5afdc4e70c95447f8 | 3,655 |
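A sanity check of the approximation above: at K = 0 the standard normal CDF is exactly 0.5, and the formula reproduces it.

assert abs(bryc(0.0) - 0.5) < 1e-12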
def grid_square_neighbors_1d_from(shape_slim):
"""
From a (y,x) grid of coordinates, determine the 8 neighors of every coordinate on the grid which has 8
neighboring (y,x) coordinates.
Neighbor indexes use the 1D index of the pixel on the masked grid counting from the top-left right and down.
For example:
x x x x x x x x x x
    x x x x x x x x x x      This is an example mask.Mask2D, where:
    x x x x x x x x x x
    x x x 0 1 2 3 x x x      x = `True` (Pixel is masked and excluded from the grid)
    x x x 4 5 6 7 x x x      o = `False` (Pixel is not masked and included in the grid)
x x x 8 9 10 11 x x x
x x x x x x x x x x
x x x x x x x x x x
x x x x x x x x x x
x x x x x x x x x x
    On the grid above, the grid cells in 1D indexes 5 and 6 have 8 neighboring pixels and their entries in the
grid_neighbors_1d array will be:
grid_neighbors_1d[0,:] = [0, 1, 2, 4, 6, 8, 9, 10]
grid_neighbors_1d[1,:] = [1, 2, 3, 5, 7, 9, 10, 11]
The other pixels will be included in the grid_neighbors_1d array, but correspond to `False` entries in
grid_has_neighbors and be omitted from calculations that use the neighbor array.
Parameters
----------
shape_slim : np.ndarray
The irregular 1D grid of (y,x) coordinates over which a square uniform grid is overlaid.
pixel_scales : (float, float)
The pixel scale of the uniform grid that laid over the irregular grid of (y,x) coordinates.
"""
shape_of_edge = int(np.sqrt(shape_slim))
has_neighbors = np.full(shape=shape_slim, fill_value=False)
neighbors_1d = np.full(shape=(shape_slim, 8), fill_value=-1.0)
index = 0
for y in range(shape_of_edge):
for x in range(shape_of_edge):
if y > 0 and x > 0 and y < shape_of_edge - 1 and x < shape_of_edge - 1:
neighbors_1d[index, 0] = index - shape_of_edge - 1
neighbors_1d[index, 1] = index - shape_of_edge
neighbors_1d[index, 2] = index - shape_of_edge + 1
neighbors_1d[index, 3] = index - 1
neighbors_1d[index, 4] = index + 1
neighbors_1d[index, 5] = index + shape_of_edge - 1
neighbors_1d[index, 6] = index + shape_of_edge
neighbors_1d[index, 7] = index + shape_of_edge + 1
has_neighbors[index] = True
index += 1
return neighbors_1d, has_neighbors | 72c0009915b397005c9b9afb52dfb2fa20c1c99c | 3,656 |
def get_total_entries(df, pdbid, cdr):
"""
Get the total number of entries of the particular CDR and PDBID in the database
:param df: dataframe.DataFrame
:rtype: int
"""
return len(get_all_entries(df, pdbid, cdr)) | fec351a6d23fd73e082d3b5c066fa9d367629dea | 3,657 |
def _gf2mulxinvmod(a,m):
"""
Computes ``a * x^(-1) mod m``.
*NOTE*: Does *not* check whether `a` is smaller in degree than `m`.
Parameters
----------
a, m : integer
Polynomial coefficient bit vectors.
Polynomial `a` should be smaller degree than `m`.
Returns
-------
c : integer
Polynomial coefficient bit vector of ``c = a * x^(-1) mod m``.
"""
c = (a ^ ((a&1)*m)) >> 1
return c | e60e99cd7ebfd3df795cdad5d712f1278b7b9a0f | 3,658 |
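A worked example for the helper above with m = x^3 + x + 1 (0b1011): the inverse of x is x^2 + 1, because (x^2 + 1) * x = x^3 + x, which reduces to 1 mod m.

assert _gf2mulxinvmod(0b001, 0b1011) == 0b101   # 1 * x^(-1) = x^2 + 1
assert _gf2mulxinvmod(0b010, 0b1011) == 0b001   # x * x^(-1) = 1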
def find_cuda_family_config(repository_ctx, script_path, cuda_libraries):
"""Returns CUDA config dictionary from running find_cuda_config.py"""
python_bin = repository_ctx.which("python3")
exec_result = execute(repository_ctx, [python_bin, script_path] + cuda_libraries)
if exec_result.return_code:
errmsg = err_out(exec_result)
auto_configure_fail("Failed to run find_cuda_config.py: {}".format(errmsg))
# Parse the dict from stdout.
return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()]) | e71c14528946c8815bef2355cbcc797bbc03bb39 | 3,659 |
def _prediction_feature_weights(booster, dmatrix, n_targets,
feature_names, xgb_feature_names):
""" For each target, return score and numpy array with feature weights
on this prediction, following an idea from
http://blog.datadive.net/interpreting-random-forests/
"""
# XGBClassifier does not have pred_leaf argument, so use booster
leaf_ids, = booster.predict(dmatrix, pred_leaf=True)
xgb_feature_names = {f: i for i, f in enumerate(xgb_feature_names)}
tree_dumps = booster.get_dump(with_stats=True)
assert len(tree_dumps) == len(leaf_ids)
target_feature_weights = partial(
_target_feature_weights,
feature_names=feature_names, xgb_feature_names=xgb_feature_names)
if n_targets > 1:
# For multiclass, XGBoost stores dumps and leaf_ids in a 1d array,
# so we need to split them.
scores_weights = [
target_feature_weights(
leaf_ids[target_idx::n_targets],
tree_dumps[target_idx::n_targets],
) for target_idx in range(n_targets)]
else:
scores_weights = [target_feature_weights(leaf_ids, tree_dumps)]
return scores_weights | 54814b1d0d2ce0ca66e7bdd5c5933477c5c8c169 | 3,660 |
from typing import Sequence
from typing import Collection
from typing import List
def group_by_repo(repository_full_name_column_name: str,
repos: Sequence[Collection[str]],
df: pd.DataFrame,
) -> List[np.ndarray]:
"""Group items by the value of their "repository_full_name" column."""
if df.empty:
return [np.array([], dtype=int)] * len(repos)
df_repos = df[repository_full_name_column_name].values.astype("S")
repos = [
np.array(repo_group if not isinstance(repo_group, set) else list(repo_group), dtype="S")
for repo_group in repos
]
unique_repos, imap = np.unique(np.concatenate(repos), return_inverse=True)
if len(unique_repos) <= len(repos):
matches = np.array([df_repos == repo for repo in unique_repos])
pos = 0
result = []
for repo_group in repos:
step = len(repo_group)
cols = imap[pos:pos + step]
group = np.flatnonzero(np.sum(matches[cols], axis=0, dtype=bool))
pos += step
result.append(group)
else:
result = [
np.flatnonzero(np.in1d(df_repos, repo_group))
for repo_group in repos
]
return result | 8e10e32d1a1bb8b31e25000dc63be0b3cd1645d0 | 3,661 |
def _row_or_col_is_header(s_count, v_count):
"""
Utility function for subdivide
Heuristic for whether a row/col is a header or not.
"""
if s_count == 1 and v_count == 1:
return False
else:
return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3. | 525b235fe7027524658f75426b6dbc9c8e334232 | 3,662 |
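Two illustrative calls of the heuristic above: a row/column counts as a header once roughly two thirds of its cells are strings, with the lone string/value pair special-cased.

assert _row_or_col_is_header(s_count=4, v_count=1) is True    # (4 + 1) / (1 + 4 + 1) = 5/6 >= 2/3
assert _row_or_col_is_header(s_count=1, v_count=1) is False   # single pair is never a header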
def values_hash(array, step=0):
"""
Return consistent hash of array values
:param array array: (n,) array with or without structure
:param uint64 step: optional step number to modify hash values
:returns: (n,) uint64 array
"""
cls, cast_dtype, view_dtype = _get_optimal_cast(array)
array = cls._cast(array, cast_dtype, view_dtype)
return cls._hash(array, UINT64(step)) | 7d2fedf0ca244797dd33e4f344dc81726b26efb6 | 3,663 |
import urllib
async def get_molecule_image(optimization_id: str):
"""Render the molecule associated with a particular bespoke optimization to an
SVG file."""
task = _get_task(optimization_id=optimization_id)
svg_content = smiles_to_image(urllib.parse.unquote(task.input_schema.smiles))
svg_response = Response(svg_content, media_type="image/svg+xml")
return svg_response | 6e3b178f18cef7d8a7ed4c75b75dfb0acd346fbe | 3,664 |
import torch
def total_variation_divergence(logits, targets, reduction='mean'):
"""
Loss.
:param logits: predicted logits
:type logits: torch.autograd.Variable
:param targets: target distributions
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
assert len(list(logits.size())) == len(list(targets.size()))
assert logits.size()[0] == targets.size()[0]
assert logits.size()[1] == targets.size()[1]
assert logits.size()[1] > 1
divergences = torch.sum(
torch.abs(torch.nn.functional.softmax(logits, dim=1) - targets), dim=1)
if reduction == 'mean':
return torch.mean(divergences)
elif reduction == 'sum':
return torch.sum(divergences)
else:
return divergences | 75f848e71e6fc78c60341e3fb46a2ff7d4531cbc | 3,665 |
import numpy as np
def initialize_parameters(n_in, n_out, ini_type='plain'):
"""
Helper function to initialize some form of random weights and Zero biases
Args:
n_in: size of input layer
n_out: size of output/number of neurons
ini_type: set initialization type for weights
Returns:
params: a dictionary containing W and b
"""
params = dict() # initialize empty dictionary of neural net parameters W and b
if ini_type == 'plain':
params['W'] = np.random.randn(n_out, n_in) *0.01 # set weights 'W' to small random gaussian
elif ini_type == 'xavier':
params['W'] = np.random.randn(n_out, n_in) / (np.sqrt(n_in)) # set variance of W to 1/n
elif ini_type == 'he':
# Good when ReLU used in hidden layers
# Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
# Kaiming He et al. (https://arxiv.org/abs/1502.01852)
# http: // cs231n.github.io / neural - networks - 2 / # init
params['W'] = np.random.randn(n_out, n_in) * np.sqrt(2/n_in) # set variance of W to 2/n
params['b'] = np.zeros((n_out, 1)) # set bias 'b' to zeros
return params | 1350d086c12dc40792a2f84a3a5edf5e683f9e95 | 3,666 |
def logcdf(samples, data, prior_bounds, weights=None, direction=DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION, num_proc=DEFAULT_NUM_PROC):
"""estimates the log(cdf) at all points in samples based on data and integration in "direction".
Does this directly by estimating the CDF from the weighted samples WITHOUT building a KDE"""
### this should be relatively quick (just an ordered summation), so we do it once
data, cweights = stats.samples2cdf(data, weights=weights)
if direction=='increasing':
pass ### we already integrate up from the lower values to higher values
elif direction=='decreasing':
cweights = 1. - cweights ### reverse the order of the integral
else:
raise ValueError('direction=%s not understood!'%direction)
logcdfs = np.empty(len(samples), dtype=float)
if num_proc==1: ### do everything on this one core
logcdfs[:] = _logcdf_worker(samples, data, cweights, prior_bounds)
else: ### parallelize
# partition work amongst the requested number of cores
Nsamp = len(samples)
sets = _define_sets(Nsamp, num_proc)
# set up and launch processes.
procs = []
for truth in sets:
conn1, conn2 = mp.Pipe()
proc = mp.Process(target=_logcdf_worker, args=(samples[truth], data, cweights, prior_bounds), kwargs={'conn':conn2})
proc.start()
procs.append((proc, conn1))
conn2.close()
# read in results from process
for truth, (proci, conni) in zip(sets, procs):
proci.join() ### should clean up child...
logcdfs[truth] = conni.recv()
return logcdfs | 5b57b964a57cae59425e73b089a3d1d6f7fbf95d | 3,667 |
import requests
import json
def post_gist(description, files):
"""Post a gist of the analysis"""
username, password = get_auth()
sess = requests.Session()
sess.auth = (username, password)
params = {
'description': description,
'files': files,
'public': False,
}
headers = {
'Content-Type': 'application/json',
'Accept': '*/*',
'User-Agent': 'stolaf-cs-toolkit/v1',
}
req = sess.post('https://api.github.com/gists',
headers=headers,
data=json.dumps(params))
result = req.json()
return result.get('html_url', '"' + result.get('message', 'Error') + '"') | a348203a08455099f6eefb660a234796d22380ae | 3,668 |
def compute_exposure_params(reference, tone_mapper="aces", t_max=0.85, t_min=0.85):
"""
Computes start and stop exposure for HDR-FLIP based on given tone mapper and reference image.
Refer to the Visualizing Errors in Rendered High Dynamic Range Images
paper for details about the formulas
:param reference: float tensor (with CxHxW layout) containing reference image (nonnegative values)
:param tone_mapper: (optional) string describing the tone mapper assumed by HDR-FLIP
:param t_max: (optional) float describing the t value used to find the start exposure
    :param t_min: (optional) float describing the t value used to find the stop exposure
:return: two floats describing start and stop exposure, respectively, to use for HDR-FLIP
"""
if tone_mapper == "reinhard":
k0 = 0
k1 = 1
k2 = 0
k3 = 0
k4 = 1
k5 = 1
x_max = t_max * k5 / (k1 - t_max * k4)
x_min = t_min * k5 / (k1 - t_min * k4)
elif tone_mapper == "hable":
# Source: https://64.github.io/tonemapping/
A = 0.15
B = 0.50
C = 0.10
D = 0.20
E = 0.02
F = 0.30
k0 = A * F - A * E
k1 = C * B * F - B * E
k2 = 0
k3 = A * F
k4 = B * F
k5 = D * F * F
W = 11.2
nom = k0 * np.power(W, 2) + k1 * W + k2
denom = k3 * np.power(W, 2) + k4 * W + k5
white_scale = denom / nom # = 1 / (nom / denom)
# Include white scale and exposure bias in rational polynomial coefficients
k0 = 4 * k0 * white_scale
k1 = 2 * k1 * white_scale
k2 = k2 * white_scale
k3 = 4 * k3
k4 = 2 * k4
#k5 = k5 # k5 is not changed
c0 = (k1 - k4 * t_max) / (k0 - k3 * t_max)
c1 = (k2 - k5 * t_max) / (k0 - k3 * t_max)
x_max = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
c0 = (k1 - k4 * t_min) / (k0 - k3 * t_min)
c1 = (k2 - k5 * t_min) / (k0 - k3 * t_min)
x_min = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
else: #tone_mapper == "aces":
# Source: ACES approximation: https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
# Include pre-exposure cancelation in constants
k0 = 0.6 * 0.6 * 2.51
k1 = 0.6 * 0.03
k2 = 0
k3 = 0.6 * 0.6 * 2.43
k4 = 0.6 * 0.59
k5 = 0.14
c0 = (k1 - k4 * t_max) / (k0 - k3 * t_max)
c1 = (k2 - k5 * t_max) / (k0 - k3 * t_max)
x_max = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
c0 = (k1 - k4 * t_min) / (k0 - k3 * t_min)
c1 = (k2 - k5 * t_min) / (k0 - k3 * t_min)
x_min = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
# Convert reference to luminance
lum_coeff_r = 0.2126
lum_coeff_g = 0.7152
lum_coeff_b = 0.0722
Y_reference = reference[0:1, :, :] * lum_coeff_r + reference[1:2, :, :] * lum_coeff_g + reference[2:3, :, :] * lum_coeff_b
# Compute start exposure
Y_hi = np.amax(Y_reference)
if Y_hi == 0:
return 0, 0
start_exposure = np.log2(x_max / Y_hi)
# Compute stop exposure
Y_lo = np.percentile(Y_reference, 50)
stop_exposure = np.log2(x_min / Y_lo)
return start_exposure, stop_exposure | 330265585be9a27f38f20cef55d6a2c588819d35 | 3,669 |
from typing import Literal
from typing import Any
def scores_generic_graph(
num_vertices: int,
edges: NpArrayEdges,
weights: NpArrayEdgesFloat,
cond: Literal["or", "both", "out", "in"] = "or",
is_directed: bool = False,
) -> NpArrayEdgesFloat:
"""
Args:
num_vertices: int
            number of vertices
edges: np.array
edges
weights: np.array
edge weights
cond: str
"out", "in", "both", "or"
Returns:
np.array:
**alphas** edge scores
"""
w_adj, adj = construct_sp_matrices(
weights, edges, num_vertices, is_directed=is_directed
)
def calc_degree(adj: Any, i: int) -> NpArrayEdgesFloat:
return np.asarray(adj.sum(axis=i)).flatten().astype(np.float64)
iin = edges[:, 1]
iout = edges[:, 0]
wdegree_out = calc_degree(w_adj, 0)[iout]
degree_out = calc_degree(adj, 0)[iout]
wdegree_in = calc_degree(w_adj, 1)[iin]
degree_in = calc_degree(adj, 1)[iin]
if cond == "out":
alphas = stick_break_scores(wdegree_out, degree_out, edges, weights)
elif cond == "in":
alphas = stick_break_scores(wdegree_in, degree_in, edges, weights)
else:
alphas_out = stick_break_scores(wdegree_out, degree_out, edges, weights)
alphas_in = stick_break_scores(wdegree_in, degree_in, edges, weights)
if cond == "both":
alphas = np.maximum(alphas_out, alphas_in)
elif cond == "or":
alphas = np.minimum(alphas_out, alphas_in)
return alphas | 6f3b4b969663ff48be7b0a4cf3571800dd0d15e8 | 3,671 |
def handle_storage_class(vol):
"""
    vol: dict (sent from the frontend)
    If the frontend sent the special values `{none}` or `{empty}` then the
backend will need to set the corresponding storage_class value that the
python client expects.
"""
if "class" not in vol:
return None
if vol["class"] == "{none}":
return ""
if vol["class"] == "{empty}":
return None
else:
return vol["class"] | a2747b717c6b83bb1128f1d5e9d7696dd8deda19 | 3,672 |
def spherical_to_cartesian(radius, theta, phi):
""" Convert from spherical coordinates to cartesian.
Parameters
-------
radius: float
radial coordinate
theta: float
axial coordinate
phi: float
azimuthal coordinate
Returns
-------
list: cartesian vector
"""
cartesian = [radius * np.sin(theta) * np.cos(phi), radius * np.sin(theta) * np.sin(phi), radius * np.cos(theta)]
return cartesian | bc76aa608171243f3afc1fbdbaca90931b1e3d17 | 3,673 |
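A quick check of the conversion above; the function body itself relies on numpy being available as np, so the example imports it. theta = 0 maps to a point on the positive z axis.

import numpy as np
assert np.allclose(spherical_to_cartesian(1.0, 0.0, 0.0), [0.0, 0.0, 1.0])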
import torch
import torch.nn.functional as F
def compute_mrcnn_bbox_loss(mrcnn_target_deltas, mrcnn_pred_deltas, target_class_ids):
"""
:param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
:param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
if 0 not in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss | b6f62a3255f21ce26cd69b6e53a778dfc23a7b86 | 3,674 |
def dummy_sgs(dummies, sym, n):
"""
Return the strong generators for dummy indices
Parameters
==========
dummies : list of dummy indices
`dummies[2k], dummies[2k+1]` are paired indices
sym : symmetry under interchange of contracted dummies::
* None no symmetry
* 0 commuting
* 1 anticommuting
n : number of indices
in base form the dummy indices are always in consecutive positions
Examples
========
>>> from sympy.combinatorics.tensor_can import dummy_sgs
>>> dummy_sgs(range(2, 8), 0, 8)
[[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9],
[0, 1, 2, 3, 6, 7, 4, 5, 8, 9]]
"""
if len(dummies) > n:
raise ValueError("List too large")
res = []
# exchange of contravariant and covariant indices
if sym is not None:
for j in dummies[::2]:
a = list(range(n + 2))
if sym == 1:
a[n] = n + 1
a[n + 1] = n
a[j], a[j + 1] = a[j + 1], a[j]
res.append(a)
# rename dummy indices
for j in dummies[:-3:2]:
a = list(range(n + 2))
a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1]
res.append(a)
return res | 774203b62a0335f9bea176a1228673b2466324e3 | 3,676 |
def get_uniform_comparator(comparator):
""" convert comparator alias to uniform name
"""
if comparator in ["eq", "equals", "==", "is"]:
return "equals"
elif comparator in ["lt", "less_than"]:
return "less_than"
elif comparator in ["le", "less_than_or_equals"]:
return "less_than_or_equals"
elif comparator in ["gt", "greater_than"]:
return "greater_than"
elif comparator in ["ge", "greater_than_or_equals"]:
return "greater_than_or_equals"
elif comparator in ["ne", "not_equals"]:
return "not_equals"
elif comparator in ["str_eq", "string_equals"]:
return "string_equals"
elif comparator in ["len_eq", "length_equals", "count_eq"]:
return "length_equals"
elif comparator in ["len_gt", "count_gt", "length_greater_than", "count_greater_than"]:
return "length_greater_than"
elif comparator in ["len_ge", "count_ge", "length_greater_than_or_equals", \
"count_greater_than_or_equals"]:
return "length_greater_than_or_equals"
elif comparator in ["len_lt", "count_lt", "length_less_than", "count_less_than"]:
return "length_less_than"
elif comparator in ["len_le", "count_le", "length_less_than_or_equals", \
"count_less_than_or_equals"]:
return "length_less_than_or_equals"
else:
return comparator | 20c24ba35dea92d916d9dd1006d110db277e0816 | 3,678 |
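A few alias conversions using the helper above; unknown comparator names are passed through unchanged.

assert get_uniform_comparator("==") == "equals"
assert get_uniform_comparator("len_gt") == "length_greater_than"
assert get_uniform_comparator("custom_check") == "custom_check"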
def inorder_traversal(root):
"""Function to traverse a binary tree inorder
Args:
root (Node): The root of a binary tree
Returns:
(list): List containing all the values of the tree from an inorder search
"""
res = []
if root:
res = inorder_traversal(root.left)
res.append(root.data)
res = res + inorder_traversal(root.right)
return res | f6d5141cbe9f39da609bd515133b367975e56688 | 3,679 |
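A tiny demonstration, assuming a minimal Node class providing the left/data/right attributes the traversal above relies on.

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

root = Node(2, left=Node(1), right=Node(3))
assert inorder_traversal(root) == [1, 2, 3]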
def intensity_scale(X_f, X_o, name, thrs, scales=None, wavelet="Haar"):
"""
Compute an intensity-scale verification score.
Parameters
----------
X_f: array_like
Array of shape (m, n) containing the forecast field.
X_o: array_like
Array of shape (m, n) containing the verification observation field.
name: string
A string indicating the name of the spatial verification score
to be used:
+------------+--------------------------------------------------------+
| Name | Description |
+============+========================================================+
| FSS | Fractions skill score |
+------------+--------------------------------------------------------+
| BMSE | Binary mean squared error |
+------------+--------------------------------------------------------+
thrs: float or array_like
Scalar or 1-D array of intensity thresholds for which to compute the
verification.
scales: float or array_like, optional
Scalar or 1-D array of spatial scales in pixels,
required if ``name="FSS"``.
wavelet: str, optional
The name of the wavelet function to use in the BMSE.
Defaults to the Haar wavelet, as described in Casati et al. 2004.
See the documentation of PyWavelets for a list of available options.
Returns
-------
out: array_like
The two-dimensional array containing the intensity-scale skill scores
for each spatial scale and intensity threshold.
References
----------
:cite:`CRS2004`, :cite:`RL2008`, :cite:`EWWM2013`
See also
--------
pysteps.verification.spatialscores.binary_mse,
pysteps.verification.spatialscores.fss
"""
intscale = intensity_scale_init(name, thrs, scales, wavelet)
intensity_scale_accum(intscale, X_f, X_o)
return intensity_scale_compute(intscale) | 1f38d30378a9ec2dff7babd4edb52fceb8e23dab | 3,681 |
def make_count(bits, default_count=50):
"""
Return items count from URL bits if last bit is positive integer.
>>> make_count(['Emacs'])
50
>>> make_count(['20'])
20
>>> make_count(['бред', '15'])
15
"""
count = default_count
if len(bits) > 0:
last_bit = bits[len(bits)-1]
if last_bit.isdigit():
count = int(last_bit)
return count | 8e7dc356ba7c0787b4b44ee8bba17568e27d1619 | 3,682 |
def synthesize_ntf_minmax(order=32, osr=32, H_inf=1.5, f0=0, zf=False,
**options):
"""
Alias of :func:`ntf_fir_minmax`
.. deprecated:: 0.11.0
Function is now available from the :mod:`NTFdesign` module with
name :func:`ntf_fir_minmax`
"""
warn("Function superseded by ntf_fir_minmax in "
"NTFdesign module", PyDsmDeprecationWarning)
return ntf_fir_minmax(order, osr, H_inf, f0, zf, **options) | 6c6752584a4f9760218456b640187e442f2442aa | 3,683 |
def r2f(value):
"""
converts temperature in R(degrees Rankine) to F(degrees Fahrenheit)
:param value: temperature in R(degrees Rankine)
:return: temperature in F(degrees Fahrenheit)
"""
return const.convert_temperature(value, 'R', 'F') | 31e08dd0f3194912e5e306a5a2a5a4c9a98ef723 | 3,684 |
import re
def normalize_number(value: str, number_format: str) -> str:
"""
Transform a string that essentially represents a number to the corresponding number with the given number format.
Return a string that includes the transformed number. If the given number format does not match any supported one, return the given string.
:param value: the string
:param number_format: number format with which the value is normalized
:return: the normalized string
"""
if number_format == 'COMMA_POINT' or number_format == 'Comma Point':
nor_str = re.sub(pattern=',', repl='', string=value)
elif number_format == 'POINT_COMMA' or number_format == 'Point Comma':
nor_str = re.sub(pattern=',', repl='.', string=re.sub(pattern='\.', repl='', string=value))
elif number_format == 'SPACE_POINT' or number_format == 'Space Point':
nor_str = re.sub(pattern='\s', repl='', string=value)
elif number_format == 'SPACE_COMMA' or number_format == 'Space Comma':
nor_str = re.sub(pattern=',', repl='.', string=re.sub(pattern='\s', repl='', string=value))
elif number_format == 'NONE_COMMA' or number_format == 'None Comma':
nor_str = re.sub(pattern=',', repl='.', string=value)
else:
nor_str = value
return nor_str | c22bff28fc6ef6f424d0e9b8b0358b327cd153c5 | 3,687 |
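Two example conversions with the normalizer above.

assert normalize_number("1,234.56", "COMMA_POINT") == "1234.56"
assert normalize_number("1.234,56", "POINT_COMMA") == "1234.56"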
import matplotlib.pyplot as plt  # `sample` and `Synchronous_Algorithm` come from the surrounding module


def benchmark(Algorithm_, Network_, test):
"""
Benchmarks the Algorithm on a given class of Networks. Samples variable network size, and plots results.
@param Algorithm_: a subclass of Synchronous_Algorithm, the algorithm to test.
@param Network_: a subclass of Network, the network on which to benchmark the algorithm.
@param test: a function that may throw an assertion error
"""
def averages(x,y):
"""
Groups x's with the same value, averages corresponding y values.
@param x: A sorted list of x values
@param y: A list of corresponding y values
@return: (x grouped by value, corresponding mean y values)
Example:
averages([1,1,2,2,2,3], [5,6,3,5,1,8]) --> ([1, 2, 3], [5.5, 3.0, 8.0])
"""
new_x = [x[0]]
new_y = []
cur_x = new_x[0]
cur_ys = []
for x_i, y_i in zip(x,y):
if x_i == cur_x:
cur_ys.append(y_i)
else:
new_y.append( sum(cur_ys)/float(len(cur_ys) ) )
new_x.append( x_i )
cur_ys = [y_i]
cur_x = x_i
new_y.append( sum(cur_ys)/float(len(cur_ys) ) )
return new_x, new_y
def plot(x, y, title):
"""Plots the points (x[i],y[i]) for all i, fig."""
fig, ax = plt.subplots()
x_ave,y_ave = averages(x,y)
ax.scatter(x, y, label="data", color='b')
ax.scatter(x_ave, y_ave, label="means", color='r')
ax.set_xlim( xmin=0 )
ax.set_ylim( ymin=0 )
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title(title)
ax.set_xlabel(Network_.__name__ +' size')
data = sample(Algorithm_, Network_, test)
if data is None: return
size, comm, time = data
if issubclass(Algorithm_, Synchronous_Algorithm):
plot(size, time, Algorithm_.__name__ + ' Time Complexity')
plot(size, comm, Algorithm_.__name__ + ' Communication Complexity') | ee8d0d2bd9c9bc11eb8db07c09bfd6dc22e61ace | 3,689 |
import trimesh as trm


def scale(obj, scale_ratio):
"""
:param obj: trimesh or file path
:param scale_ratio: float, scale all axis equally
:return:
author: weiwei
date: 20201116
"""
if isinstance(obj, trm.Trimesh):
tmpmesh = obj.copy()
tmpmesh.apply_scale(scale_ratio)
return tmpmesh
elif isinstance(obj, str):
originalmesh = trm.load(obj)
tmpmesh = originalmesh.copy()
tmpmesh.apply_scale(scale_ratio)
return tmpmesh | bdc84d04e9fd7d9009a60e2c47e59322526e3248 | 3,690 |
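# Usage sketch for scale(), assuming the `trimesh` package (imported as `trm` above) is installed;
# trimesh.creation.box is used here only to build a throwaway unit cube for illustration.
box = trm.creation.box(extents=(1.0, 1.0, 1.0))
bigger = scale(box, 2.0)
print(bigger.extents)  # expected: roughly [2. 2. 2.]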
import collections
def _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name,
allow_python_preds):
"""Verifies input arguments for the case function.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a
callable which returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for the case operation.
allow_python_preds: if true, pred_fn_pairs may contain Python bools in
addition to boolean Tensors
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
Returns:
a tuple <list of scalar bool tensors, list of callables>.
"""
del name
if not isinstance(pred_fn_pairs, (list, tuple, dict)):
raise TypeError('fns must be a list, tuple, or dict')
if isinstance(pred_fn_pairs, collections.OrderedDict):
pred_fn_pairs = pred_fn_pairs.items()
elif isinstance(pred_fn_pairs, dict):
# No name to sort on in eager mode. Use dictionary traversal order,
# which is nondeterministic in versions of Python < 3.6
if not exclusive:
raise ValueError('Unordered dictionaries are not supported for the '
'`pred_fn_pairs` argument when `exclusive=False` and '
'eager mode is enabled.')
pred_fn_pairs = list(pred_fn_pairs.items())
for pred_fn_pair in pred_fn_pairs:
if not isinstance(pred_fn_pair, tuple) or len(pred_fn_pair) != 2:
raise TypeError('Each entry in pred_fn_pairs must be a 2-tuple')
pred, fn = pred_fn_pair
if ops.is_tensor(pred):
if pred.dtype != dtype.bool:
raise TypeError('pred must be Tensor of type bool: %s' % pred.name)
elif not allow_python_preds:
raise TypeError('pred must be a Tensor, got: %s' % pred)
elif not isinstance(pred, bool):
raise TypeError('pred must be a Tensor or bool, got: %s' % pred)
if not callable(fn):
raise TypeError('fn for pred %s must be callable.' % pred.name)
predicates, actions = zip(*pred_fn_pairs)
return predicates, actions | 6a6b16561600ce24ef69964ff33a309d664bb53f | 3,691 |
import jax.numpy as jnp  # Policy and parse_dtype come from the surrounding module and are not imported here


def get_policy(policy_name: str) -> Policy:
"""Returns a mixed precision policy parsed from a string."""
# Loose grammar supporting:
# - "c=f16" (params full, compute+output in f16),
# - "p=f16,c=f16" (params, compute and output in f16).
# - "p=f16,c=bf16" (params in f16, compute in bf16, output in bf16)
# For values that are not specified params defaults to f32, compute follows
# params and output follows compute (e.g. 'c=f16' -> 'p=f32,c=f16,o=f16').
param_dtype = jnp.float32
compute_dtype = output_dtype = None
if "=" in policy_name:
for part in policy_name.split(","):
key, value = part.split("=", 2)
value = parse_dtype(value)
if key == "p" or key == "params":
param_dtype = value
elif key == "c" or key == "compute":
compute_dtype = value
elif key == "o" or key == "output":
output_dtype = value
else:
raise ValueError(f"Unknown key '{key}' in '{policy_name}' should be "
"'params', 'compute' or 'output'.")
if compute_dtype is None:
compute_dtype = param_dtype
if output_dtype is None:
output_dtype = compute_dtype
else:
# Assume policy name is a dtype (e.g. 'f32' or 'half') that all components
# of the policy should contain.
param_dtype = compute_dtype = output_dtype = parse_dtype(policy_name)
return Policy(param_dtype=param_dtype, compute_dtype=compute_dtype,
output_dtype=output_dtype) | 2aac684706f001b537bdb103abfc63ffc79eb4c5 | 3,692 |
def reset():
"""Reset password page. User launch this page via the link in
the find password email."""
if g.user:
return redirect('/')
token = request.values.get('token')
if not token:
flash(_('Token is missing.'), 'error')
return redirect('/')
user = verify_auth_token(token, expires=1)
if not user:
flash(_('Invalid or expired token.'), 'error')
return redirect(url_for('.find'))
form = ResetForm()
if form.validate_on_submit():
user.change_password(form.password.data).save()
login_user(user)
flash(_('Your password is updated.'), 'info')
return redirect(url_for('.setting'))
return render_template('account/reset.html', form=form, token=token) | fd6e2c356bb664b87d1d2ad9dcd2305a05d541ec | 3,693 |
from pathlib import Path
def usort_file(path: Path, dry_run: bool = False, diff: bool = False) -> Result:
"""
Sorts one file, optionally writing the result back.
Returns: a Result object.
Note: Not intended to be run concurrently, as the timings are stored in a
global.
"""
result = Result(path)
result.timings = []
with save_timings(result.timings):
try:
config = Config.find(path)
src_contents = path.read_bytes()
dst_contents, encoding = usort_bytes(src_contents, config, path)
if src_contents != dst_contents:
result.changed = True
if diff:
result.diff = unified_diff(
src_contents.decode(encoding),
dst_contents.decode(encoding),
path.as_posix(),
)
if not dry_run:
path.write_bytes(dst_contents)
result.written = True
except Exception as e:
result.error = e
return result | b8455cc81f25890ecb88e809aab7d016ee1604d2 | 3,694 |
import datetime

import dateutil.tz
def datetimeobj_a__d_b_Y_H_M_S_z(value):
"""Convert timestamp string to a datetime object.
Timestamps strings like 'Tue, 18 Jun 2013 22:00:00 +1000' are able to be
converted by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
KeyError: If the abbrieviated month is invalid.
"""
a, d, b, Y, t, z = value.split()
H, M, S = t.split(":")
return datetime.datetime(
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S),
tzinfo=dateutil.tz.tzoffset(None, _offset(z))
) | 2d3761d842a6f21ea646f6ac539d7ca4d78e20e9 | 3,695 |
import tensorflow as tf


def fn_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
"""Approximation of False rejection rate using Sigmoid."""
return tf.reduce_sum(
tf.multiply(tf.sigmoid(-1. * temperature * (preds - threshold)), labels)) | 84a248f4883ab383e2afc6556a335f33d114a9ae | 3,696 |
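# Usage sketch for fn_sigmoid_proxy_func: each positive example whose score falls below the
# threshold contributes ~1 to the soft count, examples above it contribute ~0 (values approximate).
preds = tf.constant([0.1, 0.9])
labels = tf.constant([1.0, 1.0])
print(fn_sigmoid_proxy_func(0.5, preds, labels, temperature=10.0))  # ~= 1.0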
from typing import Optional
from typing import Sequence
def get_images(filters: Optional[Sequence[pulumi.InputType['GetImagesFilterArgs']]] = None,
sorts: Optional[Sequence[pulumi.InputType['GetImagesSortArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImagesResult:
"""
Get information on images for use in other resources (e.g. creating a Droplet
based on a snapshot), with the ability to filter and sort the results. If no filters are specified,
all images will be returned.
This data source is useful if the image in question is not managed by the provider or you need to utilize any
of the image's data.
Note: You can use the `getImage` data source to obtain metadata
about a single image if you already know the `slug`, unique `name`, or `id` to retrieve.
:param Sequence[pulumi.InputType['GetImagesFilterArgs']] filters: Filter the results.
The `filter` block is documented below.
:param Sequence[pulumi.InputType['GetImagesSortArgs']] sorts: Sort the results.
The `sort` block is documented below.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['sorts'] = sorts
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getImages:getImages', __args__, opts=opts, typ=GetImagesResult).value
return AwaitableGetImagesResult(
filters=__ret__.filters,
id=__ret__.id,
images=__ret__.images,
sorts=__ret__.sorts) | 0e56efb6f7735f3b15f2241e7bfa43e28863f866 | 3,697 |
def polevl(x, coef):
"""Taken from http://numba.pydata.org/numba-doc/0.12.2/examples.html"""
N = len(coef)
ans = coef[0]
i = 1
while i < N:
ans = ans * x + coef[i]
i += 1
return ans | 2c7c0f5b329ab5ea28d123112ec065dd0c292c12 | 3,698 |
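# Usage sketch for polevl: Horner evaluation of 1*x**2 + 2*x + 3 at x = 2 gives 4 + 4 + 3 = 11.
assert polevl(2.0, [1.0, 2.0, 3.0]) == 11.0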
import numpy as np
import matplotlib.pyplot as plt


def plot_displacement(A, B, save=False, labels=None):
"""
A and B are both num_samples x num_dimensions
for now, num_dimensions must = 2
"""
assert A.shape == B.shape
assert A.shape[1] == 2
    if labels is not None:
assert len(labels) == A.shape[0]
delta = B - A
delta_dir = delta/np.linalg.norm(delta, axis=1).reshape(-1, 1)
fig = plt.figure()
# set size
xmin = min(min(A[:, 0]), min(B[:, 0]))
xmax = max(max(A[:, 0]), max(B[:, 0]))
ymin = min(min(A[:, 1]), min(B[:, 1]))
ymax = max(max(A[:, 1]), max(B[:, 1]))
plt.xlim(1.1*xmin, 1.1*xmax)
plt.ylim(1.1*ymin, 1.1*ymax)
# create
# add displacement arrows, possibly labels
offset = 0.05
    for i in range(A.shape[0]):
plt.arrow(A[i, 0]+offset*delta_dir[i, 0], A[i, 1]+offset*delta_dir[i, 1],
delta[i, 0]-2*offset*delta_dir[i, 0], delta[i, 1]-2*offset*delta_dir[i, 1],
length_includes_head=True, alpha=0.5, color='grey',
head_width=0.08, head_length=0.08, width=0.009)
        if labels is not None:
plt.annotate(labels[i], xy=A[i, :], xytext=A[i, :], color='red')
plt.annotate(labels[i], xy=B[i, :], xytext=B[i, :], color='blue')
if labels is None:
# without labels, just plot points
plt.scatter(A[:, 0], A[:, 1], s=35, c='red', linewidths=0)
plt.scatter(B[:, 0], B[:, 1], s=35, c='blue', linewidths=0)
plt.axhline(0, color='grey', linestyle='--')
plt.axvline(0, color='grey', linestyle='--')
# show
if save:
plt.savefig('fig.png')
else:
plt.show()
return True | 8cd05bbe56c590923e5d3a51d2b6eaf933b6a71b | 3,700 |
import math
def t_dp(tdb, rh):
""" Calculates the dew point temperature.
Parameters
----------
tdb: float
dry-bulb air temperature, [°C]
rh: float
relative humidity, [%]
Returns
-------
t_dp: float
dew point temperature, [°C]
"""
c = 257.14
b = 18.678
a = 6.1121
d = 234.5
gamma_m = math.log(rh / 100 * math.exp((b - tdb / d) * (tdb / (c + tdb))))
return round(c * gamma_m / (b - gamma_m), 1) | d3cd7de10ef51f36bc5d5bd978d991d5bfe3ba4c | 3,701 |
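# Worked example for t_dp (a Magnus/Arden Buck-style approximation): at 25 °C dry-bulb
# temperature and 50 % relative humidity the dew point comes out to roughly 13.8 °C.
print(t_dp(25, 50))  # -> 13.8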
def LinkConfig(reset=0, loopback=0, scrambling=1):
"""Link Configuration of TS1/TS2 Ordered Sets."""
value = ( reset << 0)
value |= ( loopback << 2)
value |= ((not scrambling) << 3)
return value | 901fe6df8bbe8dfa65cd516ac14692594608edfb | 3,703 |
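# Usage sketch for LinkConfig: bit 0 = reset, bit 2 = loopback, bit 3 = scrambling *disabled*.
assert LinkConfig() == 0                            # defaults: scrambling on, nothing else set
assert LinkConfig(loopback=1) == 0b0100             # 4
assert LinkConfig(reset=1, scrambling=0) == 0b1001  # 9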
def test_tedlium_release():
"""
Feature: TedliumDataset
Description: test release of tedlium
    Expectation: passing an invalid release value raises an error with the expected message
"""
def test_config(release):
try:
ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE12, release)
except (ValueError, TypeError, RuntimeError) as e:
return str(e)
return None
# test the release
assert "release is not within the valid set of ['release1', 'release2', 'release3']" in test_config("invalid")
assert "Argument release with value None is not of type [<class 'str'>]" in test_config(None)
assert "Argument release with value ['list'] is not of type [<class 'str'>]" in test_config(["list"]) | a25f042745d1eea4dca62195e1259b7bcb8beb8d | 3,704 |
import numpy as np


def get_data(n, input_dim, y_dim, attention_column=1):
"""
Data generation. x is purely random except that it's first value equals the target y.
In practice, the network should learn that the target = x[attention_column].
Therefore, most of its attention should be focused on the value addressed by attention_column.
:param n: the number of samples to retrieve.
:param input_dim: the number of dimensions of each element in the series.
:param attention_column: the column linked to the target. Everything else is purely random.
:return: x: model inputs, y: model targets
"""
x = np.random.standard_normal(size=(n, input_dim))
y = np.random.randint(low=0, high=2, size=(n, y_dim))
for i in range(y_dim):
x[:, i * 3] = y[:, i]
return x, y | 4c132abec92e8cd0ca6afd06e4032116d3631a50 | 3,705 |
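# Usage sketch for get_data: with y_dim targets, column 3*i of x is forced to equal target i,
# which is what an attention layer is expected to pick up on.
x, y = get_data(n=4, input_dim=10, y_dim=2)
assert x.shape == (4, 10) and y.shape == (4, 2)
assert (x[:, 0] == y[:, 0]).all() and (x[:, 3] == y[:, 1]).all()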
from lxml import etree  # assumed; xml.etree.ElementTree exposes the same fromstring/set/append API used below


def generate_tautomer_hydrogen_definitions(hydrogens, residue_name, isomer_index):
"""
Creates a hxml file that is used to add hydrogens for a specific tautomer to the heavy-atom skeleton
Parameters
----------
hydrogens: list of tuple
Tuple contains two atom names: (hydrogen-atom-name, heavy-atom-atom-name)
residue_name : str
name of the residue to fill the Residues entry in the xml tree
isomer_index : int
"""
hydrogen_definitions_tree = etree.fromstring("<Residues/>")
hydrogen_file_residue = etree.fromstring("<Residue/>")
hydrogen_file_residue.set("name", residue_name)
for name, parent in hydrogens:
h_xml = etree.fromstring("<H/>")
h_xml.set("name", name)
h_xml.set("parent", parent)
hydrogen_file_residue.append(h_xml)
hydrogen_definitions_tree.append(hydrogen_file_residue)
return hydrogen_definitions_tree | 69ec0c489d93edc7d8fe3ccde8df25c9c2fd01c9 | 3,706 |
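# Usage sketch for generate_tautomer_hydrogen_definitions with two hypothetical histidine-like
# hydrogens; note that isomer_index is accepted but not used by the snippet above.
tree = generate_tautomer_hydrogen_definitions([("HD1", "ND1"), ("HE2", "NE2")], "HIS", 0)
print(etree.tostring(tree).decode())
# -> <Residues><Residue name="HIS"><H name="HD1" parent="ND1"/><H name="HE2" parent="NE2"/></Residue></Residues>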
from twisted.internet import defer  # LoginDialog comes from the surrounding module


def login(client=None, **defaults):
"""
@param host:
@param port:
@param identityName:
@param password:
@param serviceName:
@param perspectiveName:
@returntype: Deferred RemoteReference of Perspective
"""
d = defer.Deferred()
LoginDialog(client, d, defaults)
return d | a6c4593ef5b1fd29f46cba4a6484049bd093b6da | 3,707 |
import time
import json
import traceback

from flask import request, jsonify
from sqlalchemy import func
from sqlalchemy.orm import sessionmaker
from alembic.migration import MigrationContext
from alembic.operations import Operations
# current_user, daqbrokerDatabase and InvalidUsage are provided by the surrounding
# DAQBroker application modules and are not importable from here.
def insertInstrument():
""" Insert a new instrument or edit an existing instrument on a DAQBroker database. Guest users are not allowed to
create instruments. Created instruments are
.. :quickref: Create/Edit instrument; Creates or edits a DAQBroker instrument instrument
:param: Name : (String) unique instrument name
:param: instid : (Integer) unique instrument identifier. Used to edit an existing instrument
:param: description : (String) description of the instrument and its
:param: email : (String) contact information for the instrument operator
:param: Files : (Optional) JSON encoded list of instrument data source objects. Each Contains the following keys:
| ``name`` : (String) name of the data source
| ``metaid`` : (Integer) unique data source identifier. Only used to edit existing data sources
| ``type`` : (Integer) type of instrument data source
| ``node`` : (String) unique network node identifier
| ``remarks`` : (String) JSON encoded object of extra data source information
| ``channels`` : (Optional) JSON encoded list of data channel objects. Each contains the following keys:
| ``Name`` : (String) data channel name
| ``channelid`` : (Integer) unique channel identifier. -1 if the channel is new. Positive integer
if the channel already exists
| ``description`` : (String) data channel description
| ``units`` : (String) data channel physical units
| ``channeltype`` : (Integer) type of data channel
| ``0`` : Number
| ``1`` : Text
| ``2`` : Custom
| ``active`` : (Boolean) channel is shown on interface
| ``fileorder`` : (Integer) Used to order channels in a data source
| ``alias`` : (String) Original data channel name. Kept constant when name changes
| ``remarks`` : (String) JSON encoded object with extra information
| ``oldname`` : (String) Old channel name. Used to detect changes in the channel name
| ``channeltypeOld`` : (Integer) Old channel type. Used to detect changes in the channel type
"""
processRequest = request.get_json()
Session = sessionmaker(bind=current_user.engineObj)
session = Session()
conn = current_user.engineObj.connect()
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
try:
if 'instid' in processRequest:
newInst = False
instid = processRequest['instid']
instrument = session.query(daqbrokerDatabase.instruments).filter_by(instid=instid).first()
else:
newInst = True
maxInst = session.query(func.max(daqbrokerDatabase.instruments.instid)).one_or_none()
# print(maxInst==None)
if maxInst[0]:
maxInstid = maxInst[0]
else:
maxInstid = 0
instid = maxInstid + 1
instrument = daqbrokerDatabase.instruments(
Name=processRequest['Name'],
instid=instid,
active=False,
description=processRequest['description'],
username=current_user.username,
email=processRequest['email'],
insttype=0,
log=None)
# Now I have an object called "instrument" that I can use to add sources
# and metadatas and to those metadatas I should be able to add channels.
for file in processRequest['files']:
if 'metaid' in file:
metadata = session.query(daqbrokerDatabase.instmeta).filter_by(metaid=file["metaid"]).first()
metadata.clock = time.time() * 1000
                metadata.name = file['name']
                metadata.type = file['type']
                metadata.node = file['node']
                metadata.remarks = json.dumps(file['remarks'])
else:
maxMeta = session.query(func.max(daqbrokerDatabase.instmeta.metaid)).first()
if maxMeta[0]:
maxMetaid = maxMeta[0]
else:
maxMetaid = 0
metaid = maxMetaid + 1
metadata = daqbrokerDatabase.instmeta(
clock=time.time() * 1000,
name=file['name'],
metaid=metaid,
type=file["type"],
node=file["node"],
remarks=json.dumps(
file['remarks']),
sentRequest=False,
lastAction=0,
lasterrortime=0,
lasterror='',
lockSync=False)
instrument.sources.append(metadata)
channelid = None
if 'channels' in file:
channelsInsert = []
for channel in file['channels']:
if int(channel['channelid']) < 0: # New channel - have to insert
maxChannel = session.query(func.max(daqbrokerDatabase.channels.channelid)).first()
if not channelid:
if maxChannel[0]:
maxChannelid = maxChannel[0]
else:
maxChannelid = 0
channelid = maxChannelid + 1
else:
channelid = channelid + 1
if 'remarks' in channel:
if len(channel["remarks"].keys())>0:
theRemarks = json.dumps(channel["remarks"])
else:
theRemarks = json.dumps({})
else:
theRemarks = json.dumps({})
theChannel = daqbrokerDatabase.channels(
Name=channel["Name"],
channelid=channelid,
channeltype=int(
channel["channeltype"]),
valuetype=0,
units=channel['units'],
description=channel['description'],
active=int(
channel['active']) == 1,
remarks=theRemarks,
lastclock=0,
lastValue=None,
firstClock=0,
fileorder=channel['fileorder'],
alias=channel['alias'])
metadata.channels.append(theChannel)
channelsInsert.append({'name': channel["Name"], 'type': int(channel["channeltype"])})
if not newInst:
extra = ''
if int(channel['channeltype']) == 1:
newType = daqbrokerDatabase.Float
extra = "\"" + channel["Name"] + "\"::double precision"
column = daqbrokerDatabase.Column(channel["Name"], newType)
op.add_column(processRequest['Name'] + "_data", column)
elif int(channel['channeltype']) == 2:
newType = daqbrokerDatabase.Text
column = daqbrokerDatabase.Column(channel["Name"], newType)
op.add_column(processRequest['Name'] + "_data", column)
elif int(channel['channeltype']) == 3:
extra = "\"" + channel["Name"] + "\"::double precision"
                                newType = daqbrokerDatabase.Float
                                column = daqbrokerDatabase.Column(channel["Name"], newType)
op.add_column(processRequest['Name'] + "_custom", column)
elif not newInst:
theChannel = session.query(
daqbrokerDatabase.channels).filter_by(
channelid=channel['channelid']).first()
theChannel.Name = channel["Name"]
theChannel.channeltype = int(channel["channeltype"])
theChannel.units = channel['units']
theChannel.description = channel['description']
theChannel.active = int(channel['active']) == 1
theChannel.fileorder = channel['fileorder']
theChannel.alias = channel['alias']
if (not channel['channeltypeOld'] == channel['channeltype']) or (
not channel['oldName'] == str(channel['Name'])):
if not channel['oldName'] == str(channel['Name']):
newName = str(channel['Name'])
oldName = channel['oldName']
else:
oldName = str(channel['Name'])
newName = None
if not channel['channeltypeOld'] == channel['channeltype']:
if channel['channeltype'] == 1 or channel['channeltype'] == 3:
newType = daqbrokerDatabase.Float
extra = "\"" + oldName + "\"::double precision"
else:
newType = daqbrokerDatabase.Text
extra = None
else:
newType = None
if not channel['channeltypeOld'] == channel['channeltype'] and channel['channeltype'] == 3:
if not newName:
theName = oldName
else:
theName = newName
if not newType:
theType = daqbrokerDatabase.Float
else:
theType = newType
column = daqbrokerDatabase.Column(theName, theType)
op.drop_column(processRequest['Name'] + "_data", oldName)
op.add_column(processRequest['Name'] + "_custom", column)
elif not channel['channeltypeOld'] == channel['channeltype'] and channel['channeltypeOld'] != 3:
if not newName:
theName = oldName
else:
theName = newName
if not newType:
if channel['channeltypeOld'] == 1:
theType = daqbrokerDatabase.Float
else:
theType = daqbrokerDatabase.Text
else:
theType = newType
column = daqbrokerDatabase.Column(theName, theType)
op.drop_column(processRequest['Name'] + "_custom", oldName)
op.add_column(processRequest['Name'] + "_data", column)
else:
if channel['channeltype'] == 1 or channel['channeltype'] == 2:
if extra:
op.alter_column(
processRequest['Name'] + "_data",
oldName,
new_column_name=newName,
type_=newType,
postgresql_using=extra)
else:
op.alter_column(
processRequest['Name'] + "_data", oldName, new_column_name=newName, type_=newType)
else:
if extra=='':
op.alter_column(
processRequest['Name'] + "_custom", oldName, new_column_name=newName, type_=newType)
else:
op.alter_column(
processRequest['Name'] + "_data",
oldName,
new_column_name=newName,
type_=newType,
postgresql_using=extra)
elif newInst:
raise InvalidUsage("Cannot issue edit channels on new instrument", status_code=401)
if newInst:
daqbrokerDatabase.createInstrumentTable(processRequest['Name'], channelsInsert, True)
session.add(instrument)
daqbrokerDatabase.daqbroker_database.metadata.create_all(current_user.engineObj)
session.commit()
conn.close()
current_user.updateDB()
return jsonify('done')
except Exception as e:
traceback.print_exc()
session.rollback()
# for statement in deleteStatements:
# connection.execute(statement)
raise InvalidUsage(str(e), status_code=500) | 537b89fa72b161c86c4b49a7a6813cf95df45f09 | 3,708 |
from shapely.geometry import LineString
from shapely.ops import nearest_points


def find_distance_to_major_settlement(country, major_settlements, settlement):
"""
Finds the distance to the nearest major settlement.
"""
nearest = nearest_points(settlement['geometry'], major_settlements.unary_union)[1]
geom = LineString([
(
settlement['geometry'].coords[0][0],
settlement['geometry'].coords[0][1]
),
(
nearest.coords[0][0],
nearest.coords[0][1]
),
])
distance_km = round(geom.length / 1e3)
return distance_km | 0bb33d3777ab0f60dfb05e69cb2f5ed711585b17 | 3,709 |
def x_for_half_max_y(xs, ys):
"""Return the x value for which the corresponding y value is half
of the maximum y value. If there is no exact corresponding x value,
one is calculated by linear interpolation from the two
surrounding values.
:param xs: x values
:param ys: y values corresponding to the x values
:return:
"""
if len(xs) != len(ys):
raise ValueError("xs and ys must be of equal length")
half_max_y = max(ys) / 2
for i in range(len(xs)-1):
if ys[i+1] >= half_max_y:
x_dist = xs[i+1] - xs[i]
y_dist = ys[i+1] - ys[i]
y_offset = half_max_y - ys[i]
if y_offset == 0:
return xs[i]
else:
x_offset = y_offset / y_dist * x_dist
return xs[i] + x_offset
return None | b18525664c98dc05d72a29f2904a13372f5696eb | 3,710 |
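# Usage sketch for x_for_half_max_y: on a triangular peak with maximum 10, half-maximum (5)
# is crossed halfway up the rising edge, so linear interpolation gives x = 0.5.
assert x_for_half_max_y([0, 1, 2], [0, 10, 0]) == 0.5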
from typing import Union
from pathlib import Path
from typing import Dict
def get_dict_from_dotenv_file(filename: Union[Path, str]) -> Dict[str, str]:
"""
:param filename: .env file where values are extracted.
:return: a dict with keys and values extracted from the .env file.
"""
result_dict = {}
error_message = 'file {filename}: the line n°{index} is not correct: "{line}"'
with open(filename) as f:
for index, line in enumerate(f):
stripped_line = line.strip()
# we don't take into account comments
if stripped_line.startswith('#'):
continue
# we don't take into account empty lines
if not stripped_line:
continue
parts = stripped_line.split('#') # we remove inline comments if there are any
# we remove set or export command if there are any
new_line = SET_EXPORT_EXPRESSION.sub('', parts[0].strip())
# we get key and value
parts = new_line.split('=')
parts = _sanitize_key_and_value(parts)
if len(parts) != 2 or ITEM_EXPRESSION.match(parts[0]) is None \
or ITEM_EXPRESSION.match(parts[1]) is None:
line_number = index + 1
raise DecodeError(message=error_message.format(filename=filename, index=line_number, line=new_line))
result_dict[parts[0]] = parts[1]
return result_dict | 97bce5a69e29f9606a58a54ed5cc4d05ab45fbca | 3,711 |
def calculate_note_numbers(note_list, key_override = None):
"""
Takes in a list of notes, and replaces the key signature (second
element of each note tuple) with the note's jianpu number.
Parameters
----------
note_list : list of tuples
List of notes to calculate jianpu numbers for.
key_override : str
If this is provided, all notes will be assumed to be in the
given key.
"""
note_list_numbered = []
for note in note_list:
if note[0] != -1:
if(note[1] == 'C' or key_override == 'C'):
offset = 0
elif(note[1] == 'C#' or key_override == 'C#'
or note[1] == 'Db' or key_override == 'Db'):
offset = 1
elif(note[1] == 'D' or key_override == 'D'):
offset = 2
elif(note[1] == 'D#' or key_override == 'D#'
or note[1] == 'Eb' or key_override == 'Eb'):
offset = 3
elif(note[1] == 'E' or key_override == 'E'):
offset = 4
elif(note[1] == 'F' or key_override == 'F'):
offset = 5
elif(note[1] == 'F#' or key_override == 'F#'
or note[1] == 'Gb' or key_override == 'Gb'):
offset = 6
elif(note[1] == 'G' or key_override == 'G'):
offset = 7
elif(note[1] == 'G#' or key_override == 'G#'
or note[1] == 'Ab' or key_override == 'Ab'):
offset = 8
elif(note[1] == 'A' or key_override == 'A'):
offset = 9
elif(note[1] == 'A#' or key_override == 'A#'
or note[1] == 'Bb' or key_override == 'Bb'):
offset = 10
elif(note[1] == 'B' or key_override == 'B'):
offset = 11
num = (note[0]-offset) - ((note[0]-offset)//12)*12
num_to_jianpu = { 0:1,
1:1.5,
2:2,
3:2.5,
4:3,
5:4,
6:4.5,
7:5,
8:5.5,
9:6,
10:6.5,
11:7}
jianpu = num_to_jianpu[num]
note_list_numbered.append((note[0], jianpu, note[2], note[3]))
else:
note_list_numbered.append(note)
return note_list_numbered | a32bbae7f64b381ad3384fdd8ae5c045c6887c87 | 3,712 |
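# Usage sketch for calculate_note_numbers: tuples are (midi_number, key, ..., ...) and the last
# two fields are passed through unchanged; a first element of -1 marks a rest and is copied as-is.
notes = [(60, "C", 1.0, 0), (62, "C", 0.5, 0), (-1, None, 0.5, 0)]
print(calculate_note_numbers(notes))
# -> [(60, 1, 1.0, 0), (62, 2, 0.5, 0), (-1, None, 0.5, 0)]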
import numpy as np
def _from_Gryzinski(DATA):
"""
This function computes the cross section and energy values from the files
that store information following the Gryzinski Model
"""
a_0 = DATA['a_0']['VALUES']
epsilon_i_H = DATA['epsilon_i_H']['VALUES']
epsilon_i = DATA['epsilon_i']['VALUES']
xi = DATA['xi']['VALUES']
final_E = DATA['Final_E']['VALUES']
Energy_range = np.linspace(epsilon_i, final_E, 200)
u = Energy_range/epsilon_i
gg = (1+2/3*(1-1/(2*u))*np.log(np.e+(u-1)**(1/2)))
g = ((u-1)/u**2)*((u/(u+1))**(3/2))*((1-1/u)**(1/2))*gg
Cross_sections = 4*np.pi*(a_0**2)*((epsilon_i_H/epsilon_i)**2)*xi*g
return(Energy_range, Cross_sections) | 925fb1e76bf23915385cf56e3a663d111615700d | 3,713 |
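# Usage sketch for _from_Gryzinski with illustrative numbers in the expected DATA layout
# (every entry keyed by 'VALUES'); only the keys are taken from the function above.
DATA = {
    "a_0": {"VALUES": 5.29e-11},      # Bohr radius in m
    "epsilon_i_H": {"VALUES": 13.6},  # hydrogen ionisation energy, eV
    "epsilon_i": {"VALUES": 15.6},    # target ionisation energy, eV (illustrative)
    "xi": {"VALUES": 2},              # number of equivalent electrons (illustrative)
    "Final_E": {"VALUES": 1000.0},    # upper bound of the energy range, eV
}
energies, sigmas = _from_Gryzinski(DATA)
print(energies.shape, sigmas.shape)  # (200,) (200,)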
from haversine import haversine
def stations_within_radius(stations, centre, r):
"""Returns an alphabetically-ordered list of the names of all the stations (in a list of stations objects) within a radius r (in km) of a central point
(which must be a Lat/Long coordinate)"""
# creates empty list
name_list = []
# extracts the necessary data from the list of stations
for i in range(len(stations)):
station_entry = stations[i]
s_coord = station_entry.coord
s_distance = haversine(s_coord, centre)
# Determines if the station is within the radius
if s_distance <= r:
s_name = station_entry.name
name_list.append(s_name)
#sorts the list
name_list.sort()
return name_list | 36bf8312e4295638c1e297f4a31b535c9ee96eaf | 3,714 |
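# Usage sketch for stations_within_radius: station objects only need .name and .coord
# attributes, so a namedtuple stands in; the centre is roughly Cambridge city centre.
from collections import namedtuple

Station = namedtuple("Station", ["name", "coord"])
stations = [Station("Jesus Lock", (52.213, 0.12)), Station("Far Away", (53.5, -2.0))]
print(stations_within_radius(stations, (52.2053, 0.1218), r=10))
# -> ['Jesus Lock']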
def ticket_message_url(request, structure_slug, ticket_id): # pragma: no cover
"""
Makes URL redirect to add ticket message by user role
:type structure_slug: String
:type ticket_id: String
:param structure_slug: structure slug
:param ticket_id: ticket code
:return: redirect
"""
structure = get_object_or_404(OrganizationalStructure,
slug=structure_slug)
user_type = get_user_type(request.user, structure)
return redirect('uni_ticket:{}_ticket_message'.format(user_type),
structure_slug, ticket_id) | 29cec06302e943d74236ff82647cd28deb634107 | 3,716 |
from pathlib import Path
from datetime import datetime
import typing as T  # the annotations below use T.Any; `from re import T` would import the re.TEMPLATE flag instead
def load(
fin: Path,
azelfn: Path = None,
treq: list[datetime] = None,
wavelenreq: list[str] = None,
wavelength_altitude_km: dict[str, float] = None,
) -> dict[str, T.Any]:
"""
reads FITS images and spatial az/el calibration for allsky camera
Bdecl is in degrees, from IGRF model
"""
fin = Path(fin).expanduser()
if fin.is_file() and fin.suffix in (".h5", ".hdf5"):
return load_hdf5(fin, treq, wavelenreq)
flist = _slicereq(fin, treq, wavelenreq)
if not flist:
raise FileNotFoundError(f"No files found in {fin}")
# %% load data from good files, discarding bad
imgs = _sift(flist)
# %% camera location
imgs = _camloc(imgs, flist[0].parent)
# %% az / el
imgs = _azel(azelfn, imgs)
# %% projections
imgs = _project(imgs, wavelength_altitude_km)
return imgs | bb656482c1db134c3045ac5a30b9cecb8d9f6716 | 3,717 |