content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---|
def elgamal_keypair_from_secret(a: ElementModQ) -> Optional[ElGamalKeyPair]:
"""
Given an ElGamal secret key (typically, a random number in [2,Q)), returns
an ElGamal keypair, consisting of the given secret key a and public key g^a.
"""
secret_key_int = a
if secret_key_int < 2:
log_error("ElGamal secret key needs to be in [2,Q).")
return None
return ElGamalKeyPair(a, g_pow_p(a))
| 5,352,100 |
def make_plots(plot_data, plot_params_dict):
""" Draw and save plot from preprocessed data.
:param plot_data: Data structure processed by __name__.prep_data
:param plot_params_dict: dict holding some plotting paramaters, see e.g. plot_configs/single_day/pov_single_day_config.example.json["PLOT_PARAMS_DICT"]
:return:
"""
fig, axes = plt.subplots(nrows=2, ncols=1)
fig.set_size_inches(h=14, w=9)
colors = list(get_plot_colors(plot_data + ['dummy']*2))
color = colors.pop(0)
linestyles = get_plot_linestyles(len(plot_data) + 2)
linestyle = linestyles.pop(0)
orderbook_df = plot_data[0]['no_execution_df']
orderbook_df["MID_PRICE"].plot(ax=axes[0], label=plot_params_dict['baseline_label'], color=color, linestyle=linestyle)
for plot_data_dict in plot_data:
color = colors.pop(0)
linestyle = linestyles.pop(0)
pov = f'{100 * plot_data_dict["pov"]} %'
orderbook_df = plot_data_dict['no_execution_df']
orderbook_with_execution_df = plot_data_dict['yes_execution_df']
# mid_price
orderbook_with_execution_df["MID_PRICE"].plot(ax=axes[0], label=
f'{plot_params_dict["execution_label"]}{pov}', color=color, linestyle=linestyle)
# normalised difference
mid_price_yes_execution, mid_price_no_execution = forward_fill_series(orderbook_with_execution_df["MID_PRICE"],
orderbook_df["MID_PRICE"])
diff = 10000 * (mid_price_yes_execution - mid_price_no_execution) / mid_price_no_execution
diff = diff.to_frame()
diff = diff.loc[~diff.index.duplicated(keep='last')]
diff = diff[diff.columns[0]] # to series for plotting
diff.plot(ax=axes[1], label=f'{plot_params_dict["execution_label"]}{pov}', color=color, linestyle=linestyle)
axes[0].axvspan(pd.Timestamp(plot_params_dict['shade_start_datetime']),
pd.Timestamp(plot_params_dict['shade_end_datetime']), alpha=0.2, color='grey')
axes[1].axvspan(pd.Timestamp(plot_params_dict['shade_start_datetime']),
pd.Timestamp(plot_params_dict['shade_end_datetime']), alpha=0.2, color='grey')
axes[-1].xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
axes[-1].xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
axes[0].xaxis.set_visible(False)
axes[0].legend()
axes[-1].set_xlabel('Time', size=15)
axes[0].set_ylabel('Mid-price ($)', size=15)
axes[1].set_ylabel('Normalized Difference (bps)', size=15)
fig.tight_layout()
fig.subplots_adjust(top=0.7)
fig.savefig(plot_params_dict["output_file_path"], format='png', dpi=300, transparent=False, bbox_inches='tight',
pad_inches=0.03)
| 5,352,101 |
def current_default_thread_limiter():
"""Get the default `~trio.CapacityLimiter` used by
`trio.to_thread.run_sync`.
The most common reason to call this would be if you want to modify its
:attr:`~trio.CapacityLimiter.total_tokens` attribute.
"""
try:
limiter = _limiter_local.get()
except LookupError:
limiter = CapacityLimiter(DEFAULT_LIMIT)
_limiter_local.set(limiter)
return limiter
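# Hedged usage sketch (not from the original source): the public trio API exposes the same
# limiter via trio.to_thread.current_default_thread_limiter(); the usual reason to fetch it
# is to raise total_tokens before dispatching blocking work. `blocking_io` is a hypothetical
# stand-in for any synchronous callable.
import trio

def blocking_io():
    return sum(range(1_000_000))

async def _demo_thread_limiter():
    trio.to_thread.current_default_thread_limiter().total_tokens = 40
    return await trio.to_thread.run_sync(blocking_io)

# trio.run(_demo_thread_limiter)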
| 5,352,102 |
def _mask_board(board):
"""
A function that copies the input board and replaces all ships with empty strings to mask them.
:param board: a 2D numpy array containing a string representation of the board. All ships should be visible.
:return: a 2D numpy array containing a string representation of the board, with all ships hidden.
"""
masked = copy.deepcopy(board) # copy operation
for (y, x), val in np.ndenumerate(board):
if val.isdigit():
masked[y][x] = ''
return masked
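# Hedged usage sketch (not from the original source): ship cells are digit strings, so they
# get blanked out, while non-digit markers (e.g. a miss 'M') stay visible.
import copy
import numpy as np

_demo_board = np.array([['', '3'],
                        ['M', '']], dtype=object)
_masked = _mask_board(_demo_board)
# _masked == [['', ''], ['M', '']]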
| 5,352,103 |
def test_proxy_handling():
"""Proxy variable no impact."""
with InstalledApp(wsgi_app.simple_app, host=HOST, port=80,
proxy='some.host:1234') as app:
http_client = http_lib.HTTPConnection(HOST)
http_client.request('GET', '/')
content = http_client.getresponse().read()
http_client.close()
assert content == b'WSGI intercept successful!\n'
assert app.success()
| 5,352,104 |
def query_yes_no(question, default="no", color=_constants.COLORS.RESET):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
stdout.write(color + question + COLORS.RESET + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
| 5,352,105 |
def remove_head_id(ref, hyp):
"""Assumes that the ID is the begin token of the string which is common
in Kaldi but not in Sphinx."""
ref_id = ref[0]
hyp_id = hyp[0]
if ref_id != hyp_id:
print('Reference and hypothesis IDs do not match! '
'ref="{}" hyp="{}"\n'
'File lines in hyp file should match those in the ref file.'.format(ref_id, hyp_id))
exit(-1)
ref = ref[1:]
hyp = hyp[1:]
return ref, hyp
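# Hedged usage sketch (not from the original source): Kaldi-style lines carry the
# utterance ID as the first token of both the reference and the hypothesis.
_ref = "utt001 the quick brown fox".split()
_hyp = "utt001 the quick brown box".split()
_ref_words, _hyp_words = remove_head_id(_ref, _hyp)
# _ref_words == ['the', 'quick', 'brown', 'fox'], _hyp_words == ['the', 'quick', 'brown', 'box']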
| 5,352,106 |
def convert_image_np(inp):
"""Convert a Tensor to numpy image."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
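# Hedged usage sketch (not from the original source): undo the ImageNet mean/std
# normalization of a single (C, H, W) tensor and get an (H, W, C) array in [0, 1].
import numpy as np
import torch

_chw = torch.rand(3, 224, 224)
_hwc = convert_image_np(_chw)
assert _hwc.shape == (224, 224, 3) and _hwc.min() >= 0.0 and _hwc.max() <= 1.0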
| 5,352,107 |
def get_miner_day_list():
"""
Return the storage provider's daily miner data.
:return:
"""
miner_no = request.form.get("miner_no")
date = request.form.get("date")
data = MinerService.get_miner_day_list(miner_no, date)
return response_json(data)
| 5,352,108 |
def get_notebook_logs(experiment_id, operator_id):
"""
Get logs from an Experiment notebook.
Parameters
----------
experiment_id : str
operator_id : str
Returns
-------
dict or None
Operator's notebook logs. Or None when the notebook file is not found.
"""
notebook = get_jupyter_notebook(experiment_id, operator_id)
if not notebook:
return None
notebook = notebook["content"]
logs = {}
for cell in notebook["cells"]:
try:
metadata = cell["metadata"]["papermill"]
if metadata["exception"] and metadata["status"] == "failed":
for output in cell["outputs"]:
if output["output_type"] == "error":
error_log = output["traceback"]
traceback = remove_ansi_escapes(error_log)
logs = {"exception": output["ename"], "traceback": traceback}
except KeyError:
pass
return logs
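# `remove_ansi_escapes` is not shown in this snippet; the helper below is a hedged sketch
# of what it might do: strip ANSI color codes from papermill traceback lines.
import re

_ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-9;]*m")

def remove_ansi_escapes_sketch(traceback_lines):
    """Return the traceback lines with ANSI escape sequences removed."""
    return [_ANSI_ESCAPE_RE.sub("", line) for line in traceback_lines]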
| 5,352,109 |
def type_secret(locator, input_text, anchor="1", timeout=0, index=1, **kwargs):
"""Type secret information such as password.
Logging in start_keyword and end_keyword is filtered;
otherwise the functionality is the same as TypeText.
Generally, all secret credentials in Robot FW scripts should
be provided as external variables. Secrets must not be
stored directly in the test script, nor committed to the version
control system.
Examples
--------
.. code-block:: robotframework
# PASSWD is set outside the script
# Provide this to Robot FW as follows:
# robot --variable PASSWD:mypass123 test.robot
TypeSecret password ${PASSWD}
TypeSecret r1c1 ${PASSWD} #table
"""
type_text(locator, input_text, anchor, timeout=timeout, index=index, **kwargs)
| 5,352,110 |
def modify_env2(
function: Callable[[_UpdatedType], _SecondType],
) -> Kinded[Callable[
[Kind2[_Reader2Kind, _FirstType, _SecondType]],
Kind2[_Reader2Kind, _FirstType, _UpdatedType],
]]:
"""
Modifies the second type argument of a ``ReaderBased2``.
In other words, it modifies the function's
signature from: ``a -> b``
to: ``Container[x, a] -> Container[x, b]``
.. code:: python
>>> from returns.pointfree import modify_env2
>>> from returns.context import RequiresContext
>>> def multiply(arg: int) -> RequiresContext[int, int]:
... return RequiresContext(lambda deps: arg * deps)
>>> assert modify_env2(int)(multiply(3))('4') == 12
Note, that this function works with only ``Kind2`` containers
with ``.modify_env`` method.
See :class:`returns.primitives.interfaces.specific.reader.ReaderBased2`
for more info.
"""
@kinded
def factory(
container: Kind2[_Reader2Kind, _FirstType, _SecondType],
) -> Kind2[_Reader2Kind, _FirstType, _UpdatedType]:
return internal_modify_env2(container, function)
return factory
| 5,352,111 |
def test_article_admin_translate_button_expected(db, admin_client):
"""
Translate button should be present on the detail page with the right URL and
should lead to the "Translate" form with the right available languages.
"""
ping = CategoryFactory(slug="ping")
# Create meat articles with unpublished DE translation
created_beef = multilingual_article(
title="Beef",
slug="beef",
langs=["de"],
fill_categories=[ping],
contents={
"de": {
"title": "Rindfleisch",
"slug": "rindfleisch",
"fill_categories": [ping],
"status": STATUS_DRAFT,
}
},
)
# Translate button is expected since there is an available language to translate to
url = get_admin_change_url(created_beef["original"])
response = admin_client.get(url)
assert response.status_code == 200
dom = html_pyquery(response)
existings = dom.find(".lotus-siblings-resume a")
assert len(existings) == 1
links = dom.find(".lotus-translate-link")
assert len(links) == 1
# Expected existing translation languages (without the original language)
existing_languages = [item.get("data-lotus-langcode") for item in existings]
assert sorted(existing_languages) == ["de"]
response = admin_client.get(links[0].get("href"))
assert response.status_code == 200
# Form is expected since there is an available language. Directly use the URL from
# translate button
dom = html_pyquery(response)
forms = dom.find("#lotus-translate-original-form")
assert len(forms) == 1
# Check expected available language is correct
options = dom.find("#lotus-translate-original-form #id_language option")
option_ids = [item.get("value") for item in options if item.get("value")]
assert sorted(option_ids) == ["fr"]
# Ensure the original id is correctly set into hidden input
original_id = dom.find("#lotus-translate-original-form input[name='original']")
assert len(original_id) == 1
assert int(original_id[0].get("value")) == created_beef["original"].id
| 5,352,112 |
def model_trees(z, quantiles, normed=False,
dbhfile='c:\\projects\\MLM_Hyde\\Data\\hyde_runkolukusarjat.txt',
plot=False,
biomass_function='marklund'):
"""
Reads runkolukusarjat (stem-number distributions) from Hyde and creates lad-profiles for pine, spruce and deciduous trees.
Args:
z - grid (m)
quantiles - cumulative frequency limits for grouping trees
normed - True returns sum(lad*dz) normalized to unity
Returns:
lad_p, lad_s, lad_d - leaf-area density profiles for model tree groups (m2/m3)
n_p, n_s, n_d - trees / ha in model tree groups
lai_p, lai_s, lai_d - leaf-area index of each model tree group (m2/m2)
"""
dat = np.loadtxt(dbhfile, skiprows=1)
dz = z[1]-z[0]
M = len(quantiles)
# year 2008 data
pine = dat[:, [0, 1]]
spruce = dat[:, [0, 2]]
decid = dat[:, [0, 3]]
# pines
h, hb, mleaf, L, a = profiles_hyde(pine, 'pine', z, biomass_function=biomass_function)
n = pine[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_p = np.zeros([len(z), M])
n_p = np.zeros(M)
lai_p = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_p[:, k] = np.sum(a[:, f], axis=1)
n_p[k] = np.sum(n[f])
lai_p[k] = sum(dz*lad_p[:,k])
m = quantiles[k]
if normed:
lad_p[:, k] = lad_p[:, k] / np.maximum(np.sum(lad_p[:, k] * dz), eps)
# spruces
h, hb, mleaf, L, a = profiles_hyde(spruce, 'spruce', z, biomass_function=biomass_function)
n = spruce[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_s = np.zeros([len(z), M])
n_s = np.zeros(M)
lai_s = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_s[:, k] = np.sum(a[:, f], axis=1)
n_s[k] = np.sum(n[f])
lai_s[k] = sum(dz*lad_s[:,k])
m = quantiles[k]
if normed:
lad_s[:, k] = lad_s[:, k] / np.maximum(np.sum(lad_s[:, k] * dz), eps)
# decid
h, hb, mleaf, L, a = profiles_hyde(decid, 'birch', z, biomass_function=biomass_function)
n = decid[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_d = np.zeros([len(z), M])
n_d = np.zeros(M)
lai_d = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_d[:, k] = np.sum(a[:, f], axis=1)
n_d[k] = np.sum(n[f])
lai_d[k] = sum(dz*lad_d[:,k])
m = quantiles[k]
if normed:
lad_d[:, k] = lad_d[:, k] / np.maximum(np.sum(lad_d[:, k] * dz), eps)
if plot:
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
plt.figure(figsize=(2.5,3.5))
for k in range(M):
plt.plot(lad_p[:, k],z,color=colors[0], label='pine, %.2f m$^2$m$^{-2}$' % lai_p[k])#,lad_g,z)
plt.plot(lad_s[:, k],z,color=colors[1], label='spruce, %.2f m$^2$m$^{-2}$' % lai_s[k])
plt.plot(lad_d[:, k],z,color=colors[2], label='decid, %.2f m$^2$m$^{-2}$' % lai_d[k])
plt.title(" ")#dbhfile.split("/")[-1])
plt.ylabel('height [m]')
if normed:
plt.xlabel('normalized lad [-]')
else:
plt.xlabel('lad [m$^2$m$^{-3}$]')
plt.tight_layout()
return lad_p, lad_s, lad_d, n_p, n_s, n_d, lai_p, lai_s, lai_d
| 5,352,113 |
def inject_general_timeline():
"""This function injects the function object 'Tweet.get_general_timeline'
into the application context so that 'get_general_timeline' can be accessed
in Jinja2 templates.
"""
return dict(get_general_timeline=Tweet.get_general_timeline)
| 5,352,114 |
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a Cosmo Skymed file. Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
CSKReader|None
`CSKReader` instance if Cosmo Skymed file, `None` otherwise
"""
if h5py is None:
return None
try:
csk_details = CSKDetails(file_name)
print('File {} is determined to be a Cosmo Skymed file.'.format(file_name))
return CSKReader(csk_details)
except (IOError, KeyError, ValueError):
# TODO: what all should we catch?
return None
| 5,352,115 |
def get_vaccinated_model(model, area=None):
"""Get all states that can be vaccinated or recovered (by area).
Parameters
----------
model : amici.model
Amici model which should be evaluated.
area : str, optional
Name of a single area to filter the states by; if None, states for all areas are returned.
Returns
-------
states : list
List of states that can be vaccinated.
"""
if area is None:
states = [
x
for x in model.getStateNames()
if not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
]
else:
states = [
x
for x in model.getStateNames()
if (
not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
)
and (area in x)
]
return states
| 5,352,116 |
def MicrosecondsToDatetime(microseconds):
"""Returns a datetime given the number of microseconds, or None."""
if microseconds:
return datetime.utcfromtimestamp(float(microseconds) / 1000000)
return None
| 5,352,117 |
def update() -> bool:
"""
Pull down the latest Docker image build and prune old image versions.
"""
current_image = DEFAULT_IMAGE
latest_image = latest_build_image(current_image)
if latest_image == current_image:
print(colored("bold", "Updating Docker image %s…" % current_image))
else:
print(colored("bold", "Updating Docker image from %s to %s…" % (current_image, latest_image)))
print()
# Pull the latest image down
try:
subprocess.run(
["docker", "image", "pull", latest_image],
check = True)
except (OSError, subprocess.CalledProcessError):
return False
# Update the config file to point to the new image so we use it by default
# going forward.
config.set("docker", "image", latest_image)
# Prune any old images which are now dangling to avoid leaving lots of
# hidden disk use around. We don't use `docker image prune` because we
# want to just remove _our_ dangling images, not all. We very much don't
# want to automatically prune unrelated images.
print()
print(colored("bold", "Pruning old images…"))
print()
try:
images = dangling_images(current_image) \
+ old_build_images(current_image)
if images:
subprocess.run(
["docker", "image", "rm", *images],
check = True)
except (OSError, subprocess.CalledProcessError) as error:
warn()
warn("Update succeeded, but an error occurred pruning old image versions:")
warn(" ", error)
warn()
return True
| 5,352,118 |
def get_default_accept_image_formats():
"""With default bentoML config, this returns:
['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp']
"""
return [
extension.strip()
for extension in config("apiserver")
.get("default_image_handler_accept_file_extensions")
.split(",")
]
| 5,352,119 |
def findports():
"""Returns an array of the serial ports that have a command interpreter."""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(255)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
cmd= Cmd()
cmd.open(port)
cmd.close(True)
result.append(port)
except (OSError, serial.SerialException, CmdException) as error:
# print(error)
pass
return result
| 5,352,120 |
def import_download_cache(filename_or_obj, urls=None, update_cache=False, pkgname='astropy'):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
"""
with zipfile.ZipFile(filename_or_obj, 'r') as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(url, f_temp_name,
remove_original=True,
pkgname=pkgname)
| 5,352,121 |
def cells_handler(results, cl):
"""
Changes result cell attributes based on object instance and field name
"""
suit_cell_attributes = getattr(cl.model_admin, 'suit_cell_attributes', None)
if not suit_cell_attributes:
return results
class_pattern = 'class="'
td_pattern = '<td'
th_pattern = '<th'
for row, result in enumerate(results):
instance = cl.result_list[row]
for col, item in enumerate(result):
field_name = cl.list_display[col]
attrs = copy(suit_cell_attributes(instance, field_name))
if not attrs:
continue
# Validate
if not isinstance(attrs, dict):
raise TypeError('"suit_cell_attributes" method must return dict. '
'Got: %s: %s' % (
attrs.__class__.__name__, attrs))
# Merge 'class' attribute
if class_pattern in item.split('>')[0] and 'class' in attrs:
css_class = attrs.pop('class')
replacement = '%s%s ' % (class_pattern, css_class)
result[col] = mark_safe(
item.replace(class_pattern, replacement))
# Add rest of attributes if any left
if attrs:
cell_pattern = td_pattern if item.startswith(
td_pattern) else th_pattern
result[col] = mark_safe(
result[col].replace(cell_pattern,
td_pattern + dict_to_attrs(attrs)))
return results
| 5,352,122 |
def dynamic_import(import_string):
"""
Dynamically import a module or object.
"""
# Use rfind rather than rsplit for Python 2.3 compatibility.
lastdot = import_string.rfind('.')
if lastdot == -1:
return __import__(import_string, {}, {}, [])
module_name, attr = import_string[:lastdot], import_string[lastdot + 1:]
parent_module = __import__(module_name, {}, {}, [attr])
return getattr(parent_module, attr)
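# Hedged usage sketch (not from the original source): works for plain modules and for
# dotted attribute paths alike.
_loads = dynamic_import('json.loads')      # the json.loads function
assert _loads('{"a": 1}') == {"a": 1}
_json = dynamic_import('json')             # a bare module name returns the module itself
assert _json.dumps({"a": 1}) == '{"a": 1}'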
| 5,352,123 |
def main():
"""
We are going to draw a fractal called 'Sierpinski Triangle'.
"""
sierpinski_triangle(ORDER, LENGTH, UPPER_LEFT_X, UPPER_LEFT_Y)
| 5,352,124 |
def _split_on_parenthesis(text_in: Union[str, list[str]]) -> List[str]:
"""Splits text up into a list of strings based on parenthesis locations."""
if isinstance(text_in, list):
if None in text_in:
return text_in
text_list = text_in
elif isinstance(text_in, str):
text_list = [text_in]
else:
return text_in
for i, text in enumerate(text_list):
if isinstance(text, str) and "(" in text:
text_inside = text[text.find("(")+1:text.rfind(")")]
out_add = _split_list(text, text_inside)
out_add[0] = out_add[0][:-1] # remove (
out_add[2] = out_add[2][1:] # remove )
out_add[1] = _get_unit(text_inside)
out_add = [text for text in out_add if text != ""]
out_add = [text for text in out_add if text is not None]
text_list[i] = out_add
return _flatten_list(text_list)
| 5,352,125 |
def reply():
"""
Reply
"""
t = Twitter(auth=authen())
try:
id = int(g['stuff'].split()[0])
except:
printNicely(red('Sorry I can\'t understand.'))
return
tid = c['tweet_dict'][id]
user = t.statuses.show(id=tid)['user']['screen_name']
status = ' '.join(g['stuff'].split()[1:])
status = '@' + user + ' ' + str2u(status)
t.statuses.update(status=status, in_reply_to_status_id=tid)
| 5,352,126 |
def shell_command_error2exit_decorator(func: Callable):
"""Decorator to convert given ShellCommandException to an exit message
This avoids displaying nasty stack traces to end-users
"""
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except ShellCommandException as e:
e = e.__cause__
print(f"{e}:\n{e.output}")
sys.exit(1)
return func_wrapper
| 5,352,127 |
def test_almost_equal():
"""Tests almost_equal"""
assert almost_equal(10.0, 10.0005)
assert not almost_equal(10.0, 10.1)
assert almost_equal(10.0, 10.01, precision=0.1)
# Test "a 10% discount on $10"
assert almost_equal(10.0 - 10.0 * (10.0 / 100), 9.0)
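# `almost_equal` itself is not shown here; a hedged sketch consistent with the assertions
# above would treat `precision` as an absolute tolerance:
def almost_equal_sketch(a, b, precision=0.001):
    return abs(a - b) <= precision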
| 5,352,128 |
def getExternalIP():
""" Returns external ip of system """
ip = requests.get("http://ipv4.myexternalip.com/raw").text.strip()
if ip == None or ip == "":
ip = requests.get("http://ipv4.icanhazip.com").text.strip()
return ip
| 5,352,129 |
def intersection_indices(a, b):
"""
:param list a, b: two lists of variables from different factors.
returns a tuple of
(indices in a of the variables that are in both a and b,
indices of those same variables within the list b)
For example, intersection_indices([1,2,5,4,6],[3,5,1,2]) returns
([0, 1, 2], [2, 3, 1]).
"""
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
mapA = []
mapB = []
for i, itm in enumerate(a):
if itm in bind:
mapA.append(i)
mapB.append(bind.get(itm))
return mapA, mapB
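# Worked check of the docstring example (not from the original source):
assert intersection_indices([1, 2, 5, 4, 6], [3, 5, 1, 2]) == ([0, 1, 2], [2, 3, 1])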
| 5,352,130 |
def test_extract_requested_slot_from_entity_with_intent():
"""Test extraction of a slot value from entity with the different name
and certain intent
"""
# noinspection PyAbstractClass
class CustomFormAction(FormAction):
def slot_mappings(self):
return {"some_slot": self.from_entity(entity="some_entity",
intent="some_intent")}
form = CustomFormAction()
tracker = Tracker('default', {'requested_slot': 'some_slot'},
{'intent': {'name': 'some_intent', 'confidence': 1.0},
'entities': [{'entity': 'some_entity',
'value': 'some_value'}]},
[], False, None, {}, 'action_listen')
slot_values = form.extract_requested_slot(CollectingDispatcher(),
tracker, {})
# check that the value was extracted for correct intent
assert slot_values == {'some_slot': 'some_value'}
tracker = Tracker('default', {'requested_slot': 'some_slot'},
{'intent': {'name': 'some_other_intent',
'confidence': 1.0},
'entities': [{'entity': 'some_entity',
'value': 'some_value'}]},
[], False, None, {}, 'action_listen')
slot_values = form.extract_requested_slot(CollectingDispatcher(),
tracker, {})
# check that the value was not extracted for incorrect intent
assert slot_values == {}
| 5,352,131 |
def graph_cases(selenium, host):
"""
Factory method that allows to draw preconfigured graphs and manipulate them
with a series of helpful methods.
:type selenium: selenium.webdriver.remote.webdriver.WebDriver
:type host: qmxgraph.server.Host
:rtype: GraphCaseFactory
:return: Factory able to create cases.
"""
return GraphCaseFactory(selenium=selenium, host=host)
| 5,352,132 |
def test_pipeline(info,
pipeline_id,
datasource,
orchestration_backend,
orchestration_args,
processing_backend,
processing_args,
training_backend,
training_args,
serving_backend,
serving_args,
force):
"""Initiate a test run of a selected pipeline"""
if datasource is None:
utils.check_datasource_commit(info)
utils.resolve_pipeline_creation(info=info,
pipeline_type=PipelineRunTypes.test.name,
pipeline_=pipeline_id,
datasource=datasource,
orchestration_backend=orchestration_backend,
orchestration_args=orchestration_args,
processing_backend=processing_backend,
processing_args=processing_args,
force=force,
additional_args={
'training_backend': training_backend,
'training_args': training_args,
'serving_backend': serving_backend,
'serving_args': serving_args})
| 5,352,133 |
def motion_controller_start_images(image_numbers):
"""Configure motion controller
image_numbers: list of 1-based integers
e.g. image_numbers = collection_pass(1)"""
if "after image" in translate.mode:
XYZ = array([translation_after_image_xyz(i) for i in image_numbers])
triggered_motion.xyz = XYZ
triggered_motion.waitt = timing_system.waitt.next(wait_time(image_numbers[0]))
triggered_motion.armed = True
| 5,352,134 |
def figure_8():
"""
Notes
-----
Colors from Bang Wong's color-blind friendly colormap. Available at:
https://www.nature.com/articles/nmeth.1618
Wong's map acquired from David Nichols page. Available at:
https://davidmathlogic.com/colorblind/.
"""
# choosing test sample and network.
sample = const.SAMPLE_232p3_wet
network_folder = const.FOLDER_PRED_UNET
# we will return a 10 x 10 matthews matrix; each for a crop
matthews_coefs = np.ones((10, 10))
worst_indexes = np.zeros((10, 10))
# a variable to obtain inlay data.
inlay_data = []
# reading input data.
is_registered = sample['registered_path'] is not None
data_pred, data_gs = _pred_and_goldstd(sample,
folder_prediction=network_folder,
is_registered=is_registered,
is_binary=True)
data_pred = data_pred[slice(*sample['segmentation_interval'])]
# comp_color starts as gray (background).
comp_color = np.ones(
(*data_pred[0].shape, 3)
) * (np.asarray((238, 238, 238)) / 255)
for idx, (img_pred, img_gs) in enumerate(zip(data_pred, data_gs)):
# crop images in 100 (256, 256) pieces.
crop_pred = util.view_as_blocks(img_pred,
block_shape=(256, 256))
crop_gs = util.view_as_blocks(img_gs,
block_shape=(256, 256))
for i, _ in enumerate(crop_pred):
for j, _ in enumerate(crop_pred[i]):
# calculate the Matthews coefficient for each crop.
aux_conf = _confusion_matrix(crop_gs[i, j],
crop_pred[i, j])
aux_matthews = _measure_matthews(aux_conf)
# if smaller than previously, save results.
# restricting aux_matthews > 0.1 due to errors in all-TN regions
if (0.1 < aux_matthews < matthews_coefs[i, j]):
matthews_coefs[i, j] = aux_matthews
worst_indexes[i, j] = idx
aux_comp = _comparison_color(crop_gs[i, j], crop_pred[i, j])
comp_color[i*256:(i+1)*256, j*256:(j+1)*256] = aux_comp
# grab inlay data from crops we want to highlight.
for i, j in [(2, 2), (8, 7)]:
inlay_data.append(comp_color[i*256:(i+1)*256, j*256:(j+1)*256])
# Figure 8(a).
plt.figure(figsize=FIGURE_SIZE)
plt.imshow(comp_color)
for idx in np.arange(start=0, stop=2560, step=256): # according to image
plt.axvline(idx, color='white')
plt.axhline(idx, color='white')
matthews_coefs = np.round(matthews_coefs * 100, decimals=2)
for i, j in product(range(10), repeat=2):
facecolor, textcolor = _label_color(matthews_coefs[j, i])
plt.text(x=i*256 + 30, y=j*256 + 50,
s=str(matthews_coefs[j, i]),
fontsize=8,
color=textcolor,
bbox=dict(facecolor=facecolor, alpha=0.9))
_check_if_folder_exists(folder='./figures')
plt.savefig('figures/Fig_08a' + SAVE_FIG_FORMAT, bbox_inches='tight')
plt.close()
# Figures 8(b, c).
indexes = {0: 'b', 1: 'c'}
for idx in indexes.keys():
plt.figure(figsize=FIGURE_SIZE)
plt.imshow(inlay_data[idx])
_check_if_folder_exists(folder='./figures')
plt.savefig(f'figures/Fig_08{indexes[idx]}' + SAVE_FIG_FORMAT,
bbox_inches='tight')
plt.close()
return None
| 5,352,135 |
def check_threats(message):
"""Return list of threats found in message"""
threats = []
for threat_check in get_threat_checks():
for expression in threat_check["expressions"]:
if re.search(expression, message, re.I | re.U):
del threat_check["expressions"]
threats += [threat_check]
break
return threats
| 5,352,136 |
def data_processing_max(data, column):
"""Compute the max of a column."""
return costly_compute_cached(data, column).max()
| 5,352,137 |
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y)
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary'
| 5,352,138 |
def extract_data_from_csv_stream(client: Client, alert_id: str,
attachment_id: str, delimiter: bytes = b'\r\n') -> List[dict]:
"""
Call the attachment download API and parse required fields.
Args:
client (Client): Cyberint API client.
alert_id (str): ID of the alert the attachment belongs to.
attachment_id (str): ID of the attachment itself.
delimiter (bytes): Delimiter for the CSV file.
Returns:
list(dict): List of all the data found using the wanted fields.
"""
first_line = True
field_indexes = {} # {wanted_field_name: wanted_field_index...}
information_found = []
for csv_line in client.get_csv_file(alert_id, attachment_id, delimiter):
csv_line_separated = csv_line.split(',')
if first_line:
for field in CSV_FIELDS_TO_EXTRACT:
try:
field_indexes[field] = csv_line_separated.index(field)
except ValueError:
pass
first_line = False
else:
try:
extracted_field_data = {field_name.lower(): csv_line_separated[field_index]
for field_name, field_index in field_indexes.items()}
if extracted_field_data:
information_found.append(extracted_field_data)
except IndexError:
pass
return information_found
| 5,352,139 |
def validate_password(password, password_repeat=None):
"""
Validate a user password.
:param password: password as string
:param password_repeat: repeated password; if provided, it must match password
:return: False if the password is valid, otherwise an error message string
"""
if password_repeat:
if password != password_repeat:
return "Passwords did not match."
flag = False
if len(password) < 8:
flag = True
elif not re.search("[a-z]", password):
flag = True
elif not re.search("[A-Z]", password):
flag = True
elif not re.search("[0-9]", password):
flag = True
elif re.search("\s", password):
flag = True
if flag:
return (
"Password must contain at least a lower case, an upper case, a number, no spaces "
"and be at least 9 characters."
)
return False
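# Hedged usage examples (not from the original source): the function returns False when
# the password passes all checks, otherwise an explanatory message string.
assert validate_password("Str0ngPass") is False
assert isinstance(validate_password("short"), str)
assert validate_password("Str0ngPass", "Different1X") == "Passwords did not match."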
| 5,352,140 |
def get_large_circuit(backend: IBMBackend) -> QuantumCircuit:
"""Return a slightly larger circuit that would run a bit longer.
Args:
backend: Backend on which the circuit will run.
Returns:
A larger circuit.
"""
n_qubits = min(backend.configuration().n_qubits, 20)
circuit = QuantumCircuit(n_qubits, n_qubits)
for qubit in range(n_qubits - 1):
circuit.h(qubit)
circuit.cx(qubit, qubit + 1)
circuit.measure(list(range(n_qubits)), list(range(n_qubits)))
return circuit
| 5,352,141 |
def loadUserProject():
""" Loads a project that contains only the contents of user.dev.
This project will not be cached, so every call will reload it."""
userFilePath = os.path.join(os.path.expanduser(devon.userPath), userFileName)
project = DevonProject("", time.time())
__mergeProject(project, "", userFilePath)
return project
| 5,352,142 |
def top_k(loc_pred, loc_true, topk):
"""
Count the hits of loc_true within the top-k of loc_pred (used to calculate Precision, Recall and F1-score),
calculate the reciprocal rank (used to calculate MRR),
and calculate the sum of DCG@k over the batch (used to calculate NDCG).
Args:
loc_pred: (batch_size * output_dim)
loc_true: (batch_size * 1)
topk:
Returns:
tuple: tuple contains:
hit (int): the hit numbers \n
rank (float): the sum of the reciprocal rank of input batch \n
dcg (float): dcg
"""
assert topk > 0, "top-k ACC evaluation: k must be at least 1"
loc_pred = torch.FloatTensor(loc_pred)
val, index = torch.topk(loc_pred, topk, 1)
index = index.numpy()
hit = 0
rank = 0.0
dcg = 0.0
for i, p in enumerate(index):
target = loc_true[i]
if target in p:
hit += 1
rank_list = list(p)
rank_index = rank_list.index(target)
# rank_index starts from 0, so add 1
rank += 1.0 / (rank_index + 1)
dcg += 1.0 / np.log2(rank_index + 2)
return hit, rank, dcg
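# Hedged usage sketch (not from the original source): two samples over four locations,
# evaluated within the top-2 predictions.
import numpy as np
import torch

_scores = [[0.1, 0.5, 0.3, 0.1],   # true location 1 is ranked 1st
           [0.4, 0.1, 0.3, 0.2]]   # true location 2 is ranked 2nd
_hit, _rank, _dcg = top_k(_scores, [1, 2], 2)
# _hit == 2, _rank == 1.0 + 0.5, _dcg == 1/np.log2(2) + 1/np.log2(3)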
| 5,352,143 |
def enviar_contacto(request):
"""
Send an email with the contact form
to technical support.
"""
formulario = ContactoForm()
if request.method == 'POST':
formulario = ContactoForm(request.POST)
if formulario.is_valid():
mail = EmailMessage(subject='HPC Contacto',
from_email=formulario.cleaned_data['email'],
to=EMAIL_TO)
mail.body = 'El usuario %s ha comentado: %s' \
% (formulario.cleaned_data['nombre'], formulario.cleaned_data['mensaje'])
mail.send()
messages.success(request, "El personal de soporte técnico ha recibido su consulta, "
"pronto nos pondremos en contacto.")
return HttpResponseRedirect('/')
ctx = {'form': formulario}
return render_to_response('contacto/enviar_contacto.html', ctx, context_instance=RequestContext(request))
| 5,352,144 |
def dict_depth(d):
"""
Recursively compute the depth of a dict.
d = {'a':1, 'b': {'c':{}}} --> depth(d) == 3
"""
if isinstance(d, dict):
return 1 + (max(map(dict_depth, d.values())) if d else 0)
return 0
| 5,352,145 |
def test_split_exist_new(utils_patch):
"""
Tests split when the new pool already exists
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "Unable to split datapool: pool already exists"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[
("split", False),
("error", "Unable to split datapool: pool already exists"),
]
)
assert ret == res
| 5,352,146 |
def apercorr(psf,image,objects,psfobj,verbose=False):
"""
Calculate aperture correction.
Parameters
----------
psf : PSF object
The best-fitting PSF model.
image : string or CCDData object
The input image to fit. This can be the filename or CCDData object.
objects : table
The output table of best-fit PSF values for all of the sources.
psfobj : table
The table of PSF objects.
verbose : boolean, optional
Verbose output to the screen. Default is False.
Returns
-------
objects : table
The output table with an "apcorr" column inserted and the aperture correction
applied to "psfmag".
apcor : float
The aperture correction in mag.
cgrow : numpy array
The cumulative aperture correction array.
Example
-------
apcor = apercorr(psf,image,objects,psfobj)
"""
# Get model of all stars except the PSF stars
ind1,ind2 = dln.match(objects['id'],psfobj['id'])
left = np.delete(np.arange(len(objects)),ind1)
neiobj = objects[left]
neimodel = image.copy()
neimodel.data *= 0
neimodel.error[:] = 1
neimodelim = psf.add(neimodel,neiobj)
neimodel.data = neimodelim
# Subtract everything except the PSF stars from the image
resid = image.copy()
if image.mask is not None:
resid.data[~resid.mask] -= neimodel.data[~resid.mask]
else:
resid.data -= neimodel.data
residim = np.maximum(resid.data-resid.sky,0)
resid.data = residim
resid.sky[:] = 0.0
# Do aperture photometry with lots of apertures on the PSF
# stars
# rk = (20/3.)**(1/11.) * rk-1 for k=2,..,12
rseeing = psf.fwhm()*0.5
apers = np.cumprod(np.hstack((3.0,np.ones(11,float)*(20/3.)**(1/11.))))
#apers = np.array([3.0,3.7965,4.8046,6.0803,7.6947,9.7377,12.3232,
# 15.5952,19.7360,24.9762,31.6077,40.0000])
apercat = aperphot(resid,psfobj,apers)
# Fit curve of growth
# use magnitude differences between successive apertures.
apars, agrow, derr = fitgrowth(apercat,apers,rseeing=psf.fwhm()*0.5)
# Get magnitude difference errors
nstars = len(apercat)
napers = len(apers)
derr = np.zeros((nstars,napers-1),float)
for i in range(len(apers)-1):
err1 = apercat['magerr_aper'+str(i+1)]
err2 = apercat['magerr_aper'+str(i+2)]
derr[:,i] = np.sqrt(err1**2+err2**2)
wt = 1/derr**2
# THE CURVE TURNS OVER AT LARGE RADIUS!!!!???
# It shouldn't EVER do that.
# Calculate empirical growth curve
egrow,egrowerr = empgrowth(apercat,apers)
# Get "adopted" growth curve by taking the weighted average
# of the analytical and empirical growth curves
# with the empirical weighted higher at small r and
# the analytical weighted higher at large r
gwt = np.mean(wt,axis=0) # mean weights over the stars
adopgrow = (egrow*gwt + agrow*(1/(0.1*agrow))**2) / (gwt+(1/(0.1*agrow))**2)
adopgrowerr = 1 / (gwt+(1/(0.1*agrow))**2)
# Adopted cumulative growth curve
# sum from the outside in, with an outer tail given by
# extrapolation of the analytic model to 2*outer aperture
cadopgrow = np.cumsum(adopgrow[::-1])[::-1]
# add extrapolation from rlast t=o2*rlast
tail = diffprofile([2*apers[-1],apers[-1]],*apars)
cadopgrow += tail
cadopgrow = np.hstack((cadopgrow,tail)) # add value for outer aperture
cadopgrowerr = np.hstack((adopgrowerr,0.0))
# Calculate "total" magnitude for the PSF stars
totmag,toterr = totphot(apercat,apers,cadopgrow,cadopgrowerr)
# Calculate median offset between total and PSF magnitude
# psf - total
ind1,ind2 = dln.match(objects['id'],psfobj['id'])
diffmag = objects['psfmag'][ind1] - totmag[ind2]
apcor = np.median(diffmag) # positive value
# Apply aperture correction to the data
# add apcorr column and keep initial mags in instmag
objects['apcorr'] = apcor
objects['inst_psfmag'] = objects['psfmag']
objects['psfmag'] -= apcor # make brighter
if verbose:
print('Aperture correction = %.3f mag' % apcor)
return objects, apcor, cadopgrow
| 5,352,147 |
def product_loading_factor_single_discount(skus: str, product_list: Dict[str, object], product: Dict[str, int], product_name: str, rules: list) -> Tuple[int, str]:
"""
Single product loading factor for calculating discounts with one rule
Parameters
----------
skus: str
String containing individual product skus
product_list: Dict[str, object]
Product discount list used for applying discounts
product: Dict[str, int]
Product list used for returning the current products price
product_name: str
The name of the product
rules: List
List of discount rules names to apply
Returns
-------
Tuple:
price: int
Calculated price
skus: str
Updated skus list
"""
number_of_products = skus.count(product_name)
product_price = product[product_name]
product_discount_data_object = product_list[product_name][rules[0]]
discount_threshold = product_discount_data_object['discount_threshold']
while number_of_products > 0:
if number_of_products > 0 and number_of_products % discount_threshold == 0:
product_discount_data_object['count'] += 1
number_of_products -= discount_threshold
else:
number_of_products -= 1
applied_discount = product_discount_data_object['count']
remainder_product_count = skus.count(product_name) - (applied_discount * discount_threshold)
discount_to_apply = product_discount_data_object['discount']
apply_discount = (applied_discount * product_price * discount_threshold) - (applied_discount * discount_to_apply)
price = apply_discount + (remainder_product_count * product_price)
return price, skus
| 5,352,148 |
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page))
| 5,352,149 |
async def test_when_a_stream_iterator_fails_midway():
"""
Sometimes bad things happen. What happens if we're iterating a stream and
some anarchist decides to delete it from under our feet? Well, I guess we
should return an error to the caller and stop the iterator.
"""
dispatcher = MessageDispatcher()
output_queue = TeeQueue()
conversation = IterStreamEvents("my-stream")
first_msg = read_stream_events_completed(
conversation.conversation_id, "my-stream", [NewEvent("event", data={"x": 1})]
)
second_msg = read_stream_events_failure(
conversation.conversation_id, ReadStreamResult.StreamDeleted
)
future = await dispatcher.start_conversation(conversation)
await dispatcher.dispatch(first_msg, output_queue)
await dispatcher.dispatch(second_msg, output_queue)
iterator = await asyncio.wait_for(future, 1)
event = await anext(iterator)
assert event.event.json()["x"] == 1
with pytest.raises(StreamDeleted):
await anext(iterator)
assert not dispatcher.has_conversation(conversation.conversation_id)
| 5,352,150 |
def error(msg: str, exc_type: Exception=Exception) -> Exception:
"""
Wrapper to get around Python's distinction between statements and expressions
Can be used in lambdas and expressions such as: a if b else error(c)
:param msg: error message
:param exc_type: type of exception to raise
"""
raise exc_type(msg)
| 5,352,151 |
def test_aks(directory: str, aks_service: AksWebservice):
"""
Test AKS with sample call.
:param directory: directory of data_folder with test data
:param aks_service: AKS Web Service to Test
"""
num_dupes_to_score = 4
dupes_test = get_dupes_test(directory)
text_to_score = dupes_test.iloc[0, num_dupes_to_score]
json_text = text_to_json(text_to_score)
scoring_url = aks_service.scoring_uri
api_key = aks_service.get_keys()[0]
headers = {
"content-type": "application/json",
"Authorization": ("Bearer " + api_key),
}
requests.post(
scoring_url, data=json_text, headers=headers
) # Run the request twice since the first time takes a
r = requests.post(
scoring_url, data=json_text, headers=headers
) # little longer due to the loading of the model
print(r)
dupes_to_score = dupes_test.iloc[:5, num_dupes_to_score]
text_data = list(map(text_to_json, dupes_to_score)) # Retrieve the text data
for text in text_data:
r = requests.post(scoring_url, data=text, headers=headers)
print(r)
| 5,352,152 |
def calculate_recall(tp, n):
"""
:param tp: int
Number of True Positives
:param n: int
Number of total instances
:return: float
Recall
"""
if n == 0:
return 0
return tp / n
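# Hedged usage examples (not from the original source):
assert calculate_recall(8, 10) == 0.8
assert calculate_recall(0, 0) == 0   # guarded against division by zero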
| 5,352,153 |
def authenticate_user_password(password : 'bytes', encryption_dict : 'dict', id_array : 'list'):
"""
Authenticate the user password.
Parameters
----------
password : bytes
The password to be authenticated as user password.
encryption_dict : dict
The dictionary containing all the information about the encryption procedure.
id_array : list
The two elements array ID, contained in the trailer dictionary.
Returns
-------
The encryption key if the user password is valid, None otherwise.
"""
R = encryption_dict["R"]
U = encryption_dict["U"]
U = U.value if isinstance(U, PDFLiteralString) else unhexlify(U.value)
encryption_key = compute_encryption_key(password, encryption_dict, id_array)
if R == 2:
cipher = rc4(PASSWORD_PADDING, encryption_key)
else:
input_to_md5 = bytearray()
input_to_md5.extend(PASSWORD_PADDING)
input_to_md5.extend(id_array[0])
computed_hash = md5(input_to_md5).digest()
cipher = rc4(computed_hash, encryption_key)
for counter in range(1, 20):
cipher = rc4(cipher, bytes(x ^ counter for x in encryption_key))
correct_password = (U[:16] == cipher[:16]) if R >= 3 else (U == cipher)
return encryption_key if correct_password else None
| 5,352,154 |
def date_to_num(date):
"""Convert datetime to days since 1901"""
num = (date.year - 1901) * 365.25
num += [
0, 31, 59.25, 90.25, 120.25,
151.25, 181.25, 212.25, 243.25,
273.25, 304.25, 334.25
][date.month - 1]
num += date.day
return int(num)
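# Hedged worked example (not from the original source), assuming a datetime.date input:
from datetime import date
# (1901 - 1901) * 365.25 + 0 (January offset) + 1 (day) == 1
assert date_to_num(date(1901, 1, 1)) == 1
# (1902 - 1901) * 365.25 + 31 (February offset) + 14 == 410.25 -> truncated to 410
assert date_to_num(date(1902, 2, 14)) == 410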
| 5,352,155 |
def load_world(filename: str, size: Tuple[int, int], resolution: int) -> np.array:
"""Load a preconstructred track to initialize world.
Args:
filename: Full path to the track file (png).
size: Width and height of the map
resolution: Resolution of the grid map (i.e. into how many cells)
one meter is divided into.
Returns:
An initialized gridmap based on the preconstructed track as
an n x m dimensional numpy array, where n is the width (num cells)
and m the height (num cells) - (after applying resolution).
"""
width_in_cells, height_in_cells = np.multiply(size, resolution)
world = np.array(png_to_ogm(
filename, normalized=True, origin='lower'))
# If the image is already in our desired shape, no need to rescale it
if world.shape == (height_in_cells, width_in_cells):
return world
# Otherwise, scale the image to our desired size.
resized_world = resize(world, (width_in_cells, height_in_cells))
return resized_world
| 5,352,156 |
def generate_image(model, img_size, n_flow, n_block, n_sample, temp=0.7, ctx=None, label=None):
"""Generate a single image from a Glow model."""
# Determine sizes of each layer
z_sample = []
z_shapes = calc_z_shapes(3, img_size, n_flow, n_block)
for z in z_shapes:
z_new = torch.randn(n_sample, *z) * temp
z_sample.append(z_new.to(device))
assert ctx is None or label is None # can either insert label or context
if label is not None:
return model.reverse(z_sample, label=label)
else:
# handles both cases where only context is provided or no label or context is provided
return model.reverse(z_sample, ctx=ctx)
| 5,352,157 |
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg)
| 5,352,158 |
def slice_map(center, radius, m):
""" :func:`slice_map` for slicing Map object based on center and radius.
:param center: x, y tuple of center of sliced map
:param radius: - :class:`int` center of sliced map
:param m: - :class:`Map` Map object that want to be sliced
return :class:`Map`
"""
# TODO
# it should slice player list and kingdom list too
results = dict()
for cell in m.gen_tiles():
if distance(center, (cell.coordinate.x, cell.coordinate.y)) <= radius:
if cell.region_id not in results:
results[cell.region_id] = []
results[cell.region_id].append(cell)
new_map = Map(m.client)
new_map._raw_data = deepcopy(m._raw_data)
new_map._raw_data["response"]["1"]["region"] = results
return new_map
| 5,352,159 |
def configure(repl):
"""
Configuration method. This is called during the start-up of ptpython.
:param repl: `PythonRepl` instance.
"""
# Vi mode.
repl.vi_mode = True
# Enable 24bit True color. (Not all terminals support this. -- maybe check
# $TERM before changing.)
repl.true_color = True
| 5,352,160 |
def download_file(url) -> Path:
"""Better download"""
name = Path(urlparse(unquote(url)).path).name
with mktempdir() as tmpdir:
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=30)
def get():
with requests.get(url, stream=True) as r:
save_path = tmpdir.joinpath(name)
with open(save_path, "wb") as f:
shutil.copyfileobj(r.raw, f, length=16 * 1024 * 1024)
return save_path
yield get()
| 5,352,161 |
def creategui(handlerfunctions):
"""Initializes and returns the gui."""
gui = GUI(handlerfunctions)
# root.title('DBF Utility')
return gui
| 5,352,162 |
def rank_src_trgs(enc_dec_gen, src_list, trg_list):
"""
"""
batch_size = len(trg_list)
x, y = enc_dec_gen.encode_inputs(src_list,
trg_list,
add_bos=True,
add_eos=True)
y_len = torch.sum(y.ne(enc_dec_gen.model.PAD), -1)
with torch.no_grad():
y_target = y[:, 1:]
y = y[:, :-1]
enc_self_attn_mask = enc_dec_gen.model.get_attn_mask(x, x)
enc_outputs = enc_dec_gen.model.encoder(x,
enc_self_attn_mask)
enc_output = enc_outputs[0]
n = y.size(0)//x.size(0)
x = x.repeat([1,n]).view(y.size(0), -1)
enc_output = enc_output.repeat([1, n, 1]).view(x.size(0), x.size(1), -1)
dec_self_attn_mask = enc_dec_gen.model.get_subsequent_mask(y)
dec_self_attn_mask = dec_self_attn_mask | enc_dec_gen.model.get_attn_mask(y, y)
dec_enc_attn_mask = enc_dec_gen.model.get_attn_mask(y, x)
trg_embedding = None
if enc_dec_gen.model.share_src_trg_emb == True:
trg_embedding = enc_dec_gen.model.encoder.src_embedding
dec_outputs = enc_dec_gen.model.decoder(y,
enc_output,
dec_self_attn_mask,
dec_enc_attn_mask,
trg_embedding=trg_embedding)
logits = dec_outputs[0]
logits = logits.view(-1, enc_dec_gen.trg_vocab_size)
log_probs = -F.nll_loss(F.log_softmax(logits, -1),
y_target.contiguous().view(-1),
ignore_index=enc_dec_gen.model.PAD,
reduction='none')
log_probs = torch.sum(log_probs.view(batch_size, -1), -1)
norm = 1
if enc_dec_gen.normalize == "gnmt":
norm = torch.pow(5. + y_len, enc_dec_gen.gamma) / np.power(6., enc_dec_gen.gamma)
elif enc_dec_gen.normalize == "linear":
norm = y_len
log_probs = log_probs / norm
log_probs = log_probs.cpu().numpy()
return log_probs
| 5,352,163 |
def plugin_poll(handle):
""" Extracts data from the sensor and returns it in a JSON document as a Python dict.
Available for poll mode only.
Args:
handle: handle returned by the plugin initialisation call
Returns:
returns a sensor reading in a JSON document, as a Python dict, if it is available
None - If no reading is available
Raises:
Exception
"""
try:
time_stamp = utils.local_timestamp()
data = {'asset': handle['assetName']['value'],
'timestamp': time_stamp,
'readings': {"random": next(generate_data())}}
except (Exception, RuntimeError) as ex:
_LOGGER.exception("Exception is {}".format(str(ex)))
raise ex
else:
return data
| 5,352,164 |
def _set_new_event_entity_for_export_method():
"""Register the new for_export method on EventEntity."""
global _orig_event_entity_for_export
_orig_event_entity_for_export = m_models.EventEntity.for_export
m_models.EventEntity.for_export = _event_entity_for_export
| 5,352,165 |
def top_average_pathways(document, file, max_sets, get_all=False):
""" Read the pathways file and get the top average pathways """
# read in the samples and get the data with out the stratification by bug
samples, pathways, data = document.read_table(file)
pathway_names = utilities.pathway_names(pathways)
pathways, data = utilities.remove_stratified_pathways(pathways,
data, remove_description=True)
# remove extra identifier from sample name if included in workflow
samples = [sample.replace("_Abundance","").replace("-RPKs","") for sample in samples]
# get the average abundance for the pathways
if get_all:
top_pathways, top_data = pathways, data
else:
top_pathways, top_data = utilities.top_rows(pathways,
data, max_sets, function="average")
# get the top names with descriptions
top_names_and_descriptions = [name+":"+pathway_names[name] for name in top_pathways]
return samples, top_pathways, top_data, top_names_and_descriptions
| 5,352,166 |
def ltria2skew(L):
"""
assume L has already passed the assertion check
:param L: lower triangle matrix, shape [N, 3]
:return: skew sym A [N, 3, 3]
"""
if len(L.shape) == 2:
N = L.shape[0]
# construct the skew-sym matrix
A = torch.zeros(N, 3, 3).cuda() # [N, 3, 3]
A[:, 1, 0] = L[:, 0]
A[:, 2, 0] = L[:, 1]
A[:, 2, 1] = L[:, 2]
A[:, 0, 1] = -L[:, 0]
A[:, 0, 2] = -L[:, 1]
A[:, 1, 2] = -L[:, 2]
return A
elif len(L.shape) == 1:
A = torch.zeros(3, 3).cuda()
A[1, 0] = L[0]
A[2, 0] = L[1]
A[2, 1] = L[2]
A[0, 1] = -L[0]
A[0, 2] = -L[1]
A[1, 2] = -L[2]
return A
else:
raise NotImplementedError
| 5,352,167 |
def gamma_contrast(data_sample, num_patches=324, num_channel=2, shape_data=None,
gamma_range=(0.5, 1.7), invert_image=False, per_channel=False,
retain_stats=False):
"""Performs gamma contrast transformation"""
epsilon = 1e-7
data_sample_patch = []
gamma_range_tensor = tf.convert_to_tensor(gamma_range)
for patch in range(num_patches):
if invert_image:
data_sample = - data_sample
if not per_channel:
# if np.random.random() < 0.5 and gamma_range[0] < 1:
# gamma = np.random.uniform(gamma_range[0], 1)
# else:
# gamma = np.random.uniform(max(gamma_range[0], 1), gamma_range[1])
def true_fn():
gamma_fn = tf.random.uniform(shape=(), minval=gamma_range[0], maxval=1, seed=1)
return gamma_fn
def false_fn():
gamma_fn = tf.random.uniform(shape=(), minval=tf.math.maximum(gamma_range[0], 1),
maxval=gamma_range[1], seed=1)
return gamma_fn
cond = tf.math.logical_and(tf.math.less(tf.random.uniform(shape=(), minval=0, maxval=0.99, seed=1), 0.5),
tf.math.less(gamma_range_tensor[0], 1))
gamma = tf.cond(cond, true_fn, false_fn)
min_val_ten = tf.math.reduce_min(data_sample[patch, ...])
range_tensor = tf.math.reduce_max(data_sample[patch, ...]) - min_val_ten
data_sample_norm = tf.math.divide(tf.math.subtract(data_sample[patch, ...], min_val_ten),
tf.math.add(range_tensor, epsilon))
data_img = tf.image.adjust_gamma(image=data_sample_norm, gamma=gamma,
gain=tf.math.add(range_tensor, epsilon))
data_img = tf.math.add(data_img, min_val_ten)
data_sample_patch.append(data_img)
else:
data_sample_per_channel = []
for c in range(num_channel):
def true_fn():
gamma_fn = tf.random_uniform_initializer(minval=gamma_range[0], maxval=1, seed=1)
return gamma_fn
def false_fn():
gamma_fn = tf.random_uniform_initializer(minval=tf.math.maximum(gamma_range[0], 1),
maxval=gamma_range[1], seed=1)
return gamma_fn
cond = tf.math.logical_and(tf.math.less(tf.random.uniform(shape=(), minval=0, maxval=0.99, seed=1), 0.5),
tf.math.less(gamma_range_tensor[0], 1))
gamma = tf.cond(cond, true_fn, false_fn)
min_val_ten = tf.math.reduce_min(data_sample[patch, :, :, :, c])
                range_tensor = tf.math.reduce_max(data_sample[patch, :, :, :, c]) - min_val_ten
                data_sample_norm = tf.math.divide(tf.math.subtract(data_sample[patch, ..., c], min_val_ten),
                                                  tf.math.add(range_tensor, epsilon))
data_img = tf.image.adjust_gamma(image=data_sample_norm, gamma=gamma,
gain=tf.math.add(range_tensor, epsilon))
data_img = tf.math.add(data_img, min_val_ten)
data_sample_per_channel.append(data_img)
data_sample_channel = tf.stack(data_sample_per_channel)
data_sample_channel = tf.transpose(data_sample_channel, perm=[1, 2, 3, 0])
data_sample_patch.append(data_sample_channel)
data_sample_return = tf.stack(data_sample_patch)
# data_sample_return = tf.transpose(data_sample_return, perm=[1, 2, 3, 4, 0])
return data_sample_return
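# A minimal usage sketch for gamma_contrast, assuming the function above is in scope and
# TF 2.x eager execution; the random batch of patches below is a hypothetical stand-in.
import tensorflow as tf

patches = tf.random.uniform((4, 8, 8, 8, 2))  # [num_patches, D, H, W, num_channel]
augmented = gamma_contrast(patches, num_patches=4, num_channel=2)
print(augmented.shape)  # (4, 8, 8, 8, 2)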
| 5,352,168 |
def _converge(helper, rcs, group):
"""
Function to be passed to :func:`_oob_disable_then` as the ``then``
parameter that triggers convergence.
"""
return group.trigger_convergence(rcs)
| 5,352,169 |
def HybridClientFactory(jid, password):
"""
Client factory for XMPP 1.0.
This is similar to L{client.XMPPClientFactory} but also tries non-SASL
autentication.
"""
a = HybridAuthenticator(jid, password)
return xmlstream.XmlStreamFactory(a)
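# A minimal usage sketch, assuming HybridClientFactory above is in scope and Twisted Words
# is installed; the JID and password below are hypothetical.
from twisted.words.protocols.jabber import jid

factory = HybridClientFactory(jid.JID("user@example.org"), "secret")
# reactor.connectTCP("example.org", 5222, factory) would then open the XML stream.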
| 5,352,170 |
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return dict(form=form)
| 5,352,171 |
def countingsort(A):
"""
Sort the list A. A has to be a list of integers.
Every element of the list A has to be non-negative.
@param A: the list that should get sorted
@return the sorted list
"""
if len(A) == 0:
return []
C = [0] * (max(A)+1)
    B = [0] * len(A)
# Count the number of elements
for el in A:
C[el] += 1
# Now C[i] contains how often i is in A
    for index in range(1, len(C)):
C[index] += C[index-1]
for el in A[::-1]:
B[C[el]-1] = el
C[el] -= 1
return B
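# A minimal usage sketch, assuming countingsort above is in scope; the input list is a
# hypothetical example of non-negative integers.
print(countingsort([4, 1, 3, 4, 3]))  # -> [1, 3, 3, 4, 4]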
| 5,352,172 |
def calc_obstacle_map(ox, oy, resolution, vr):
"""
    Build an obstacle map based on the distance from each
    grid cell to the obstacles. Any cell that lies within the
    vehicle's turning radius of an obstacle is treated as a
    blocking area and marked True.
"""
min_x = round(min(ox))
min_y = round(min(oy))
max_x = round(max(ox))
max_y = round(max(oy))
x_width = round(max_x - min_x)
y_width = round(max_y - min_y)
# obstacle map generation
obstacle_map = [[False for _ in range(y_width)] for _ in range(x_width)]
for ix in range(x_width):
x = ix + min_x
for iy in range(y_width):
y = iy + min_y
# print(x, y)
for iox, ioy in zip(ox, oy):
d = math.sqrt((iox - x)**2 + (ioy - y)**2)
if d * resolution <= vr:
obstacle_map[ix][iy] = True
break
return obstacle_map, min_x, min_y, max_x, max_y, x_width, y_width
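# A minimal usage sketch, assuming calc_obstacle_map above is in scope (it relies on the
# math module); the obstacle coordinates, resolution, and vehicle radius are hypothetical.
ox = [0.0, 5.0, 10.0, 10.0]
oy = [0.0, 5.0, 10.0, 0.0]
obstacle_map, min_x, min_y, max_x, max_y, x_width, y_width = calc_obstacle_map(
    ox, oy, resolution=1.0, vr=1.5)
print(x_width, y_width)  # 10 10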
| 5,352,173 |
def dilate(poly,eps):
"""
The function dilates a polytope.
    For a given polytope, a polytopic over-approximation of the $eps$-dilated set is computed.
    An eps-dilated set Pe of P is defined as:
    Pe = {x+n | x in P ^ n in Ball(eps)}
    where Ball(eps) is the epsilon neighborhood with norm |n| < eps.
    The current implementation is quite crude: hyper-boxes are placed over the original vertices
and the returned polytope is a qhull of these new vertices.
:param poly: original polytope
:param eps: positive scalar value with which the polytope is dilated
:return: polytope
"""
if isinstance(poly,polytope.Region):
dil_reg = []
for pol in poly.list_poly:
assert isinstance(pol,polytope.Polytope)
dil_reg += [dilate(pol, eps)]
return polytope.Region(dil_reg)
vertices = extreme(poly)
dim = len(vertices[0]) # this is the dimensionality of the space
dil_eps = dim * [[-eps,eps]]
dil_eps_v = [np.array(n) for n in itertools.product(*dil_eps)] # vectors with (+- eps,+- eps, +- eps,...)
new_vertices = []
for v,d in itertools.product(vertices,dil_eps_v):
new_vertices += [[np.array(v).flatten() + np.array(d).flatten()]]
# make box
# print("add vertices part:", np.array(v).flatten() + np.array(d).flatten())
VV = np.concatenate(new_vertices)
# print("V", VV)
return qhull(VV)
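# A minimal usage sketch, assuming dilate above is in scope together with the polytope
# package helpers it relies on (extreme, qhull, np, itertools); the unit box is hypothetical.
import polytope

unit_box = polytope.box2poly([[0.0, 1.0], [0.0, 1.0]])
bigger = dilate(unit_box, eps=0.1)  # over-approximation of the 0.1-dilated unit box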
| 5,352,174 |
def downgrade():
"""Reverse MSSQL backend compatibility improvements"""
conn = op.get_bind()
if conn.dialect.name != 'mssql':
return
alter_mssql_datetime2_column(conn, op, 'serialized_dag', 'last_updated', False)
op.alter_column(table_name="xcom", column_name="timestamp", type_=_get_timestamp(conn), nullable=True)
with op.batch_alter_table('task_reschedule') as task_reschedule_batch_op:
task_reschedule_batch_op.alter_column(
column_name='end_date', type_=_get_timestamp(conn), nullable=True
)
task_reschedule_batch_op.alter_column(
column_name='reschedule_date', type_=_get_timestamp(conn), nullable=True
)
task_reschedule_batch_op.alter_column(
column_name='start_date', type_=_get_timestamp(conn), nullable=True
)
with op.batch_alter_table('task_fail') as task_fail_batch_op:
task_fail_batch_op.drop_index('idx_task_fail_dag_task_date')
task_fail_batch_op.alter_column(
column_name="execution_date", type_=_get_timestamp(conn), nullable=False
)
task_fail_batch_op.create_index(
'idx_task_fail_dag_task_date', ['dag_id', 'task_id', 'execution_date'], unique=False
)
with op.batch_alter_table('task_instance') as task_instance_batch_op:
task_instance_batch_op.drop_index('ti_state_lkp')
task_instance_batch_op.create_index(
'ti_state_lkp', ['dag_id', 'task_id', 'execution_date'], unique=False
)
op.create_unique_constraint('UQ__dag_run__dag_id_run_id', 'dag_run', ['dag_id', 'run_id'])
op.create_unique_constraint('UQ__dag_run__dag_id_execution_date', 'dag_run', ['dag_id', 'execution_date'])
op.drop_index('idx_not_null_dag_id_execution_date', table_name='dag_run')
op.drop_index('idx_not_null_dag_id_run_id', table_name='dag_run')
| 5,352,175 |
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter,
file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev, variable_list=None):
"""
    Reads in the files to generate variable dataframes.
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None provided, default to dataset name
    dataset: (str) the name of the dataset catalogued in map_df
metadata: (str) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code for the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
min_elev: (float) minimum elevation permitted
max_elev: (float) maximum elevation permitted
"""
# start time
starttime = pd.datetime.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]), :]
print('Number of data files within elevation range ({0}-{1} m): {2}'.format(min_elev, max_elev, len(map_df)))
# establish default list of variables
if isinstance(variable_list, type(None)):
variable_list = metadata[dataset]['variable_list']
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# exclude YEAR, MONTH, and DAY
if eachvar not in ['YEAR', 'MONTH', 'DAY'] and eachvar in variable_list:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID', 'LAT', 'LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar+ ' dataframe reading complete:' + str(pd.datetime.now()-starttime))
return(df_dict)
| 5,352,176 |
def read_file(pickle_file_name):
"""Reads composite or non-composite class-activation maps from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: gradcam_dict: Has the following keys if not a composite...
gradcam_dict['denorm_predictor_matrices']: See doc for
`write_standard_file`.
gradcam_dict['cam_matrices']: Same.
gradcam_dict['guided_cam_matrices']: Same.
gradcam_dict['full_storm_id_strings']: Same.
gradcam_dict['storm_times_unix_sec']: Same.
gradcam_dict['model_file_name']: Same.
gradcam_dict['target_class']: Same.
gradcam_dict['target_layer_name']: Same.
gradcam_dict['sounding_pressure_matrix_pa']: Same.
...or the following keys if composite...
gradcam_dict['mean_denorm_predictor_matrices']: See doc for
`write_pmm_file`.
gradcam_dict['mean_cam_matrices']: Same.
gradcam_dict['mean_guided_cam_matrices']: Same.
gradcam_dict['model_file_name']: Same.
gradcam_dict['non_pmm_file_name']: Same.
gradcam_dict['pmm_max_percentile_level']: Same.
gradcam_dict['mean_sounding_pressures_pa']: Same.
:return: pmm_flag: Boolean flag. True if `gradcam_dict` contains
composite, False otherwise.
:raises: ValueError: if dictionary does not contain expected keys.
"""
pickle_file_handle = open(pickle_file_name, 'rb')
gradcam_dict = pickle.load(pickle_file_handle)
pickle_file_handle.close()
pmm_flag = MEAN_PREDICTOR_MATRICES_KEY in gradcam_dict
if pmm_flag:
missing_keys = list(
set(PMM_FILE_KEYS) - set(gradcam_dict.keys())
)
else:
missing_keys = list(
set(STANDARD_FILE_KEYS) - set(gradcam_dict.keys())
)
if len(missing_keys) == 0:
return gradcam_dict, pmm_flag
error_string = (
'\n{0:s}\nKeys listed above were expected, but not found, in file '
'"{1:s}".'
).format(str(missing_keys), pickle_file_name)
raise ValueError(error_string)
| 5,352,177 |
def tacodev(val=None):
"""a valid taco device"""
if val in ('', None):
return ''
val = string(val)
if not tacodev_re.match(val):
raise ValueError('%r is not a valid Taco device name' % val)
return val
| 5,352,178 |
def VioNet_densenet(config, home_path):
"""
Load DENSENET model
config.device
config.pretrained_model
config.sample_size
config.sample_duration
"""
device = config.device
ft_begin_idx = config.ft_begin_idx
sample_size = config.sample_size[0]
sample_duration = config.sample_duration
model = densenet121(num_classes=2,
sample_size=sample_size,
sample_duration=sample_duration).to(device)
# state_dict = torch.load(g_path +'/VioNet/'+ 'weights/DenseNet_Kinetics.pth')
state_dict = torch.load(os.path.join(home_path, VIONET_WEIGHTS, 'DenseNet_Kinetics.pth'))
model.load_state_dict(state_dict)
params = dn.get_fine_tuning_params(model, ft_begin_idx)
return model, params
| 5,352,179 |
def afficheStats(statsKm, listeJoueurs):
"""
    Plot the graph of kilometres travelled by
    each player as a function of the turn number.
"""
i = 0
for liste in statsKm:
plt.plot(liste, label=listeJoueurs[i].nom)
i += 1
plt.title("Evolution de la partie")
plt.xlabel("Tour")
plt.ylabel("Km")
plt.legend()
plt.show()
| 5,352,180 |
def explore_sub() -> None:
"""Explore some data sets."""
stats_df = data_frame_stats(sub)
print(stats_df) # .head().style.format({"notnulls%": "{:.2%}",
# "unique%": "{:.2%}"})
sub_stats = sub["form"].value_counts().reset_index()
sub_stats["label"] = \
np.where(sub_stats["form"].rank(ascending=False) < 8,
sub_stats["index"], "Other")
sub_stats = \
sub_stats.groupby("label").\
sum().sort_values(by="form", ascending=False).reset_index()
print(sub_stats)
# plot using Plotly
    # because the awesome Plotly.Express struggles with subplots, I'll use the
    # lower-level API to create two charts.
    # Bar chart on the left and pie chart on the right
# fig = make_subplots(rows=1, cols=2, specs=[[{'type':'bar'},
# {'type':'pie'}]], subplot_titles=["Forms - Bar Chart","Forms -
# Pie Chart"])
#
    # # Creating the Bar chart
# fig.add_trace(
# go.Bar(y=sub_stats["form"],
# x=sub_stats["label"],
# text=sub_stats["form"], # values displayed in the bars
# textposition='auto',
# name="Forms - Bar Chart"),
# row=1, col=1)
#
# # Creating the Pie chart
# fig.add_trace(
# go.Pie(values=sub_stats["form"],
# labels=sub_stats["label"],
# textinfo='label+percent'),
# row=1, col=2)
# # adding the title
# fig.update_layout(title="Most common forms in 2020Q1")
# display the chart in the jupyter notebook
# fig.show()
# let's filter adsh, unique report identifier of the `8-K`s
eight_k_filter = sub[sub["form"] == "8-K"][["name", "adsh"]]
print('eight_k_filter.shape:', eight_k_filter.shape)
print('\neight_k_filter.head()\n', eight_k_filter.head())
print('\nnum.info()\n', num.info())
print('\nnum.head()\n', num.head())
# merge the file headers with the detailed data
eight_k_nums = num.merge(eight_k_filter)
print('eight_k_nums.shape:', eight_k_nums.shape)
print('\neight_k_nums.head()\n', eight_k_nums.head())
print('len(eight_k_nums["adsh"].unique())',
len(eight_k_nums["adsh"].unique()))
    # num contains many lines for each adsh so we only .merge on unique
# values using .drop_duplicates()
# we left join to see all the records from sub.txt and possibly related
# record in num.txt
# we apply an indicator which highlights whether the adsh appear in
# `both` files or `left_only`
contain_data_df = \
sub[["adsh", "form"]].merge(num["adsh"].drop_duplicates(),
how="left", indicator=True)
print('\ncontain_data_df.sample(3)\n', contain_data_df.sample(3))
# we pivot the data to have 2 columns "both" and "left_only"
contain_data_stats = \
contain_data_df.pivot_table(index="form", columns="_merge",
values="adsh", aggfunc="count")
print('\ncontain_data_stats 1\n', contain_data_stats.head())
# if there's no data in left_only (100% are in both) we fill in zero
# .fillna(0)
# since the columns are categorical we need to turn them to string with
# .rename(columns=str) in
# order to .reset_index()
contain_data_stats = \
contain_data_stats.fillna(0).rename(columns=str).reset_index()
print('\ncontain_data_stats 2\n', contain_data_stats.head())
# finally we sort the data and rename the columns to be more user-friendly
# "both" --> "numeric data"
contain_data_stats = \
contain_data_stats.sort_values(by="both", ascending=False).rename(
columns={"both": "numeric data", "left_only": "no data"})
print('\ncontain_data_stats 3\n', contain_data_stats.head())
    # we calculate the percentage of adsh which have numeric data
contain_data_stats["perc_filled"] = \
contain_data_stats["numeric data"] /\
(contain_data_stats["numeric data"]+contain_data_stats["no data"])
print('\ncontain_data_stats 4\n', contain_data_stats.head())
# fig = px.bar(contain_data_stats,
# x="form", y=["numeric data","no data"], text="perc_filled")
# fig.update_traces(texttemplate='%{text:.2%}', textposition='auto')
# fig.update_layout(yaxis_title="Count")
# fig.show()
# filter only 10-K and 10-Q forms
tens = sub[sub["form"].isin(["10-Q", "10-K"])]
print('\ntens\n', tens.head())
# count how many forms of each type are in the dataset
tens_counts = tens["form"].value_counts().reset_index().\
rename(columns={"index": "form type", "form": "count"})
print('\ntens_counts\n', tens_counts)
# using Plotly.Express create a bar chart
# fig = px.bar(tens_counts,
# x="form type",
# y="count",
# barmode='group',
# text="count",
# title="Number of Annual and Quarterly forms in 2020Q1"
# )
# fig.show()
| 5,352,181 |
def train_and_eval(train_epochs, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, eval_filename,
unformed_filename, positive_labels, combination_method, method, c_lst, lbd_type, experiment_name, a, c, gold_b):
"""Train and evaluate the model."""
index_map, weights = wvd.load(test_embeddings_file_name)
#Get positive labels
positive_labels = positive_labels.split(',')
print("reading training data...")
train_file_name = train_data
df_train = pd.read_table(train_file_name, dtype={'train_nodes':str})
df_train = df_train.sample(frac=1)
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
#Get inputs
train_x, labels, _ = get_input(df_train, weights, index_map, combination_method)
train_loader = torch.utils.data.DataLoader(dataset=LinksDataset(train_x, labels), batch_size=batch_size, shuffle=True)
pos_labels = [l_ for l_ in labels if l_ != 0]
#Start loading evaluation data (same as test data for cancer cases)
print("reading eval data...")
test_file_name = test_data
df_test = pd.read_table(test_file_name, dtype={'train_nodes':str})
# remove NaN elements
df_test = df_test.dropna(how='any', axis=0)
c_dict = {}
if c_lst:
c_file = open(c_lst, 'r')
c_ = c_file.readline().strip(' \n\t')
while c_:
c_dict[c_] = 1
c_ = c_file.readline().strip(' \n\t')
test_x, test_labels, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')
test_loader = torch.utils.data.DataLoader(dataset=LinksDataset(test_x, test_labels), batch_size=batch_size, shuffle=False)
#End loading evaluation data
print("\nBuilding model...")
feature_dim = train_x[0].shape[0]
model, criterion, optimizer = build_model(feature_dim)
# Train the model
print("\nTraining model...")
total_step = len(train_loader)
best_info = {'best_rank':1000000000}
evaluate_every = 5
for epoch in range(train_epochs):
for i, (train_x, labels) in enumerate(train_loader):
labels = labels.type(torch.LongTensor)
#labels = labels.view(-1, 1)
links = train_x.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(links)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 500 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, train_epochs, i+1, total_step, loss.item()))
if (epoch + 1) % evaluate_every == 0:
#Save the current model
torch.save(model, CUR_PATH)
#Load the last saved best model
lmodel = torch.load(CUR_PATH)
lmodel.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
# Test the model
with torch.no_grad():
predictions = []
for test_links, _ in test_loader:
test_links = test_links.to(device)
outputs = lmodel(test_links)
predicted, _ = torch.max(outputs.data, 1)
predictions.extend([tensor.item() for tensor in predicted])
if lbd_type == 'closed_discovery':
ranks, ties, output = do_case_cd_evaluations(a, c, gold_b, [p for p in predictions], [x for x in test_original_x], experiment_name)
for ind, tie in enumerate(ties):
if tie > 10:
ranks.pop(ind)
if len(ranks) > 0:
best_rank = min(ranks)
if best_rank < best_info['best_rank']:
print("Saving because {} < {}".format(best_rank, best_info['best_rank']))
torch.save(model, PATH)
best_info['case_name'] = experiment_name
best_info['best_rank'] = best_rank
best_info['loss_at_best'] = loss.item()
best_info['epoch'] = epoch + 1
best_info['output'] = output
fil = open("{}.txt".format(experiment_name), 'w')
fil.write("{}\n\n{}".format(str(best_info), best_info['output']))
fil.close()
elif lbd_type == 'open_discovery':
ranks, ties, output = do_case_od_evaluations(a, gold_b, c, c_dict, [p for p in predictions], [x for x in test_original_x], experiment_name)
for ind, tie in enumerate(ties):
if tie > 10:
ranks.pop(ind)
if len(ranks) > 0:
best_rank = min(ranks)
if best_rank < best_info['best_rank']:
print("Saving because {} < {}".format(best_rank, best_info['best_rank']))
torch.save(model, PATH)
best_info['case_name'] = experiment_name
best_info['best_rank'] = best_rank
best_info['loss_at_best'] = loss.item()
best_info['epoch'] = epoch + 1
best_info['output'] = output
fil = open("{}.txt".format(experiment_name), 'w')
fil.write("{}\n\n{}".format(str(best_info), best_info['output']))
fil.close()
else:
print("ERROR: Invalid lbd_type: {}".format(lbd_type))
| 5,352,182 |
def decode_layout_example(example, input_range=None):
"""Given an instance and raw labels, creates <inputs, label> pair.
  Decoding includes:
1. Converting images from uint8 [0, 255] to [0, 1.] float32.
2. Mean subtraction and standardization using hard-coded mean and std.
3. Convert boxes from yxyx [0-1] to xyxy un-normalized.
4. Add 1 to all labels to account for background/padding object at label 0.
5. Shuffling dictionary keys to be consistent with the rest of the code.
Args:
example: dict; Input image and raw labels.
input_range: tuple; Range of input. By default we use Mean and StdDev
normalization.
Returns:
A dictionary of {'inputs': input image, 'labels': task label}.
"""
image = tf.image.convert_image_dtype(example['image'], dtype=tf.float32)
# Normalize.
if input_range:
image = image * (input_range[1] - input_range[0]) + input_range[0]
else:
mean_rgb = tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=tf.float32)
std_rgb = tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=tf.float32)
image = (image - mean_rgb) / std_rgb
boxes = example['objects']['boxes']
target = {
'boxes': boxes,
'labels': example['objects']['label'] + 1, # 0'th class is padding.
'binary_labels': example['objects']['binary_label'] + 1,
'desc_id': example['objects']['desc_id'],
'resource_id': example['objects']['resource_id'],
'name_id': example['objects']['name_id'],
'obj_mask': example['objects']['obj_mask'],
}
# Filters objects to exclude degenerate boxes.
valid_bbx = tf.logical_and(boxes[:, 2] > boxes[:, 0],
boxes[:, 3] > boxes[:, 1])
# -1 is ROOT node, remove it for training & eval.
valid_node = tf.greater(example['objects']['label'], -1)
keep = tf.where(tf.logical_and(valid_bbx, valid_node))[:, 0]
target_kept = {k: tf.gather(v, keep) for k, v in target.items()}
target_kept['orig_size'] = tf.cast(tf.shape(image)[0:2], dtype=tf.int32)
target_kept['size'] = tf.identity(target_kept['orig_size'])
return {
'inputs': image,
'label': target_kept,
}
| 5,352,183 |
def bact_plot(samples, bacteroidetes, healthiest_sample):
"""
    Returns a plot of the distribution of the Bacteroidetes data, annotated
    with the given sample value and the healthiest sample.

    Parameters
    ==========
    samples : pandas.DataFrame
        The sample data frame. Must contain columns `Bacteroidetes` and
        `Firmicutes` that contain the percentage of those phyla.
    bacteroidetes : float
        The Bacteroidetes percentage of the current sample, annotated as
        "You are here!".
    healthiest_sample : mapping
        Must contain a `Bacteroidetes` entry, annotated as "Healthiest Sample".

    Returns
    =======
    plotly graph
"""
import plotly.figure_factory as ff
hist_data = [samples["Bacteroidetes"]]
group_labels = ["Bacteroidetes"]
bact = ff.create_distplot(hist_data, group_labels, show_hist=False)
bact["layout"].update(title="Bacteroidetes Sample Distribution ")
bact["layout"].update(
showlegend=False,
annotations=[
dict(
x=bacteroidetes,
y=0,
xref="x",
yref="y",
text="You are here!",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor="#0e0f36",
ax=70,
ay=-30,
bordercolor="#06a300",
borderwidth=2,
borderpad=4,
bgcolor="#69f564",
opacity=0.8,
),
dict(
x=healthiest_sample["Bacteroidetes"],
y=0,
xref="x",
yref="y",
text="Healthiest Sample",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor="#0e0f36",
ax=70,
ay=30,
bordercolor="#4c0acf",
borderwidth=2,
borderpad=4,
bgcolor="#b977f2",
opacity=0.8,
),
],
)
return bact
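# A minimal usage sketch, assuming bact_plot above is in scope and plotly/scipy are
# installed (create_distplot needs scipy for the KDE); the sample values are hypothetical.
import pandas as pd

samples = pd.DataFrame({"Bacteroidetes": [10.0, 25.0, 40.0, 55.0, 70.0],
                        "Firmicutes": [85.0, 70.0, 55.0, 40.0, 25.0]})
fig = bact_plot(samples, bacteroidetes=30.0, healthiest_sample={"Bacteroidetes": 40.0})
# fig.show() would render the distribution with both annotations.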
| 5,352,184 |
def dynamic_upload_to(instance, filename):
"""
根据链接类型,决定存储的目录
"""
file_dir = (LogoImgRelatedDirEnum.APP.value
if instance.link_type == LinkTypeEnum.LIGHT_APP.value else LogoImgRelatedDirEnum.ICON.value)
return os.path.join(file_dir, filename)
| 5,352,185 |
def xdraw_lines(lines, **kwargs):
"""Draw lines and optionally set individual name, color, arrow, layer, and
width properties.
"""
guids = []
for l in iter(lines):
sp = l['start']
ep = l['end']
name = l.get('name', '')
color = l.get('color')
arrow = l.get('arrow')
layer = l.get('layer')
width = l.get('width')
guid = add_line(Point3d(*sp), Point3d(*ep))
if not guid:
continue
obj = find_object(guid)
if not obj:
continue
attr = obj.Attributes
if color:
attr.ObjectColor = FromArgb(*color)
attr.ColorSource = ColorFromObject
else:
attr.ColorSource = ColorFromLayer
if arrow == 'end':
attr.ObjectDecoration = EndArrowhead
if arrow == 'start':
attr.ObjectDecoration = StartArrowhead
if layer and find_layer_by_fullpath:
index = find_layer_by_fullpath(layer, True)
if index >= 0:
attr.LayerIndex = index
if width:
attr.PlotWeight = width
attr.PlotWeightSource = PlotWeightFromObject
attr.Name = name
obj.CommitChanges()
guids.append(guid)
return guids
| 5,352,186 |
def test_fragment_three_aa_peptide_c_series():
"""Test c1/2 fragmentation"""
fragments = PeptideFragment0r('MKK', charges=[1],ions=['c']).df
# fragments = fragger.fragment_peptide(ion_series=['c'])
assert isinstance(fragments, DataFrame)
# assert len(fragments) == 3
row = fragments.iloc[0]
assert row['name'] == 'c1'
assert row['hill'] == 'C(5)H(12)N(2)O(1)S(1)'
assert row['charge'] == 1
assert pytest.approx(
row['mz'],
5e-6
) == 149.07436
row = fragments.iloc[2]
assert row['name'] == 'c2'
assert row['hill'] == 'C(11)H(24)N(4)O(2)S(1)'
assert row['charge'] == 1
assert pytest.approx(
row['mz'],
5e-6
) == 277.16932
| 5,352,187 |
def _emit_params_file_action(ctx, path, mnemonic, cmds):
"""Helper function that writes a potentially long command list to a file.
Args:
ctx (struct): The ctx object.
path (string): the file path where the params file should be written.
      mnemonic (string): the action mnemonic.
cmds (list<string>): the command list.
Returns:
(File): an executable file that runs the command set.
"""
filename = "%s.%sFile.params" % (path, mnemonic)
f = ctx.new_file(ctx.configuration.bin_dir, filename)
ctx.file_action(output = f,
content = "\n".join(["set -e"] + cmds),
executable = True)
return f
| 5,352,188 |
def annotate_validation_results(results, parsed_data):
"""Annotate validation results with potential add-on restrictions like
denied origins."""
if waffle.switch_is_active('record-install-origins'):
denied_origins = sorted(
DeniedInstallOrigin.find_denied_origins(parsed_data['install_origins'])
)
for origin in denied_origins:
insert_validation_message(
results,
message=gettext(
'The install origin {origin} is not permitted.'.format(
origin=origin
)
),
)
return results
| 5,352,189 |
def test_player_1_wins_diagonally_at_beginning_of_the_main_diagonal(game):
"""
Board final state is
0 0 0 0 0 0 0
0 0 0 0 0 0 0
0 0 0 1 0 0 0
0 0 1 1 0 0 0
2 1 2 2 0 0 0
1 2 2 1 0 0 0
"""
[game.drop_checker_on_column(c) for c in [3, 3, 3, 4, 3, 4, 0, 5, 1, 6, 2]]
assert game.was_won
assert game.winner == "player1"
assert game.is_over
assert game.board("player1") == [
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[2, 1, 2, 2, 0, 0, 0],
[1, 2, 2, 1, 0, 0, 0]
]
| 5,352,190 |
def RunCommand(command,
input=None,
pollFn=None,
outStream=None,
errStream=None,
killOnEarlyReturn=True,
verbose=False,
debug=False,
printErrorInfo=False):
"""
Run a command, with optional input and polling function.
Args:
command: list of the command and its arguments.
    input: optional string of input to feed to the command; it should be
short enough to fit in an i/o pipe buffer.
pollFn: if present will be called occasionally to check if the command
should be finished early. If pollFn() returns true then the command
will finish early.
outStream: if present, the stdout output of the command will be written to
outStream.
errStream: if present, the stderr output of the command will be written to
errStream.
killOnEarlyReturn: if true and pollFn returns true, then the subprocess will
be killed, otherwise the subprocess will be detached.
verbose: if true, the command is echoed to stderr.
debug: if true, prints debugging information to stderr.
printErrorInfo: if true, prints error information when the subprocess
returns a non-zero exit code.
Returns: the output of the subprocess.
Exceptions:
Raises Error if the subprocess returns an error code.
Raises ValueError if called with invalid arguments.
"""
if verbose:
sys.stderr.write("command %s\n" % command)
stdin = None
if input:
stdin = subprocess.PIPE
try:
process = subprocess.Popen(
args=command,
stdin=stdin,
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
if not isinstance(command, basestring):
command = ' '.join(command)
if printErrorInfo:
sys.stderr.write("Command failed: '%s'\n" % command)
raise Error(e)
def StartThread(out):
queue = Queue.Queue()
def EnqueueOutput(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
thread = threading.Thread(target=EnqueueOutput, args=(out, queue))
thread.daemon = True
thread.start()
return queue
outQueue = StartThread(process.stdout)
errQueue = StartThread(process.stderr)
def ReadQueue(queue, out, out2):
try:
while True:
line = queue.get(False)
out.write(line)
if out2 != None:
out2.write(line)
except Queue.Empty:
pass
outBuf = StringIO.StringIO()
errorBuf = StringIO.StringIO()
if input:
process.stdin.write(input)
while True:
returncode = process.poll()
if returncode != None:
break
ReadQueue(errQueue, errorBuf, errStream)
ReadQueue(outQueue, outBuf, outStream)
if pollFn != None and pollFn():
returncode = 0
if killOnEarlyReturn:
process.kill()
break
time.sleep(0.1)
# Drain queue
ReadQueue(errQueue, errorBuf, errStream)
ReadQueue(outQueue, outBuf, outStream)
out = outBuf.getvalue()
error = errorBuf.getvalue()
if returncode:
if not isinstance(command, basestring):
command = ' '.join(command)
if printErrorInfo:
sys.stderr.write("Command failed: '%s'\n" % command)
sys.stderr.write(" stdout: '%s'\n" % out)
sys.stderr.write(" stderr: '%s'\n" % error)
sys.stderr.write(" returncode: %d\n" % returncode)
raise Error("Command failed: %s" % command)
if debug:
sys.stderr.write("output: %s\n" % out)
return out
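# A minimal usage sketch, assuming RunCommand above is in scope (note it targets Python 2,
# given the Queue/StringIO/basestring usage); the command below is a hypothetical example.
out = RunCommand(["echo", "hello"], verbose=True)
print(out)  # -> "hello\n"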
| 5,352,191 |
def ignore_firstline_dedent(text: str) -> str:
"""Like textwrap.dedent(), but ignore first empty lines
Args:
text: The text the be dedented
Returns:
The dedented text
"""
out = []
started = False
for line in text.splitlines():
if not started and not line.strip():
continue
if not started:
started = True
out.append(line)
return textwrap.dedent("\n".join(out))
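# A minimal usage sketch, assuming ignore_firstline_dedent above is in scope.
text = "\n\n    first line\n    second line"
print(ignore_firstline_dedent(text))  # leading blank lines dropped, common indent removed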
| 5,352,192 |
def test_provide_fail(providers: list[Provider]) -> None:
"""It raises an exception."""
with pytest.raises(Exception):
provide(providers, URL())
| 5,352,193 |
def clear_output(wait_time=0):
""" Clear terminal output"""
time.sleep(wait_time)
if 'linux' in sys.platform or 'darwin' == sys.platform:
os.system('clear')
else:
os.system('cls')
| 5,352,194 |
def discrete_micro(micro, es):
""" Performs discrete policy for autoscaling engine.
Algorithm:
if microservice crosses high threshold:
scale-up microservice
else:
scale-down microservice
"""
micro_config = util.read_config_file(eng.MICRO_CONFIG)
if up_scale(micro, es, "Micro"):
# Finally, scale up the microservice
execute.scale_microservice(micro, int(micro_config.get(micro, 'up_step')))
elif down_scale(micro, es, "Micro"):
execute.scale_microservice(micro, int(micro_config.get(micro, 'down_step')))
else:
print("Discrete Policies rejects scaling decision for microservice: "+micro+". Keep Observing...")
| 5,352,195 |
def read_file(filename):
"""
    Read a file and return its binary content.
    @param filename: filename as string.
    @return: data as bytes
"""
with open(filename, mode='rb') as file:
file_content = file.read()
return file_content
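# A minimal usage sketch, assuming this read_file is in scope; the path is hypothetical.
data = read_file("example.bin")
print(len(data), "bytes read")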
| 5,352,196 |
def getdictkeys(value):
"""
Returns the ordered keys of a dict
"""
if type(value) == dict:
keys = list(value.keys())
keys.sort(key=toint)
return keys
return []
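# A minimal usage sketch, assuming getdictkeys above is in scope and that the external
# helper toint it uses converts numeric string keys to integers for sorting.
print(getdictkeys({"10": "a", "2": "b", "1": "c"}))  # -> ['1', '2', '10']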
| 5,352,197 |
def exp_lr_scheduler(optimizer, epoch, init_lr=5e-3, lr_decay_epoch=40):
"""Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (0.1**(epoch // lr_decay_epoch))
if epoch % lr_decay_epoch == 0:
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
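# A minimal usage sketch, assuming exp_lr_scheduler above is in scope; the model and
# optimizer below are hypothetical stand-ins.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=5e-3)
for epoch in (0, 40, 80):
    optimizer = exp_lr_scheduler(optimizer, epoch)  # lr decays: 5e-3, 5e-4, 5e-5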
| 5,352,198 |
def leff_obs(n_zbin, pos_pos_dir, she_she_dir, pos_she_dir, ell_filename, pos_nl_path, she_nl_path, noise_ell_path,
leff_path, save_dir):
"""
Obtain a pseudo- cut-sky 'observation' by sampling from the Wishart distribution with effective l.
Args:
n_zbin (int): Number of redshift bins.
pos_pos_dir (str): Path to directory containing input position-position power spectra.
she_she_dir (str): Path to directory containing input shear-shear power spectra.
pos_she_dir (str): Path to directory containing input position-shear power spectra.
ell_filename (str): Filename of ells file within each input directory.
pos_nl_path (str): Path to position noise power spectrum.
she_nl_path (str): Path to shear noise power spectrum.
noise_ell_path (str): Path to noise ells.
leff_path (str): Path to leff map file.
save_dir (str): Path to output directory.
"""
n_fields = 2 * n_zbin
# Load theory cls and convert to matrices
theory_cls = like_w.load_cls(n_zbin, pos_pos_dir, she_she_dir, pos_she_dir)
theory_mats = like_w.cl_matrix(theory_cls, n_fields)
# Load ells and do some consistency checks
ell_pos_pos = np.loadtxt(os.path.join(pos_pos_dir, ell_filename))
ell_she_she = np.loadtxt(os.path.join(she_she_dir, ell_filename))
ell_pos_she = np.loadtxt(os.path.join(pos_she_dir, ell_filename))
lmax = np.amax(ell_pos_pos)
lmin = np.amin(ell_pos_she)
n_ell = int(lmax - lmin + 1)
assert np.allclose(ell_pos_pos, ell_she_she)
assert np.allclose(ell_pos_pos, ell_pos_she)
assert len(ell_pos_pos) == n_ell
assert theory_mats.shape[0] == n_ell
# Load noise cls and ells and trim to correct range
pos_nl = np.loadtxt(pos_nl_path)
she_nl = np.loadtxt(she_nl_path)
noise_ell = np.loadtxt(noise_ell_path)
noise_keep = np.logical_and(noise_ell >= lmin, noise_ell <= lmax)
pos_nl = pos_nl[noise_keep]
she_nl = she_nl[noise_keep]
assert np.allclose(noise_ell[noise_keep], ell_pos_pos)
# Convert noise cls to matrices (all diagonal)
nl_nonzero = np.array([pos_nl, she_nl]*n_zbin)
n_cls = int(n_fields * (n_fields + 1) / 2)
nl_zero = np.zeros((n_cls - n_fields, n_ell))
nl = np.concatenate((nl_nonzero, nl_zero))
noise_mats = like_w.cl_matrix(nl, n_fields)
# Load and apply leff map
leff_map = np.loadtxt(leff_path)
leff_l = leff_map[:, 0]
leff_map = leff_map[(leff_l >= lmin) & (leff_l <= lmax)]
assert np.allclose(ell_pos_pos, leff_map[:, 0])
leffs = leff_map[:, 1]
# For each l, sample from wishart with df = 2 * leff + 1 and scale = theory_cl_mat / df
obs_mats = np.full_like(theory_mats, np.nan)
zero = np.zeros(theory_mats.shape[1:])
print(f'Starting at {time.strftime("%c")}')
for i, (l, leff, theory_mat, noise_mat) in enumerate(zip(ell_pos_pos, leffs, theory_mats, noise_mats)):
print(f'l = {l:.0f} / {lmax:.0f}', end='\r')
df = 2 * leff + 1
# If wishart isn't defined, set all 0 - no effect as will be excluded from likelihood anyway
if df < n_fields:
obs_mats[i] = zero
continue
scale = (theory_mat + noise_mat) * 1. / df
obs = scipy.stats.wishart.rvs(df=df, scale=scale)
assert is_pd(obs)
obs_mats[i] = obs
# Save to disk
assert np.all(np.isfinite(obs_mats))
obs_cls = like_w.cl_vector(obs_mats)
save_cls_nob(obs_cls, n_zbin, save_dir, lmin=lmin)
| 5,352,199 |