content | id
---|---
string (lengths 22 to 815k) | int64 (0 to 4.91M)
def predict():
"""Renders the predict page and makes predictions if the method is POST."""
if request.method == 'GET':
return render_predict()
# Get arguments
checkpoint_name = request.form['checkpointName']
if 'data' in request.files:
# Upload data file with SMILES
data = request.files['data']
data_name = secure_filename(data.filename)
data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
data.save(data_path)
# Check if header is smiles
possible_smiles = get_header(data_path)[0]
smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []
# Get remaining smiles
smiles.extend(get_smiles(data_path))
elif request.form['textSmiles'] != '':
smiles = request.form['textSmiles'].split()
else:
smiles = [request.form['drawSmiles']]
checkpoint_path = os.path.join(app.config['CHECKPOINT_FOLDER'], checkpoint_name)
task_names = load_task_names(checkpoint_path)
num_tasks = len(task_names)
gpu = request.form.get('gpu')
# Create and modify args
parser = ArgumentParser()
add_predict_args(parser)
args = parser.parse_args([])
preds_path = os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'])
args.test_path = 'None' # TODO: Remove this hack to avoid assert crashing in modify_predict_args
args.preds_path = preds_path
args.checkpoint_path = checkpoint_path
if gpu is not None:
if gpu == 'None':
args.no_cuda = True
else:
args.gpu = int(gpu)
modify_predict_args(args)
# Run predictions
preds = make_predictions(args, smiles=smiles)
if all(p is None for p in preds):
return render_predict(errors=['All SMILES are invalid'])
# Replace invalid smiles with message
    invalid_smiles_warning = "Invalid SMILES String"
    contains_invalid_smiles = any(pred is None for pred in preds)
    preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]
return render_predict(predicted=True,
smiles=smiles,
num_smiles=min(10, len(smiles)),
show_more=max(0, len(smiles)-10),
task_names=task_names,
num_tasks=len(task_names),
preds=preds,
warnings=["List contains invalid SMILES strings"] if None in preds else None,
errors=["No SMILES strings given"] if len(preds) == 0 else None) | 5,356,200 |
def zernike_name(index, framework='Noll'):
"""
Get the name of the Zernike with input index in input framework (Noll or WSS).
:param index: int, Zernike index
:param framework: str, 'Noll' or 'WSS' for Zernike ordering framework
:return zern_name: str, name of the Zernike in the chosen framework
"""
noll_names = {1: 'piston', 2: 'tip', 3: 'tilt', 4: 'defocus', 5: 'astig45', 6: 'astig0', 7: 'ycoma', 8: 'xcoma',
9: 'ytrefoil', 10: 'xtrefoil', 11: 'spherical'}
wss_names = {1: 'piston', 2: 'tip', 3: 'tilt', 5: 'defocus', 4: 'astig45', 6: 'astig0', 8: 'ycoma', 7: 'xcoma',
10: 'ytrefoil', 11: 'xtrefoil', 9: 'spherical'}
if framework == 'Noll':
zern_name = noll_names[index]
elif framework == 'WSS':
zern_name = wss_names[index]
else:
raise ValueError('No known Zernike convention passed.')
return zern_name | 5,356,201 |
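# Example for zernike_name above: index 4 is 'defocus' in Noll ordering but 'astig45' in WSS.
print(zernike_name(4))                     # 'defocus'
print(zernike_name(4, framework='WSS'))    # 'astig45'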
def discriminator_train_batch_mle(batches, discriminator, loss_fn, optimizer):
"""
Summary
1. watch discriminator trainable_variables
2. extract encoder_output, labels, sample_weight, styles, captions from batch and make them tensors
3. predictions = discriminator(encoder_output, captions, styles, training=True)
4. loss = loss_fn(labels, predictions, sample_weight=sample_weight)
    5. gradients = tape.gradient(loss, discriminator.trainable_variables)
6. optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))
"""
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(discriminator.trainable_variables)
encoder_output = tf.concat([b[0] for b in batches], axis=0)
labels = tf.concat([b[2] for b in batches], axis=0)
sample_weight = tf.concat([b[3] for b in batches], axis=0)
styles = tf.concat([b[4] for b in batches], axis=0)
captions = [b[1] for b in batches]
max_caption_length = max([c.shape[1] for c in captions])
captions = [tf.pad(c, paddings=tf.constant([[0, 0], [0, max_caption_length - c.shape[1]]])) for c in captions]
captions = tf.concat(captions, axis=0)
predictions = discriminator(encoder_output, captions, styles, training=True)
loss = loss_fn(labels, predictions, sample_weight=sample_weight)
gradients = tape.gradient(loss, discriminator.trainable_variables)
optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))
return loss | 5,356,202 |
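# Minimal sketch of the caption-padding step used in discriminator_train_batch_mle above
# (TensorFlow assumed; toy tensors): shorter captions are right-padded with zeros so that
# batches with different caption lengths can be concatenated.
import tensorflow as tf
captions = [tf.ones((2, 3), dtype=tf.int32), tf.ones((1, 5), dtype=tf.int32)]
max_caption_length = max(c.shape[1] for c in captions)
padded = [tf.pad(c, paddings=[[0, 0], [0, max_caption_length - c.shape[1]]]) for c in captions]
print(tf.concat(padded, axis=0).shape)     # (3, 5)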
def get_subs_dict(expression, mod):
"""
    Builds a substitution dictionary for an expression based on the
    values of its symbols in a model.
Parameters
----------
expression : sympy expression
mod : PysMod
Returns
-------
    dict mapping symbol names (str) to float values
"""
subs_dict = {}
symbols = expression.atoms(Symbol)
for symbol in symbols:
attr = str(symbol)
subs_dict[attr] = getattr(mod, attr)
return subs_dict | 5,356,203 |
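# Hedged usage sketch for get_subs_dict above: any object exposing attributes named after
# the free symbols will do; SimpleNamespace stands in for a PysMod model here.
from sympy import symbols
from types import SimpleNamespace
k1, S = symbols('k1 S')
mod = SimpleNamespace(k1=2.0, S=0.5)
print(get_subs_dict(k1 * S, mod))          # {'k1': 2.0, 'S': 0.5} (key order may vary)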
def save_index_summary(name, rates, dates, grid_dim):
"""
Save index file
Parameters
----------
See Also
--------
DataStruct
"""
with open(name + INDEX_SUMMARY_EXT, "w+b") as file_index:
nlist = 0
        keywords_data, nums_data, nlist = get_keywords_section_data(rates)  # need to calculate the NLIST field for DIMENS
write_unrst_data_section(f=file_index, name=RESTART, stype=INDEX_META_BLOCK_SPEC[RESTART]['type'],
data_array=np.array(
[' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8]))
dimen = INDEX_META_BLOCK_SPEC[DIMENS]
dimen['struct']['nlist'].val = nlist
write_unrst_section(file_index, DIMENS, dimen, grid_dim)
write_unrst_data_section(f=file_index, name=KEYWORDS, stype=INDEX_SECTIONS_DATA[KEYWORDS].type,
data_array=keywords_data)
wgnames_date = get_wgnames_section_data(rates)
write_unrst_data_section(f=file_index, name=WGNAMES, stype=INDEX_SECTIONS_DATA[WGNAMES].type,
data_array=wgnames_date)
write_unrst_data_section(f=file_index, name=NUMS, stype=INDEX_SECTIONS_DATA[NUMS].type,
data_array=nums_data)
units_data, nlist = get_units_section_data(rates)
write_unrst_data_section(f=file_index, name=UNITS, stype=INDEX_SECTIONS_DATA[UNITS].type,
data_array=units_data)
write_unrst_data_section(f=file_index, name=STARTDAT, stype=INDEX_SECTIONS_DATA[STARTDAT].type,
data_array=get_startdat_section_data(dates[0]))
return nlist | 5,356,204 |
def fluxes_SIF_predict_noSIF(model_NEE, label, EV1, EV2, NEE_max_abs):
"""
Predict the flux partitioning from a trained NEE model.
:param model_NEE: full model trained on NEE
:type model_NEE: keras.Model
:param label: input of the model part 1 (APAR)
:type label: tf.Tensor
:param EV1: input of the model part 2 (GPP_input)
:type EV1: tf.Tensor
:param EV2: input of the model part 3 (Reco_input)
:type EV2: tf.Tensor
:param NEE_max_abs: normalization factor of NEE
:type NEE_max_abs: tf.Tensor | float
:return: corresponding NEE, GPP and Reco value for the provided data
:rtype: (tf.Tensor, tf.Tensor, tf.Tensor)
"""
NEE_NN = (layer_output_noSIF(model_NEE, 'NEE', label, EV1, EV2) * NEE_max_abs)
NEE_NN = tf.reshape(NEE_NN, (NEE_NN.shape[0],))
GPP_NN = (layer_output_noSIF(model_NEE, 'GPP', label, EV1, EV2) * NEE_max_abs)
GPP_NN = tf.reshape(GPP_NN, (NEE_NN.shape[0],))
Reco_NN = (layer_output_noSIF(model_NEE, 'Reco', label, EV1, EV2) * NEE_max_abs)
Reco_NN = tf.reshape(Reco_NN, (NEE_NN.shape[0],))
return NEE_NN, GPP_NN, Reco_NN | 5,356,205 |
def xml_string(line, tag, namespace, default=None):
""" Get string value from etree element """
try:
val = (line.find(namespace + tag).text)
except:
val = default
return val | 5,356,206 |
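# Example for xml_string above with a tiny ElementTree element and an empty namespace:
import xml.etree.ElementTree as ET
line = ET.fromstring('<row><name>alpha</name></row>')
print(xml_string(line, 'name', ''))            # 'alpha'
print(xml_string(line, 'missing', '', 'n/a'))  # falls back to the default: 'n/a'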
def generate_header(salutation, name, surname, postSalutation, address, zip, city, phone, email):
"""
This function generates the header pdf page
"""
# first we take the html file and parse it as a string
#print('generating header page', surname, name)
with open('/home/danielg3/www/crowdlobbying.ch/python/pdf/header.html', 'r', encoding='utf-8') as myfile:
data = myfile.read()
to_write = data.format(salutation, name, (surname + ' ' + postSalutation), str(datetime.datetime.now())[0:10])
pdfkit.from_string(to_write, '/tmp/header.pdf')
return open('/tmp/header.pdf', 'rb') | 5,356,207 |
def cli_cosmosdb_collection_exists(client, database_id, collection_id):
"""Returns a boolean indicating whether the collection exists """
return len(list(client.QueryContainers(
_get_database_link(database_id),
{'query': 'SELECT * FROM root r WHERE r.id=@id',
'parameters': [{'name': '@id', 'value': collection_id}]}))) > 0 | 5,356,208 |
def probabilities(X) -> dict:
    """ This function maps the set of outcomes found in the sequence of events, 'X', to their respective probability of occurring in 'X'.
    The return value is a python dictionary where the keys are the set of outcomes and the values are their associated probabilities."""
# The set of outcomes, denoted as 'C', and the total events, denoted as 'T'.
C, T = set(X), len(X)
return {c: X.count(c) / T for c in C} | 5,356,209 |
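# Example for probabilities above: empirical outcome frequencies in a sequence.
print(probabilities(['H', 'T', 'H', 'H']))     # {'H': 0.75, 'T': 0.25} (key order may vary)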
def get_recommend_news():
"""获取新闻推荐列表"""
# 触电新闻主页推荐实际URL
recommend_news_url = 'https://api.itouchtv.cn:8090/newsservice/v9/recommendNews?size=24&channelId=0'
# 当前毫秒时间戳
current_ms = int(time.time() * 1000)
headers = get_headers(target_url=recommend_news_url, ts_ms=current_ms)
resp = requests.get(url=recommend_news_url, headers=headers)
if resp.ok:
news_data = resp.json()
return news_data.get('newsList', [])
else:
raise Exception('请求异常:\n==> target_url: %s\n==> headers: %s' % (recommend_news_url, headers)) | 5,356,210 |
def put_profile_pic(url, profile):
"""
Takes a url from filepicker and uploads
it to our aws s3 account.
"""
try:
r = requests.get(url)
size = r.headers.get('content-length')
        if int(size) > 10000000:  # reject files larger than 10 MB  #patlsotw
return False
filename, headers = urlretrieve(url + "/resize?w=600&h=600")
resize_filename, headers = urlretrieve(url + "/resize?w=40&h=40") # store profile sized picture (40x40px)
conn = S3Connection(settings.AWS["AWS_ACCESS_KEY_ID"], settings.AWS["AWS_SECRET_ACCESS_KEY"])
b = conn.get_bucket(settings.AWS["BUCKET"])
_set_key(b, profile.user.username, filename)
k = _set_key(b, profile.user.username + "resize", resize_filename)
except Exception as e:
        print(e)
return False
return "http://s3.amazonaws.com/%s/%s"% (settings.AWS["BUCKET"], k.key) | 5,356,211 |
def get_raw_data() -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Loads serialized data from file.
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray]: Tuple of
features, labels and classes for the dataset.
"""
data_file: str = Path().absolute().joinpath(RAW_DATA_FILE).__str__()
data_dict: Dict[str, np.ndarray] = np.load(data_file, allow_pickle=True)
x: np.ndarray = data_dict['X']
y: np.ndarray = data_dict['Y']
classes: np.ndarray = data_dict['classes']
return x, y, classes | 5,356,212 |
def cog_pixel_value(
lon,
lat,
url,
bidx=None,
titiler_endpoint="https://titiler.xyz",
verbose=True,
**kwargs,
):
"""Get pixel value from COG.
Args:
lon (float): Longitude of the pixel.
lat (float): Latitude of the pixel.
url (str): HTTP URL to a COG, e.g., 'https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-02-16/pine-gulch-fire20/1030010076004E00.tif'
bidx (str, optional): Dataset band indexes (e.g bidx=1, bidx=1&bidx=2&bidx=3). Defaults to None.
titiler_endpoint (str, optional): Titiler endpoint, e.g., "https://titiler.xyz", "planetary-computer", "pc". Defaults to None.
verbose (bool, optional): Print status messages. Defaults to True.
Returns:
        dict: A dictionary mapping band names to pixel values.
"""
titiler_endpoint = check_titiler_endpoint(titiler_endpoint)
kwargs["url"] = url
if bidx is not None:
kwargs["bidx"] = bidx
r = requests.get(f"{titiler_endpoint}/cog/point/{lon},{lat}", params=kwargs).json()
bands = cog_bands(url, titiler_endpoint)
# if isinstance(titiler_endpoint, str):
# r = requests.get(f"{titiler_endpoint}/cog/point/{lon},{lat}", params=kwargs).json()
# else:
# r = requests.get(
# titiler_endpoint.url_for_stac_pixel_value(lon, lat), params=kwargs
# ).json()
if "detail" in r:
if verbose:
print(r["detail"])
return None
else:
values = r["values"]
result = dict(zip(bands, values))
return result | 5,356,213 |
def select_daily(ds, day_init=15, day_end=21):
"""
Select lead time days.
Args:
ds: xarray dataset.
day_init (int): first lead day selection. Defaults to 15.
day_end (int): last lead day selection. Defaults to 21.
Returns:
xarray dataset subset based on time selection.
::Lead time indices for reference::
Week 1: 1, 2, 3, 4, 5, 6, 7
Week 2: 8, 9, 10, 11, 12, 13, 14
Week 3: 15, 16, 17, 18, 19, 20, 21
Week 4: 22, 23, 24, 25, 26, 27, 28
Week 5: 29, 30, 31, 32, 33, 34, 35
Week 6: 36, 37, 38, 39, 40, 41, 42
"""
return ds.isel(lead=slice(day_init, day_end + 1)) | 5,356,214 |
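# Hedged sketch for select_daily above with a toy xarray dataset whose 'lead' dimension
# holds 43 daily lead times (the variable name 't2m' is an assumption).
import numpy as np
import xarray as xr
ds = xr.Dataset({'t2m': ('lead', np.arange(43))})
print(select_daily(ds)['t2m'].values)          # [15 16 17 18 19 20 21], i.e. week 3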
def project_polarcoord_lines(lines, img_w, img_h):
"""
Project lines in polar coordinate space <lines> (e.g. from hough transform) onto a canvas of size
<img_w> by <img_h>.
"""
if img_w <= 0:
raise ValueError('img_w must be > 0')
if img_h <= 0:
raise ValueError('img_h must be > 0')
lines_ab = []
for i, (rho, theta) in enumerate(lines):
# calculate intersections with canvas dimension minima/maxima
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
x_miny = rho / cos_theta if cos_theta != 0 else float("inf") # x for a minimal y (y=0)
y_minx = rho / sin_theta if sin_theta != 0 else float("inf") # y for a minimal x (x=0)
        x_maxy = (rho - img_h * sin_theta) / cos_theta if cos_theta != 0 else float("inf")  # x for maximal y (y=img_h)
        y_maxx = (rho - img_w * cos_theta) / sin_theta if sin_theta != 0 else float("inf")  # y for maximal x (x=img_w)
# because rounding errors happen, sometimes a point is counted as invalid because it
# is slightly out of the bounding box
# this is why we have to correct it like this
def border_dist(v, border):
return v if v <= 0 else v - border
# set the possible points
# some of them will be out of canvas
possible_pts = [
([x_miny, 0], (border_dist(x_miny, img_w), 0)),
([0, y_minx], (border_dist(y_minx, img_h), 1)),
([x_maxy, img_h], (border_dist(x_maxy, img_w), 0)),
([img_w, y_maxx], (border_dist(y_maxx, img_h), 1)),
]
# get the valid and the dismissed (out of canvas) points
valid_pts = []
dismissed_pts = []
for p, dist in possible_pts:
if 0 <= p[0] <= img_w and 0 <= p[1] <= img_h:
valid_pts.append(p)
else:
dismissed_pts.append((p, dist))
# from the dismissed points, get the needed ones that are closed to the canvas
n_needed_pts = 2 - len(valid_pts)
if n_needed_pts > 0:
dismissed_pts_sorted = sorted(dismissed_pts, key=lambda x: abs(x[1][0]), reverse=True)
for _ in range(n_needed_pts):
p, (dist, coord_idx) = dismissed_pts_sorted.pop()
p[coord_idx] -= dist # correct
valid_pts.append(p)
p1 = pt(*valid_pts[0])
p2 = pt(*valid_pts[1])
lines_ab.append((p1, p2))
return lines_ab | 5,356,215 |
def standardize_for_imshow(image):
"""
A luminance standardization for pyplot's imshow
This just allows me to specify a simple, transparent standard for what white
and black correspond to in pyplot's imshow method. Likely could be
accomplished by the colors.Normalize method, but I want to make this as
explicit as possible. If the image is nonnegative, we divide by the scalar
that makes the largest value 1.0. If the image is nonpositive, we
divide by the scalar that makes the smallest value -1.0, and then add 1, so
that this value is 0.0, pitch black. If the image has both positive and
negative values, we divide and shift so that 0.0 in the original image gets
mapped to 0.5 for imshow and the largest absolute value gets mapped to
    either 0.0 or 1.0 depending on whether it was positive or negative.
Parameters
----------
image : ndarray
The image to be standardized, can be (h, w) or (h, w, c). All operations
are scalar operations applied to every color channel. Note this, may
change hue of color images, I think.
Returns
-------
standardized_image : ndarray
An RGB image in the range [0.0, 1.0], ready to be showed by imshow.
raw_val_mapping : tuple(float, float, float)
Indicates what raw values got mapped to 0.0, 0.5, and 1.0, respectively
"""
max_val = np.max(image)
min_val = np.min(image)
if max_val == min_val: # constant value
standardized_image = 0.5 * np.ones(image.shape)
if max_val > 0:
raw_val_mapping = [0.0, max_val, 2*max_val]
elif max_val < 0:
raw_val_mapping = [2*max_val, max_val, 0.0]
else:
raw_val_mapping = [-1.0, 0.0, 1.0]
else:
if min_val >= 0:
standardized_image = image / max_val
raw_val_mapping = [0.0, 0.5*max_val, max_val]
elif max_val <= 0:
standardized_image = (image / -min_val) + 1.0
raw_val_mapping = [min_val, 0.5*min_val, 0.0]
else:
# straddles 0.0. We want to map 0.0 to 0.5 in the displayed image
skew_toward_max = np.argmax([abs(min_val), abs(max_val)])
if skew_toward_max:
normalizer = (2 * max_val)
raw_val_mapping = [-max_val, 0.0, max_val]
else:
normalizer = (2 * np.abs(min_val))
raw_val_mapping = [min_val, 0.0, -min_val]
standardized_image = (image / normalizer) + 0.5
return standardized_image, raw_val_mapping | 5,356,216 |
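# Quick check of standardize_for_imshow above on a toy array that straddles zero:
import numpy as np
img = np.array([[-2.0, 0.0], [0.0, 1.0]])
std, mapping = standardize_for_imshow(img)
print(std.min(), std.max())                    # 0.0 0.75 (raw 0.0 maps to 0.5)
print(mapping)                                 # [-2.0, 0.0, 2.0]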
def err_failure(error) :
""" Check a error on failure """
return not err_success(error) | 5,356,217 |
def rah_fixed_dt( u2m, roh_air, cp, dt, disp, z0m, z0h, tempk):
"""
    Computes the aerodynamic resistance to heat transport (rah) from air density, air specific heat, and the temperature difference between the surface skin and a height of about 2 m above it. This version runs an iteration loop to stabilize the stability-correction terms used in the resistance.
    Fixed temperature difference correction of aerodynamic roughness for heat transport
"""
    PI = 3.14159265358979323846
    ublend = u2m*(log(100-disp)-log(z0m))/(log(2-disp)-log(z0m))
    # Start from neutral stability (psim = psih = 0) so the first iteration is defined;
    # this initialization is an assumption not present in the original snippet.
    psim = 0.0
    psih = 0.0
    for i in range(10):
        ustar = 0.41*ublend/(log((100-disp)/z0m)-psim)
        rah = (log((2-disp)/z0h)-psih)/(0.41*ustar)
        h_in = roh_air * cp * dt / rah
        length = -roh_air*cp*pow(ustar, 3)*tempk/(0.41*9.81*h_in)
        xm = pow(1.0-16.0*((100-disp)/length), 0.25)
        xh = pow(1.0-16.0*((2-disp)/length), 0.25)
        # Businger-Dyer stability corrections
        psim = 2.0*log((1.0+xm)/2.0)+log((1.0+xm*xm)/2.0)-2.0*atan(xm)+0.5*PI
        psih = 2.0*log((1.0+xh*xh)/2.0)
    return rah
def process_grid_subsets(output_file, start_subset_id=0, end_subset_id=-1):
""""Execute analyses on the data of the complete grid and save the processed data to a netCDF file.
By default all subsets are analyzed
Args:
output_file (str): Name of netCDF file to which the results are saved for the respective
subset. (including format {} placeholders)
start_subset_id (int): Starting subset id to be analyzed
end_subset_id (int): Last subset id to be analyzed
(set to -1 to process all subsets after start_subset_id)
"""
ds, lons, lats, levels, hours, i_highest_level = read_raw_data(start_year, final_year)
check_for_missing_data(hours)
# Reading the data of all grid points from the NetCDF file all at once requires a lot of memory. On the other hand,
# reading the data of all grid points one by one takes up a lot of CPU. Therefore, the dataset is analysed in
# pieces: the subsets are read and processed consecutively.
n_subsets = int(np.ceil(float(len(lats)) / read_n_lats_per_subset))
# Define subset range to be processed in this run
if end_subset_id == -1:
subset_range = range(start_subset_id, n_subsets)
else:
subset_range = range(start_subset_id, end_subset_id+1)
if subset_range[-1] > (n_subsets-1):
raise ValueError("Requested subset ID ({}) is higher than maximal subset ID {}."
.format(subset_range[-1], (n_subsets-1)))
# Loop over all specified subsets to write processed data to the output file.
counter = 0
total_iters = len(lats) * len(lons)*len(subset_range)/n_subsets
start_time = timer()
for i_subset in subset_range:
# Find latitudes corresponding to the current i_subset
i_lat0 = i_subset * read_n_lats_per_subset
if i_lat0+read_n_lats_per_subset < len(lats):
lat_ids_subset = range(i_lat0, i_lat0 + read_n_lats_per_subset)
else:
lat_ids_subset = range(i_lat0, len(lats))
lats_subset = lats[lat_ids_subset]
print("Subset {}, Latitude(s) analysed: {} to {}".format(i_subset, lats_subset[0], lats_subset[-1]))
# Initialize result arrays for this subset
res = initialize_result_dict(lats_subset, lons)
print(' Result array configured, reading subset input now, time lapsed: {:.2f} hrs'
.format(float(timer()-start_time)/3600))
# Read data for the subset latitudes
v_levels_east = ds.variables['u'][:, i_highest_level:, lat_ids_subset, :].values
v_levels_north = ds.variables['v'][:, i_highest_level:, lat_ids_subset, :].values
v_levels = (v_levels_east**2 + v_levels_north**2)**.5
t_levels = ds.variables['t'][:, i_highest_level:, lat_ids_subset, :].values
q_levels = ds.variables['q'][:, i_highest_level:, lat_ids_subset, :].values
try:
surface_pressure = ds.variables['sp'][:, lat_ids_subset, :].values
except KeyError:
surface_pressure = np.exp(ds.variables['lnsp'][:, lat_ids_subset, :].values)
print(' Input read, performing statistical analysis now, time lapsed: {:.2f} hrs'
.format(float(timer()-start_time)/3600))
for i_lat_in_subset in range(len(lat_ids_subset)): # Saves a file for each subset.
for i_lon in range(len(lons)):
if (i_lon % 20) == 0: # Give processing info every 20 longitudes
                    print('    {} of {} longitudes analyzed, statistical analysis of longitude {}, time lapsed: '
'{:.2f} hrs'.format(i_lon, len(lons), lons[i_lon], float(timer()-start_time)/3600))
counter += 1
level_heights, density_levels = compute_level_heights(levels,
surface_pressure[:, i_lat_in_subset, i_lon],
t_levels[:, :, i_lat_in_subset, i_lon],
q_levels[:, :, i_lat_in_subset, i_lon])
# Determine wind at altitudes of interest by means of interpolating the raw wind data.
v_req_alt = np.zeros((len(hours), len(heights_of_interest))) # Interpolation results array.
rho_req_alt = np.zeros((len(hours), len(heights_of_interest)))
for i_hr in range(len(hours)):
if not np.all(level_heights[i_hr, 0] > heights_of_interest):
raise ValueError("Requested height ({:.2f} m) is higher than height of highest model level."
.format(level_heights[i_hr, 0]))
v_req_alt[i_hr, :] = np.interp(heights_of_interest, level_heights[i_hr, ::-1],
v_levels[i_hr, ::-1, i_lat_in_subset, i_lon])
rho_req_alt[i_hr, :] = np.interp(heights_of_interest, level_heights[i_hr, ::-1],
density_levels[i_hr, ::-1])
p_req_alt = calc_power(v_req_alt, rho_req_alt)
# Determine wind statistics at fixed heights of interest.
for i_out, fixed_height_id in enumerate(analyzed_heights_ids['fixed']):
v_mean, v_perc5, v_perc32, v_perc50 = get_statistics(v_req_alt[:, fixed_height_id])
res['fixed']['wind_speed']['mean'][i_out, i_lat_in_subset, i_lon] = v_mean
res['fixed']['wind_speed']['percentile'][5][i_out, i_lat_in_subset, i_lon] = v_perc5
res['fixed']['wind_speed']['percentile'][32][i_out, i_lat_in_subset, i_lon] = v_perc32
res['fixed']['wind_speed']['percentile'][50][i_out, i_lat_in_subset, i_lon] = v_perc50
v_ranks = get_percentile_ranks(v_req_alt[:, fixed_height_id], [4., 8., 14., 25.])
res['fixed']['wind_speed']['rank'][4][i_out, i_lat_in_subset, i_lon] = v_ranks[0]
res['fixed']['wind_speed']['rank'][8][i_out, i_lat_in_subset, i_lon] = v_ranks[1]
res['fixed']['wind_speed']['rank'][14][i_out, i_lat_in_subset, i_lon] = v_ranks[2]
res['fixed']['wind_speed']['rank'][25][i_out, i_lat_in_subset, i_lon] = v_ranks[3]
p_fixed_height = p_req_alt[:, fixed_height_id]
p_mean, p_perc5, p_perc32, p_perc50 = get_statistics(p_fixed_height)
res['fixed']['wind_power_density']['mean'][i_out, i_lat_in_subset, i_lon] = p_mean
res['fixed']['wind_power_density']['percentile'][5][i_out, i_lat_in_subset, i_lon] = p_perc5
res['fixed']['wind_power_density']['percentile'][32][i_out, i_lat_in_subset, i_lon] = p_perc32
res['fixed']['wind_power_density']['percentile'][50][i_out, i_lat_in_subset, i_lon] = p_perc50
p_ranks = get_percentile_ranks(p_fixed_height, [40., 300., 1600., 9000.])
res['fixed']['wind_power_density']['rank'][40][i_out, i_lat_in_subset, i_lon] = p_ranks[0]
res['fixed']['wind_power_density']['rank'][300][i_out, i_lat_in_subset, i_lon] = p_ranks[1]
res['fixed']['wind_power_density']['rank'][1600][i_out, i_lat_in_subset, i_lon] = p_ranks[2]
res['fixed']['wind_power_density']['rank'][9000][i_out, i_lat_in_subset, i_lon] = p_ranks[3]
# Integrate power along the altitude.
for range_id in integration_range_ids:
height_id_start = analyzed_heights_ids['integration_ranges'][range_id][1]
height_id_final = analyzed_heights_ids['integration_ranges'][range_id][0]
p_integral = []
x = heights_of_interest[height_id_start:height_id_final + 1]
for i_hr in range(len(hours)):
y = p_req_alt[i_hr, height_id_start:height_id_final+1]
p_integral.append(-np.trapz(y, x))
res['integration_ranges']['wind_power_density']['mean'][range_id, i_lat_in_subset, i_lon] = \
np.mean(p_integral)
# Determine wind statistics for ceiling cases.
for i_out, ceiling_id in enumerate(analyzed_heights_ids['ceilings']):
# Find the height maximizing the wind speed for each hour.
v_ceiling = np.amax(v_req_alt[:, ceiling_id:analyzed_heights_ids['floor'] + 1], axis=1)
v_ceiling_ids = np.argmax(v_req_alt[:, ceiling_id:analyzed_heights_ids['floor'] + 1], axis=1) + \
ceiling_id
# optimal_heights = [heights_of_interest[max_id] for max_id in v_ceiling_ids]
# rho_ceiling = get_density_at_altitude(optimal_heights + surf_elev)
rho_ceiling = rho_req_alt[np.arange(len(hours)), v_ceiling_ids]
p_ceiling = calc_power(v_ceiling, rho_ceiling)
v_mean, v_perc5, v_perc32, v_perc50 = get_statistics(v_ceiling)
res['ceilings']['wind_speed']['mean'][i_out, i_lat_in_subset, i_lon] = v_mean
res['ceilings']['wind_speed']['percentile'][5][i_out, i_lat_in_subset, i_lon] = v_perc5
res['ceilings']['wind_speed']['percentile'][32][i_out, i_lat_in_subset, i_lon] = v_perc32
res['ceilings']['wind_speed']['percentile'][50][i_out, i_lat_in_subset, i_lon] = v_perc50
v_ranks = get_percentile_ranks(v_ceiling, [4., 8., 14., 25.])
res['ceilings']['wind_speed']['rank'][4][i_out, i_lat_in_subset, i_lon] = v_ranks[0]
res['ceilings']['wind_speed']['rank'][8][i_out, i_lat_in_subset, i_lon] = v_ranks[1]
res['ceilings']['wind_speed']['rank'][14][i_out, i_lat_in_subset, i_lon] = v_ranks[2]
res['ceilings']['wind_speed']['rank'][25][i_out, i_lat_in_subset, i_lon] = v_ranks[3]
p_mean, p_perc5, p_perc32, p_perc50 = get_statistics(p_ceiling)
res['ceilings']['wind_power_density']['mean'][i_out, i_lat_in_subset, i_lon] = p_mean
res['ceilings']['wind_power_density']['percentile'][5][i_out, i_lat_in_subset, i_lon] = p_perc5
res['ceilings']['wind_power_density']['percentile'][32][i_out, i_lat_in_subset, i_lon] = p_perc32
res['ceilings']['wind_power_density']['percentile'][50][i_out, i_lat_in_subset, i_lon] = p_perc50
p_ranks = get_percentile_ranks(p_ceiling, [40., 300., 1600., 9000.])
res['ceilings']['wind_power_density']['rank'][40][i_out, i_lat_in_subset, i_lon] = p_ranks[0]
res['ceilings']['wind_power_density']['rank'][300][i_out, i_lat_in_subset, i_lon] = p_ranks[1]
res['ceilings']['wind_power_density']['rank'][1600][i_out, i_lat_in_subset, i_lon] = p_ranks[2]
res['ceilings']['wind_power_density']['rank'][9000][i_out, i_lat_in_subset, i_lon] = p_ranks[3]
print('Locations analyzed: ({}/{:.0f}).'.format(counter, total_iters))
# Flatten output, convert to xarray Dataset and write to output file.
output_file_name_formatted = output_file.format(**{'start_year': start_year, 'final_year': final_year,
'lat_subset_id': i_subset, 'max_lat_subset_id': n_subsets-1})
print('Writing output to file: {}'.format(output_file_name_formatted))
flattened_subset_output = get_result_dict(lats_subset, lons, hours, res)
nc_out = xr.Dataset.from_dict(flattened_subset_output)
nc_out.to_netcdf(output_file_name_formatted)
nc_out.close()
time_lapsed = float(timer()-start_time)
time_remaining = time_lapsed/counter*(total_iters-counter)
print("Time lapsed: {:.2f} hrs, expected time remaining: {:.2f} hrs.".format(time_lapsed/3600,
time_remaining/3600))
ds.close() # Close the input NetCDF file.
return n_subsets-1 | 5,356,219 |
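# The vertical interpolation inside process_grid_subsets above reverses the level arrays
# because np.interp expects ascending x-values while model-level heights are stored
# top-down. A hedged toy illustration with made-up values:
import numpy as np
level_heights = np.array([5000., 1000., 300., 100., 10.])   # descending
wind_speeds = np.array([30., 18., 12., 9., 5.])
heights_of_interest = np.array([50., 200., 500.])
print(np.interp(heights_of_interest, level_heights[::-1], wind_speeds[::-1]))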
def bulk_lookup(license_dict, pkg_list):
"""Lookup package licenses"""
pkg_licenses = {}
for pkg in pkg_list:
# Failsafe in case the bom file contains incorrect entries
if not pkg.get("name") or not pkg.get("version"):
continue
pkg_key = pkg["name"] + "@" + pkg["version"]
if pkg.get("vendor"):
pkg_key = pkg.get("vendor") + ":" + pkg["name"] + "@" + pkg["version"]
for lic in pkg.get("licenses"):
if lic == "X11":
lic = "MIT"
elif "MIT" in lic:
lic = "MIT"
curr_list = pkg_licenses.get(pkg_key, [])
match_lic = license_dict.get(lic)
if match_lic:
curr_list.append(match_lic)
pkg_licenses[pkg_key] = curr_list
return pkg_licenses | 5,356,220 |
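# Hedged usage sketch for bulk_lookup above with a one-entry license dictionary:
license_dict = {'MIT': {'spdx': 'MIT', 'osi': True}}
pkg_list = [{'vendor': 'acme', 'name': 'libfoo', 'version': '1.0', 'licenses': ['X11']}]
print(bulk_lookup(license_dict, pkg_list))
# {'acme:libfoo@1.0': [{'spdx': 'MIT', 'osi': True}]} - X11 is normalized to MIT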
def pack_bits(bools):
"""Pack sequence of bools into bits"""
if len(bools) % 8 != 0:
raise ValueError("list length must be multiple of 8")
bytes_ = []
b = 0
for j, v in enumerate(reversed(bools)):
b <<= 1
b |= v
if j % 8 == 7:
bytes_.append(b)
b = 0
return bytes_ | 5,356,221 |
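# pack_bits above walks the list in reverse, so the last bool lands in the most
# significant bit of the first byte:
print(pack_bits([True] * 8))                   # [255]
print(pack_bits([False] * 7 + [True]))         # [128]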
def make_coffee(drink_name, order_ingredients):
"""Deduct the required ingredients from the resources."""
for item in order_ingredients:
resources[item] -= order_ingredients[item]
print("Here is your {} ☕. Enjoy!".format(drink_name)) | 5,356,222 |
def init_ring_dihedral(species,instance,geom = []):
"""
Calculates the required modifications to a structures dihedral to create a cyclic TS
"""
if len(geom) == 0:
geom = species.geom
if len(instance) > 3:
if len(instance) < 6:
final_dihedral = 15.
else:
final_dihedral = 1.
dihedrals = []
for i in range(len(instance)-3):
dihedrals.append(calc_dihedral(geom[instance[i]], geom[instance[i+1]], geom[instance[i+2]], geom[instance[i+3]])[0])
dihedral_diff = [final_dihedral - dihedrals[i] for i in range(len(dihedrals))]
return dihedral_diff | 5,356,223 |
def ensure_s3_bucket(s3_client, bucket_name, bucket_region):
"""Ensure an s3 bucket exists, if it does not then create it.
Args:
s3_client (:class:`botocore.client.Client`): An s3 client used to
verify and create the bucket.
bucket_name (str): The bucket being checked/created.
bucket_region (str, optional): The region to create the bucket in. If
not provided, will be determined by s3_client's region.
"""
try:
s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Message'] == "Not Found":
logger.debug("Creating bucket %s.", bucket_name)
create_args = {"Bucket": bucket_name}
location_constraint = s3_bucket_location_constraint(
bucket_region
)
if location_constraint:
create_args["CreateBucketConfiguration"] = {
"LocationConstraint": location_constraint
}
s3_client.create_bucket(**create_args)
elif e.response['Error']['Message'] == "Forbidden":
logger.exception("Access denied for bucket %s. Did " +
"you remember to use a globally unique name?",
bucket_name)
raise
else:
logger.exception("Error creating bucket %s. Error %s",
bucket_name, e.response)
raise | 5,356,224 |
def get_largest_contour(
contours: List[NDArray], min_area: int = 30
) -> Optional[NDArray]:
"""
Finds the largest contour with size greater than min_area.
Args:
contours: A list of contours found in an image.
min_area: The smallest contour to consider (in number of pixels)
Returns:
The largest contour from the list, or None if no contour was larger
than min_area.
Example::
# Extract the blue contours
BLUE_HSV_MIN = (90, 50, 50)
BLUE_HSV_MAX = (110, 255, 255)
contours = rc_utils.find_contours(
rc.camera.get_color_image(), BLUE_HSV_MIN, BLUE_HSV_MAX
)
# Find the largest contour
largest_contour = rc_utils.get_largest_contour(contours)
"""
# Check that the list contains at least one contour
if len(contours) == 0:
return None
# Find and return the largest contour if it is larger than min_area
greatest_contour = max(contours, key=cv.contourArea)
if cv.contourArea(greatest_contour) < min_area:
return None
return greatest_contour | 5,356,225 |
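# Example for get_largest_contour above with two synthetic square contours (OpenCV assumed):
import numpy as np
import cv2 as cv
small = np.array([[[0, 0]], [[3, 0]], [[3, 3]], [[0, 3]]], dtype=np.int32)    # area 9
big = np.array([[[0, 0]], [[20, 0]], [[20, 20]], [[0, 20]]], dtype=np.int32)  # area 400
print(cv.contourArea(get_largest_contour([small, big])))                      # 400.0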
def particle(
engine,
particle_id="",
color: Tuple4 = (1, 0.4, 0.1, 1),
random_color: bool = False,
color_temp: bool = False,
vx=None,
vy=None,
vz=None,
speed_limit=None,
) -> Material:
""" Particle material. """
mat = bpy.data.materials.new(f"Particle{particle_id}")
# FIXME(tpvasconcelos): Use different colors within a particle system
# if color_temp == 'temperature':
# factor = _get_speed_factor(vx, vy, vz, speed_limit)
if random_color:
color = _get_randomcolor()
if engine == "BLENDER_RENDER":
return _render_particle(mat, color[:-1])
return _cycles_particle(mat, color) | 5,356,226 |
def _get_hardware_sharing_throughputs(
outdirs,
device,
device_model,
precs,
filename,
mode,
):
""" The result is in the format of
{
'amp': pd.DataFrame, # df contains max_B rows
'fp32': pd.DataFrame, # df contains max_B rows
}
df format: (`B` is the index)
B {mode}:{prec}:0 {mode}:{prec}:1 ... {mode}:{prec}:avg {mode}:{prec}:min {mode}:{prec}:max
1 float float ... float float float
2 float float ... float float float
3 float float ... float float float
...
max_B float float ... float float float
"""
throughputs = {}
for prec in precs:
throughputs[prec] = {'B': []}
for outdir_idx, outdir in enumerate(outdirs):
Bs = []
throughputs_of_Bs = []
mode_outdir_path = os.path.join(outdir, device, device_model, prec, mode)
for B_exp in os.listdir(mode_outdir_path):
B = int(B_exp[1:])
Bs.append(B)
B_outdir_path = os.path.join(mode_outdir_path, B_exp)
timing_dfs = None
if mode == 'hfta':
timing_dfs = [pd.read_csv(os.path.join(B_outdir_path, filename))]
else:
timing_dfs = [
pd.read_csv(
os.path.join(B_outdir_path, 'idx{}'.format(idx), filename))
for idx in range(B)
]
throughputs_of_Bs.append(_calculate_throughputs(timing_dfs, device))
max_B = max(Bs)
linear_interpolator = scipy.interpolate.interp1d(Bs, throughputs_of_Bs)
throughputs[prec]['{}:{}:{}'.format(mode, prec, outdir_idx)] = [
linear_interpolator(B) for B in range(1, max_B + 1)
]
throughputs[prec]['B'] = range(1, max_B + 1)
throughputs[prec] = pd.DataFrame(throughputs[prec]).set_index('B')
_aggregate_along_rows(throughputs[prec], mode, prec)
return throughputs | 5,356,227 |
def mask_to_segm(mask, bbox, segm_size, index=None):
"""Crop and resize mask.
This function requires cv2.
Args:
mask (~numpy.ndarray): See below.
bbox (~numpy.ndarray): See below.
segm_size (int): The size of segm :math:`S`.
index (~numpy.ndarray): See below. :math:`R = N` when
:obj:`index` is :obj:`None`.
Returns:
~numpy.ndarray: See below.
.. csv-table::
:header: name, shape, dtype, format
:obj:`mask`, ":math:`(N, H, W)`", :obj:`bool`, --
:obj:`bbox`, ":math:`(R, 4)`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`index` (optional), ":math:`(R,)`", :obj:`int32`, --
:obj:`segms` (output), ":math:`(R, S, S)`", :obj:`float32`, \
":math:`[0, 1]`"
"""
pad = 1
_, H, W = mask.shape
bbox = chainer.backends.cuda.to_cpu(bbox)
# To work around an issue with cv2.resize (it seems to automatically
# pad with repeated border values), we manually zero-pad the masks by 1
# pixel prior to resizing back to the original image resolution.
# This prevents "top hat" artifacts. We therefore need to expand
# the reference boxes by an appropriate factor.
padded_segm_size = segm_size + pad * 2
expand_scale = padded_segm_size / segm_size
bbox = _expand_bbox(bbox, expand_scale)
resize_size = padded_segm_size
bbox = _integerize_bbox(bbox)
segm = []
if index is None:
index = np.arange(len(bbox))
else:
index = chainer.backends.cuda.to_cpu(index)
for i, bb in zip(index, bbox):
y_min = max(bb[0], 0)
x_min = max(bb[1], 0)
y_max = max(min(bb[2], H), 0)
x_max = max(min(bb[3], W), 0)
if y_max <= y_min or x_max <= x_min:
segm.append(np.zeros((segm_size, segm_size), dtype=np.float32))
continue
bb_height = bb[2] - bb[0]
bb_width = bb[3] - bb[1]
        cropped_m = np.zeros((bb_height, bb_width), dtype=bool)
y_offset = y_min - bb[0]
x_offset = x_min - bb[1]
cropped_m[y_offset:y_offset + y_max - y_min,
x_offset:x_offset + x_max - x_min] =\
chainer.backends.cuda.to_cpu(mask[i, y_min:y_max, x_min:x_max])
with chainer.using_config('cv_resize_backend', 'cv2'):
sgm = transforms.resize(
cropped_m[None].astype(np.float32),
(resize_size, resize_size))[0].astype(np.int32)
segm.append(sgm[pad:-pad, pad:-pad])
return np.array(segm, dtype=np.float32) | 5,356,228 |
def plot_2D_vector_field(vector_field, downsampling):
"""vector_field should be a tensor of shape (2,H,W)"""
downsample2D = monai.networks.layers.factories.Pool['AVG', 2](
kernel_size=downsampling)
vf_downsampled = downsample2D(vector_field.unsqueeze(0))[0]
plt.quiver(
vf_downsampled[0, :, :], vf_downsampled[1, :, :],
angles='xy', scale_units='xy', scale=downsampling,
headwidth=4.
) | 5,356,229 |
def get_image(_svg_code):
"""
Convert the SVG string to PNG.
"""
svg2png(bytestring=_svg_code, write_to='output.png') | 5,356,230 |
def append_unique(func):
"""
    This decorator collects each result - regardless of type - into a
    de-duplicated list.
"""
def inner(*args, **kwargs):
return list(
set(
_results(
args[0],
func.__name__,
*args,
**kwargs
)
)
)
return inner | 5,356,231 |
def _get_unique_figs(tree):
"""
Extract duplicate figures from the tree
"""
return _find_unique_figures_wrap(list(map(_get_fig_values(tree),
tree)), []) | 5,356,232 |
def read_fssp(fssp_handle):
"""Process a FSSP file and creates the classes containing its parts.
Returns:
:header: Contains the file header and its properties.
:sum_dict: Contains the summary section.
:align_dict: Contains the alignments.
"""
header = FSSPHeader()
sum_dict = FSSPSumDict()
align_dict = FSSPAlignDict()
curline = fssp_handle.readline()
while not summary_title.match(curline):
# Still in title
header.fill_header(curline)
curline = fssp_handle.readline()
if not summary_title.match(curline):
raise ValueError("Bad FSSP file: no summary record found")
curline = fssp_handle.readline() # Read the title line, discard
curline = fssp_handle.readline() # Read the next line
# Process the summary records into a list
while summary_rec.match(curline):
cur_sum_rec = FSSPSumRec(curline)
sum_dict[cur_sum_rec.nr] = cur_sum_rec
curline = fssp_handle.readline()
# Outer loop: process everything up to the EQUIVALENCES title record
while not equiv_title.match(curline):
while (not alignments_title.match(curline) and
not equiv_title.match(curline)):
curline = fssp_handle.readline()
if not alignments_title.match(curline):
if equiv_title.match(curline):
# print("Reached equiv_title")
break
else:
raise ValueError("Bad FSSP file: no alignments title record found")
if equiv_title.match(curline):
break
# If we got to this point, this means that we have matched an
# alignments title. Parse the alignment records in a loop.
curline = fssp_handle.readline() # Read the title line, discard
curline = fssp_handle.readline() # Read the next line
while alignments_rec.match(curline):
align_rec = FSSPAlignRec(fff_rec(curline))
key = align_rec.chain_id + align_rec.res_name + str(align_rec.pdb_res_num)
align_list = curline[fssp_rec.align.start_aa_list:].strip().split()
if key not in align_dict:
align_dict[key] = align_rec
align_dict[key].add_align_list(align_list)
curline = fssp_handle.readline()
if not curline:
print("EOFEOFEOF")
raise EOFError
for i in align_dict.values():
i.pos_align_list2dict()
del i.PosAlignList
align_dict.build_resnum_list()
return (header, sum_dict, align_dict) | 5,356,233 |
def LoadJSON(json_string):
"""Loads json object from string, or None.
Args:
json_string: A string to get object from.
Returns:
JSON object if the string represents a JSON object, None otherwise.
"""
try:
data = json.loads(json_string)
except ValueError:
data = None
return data | 5,356,234 |
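# LoadJSON above swallows parse errors and returns None instead of raising:
print(LoadJSON('{"a": 1}'))                    # {'a': 1}
print(LoadJSON('not json'))                    # None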
def stopLoop() -> None:
"""
Stop the network loop.
"""
global client
global logger
if client is None or logger is None:
raise MqttClientNotInit()
logger.info('stopping network loop')
client.loop_stop() | 5,356,235 |
def _dimensions_matrix(channels, n_cols=None, top_left_attribute=None):
"""
time,x0 y0,x0 x1,x0 y1,x0
x0,y0 time,y0 x1,y0 y1,y0
x0,x1 y0,x1 time,x1 y1,x1
x0,y1 y0,y1 x1,y1 time,y1
"""
# Generate the dimensions matrix from the docstring.
ds = inspect.getdoc(_dimensions_matrix).strip()
x, y = channels[:2]
def _get_dim(d):
if d == 'time':
return d
assert re.match(r'[xy][01]', d)
c = x if d[0] == 'x' else y
f = int(d[1])
return c, f
dims = [[_.split(',') for _ in re.split(r' +', line.strip())]
for line in ds.splitlines()]
x_dim = {(i, j): _get_dim(dims[i][j][0])
for i, j in product(range(4), range(4))}
y_dim = {(i, j): _get_dim(dims[i][j][1])
for i, j in product(range(4), range(4))}
return x_dim, y_dim | 5,356,236 |
def is_monotonic_increasing(x):
"""
Helper function to determine if a list is monotonically increasing.
"""
dx = np.diff(x)
return np.all(dx >= 0) | 5,356,237 |
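# Note the >= in is_monotonic_increasing above: non-decreasing sequences count as increasing.
print(is_monotonic_increasing([1, 2, 2, 3]))   # True
print(is_monotonic_increasing([3, 1, 2]))      # False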
def help_command(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /help is issued."""
update.message.reply_text(
"""
Comandos:
/cadastro nome-de-usuario ex: /cadastro 000.000.000-00 ou fulandodetal\n
/Aditamento
/boletim
/escala
"""
) | 5,356,238 |
def cluster_size_threshold(data, thresh=None, min_size=20, save=False):
""" Removes clusters smaller than a prespecified number in a stat-file.
Parameters
----------
data : numpy-array or str
3D Numpy-array with statistic-value or a string to a path pointing to
a nifti-file with statistic values.
thresh : int, float
Initial threshold to binarize the image and extract clusters.
min_size : int
Minimum size (i.e. amount of voxels) of cluster. Any cluster with fewer
voxels than this amount is set to zero ('removed').
save : bool
If data is a file-path, this parameter determines whether the cluster-
corrected file is saved to disk again.
"""
if isinstance(data, (str, unicode)):
fname = copy(data)
data = nib.load(data)
affine = data.affine
data = data.get_data()
if thresh is not None:
data[data < thresh] = 0
clustered, num_clust = label(data > 0)
values, counts = np.unique(clustered.ravel(), return_counts=True)
# Get number of clusters by finding the index of the first instance
# when 'counts' is smaller than min_size
first_clust = np.sort(counts)[::-1] < min_size
if first_clust.sum() == 0:
print('All clusters were larger than: %i, returning original data' %
min_size)
return data
n_clust = np.argmax(first_clust)
# Sort and trim
cluster_nrs = values[counts.argsort()[::-1][:n_clust]]
cluster_nrs = np.delete(cluster_nrs, 0)
# Set small clusters to zero.
data[np.invert(np.in1d(clustered, cluster_nrs)).reshape(data.shape)] = 0
if save:
img = nib.Nifti1Image(data, affine=affine)
basename = op.basename(fname)
nib.save(img, basename.split('.')[0] + '_thresholded.nii.gz')
return data | 5,356,239 |
def dij_delay(parameter,error_rate, dT):
"""calculate the area-to-area latency
:param parameter: two-dimensional list about parameter of constellations
:param error_rate: float, probability of satellite failure
:param dT: int, accuracy of the results
"""
constellation_num = len(parameter[0])
for constellation_index in range(constellation_num):
constellation_name = parameter[0][constellation_index]
satellite_num = int(parameter[1][constellation_index])
cycle = int(parameter[2][constellation_index])
bound = parameter[5][constellation_index]
city_num = 4
dl = [[0 for i in range(int((cycle - 1)/dT) + 1)] for i in range(6)]
error = [0 for i in range(6)]
for time in range(1, cycle + 1, dT):
print(time)
G = nx.Graph()
edge = []
path = 'matlab_code\\' + constellation_name + '\\delay\\' + str(time) + '.mat'
data = scio.loadmat(path)
delay = data['delay']
G.add_nodes_from(range(satellite_num + city_num))
for i in range(satellite_num):
for j in range(i + 1, satellite_num):
if delay[i][j] > 0:
edge.append((i, j, delay[i][j]))
for j in range(satellite_num, satellite_num + city_num):
if delay[i][j] < bound:
edge.append((i, j, delay[i][j]))
G.add_weighted_edges_from(edge)
if error_rate > 0:
for i in range(satellite_num):
destroy = random.randint(1,int(100 / error_rate))
if destroy == 1:
G.remove_node(i)
count = 0
for i in range(satellite_num, satellite_num + city_num - 1): #city to city
for j in range(i+1, satellite_num + city_num):
if nx.has_path(G, source=i, target=j):
dl[count][int((time - 1) / dT)] = nx.dijkstra_path_length(G, source=i, target=j)
else: #GSL is broken down
error[count] += 1
                        dl[count][int((time - 1) / dT)] = 0.
count += 1
numpy.savetxt(constellation_name + '.csv', dl, fmt='%f') | 5,356,240 |
def convert_df(df):
"""Makes a Pandas DataFrame more memory-efficient through intelligent use of Pandas data types:
specifically, by storing columns with repetitive Python strings not with the object dtype for unique values
(entirely stored in memory) but as categoricals, which are represented by repeated integer values. This is a
net gain in memory when the reduced memory size of the category type outweighs the added memory cost of storing
one more thing. As such, this function checks the degree of redundancy for a given column before converting it."""
converted_df = pd.DataFrame() # Initialize DF for memory-efficient storage of strings (object types)
# TO DO: Infer dtypes of df
df_obj = df.select_dtypes(include=['object']).copy() # Filter to only those columns of object data type
for col in df.columns:
if col in df_obj:
num_unique_values = len(df_obj[col].unique())
num_total_values = len(df_obj[col])
if (num_unique_values / num_total_values) < 0.5: # Only convert data types if at least half of values are duplicates
converted_df.loc[:,col] = df[col].astype('category') # Store these columns as dtype "category"
else:
converted_df.loc[:,col] = df[col]
else:
converted_df.loc[:,col] = df[col]
    # Downcast numeric columns to smaller dtypes where possible (apply() returns a new frame,
    # so the result has to be assigned back).
    float_cols = converted_df.select_dtypes(include=['float']).columns
    converted_df[float_cols] = converted_df[float_cols].apply(pd.to_numeric, downcast='float')
    int_cols = converted_df.select_dtypes(include=['int']).columns
    converted_df[int_cols] = converted_df[int_cols].apply(pd.to_numeric, downcast='signed')
return converted_df | 5,356,241 |
def run_add(request):
"""Add a run."""
if request.method == "POST":
form = forms.AddRunForm(request.POST, user=request.user)
run = form.save_if_valid()
if run is not None:
messages.success(
request, u"Run '{0}' added.".format(
run.name)
)
return redirect("manage_runs")
else:
pf = PinnedFilters(request.COOKIES)
form = forms.AddRunForm(
user=request.user,
initial=pf.fill_form_querystring(request.GET).dict(),
)
return TemplateResponse(
request,
"manage/run/add_run.html",
{
"form": form
}
) | 5,356,242 |
def map_aemo_facility_status(facility_status: str) -> str:
"""
Maps an AEMO facility status to an Opennem facility status
"""
unit_status = facility_status.lower().strip()
if unit_status.startswith("in service"):
return "operating"
if unit_status.startswith("in commissioning"):
return "commissioning"
if unit_status.startswith("committed"):
return "committed"
if unit_status.startswith("maturing"):
return "maturing"
if unit_status.startswith("emerging"):
return "emerging"
raise Exception(
"Could not find AEMO status for facility status: {}".format(
unit_status
)
) | 5,356,243 |
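# Example for map_aemo_facility_status above: statuses are matched by lower-cased prefix.
print(map_aemo_facility_status('In Service - Announced Withdrawal'))   # 'operating'
print(map_aemo_facility_status('Committed'))                           # 'committed'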
def demand_monthly_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs | 5,356,244 |
def render_page(page, title="My Page", context=None):
"""
A simple helper to render the md_page.html template with [context] vars, and
the additional contents of `page/[page].md` in the `md_page` variable.
It automagically adds the global template vars defined above, too.
It returns a string, usually the HTML contents to display.
"""
if context is None:
context = {}
context['title'] = title
context['md_page'] = ''
with file(get_path('page/%s.md' % page)) as f:
context['md_page'] = f.read()
return tpl_engine.get_template('md_page.html.jinja2').render(
dict(tpl_global_vars.items() + context.items())
) | 5,356,245 |
def _SortableApprovalStatusValues(art, fd_list):
"""Return a list of approval statuses relevant to one UI table column."""
sortable_value_list = []
for fd in fd_list:
for av in art.approval_values:
if av.approval_id == fd.field_id:
# Order approval statuses by life cycle.
# NOT_SET == 8 but should be before all other statuses.
sortable_value_list.append(
0 if av.status.number == 8 else av.status.number)
return sortable_value_list | 5,356,246 |
def resolve_pointer(document, pointer: str):
"""
Resolve a JSON pointer ``pointer`` within the referenced ``document``.
:param document: the referent document
:param str pointer: a json pointer URI fragment to resolve within it
"""
root = document
# Do only split at single forward slashes which are not prefixed by a caret
parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
for part in parts:
# Restore escaped slashes and carets
replacements = {r"^/": r"/", r"^^": r"^"}
part = re.sub(
"|".join(re.escape(key) for key in replacements.keys()),
lambda k: replacements[k.group(0)],
part,
)
if isinstance(document, Sequence):
# Try to turn an array index to an int
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except KeyError as e:
raise KeyError(f"Pointer does not resolve to value: {pointer}") from e
if document is root:
# Prevents infinite recursion on same document
return document
else:
return replace(document, root) | 5,356,247 |
def load_config_file(config_file):
""" Loads the given file into a list of lines
:param config_file: file name of the config file
:type config_file: str
:return: config file as a list (one item per line) as returned by open().readlines()
"""
with open(config_file, 'r') as f:
config_document = f.readlines()
return config_document | 5,356,248 |
def retrieve_analysis_report(accession, fields=None, file=None):
"""Retrieve analysis report from ENA
:param accession: accession id
:param fields: comma-separated list of fields to have in the report (accessible with get_returnable_fields with result=analysis)
:param file: filepath to save the content of the report
    :return: requested analysis report
"""
return retrieve_filereport(
accession=accession,
result="analysis",
fields=fields,
file=file) | 5,356,249 |
def save_trajectory(file_name, trajectory):
""" Write trajectory as .csv file in the trajectories folder
Each line represents a single trajectory point with:
time from start (1 value), states (8 values), d/dt states (8 values), d^2/dt^2 states (8 values)
"""
# Create trajectory folder if non-existent
traj_folder = "../trajectories"
if not os.path.exists(traj_folder):
os.mkdir(traj_folder)
outfile = '{}/{}'.format(traj_folder, file_name)
# Save values as .csv files.
with open(outfile, 'w') as f:
writer = csv.writer(f, delimiter=' ')
for i in range(len(trajectory.times)):
writer.writerow([trajectory.times[i]] + trajectory.states[i].tolist() + trajectory.states_dot[i].tolist() + trajectory.states_ddot[i].tolist()) | 5,356,250 |
def _CommonChecks(input_api, output_api):
"""Checks for both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, project_name='Native Client',
excluded_paths=tuple(EXCLUDE_PROJECT_CHECKS)))
# The commit queue assumes PRESUBMIT.py is standalone.
# TODO(bradnelson): Migrate code_hygiene to a common location so that
# it can be used by the commit queue.
old_sys_path = list(sys.path)
try:
sys.path.append(os.path.join(NACL_TOP_DIR, 'tools'))
sys.path.append(os.path.join(NACL_TOP_DIR, 'build'))
import code_hygiene
finally:
sys.path = old_sys_path
del old_sys_path
affected_files = input_api.AffectedFiles(include_deletes=False)
exclude_dirs = [ NACL_TOP_DIR + '/' + x for x in EXCLUDE_PROJECT_CHECKS ]
for filename in affected_files:
filename = filename.AbsoluteLocalPath()
if filename in exclude_dirs:
continue
if not IsFileInDirectories(filename, exclude_dirs):
errors, warnings = code_hygiene.CheckFile(filename, False)
for e in errors:
results.append(output_api.PresubmitError(e, items=errors[e]))
for w in warnings:
results.append(output_api.PresubmitPromptWarning(w, items=warnings[w]))
return results | 5,356,251 |
def profile_main_as_logs():
"""Main program for profiling. Profiling data logged.
"""
import cProfile
import pstats
import StringIO
prof = cProfile.Profile()
prof = prof.runctx("real_main()", globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
stats.sort_stats('time') # Or cumulative
stats.print_stats(80) # 80 = how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
logging.info("Profile data:\n%s", stream.getvalue()) | 5,356,252 |
def visualize_cluster_entropy(
doc2vec, eval_kmeans, om_df, data_cols, ks, cmap_name="brg"
):
"""Visualize entropy of embedding space parition. Currently only supports doc2vec embedding.
Parameters
----------
doc2vec : Doc2Vec model instance
Instance of gensim.models.doc2vec.Doc2Vec
eval_kmeans : callable
Callable cluster fit function
For instance,
.. code-block:: python
def eval_kmeans(X,k):
km = KMeans(n_clusters=k)
km.fit(X)
return km
om_df : DataFrame
A pandas dataframe containing O&M data, which contains columns specified in om_col_dict
data_cols : list
List of column names (str) which have text data.
ks : list
List of k parameters required for the clustering mechanic `eval_kmeans`
cmap_name :
Optional, color map
Returns
-------
Matplotlib figure instance
"""
df = om_df.copy()
cols = data_cols
fig = plt.figure(figsize=(6, 6))
cmap = plt.cm.get_cmap(cmap_name, len(cols) * 2)
for i, col in enumerate(cols):
X = df[col].tolist()
X = [x.lower() for x in X]
tokenized_data = [word_tokenize(x) for x in X]
doc2vec_data = [
TaggedDocument(words=x, tags=[str(i)]) for i, x in enumerate(tokenized_data)
]
model = copy.deepcopy(doc2vec)
model.build_vocab(doc2vec_data)
model.train(
doc2vec_data, total_examples=model.corpus_count, epochs=model.epochs
)
X_doc2vec = [model.infer_vector(tok_doc) for tok_doc in tokenized_data]
sse = []
clusters = []
for true_k in ks:
km = eval_kmeans(X_doc2vec, true_k)
sse.append(km.inertia_)
clusters.append(km.labels_)
plt.plot(
ks, sse, color=cmap(2 * i), marker="o", label=f"Doc2Vec + {col} entropy"
)
vectorizer = TfidfVectorizer()
X_tfidf = vectorizer.fit_transform(X)
sse = []
clusters = []
for true_k in ks:
km = eval_kmeans(X_tfidf, true_k)
sse.append(km.inertia_)
clusters.append(km.labels_)
plt.plot(
ks, sse, color=cmap(2 * i + 1), marker="o", label=f"TF-IDF + {col} entropy"
)
plt.xlabel(r"Number of clusters *k*")
plt.ylabel("Sum of squared distance")
plt.legend()
return fig | 5,356,253 |
def delete_student_meal_plan(
person_id: str = None,
academic_term_id: str = None):
"""
Removes a meal plan from a student.
:param person_id: The numeric ID of the person.
:param academic_term_id: The numeric ID of the academic term you're interested in.
:returns: String containing xml or an lxml element.
"""
return get_anonymous(
'deleteStudentMealPlan',
person_id=person_id,
academic_term_id=academic_term_id) | 5,356,254 |
def colorize(x):
"""Converts a one-channel grayscale image to a color heatmap image. """
if x.dim() == 2:
torch.unsqueeze(x, 0, out=x)
return
if x.dim() == 3:
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
cl[1] = gauss(x, 1, .5, .3)
cl[2] = gauss(x, 1, .2, .3)
cl[cl.gt(1)] = 1
return cl
elif x.dim() == 4:
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:, 0, :, :] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
cl[:, 1, :, :] = gauss(x, 1, .5, .3)
cl[:, 2, :, :] = gauss(x, 1, .2, .3)
return cl | 5,356,255 |
def local_role_density(
annotated_hypergraph, include_focus=False, absolute_values=False, as_array=False
):
"""
Calculates the density of each role within a 1-step neighbourhood
of a node, for all nodes.
Input:
annotated_hypergraph [AnnotatedHypergraph]: An annotated hypergraph.
include_focus [Bool]: If True, includes the roles of the focal node
            in the calculation.
absolute_values [Bool]: If True, returns role counts rather than densities.
as_array [Bool]: If True, return an array rather than a Counter.
Returns:
role_densities []: An array of dimension (# nodes x # roles)
describing the density of each role.
"""
A = annotated_hypergraph
def get_counts(group):
return Counter([x.role for x in group])
by_edge = {
eid: get_counts(v)
for eid, v in groupby(
sorted(A.IL, key=lambda x: x.eid, reverse=True), lambda x: x.eid
)
}
densities = {}
for incidence in A.IL:
densities[incidence.nid] = (
densities.get(incidence.nid, Counter()) + by_edge[incidence.eid]
)
if not include_focus:
densities[incidence.nid] = densities.get(
incidence.nid, Counter()
) - Counter([incidence.role])
keys = set(chain.from_iterable(densities.values()))
for item in densities.values():
item.update({key: 0 for key in keys if key not in item})
if not absolute_values:
normalise_counters(densities)
if as_array:
densities = to_matrix(densities, A)
return densities | 5,356,256 |
def get(url: str) -> dict:
"""
    Returns author, audioName and audios (playback URLs) for a Kuwo song link.
"""
data = {}
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Host": "www.kuwo.cn",
"Referer": "http://www.kuwo.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
}
song_info_url_format = "http://m.kuwo.cn/newh5/singles/songinfoandlrc?musicId={id}"
mp3_url_format = "http://www.kuwo.cn/url?format=mp3&rid={id}&response=url&type=convert_url3&br={quality}&from=web"
# http://www.kuwo.cn/play_detail/*********
id = re.findall(r"/(\d{1,})", url)
if id:
id = id[0]
else:
data["msg"] = "不支持输入的链接形式"
return data
session = requests.session()
    # Fetch the best available quality and the song info
with session.get(song_info_url_format.format(id=id), headers=headers, timeout=10) as rep:
if rep.status_code == 200 and rep.json().get("status") == 200:
best_quality = rep.json().get("data").get(
"songinfo").get("coopFormats")[0]
author = rep.json().get("data").get("songinfo").get("artist")
song_name = rep.json().get("data").get("songinfo").get("songName")
pic = rep.json().get("data").get("songinfo").get("pic")
data["author"] = author
data["audioName"] = song_name
data["imgs"] = [pic]
else:
data["msg"] = "获取失败"
return data
if not best_quality:
best_quality = "128kmp3"
    # fetch the playback URL for the song
with session.get(mp3_url_format.format(id=id, quality=best_quality), headers=headers, timeout=10) as rep:
if rep.status_code == 200 and rep.json().get("code") == 200:
play_url = rep.json().get("url")
data["audios"] = [play_url]
else:
data["msg"] = "获取音频链接失败"
return data | 5,356,257 |
def get_subject_mask(subject, run=1, rois=[1030,2030], path=DATADIR,
space=MRISPACE,
parcellation=PARCELLATION):
"""
Get subject mask by run and ROI key to apply to a dataset
(rois are in DATADIR/PARCELLATION.tsv)
inputs:
subject - sid00[0-9]{4}
run - which run to use for parcellation (redundant?) [1-8]
rois - list of regions of interest for mask [1030,2030]
path - dir containing roi parcellations [DATADIR]
space - parcellation space [MRISPACE]
parcellation- file [PARCELLATION]
outputs:
mask_ds - pymvpa Dataset containing mask data {0,[rois]}
"""
fname = opj(path, 'sub-%s'%subject, 'func', 'sub-%s_task-*_run-%02d_space-%s_%s.nii.gz'%(subject, run, space, parcellation))
#print fname
fname = glob.glob(fname)[0]
ds=P.fmri_dataset(fname)
found = np.where(np.isin(ds.samples,rois))[1]
return ds[:,found] | 5,356,258 |
def _add_args(parser, args):
"""
Call subcommand.add_argument() based on args list.
:param parser: the parser being build
:param list args: a data structure representing the arguments to be added
"""
for name, arg in args:
parser.add_argument(name, **arg) | 5,356,259 |
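# A minimal usage sketch for _add_args with a hypothetical argument spec; it relies only on
# the standard library argparse module.
import argparse

spec = [
    ("--name", {"help": "user name", "default": "anon"}),
    ("count", {"type": int, "nargs": "?", "default": 1}),
]
parser = argparse.ArgumentParser()
_add_args(parser, spec)
ns = parser.parse_args(["--name", "alice", "3"])
print(ns.name, ns.count)  # -> alice 3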
def social_auth(user):
"""
Return True if specified user has logged in with local account, False if user
uses 3rd party account for sign-in.
"""
return True if user.password is not settings.SOCIAL_AUTH_USER_PASSWORD else False | 5,356,260 |
def update_post(post_id):
"""
    The route used to update a post. It displays the create_post.html page with the original post's contents filled in,
and allows the user to change anything about the post. When the post has been successfully updated it redirects to
the post route.
"""
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post', form=form, legend='Update Post') | 5,356,261 |
def lrelu(x, leak=0.2, scope="lrelu"):
"""
leaky relu
if x > 0: return x
else: return leak * x
:param x: tensor
:param leak: float, leak factor alpha >= 0
:param scope: str, name of the operation
:return: tensor, leaky relu operation
"""
with tf.variable_scope(scope):
# if leak < 1:
# return tf.maximum(x, leak * x)
# elif x > 0:
# return x
# else:
# return leak * x
return tf.nn.relu(x) - leak * tf.nn.relu(-x) | 5,356,262 |
def test_upload(mocker):
"""Sanity check that upload is ok."""
runner = CliRunner()
with runner.isolated_filesystem():
os.mkdir("cli_test_data")
with open("cli_test_data/test.fastq.gz", "w") as fastq_file:
fastq_file.write("AAABBB")
mocked_login = mocker.patch.object(
APIClient, "login", return_value=None
)
mocked_get_credentials = mocker.patch(
"gencove.command.upload.main.get_s3_client_refreshable"
)
upload_id = str(uuid4())
mocked_get_upload_details = mocker.patch.object(
APIClient,
"get_upload_details",
return_value=UploadsPostData(
**{
"id": upload_id,
"last_status": {"id": str(uuid4()), "status": ""},
"s3": {"bucket": "test", "object_name": "test"},
}
),
)
mocked_upload_file = mocker.patch(
"gencove.command.upload.main.upload_file"
)
res = runner.invoke(
upload,
["cli_test_data"],
input="\n".join(["[email protected]", "123456"]),
)
assert res.exit_code == 0
mocked_login.assert_called_once()
mocked_get_credentials.assert_called_once()
mocked_get_upload_details.assert_called_once()
mocked_upload_file.assert_called_once()
assert not mocked_upload_file.call_args[1]["no_progress"] | 5,356,263 |
def set_logger(debug_level="info", detail_level=2):
"""Initialises the logger.
Args:
Debug_level (str): Minimum logger level to display.
- debug
- info
Detail level (int): Level of detail for the formatter.
- 0: Only messages
- 1: Messages and time
- 2 (default): Messages, time and debug level
Returns:
None: Creates a global variable ``logger``
"""
global logger
logger = logging.getLogger()
handler = logging.StreamHandler()
logger.addHandler(handler)
# set logger format
if detail_level == 0:
formatter = logging.Formatter("%(message)s")
elif detail_level == 1:
formatter = logging.Formatter("%(asctime)s %(message)s", "%H:%M:%S")
elif detail_level == 2:
        formatter = logging.Formatter(
            "%(asctime)s %(levelname)-8s %(message)s", "%H:%M:%S"
        )
handler.setFormatter(formatter)
    # set logger verbosity
if debug_level == "info":
logger.setLevel(logging.INFO)
elif debug_level == "debug":
logger.setLevel(logging.DEBUG)
else:
logger.error("Logger level " + debug_level + " not a valid option") | 5,356,264 |
def enable_async(func: Callable) -> Callable:
"""
Overview:
Empower the function with async ability.
Arguments:
- func (:obj:`Callable`): The original function.
Returns:
- runtime_handler (:obj:`Callable`): The wrap function.
"""
@wraps(func)
def runtime_handler(task: "Task", *args, async_mode: Optional[bool] = None, **kwargs) -> "Task":
"""
Overview:
If task's async mode is enabled, execute the step in current loop executor asyncly,
or execute the task sync.
Arguments:
- task (:obj:`Task`): The task instance.
- async_mode (:obj:`Optional[bool]`): Whether using async mode.
Returns:
- result (:obj:`Union[Any, Awaitable]`): The result or future object of middleware.
"""
if async_mode is None:
async_mode = task.async_mode
if async_mode:
assert not kwargs, "Should not use kwargs in async_mode, use position parameters, kwargs: {}".format(kwargs)
t = task._async_loop.run_in_executor(task._thread_pool, func, task, *args, **kwargs)
task._async_stack.append(t)
return task
else:
return func(task, *args, **kwargs)
return runtime_handler | 5,356,265 |
def get_past_data_from_bucket_as_dataframe():
"""Read a blob"""
bucket_name = "deep_learning_model_bucket"
blob_name = "past_data.csv"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
return_data = blob.download_as_text()
return_data = StringIO(return_data)
df = pd.read_csv(return_data, sep=",", header=0, index_col=False)
return df | 5,356,266 |
def align_junction_LeftRight(viral_seq, bp_pos, ri_pos, align_to="L"):
"""If align_to="L", put all the ambiguous nucleotides in the
'body' part of the junction defined by bp_pos and ri_pos,
that is the junction point is moved as close to the 5'
end (of viral_seq) as possible. If align_to="R", do the opposite."""
py_ri = ri_pos-1 # in this way viral_seq[py_ri] is the first nucleotide after the junction obtained by DI-tector
py_bp = bp_pos-1 # in this way viral_seq[py_bp] is the last nucleotide before the junction obtained by DI-tector
assert (align_to == "L" or align_to == "R"), "Plese enter R or L to align as right as possible or as left as possible."
new_bp_pos = py_bp
new_ri_pos = py_ri
try_next_alignement = True
while try_next_alignement:
if align_to == "L":
            if viral_seq[new_bp_pos] == viral_seq[new_ri_pos-1]:
new_bp_pos -= 1
new_ri_pos -= 1
else:
try_next_alignement = False
elif align_to == "R":
            if viral_seq[new_bp_pos+1] == viral_seq[new_ri_pos]:
new_bp_pos += 1
new_ri_pos += 1
else:
try_next_alignement = False
new_bp_pos += 1 # in this way I am using a fixed convention
new_ri_pos += 1 # in this way I am using a fixed convention
return new_bp_pos, new_ri_pos | 5,356,267 |
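# Worked example on a hypothetical sequence: inside the repeated "GGG" run the junction is
# ambiguous, so align_to="L" slides it toward the 5' end while "R" keeps it as far right as possible.
seq = "AGGGTCAGGGTC"
print(align_junction_LeftRight(seq, bp_pos=4, ri_pos=10, align_to="L"))  # expected (2, 8)
print(align_junction_LeftRight(seq, bp_pos=4, ri_pos=10, align_to="R"))  # expected (4, 10)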
def initialize_sqlite_tables_if_not_initialized() -> bool:
"""
Initialize the sqlite tables if they have not been
initialized yet.
Returns
-------
initialized : bool
If initialized, returns True.
"""
table_exists: bool = _table_exists(
table_name=TableName.EXPRESSION_NORMAL)
if table_exists:
return False
_create_expression_normal_table()
_create_expression_handler_table()
_create_indent_num_normal_table()
_create_indent_num_handler_table()
_create_last_scope_table()
_create_event_handler_scope_count_table()
_create_loop_count_table()
_create_debug_mode_setting_table()
_create_debug_mode_callable_count_table()
_create_stage_elem_id_table()
_create_variable_name_count_table()
_create_handler_calling_stack_table()
_create_circular_calling_handler_name_table()
_create_stage_id_table()
return True | 5,356,268 |
def poisson2vpvs(poisson_ratio):
"""
Convert Poisson's ratio to Vp/Vs ratio.
Parameters
----------
poisson_ratio : float
Poisson's ratio.
Returns
-------
vpvs_ratio : float
Vp/Vs ratio.
"""
return sqrt(2 * (poisson_ratio - 1) / (2 * poisson_ratio - 1)) | 5,356,269 |
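# Sanity check: a Poisson solid (nu = 0.25) has Vp/Vs = sqrt(3) ~ 1.732, consistent with the
# equivalent form sqrt(2 * (1 - nu) / (1 - 2 * nu)).
import math
assert math.isclose(poisson2vpvs(0.25), math.sqrt(3.0))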
def run_test_problem1b():
""" Tests the problem1b function. """
# -------------------------------------------------------------------------
# TODO: 5. Implement this TEST function.
# It TESTS the problem1b function defined below.
# Include at least ** 4 ** tests. Use the usual form:
#
# expected = XXX
# actual = problem1b(YYY, YYY)
# print('Test 1 expected:', expected)
# print(' actual: ', actual)
# -------------------------------------------------------------------------
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 3
# TIME ESTIMATE: 15 minutes.
# -------------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the problem1b function:')
print('--------------------------------------------------') | 5,356,270 |
def vs30_to_z1pt0_cy14(vs30, japan=False):
"""
    Returns the estimated depth to the 1.0 km/s velocity layer based on Vs30
from Chiou & Youngs (2014) California model
:param numpy.ndarray vs30:
Input Vs30 values in m/s
:param bool japan:
If true returns the Japan model, otherwise the California model
:returns:
Z1.0 in m
"""
if japan:
c1 = 412. ** 2.
c2 = 1360.0 ** 2.
return np.exp((-5.23 / 2.0) *
np.log((np.power(vs30,2.) + c1) / (c2 + c1)))
else:
c1 = 571 ** 4.
c2 = 1360.0 ** 4.
return np.exp((-7.15 / 4.0) * np.log((vs30 ** 4. + c1) / (c2 + c1))) | 5,356,271 |
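# Usage sketch with illustrative Vs30 values: softer sites (lower Vs30) yield a deeper Z1.0.
import numpy as np
vs30 = np.array([200.0, 760.0])
print(vs30_to_z1pt0_cy14(vs30))              # California model, roughly [500, 40] m
print(vs30_to_z1pt0_cy14(vs30, japan=True))  # Japan model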
def empowerment(iface, priority=0):
"""
Class decorator for indicating a powerup's powerup interfaces.
The class will also be declared as implementing the interface.
@type iface: L{zope.interface.Interface}
@param iface: The powerup interface.
@type priority: int
@param priority: The priority the powerup will be installed at.
"""
def _deco(cls):
cls.powerupInterfaces = (
tuple(getattr(cls, 'powerupInterfaces', ())) +
((iface, priority),))
implementer(iface)(cls)
return cls
return _deco | 5,356,272 |
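# Minimal sketch with a hypothetical interface: the decorator records the (interface, priority)
# pair on the class and declares that the class implements the interface.
from zope.interface import Interface

class IGreeter(Interface):
    """Illustrative powerup interface."""

@empowerment(IGreeter, priority=10)
class GreeterPowerup(object):
    pass

assert GreeterPowerup.powerupInterfaces == ((IGreeter, 10),)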
def multigauss_and_bgd_jacobian(x, *params):
"""Jacobien of the multiple Gaussian profile plus a polynomial background to data.
The degree of the polynomial background is fixed by parameters.CALIB_BGD_NPARAMS.
The order of the parameters is a first block CALIB_BGD_NPARAMS parameters (from low to high Legendre polynome degree,
contrary to np.polyval), and then block of 3 parameters for the Gaussian profiles like amplitude, mean and standard
deviation. x values are renormalised on the [-1, 1] interval for the background.
Parameters
----------
x: array
The x data values.
*params: list of float parameters as described above.
Returns
-------
y: array
The jacobian values.
Examples
--------
>>> import spectractor.parameters as parameters
>>> parameters.CALIB_BGD_NPARAMS = 4
>>> x = np.arange(600.,800.,1)
>>> p = [20, 1, -1, -1, 20, 650, 3, 40, 750, 5]
>>> y = multigauss_and_bgd_jacobian(x, *p)
>>> assert(np.all(np.isclose(y.T[0],np.ones_like(x))))
>>> print(y.shape)
(200, 10)
"""
bgd_nparams = parameters.CALIB_BGD_NPARAMS
out = []
x_norm = rescale_x_for_legendre(x)
for k in range(bgd_nparams):
# out.append(params[k]*(parameters.CALIB_BGD_ORDER-k)*x**(parameters.CALIB_BGD_ORDER-(k+1)))
# out.append(x ** (bgd_nparams - 1 - k))
c = np.zeros(bgd_nparams)
c[k] = 1
out.append(np.polynomial.legendre.legval(x_norm, c))
for k in range((len(params) - bgd_nparams) // 3):
jac = gauss_jacobian(x, *params[bgd_nparams + 3 * k:bgd_nparams + 3 * k + 3]).T
for j in jac:
out.append(list(j))
return np.array(out).T | 5,356,273 |
def test_pes_transform(Simulator, seed):
"""Test behaviour of PES when function and transform both defined."""
n = 200
# error must be with respect to transformed vector (conn.size_out)
T = np.asarray([[0.5], [-0.5]]) # transform to output
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(output=[1])
a = nengo.Ensemble(n, dimensions=1)
b = nengo.Node(size_in=2)
e = nengo.Node(size_in=1)
nengo.Connection(u, a)
learned_conn = nengo.Connection(
a, b, function=lambda x: [0], transform=T,
learning_rule_type=nengo.PES(learning_rate=1e-3))
assert T.shape[0] == learned_conn.size_out
assert T.shape[1] == learned_conn.size_mid
nengo.Connection(b[0], e, synapse=None)
nengo.Connection(nengo.Node(output=-1), e)
nengo.Connection(
e, learned_conn.learning_rule, transform=T, synapse=None)
p_b = nengo.Probe(b, synapse=0.05)
sim = nengo.Simulator(m)
sim.run(1.0)
tend = sim.trange() > 0.7
assert np.allclose(sim.data[p_b][tend], [1, -1], atol=1e-2) | 5,356,274 |
def get_empty_config():
"""
Return an empty Config object with no options set.
"""
empty_color_config = get_empty_color_config()
result = Config(
examples_dir=None,
custom_dir=None,
color_config=empty_color_config,
use_color=None,
pager_cmd=None,
editor_cmd=None,
squeeze=None,
subs=None
)
return result | 5,356,275 |
def add_user(vo, usercert):
"""Add the user identified by the given cert to the specified VO. Uses direct MySQL statements instead of voms-admin.
The CA cert that issued the user cert must already be in the database's 'ca' table - this happens automatically if
the CA cert is in /etc/grid-security/certificates when the VOMS database is created.
"""
usercert_dn, usercert_issuer = cagen.certificate_info(usercert)
dbname = 'voms_' + vo
# Find the index in the "ca" table ("cid") for the OSG Test CA that gets created by voms_install_db.
output, _, _, = mysql.check_execute(r'''SELECT cid FROM ca WHERE ca='%(usercert_issuer)s';''' % locals(),
'Get ID of user cert issuer from database', dbname)
output = output.strip()
assert output, "User cert issuer not found in database"
ca = int(output)
mysql.check_execute(r'''
INSERT INTO `usr` VALUES (1,'%(usercert_dn)s',%(ca)d,NULL,'root@localhost',NULL);
INSERT INTO `m` VALUES (1,1,1,NULL,NULL);''' % locals(),
'Add VO user', dbname) | 5,356,276 |
def parse(sql_string):
"""Given a string containing SQL, parse it and return the normalized result."""
parsed = select_stmt.parseString(sql_string)
parsed.from_clause = _normalize_from_clause(parsed.from_clause)
parsed.where_clause = _normalize_where_clause(parsed.where_clause)
return parsed | 5,356,277 |
def check_overwrite(path: str, overwrite: bool = False) -> str:
"""
Check if a path exists, if so raising a RuntimeError if overwriting is disabled.
:param path: Path
:param overwrite: Whether to overwrite
:return: Path
"""
if Path(path).is_file() and not overwrite:
raise RuntimeError(
f"Requested existing {path!r} as output, but overwriting is disabled."
)
return path | 5,356,278 |
def process_amendments(notice, notice_xml):
""" Process the changes to the regulation that are expressed in the notice.
"""
amends = []
notice_changes = changes.NoticeChanges()
amdpars_by_parent = []
for par in notice_xml.xpath('//AMDPAR'):
parent = par.getparent()
        exists = [aXp for aXp in amdpars_by_parent if aXp.parent == parent]
if exists:
exists[0].append(par)
else:
amdpars_by_parent.append(AmdparByParent(parent, par))
for aXp in amdpars_by_parent:
amended_labels = []
designate_labels, other_labels = [], []
context = [aXp.parent.get('PART') or notice['cfr_part']]
for par in aXp.amdpars:
als, context = parse_amdpar(par, context)
amended_labels.extend(als)
for al in amended_labels:
if isinstance(al, DesignateAmendment):
subpart_changes = process_designate_subpart(al)
if subpart_changes:
notice_changes.update(subpart_changes)
designate_labels.append(al)
elif new_subpart_added(al, notice['cfr_part']):
notice_changes.update(process_new_subpart(notice, al, par))
designate_labels.append(al)
else:
other_labels.append(al)
create_xmlless_changes(other_labels, notice_changes)
section_xml = find_section(par)
if section_xml is not None:
for section in reg_text.build_from_section(
notice['cfr_part'], section_xml):
create_xml_changes(other_labels, section, notice_changes)
for appendix in parse_appendix_changes(other_labels,
notice['cfr_part'], aXp.parent):
create_xml_changes(other_labels, appendix, notice_changes)
interp = parse_interp_changes(other_labels, notice['cfr_part'],
aXp.parent)
if interp:
create_xml_changes(other_labels, interp, notice_changes)
amends.extend(designate_labels)
amends.extend(other_labels)
if amends:
notice['amendments'] = amends
notice['changes'] = notice_changes.changes | 5,356,279 |
def bytes_to_text(input):
"""Converts given bytes (latin-1 char + padding)*length to text"""
content = struct.unpack((int(len(input)/2))*"sx", input)
return "".join([x.decode("latin-1") for x in content]).rstrip("\x00") | 5,356,280 |
def flatten_conv_capsule(inputs):
"""
    :param inputs: output from a convolutional capsule layer,
        with inputs.shape = [N, OH, OW, C, PH] where C is the channel number and PH is the pose vector length
    :return: tensor of shape [N, OH*OW*C, PH]
"""
inputs_shape = inputs.shape
l=[]
for i1 in range(inputs_shape[1]):
for i2 in range(inputs_shape[2]):
for i3 in range(inputs_shape[3]):
l.append(inputs[:,i1,i2,i3,:])
out = tf.stack(l,axis=1)
return out | 5,356,281 |
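# Shape check on random data (assumes eager execution): a 4x4 grid of 8 capsule channels with
# 16-dimensional poses flattens to 4*4*8 = 128 capsule vectors per example.
import tensorflow as tf

caps = tf.random.normal([2, 4, 4, 8, 16])
print(flatten_conv_capsule(caps).shape)  # (2, 128, 16)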
def LookupValue(values, name, scope, kind):
"""Like LookupKind, but for constant values."""
# If the type is an enum, the value can be specified as a qualified name, in
# which case the form EnumName.ENUM_VALUE must be used. We use the presence
# of a '.' in the requested name to identify this. Otherwise, we prepend the
# enum name.
if isinstance(kind, mojom.Enum) and '.' not in name:
name = '%s.%s' % (kind.spec.split(':', 1)[1], name)
for i in reversed(xrange(len(scope) + 1)):
test_spec = '.'.join(scope[:i])
if test_spec:
test_spec += '.'
test_spec += name
value = values.get(test_spec)
if value:
return value
return values.get(name) | 5,356,282 |
def SI1452(key,
Aleph=u'\u05d0', Tav=u'\u05ea'):
"""
Minimalist caps action
Make sure latin capital letters are produced in keys carrying them
(additionally, make Hebrew-letter keys go to level 2)
"""
if Aleph<=key.level_chars[1]<=Tav or u'A' <=key.level_chars[2]<=u'Z':
return CapsByShift()
else:
return None | 5,356,283 |
def _testClockwise():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> contour = font['A'][0]
>>> contour.clockwise
False
>>> contour = font['A'][1]
>>> contour.clockwise
True
>>> contour._clockwiseCache = None
>>> contour.clockwise = False
>>> contour.clockwise
False
>>> contour._clockwiseCache = None
>>> contour.clockwise = True
>>> contour.clockwise
True
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> contour = font['A'][0]
>>> contour.clockwise = False
>>> contour.clockwise
False
>>> contour._clockwiseCache = None
>>> contour.clockwise = True
>>> contour.clockwise
True
""" | 5,356,284 |
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
"""
Run integration tests for cluster-agent
"""
if install_deps:
deps(ctx)
# We need docker for the kubeapiserver integration tests
tags = DEFAULT_BUILD_TAGS + ["docker"]
test_args = {
"go_build_tags": " ".join(get_build_tags(tags, [])),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'go test {race_opt} -tags "{go_build_tags}" {exec_opts}'.format(**test_args)
prefixes = [
"./test/integration/util/kube_apiserver",
"./test/integration/util/leaderelection",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix)) | 5,356,285 |
def new_topic(request):
"""添加新主题"""
if request.method != 'POST':
form = TopicForm() # 如果不是POST请求, 表示首次请求, 返回空表单
else:
# POST提交了数据, 对数据进行处理
form = TopicForm(request.POST) # 根据请求传入的数据创建一个表单对象
# is_valid()函数核实用户填写了所有必不可少的字段(表单字段默认都是必不可少的),
# 且输入的数据与要求的字段类型一致
if form.is_valid(): # 检查表单的信息是否合法有效
form.save() # 将数据保存至数据库
# reverse()获取页面对应的URL, HttpResponseRedirect用于将浏览器页面重定向到topic
return HttpResponseRedirect(reverse('learning_logs:topics'))
context = {'form': form} # 传入到父类模板中显示, html中包含context字段
return render(request, 'learning_logs/new_topic.html', context) | 5,356,286 |
def pkg_config(*packages, **kw):
"""Translate pkg-config data to compatible Extension parameters.
Example usage:
>>> from distutils.extension import Extension
>>> from pkgdist import pkg_config
>>>
>>> ext_kwargs = dict(
... include_dirs=['include'],
... extra_compile_args=['-std=c++11'],
... )
>>> extensions = [
... Extension('foo', ['foo.c']),
... Extension('bar', ['bar.c'], **pkg_config('lcms2')),
... Extension('ext', ['ext.cpp'], **pkg_config(('nss', 'libusb-1.0'), **ext_kwargs)),
... ]
"""
flag_map = {
'-I': 'include_dirs',
'-L': 'library_dirs',
'-l': 'libraries',
}
try:
tokens = subprocess.check_output(
['pkg-config', '--libs', '--cflags'] + list(packages)).split()
except OSError as e:
sys.stderr.write(f'running pkg-config failed: {e.strerror}\n')
sys.exit(1)
for token in tokens:
token = token.decode()
if token[:2] in flag_map:
kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
else:
kw.setdefault('extra_compile_args', []).append(token)
return kw | 5,356,287 |
async def get_xdg_data_dir(app=None):
"""Return a data directory for this app.
Create the directory if it does not exist.
"""
if app is None:
app = await get_package_name()
data_home = Path(await get_xdg_home('XDG_DATA_HOME'))
data_dir = data_home / app
if not await data_dir.exists():
await data_dir.mkdir()
return await data_dir.resolve() | 5,356,288 |
def patch_subscription(subscription, data):
""" Patches the given subscription with the data provided
"""
return stage_based_messaging_client.update_subscription(
subscription["id"], data) | 5,356,289 |
def create_related_profile(instance, created, *args, **kwargs):
"""
    Check whether the save that triggered this signal is the one that created the
    user instance. If the save that caused the signal is an update, the user
    already exists in the database and no profile is created.
"""
if instance and created:
instance.profile = UserProfile.objects.create(user=instance) | 5,356,290 |
def update_game(game_obj, size, center1, center2):
"""
Update game state
"""
new_game_obj = game_obj.copy()
if center1 is not None:
new_game_obj['rudder1_pos'] = center1
if center2 is not None:
new_game_obj['rudder2_pos'] = center2
# Check if hitting corner
init_vel = new_game_obj['velocity']
if new_game_obj['pos'][1] >= 480-15 or new_game_obj['pos'][1] <= 15:
new_game_obj['velocity'] = (init_vel[0], -1*init_vel[1])
if new_game_obj['pos'][0] >= 640-15:
new_game_obj['pos'] = (size[1]/2, size[0]/2)
new_game_obj['velocity'] = (-1.05*abs(new_game_obj['velocity'][0]),
1.05*abs(new_game_obj['velocity'][1]))
new_game_obj['score1'] += 1
elif new_game_obj['pos'][0] <= 15:
new_game_obj['pos'] = (size[1]/2, size[0]/2)
new_game_obj['score2'] += 1
new_game_obj['velocity'] = (1.05*abs(new_game_obj['velocity'][0]),
-1.05*abs(new_game_obj['velocity'][1]))
elif 0 <= new_game_obj['pos'][0]-new_game_obj['rudder1_pos'][0] <= 17 and new_game_obj['rudder1_pos'][1]-(50+15) < new_game_obj['pos'][1] < new_game_obj['rudder1_pos'][1] + 50+15:
new_game_obj['velocity'] = (-1*init_vel[0], init_vel[1])
elif 0 <= new_game_obj['rudder2_pos'][0] - new_game_obj['pos'][0] <= 17 and new_game_obj['rudder2_pos'][1]-(50+15) < new_game_obj['pos'][1] < new_game_obj['rudder2_pos'][1]+(50+15):
init_vel = new_game_obj['velocity']
new_game_obj['velocity'] = (-1*init_vel[0], init_vel[1])
new_game_obj['pos'] = (new_game_obj['pos'][0] + new_game_obj['velocity']
[0], new_game_obj['pos'][1] + new_game_obj['velocity'][1])
# print(new_game_obj)
return new_game_obj | 5,356,291 |
def get_background_pools(experiment: Experiment) -> ThreadPoolExecutor:
"""
    Create thread pools for background activities and rollbacks, each sized to the number of
    declared background items. Returns a tuple (activity_pool, rollback_pool); each entry is
    `None` if nothing of that kind is declared to run in the background.
"""
method = experiment.get("method")
rollbacks = experiment.get("rollbacks", [])
activity_background_count = 0
for activity in method:
if activity and activity.get("background"):
activity_background_count = activity_background_count + 1
activity_pool = None
if activity_background_count:
logger.debug(
"{c} activities will be run in the background".format(
c=activity_background_count))
activity_pool = ThreadPoolExecutor(activity_background_count)
rollback_background_pool = 0
for activity in rollbacks:
if activity and activity.get("background"):
rollback_background_pool = rollback_background_pool + 1
rollback_pool = None
if rollback_background_pool:
logger.debug(
"{c} rollbacks will be run in the background".format(
c=rollback_background_pool))
rollback_pool = ThreadPoolExecutor(rollback_background_pool)
return activity_pool, rollback_pool | 5,356,292 |
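# Usage sketch with a hypothetical experiment declaration (assumes the module's logger is set up):
experiment = {
    "method": [{"background": True}, {"background": False}],
    "rollbacks": [],
}
activity_pool, rollback_pool = get_background_pools(experiment)
# -> a ThreadPoolExecutor with one worker for the background activity, and None for rollbacks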
def download_thumb(se: requests.Session, proxy: dict, addr: str) -> str:
"""下载缩略图
Args:
se: 会话对象
proxy: 代理字典
addr: 缩略图地址
Returns:
成功时返回缩略图的本地绝对路径,失败时返回空字符串
"""
header = {'User-Agent': USER_AGENT}
try:
with se.get(addr,
headers=header,
proxies=proxy,
stream=True,
timeout=5) as thumb_res:
with NamedTemporaryFile('w+b', prefix='PETSpider_', delete=False) as thumb:
for chunk in thumb_res.iter_content():
thumb.write(chunk)
path = thumb.name
except (OSError, IOError):
return ''
else:
return path | 5,356,293 |
def iter_dir_completions(arg):
"""Generate an iterator that iterates through directory name completions.
:param arg: The directory name fragment to match
:type arg: str
"""
return iter_file_completions(arg, True) | 5,356,294 |
def climate_eurotronic_spirit_z_fixture(client, climate_eurotronic_spirit_z_state):
"""Mock a climate radio danfoss LC-13 node."""
node = Node(client, climate_eurotronic_spirit_z_state)
client.driver.controller.nodes[node.node_id] = node
return node | 5,356,295 |
def shuffles_from_transition_counts(transition_counts, initial_state, final_state):
"""
iterate over sequences having transition count N
starting in initial_state, ending in final_state
Args:
* N - int array of transition counts
* initial_state - int initial state
* final_state - int final state
Yields:
* sequence having transition count N, starting in initial_state, ending in final_state
"""
N = transition_counts.copy()
n = np.sum(N) + 1 # trajectory length
K = N.shape[0] # number of states
seq = np.zeros(n, dtype=int)-1 # sequence, trajectory
seq[0] = initial_state
t = 1
while t >= 1:
prev_state = seq[t-1]
current_state = seq[t]+1
while current_state < K and N[prev_state, current_state] == 0:
current_state += 1
if current_state == K: # go left
if seq[t] != -1:
N[seq[t-1],seq[t]] += 1
seq[t] = -1
t -= 1
else: # go right or stay if at end
if seq[t] != -1:
N[seq[t-1],seq[t]] += 1
seq[t] = current_state
N[seq[t-1],seq[t]] -= 1
if t < n-1: # go right if not at end
t += 1
else: # stay if at end
yield tuple(seq) | 5,356,296 |
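# Enumeration sketch: with one 0->0, one 0->1 and one 1->0 transition, the length-4 trajectories
# starting (and, as forced by the counts, ending) in state 0 should be (0, 0, 1, 0) and (0, 1, 0, 0).
import numpy as np

counts = np.array([[1, 1], [1, 0]])
print(sorted(shuffles_from_transition_counts(counts, initial_state=0, final_state=0)))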
def cli():
"""Command line interface for Mach O Peek"""
pass | 5,356,297 |
def call_experiment(thunk, thunk_params_dict_list, args, cpu_num, **kwargs):
"""
    :param thunk: the function to launch.
    :param thunk_params_dict_list: the list of parameter dicts to run as a batch.
    :param kwargs: any other parameters not covered above; rarely needed and best avoided, since they easily cause confusion.
    A normal function executes as soon as it is called with its arguments.
    Wrapping the call in a closure binds the arguments without executing it and returns a new function;
    calling that returned value later (just add parentheses) runs it as a zero-argument function.
"""
def thunk_plus():
# Fork into multiple processes
mpi_fork(cpu_num)
# Run thunk
thunk(thunk_params_dict_list)
    # Wrapping with a lambda makes the MPI import in tune_func.py raise an initialization error.
# thunk_plus = lambda: thunk(params_dict)
# mpi_fork(len(params_dict))
pickled_thunk = cloudpickle.dumps(thunk_plus)
encoded_thunk = base64.b64encode(zlib.compress(pickled_thunk)).decode('utf-8')
    # By default mpi_fork and run_entrypoint.py live in the same folder (spinup_utils), so derive the absolute path from mpi_fork.
    # If that does not work, just set the absolute path of the entrypoint yourself.
base_path = mpi_fork.__code__.co_filename
run_entrypoint_path = base_path.replace(base_path.split('/')[-1], '')
entrypoint = osp.join(run_entrypoint_path, 'run_entrypoint.py')
# entrypoint = osp.join(osp.abspath(osp.dirname(__file__)), 'run_entrypoint.py')
    # subprocess takes a list of strings: write the command exactly as you would type it on the command line.
cmd = [sys.executable if sys.executable else 'python', entrypoint, encoded_thunk]
print("tune_exps_pid:", os.getpid())
try:
subprocess.check_call(cmd, env=os.environ)
except CalledProcessError:
err_msg = '\n'*3 + '='*DIV_LINE_WIDTH + '\n' + dedent("""
Check the traceback above to see what actually went wrong.
""") + '='*DIV_LINE_WIDTH + '\n'*3
print(err_msg)
raise | 5,356,298 |
def xi_eta_to_ab(ξ, η):
""" function to transform xi, eta coords to a, b
see Hesthaven function 'rstoab'
@param xi, eta vectors of xi, eta pts
"""
a, b = np.zeros_like(ξ), np.zeros_like(η)
singular = np.isclose(η, 1.0)
nonsingular = np.logical_not(singular)
a[nonsingular] = 2*(1. + ξ[nonsingular])/(1 - η[nonsingular]) - 1
a[singular] = -1
b = η
return a, b | 5,356,299 |
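# Usage sketch: the eta = 1 vertex is the singular point and maps to a = -1; elsewhere
# a = 2*(1 + xi)/(1 - eta) - 1 and b = eta.
import numpy as np

xi = np.array([-1.0, 0.0, -0.75])
eta = np.array([1.0, 0.0, 0.5])
a, b = xi_eta_to_ab(xi, eta)
print(a, b)  # a ~ [-1., 1., 0.], b = [1., 0., 0.5]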