content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M) |
---|---|---|
def freeze_loop(src, start, end, loopStart, loopEnd=None):
""" Freezes a range of frames form start to end using the frames
comprended between loopStart and loopEnd.
If no end frames are provided for the range or the loop,
start frames will be used instead.
"""
core = vs.get_core()
if loopEnd is None:
loopEnd = loopStart
if start < 0 or start > src.num_frames - 1:
raise ValueError('start frame out of bounds: {}.'.format(start))
if loopStart < 0 or loopStart > src.num_frames - 1:
raise ValueError('loop start frame out of bounds: {}.'.format(loopStart))
if end < start or end > src.num_frames - 1:
raise ValueError('end frame out of bounds: {}.'.format(end))
if loopEnd < loopStart or loopEnd > src.num_frames - 1:
raise ValueError('loop end out of bounds: {}.'.format(loopEnd))
loop = core.std.Loop(src[loopStart:loopEnd + 1], 0)
span = end - start + 1
if start != 0:
final = src[:start] + loop[:span]
else:
final = loop[:span]
if end < src.num_frames - 1:
final = final + src[end + 1:]
if src.num_frames != final.num_frames:
raise ValueError(
'input / output framecount missmatch (got: {}; expected: {}).'.format(
final.num_frames, src.num_frames))
return final | 67284a264ada601dbd01c30c1bf32f48ad9eb9d8 | 3,656,700 |
def timevalue(cflo, prate, base_date=0, utility=None):
"""
Computes the equivalent net value of a generic cashflow at time `base_date`
using the periodic interest rate `prate`. If `base_date` is 0, `timevalue`
computes the net present value of the
cashflow. If `base_date` is the index of the last element of `cflo`,
this function computes the equivalent future value.
Args:
cflo (pandas.Series, list of pandas.Series): Generic cashflow.
prate (pandas.Series): Periodic interest rate.
base_date (int, tuple): Time.
utility (function): Utility function.
Returns:
Float or list of floats.
**Examples.**
>>> cflo = cashflow([-732.54] + [100]*8, start='2000Q1', freq='Q')
>>> prate = interest_rate([2]*9, start='2000Q1', freq='Q')
>>> timevalue(cflo, prate) # doctest: +ELLIPSIS
0.00...
>>> prate = interest_rate([12]*5, start='2000Q1', freq='Q')
>>> cflo = cashflow([-200]+[100]*4, start='2000Q1', freq='Q')
>>> timevalue(cflo, prate) # doctest: +ELLIPSIS
103.73...
>>> timevalue(cflo, prate, 4) # doctest: +ELLIPSIS
163.22...
>>> prate = interest_rate([12]*5, start='2000Q1', freq='Q')
>>> cflo = cashflow([-200] + [100]*4, start='2000Q1', freq='Q')
>>> timevalue(cflo=cflo, prate=prate) # doctest: +ELLIPSIS
103.73...
>>> timevalue(cflo=[cflo, cflo], prate=prate) # doctest: +ELLIPSIS
0 103.734935
1 103.734935
dtype: float64
"""
if isinstance(cflo, pd.Series):
cflo = [cflo]
if not isinstance(prate, pd.Series):
raise TypeError("`prate` must be a pandas.Series")
verify_period_range(cflo + [prate])
retval = pd.Series([0] * len(cflo), dtype=np.float64)
factor = to_discount_factor(prate=prate, base_date=base_date)
for index, xcflo in enumerate(cflo):
netval = 0
for time, _ in enumerate(xcflo):
netval += xcflo[time] * factor[time]
retval[index] = netval
if len(retval) == 1:
return retval[0]
return retval | 704f6988d1995a8602314df08d1dcfbed549f1ed | 3,656,701 |
def munge(examples, multiplier, prob, loc_var, data_t, seed=0):
""" Generates a dataset from the original one
:param examples: Training examples
:type examples: 2d numpy array
:param multiplier: size multiplier
:type multiplier: int k
:param prob: probability of swapping values
:type prob: flt (0 to 1)
:param loc_var: local variance parameter
:type loc_var: flt
    :param data_t: Identifies whether each attribute is continuous or nominal
:type data_t: Numpy array of strs
"""
np.random.seed(seed)
new_dataset = None
continuous = [True if x == FeatureType.CONTINUOUS else False for x in data_t]
nominal = np.logical_not(continuous)
data_c = examples[:, continuous].astype(float)
# Scales data linearly from 0 to 1
norm_data_c = normalize(data_c - np.min(data_c, axis=0), axis=0, norm='max')
data_n = examples[:, nominal]
    indices = nn(norm_data_c, data_n)
for i in range(multiplier):
T_prime = np.copy(examples)
# Runs through all the examples in the dataset
for j in range(examples.shape[0]):
            index = indices[j, 1] if indices[j, 0] == j else indices[j, 0]
pt1 = T_prime[j, :]
pt2 = T_prime[index, :]
# Runs through all features for an example and its nn
for k in range(len(data_t)):
# Swaps the two fields with probability prob
if np.random.ranf() < prob:
if data_t[k] == FeatureType.CONTINUOUS:
std = abs(float(pt1[k]) - float(pt2[k])) / loc_var
temp = float(pt1[k])
pt1[k] = np.random.normal(float(pt2[k]), std)
pt2[k] = np.random.normal(temp, std)
else:
temp = pt1[k]
pt1[k] = pt2[k]
pt2[k] = temp
# Combines the dataset to the final one
if new_dataset is None:
new_dataset = np.copy(T_prime)
else:
new_dataset = np.vstack((new_dataset, T_prime))
return new_dataset | 339d5cafedb8abd6094cde81004c5056a3830d26 | 3,656,702 |
def is_interested_source_code_file(afile):
"""
    Check whether a file is a source code file that we are interested in.
"""
tokens = afile.split(".")
if len(tokens) > 1 and tokens[-1] in ("c", "cpp", "pl", "tmpl", "py", "s", "S"):
# we care about C/C++/perl/template/python/assembly source code files
return True
return False | 9bd77dc3b530262cc2bf8a32c0d050ea30077030 | 3,656,703 |
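A quick, hedged usage check for the filter above (the file names are made up for illustration):
print(is_interested_source_code_file("kernel/sched.c"))   # True
print(is_interested_source_code_file("docs/README.md"))   # False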
import json
import time
import logging
def handle_survey_answers():
"""
    Receives form data and adds the submission to the database.
Args:
        data (str): From the POST request arguments. Should be in JSON form
            and contain all the quiz response information.
Raises:
AssertionError: When the quiz type is not valid.
Returns:
str: The ID of the quiz entry in the database.
"""
# Load the JSON as dictionary.
entry = json.loads(request.form['data'])
# Add the current timestamp to the data.
timestamp = time.time()
entry['timestamp_secs'] = timestamp
entry_string = json.dumps(entry, indent=4, sort_keys=True)
logging.debug(entry_string)
db = get_db()
    # Use the form type to access different collections.
form_type = entry['form_type']
responses_col = None
# Differentiate between men and women quiz.
if form_type == 'men':
responses_col = db.responses_men
elif form_type == 'women':
responses_col = db.responses_women
else:
logging.warning("Form Type is not 'men' or 'women': {}".format(
form_type))
raise AssertionError("Form Type is not 'men' or 'women': {}".format(
form_type))
# Update responses counter.
responses_col.find_one_and_update(
{'_id': 'responses'},
{'$inc': {'count': 1}},
upsert=True)
# Insert the response information.
response_id = responses_col.insert_one(entry).inserted_id
resp = {"id": str(response_id)}
return jsonify(resp) | 2abd696e009d95b58671369cdfb9f1449a991474 | 3,656,704 |
def recursively_extract(node, exfun, maxdepth=2):
"""
Transform a html ul/ol tree into a python list tree.
Converts a html node containing ordered and unordered lists and list items
into an object of lists with tree-like structure. Leaves are retrieved by
applying `exfun` function to the html nodes not containing any ul/ol list.
Args:
node: BeautifulSoup HTML node to traverse
exfun: function to apply to every string node found
maxdepth: maximal depth of lists to go in the node
Returns:
A tree-like python object composed of lists.
Examples:
>>> node_content = \
'''
<ol>
<li>Hase</li>
<li>Nase<ol><li>Eins</li><li>Zwei</li></ol></li>
</ol>'''
>>> node = BeautifulSoup(node_content, "lxml")
>>> recursively_extract(node, lambda x: x)
[<li>Hase</li>, [<li>Eins</li>, <li>Zwei</li>]]
>>> recursively_extract(node, lambda x: x.get_text())
['Hase', ['Eins', 'Zwei']]
"""
if node.name in ['ol', 'ul']:
lilist = node
else:
lilist = node.ol or node.ul
if lilist and maxdepth:
# apply 'recursively_extract' to every 'li' node found under this node
return [recursively_extract(li, exfun, maxdepth=(maxdepth - 1))
for li in lilist.find_all('li', recursive=False)]
# if this node doesn't contain 'ol' or 'ul' node, return the transformed
# leaf (using the 'exfun' function)
return exfun(node) | cc5732a786579172dda31958ad2bd468a4feef81 | 3,656,705 |
import math
def group_v2_deconv_decoder(latent_tensor,
output_shape,
hy_ncut=1,
group_feats_size=gin.REQUIRED,
lie_alg_init_scale=gin.REQUIRED,
lie_alg_init_type=gin.REQUIRED,
n_act_points=gin.REQUIRED,
is_training=True):
"""Convolutional decoder used in beta-VAE paper for the chairs data.
Based on row 3 of Table 1 on page 13 of "beta-VAE: Learning Basic Visual
Concepts with a Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl)
Here we add an extra linear mapping for group features extraction.
Args:
latent_tensor: Input tensor of shape (batch_size,) to connect decoder to.
output_shape: Shape of the data.
group_feats_size: The dimension of group features.
is_training: Whether or not the graph is built for training (UNUSED).
Returns:
Output tensor of shape (batch_size, 64, 64, num_channels) with the [0,1]
pixel intensities.
group_feats: Group features.
"""
# del is_training
lie_alg_basis_ls = []
latent_dim = latent_tensor.get_shape().as_list()[-1]
latents_in_cut_ls = split_latents(latent_tensor, hy_ncut=hy_ncut) # [x0, x1]
mat_dim = int(math.sqrt(group_feats_size))
for i in range(latent_dim):
init = tf.initializers.random_normal(0, lie_alg_init_scale)
lie_alg_tmp = tf.get_variable('lie_alg_' + str(i),
shape=[1, mat_dim, mat_dim],
initializer=init)
if lie_alg_init_type == 'oth':
lie_alg_tmp = tf.matrix_band_part(lie_alg_tmp, 0, -1)
lie_alg_tmp = lie_alg_tmp - tf.transpose(lie_alg_tmp,
perm=[0, 2, 1])
lie_alg_basis_ls.append(lie_alg_tmp)
lie_alg_basis = tf.concat(lie_alg_basis_ls,
axis=0)[tf.newaxis,
...] # [1, lat_dim, mat_dim, mat_dim]
lie_alg = 0
lie_group = tf.eye(mat_dim, dtype=lie_alg_basis_ls[0].dtype)[tf.newaxis, ...]
for i, lie_alg_basis_i in enumerate(lie_alg_basis_ls):
lie_alg_tmp = lie_alg_basis_i * latent_tensor[:, i][..., tf.newaxis, tf.newaxis]
lie_alg = lie_alg + lie_alg_tmp
lie_group_tmp = tf.linalg.expm(
lie_alg_tmp) # [b, mat_dim, mat_dim]
lie_group = tf.matmul(lie_group_tmp, lie_group)
# if not is_training:
# lie_alg_mul = latent_tensor[
# ..., tf.newaxis, tf.
# newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim]
# lie_alg = tf.reduce_sum(lie_alg_mul, axis=1) # [b, mat_dim, mat_dim]
# lie_group = tf.linalg.expm(lie_alg) # [b, mat_dim, mat_dim]
# else:
# lie_group = tf.eye(
# mat_dim,
# dtype=latents_in_cut_ls[0].dtype)[tf.newaxis, ...]
# lie_alg = 0
# for latents_in_cut_i in latents_in_cut_ls:
# lie_alg_mul_tmp = latents_in_cut_i[
# ..., tf.newaxis, tf.newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim]
# lie_alg_tmp = tf.reduce_sum(
# lie_alg_mul_tmp,
# axis=1) # [b, mat_dim, mat_dim]
# lie_alg = lie_alg + lie_alg_tmp
# lie_group_tmp = tf.linalg.expm(
# lie_alg_tmp) # [b, mat_dim, mat_dim]
# lie_group = tf.matmul(lie_group,
# lie_group_tmp)
transed_act_points_tensor = tf.reshape(lie_group, [-1, mat_dim * mat_dim])
# lie_alg_mul = latent_tensor[
# ..., tf.newaxis, tf.
# newaxis] * lie_alg_basis # [b, lat_dim, mat_dim, mat_dim]
# lie_alg = tf.reduce_sum(lie_alg_mul, axis=1) # [b, mat_dim, mat_dim]
# lie_group = tf.linalg.expm(lie_alg) # [b, mat_dim, mat_dim]
# act_init = tf.initializers.random_normal(0, 0.01)
# act_points = tf.get_variable('act_points',
# shape=[1, mat_dim, n_act_points],
# initializer=act_init)
# transed_act_points = tf.matmul(lie_group, act_points)
# transed_act_points_tensor = tf.reshape(transed_act_points,
# [-1, mat_dim * n_act_points])
d1 = tf.layers.dense(transed_act_points_tensor, 256, activation=tf.nn.relu)
d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)
d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])
d3 = tf.layers.conv2d_transpose(
inputs=d2_reshaped,
filters=64,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d4 = tf.layers.conv2d_transpose(
inputs=d3,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d5 = tf.layers.conv2d_transpose(
inputs=d4,
filters=32,
kernel_size=4,
strides=2,
activation=tf.nn.relu,
padding="same",
)
d6 = tf.layers.conv2d_transpose(
inputs=d5,
filters=output_shape[2],
kernel_size=4,
strides=2,
padding="same",
)
return tf.reshape(d6, [-1] + output_shape), lie_group, lie_alg_basis | c098852a7d3e85be944494de74810e021d7fd106 | 3,656,706 |
import os
import colorsys
import numpy as np
def cptfile2dict(filepath):
"""
Extracts a color dictionary and list for a colormap object from a .cpt file
Parameters
----------
filepath: str
filepath of a .cpt file including file extension
Returns
-------
colormap name, list containing all colors, dictionary containing all colors
"""
    if not os.path.exists(filepath):
        raise ImportError("file {} not found".format(filepath))
file = open(filepath)
name = os.path.splitext(os.path.basename(filepath))[0]
lines = file.readlines()
file.close()
x = []
r = []
g = []
b = []
color_model = "RGB"
for l in lines:
ls = l.split()
if l.strip():
if l[0] == "#":
if ls[-1] == "HSV":
color_model = "HSV"
continue
if ls[0] == "B" or ls[0] == "F" or ls[0] == "N":
pass
else:
x.append(float(ls[0]))
r.append(float(ls[1]))
g.append(float(ls[2]))
b.append(float(ls[3]))
xtemp = float(ls[4])
rtemp = float(ls[5])
gtemp = float(ls[6])
btemp = float(ls[7])
else:
continue
x.append(xtemp)
r.append(rtemp)
g.append(gtemp)
b.append(btemp)
x = np.array(x, dtype=np.float64)
r = np.array(r, dtype=np.float64)
g = np.array(g, dtype=np.float64)
b = np.array(b, dtype=np.float64)
if color_model == "HSV":
for i in range(r.shape[0]):
rr, gg, bb = colorsys.hsv_to_rgb(r[i] / 360., g[i], b[i])
r[i] = rr
g[i] = gg
b[i] = bb
if color_model == "RGB":
r = r/255
g = g/255
b = b/255
x_norm = (x - x[0])/(x[-1] - x[0])
col_list = [(r[i], g[i], b[i]) for i in range(len(r))]
red = []
green = []
blue = []
for i in range(len(x)):
red.append([x_norm[i], r[i], r[i]])
green.append([x_norm[i], g[i], g[i]])
blue.append([x_norm[i], b[i], b[i]])
color_dict = {"red": red, "green": green, "blue": blue}
return name, col_list, color_dict | 8743c2a092b7749aaecbc3a5f6f815e74dd489df | 3,656,707 |
def UncertaintyLossNet():
"""Creates Uncertainty weighted loss model https://arxiv.org/abs/1705.07115
"""
l1 = layers.Input(shape=())
l2 = layers.Input(shape=())
loss = UncertaintyWeightedLoss()([l1, l2])
model = Model(inputs=[l1, l2], outputs=loss)
return model | 5a6553edc321a6e307848e261692541cedea4ebb | 3,656,708 |
from typing import Iterable
import logging
from pathlib import Path
def inject_signals(
frame_files: Iterable[str],
channels: [str],
ifos: [str],
prior_file: str,
n_samples: int,
outdir: str,
fmin: float = 20,
waveform_duration: float = 8,
snr_range: Iterable[float] = [25, 50],
):
"""Injects simulated BBH signals into a frame, or set of corresponding
frames from different interferometers. Frames should have the same
start/stop time and the same sample rate
Args:
frame_files: list of paths to frames to be injected
channels: channel names of the strain data in each frame
ifos: list of interferometers corresponding to frames, e.g., H1, L1
prior_file: prior file for bilby to sample from
        n_samples: number of signals to inject
outdir: output directory to which injected frames will be written
fmin: Minimum frequency for highpass filter
waveform_duration: length of injected waveforms
snr_range: desired signal SNR range
Returns:
Paths to the injected frames and the parameter file
"""
strains = [
TimeSeries.read(frame, ch) for frame, ch in zip(frame_files, channels)
]
logging.info("Read strain from frame files")
span = set([strain.span for strain in strains])
if len(span) != 1:
raise ValueError(
"Frame files {} and {} have different durations".format(
*frame_files
)
)
frame_start, frame_stop = next(iter(span))
frame_duration = frame_stop - frame_start
sample_rate = set([int(strain.sample_rate.value) for strain in strains])
if len(sample_rate) != 1:
raise ValueError(
"Frame files {} and {} have different sample rates".format(
*frame_files
)
)
sample_rate = next(iter(sample_rate))
fftlength = int(max(2, np.ceil(2048 / sample_rate)))
# set the non-overlapping times of the signals in the frames randomly
# leaves buffer at either end of the series so edge effects aren't an issue
signal_times = sorted(
np.random.choice(
np.arange(
waveform_duration,
frame_duration - waveform_duration,
waveform_duration,
),
size=n_samples,
replace=False,
)
)
# log and print out some simulation parameters
logging.info("Simulation parameters")
logging.info("Number of samples : {}".format(n_samples))
logging.info("Sample rate [Hz] : {}".format(sample_rate))
logging.info("High pass filter [Hz] : {}".format(fmin))
logging.info("Prior file : {}".format(prior_file))
# define a Bilby waveform generator
waveform_generator = bilby.gw.WaveformGenerator(
duration=waveform_duration,
sampling_frequency=sample_rate,
frequency_domain_source_model=lal_binary_black_hole,
parameter_conversion=convert_to_lal_binary_black_hole_parameters,
waveform_arguments={
"waveform_approximant": "IMRPhenomPv2",
"reference_frequency": 50,
"minimum_frequency": 20,
},
)
# sample GW parameters from prior distribution
priors = bilby.gw.prior.BBHPriorDict(prior_file)
sample_params = priors.sample(n_samples)
sample_params["geocent_time"] = signal_times
signals_list = []
snr_list = []
for strain, channel, ifo in zip(strains, channels, ifos):
# calculate the PSD
strain_psd = strain.psd(fftlength)
# generate GW waveforms
raw_signals = generate_gw(
sample_params,
waveform_generator=waveform_generator,
)
signals, snr = project_raw_gw(
raw_signals,
sample_params,
waveform_generator,
ifo,
get_snr=True,
noise_psd=strain_psd,
)
signals_list.append(signals)
snr_list.append(snr)
old_snr = np.sqrt(np.sum(np.square(snr_list), axis=0))
new_snr = np.random.uniform(snr_range[0], snr_range[1], len(snr_list[0]))
signals_list = [
signals * (new_snr / old_snr)[:, None] for signals in signals_list
]
sample_params["luminosity_distance"] = (
sample_params["luminosity_distance"] * old_snr / new_snr
)
snr_list = [snr * new_snr / old_snr for snr in snr_list]
outdir = Path(outdir)
frame_out_paths = [outdir / f.name for f in map(Path, frame_files)]
for strain, signals, frame_path in zip(
strains, signals_list, frame_out_paths
):
for i in range(n_samples):
idx1 = int(
(signal_times[i] - waveform_duration / 2.0) * sample_rate
)
idx2 = idx1 + waveform_duration * sample_rate
strain[idx1:idx2] += signals[i]
strain.write(frame_path)
# Write params and similar to output file
param_file = outdir / f"param_file_{frame_start}-{frame_stop}.h5"
with h5py.File(param_file, "w") as f:
# write signals attributes, snr, and signal parameters
params_gr = f.create_group("signal_params")
for k, v in sample_params.items():
params_gr.create_dataset(k, data=v)
# Save signal times as actual GPS times
f.create_dataset("GPS-start", data=signal_times + frame_start)
for i, ifo in enumerate(ifos):
ifo_gr = f.create_group(ifo)
ifo_gr.create_dataset("signal", data=signals_list[i])
ifo_gr.create_dataset("snr", data=snr_list[i])
# write frame attributes
f.attrs.update(
{
"size": n_samples,
"frame_start": frame_start,
"frame_stop": frame_stop,
"sample_rate": sample_rate,
"psd_fftlength": fftlength,
}
)
# Update signal attributes
f.attrs["waveform_duration"] = waveform_duration
f.attrs["flag"] = "GW"
return frame_out_paths, param_file | 204aca5dee78e885191907890fc064503ff61f57 | 3,656,709 |
async def lyric(id: int, endpoint: NeteaseEndpoint = Depends(requestClient)):
"""
## Name: `lyric`
    > Lyrics
---
### Required:
- ***int*** **`id`**
- Description: 单曲ID
"""
return await endpoint.lyric(id=id) | 331c0bced7bbd2523426522286a85f3cc6a3a29f | 3,656,710 |
def get_body(m):
"""extract the plain text body. return the body"""
if m.is_multipart():
body = m.get_body(preferencelist=('plain',)).get_payload(decode=True)
else:
body = m.get_payload(decode=True)
if isinstance(body, bytes):
return body.decode()
else:
return body | 7980c1471a0a09c793cb8124066a97caac21ae0d | 3,656,711 |
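A minimal usage sketch for get_body above, assuming the message is parsed with the modern email policy so that get_body() is available on multipart messages (the raw message below is illustrative):
from email import message_from_string, policy

raw = "Subject: hi\nContent-Type: text/plain\n\nhello world\n"
msg = message_from_string(raw, policy=policy.default)
print(get_body(msg))  # -> hello world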
import os
def get_image(img_path, ch=3, scale=None, tile_size=None, interpolate=cv2.INTER_AREA):
"""
Loads image data into standard Numpy array
Reads image and reverses channel order.
Loads image as 8 bit (regardless of original depth)
Parameters
------
img_path: str
Image file path.
ch: int
Number of input channels (default = 3).
scale: float
Scaling factor.
tile_size: int
Tile dimension (square).
interpolate: int
Interpolation method (OpenCV).
Returns
------
numpy array
Image array; formats: grayscale: [HW]; colour: [HWC].
w: int
Image width (px).
h: int
Image height (px).
w_resized: int
Image width resized (px).
h_resized: int
Image height resized (px).
"""
assert ch == 3 or ch == 1, 'Invalid number of input channels:\t{}.'.format(ch)
assert os.path.exists(img_path), 'Image path {} does not exist.'.format(img_path)
if not tile_size:
tile_size = defaults.tile_size
# verify image channel number
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if is_grayscale(img) and ch == 3:
print('\nInput image is grayscale but process expects colour (RGB).\n\tApplication stopped.')
exit(1)
elif not is_grayscale(img) and ch == 1:
if input("\nInput image is in colour (RGB) but process expects grayscale. "
"Apply grayscale filter? (Enter \'Y\' or \'y\' for Yes): ") in ['Y', 'y']:
grayscale(img)
# load image data
if ch == 3:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
else:
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
# get dimensions
height, width = img.shape[:2]
height_resized = height
width_resized = width
# apply scaling
if scale:
min_dim = min(height, width)
# adjust scale to minimum size (tile dimensions)
if min_dim < tile_size:
scale = tile_size / min_dim
dim = (int(scale * width), int(scale * height))
img = cv2.resize(img, dim, interpolation=interpolate)
height_resized, width_resized = img.shape[:2]
return img, width, height, width_resized, height_resized | 2d34bb64762c2c478eed71caab02d4b39dfc28ce | 3,656,712 |
def density(mass, volume):
"""
Calculate density.
"""
return mass / volume * 1 | 53b1f76ba66695a9cd72be9186bcc374ee11f53b | 3,656,713 |
import sys
def _is_global(obj, name=None):
"""Determine if obj can be pickled as attribute of a file-backed module"""
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None:
name = getattr(obj, '__name__', None)
module_name = _whichmodule(obj, name)
if module_name is None:
# In this case, obj.__module__ is None AND obj was not found in any
# imported module. obj is thus treated as dynamic.
return False
if module_name == "__main__":
return False
module = sys.modules.get(module_name, None)
if module is None:
# The main reason why obj's module would not be imported is that this
# module has been dynamically created, using for example
# types.ModuleType. The other possibility is that module was removed
# from sys.modules after obj was created/imported. But this case is not
# supported, as the standard pickle does not support it either.
return False
# module has been added to sys.modules, but it can still be dynamic.
if _is_dynamic(module):
return False
try:
obj2, parent = _getattribute(module, name)
except AttributeError:
# obj was not found inside the module it points to
return False
return obj2 is obj | b0846e156c1d9201e75d6e38d37bdb49b8ee9bfd | 3,656,714 |
from typing import Union
from typing import Callable
import torch
def get_augmenter(augmenter_type: str,
image_size: ImageSizeType,
dataset_mean: DatasetStatType,
dataset_std: DatasetStatType,
padding: PaddingInputType = 1. / 8.,
pad_if_needed: bool = False,
subset_size: int = 2) -> Union[Module, Callable]:
"""
Args:
augmenter_type: augmenter type
image_size: (height, width) image size
dataset_mean: dataset mean value in CHW
dataset_std: dataset standard deviation in CHW
padding: percent of image size to pad on each border of the image. If a sequence of length 4 is provided,
it is used to pad left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is
used to pad left/right, top/bottom borders, respectively.
pad_if_needed: bool flag for RandomCrop "pad_if_needed" option
subset_size: number of augmentations used in subset
Returns: nn.Module for Kornia augmentation or Callable for torchvision transform
"""
if not isinstance(padding, tuple):
assert isinstance(padding, float)
padding = (padding, padding, padding, padding)
assert len(padding) == 2 or len(padding) == 4
if len(padding) == 2:
# padding of length 2 is used to pad left/right, top/bottom borders, respectively
# padding of length 4 is used to pad left, top, right, bottom borders respectively
padding = (padding[0], padding[1], padding[0], padding[1])
# image_size is of shape (h,w); padding values is [left, top, right, bottom] borders
padding = (
int(image_size[1] * padding[0]),
int(image_size[0] * padding[1]),
int(image_size[1] * padding[2]),
int(image_size[0] * padding[3])
)
augmenter_type = augmenter_type.strip().lower()
if augmenter_type == "simple":
return nn.Sequential(
K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
padding_mode='reflect'),
K.RandomHorizontalFlip(p=0.5),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type == "fixed":
return nn.Sequential(
K.RandomHorizontalFlip(p=0.5),
# K.RandomVerticalFlip(p=0.2),
K.RandomResizedCrop(size=image_size, scale=(0.8, 1.0), ratio=(1., 1.)),
RandomAugmentation(
p=0.5,
augmentation=F.GaussianBlur2d(
kernel_size=(3, 3),
sigma=(1.5, 1.5),
border_type='constant'
)
),
K.ColorJitter(contrast=(0.75, 1.5)),
# additive Gaussian noise
K.RandomErasing(p=0.1),
# Multiply
K.RandomAffine(
degrees=(-25., 25.),
translate=(0.2, 0.2),
scale=(0.8, 1.2),
shear=(-8., 8.)
),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type in ["validation", "test"]:
return nn.Sequential(
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
elif augmenter_type == "randaugment":
return nn.Sequential(
K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
padding_mode='reflect'),
K.RandomHorizontalFlip(p=0.5),
RandAugmentNS(n=subset_size, m=10),
K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
std=torch.tensor(dataset_std, dtype=torch.float32)),
)
else:
raise NotImplementedError(f"\"{augmenter_type}\" is not a supported augmenter type") | 7b065d9bd7c9bc2cf3c0aa2fdf105c714df24705 | 3,656,715 |
def query(limit=None, username=None, ids=None, user=None):
"""# Retrieve Workspaces
Receive a generator of Workspace objects previously created in the Stark Bank API.
If no filters are passed and the user is an Organization, all of the Organization Workspaces
will be retrieved.
## Parameters (optional):
- limit [integer, default None]: maximum number of objects to be retrieved. Unlimited if None. ex: 35
- username [string, default None]: query by the simplified name that defines the workspace URL. This name is always unique across all Stark Bank Workspaces. Ex: "starkbankworkspace"
- ids [list of strings, default None]: list of ids to filter retrieved objects. ex: ["5656565656565656", "4545454545454545"]
- user [Organization/Project object, default None]: Organization or Project object. Not necessary if starkbank.user was set before function call
## Return:
- generator of Workspace objects with updated attributes
"""
return rest.get_stream(resource=_resource, limit=limit, username=username, ids=ids, user=user) | bc22336c7c76d549144e43b6d6c46793b1feedf9 | 3,656,716 |
def _add_output_tensor_nodes(net, preprocess_tensors, output_collection_name='inference_op'):
"""
Adds output nodes for all preprocess_tensors.
    :param preprocess_tensors: a dictionary containing all the predictions;
:param output_collection_name: Name of collection to add output tensors to.
:return: A tensor dict containing the added output tensor nodes.
"""
outputs = {}
outputs['roi_scores'] = tf.identity(net.all_rois_scores, name='rois_scores')
outputs['rois'] = tf.identity(net.all_rois, name='rois')
for output_key in outputs.keys():
tf.add_to_collection(output_collection_name, outputs[output_key])
return outputs | cdbb2b69a795bcc74925cce138e9d73bc4737276 | 3,656,717 |
def f_prob(times, lats, lons, members):
"""Probabilistic forecast containing also a member dimension."""
data = np.random.rand(len(members), len(times), len(lats), len(lons))
return xr.DataArray(
data,
coords=[members, times, lats, lons],
dims=["member", "time", "lat", "lon"],
attrs={"source": "test"},
) | 43fe73abb5667b0d29f36a4ee73e8d8ec1943ad0 | 3,656,718 |
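A hedged example of constructing this fixture (the coordinate values below are arbitrary placeholders; it assumes numpy/pandas are available and that xarray is imported as xr in the surrounding module, as the function body expects):
import numpy as np
import pandas as pd

times = pd.date_range("2000-01-01", periods=3)
lats, lons, members = np.arange(2), np.arange(4), np.arange(5)
fcst = f_prob(times, lats, lons, members)
print(fcst.dims, fcst.shape)  # ('member', 'time', 'lat', 'lon') (5, 3, 2, 4)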
def dunning_total_by_corpus(m_corpus, f_corpus):
"""
Goes through two corpora, e.g. corpus of male authors and corpus of female authors
runs dunning_individual on all words that are in BOTH corpora
returns sorted dictionary of words and their dunning scores
shows top 10 and lowest 10 words
:param m_corpus: Corpus object
:param f_corpus: Corpus object
:return: list of tuples (common word, (dunning value, m_corpus_count, f_corpus_count))
>>> from gender_analysis.analysis.dunning import dunning_total_by_corpus
>>> from gender_analysis.corpus import Corpus
>>> from gender_analysis.common import TEST_DATA_PATH
>>> path = TEST_DATA_PATH / 'sample_novels' / 'texts'
>>> csv_path = TEST_DATA_PATH / 'sample_novels' / 'sample_novels.csv'
>>> c = Corpus(path, csv_path=csv_path)
>>> m_corpus = c.filter_by_gender('male')
>>> f_corpus = c.filter_by_gender('female')
>>> result = dunning_total_by_corpus(m_corpus, f_corpus)
>>> print(result[0])
('she', (-12374.391057010947, 29382, 45907))
"""
wordcounter_male = m_corpus.get_wordcount_counter()
wordcounter_female = f_corpus.get_wordcount_counter()
totalmale_words = 0
totalfemale_words = 0
for male_word in wordcounter_male:
totalmale_words += wordcounter_male[male_word]
for female_word in wordcounter_female:
totalfemale_words += wordcounter_female[female_word]
dunning_result = {}
for word in wordcounter_male:
wordcount_male = wordcounter_male[word]
if word in wordcounter_female:
wordcount_female = wordcounter_female[word]
dunning_word = dunn_individual_word(totalmale_words, totalfemale_words,
wordcount_male, wordcount_female)
dunning_result[word] = (dunning_word, wordcount_male, wordcount_female)
dunning_result = sorted(dunning_result.items(), key=itemgetter(1))
return dunning_result | 324b0bb5e5f83451ca47cefed908cdd6dbc47c33 | 3,656,719 |
import os
import warnings
def get_apikey() -> str:
"""
Read and return the value of the environment variable ``LS_API_KEY``.
:return: The string value of the environment variable or an empty string
if no such variable could be found.
"""
api_key = os.environ.get("LS_API_KEY")
if api_key is None:
warnings.warn("No token found in environment variable LS_API_KEY.")
return api_key or "" | b88a6c0ac8e11add97abd1d7415126f75f50696d | 3,656,720 |
import uuid
import os
def test_pdf():
"""
    Test PDF report output
:return:
"""
res = ResMsg()
report_path = current_app.config.get("REPORT_PATH", "./report")
file_name = "{}.pdf".format(uuid.uuid4().hex)
path = os.path.join(report_path, file_name)
path = pdf_write(path)
path = path.lstrip(".")
res.update(data=path)
return res.data | f5bba395c5899d34bbd422d403970f320d8e4843 | 3,656,721 |
import inspect
def vprint(*args, apply=print, **kwargs):
"""
Prints the variable name, its type and value.
::
vprint(5 + 5, sum([1,2]))
> 5 + 5 (<class 'int'>):
10
sum([1,2]) (<class 'int'>):
3
"""
def printarg(_name, _val) -> str:
_string = f'{_name}: {igit_debug.formatting.pformat(_val, types=True)}'
apply(_string)
return _string
strings = []
if args:
currframe = inspect.currentframe()
outer = inspect.getouterframes(currframe)
frameinfo = outer[1]
ctx = frameinfo.code_context[0].strip()
argnames = ctx[ctx.find('(') + 1:-1].split(', ')
if len(argnames) != len(args) + len(kwargs):
print(f"Too complex statement, try breaking it down to variables or eliminating whitespace",
# f'len(argnames): {len(argnames)}', f'len(args): {len(args)}', f'len(kwargs): {len(kwargs)}',
# vprint(ctx, argnames, args, kwargs)
)
# return
for i, val in enumerate(args):
try:
name = argnames[i].strip()
except IndexError:
continue # TODO: break?
strings.append(printarg(name, val))
for name, val in kwargs.items():
strings.append(printarg(name, val))
# return strings | 759320b427ee26bded41a86e46ec6ce72cbfcf7a | 3,656,722 |
from typing import Optional
from typing import Callable
def get_int(prompt: Optional[str] = None,
min_value: Optional[int] = None,
max_value: Optional[int] = None,
condition: Optional[Callable[[int], bool]] = None,
default: Optional[int] = None) -> int:
"""Gets an int from the command line.
:param prompt: Input prompt.
:param min_value: Minimum value of the parsed int.
:param max_value: Maximum value of the parsed int.
:param condition: Condition the int must match.
:param default: Default value used if no characters are typed.
:return: Input int.
"""
input_int = None
input_str = None
while input_int is None:
try:
input_str = input(_prompt_from_message(prompt, default=default)).strip()
if default is not None and len(input_str) == 0:
input_str = default
input_int = int(input_str)
if (min_value is not None and input_int < min_value) or \
(max_value is not None and input_int > max_value) or \
(condition is not None and not condition(input_int)):
input_int = None
raise ValueError()
except ValueError:
_print_invalid_value(input_str)
return input_int | c6ea07b495330c74bd36523cf12dd3e208926ea5 | 3,656,723 |
def make_stream_callback(observer, raw, frame_size, start, stop):
"""
    Builds a callback function for stream playing. The observer is an object
which implements methods 'observer.set_playing_region(b,e)' and
'observer.set_playing_end(e)'. raw is the wave data in a str object.
frame_size is the number of bytes times number of channels per frame.
start and stop indicate which slice of raw would be played.
"""
start_ref = [ start ]
def callback(in_data, frame_count, time_info, status):
start = start_ref[0]
last = min(stop, start + frame_count*frame_size)
data = raw[start:last]
start_ref[0] = last
if last == stop: observer.set_playing_end(last)
else: observer.set_playing_region(start, last)
return (data, pyaudio.paContinue)
return callback | c29f7998f848c51af57e42c92a62f80c7a0c2e70 | 3,656,724 |
import torch
def predictCNN(segments, artifacts, device:torch.device = torch.device("cpu")):
"""
Perform model predictions on unseen data
:param segments: list of segments (paragraphs)
:param artifacts: run artifacts to evaluate
:param device: torch device
:return category predictions
"""
# Retrieve artifacts
params = artifacts["params"]
label_encoder = artifacts["label_encoder"]
tokenizer = artifacts["tokenizer"]
model = artifacts["model"]
# Prepare dataset into model readable format
preprocessed_segments = [preprocess.cleanText(segment, lower=params.lower, stem=params.stem) for segment in segments]
X = np.array(tokenizer.texts_to_sequences(preprocessed_segments), dtype="object")
y_blank = np.zeros((len(X), len(label_encoder)))
dataset = CNNDataset(X=X, y=y_blank, max_filter_size=int(params.max_filter_size))
dataloader = dataset.create_dataloader(batch_size=int(params.batch_size))
# Get model predictions
trainer = Trainer(model=model, device=device)
_, y_prob = trainer.predict_step(dataloader)
y_pred = [np.where(prob >= float(params.threshold), 1, 0) for prob in y_prob]
categories = label_encoder.decode(y_pred)
predictions = [{"input_text": segments[i], "preprocessed_text": preprocessed_segments[i], "predicted_tags": categories[i]} for i in range(len(categories))]
return predictions | 27ebdccaecd675104c670c1839daf634c142c640 | 3,656,725 |
import re
def transform_url(url):
"""Normalizes url to '[email protected]:{username}/{repo}' and also
returns username and repository's name."""
username, repo = re.search(r'[/:](?P<username>[A-Za-z0-9-]+)/(?P<repo>[^/]*)', url).groups()
if url.startswith('git@'):
return url, username, repo
    return 'git@github.com:{username}/{repo}'.format(**locals()), username, repo | 8d6e7d903d7c68d2f4fb3927bd7a02128cc09caf | 3,656,726 |
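Hedged usage sketch: an HTTPS GitHub remote is normalized to the SSH form and the username and repository name are extracted (the URL below is illustrative):
print(transform_url("https://github.com/octocat/Hello-World"))
# ('git@github.com:octocat/Hello-World', 'octocat', 'Hello-World')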
from typing import Optional
def prettyprint(data: dict, command: str, modifier: Optional[str] = '') -> str:
"""
Prettyprint the JSON data we get back from the API
"""
output = ''
# A few commands need a little special treatment
if command == 'job':
command = 'jobs'
if 'data' in data and 'jobs' in data['data']:
output = prettyprint_jobs(data, command)
elif 'data' in data and 'files' in data['data']:
output = prettyprint_firmware(data, command)
elif 'job_id' in data:
output = prettyprint_job(data, command)
elif 'data' in data and 'groups' in data['data']:
output = prettyprint_groups(data, 'groups')
elif 'data' in data and 'version' in data['data']:
output = prettyprint_version(data, 'version')
elif 'data' in data and command == 'device':
output = prettyprint_device(data)
elif 'data' in data and command in data['data']:
output = prettyprint_command(data, command)
elif 'status' in data and data['status'] == 'error':
output = prettyprint_error(data)
else:
output = prettyprint_other(data)
if modifier != '':
output = prettyprint_modifier(output, modifier)
return output | 727a59b22b2624fec56e685cc3b84f065bbfeffd | 3,656,727 |
def kmor(X: np.array, k: int, y: float = 3, nc0: float = 0.1, max_iteration: int = 100, gamma: float = 10 ** -6):
"""K-means clustering with outlier removal
Parameters
----------
X
Your data.
k
Number of clusters.
y
Parameter for outlier detection. Increase this to make outlier removal subtle.
nc0
Maximum percentage of your data that can be assigned to outlier cluster.
max_iteration
Maximum number of iterations.
gamma
Used to check the convergence.
Returns
-------
numpy.array
Numpy array that contains the assigned cluster of each data point (0 to k, the cluster k is the outlier
cluster)
"""
n = X.shape[0]
n0 = int(nc0 * X.shape[0])
Z = X[np.random.choice(n, k)]
def calculate_dd(U, Z):
return np.linalg.norm(X - Z[U], axis=1) ** 2
def calculate_D(outliers, dd):
factor = y / (n - outliers.size)
return factor * np.sum(np.delete(dd, outliers))
def calculate_U(X):
def closest(p):
return np.argmin(np.linalg.norm(Z - p, axis=1))
return np.apply_along_axis(closest, 1, X)
    outliers = np.array([], dtype=int)
U = calculate_U(X)
s = 0
p = 0
while True:
# Update U (Theorem 1)
dd = calculate_dd(U, Z)
D = calculate_D(outliers, dd)
dd2 = dd[dd > D]
outliers = np.arange(n)[dd > D][dd2.argsort()[::-1]]
outliers = outliers[:n0]
U = calculate_U(X)
# Update Z (Theorem 3)
is_outlier = np.isin(U, outliers)
def mean_group(i):
x = X[np.logical_and(U == i, ~is_outlier)]
# Empty group
if x.size == 0:
x = X[np.random.choice(n, 1)]
return x.mean(axis=0)
Z = np.array([mean_group(i) for i in range(k)])
# Update P
dd = calculate_dd(U, Z)
D = calculate_D(outliers, dd)
if outliers.size == 0:
p1 = np.sum(dd)
else:
            p1 = np.sum(np.delete(dd, outliers)) + D * outliers.size
# Exit condition
s += 1
if abs(p1 - p) < gamma or s > max_iteration:
break
p = p1
print("s:", s, "p:", p)
U[outliers] = k
return U | 5ffa55d45d615586971b1ec502981f1a7ab27cbe | 3,656,728 |
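A small hedged demo of kmor on synthetic data (assumes numpy is imported as np; the blob positions are made up). The three far-away points are expected to receive the outlier label k == 2:
import numpy as np

np.random.seed(0)
X = np.vstack([
    np.random.randn(50, 2),             # cluster near the origin
    np.random.randn(50, 2) + 10.0,      # second cluster
    np.random.uniform(40, 50, (3, 2)),  # a few obvious outliers
])
U = kmor(X, k=2)
print(U[-3:])  # expected to be the outlier label, i.e. [2 2 2]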
import tempfile
from shutil import copyfile
import importlib
import os
def compile_pipeline(pipeline_source: str, pipeline_name: str) -> str:
"""Read in the generated python script and compile it to a KFP package."""
# create a tmp folder
tmp_dir = tempfile.mkdtemp()
# copy generated script to temp dir
copyfile(pipeline_source, tmp_dir + '/' + "pipeline_code.py")
path = tmp_dir + '/' + 'pipeline_code.py'
spec = importlib.util.spec_from_file_location(tmp_dir.split('/')[-1], path)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
# path to generated pipeline package
pipeline_package = os.path.join(os.path.dirname(pipeline_source),
pipeline_name + '.pipeline.yaml')
Compiler().compile(foo.auto_generated_pipeline, pipeline_package)
return pipeline_package | e17c7c8aea9ba8d1ec46c3ecde0a3d2cad92ff45 | 3,656,729 |
def turnout_div(turnout_main, servo, gpo_provider):
"""Create a turnout set to the diverging route"""
turnout_main.set_route(True)
# Check that the route was set to the diverging route
assert(servo.get_angle() == ANGLE_DIV)
assert(gpo_provider.is_enabled())
return turnout_main | 542a747cc7f4cdc78b7ad046b0c4ce4a0a3cd33d | 3,656,730 |
def num_jewels(J: str, S: str) -> int:
"""
Time complexity: O(n + m)
Space complexity: O(n)
"""
jewels = set(J)
return sum(stone in jewels for stone in S) | f1a9632a791e3ef94699b566da61e27d9dc46b07 | 3,656,731 |
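Quick illustrative check (values are made up): 'a' and 'A' are jewels, so three of the stones count:
print(num_jewels("aA", "aAAbbbb"))  # 3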
import socket
def internet(host="8.8.8.8", port=53, timeout=3):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
logger.info('Internet is there!!')
return True
except Exception as ex:
logger.warning('Internet is gone!!')
return False | 773f490baec40bf548ed2f13d1d1094c78b33366 | 3,656,732 |
import logging
def map_family_situation(code):
"""Maps French family situation"""
status = FamilySituation
mapping = {
"M": status.MARRIED.value,
"C": status.SINGLE.value,
"V": status.WIDOWED.value,
"D": status.DIVORCED.value,
"O": status.PACSED.value,
}
if code in mapping.keys():
return mapping[code]
else:
logging.warning("In {}, args {} not recognised".format("family_situation", code))
return code | ae5ac0c9ffadb31d25825e65fcb81d6ea9b0115f | 3,656,733 |
def transform(x, channels, img_shape, kernel_size=7, threshold=1e-4):
"""
    Parameters
    ----------
    x : WRITEME
        data with axis [b, 0, 1, c]
"""
for i in channels:
assert isinstance(i, int)
assert i >= 0 and i <= x.shape[3]
x[:, :, :, i] = lecun_lcn(x[:, :, :, i],
img_shape,
kernel_size,
threshold)
return x | c66725795585ea26dc9622ce42133a4a2f1445a8 | 3,656,734 |
import functools
def delete_files(files=[]):
"""This decorator deletes files before and after a function.
This is very useful for installation procedures.
"""
def my_decorator(func):
@functools.wraps(func)
def function_that_runs_func(self, *args, **kwargs):
# Inside the decorator
# Delete the files - prob don't exist yet
delete_paths(files)
# Run the function
stuff = func(self, *args, **kwargs)
# Delete the files if they do exist
delete_paths(files)
return stuff
return function_that_runs_func
return my_decorator | 09652e9dd527b6ae43cf47deb2eaf460de51552e | 3,656,735 |
import os
def get_town_table(screenshot_dir):
"""Generate python code for town table
Its format is
table[town_name] = (nearby town1, nearby town2...nearby town5)
The length of tuple may be different depends on town.
Arguments:
screenshot_dir (str): Directory which have town_name directory
and label.
Return:
python code style string (str)
"""
result = "TOWNS_TABLE = {}\n"
for di in sorted(os.listdir(screenshot_dir)):
dir_path = screenshot_dir + "/" + di
if not os.path.isdir(dir_path):
continue
for f in os.listdir(dir_path):
if f.lower().endswith(".txt"):
result += "TOWNS_TABLE[("
lines = open(dir_path + "/" + f).read().splitlines()
for i in range(3, len(lines), 3):
result += "'%s', " % lines[i]
result = result[:-2] + ")]\\"
result += "\n= '%s'\n" % di
break
return result | b6e1c9591cc0531fe9a28b7ce5fec5e5cc231849 | 3,656,736 |
def add_note(front, back, tag, model, deck, note_id=None):
"""
Add note with `front` and `back` to `deck` using `model`.
If `deck` doesn't exist, it is created.
If `model` doesn't exist, nothing is done.
If `note_id` is passed, it is used as the note_id
"""
model = mw.col.models.byName(model)
if model:
mw.col.decks.current()['mid'] = model['id']
else:
return None
# Creates or reuses deck with name passed using `deck`
did = mw.col.decks.id(deck)
deck = mw.col.decks.get(did)
note = mw.col.newNote()
note.model()['did'] = did
note.fields[0] = front
note.fields[1] = back
if note_id:
note.id = note_id
note.addTag(tag)
mw.col.addNote(note)
mw.col.save()
return note.id | e45528705dbd658dcb708259043f4a4b590e884b | 3,656,737 |
def indices_to_one_hot(data, nb_classes): #separate: embedding
"""Convert an iterable of indices to one-hot encoded labels."""
targets = np.array(data).reshape(-1)
return np.eye(nb_classes)[targets] | 36fdf0dbad51ae6d64c1a6bf783f083013686e40 | 3,656,738 |
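A minimal sketch of the call (assumes numpy is imported as np, as the function body expects):
import numpy as np

print(indices_to_one_hot([0, 2, 1], nb_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]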
from rdkit.Chem import rdMolTransforms
def translateToceroZcoord(moleculeRDkit):
"""
Translate the molecule to put the first atom in the origin of the coordinates
Parameters
----------
moleculeRDkit : RDkit molecule
An RDkit molecule
Returns
-------
List
List with the shift value applied to X, Y, Z
"""
conf = moleculeRDkit.GetConformer()
# avoid first atom overlap with dummy 3
if abs(conf.GetAtomPosition(0).x-1.0)<1e-3 and abs(conf.GetAtomPosition(0).y-1.0)<1e-3 and abs(conf.GetAtomPosition(0).z-0.0)<1e-3:
shiftX = conf.GetAtomPosition(0).x - 1.0
shiftY = conf.GetAtomPosition(0).y - 1.0
shiftZ = conf.GetAtomPosition(0).z
translationMatrix = np.array( [[1, 0, 0, -shiftX],
[0, 1, 0, -shiftY],
[0, 0, 1, -shiftZ],
[0, 0, 0, 1]], dtype=np.double)
rdMolTransforms.TransformConformer(conf, translationMatrix)
else:
shiftX = 0.0
shiftY = 0.0
shiftZ = 0.0
return [shiftX, shiftY, shiftZ] | cbe17cf023791517c01b0e52c11dde65532ab6d0 | 3,656,739 |
def standardize(mri):
"""
Standardize mean and standard deviation of each channel and z_dimension slice to mean 0 and standard
deviation 1.
Note: setting the type of the input mri to np.float16 beforehand causes issues, set it afterwards.
Args:
mri (np.array): input mri, shape (dim_x, dim_y, dim_z, num_channels)
Returns:
standardized_mri (np.array): standardized version of input mri
"""
standardized_mri = np.zeros(mri.shape)
# Iterate over channels
for c in range(mri.shape[3]):
# Iterate over the `z` depth dimension
for z in range(mri.shape[2]):
# Get a slice of the mri at channel c and z-th dimension
mri_slice = mri[:, :, z, c]
# Subtract the mean from mri_slice
centered = mri_slice - np.mean(mri_slice)
# Divide by the standard deviation (only if it is different from zero)
if np.std(centered) != 0:
centered_scaled = centered / np.std(centered)
# Update the slice of standardized mri with the centered and scaled mri
standardized_mri[:, :, z, c] = centered_scaled
return standardized_mri | 9c0847d1618023d83cdec48a1c43aae6efc1116f | 3,656,740 |
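A hedged example on a random volume (the shape is arbitrary): after standardization each (z, channel) slice has roughly zero mean and unit standard deviation.
import numpy as np

mri = np.random.rand(8, 8, 4, 2) * 100
out = standardize(mri)
print(out[:, :, 0, 0].mean().round(6), out[:, :, 0, 0].std().round(6))  # ~0.0 1.0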
def current_floquet_kets(eigensystem, time):
"""
Get the Floquet basis kets at a given time. These are the
|psi_j(t)> = exp(-i energy[j] t) |phi_j(t)>,
using the notation in Marcel's thesis, equation (1.13).
"""
weights = np.exp(time * eigensystem.abstract_ket_coefficients)
weights = weights.reshape((1, -1, 1))
return np.sum(weights * eigensystem.k_eigenvectors, axis=1) | 60fdb845fc026bf3a109f05945b251a224b12092 | 3,656,741 |
def summary():
""" DB summary stats """
cur = get_cur()
res = []
try:
cur.execute('select count(study_id) as num_studies from study')
res = cur.fetchone()
except:
dbh.rollback()
finally:
cur.close()
if res:
return Summary(num_studies=res['num_studies'])
else:
return [] | e0159452df1909626d523896f1c2735fb4fc3e75 | 3,656,742 |
import time
def get_clockwork_conformations(molobj, torsions, resolution,
atoms=None,
debug=False,
timings=False):
"""
    Get all conformations for a specific cost,
    where the cost is defined by the torsions and resolution.
"""
n_torsions = len(torsions)
if atoms is None:
atoms, xyz = cheminfo.molobj_to_xyz(molobj, atom_type="int")
del xyz
combinations = clockwork.generate_clockwork_combinations(resolution, n_torsions)
# Collect energies and coordinates
end_energies = []
end_coordinates = []
end_representations = []
first = True
for resolutions in combinations:
time_start = time.time()
# Get all conformations
c_energies, c_coordinates, c_states = get_conformations(molobj, torsions, resolutions)
N = len(c_energies)
# Filter unconverged
success = np.argwhere(c_states == 0)
success = success.flatten()
c_energies = c_energies[success]
c_coordinates = c_coordinates[success]
N2 = len(c_energies)
# Calculate representations
c_representations = [sim.get_representation(atoms, coordinates) for coordinates in c_coordinates]
c_representations = np.asarray(c_representations)
# Clean all new conformers for energies and similarity
idxs = clean_representations(atoms, c_energies, c_representations)
c_energies = c_energies[idxs]
c_coordinates = c_coordinates[idxs]
c_representations = c_representations[idxs]
if first:
first = False
end_energies += list(c_energies)
end_coordinates += list(c_coordinates)
end_representations += list(c_representations)
continue
# Asymmetrically add new conformers
idxs = merge.merge_asymmetric(atoms,
c_energies,
end_energies,
c_representations,
end_representations)
# Add new unique conformation to return collection
for i, idx in enumerate(idxs):
# if conformation already exists, continue
if len(idx) > 0: continue
# Add new unique conformation to collection
end_energies.append(c_energies[i])
end_coordinates.append(c_coordinates[i])
end_representations.append(c_representations[i])
time_end = time.time()
if timings:
timing = time_end - time_start
print("res time {:8.2f} cnf/sec - {:8.2f} tot sec".format(N/timing, timing))
continue
return end_energies, end_coordinates | 2d501adc974af4acf4dc5dd49879a9a8c72d2b22 | 3,656,743 |
def rotate_affine(img, rot=None):
"""Rewrite the affine of a spatial image."""
if rot is None:
return img
img = nb.as_closest_canonical(img)
affine = np.eye(4)
affine[:3] = rot @ img.affine[:3]
return img.__class__(img.dataobj, affine, img.header) | 4a06c286dcfc0832558c74f2cbce54d6e8d7a2d4 | 3,656,744 |
import math
def validate_ttl(options):
"""
Check with Vault if the ttl is valid.
:param options: Lemur option dictionary
:return: 1. Boolean if the ttl is valid or not.
2. the ttl in hours.
"""
if 'validity_end' in options and 'validity_start' in options:
ttl = math.floor(abs(options['validity_end'] - options['validity_start']).total_seconds() / 3600)
elif 'validity_years' in options:
ttl = options['validity_years'] * 365 * 24
else:
ttl = 0
headers = {'X-Vault-Token': vault_auth.get_token()}
url = '{}/roles/{}'.format(current_app.config.get('VAULT_PKI_URL'), options['authority'].name)
res, resp = vault_read_request(url, headers)
if res:
max_ttl = resp.json()['data']['max_ttl']
text_file = open("max_ttl.txt", "wt")
n = text_file.write(str(max_ttl))
text_file.close()
if int(max_ttl) < ttl:
current_app.logger.info('Certificate TTL is above max ttl - ' + max_ttl)
return True, ttl
else:
return True, ttl
else:
current_app.logger.info('Vault: Failed to get Vault max TTL')
raise Exception('Vault: ' + resp) | 83d7d323ae4b3db28f41879f630982d24515fcb1 | 3,656,745 |
from typing import Any
def vgg16(pretrained:bool=False,progress:bool=True,**kwargs:Any) ->VGG:
"""
Args:
        pretrained (bool): whether to load pretrained parameters
        progress (bool): whether to display a progress bar while downloading data
    Return:
        the VGG model
"""
return _vgg("vgg16","D",False,pretrained,progress,**kwargs) | 82d64394a705caa9e0a0c1e661078c9ea299fa05 | 3,656,746 |
def cy_gate(N=None, control=0, target=1):
"""Controlled Y gate.
Returns
-------
result : :class:`qutip.Qobj`
Quantum object for operator describing the rotation.
"""
if (control == 1 and target == 0) and N is None:
N = 2
if N is not None:
return gate_expand_2toN(cy_gate(), N, control, target)
return Qobj([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, -1j],
[0, 0, 1j, 0]],
dims=[[2, 2], [2, 2]]) | 8927557d0afe096218acf1ac0283c5ec073e3f98 | 3,656,747 |
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
**Examples**
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(1 + x)**2*(2 + x)**3
"""
return _generic_factor(f, gens, args, method='sqf') | 5f0267b7c314269e64c32951824346542e3e3452 | 3,656,748 |
def update_options_dpd2(dpd1_val):
"""
Updates the contents of the second dropdown menu based of the value of the first dropdown.
:param dpd1_val: str, first dropdown value
:return: list of dictionaries, labels and values
"""
all_options = [
strings.CITY_GDANSK,
strings.CITY_GDYNIA,
strings.CITY_KALINGRAD,
strings.CITY_KLAIPEDA,
strings.CITY_STPETERBURG,
]
all_options.remove(dpd1_val)
options = [{"label": opt, "value": opt} for opt in all_options]
return options | 4a2d0494f04e3026b133f61a70757046f011b5f1 | 3,656,749 |
import csv
def compute_min_paths_from_monitors(csv_file_path, delimiter='\t', origin_as=PEERING_ORIGIN):
"""
Inputs: csv_file_path, delimiter : csv file containing entries with the following format:
|collector|monitor|as_path, and the delimiter used
origin_as: the ASN you want to use as the terminal one for the as_path length computation
Output: A dictionary that contains for each monitor found in the given csv file, the minimum length path
and its length.
"""
monitor_routes = {} # contains the minimum length found for each route monitor
# key:monitor(string), value: (minimum as_path length(integer),
# the minimum length as_path(list of positive integers))
with open(csv_file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delimiter)
row_count = 0
for row in csv_reader:
row_count += 1
monitor = row[1]
# AS-path prep removing prepending and bgp poisoning
as_path_list = AS_path().make_list(row[2]) # as_path(string) -> as_path (list of positive integers)
as_path_rem_prepend = AS_path().remove_prependings(as_path_list)
as_path_cleared = AS_path().remove_loops(as_path_rem_prepend)
as_path_length = AS_path().count_length(as_path_cleared, origin_as)
if monitor in monitor_routes.keys():
if monitor_routes[monitor][0] > as_path_length:
monitor_routes[monitor] = (as_path_length, as_path_cleared)
else:
monitor_routes[monitor] = (as_path_length, as_path_cleared)
return monitor_routes | 6f0c1e26062213ea14af80a803c6e6ebd25c6543 | 3,656,750 |
def _blanking_rule_ftld_or3a(rule):
""" See _blanking_rule_ftld_or2a for rules """
if rule == 'Blank if Question 1 FTDIDIAG = 0 (No)':
return lambda packet: packet['FTDIDIAG'] == 0
elif rule == 'Blank if Question 3 FTDFDGPE = 0 (No)':
return lambda packet: packet['FTDFDGPE'] == 0
elif rule == 'Blank if Question 3a FTDFDGFh = 0 (No) or 9 (Unknown)':
return lambda packet: packet['FTDFDGFh'] in (0, 9)
elif rule == 'Blank if Question 3a11, FTDFDGOA, ne 1 (Yes)':
return lambda packet: packet['FTDFDGOA'] != 1
else:
return lambda packet: False | cc0a925c7ad6ad72c4041d369f9a820c0a6a6b96 | 3,656,751 |
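Hedged usage sketch: the returned predicate inspects a packet-like mapping (the dict below is illustrative):
rule = _blanking_rule_ftld_or3a('Blank if Question 1 FTDIDIAG = 0 (No)')
print(rule({'FTDIDIAG': 0}), rule({'FTDIDIAG': 1}))  # True False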
def _fix_nested_array(func_ir):
"""Look for assignment like: a[..] = b, where both a and b are numpy arrays, and
try to eliminate array b by expanding a with an extra dimension.
"""
"""
cfg = compute_cfg_from_blocks(func_ir.blocks)
all_loops = list(cfg.loops().values())
def find_nest_level(label):
level = 0
for loop in all_loops:
if label in loop.body:
level += 1
"""
def find_array_def(arr):
"""Find numpy array definition such as
arr = numba.unsafe.ndarray.empty_inferred(...).
If it is arr = b[...], find array definition of b recursively.
"""
arr_def = func_ir.get_definition(arr)
_make_debug_print("find_array_def")(arr, arr_def)
if isinstance(arr_def, ir.Expr):
if guard(_find_unsafe_empty_inferred, func_ir, arr_def):
return arr_def
elif arr_def.op == 'getitem':
return find_array_def(arr_def.value)
raise GuardException
def fix_array_assign(stmt):
"""For assignment like lhs[idx] = rhs, where both lhs and rhs are arrays, do the
following:
1. find the definition of rhs, which has to be a call to numba.unsafe.ndarray.empty_inferred
2. find the source array creation for lhs, insert an extra dimension of size of b.
3. replace the definition of rhs = numba.unsafe.ndarray.empty_inferred(...) with rhs = lhs[idx]
"""
require(isinstance(stmt, ir.SetItem))
require(isinstance(stmt.value, ir.Var))
debug_print = _make_debug_print("fix_array_assign")
debug_print("found SetItem: ", stmt)
lhs = stmt.target
# Find the source array creation of lhs
lhs_def = find_array_def(lhs)
debug_print("found lhs_def: ", lhs_def)
rhs_def = get_definition(func_ir, stmt.value)
debug_print("found rhs_def: ", rhs_def)
require(isinstance(rhs_def, ir.Expr))
if rhs_def.op == 'cast':
rhs_def = get_definition(func_ir, rhs_def.value)
require(isinstance(rhs_def, ir.Expr))
require(_find_unsafe_empty_inferred(func_ir, rhs_def))
# Find the array dimension of rhs
dim_def = get_definition(func_ir, rhs_def.args[0])
require(isinstance(dim_def, ir.Expr) and dim_def.op == 'build_tuple')
debug_print("dim_def = ", dim_def)
extra_dims = [ get_definition(func_ir, x, lhs_only=True) for x in dim_def.items ]
debug_print("extra_dims = ", extra_dims)
# Expand size tuple when creating lhs_def with extra_dims
size_tuple_def = get_definition(func_ir, lhs_def.args[0])
require(isinstance(size_tuple_def, ir.Expr) and size_tuple_def.op == 'build_tuple')
debug_print("size_tuple_def = ", size_tuple_def)
size_tuple_def.items += extra_dims
# In-place modify rhs_def to be getitem
rhs_def.op = 'getitem'
rhs_def.value = get_definition(func_ir, lhs, lhs_only=True)
rhs_def.index = stmt.index
del rhs_def._kws['func']
del rhs_def._kws['args']
del rhs_def._kws['vararg']
del rhs_def._kws['kws']
# success
return True
for label in find_topo_order(func_ir.blocks):
block = func_ir.blocks[label]
for stmt in block.body:
if guard(fix_array_assign, stmt):
block.body.remove(stmt) | e60c1ad0259f26f2e50fb36c9d239a8bb900686f | 3,656,752 |
def compute_angular_differences(matrix, orientation1, orientation2, cutoff):
""" Compute angular difference between two orientation ndarrays
:param matrix: domain matrix
:type matrix: np.ndarray
:param orientation1: orientation as (x, y, z, 3)
:type orientation1: np.ndarray
:param orientation2: orientation as (x, y, z, 3)
:type orientation2: np.ndarray
:param cutoff: to binarize domain
:type cutoff: (int, int)
:return: angle_errors in degrees, mean, std
:rtype: (np.ndarray, float, float)
"""
if not isinstance(matrix, np.ndarray) or not isinstance(orientation1, np.ndarray) or not isinstance(orientation2, np.ndarray):
raise Exception("Inputs must be ndarrays.")
if not isinstance(cutoff, tuple) or not len(cutoff) == 2:
raise Exception("Cutoff must be a tuple(int, int).")
if not (orientation1.ndim == 4 and orientation2.ndim == 4 and matrix.ndim == 3 and orientation1.shape[3] == 3 and
orientation1.shape == orientation2.shape and orientation1.shape[0] == matrix.shape[0] and
orientation1.shape[1] == matrix.shape[1] and orientation1.shape[2] == matrix.shape[2]):
raise Exception("Incorrect dimensions in input ndarrays.")
mask = np.logical_and(matrix >= cutoff[0], matrix <= cutoff[1])
unit_vectors_1 = orientation1[mask]
unit_vectors_2 = orientation2[mask]
radians_diff = np.einsum('ij,ij->i', unit_vectors_1, unit_vectors_2)
diff = np.zeros((unit_vectors_1.shape[0], 2), dtype=float)
diff[:, 0] = np.degrees(np.arccos(np.clip(radians_diff, -1, 1)))
diff[:, 1] = 180 - diff[:, 0]
diff = np.min(diff, axis=1)
angle_diff = np.zeros_like(matrix, dtype=float)
angle_diff[mask] = diff
return angle_diff, diff.mean(), diff.std() | 3ec860c484057de91eb306079328faff87a9b0e4 | 3,656,753 |
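# A minimal usage sketch for compute_angular_differences above: two orientation fields
# at right angles everywhere should give a 90-degree difference at every masked voxel.
import numpy as np
matrix = np.ones((2, 2, 2))                                  # every voxel falls inside the cutoff (1, 1)
orient_x = np.zeros((2, 2, 2, 3)); orient_x[..., 0] = 1.0    # unit vectors along x
orient_y = np.zeros((2, 2, 2, 3)); orient_y[..., 1] = 1.0    # unit vectors along y
angle_diff, mean_deg, std_deg = compute_angular_differences(matrix, orient_x, orient_y, (1, 1))
print(mean_deg, std_deg)                                     # 90.0 0.0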
def await(*args):
"""Runs all the tasks specified in args,
and finally returns args unwrapped.
"""
return _await(args) | 1065986a6ac067222bf5c6ff47a395ab4d0c890e | 3,656,754 |
def convert_action_move_exists(action, board, player_turn):
"""
Converts action index to chess.Move object.
Assume the action key exists in map_action_uci
:param action:
:param board:
:param player_turn:
:return:
"""
move = chess.Move.from_uci(map_action_uci[action])
if player_turn == chess.BLACK:
move = chess.Move(from_square=chess.square_mirror(move.from_square),
to_square=chess.square_mirror(move.to_square), promotion=move.promotion)
if move.promotion == chess.QUEEN:
move.promotion = None
rank = move.to_square//8
try:
if move.promotion is None and board.piece_at(move.from_square).piece_type == chess.PAWN and \
(rank == 7 or rank == 0):
move.promotion = chess.QUEEN
except AttributeError as err:
print(board, move, action, player_turn)
raise AttributeError(err)
return move | f4c508a99967d65b6f2f07159fe3003730b220a2 | 3,656,755 |
import os
import re
def find_phase_files(input_filePath, run_number=1):
"""
Returns a list of the phase space files, sorted by z position
    (filename, z_approx)
"""
path, infile = os.path.split(input_filePath)
prefix = infile.split('.')[0] # Astra uses inputfile to name output
    phase_files = []
    run_extension = astra_run_extension(run_number)
    for file in os.listdir(path):
        if re.match(re.escape(prefix) + r'\.\d\d\d\d\.' + run_extension, file):
# Get z position
z = float(file.replace(prefix+ '.', '').replace('.'+run_extension,''))
phase_file=os.path.join(path, file)
phase_files.append((phase_file, z))
# Sort by z
return sorted(phase_files, key=lambda x: x[1]) | 16f2ec29468e82f27f87de5a8323fc70effe3af7 | 3,656,756 |
import pwd
import win32api
def _get_system_username():
"""Return the current system user."""
if not win32:
return pwd.getpwuid(getuid())[0]
else:
return win32api.GetUserName() | 4dfdc93630d2c3940c7087fc2125f81f0e385d9f | 3,656,757 |
import glob
def _get_vmedia_device():
"""Finds the device filename of the virtual media device using sysfs.
:returns: a string containing the filename of the virtual media device
"""
sysfs_device_models = glob.glob("/sys/class/block/*/device/model")
vmedia_device_model = "virtual media"
for model_file in sysfs_device_models:
try:
with open(model_file) as model_file_fobj:
if vmedia_device_model in model_file_fobj.read().lower():
vmedia_device = model_file.split('/')[4]
return vmedia_device
except Exception:
pass | e8f8e83b7bf0c73d10d8893a5b4b49670edba7ac | 3,656,758 |
def convert_to_posixpath(file_path):
"""Converts a Windows style filepath to posixpath format. If the operating
system is not Windows, this function does nothing.
Args:
file_path: str. The path to be converted.
Returns:
str. Returns a posixpath version of the file path.
"""
if not is_windows_os():
return file_path
return file_path.replace('\\', '/') | 9a8e6559b7916ba7547f87ce3bba6b50362c7ded | 3,656,759 |
def generateCards(numberOfSymb):
"""
Generates a list of cards which are themselves a list of symbols needed on each card to respect the rules of Dobble.
This algorithm was taken from the french Wikipedia page of "Dobble".
https://fr.wikipedia.org/wiki/Dobble
:param numberOfSymb: Number of symbols needed on each card.
:type numberOfSymb: int
:returns: List of cards which are list of symbols on it.
:rtype: List[List[int]]
"""
nbSymByCard = numberOfSymb
nbCards = (nbSymByCard**2) - nbSymByCard + 1
cards = []
n = nbSymByCard - 1
t = []
t.append([[(i+1)+(j*n) for i in range(n)] for j in range(n)])
for ti in range(n-1):
t.append([[t[0][((ti+1)*i) % n][(j+i) % n] for i in range(n)] for j in range(n)])
t.append([[t[0][i][j] for i in range(n)] for j in range(n)])
for i in range(n):
t[0][i].append(nbCards - n)
t[n][i].append(nbCards - n + 1)
for ti in range(n-1):
t[ti+1][i].append(nbCards - n + 1 + ti + 1)
t.append([[(i+(nbCards-n)) for i in range(nbSymByCard)]])
for ti in t:
cards = cards + ti
return cards | 8f51c1f339d62b6fd88cb8d0fae692053bffc084 | 3,656,760 |
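# Usage sketch for generateCards above: with 3 symbols per card the deck has
# 3*3 - 3 + 1 = 7 cards, and any two distinct cards share exactly one symbol.
cards = generateCards(3)
assert len(cards) == 7
for i in range(len(cards)):
    for j in range(i + 1, len(cards)):
        assert len(set(cards[i]) & set(cards[j])) == 1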
import os
def get_metadata(tmpdirname):
"""
Get metadata from kmp.json if it exists.
    If it does not exist then the result of get_and_convert_infdata is returned.
    Args:
        tmpdirname (str): temp directory the kmp was extracted to
Returns:
list[5]: info, system, options, keyboards, files
see kmpmetadata.parsemetadata for details
"""
kmpjson = os.path.join(tmpdirname, "kmp.json")
if os.path.isfile(kmpjson):
return parsemetadata(kmpjson, False)
else:
return get_and_convert_infdata(tmpdirname) | 7096b2a67fddaa1774fe0e552af7d9d790c66d15 | 3,656,761 |
def match_facilities(facility_datasets,
authoritative_dataset,
manual_matches_df=None,
max_distance=150,
nearest_n=10,
meters_crs='epsg:5070',
reducer_fn=None):
"""Matches facilities. The dataset represented by the authoritative_dataset key
    in the facilities_dfs dict will be considered authoritative - all other facilities
in the remaining datasets will be dropped if they are not matched, and the point
location of the authoritative dataset will be used.
Args:
facility_datasets (Dict[str, Dict]): A dictionary keyed by
the dataset ID with values being a dictionary containing keys
'df' containing the dataframe of facility data and 'columns'
containing a FacilityColumns object.
authoritative_dataset: The dataset that contains the facilities all
other datasets will match to.
manual_matches_df: Dataframe containing manually matched facilities. Should contain
columns for each of the ID columns of the datasets with matching IDs in each row.
max_distance (int, optional): The maximum distance (in meters) that two matches can be apart.
Defaults to 150 meters.
nearest_n (int, optional): The number of neighbors to consider as potential options.
Defaults to 10.
meters_crs: The EPSG code for the projection to use for meters distance computations.
Defaults to EPSG:5070 (NAD83 / Conus Albers) for the U.S.
reducer_fn: Function to reduce potentially matched facilities. Defaults to
reduce_matched_facility_records. See that function's signature for required
parameters. Pass in alternate implementations to implement other matching approaches.
Result:
(FacilityMatchResult): The result of the match.
Note:
The resulting dataframes will convert the id columns of any dataset into a str type.
"""
MATCH_ID_SEP = '_-_'
if reducer_fn is None:
reducer_fn = reduce_matched_facility_records
def get_id_column(dataset_key):
return facility_datasets[dataset_key]['columns'].facility_id
def get_matched_set(subcomponent):
"""Method for collecting the data for the reducer_fn based on a
connected subcomponent. Returns the records of the matched set and a dictionary
that records the distances between the facilities.
"""
records = []
distances = {}
manual_matches = set([])
for n in s:
ds, facility_id = deconstruct_match_id(n)
df = facility_datasets[ds]['df']
id_column = facility_datasets[ds]['columns'].facility_id
            record = df[df[id_column].astype(str) == facility_id].to_dict(orient='records')[0]
record['dataset'] = ds
record['match_id'] = n
records.append(record)
for u, v in G.edges(n):
edge_data = G.get_edge_data(u, v)
distances[(u, v)] = edge_data['weight']
if ds == authoritative_dataset and edge_data.get('manual_override', False):
connected_ds, _ = deconstruct_match_id(v)
manual_matches.add((u, connected_ds, v))
return records, distances, manual_matches
def construct_match_id(dataset_key, facility_id):
id_column = get_id_column(dataset_key)
return '{}{}{}'.format(
dataset_key,
MATCH_ID_SEP,
facility_id
)
def deconstruct_match_id(match_id):
return match_id.split(MATCH_ID_SEP)
assert authoritative_dataset in facility_datasets
# check that dataset ID columns are unique
dataset_id_columns = [
get_id_column(dataset_key)
for dataset_key in facility_datasets
]
if len(set(dataset_id_columns)) != len(dataset_id_columns):
raise Exception('Dataset ID column names must be unique.')
# Setup a distinct order of datasets
dataset_order = [authoritative_dataset] + sorted([x for x in facility_datasets
if x != authoritative_dataset])
# Set of match_ids
ids = []
# Set of (x,y) points aligned with ids, in meters_crs
pts = []
# Mapping from match_id -> point
ids_to_pts = {}
# Construct a reprojected geodataframe per dataset, and
# record the match ids and points for usage in the KNN
# computation below.
for dataset_key in dataset_order:
df = facility_datasets[dataset_key]['df']
meters_df = df.to_crs(meters_crs)
id_column = get_id_column(dataset_key)
meters_df['match_id'] = '{}{}'.format(dataset_key, MATCH_ID_SEP) + \
meters_df[id_column].astype(str)
facility_datasets[dataset_key]['meters_df'] = meters_df
for _, row in meters_df.iterrows():
match_id = row['match_id']
pt = (row['geometry'].x, row['geometry'].y)
ids_to_pts[match_id] = pt
ids.append(match_id)
pts.append(pt)
# Compute the K Nearest Neighbors for all points in the dataset.
kd_tree = libpysal.cg.KDTree(np.array(pts))
nearest_neighbors = libpysal.weights.KNN(kd_tree, k=nearest_n, ids=ids).neighbors
# For every match, make an edge in a graph. Don't add an edge between
# points that are further than the max distance. The weight of the edge
# is the distance between them in meters.
G = nx.Graph()
for match_id in nearest_neighbors:
source_pt = ids_to_pts[match_id]
G.add_node(match_id)
for neighbor_id in nearest_neighbors[match_id]:
neighbor_pt = ids_to_pts[neighbor_id]
dist = euclidean(source_pt, neighbor_pt)
if dist <= max_distance and not G.has_edge(match_id, neighbor_id):
G.add_edge(match_id, neighbor_id, weight=dist)
# Create edges for manual matches and mark them as such.
if manual_matches_df is not None:
auth_id_column = facility_datasets[authoritative_dataset]['columns'].facility_id
for _, row in manual_matches_df.iterrows():
# Get the authoritative dataset ID (required for each row)
auth_id = construct_match_id(authoritative_dataset, row[auth_id_column])
source_pt = ids_to_pts[auth_id]
for dataset_key in facility_datasets:
if dataset_key != authoritative_dataset:
id_column = facility_datasets[dataset_key]['columns'].facility_id
if id_column in row:
if row[id_column]:
neighbor_id = construct_match_id(dataset_key, row[id_column])
neighbor_pt = ids_to_pts[neighbor_id]
dist = euclidean(source_pt, neighbor_pt)
G.add_edge(auth_id, neighbor_id, weight=dist, manual_override=True)
# Set up a dict to be turned into the matches dataframe,
# and a dict that tracks what non-authoritative datasets
# have been matched.
matches = {}
matched_ids = {}
for dataset_key in dataset_order:
matches[get_id_column(dataset_key)] = []
if dataset_key != authoritative_dataset:
matched_ids[dataset_key] = set([])
dataset_columns = dict([(k, facility_datasets[k]['columns']) for k in facility_datasets])
# Iterate over connected components, which gives us the subgraphs that are
# matched, and pass this into the reduce_matches method to
# reduce down each match to a single matched set.
for s in nx.connected_components(G):
# Ignore components that don't have a point from the authoritative dataset.
if authoritative_dataset in [deconstruct_match_id(m)[0] for m in s]:
records, distances, manual_matches = get_matched_set(s)
if len(records) == 1:
reduced_components = [[records[0]['match_id']]]
else:
authoritative_records = [r for r in records if r['dataset'] == authoritative_dataset]
records_to_match = [r for r in records if r['dataset'] != authoritative_dataset]
reduced_components = reducer_fn(authoritative_records,
records_to_match,
distances,
manual_matches,
dataset_columns)
for match_set in reduced_components:
# Ensure that the set has a facility from the authoritative datatset
assert authoritative_dataset in [deconstruct_match_id(match_id)[0]
for match_id in match_set]
ds_ids = {}
for m in match_set:
dataset_key, facility_id = deconstruct_match_id(m)
ds_ids[dataset_key] = facility_id
if dataset_key != authoritative_dataset:
matched_ids[dataset_key].add(facility_id)
for dataset_key in dataset_order:
col = get_id_column(dataset_key)
if not dataset_key in ds_ids:
matches[col].append(None)
else:
matches[col].append(ds_ids[dataset_key])
# Construct the FacilityMatchResult and return
matches_df = pd.DataFrame.from_dict(matches)
unmatched_per_dataset = {}
for dataset_key in matched_ids:
ids = set(facility_datasets[dataset_key]['df'][get_id_column(dataset_key)].astype(str).values)
unmatched_per_dataset[dataset_key] = ids - matched_ids[dataset_key]
# Merge the dataframes, using the geometry from the authoritative dataset and
# prefixing all but the ID columns by the dataset ID.
merged_df = matches_df
for dataset_key in dataset_order:
df = facility_datasets[dataset_key]['df']
id_column = get_id_column(dataset_key)
if dataset_key != authoritative_dataset:
df_prefixed = df.copy().add_prefix('{}_'.format(dataset_key))
df_prefixed = df_prefixed.rename(columns={'{}_{}'.format(dataset_key, id_column): id_column})
df_prefixed = df_prefixed.drop(columns=['{}_geometry'.format(dataset_key)])
else:
df_prefixed = df.copy()
df_prefixed[id_column] = df_prefixed[id_column].astype(str)
merged_df = merged_df.merge(df_prefixed, on=id_column, how='left')
merged_df = gpd.GeoDataFrame(merged_df, crs='epsg:4326') \
.sort_values([facility_datasets[dataset_key]['columns'].facility_id
for dataset_key in dataset_order])
return FacilityMatchResult(merged_df, matches_df, unmatched_per_dataset) | 97f169ccda8cf0b26bfa423936d8b663e6237d22 | 3,656,762 |
def has_field_warning(meta, field_id):
"""Warn if dataset has existing field with same id."""
if meta.has_field(field_id):
print(
"WARN: Field '%s' is already present in dataset, not overwriting."
% field_id
)
print("WARN: Use '--replace' flag to overwrite existing field.")
return 1
return 0 | 1cc5016f8ffcce698bcb53dcf6f307b760d7df55 | 3,656,763 |
def volume(surface):
"""Compute volume of a closed triangulated surface mesh."""
properties = vtk.vtkMassProperties()
properties.SetInput(surface)
properties.Update()
return properties.GetVolume() | 1969e3c6245cd76c50cdea19be41165ff16f73fc | 3,656,764 |
def simulate():
"""
    Simulate one round of the Monty Hall game: pick a door, reveal a goat door, and
    return the prizes behind the originally picked door and the switch door.
"""
doors = getRandomDoorArray()
pickedDoor = chooseDoor()
goatDoor, switchDoor = openGoatDoor(pickedDoor, doors)
return doors[pickedDoor], doors[switchDoor] | 607fc6d0bdb5d24dc68c371c81e9e7028a54631f | 3,656,765 |
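# A rough Monte Carlo sketch using simulate() above, assuming each returned door value
# is truthy when it hides the car: switching should win about twice as often as staying.
stay_wins = 0
switch_wins = 0
for _ in range(10_000):
    stay, switch = simulate()
    stay_wins += bool(stay)
    switch_wins += bool(switch)
print(stay_wins, switch_wins)   # roughly 3333 vs 6667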
def fc_layer(x):
"""Basic Fully Connected (FC) layer with an activation function."""
return x | f26865e13065187363746b8bfe7d95ac221bf236 | 3,656,766 |
from typing import Dict
import collections
def get_slot_counts(cls: type) -> Dict[str, int]:
"""
Collects all of the given class's ``__slots__``, returning a
dict of the form ``{slot_name: count}``.
:param cls: The class whose slots to collect
:return: A :class:`collections.Counter` counting the number of occurrences of each slot
"""
slot_names = (name for name, _ in iter_slots(cls))
return collections.Counter(slot_names) | 7cb7c41c1d4f40aab1acd473f5e1238e4aefad44 | 3,656,767 |
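# A small usage sketch for get_slot_counts above, assuming the iter_slots helper it
# relies on walks the class MRO and yields (slot_name, slot_descriptor) pairs.
class Point:
    __slots__ = ('x', 'y')
class Point3D(Point):
    __slots__ = ('z',)
print(get_slot_counts(Point3D))   # expected: Counter({'x': 1, 'y': 1, 'z': 1})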
def rot6d_to_axisAngle(x):
""""Convert 6d rotation representation to axis angle
Input:
(B,6) Batch of 6-D rotation representations
Output:
(B,3) Batch of corresponding axis angle
"""
rotMat = rot6d_to_rotmat(x)
return rotationMatrix_to_axisAngle(rotMat) | 17b24e0bb7521baa56df034c4e59658d4320c4cf | 3,656,768 |
import six
def within_tolerance(x, y, tolerance):
"""
Check that |x-y| <= tolerance with appropriate norm.
Args:
x: number or array (np array_like)
y: number or array (np array_like)
tolerance: Number or PercentageString
NOTE: Calculates x - y; may raise an error for incompatible shapes.
Usage
=====
The tolerance can be a number:
>>> within_tolerance(10, 9.01, 1)
True
>>> within_tolerance(10, 9.01, 0.5)
False
If tolerance is a percentage, it is a percent of (the norm of) x:
>>> within_tolerance(10, 9.01, '10%')
True
>>> within_tolerance(9.01, 10, '10%')
False
Works for vectors and matrices:
>>> A = np.array([[1,2],[-3,1]])
>>> B = np.array([[1.1, 2], [-2.8, 1]])
>>> diff = round(np.linalg.norm(A-B), 6)
>>> diff
0.223607
>>> within_tolerance(A, B, 0.25)
True
"""
# When used within graders, tolerance has already been
# validated as a Number or PercentageString
if isinstance(tolerance, six.text_type):
tolerance = np.linalg.norm(x) * percentage_as_number(tolerance)
difference = x - y
return np.linalg.norm(difference) <= tolerance | 918b14e33aeca426e24151d7a1eda2d340423b4d | 3,656,769 |
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c | af1f024f4b6d60793fb3aa6ca2bcbe517c4b178f | 3,656,770 |
def model_netradiation(minTair = 0.7,
maxTair = 7.2,
albedoCoefficient = 0.23,
stefanBoltzman = 4.903e-09,
elevation = 0.0,
solarRadiation = 3.0,
vaporPressure = 6.1,
extraSolarRadiation = 11.7):
"""
- Description:
* Title: NetRadiation Model
* Author: Pierre Martre
* Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
Evapotranspiration and canopy and soil temperature calculations
* Institution: INRA Montpellier
            * Abstract: It is calculated at the surface of the canopy and is given by the difference between incoming and outgoing radiation of both short
and long wavelength radiation
- inputs:
* name: minTair
** min : -30
** default : 0.7
** max : 45
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : °C
** description : minimum air temperature
* name: maxTair
** min : -30
** default : 7.2
** max : 45
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : °C
** description : maximum air Temperature
* name: albedoCoefficient
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 1
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 0.23
** inputtype : parameter
** unit :
** description : albedo Coefficient
* name: stefanBoltzman
** parametercategory : constant
** min : 0
** datatype : DOUBLE
** max : 1
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 4.903E-09
** inputtype : parameter
** unit :
** description : stefan Boltzman constant
* name: elevation
** parametercategory : constant
** min : -500
** datatype : DOUBLE
** max : 10000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** default : 0
** inputtype : parameter
** unit : m
** description : elevation
* name: solarRadiation
** min : 0
** default : 3
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m-2 d-1
** description : solar Radiation
* name: vaporPressure
** min : 0
** default : 6.1
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : hPa
** description : vapor Pressure
* name: extraSolarRadiation
** min : 0
** default : 11.7
** max : 1000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** variablecategory : auxiliary
** datatype : DOUBLE
** inputtype : variable
** unit : MJ m2 d-1
** description : extra Solar Radiation
- outputs:
* name: netRadiation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : MJ m-2 d-1
** description : net radiation
* name: netOutGoingLongWaveRadiation
** min : 0
** variablecategory : auxiliary
** max : 5000
** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
** datatype : DOUBLE
** unit : g m-2 d-1
** description : net OutGoing Long Wave Radiation
"""
Nsr = (1.0 - albedoCoefficient) * solarRadiation
clearSkySolarRadiation = (0.75 + (2 * pow(10.0, -5) * elevation)) * extraSolarRadiation
averageT = (pow(maxTair + 273.16, 4) + pow(minTair + 273.16, 4)) / 2.0
surfaceEmissivity = 0.34 - (0.14 * sqrt(vaporPressure / 10.0))
cloudCoverFactor = 1.35 * (solarRadiation / clearSkySolarRadiation) - 0.35
Nolr = stefanBoltzman * averageT * surfaceEmissivity * cloudCoverFactor
netRadiation = Nsr - Nolr
netOutGoingLongWaveRadiation = Nolr
return (netRadiation, netOutGoingLongWaveRadiation) | 369fffe5eef94148baa8526421adbbac7c3477fd | 3,656,771 |
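# A minimal call sketch for model_netradiation above, using its documented defaults;
# it assumes sqrt is available in the module scope (e.g. from math import sqrt).
net_radiation, net_outgoing_lw = model_netradiation(minTair=0.7, maxTair=7.2,
                                                    solarRadiation=3.0,
                                                    vaporPressure=6.1,
                                                    extraSolarRadiation=11.7)
print(net_radiation, net_outgoing_lw)   # net radiation and net outgoing long-wave radiation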
def get_tagset(sentences, with_prefix):
""" Returns the set of entity types appearing in the list of sentences.
If with_prefix is True, it returns both the B- and I- versions for each
entity found. If False, it merges them (i.e., removes the prefix and only
returns the entity type).
"""
iobs = [iob for sent in sentences for (x,iob) in sent]
tagset = set(iobs)
if not with_prefix:
tagset = set([t[2:] for t in list(tagset) if t != 'O'])
return tagset | c0b00f7c5546bfc7fe10b2d4b35998b5dedeba21 | 3,656,772 |
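# Usage sketch for get_tagset above on two toy IOB-tagged sentences.
sents = [[('John', 'B-PER'), ('Smith', 'I-PER'), ('visited', 'O')],
         [('Paris', 'B-LOC')]]
print(get_tagset(sents, with_prefix=True))    # {'B-PER', 'I-PER', 'O', 'B-LOC'}
print(get_tagset(sents, with_prefix=False))   # {'PER', 'LOC'}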
def pdb_to_psi4(starting_geom, mol_name, method, basis_set, charge=0, multiplicity=1, symmetry='C1', geom_opt=True,
sp_energy=False, fixed_dih=None, mem=None, constrain='dihedral', dynamic_level=3,
consecutive_backsteps=None, geom_maxiter=250, xyz_traj=True):
"""
    :param starting_geom: str
        geometry block (element symbols and coordinates) for the molecule section of the input
:param method: list of str
QM method (see psi4 website for options)
If length 2, first one will be used for geom opt and second for spe.
:param basis_set: str
specification of basis set
:param symmetry: str
symmetry of molecule. Default is None.
:param geom_opt: bool
if True, will generate input file for geometry optimization
:param sp_energy: bool
if True, will run a single point energy calculation (if geom_opt also true, SPE calculation will occur after
geom opt
:param fixed_dih: str
string of dihedral that should be fixed at specified angle. Format: "4 7 10 14 90.00"
default: None - will not fix dihedral
Beware:
------
Because of a bug in psi4, dihedral angle can't be exactly 0 (same would apply for 180) so use 0.001 instead
constrain: string. Either 'dihedral' or 'cartesian'
The kind of constrain to use
:param mem: int
memory allocation for calculation
    :return:
        psi4 input string
"""
input_string = ""
if mem is not None:
input_string += "\nmemory {}\n".format(mem)
input_string += "\nmolecule {}".format(mol_name)
input_string += " {\n"
input_string += " symmetry {}\n".format(symmetry)
input_string += " {} {} \n".format(charge, multiplicity)
input_string += starting_geom
input_string += " units Angstrom\n"
input_string += "}\n"
if fixed_dih is not None:
if constrain == 'dihedral':
input_string += '\ndih_string = "{}"'.format(fixed_dih)
# ToDo add string because that's the only thing that seems to work
input_string += '\nset optking { fixed_dihedral = $dih_string\n'
elif constrain == 'cartesian':
input_string += '\n frozen_string = """ \n {} xyz \n {} xyz \n {} xyz \n {} xyz \n"""'.format(fixed_dih[0],
fixed_dih[2],
fixed_dih[4],
fixed_dih[6])
input_string += '\nset optking { opt_coordinates = cartesian\n frozen_cartesian = $frozen_string\n'
else:
raise NameError('Only dihedral or cartesian constraints are valid')
if dynamic_level:
input_string += ' dynamic_level = {}\n'.format(dynamic_level)
if consecutive_backsteps:
input_string += ' consecutive_backsteps = {}\n'.format(consecutive_backsteps)
if geom_maxiter:
input_string += ' geom_maxiter = {}\n'.format(geom_maxiter)
if xyz_traj:
input_string += ' print_trajectory_xyz_file = True '
input_string += '}\n'
if geom_opt:
input_string += "\noptimize('{}/{}')\n".format(method[0], basis_set[0])
if sp_energy:
input_string += "\nenergy('{}/{}')\n".format(method[-1], basis_set[-1])
return input_string | ec9fffe05463f159cd47ce2c809f42d5b4289db4 | 3,656,773 |
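# A minimal sketch of building a psi4 input string with pdb_to_psi4 above, using a
# hypothetical water geometry; no dihedral is fixed, so only the geometry-optimization
# call is emitted.
water_geom = ("  O  0.000  0.000  0.000\n"
              "  H  0.000  0.757  0.587\n"
              "  H  0.000 -0.757  0.587\n")
inp = pdb_to_psi4(water_geom, 'water', method=['hf'], basis_set=['6-31g'],
                  geom_opt=True, sp_energy=False)
print(inp)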
def normpath(s: str) -> str:
"""Normalize path. Just for compatibility with normal python3."""
return s | 30c528b11f75f52275b753c789e2e3d5bf71641c | 3,656,774 |
def threshold_num_spikes(
sorting,
threshold,
threshold_sign,
sampling_frequency=None,
**kwargs
):
"""
Computes and thresholds the num spikes in the sorted dataset with the given sign and value.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
threshold: int or float
The threshold for the given metric
threshold_sign: str
If 'less', will threshold any metric less than the given threshold
If 'less_or_equal', will threshold any metric less than or equal to the given threshold
If 'greater', will threshold any metric greater than the given threshold
If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
threshold sorting extractor
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None, apply_filter=False,
duration_in_frames=None, freq_min=300.0, freq_max=6000.0, unit_ids=None,
verbose=params_dict['verbose'], raise_if_empty=False)
ns = NumSpikes(metric_data=md)
threshold_sorting = ns.threshold_metric(threshold, threshold_sign, **kwargs)
return threshold_sorting | 37974060e23f8dbde2d3dd1246b0583ed16d4a87 | 3,656,775 |
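# A hypothetical usage sketch for threshold_num_spikes above, assuming a
# spikeextractors-style SortingExtractor named `sorting` is available: discard
# units with fewer than 100 spikes by thresholding with the 'less' sign.
curated_sorting = threshold_num_spikes(sorting, threshold=100, threshold_sign='less',
                                       sampling_frequency=30000, verbose=True)
print(curated_sorting.get_unit_ids())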
def mad(stack, axis=0, scale=1.4826):
"""Median absolute deviation,
default is scaled such that +/-MAD covers 50% (between 1/4 and 3/4)
of the standard normal cumulative distribution
"""
stack_abs = np.abs(stack)
med = np.nanmedian(stack_abs, axis=axis)
return scale * np.nanmedian(np.abs(stack_abs - med), axis=axis) | c9425b8006476b11cc559a025597c5b620294b50 | 3,656,776 |
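# Usage sketch for mad above: a single large outlier barely moves the robust
# spread estimate.
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
print(mad(x))      # 1.4826  (scaled median absolute deviation)
print(np.std(x))   # ~39.0   (the classical std is dominated by the outlier)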
import logging
import time
from typing import Any
import sys
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
# LOG is assumed to be the module-level logger that set_up_logging configures below.
LOG = logging.getLogger(__name__)
def set_up_logging(
*,
log_filename: str = "log",
verbosity: int = 0,
use_date_logging: bool = False,
) -> logging.Logger:
"""Set up proper logging."""
# log everything verbosely
LOG.setLevel(logging.DEBUG)
logging.Formatter.converter = time.gmtime
handler: Any
if use_date_logging:
handler = TimedRotatingFileHandler(
filename=log_filename,
when="D",
utc=True,
)
else:
handler = RotatingFileHandler(
filename=log_filename,
maxBytes=1024000000,
backupCount=10,
)
formatter = logging.Formatter(
fmt="%(asctime)s.%(msecs)03dZ - %(levelname)s - %(module)s - %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
LOG.addHandler(handler)
# Provide a stdout handler logging at INFO.
stream_handler = logging.StreamHandler(sys.stdout)
simple_form = logging.Formatter(fmt="%(message)s")
stream_handler.setFormatter(simple_form)
if verbosity > 0:
stream_handler.setLevel(logging.DEBUG)
else:
stream_handler.setLevel(logging.INFO)
LOG.addHandler(stream_handler)
return LOG | 07c7b2623c02d52f8a6468654fa5e3519bf43be9 | 3,656,777 |
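# A minimal usage sketch for set_up_logging above; 'app.log' is just a placeholder
# filename, and the module-level LOG logger is assumed to exist as noted above.
logger = set_up_logging(log_filename='app.log', verbosity=1, use_date_logging=False)
logger.info('service starting')
logger.debug('verbose details go to the log file and, with verbosity > 0, to stdout')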
def _get_window(append, size=(1000, 600)):
"""
Return a handle to a plot window to use for this plot.
If append is False, create a new plot window, otherwise return
a handle to the given window, or the last created window.
Args:
append (Union[bool, PlotWindow]): If true, return the last
created plot window, if PlotWindow, return that window, otherwise
a new window will be created.
size (Tuple[int, int]): The size in px of the new plot window. If append
is not false, this parameter has no effect.
"""
# Set up a plotting window
if append is None or append is False:
win = PlotWindow()
win.win_title = 'ID: '
win.resize(*size)
elif isinstance(append, PlotWindow):
# Append to the given window
win = append
elif isinstance(append, bool):
# Append to the last trace if true
win = PlotWindow.getWindows()[-1]
else:
raise ValueError("Unknown argument to append. Either give a plot window"
" or true to append to the last plot")
return win | 45ff89055db2caa442f55c80042820194554bed8 | 3,656,778 |
def _proxies_dict(proxy):
"""Makes a proxy dict appropriate to pass to requests."""
if not proxy:
return None
return {'http': proxy, 'https': proxy} | ce51015dc652c494dc89bb11e21f18803ba34c85 | 3,656,779 |
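# Usage sketch for _proxies_dict above; the proxy URL is a hypothetical placeholder.
print(_proxies_dict('http://127.0.0.1:8080'))
# {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
print(_proxies_dict(None))   # None, so requests would use no proxy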
def _get_schedule_times(name, date):
"""
Fetch all `from_time` from [Healthcare Schedule Time Slot]
:param name: [Practitioner Schedule]
:param date: [datetime.date]
:return:
"""
mapped_day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
time_slots = frappe.get_all(
'Healthcare Schedule Time Slot',
filters={'parent': name, 'day': mapped_day[date.weekday()]},
fields=['from_time']
)
return list(map(lambda x: x.get('from_time'), time_slots)) | 64de318f8bbe827e40566799172590f0e448a3d5 | 3,656,780 |
def delete(client, data, force=False):
"""
"""
param = {'logical-router-port-id': get_id(client, data)}
if force:
param['force'] = True
request = client.__getattr__(MODULE).DeleteLogicalRouterPort(**param)
response, _ = request.result()
return response | e573b962273f4ffc0ea4b0d5693329013fca4a6b | 3,656,781 |
import warnings
def z_standardization(
spark,
idf,
list_of_cols="all",
drop_cols=[],
pre_existing_model=False,
model_path="NA",
output_mode="replace",
print_impact=False,
):
"""
Standardization is commonly used in data pre-processing process. z_standardization standardizes the selected
attributes of an input dataframe by normalizing each attribute to have standard deviation of 1 and mean of 0. For
each attribute, the standard deviation (s) and mean (u) are calculated and a sample x will be standardized into (
x-u)/s. If the standard deviation of an attribute is 0, it will be excluded in standardization and a warning will
be shown. None values will be kept as None in the output dataframe.
Parameters
----------
spark
Spark Session
idf
Input Dataframe
list_of_cols
List of numerical columns to transform e.g., ["col1","col2"].
Alternatively, columns can be specified in a string format,
where different column names are separated by pipe delimiter “|” e.g., "col1|col2".
"all" can be passed to include all numerical columns for analysis. This is super useful instead of specifying all column names manually.
Please note that this argument is used in conjunction with drop_cols i.e. a column mentioned in
drop_cols argument is not considered for analysis even if it is mentioned in list_of_cols. (Default value = "all")
drop_cols
List of columns to be dropped e.g., ["col1","col2"].
Alternatively, columns can be specified in a string format,
where different column names are separated by pipe delimiter “|” e.g., "col1|col2".
It is most useful when coupled with the “all” value of list_of_cols, when we need to consider all columns except
a few handful of them. (Default value = [])
pre_existing_model
Boolean argument – True or False. True if model files (Mean/stddev for each feature) exists already, False Otherwise (Default value = False)
model_path
If pre_existing_model is True, this argument is path for referring the pre-saved model.
If pre_existing_model is False, this argument can be used for saving the model.
Default "NA" means there is neither pre-existing model nor there is a need to save one.
output_mode
"replace", "append".
“replace” option replaces original columns with transformed column. “append” option append transformed
column to the input dataset with a postfix "_scaled" e.g. column X is appended as X_scaled. (Default value = "replace")
print_impact
True, False (Default value = False)
This argument is to print out the before and after descriptive statistics of rescaled columns.
Returns
-------
DataFrame
Rescaled Dataframe
"""
num_cols = attributeType_segregation(idf)[0]
if list_of_cols == "all":
list_of_cols = num_cols
if isinstance(list_of_cols, str):
list_of_cols = [x.strip() for x in list_of_cols.split("|")]
if isinstance(drop_cols, str):
drop_cols = [x.strip() for x in drop_cols.split("|")]
list_of_cols = list(set([e for e in list_of_cols if e not in drop_cols]))
if any(x not in num_cols for x in list_of_cols):
raise TypeError("Invalid input for Column(s)")
if len(list_of_cols) == 0:
warnings.warn(
"No Standardization Performed - No numerical column(s) to transform"
)
return idf
if output_mode not in ("replace", "append"):
raise TypeError("Invalid input for output_mode")
parameters = []
excluded_cols = []
if pre_existing_model:
df_model = spark.read.parquet(model_path + "/z_standardization")
for i in list_of_cols:
mapped_value = (
df_model.where(F.col("feature") == i)
.select("parameters")
.rdd.flatMap(lambda x: x)
.collect()[0]
)
parameters.append(mapped_value)
else:
for i in list_of_cols:
mean, stddev = idf.select(F.mean(i), F.stddev(i)).first()
parameters.append(
[float(mean) if mean else None, float(stddev) if stddev else None]
)
if stddev:
if round(stddev, 5) == 0.0:
excluded_cols.append(i)
else:
excluded_cols.append(i)
if len(excluded_cols) > 0:
warnings.warn(
"The following column(s) are excluded from standardization because the standard deviation is zero:"
+ str(excluded_cols)
)
odf = idf
for index, i in enumerate(list_of_cols):
if i not in excluded_cols:
modify_col = (i + "_scaled") if (output_mode == "append") else i
odf = odf.withColumn(
modify_col, (F.col(i) - parameters[index][0]) / parameters[index][1]
)
if (not pre_existing_model) & (model_path != "NA"):
df_model = spark.createDataFrame(
zip(list_of_cols, parameters), schema=["feature", "parameters"]
)
df_model.coalesce(1).write.parquet(
model_path + "/z_standardization", mode="overwrite"
)
if print_impact:
if output_mode == "replace":
output_cols = list_of_cols
else:
output_cols = [
(i + "_scaled") for i in list_of_cols if i not in excluded_cols
]
print("Before: ")
idf.select(list_of_cols).describe().show(5, False)
print("After: ")
odf.select(output_cols).describe().show(5, False)
return odf | 962a7aa5721cc7d672c858d573af3c1d021e74d7 | 3,656,782 |
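# A hypothetical pyspark sketch for z_standardization above: `spark` is an existing
# SparkSession and `df` a Spark DataFrame with numeric columns 'age' and 'income'.
scaled_df = z_standardization(spark, df, list_of_cols="age|income",
                              output_mode="append", print_impact=True)
# appends 'age_scaled' and 'income_scaled' columns with mean ~0 and stddev ~1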
def event_stats(wit_df, wit_im, wit_area, pkey='SYSID'):
"""
Compute inundation event stats with given wit wetness, events defined by (start_time, end_time)
and polygon areas
input:
wit_df: wetness computed from wit data
wit_im: inundation event
wit_area: polygon areas indexed by the key
output:
dataframe of event stats
"""
grouped_im = wit_im[['start_time', 'end_time']].groupby(pkey)
return wit_df.groupby(pkey).apply(get_im_stats, im_time=grouped_im, wit_area=wit_area).droplevel(0) | f1bcef7604e15fc9b5a845ed45b976e22655d469 | 3,656,783 |
def upvote_book(book_id):
"""
Allows a user to upvote a book.
The upvotes field on the book document is updated,
as well as the booksUpvoted array on the user document
and the upvotedBy array on the book document.
"""
user_to_update = mongo.db.users.find_one({"username": session["user"]})
username = user_to_update.get("username")
mongo.db.books.update_one({"_id": ObjectId(book_id)}, {
'$inc': {'upvotes': +1}})
mongo.db.books.update_one({"_id": ObjectId(book_id)}, {
'$push': {'upvotedBy': username}})
mongo.db.users.update_one(
user_to_update, {'$push': {'booksUpvoted': ObjectId(book_id)}})
flash("Book has been upvoted!")
return redirect(url_for("get_book", book_id=book_id)) | 6a27c46e9540b871f4123c166d9cecaebc016c6b | 3,656,784 |
import numpy
def bottlegrowth_split_mig(params, ns):
"""
params = (nuB, nuF, m, T, Ts)
ns = [n1, n2]
Instantanous size change followed by exponential growth then split with
migration.
nuB: Ratio of population size after instantanous change to ancient
population size
nuF: Ratio of contempoary to ancient population size
m: Migration rate between the two populations (2*Na*m).
T: Time in the past at which instantaneous change happened and growth began
(in units of 2*Na generations)
Ts: Time in the past at which the two populations split.
n1, n2: Sample sizes of resulting Spectrum.
"""
nuB, nuF, m, T, Ts = params
nu_func = lambda t: [nuB * numpy.exp(numpy.log(nuF/nuB) * t / T)]
sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
fs = moments.Spectrum(sts)
fs.integrate(nu_func, T - Ts, dt_fac=0.01)
# we split the population
fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
nu0 = nu_func(T - Ts)[0]
nu_func = lambda t: 2 * [nu0 * numpy.exp(numpy.log(nuF/nu0) * t / Ts)]
fs.integrate(nu_func, Ts, m = numpy.array([[0, m], [m, 0]]))
return fs | dd191a7246d6575b784e61d8e1def17c0f143a7d | 3,656,785 |
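# A hypothetical call sketch for bottlegrowth_split_mig above, assuming the `moments`
# package is installed: a bottleneck to 0.2*Na, recovery to 3*Na, symmetric migration
# 2*Na*m = 1, growth starting 0.5 (in 2*Na generations) ago and a split 0.1 ago.
fs = bottlegrowth_split_mig(params=(0.2, 3.0, 1.0, 0.5, 0.1), ns=[10, 10])
print(fs.shape)   # (11, 11) -- the 2D frequency spectrum for samples of size 10 and 10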
def arch_to_macho(arch):
"""Converts an arch string into a macho arch tuple."""
try:
arch = rustcall(lib.symbolic_arch_to_macho, encode_str(arch))
return (arch.cputype, arch.cpusubtype)
except ignore_arch_exc:
pass | 2ffc1be349fc8438bc5b49bab1d9c79e8cbebdd3 | 3,656,786 |
def limit_ops_skeleton(**kwargs):
"""This function provides a skeleton for limit ops calculations"""
group_phase = kwargs['group_phase']
tail = kwargs['tail']
loading_phase = kwargs['loading_phase']
final_phase = kwargs['final_phase']
grouped_df = limit_ops_general_groups(
**group_phase
)
grouped_df = grouped_df.tail(tail)
loaded_table = load_and_rename(**loading_phase)
final_phase['first_df'] = grouped_df
final_phase['second_df'] = loaded_table
final_values = limit_ops_formatter(**final_phase)
return final_values | fb2dd1da8f2229794705376e15076477160bce35 | 3,656,787 |
def xy2traceset(xpos, ypos, **kwargs):
"""Convert from x,y positions to a trace set.
Parameters
----------
xpos, ypos : array-like
X,Y positions corresponding as [nx,Ntrace] arrays.
invvar : array-like, optional
Inverse variances for fitting.
func : :class:`str`, optional
Function type for fitting; defaults to 'legendre'.
ncoeff : :class:`int`, optional
Number of coefficients to fit. Defaults to 3.
xmin, xmax : :class:`float`, optional
Explicitly set minimum and maximum values, instead of computing
them from `xpos`.
maxiter : :class:`int`, optional
Maximum number of rejection iterations; set to 0 for no rejection;
default to 10.
inmask : array-like, optional
Mask set to 1 for good points and 0 for rejected points;
same dimensions as `xpos`, `ypos`. Points rejected by `inmask`
are always rejected from the fits (the rejection is "sticky"),
and will also be marked as rejected in the outmask attribute.
ia, inputans, inputfunc : array-like, optional
These arguments will be passed to :func:`func_fit`.
xjumplo : :class:`float`, optional
x position locating start of an x discontinuity
xjumphi : :class:`float`, optional
x position locating end of that x discontinuity
xjumpval : :class:`float`, optional
magnitude of the discontinuity "jump" between those bounds
(previous 3 keywords motivated by BOSS 2-phase readout)
Returns
-------
:class:`TraceSet`
A :class:`TraceSet` object.
"""
return TraceSet(xpos, ypos, **kwargs) | 7f4146600678cdb3699b239bf49a5d6062ef2a2e | 3,656,788 |
import inspect
def Fn(name, f, n_out=1): # pylint: disable=invalid-name
"""Returns a layer with no weights that applies the function `f`.
`f` can take and return any number of arguments, and takes only positional
arguments -- no default or keyword arguments. It often uses JAX-numpy (`jnp`).
The following, for example, would create a layer that takes two inputs and
returns two outputs -- element-wise sums and maxima:
`Fn('SumAndMax', lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)), n_out=2)`
The layer's number of inputs (`n_in`) is automatically set to number of
positional arguments in `f`, but you must explicitly set the number of
outputs (`n_out`) whenever it's not the default value 1.
Args:
name: Class-like name for the resulting layer; for use in debugging.
f: Pure function from input tensors to output tensors, where each input
tensor is a separate positional arg, e.g., `f(x0, x1) --> x0 + x1`.
Output tensors must be packaged as specified in the `Layer` class
docstring.
n_out: Number of outputs promised by the layer; default value 1.
Returns:
Layer executing the function `f`.
"""
argspec = inspect.getfullargspec(f)
if argspec.defaults is not None:
raise ValueError('Function has default arguments (not allowed).')
if argspec.varkw is not None:
raise ValueError('Function has keyword arguments (not allowed).')
if argspec.varargs is not None:
raise ValueError('Function has variable args (not allowed).')
def _forward(xs): # pylint: disable=invalid-name
if not isinstance(xs, (tuple, list)):
xs = (xs,)
return f(*xs)
n_in = len(argspec.args)
name = name or 'Fn'
return PureLayer(_forward, n_in=n_in, n_out=n_out, name=name) | 356753d0a10ee8294d4072f559657b8a032974eb | 3,656,789 |
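# A usage sketch for Fn above, mirroring its docstring example; it assumes a
# trax/jax-style environment where jnp is jax.numpy.
import jax.numpy as jnp
sum_and_max = Fn('SumAndMax',
                 lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)),
                 n_out=2)
# sum_and_max now behaves as a weightless layer with n_in=2 and n_out=2.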
import slate3k as slate
import logging
def leer_pdf_slate(ubicacion_archivo, password=None):
"""
    Uses the slate3k library to load a PDF file and extract the text from its pages.
    :param ubicacion_archivo: (str). Path to the PDF file to be read.
    :param password: (str). Default value: None. Optional parameter for reading PDF \
        files that are password protected.
    :return: (list). List of strings containing the text extracted from each page of the PDF.
    """
    # Keep slate's warnings quiet
logging.getLogger('pdfminer').setLevel(logging.ERROR)
    # Open the file and extract the text from its pages
with open(ubicacion_archivo, 'rb') as f:
if password is not None:
paginas = slate.PDF(f, password)
else:
paginas = slate.PDF(f)
    # Return the extracted text
return paginas | 3e52c463238f1ecec30d34661eb8f53a6cf031a7 | 3,656,790 |
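# A hypothetical usage sketch for leer_pdf_slate above, assuming slate3k is installed
# and 'report.pdf' exists in the working directory.
paginas = leer_pdf_slate('report.pdf')
print(len(paginas), 'pages extracted')
print(paginas[0][:200])   # first 200 characters of page 1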
def gen_run_entry_str(query_id, doc_id, rank, score, run_id):
"""A simple function to generate one run entry.
:param query_id: query id
:param doc_id: document id
:param rank: entry rank
:param score: entry score
:param run_id: run id
"""
return f'{query_id} Q0 {doc_id} {rank} {score} {run_id}' | 657c59fea34e4aed2159337360c973dc99b53082 | 3,656,791 |
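# Usage sketch for gen_run_entry_str above, producing one TREC-style run-file line.
print(gen_run_entry_str('q1', 'doc42', 1, 12.5, 'bm25_run'))
# q1 Q0 doc42 1 12.5 bm25_run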
from pathlib import Path
def remove_template(args, output_file_name):
"""
    Remove the --template argument from args; called when building the template itself.
    :param args: list of command-line arguments
    :param output_file_name: name of the output file
    :return: (adjusted args, template_name)
"""
template_name = ''
dir_name = ''
template_found = False
for i in args:
if i.startswith('--template'):
# print_fun('FOUND')
args.remove(i)
# i ='--template=/s/telos/common/sjm-doc-template.tex'
# eq_loc = i.find("=") + 1
# dir_end = len(i) - i[::-1].find('/') - 1
# dir_name = i[eq_loc:dir_end]
# template_name = i[dir_end + 1:-4]
# template_found = True
# new
p = Path(i.split('=')[1])
dir_name = str(p.parent)
template_name = str(p.name)
template_found = True
# print_fun(template_name)
break
if not template_found:
raise ValueError('\n\n\nERROR: making template, need cla: --template=/template name... command line option!\n'
f'Args are {args}\nAborting.\n\n')
return
args, trash = adjust_output_file(args, dir_name, output_file_name)
return args, template_name | 652fa0112dd0b5287e1f98c5e70f84cacaa979c1 | 3,656,792 |
def replaceext(filepath, new_ext, *considered_exts):
"""replace extension of filepath with new_ext
filepath: a file path
new_ext: extension the returned filepath should have (e.g ".ext")
considered_exts: Each is a case insensitive extension that should be considered a
single extension and replaced accordingly. e.g. if you pass .tar.gz, file.tar.gz
becomes file.new_ext instead of file.tar.new_ext
returns: filepath with its extension replaced
"""
root = splitext(filepath, *considered_exts)[0]
return root + new_ext | 4e1abee01270921d01de1e75614612dcec8485d7 | 3,656,793 |
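# Usage sketch for replaceext above, assuming the splitext helper it wraps treats the
# extensions passed in considered_exts as single units.
print(replaceext('data/archive.tar.gz', '.zip', '.tar.gz'))   # data/archive.zip
print(replaceext('notes.txt', '.md'))                         # notes.md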
def textarea(name, content="", id=NotGiven, **attrs):
"""Create a text input area.
"""
attrs["name"] = name
_set_id_attr(attrs, id, name)
return HTML.tag("textarea", content, **attrs) | da85bdeb2d819eaa2e8109036087700afd270a21 | 3,656,794 |
import os
def get_score_checkpoint(loss_score):
"""Retrieves the path to a checkpoint file."""
name = "{}{:4f}.pyth".format(_SCORE_NAME_PREFIX, loss_score)
return os.path.join(get_checkpoint_dir(), name) | 65e147968c738a0b893e5fc25bebbdb0d75c52de | 3,656,795 |
def StretchContrast(pixlist, minmin=0, maxmax=0xff):
""" Stretch the current image row to the maximum dynamic range with
minmin mapped to black(0x00) and maxmax mapped to white(0xff) and
all other pixel values stretched accordingly."""
if minmin < 0: minmin = 0 # pixel minimum is 0
if maxmax > 0xff: maxmax = 0xff # pixel maximum is 255
if maxmax < minmin: maxmax = minmin # range sanity
min, max = maxmax, minmin
for pix in pixlist:
if pix < min and pix >= minmin:
min = pix
if pix > max and pix <= maxmax:
max = pix
if min > max: min = max
if min == max:
f = 1.0
else:
f = 255.0 / (max - min)
n = 0
newpixlist= []
for pix in pixlist:
if pix < minmin: pix = minmin
if pix > maxmax: pix = maxmax
pix = int((pix - min) * f)
newpixlist.append (pix)
return newpixlist | 5f511b4a8bd053d503618767fee06597f1688619 | 3,656,796 |
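# Usage sketch for StretchContrast above: a low-contrast row is stretched so its
# darkest pixel maps to 0 and its brightest to 255.
print(StretchContrast([10, 20, 30]))   # [0, 127, 255]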
def detection(array, psf, bkg_sigma=1, mode='lpeaks', matched_filter=False,
mask=True, snr_thresh=5, plot=True, debug=False,
full_output=False, verbose=True, save_plot=None, plot_title=None,
angscale=False, pxscale=0.01):
""" Finds blobs in a 2d array. The algorithm is designed for automatically
finding planets in post-processed high contrast final frames. Blob can be
defined as a region of an image in which some properties are constant or
vary within a prescribed range of values. See <Notes> below to read about
the algorithm details.
Parameters
----------
array : array_like, 2d
Input frame.
psf : array_like
Input psf, normalized with ``vip_hci.phot.normalize_psf``.
bkg_sigma : float, optional
        The number of standard deviations above the clipped median for setting the
background level.
mode : {'lpeaks','log','dog'}, optional
Sets with algorithm to use. Each algorithm yields different results.
matched_filter : bool, optional
Whether to correlate with the psf of not.
mask : bool, optional
Whether to mask the central region (circular aperture of 2*fwhm radius).
snr_thresh : float, optional
SNR threshold for deciding whether the blob is a detection or not.
plot : bool, optional
If True plots the frame showing the detected blobs on top.
debug : bool, optional
Whether to print and plot additional/intermediate results.
full_output : bool, optional
Whether to output just the coordinates of blobs that fulfill the SNR
constraint or a table with all the blobs and the peak pixels and SNR.
verbose : bool, optional
Whether to print to stdout information about found blobs.
save_plot: string
If provided, the plot is saved to the path.
plot_title : str, optional
Title of the plot.
angscale: bool, optional
If True the plot axes are converted to angular scale.
pxscale : float, optional
Pixel scale in arcseconds/px. Default 0.01 for Keck/NIRC2.
Returns
-------
yy, xx : array_like
Two vectors with the y and x coordinates of the centers of the sources
(potential planets).
If full_output is True then a table with all the candidates that passed the
2d Gaussian fit constrains and their S/N is returned.
Notes
-----
The FWHM of the PSF is measured directly on the provided array. If the
parameter matched_filter is True then the PSF is used to run a matched
filter (correlation) which is equivalent to a convolution filter. Filtering
the image will smooth the noise and maximize detectability of objects with a
shape similar to the kernel.
The background level or threshold is found with sigma clipped statistics
(5 sigma over the median) on the image/correlated image. Then 5 different
strategies can be used to detect the blobs (potential planets):
Local maxima + 2d Gaussian fit. The local peaks above the background on the
(correlated) frame are detected. A maximum filter is used for finding local
maxima. This operation dilates the original image and merges neighboring
local maxima closer than the size of the dilation. Locations where the
original image is equal to the dilated image are returned as local maxima.
The minimum separation between the peaks is 1*FWHM. A 2d Gaussian fit is
done on each of the maxima constraining the position on the subimage and the
sigma of the fit. Finally the blobs are filtered based on its SNR.
Laplacian of Gaussian + 2d Gaussian fit. It computes the Laplacian of
Gaussian images with successively increasing standard deviation and stacks
    them up in a cube. Blobs are local maxima in this cube. LoG assumes that
    the blobs are bright on a dark background. A 2d Gaussian fit is done
on each of the candidates constraining the position on the subimage and the
sigma of the fit. Finally the blobs are filtered based on its SNR.
Difference of Gaussians. This is a faster approximation of LoG approach. In
this case the image is blurred with increasing standard deviations and the
    differences between successively blurred images are stacked up in a cube.
    DoG assumes that the blobs are bright on a dark background. A 2d
Gaussian fit is done on each of the candidates constraining the position on
the subimage and the sigma of the fit. Finally the blobs are filtered based
on its SNR.
"""
def check_blobs(array_padded, coords_temp, fwhm, debug):
y_temp = coords_temp[:,0]
x_temp = coords_temp[:,1]
coords = []
# Fitting a 2d gaussian to each local maxima position
for y, x in zip(y_temp, x_temp):
subsi = 2 * int(np.ceil(fwhm))
if subsi %2 == 0:
subsi += 1
subim, suby, subx = get_square(array_padded, subsi, y+pad, x+pad,
position=True, force=True)
cy, cx = frame_center(subim)
gauss = models.Gaussian2D(amplitude=subim.max(), x_mean=cx,
y_mean=cy, theta=0,
x_stddev=fwhm*gaussian_fwhm_to_sigma,
y_stddev=fwhm*gaussian_fwhm_to_sigma)
sy, sx = np.indices(subim.shape)
fitter = fitting.LevMarLSQFitter()
fit = fitter(gauss, sx, sy, subim)
# checking that the amplitude is positive > 0
# checking whether the x and y centroids of the 2d gaussian fit
# coincide with the center of the subimage (within 2px error)
# checking whether the mean of the fwhm in y and x of the fit
# are close to the FWHM_PSF with a margin of 3px
fwhm_y = fit.y_stddev.value*gaussian_sigma_to_fwhm
fwhm_x = fit.x_stddev.value*gaussian_sigma_to_fwhm
mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
if fit.amplitude.value > 0 \
and np.allclose(fit.y_mean.value, cy, atol=2) \
and np.allclose(fit.x_mean.value, cx, atol=2) \
and np.allclose(mean_fwhm_fit, fwhm, atol=3):
coords.append((suby + fit.y_mean.value,
subx + fit.x_mean.value))
if debug:
print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
print('fit peak = {:.3f}'.format(fit.amplitude.value))
msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
print(msg.format(fwhm_y, fwhm_x))
print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
pp_subplots(subim, colorb=True, axis=False, dpi=60)
return coords
def print_coords(coords):
print('Blobs found:', len(coords))
print(' ycen xcen')
print('------ ------')
for i in range(len(coords[:, 0])):
print('{:.3f} \t {:.3f}'.format(coords[i,0], coords[i,1]))
def print_abort():
if verbose:
print(sep)
print('No potential sources found')
print(sep)
# --------------------------------------------------------------------------
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if psf.ndim != 2 and psf.shape[0] < array.shape[0]:
raise TypeError('Input psf is not a 2d array or has wrong size')
# Getting the FWHM from the PSF array
cenpsf = frame_center(psf)
outdf = fit_2dgaussian(psf, cent=(cenpsf), debug=debug, full_output=True)
fwhm_x, fwhm_y = outdf['fwhm_x'], outdf['fwhm_y']
fwhm = np.mean([fwhm_x, fwhm_y])
if verbose:
print('FWHM = {:.2f} pxs\n'.format(fwhm))
if debug:
print('FWHM_y', fwhm_y)
print('FWHM_x', fwhm_x)
# Masking the center, 2*lambda/D is the expected IWA
if mask:
array = mask_circle(array, radius=fwhm)
# Matched filter
if matched_filter:
frame_det = correlate(array, psf)
else:
frame_det = array
# Estimation of background level
_, median, stddev = sigma_clipped_stats(frame_det, sigma=5, iters=None)
bkg_level = median + (stddev * bkg_sigma)
if debug:
print('Sigma clipped median = {:.3f}'.format(median))
print('Sigma clipped stddev = {:.3f}'.format(stddev))
print('Background threshold = {:.3f}'.format(bkg_level))
print()
if mode == 'lpeaks' or mode == 'log' or mode == 'dog':
# Padding the image with zeros to avoid errors at the edges
pad = 10
        array_padded = np.pad(array, pad, 'constant', constant_values=0)
if debug and plot and matched_filter:
print('Input frame after matched filtering:')
pp_subplots(frame_det, rows=2, colorb=True)
if mode == 'lpeaks':
# Finding local peaks (can be done in the correlated frame)
coords_temp = peak_local_max(frame_det, threshold_abs=bkg_level,
min_distance=int(np.ceil(fwhm)),
num_peaks=20)
coords = check_blobs(array_padded, coords_temp, fwhm, debug)
coords = np.array(coords)
if verbose and coords.shape[0] > 0:
print_coords(coords)
elif mode == 'log':
sigma = fwhm*gaussian_fwhm_to_sigma
coords = feature.blob_log(frame_det.astype('float'),
threshold=bkg_level,
min_sigma=sigma-.5, max_sigma=sigma+.5)
if len(coords) == 0:
print_abort()
return 0, 0
coords = coords[:,:2]
coords = check_blobs(array_padded, coords, fwhm, debug)
coords = np.array(coords)
if coords.shape[0] > 0 and verbose:
print_coords(coords)
elif mode == 'dog':
sigma = fwhm*gaussian_fwhm_to_sigma
coords = feature.blob_dog(frame_det.astype('float'),
threshold=bkg_level, min_sigma=sigma-.5,
max_sigma=sigma+.5)
if len(coords) == 0:
print_abort()
return 0, 0
coords = coords[:, :2]
coords = check_blobs(array_padded, coords, fwhm, debug)
coords = np.array(coords)
if coords.shape[0] > 0 and verbose:
print_coords(coords)
else:
msg = 'Wrong mode. Available modes: lpeaks, log, dog.'
raise TypeError(msg)
if coords.shape[0] == 0:
print_abort()
return 0, 0
yy = coords[:, 0]
xx = coords[:, 1]
yy_final = []
xx_final = []
yy_out = []
xx_out = []
snr_list = []
xx -= pad
yy -= pad
# Checking S/N for potential sources
for i in range(yy.shape[0]):
y = yy[i]
x = xx[i]
if verbose:
print(sep)
print('X,Y = ({:.1f},{:.1f})'.format(x,y))
snr = snr_ss(array, (x,y), fwhm, False, verbose=False)
snr_list.append(snr)
if snr >= snr_thresh:
if verbose:
_ = frame_quick_report(array, fwhm, (x,y), verbose=verbose)
yy_final.append(y)
xx_final.append(x)
else:
yy_out.append(y)
xx_out.append(x)
if verbose:
print('S/N constraint NOT fulfilled (S/N = {:.3f})'.format(snr))
if debug:
_ = frame_quick_report(array, fwhm, (x,y), verbose=verbose)
if debug or full_output:
table = Table([yy.tolist(), xx.tolist(), snr_list],
names=('y', 'x', 'px_snr'))
table.sort('px_snr')
yy_final = np.array(yy_final)
xx_final = np.array(xx_final)
yy_out = np.array(yy_out)
xx_out = np.array(xx_out)
if plot:
coords = list(zip(xx_out.tolist() + xx_final.tolist(),
yy_out.tolist() + yy_final.tolist()))
circlealpha = [0.3] * len(xx_out)
circlealpha += [1] * len(xx_final)
pp_subplots(array, circle=coords, circlealpha=circlealpha,
circlelabel=True, circlerad=fwhm, save=save_plot, dpi=120,
angscale=angscale, pxscale=pxscale, title=plot_title)
if debug:
print(table)
if full_output:
return table
else:
return yy_final, xx_final | 13b9024538b9edff362b409328ebd6cdbbf5fd5b | 3,656,797 |
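# Illustrative sketch (not part of the dataset row above). The routine that ends here
# detects blob candidates via local peaks / LoG / DoG and then vets each candidate with
# a constrained 2d Gaussian fit. The standalone example below reproduces that core idea
# on a synthetic frame; every name and value in it is chosen for illustration only.
import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm
from skimage import feature

fwhm = 4.0
sigma = fwhm * gaussian_fwhm_to_sigma
rng = np.random.default_rng(0)
frame = rng.normal(0.0, 1.0, (101, 101))

# Inject one fake Gaussian source at (y, x) = (30, 70) with amplitude 20
yy, xx = np.indices(frame.shape)
frame += 20.0 * np.exp(-((yy - 30) ** 2 + (xx - 70) ** 2) / (2 * sigma ** 2))

# Step 1: Laplacian-of-Gaussian candidate detection around the expected blob size
candidates = feature.blob_log(frame, min_sigma=sigma - 0.5, max_sigma=sigma + 0.5,
                              threshold=5.0)

# Step 2: vet each candidate with a 2d Gaussian fit, as in the routine above
fitter = fitting.LevMarLSQFitter()
size = 2 * int(np.ceil(fwhm)) + 1
half = size // 2
for y0, x0, _ in candidates:
    y0, x0 = int(y0), int(x0)
    if not (half <= y0 < frame.shape[0] - half and half <= x0 < frame.shape[1] - half):
        continue  # skip candidates too close to the border
    sub = frame[y0 - half: y0 + half + 1, x0 - half: x0 + half + 1]
    gy, gx = np.indices(sub.shape)
    g_init = models.Gaussian2D(amplitude=sub.max(), x_mean=half, y_mean=half,
                               x_stddev=sigma, y_stddev=sigma)
    fit = fitter(g_init, gx, gy, sub)
    mean_fwhm = gaussian_sigma_to_fwhm * np.mean([abs(fit.x_stddev.value),
                                                  abs(fit.y_stddev.value)])
    ok = fit.amplitude.value > 0 and np.isclose(mean_fwhm, fwhm, atol=3)
    print('candidate (y={}, x={}) accepted: {}'.format(y0, x0, ok))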
def get_database_uri(application):
""" Returns database URI. Prefer SQLALCHEMY_DATABASE_URI over components."""
if application.config.get('SQLALCHEMY_DATABASE_URI'):
return application.config['SQLALCHEMY_DATABASE_URI']
return '{driver}://{username}:{password}@{host}:{port}/{name}'\
.format(driver=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_DRIVER'],
username=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_USERNAME'),
password=application.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_DATABASE_PASSWORD'),
host=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_HOST'],
port=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_PORT'],
name=application.config['EQ_SERVER_SIDE_STORAGE_DATABASE_NAME']) | 6b04a9518798aa3392cdf41667e5edf1fdaa5125 | 3,656,798 |
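# Minimal usage sketch for get_database_uri above. The application object and its
# secret store are stubbed out here purely for illustration; in the real service they
# come from the surrounding Flask application setup. All names and values are hypothetical.
class _StubSecretStore:
    def __init__(self, secrets):
        self._secrets = secrets

    def get_secret_by_name(self, name):
        return self._secrets[name]


class _StubApp:
    config = {
        'EQ_SERVER_SIDE_STORAGE_DATABASE_DRIVER': 'postgresql',
        'EQ_SERVER_SIDE_STORAGE_DATABASE_HOST': 'localhost',
        'EQ_SERVER_SIDE_STORAGE_DATABASE_PORT': '5432',
        'EQ_SERVER_SIDE_STORAGE_DATABASE_NAME': 'questionnaire',
    }
    eq = {'secret_store': _StubSecretStore({
        'EQ_SERVER_SIDE_STORAGE_DATABASE_USERNAME': 'eq_user',
        'EQ_SERVER_SIDE_STORAGE_DATABASE_PASSWORD': 'secret',
    })}


print(get_database_uri(_StubApp()))
# -> postgresql://eq_user:secret@localhost:5432/questionnaire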
from collections import OrderedDict

import numpy as np

# Assumed context: CZSC, Freq and Signal come from the czsc package and SMA from its
# TA helpers (e.g. talib-style SMA); exact import paths depend on the czsc version in use.
def get_s_vol_single_sma(c: CZSC, di: int = 1, t_seq=(5, 10, 20, 60)) -> OrderedDict:
"""获取倒数第i根K线的成交量单均线信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K成交量"
for t in t_seq:
x1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="其他", v2='其他', v3='其他')
x2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="其他", v2='其他', v3='其他')
s[x1.key] = x1.value
s[x2.key] = x2.value
min_k_nums = max(t_seq) + 10
if len(c.bars_raw) < min_k_nums:
return s
    if di == 1:
        vol = np.array([x.vol for x in c.bars_raw[-min_k_nums:]], dtype=float)
    else:
        vol = np.array([x.vol for x in c.bars_raw[-min_k_nums-di+1:-di+1]], dtype=float)
for t in t_seq:
sma = SMA(vol[-t-10:], timeperiod=t)
if vol[-1] >= sma[-1]:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="多头")
else:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="空头")
s[v1.key] = v1.value
if sma[-1] >= sma[-2]:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向上")
else:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向下")
s[v2.key] = v2.value
return s | d4453ec1e52ee2c19448855e0011b6ac31d5755b | 3,656,799 |
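# Illustrative sketch of the volume/SMA comparison performed in get_s_vol_single_sma,
# without a full CZSC object. SMA here is computed with numpy's convolution; the czsc
# version uses its own (talib-style) SMA helper. All names below are for illustration only.
import numpy as np

def simple_sma(values, timeperiod):
    """Trailing simple moving average; the first timeperiod-1 entries are NaN."""
    out = np.full(len(values), np.nan)
    kernel = np.ones(timeperiod) / timeperiod
    out[timeperiod - 1:] = np.convolve(values, kernel, mode='valid')
    return out

vol = np.array([100, 120, 90, 150, 130, 170, 160, 180, 140, 200], dtype=float)
t = 5
sma = simple_sma(vol, t)

# Same classification as in get_s_vol_single_sma:
long_short = '多头' if vol[-1] >= sma[-1] else '空头'   # volume above/below its SMA
direction = '向上' if sma[-1] >= sma[-2] else '向下'     # SMA rising/falling
print(f'倒1K成交量 SMA{t}多空: {long_short}, SMA{t}方向: {direction}')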