content (stringlengths 22 to 815k) | id (int64 0 to 4.91M) |
---|---|
def test_sqlite_run_sql_with_parameters():
"""Test running a SQL query using SQLAlchemy templating engine"""
statement = "SELECT 1 + :value;"
database = SqliteDatabase()
response = database.run_sql(statement, parameters={"value": 1})
assert response.first()[0] == 2 | 5,355,100 |
def print_unicodeinfo(val: str, key: str) -> str:
"""
Prints the occurrence, unicode character or guideline rules and additional information
:param args: arguments instance
:param val: count of the occurrences of key
:param key: key (glyph or guideline rules)
:return:
"""
return f"{val:-{6}} {'{'}{repr(key) if controlcharacter_check(key) else key}{'}'}{addinfo(key)}" | 5,355,101 |
def ramp_up_coulomb(
lamda: float,
simulation: app.Simulation,
ligand_indices: List[int],
original_parameters: List[unit.Quantity],
) -> None:
"""
Helper function for the ghost_busters_ligand function. It updates the charge parameter in the nonbonded force of your simulation context.
Args:
lamda (float):
"""
for force in simulation.context.getSystem().getForces():
if type(force).__name__ == "NonbondedForce":
for it, index in enumerate(ligand_indices):
scaled_charge = scale(
initial_value=0 * unit.elementary_charge,
final_value=original_parameters[it][0],
lamda=lamda,
)
force.setParticleParameters(
index,
scaled_charge,
force.getParticleParameters(index)[1],
force.getParticleParameters(index)[2],
)
force.updateParametersInContext(simulation.context) | 5,355,102 |
def test_column_values_not_in_set():
"""
ColumnValuesToBeNotInSet
"""
obj = {
"config": {"forbiddenValues": ["random"]},
"columnTestType": "columnValuesToBeNotInSet",
}
test_case = ColumnTestCase.parse_obj(obj)
assert isinstance(test_case.config, ColumnValuesToBeNotInSet) | 5,355,103 |
def plot_upset_indicators(
intersections,
ax=None,
facecolor="black",
element_size=None,
with_lines=True,
horizontal=True,
height_pad=0.7,
):
# REF: https://github.com/jnothman/UpSetPlot/blob/e6f66883e980332452041cd1a6ba986d6d8d2ae5/upsetplot/plotting.py#L428
"""Plot the matrix of intersection indicators onto ax"""
data = intersections
n_cats = data.index.nlevels
idx = np.flatnonzero(data.index.to_frame()[data.index.names].values)
c = np.array(["lightgrey"] * len(data) * n_cats, dtype="O")
c[idx] = facecolor
x = np.repeat(np.arange(len(data)), n_cats)
y = np.tile(np.arange(n_cats), len(data))
if element_size is not None:
s = (element_size * 0.35) ** 2
else:
# TODO: make s relative to colw
s = 200
ax.scatter(x, y, c=c.tolist(), linewidth=0, s=s)
if with_lines:
line_data = (
pd.Series(y[idx], index=x[idx]).groupby(level=0).aggregate(["min", "max"])
)
ax.vlines(
line_data.index.values,
line_data["min"],
line_data["max"],
lw=2,
colors=facecolor,
)
tick_axis = ax.yaxis
tick_axis.set_ticks(np.arange(n_cats))
tick_axis.set_ticklabels(data.index.names, rotation=0 if horizontal else -90)
# ax.xaxis.set_visible(False)
ax.tick_params(axis="both", which="both", length=0)
if not horizontal:
ax.yaxis.set_ticks_position("top")
ax.set_frame_on(False)
ax.set_ylim((-height_pad, n_cats - 1 + height_pad)) | 5,355,104 |
def qx_to_npx(df):
""" Return df with qx converted to npx.
"""
df = 1 - df
out = df.cumprod().shift()
for i in df.index:
out.loc[i, i] = 1
return out | 5,355,105 |
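A minimal usage sketch for qx_to_npx above, assuming a square pandas DataFrame of mortality rates whose index and columns are the same ages (the numbers here are made up):
import pandas as pd
ages = [60, 61, 62]
qx = pd.DataFrame([[0.010, 0.012, 0.015]] * 3, index=ages, columns=ages)
npx = qx_to_npx(qx)  # function defined above
# The diagonal is forced to 1; the remaining rows carry the shifted
# cumulative products of (1 - qx) down each column.
print(npx)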
def homepage(selenium, config):
"""Get homepage with selenium."""
selenium.get(config.BASE_URL)
selenium.set_window_size(config.WINDOW_WIDTH, config.WINDOW_HEIGHT)
custom_click_cookie_rollbar(selenium, config.MAX_WAIT_TIME)
return selenium | 5,355,106 |
def _if_scalar_type_as(g, self, tensor):
"""
Convert self into the same type of tensor, as necessary.
We only support implicit casting for scalars, so we never
actually need to insert an ONNX cast operator here; just
fix up the scalar.
"""
if isinstance(self, torch._C.Value):
return self
elif tensor.type().kind() == "TensorType" or tensor.type().kind() == "CompleteTensorType":
ty = tensor.type().scalarType().lower()
return getattr(self, ty)()
else:
return self | 5,355,107 |
def read_images_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesText(const std::string& path)
void Reconstruction::WriteImagesText(const std::string& path)
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images | 5,355,108 |
def emailIsValid(email):
"""Return true if email is valid otherwise false"""
return EMAIL_RE.match(email) is not None | 5,355,109 |
def calculate_line_number(text):
"""Calculate line numbers in the text"""
return len([line for line in text.split("\n") if line.strip() != ""]) | 5,355,110 |
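A quick check of the helper above: blank and whitespace-only lines are not counted.
text = "first\n\n   \nsecond\nthird\n"
print(calculate_line_number(text))  # 3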
def remove_user_from_group(user_openid, group_id):
"""Remove specified user from specified group."""
session = get_session()
with session.begin():
(session.query(models.UserToGroup).
filter_by(user_openid=user_openid).
filter_by(group_id=group_id).
delete(synchronize_session=False)) | 5,355,111 |
def links_at_node(shape):
"""Get link ids for each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(N, 4) ndarray of int
Array of link ids.
Examples
--------
>>> from landlab.grid.structured_quad.links import links_at_node
>>> links_at_node((4, 3)) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [-1, 4, 1, -1],
[ 5, 7, -1, 2], [ 6, 8, 5, 3], [-1, 9, 6, 4],
[10, 12, -1, 7], [11, 13, 10, 8], [-1, 14, 11, 9],
[15, -1, -1, 12], [16, -1, 15, 13], [-1, -1, 16, 14]])
"""
(south_links, west_links) = _node_in_link_ids(shape)
(north_links, east_links) = _node_out_link_ids(shape)
return (
np.vstack(
(east_links.flat, north_links.flat, west_links.flat, south_links.flat)
)
.transpose()
.copy()
) | 5,355,112 |
def manage_addFancyContent(self, id, REQUEST=None):
"""Add the fancy fancy content."""
id = self._setObject(id, FancyContent(id))
return '' | 5,355,113 |
def convert_sentences(sentences, tokenizer):
"""
Truncate each sentence to 512 bpes in order to fit on BERT and convert it to bpes.
:param tokenizer: The BERT tokenizer we used in order to convert each sentence to ids.
:param sentences: The tokenized sentences of the summary we are processing.
:return: The ids of the summary sentences.
"""
sentences_ids = []
for i, sent in enumerate(sentences):
if len(sent) > 512:
sentences[i] = sentences[i][:511] + ['[SEP]']
sentences_ids.append(tokenizer.convert_tokens_to_ids(sentences[i]))
return sentences_ids | 5,355,114 |
def imscale(image: Imagelike, scale: Union[float, Tuple[float, float]],
**kwargs) -> np.ndarray:
"""Scale the given image. The result will be a new image
scaled by the specified scale.
"""
global _resizer
if _resizer is None:
_resizer = ImageResizer()
return _resizer.scale(image, scale, **kwargs) | 5,355,115 |
def parse_tooltip(spell: Union[ChampionSpell, SummonerSpell], tooltip: str) -> str:
"""
Improved tooltip parser based on the built-in Cassiopeia `Spell.__replace_variables`
"""
for dto in spell._data.values():
try:
costs_burn = dto.costBurn
effects_burn = dto.effectBurn
break
except AttributeError:
pass
else:
costs_burn = effects_burn = "?"
tooltip = tooltip.replace("{{ cost }}", costs_burn)
for x, effect in enumerate(effects_burn):
tooltip = tooltip.replace(f"{{{{ e{x} }}}}", effect)
try:
variables = spell.variables
except:
# Bug in SummonerSpell.variables throws exception
# TODO: submit patch
variables = []
for var in variables:
if var.link in SPELL_SCALINGS:
vals = '/'.join(f'{coeff * 100:g}' for coeff in var.coefficients)
replacement = f"{vals}% {SPELL_SCALINGS[var.link]}"
elif var.link == "@player.level":
replacement = f"{var.coefficients[0]:g}-{var.coefficients[-1]:g} (based on level)"
elif var.link == "@text":
replacement = '/'.join(f'{coeff:g}' for coeff in var.coefficients)
elif var.link == "@stacks":
replacement = f"{spell.name} stacks"
elif var.link == "@special.viw":
replacement = f"1% per {'/'.join(f'{coeff:g}' for coeff in var.coefficients)} **Bonus** AD"
elif var.link in {"@special.jaxrarmor", "@special.jaxrmr", "@special.BraumWArmor", "@special.BraumWMR"}:
# idk why the spell tooltips even have these variables. the actual numbers are static inside the text...
replacement = "bonus"
elif var.link == "@special.nautilusq":
replacement = ""
else:
replacement = f"{var.coefficients} {var.link}"
tooltip = tooltip.replace(f"{{{{ {var.key} }}}}", replacement)
return tooltip | 5,355,116 |
def _packages_info() -> dict:
"""Return a dict with installed packages version"""
return Dependencies.installed_packages() | 5,355,117 |
def generate(package_path: Optional[Path]) -> None:
"""Generate Poetry package manifests"""
PROJECT_CONFIG.load_requirements()
processor = PackageProcessor()
processor.register_packages()
processor.ensure_no_circular_imports()
if package_path:
processor.generate_package_manifest(package_path)
else:
processor.generate_package_manifests() | 5,355,118 |
def _save_mnist_recreation_indices():
"""Code to find MNIST train, validation and test indices for recreation of
MNIST MAF dataset.
Note this should not be called directly. This is only here for reproducibility."""
warnings.warn('This function should generally not be called because it '
'requires special setup but is kept here in order to reproduce functions if '
'needed.')
# Import maf data
datasets_root = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..', '..', 'maf', 'data',
)
mnist_path = os.path.join(datasets_root, 'mnist', 'mnist.pkl.gz')
with gzip.open(mnist_path, 'rb') as f:
maf_train, maf_val, maf_test = pickle.load(f)
# Import raw mnist data
data_obj = fetch_mldata('MNIST original') # , data_home=custom_data_home)
# Prepare comparison matrices
X_all = data_obj.data / 256.0
y_all = data_obj.target
maf_data_tuple = (maf_train[0], maf_val[0], maf_test[0])
n_maf = [X.shape[0] for X in maf_data_tuple]
X_maf = np.vstack(maf_data_tuple)
y_maf = np.concatenate((maf_train[1], maf_val[1], maf_test[1]))
# Sort maf using all columns
mnist_ind = np.lexsort(np.hstack((X_all, y_all.reshape(-1, 1))).T)
maf_ind = np.lexsort(np.hstack((X_maf, y_maf.reshape(-1, 1))).T)
rev_maf_ind = np.argsort(maf_ind)
# Show that matrices match when sorted by indices
print('Checking if the datasets are the same (should all be 0)')
def n_diff(X, Y):
"""
Parameters
----------
X :
Y :
Returns
-------
"""
return np.count_nonzero(X - Y)
def print_n_diff(X, Y):
"""
Parameters
----------
X :
Y :
"""
print('Number different = %d' % n_diff(X, Y))
print_n_diff(X_all[mnist_ind], X_maf[maf_ind])
print_n_diff(y_all[mnist_ind], y_maf[maf_ind])
# Retrieve indices and show that they are the same
train_idx, val_idx, test_idx = (
mnist_ind[
rev_maf_ind[np.sum(n_maf[:i], dtype=np.int):np.sum(n_maf[:(i + 1)], dtype=np.int)]]
for i in range(3)
)
for idx, maf in zip((train_idx, val_idx, test_idx), (maf_train, maf_val, maf_test)):
print_n_diff(X_all[idx], maf[0])
print_n_diff(y_all[idx], maf[1])
gzip_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'maf_mnist_splits.txt.gz'
)
with gzip.open(gzip_file, 'w+') as f:
f.write('# Indices of MNIST dataset retrieved using '
'sklearn.datasets.fetch_mldata(\'MNIST original\') that correspond to the train, '
'validation and test sets of the MAF paper (one line each).\n')
for i, idx in enumerate([train_idx, val_idx, test_idx]):
s = str(idx.tolist())
s = s[1:-1] # Trim off ends
f.write(s)
if i < 2:
f.write('\n') | 5,355,119 |
def main():
"""Run training process."""
parser = argparse.ArgumentParser(
description="Train Hifigan (See detail in examples/hifigan/train_hifigan.py)"
)
parser.add_argument(
"--train-dir",
default=None,
type=str,
help="directory including training data. ",
)
parser.add_argument(
"--dev-dir",
default=None,
type=str,
help="directory including development data. ",
)
parser.add_argument(
"--use-norm", default=1, type=int, help="use norm mels for training or raw."
)
parser.add_argument(
"--outdir", type=str, required=True, help="directory to save checkpoints."
)
parser.add_argument(
"--config", type=str, required=True, help="yaml format configuration file."
)
parser.add_argument(
"--resume",
default="",
type=str,
nargs="?",
help='checkpoint file path to resume training. (default="")',
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="logging level. higher is more logging. (default=1)",
)
parser.add_argument(
"--generator_mixed_precision",
default=0,
type=int,
help="using mixed precision for generator or not.",
)
parser.add_argument(
"--discriminator_mixed_precision",
default=0,
type=int,
help="using mixed precision for discriminator or not.",
)
parser.add_argument(
"--pretrained",
default="",
type=str,
nargs="?",
help="path of .h5 melgan generator to load weights from",
)
args = parser.parse_args()
# return strategy
STRATEGY = return_strategy()
# set mixed precision config
if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
args.generator_mixed_precision = bool(args.generator_mixed_precision)
args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
args.use_norm = bool(args.use_norm)
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# check arguments
if args.train_dir is None:
raise ValueError("Please specify --train-dir")
if args.dev_dir is None:
raise ValueError("Please specify either --valid-dir")
# load and save config
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))
config["version"] = tensorflow_tts.__version__
with open(os.path.join(args.outdir, "config.yml"), "w") as f:
yaml.dump(config, f, Dumper=yaml.Dumper)
for key, value in config.items():
logging.info(f"{key} = {value}")
# get dataset
if config["remove_short_samples"]:
mel_length_threshold = config["batch_max_steps"] // config[
"hop_size"
] + 2 * config["hifigan_generator_params"].get("aux_context_window", 0)
else:
mel_length_threshold = None
if config["format"] == "npy":
audio_query = "*-wave.npy"
mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
audio_load_fn = np.load
mel_load_fn = np.load
else:
raise ValueError("Only npy are supported.")
# define train/valid dataset
train_dataset = AudioMelDataset(
root_dir=args.train_dir,
audio_query=audio_query,
mel_query=mel_query,
audio_load_fn=audio_load_fn,
mel_load_fn=mel_load_fn,
mel_length_threshold=mel_length_threshold,
).create(
is_shuffle=config["is_shuffle"],
map_fn=lambda items: collater(
items,
batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
),
allow_cache=config["allow_cache"],
batch_size=config["batch_size"]
* STRATEGY.num_replicas_in_sync
* config["gradient_accumulation_steps"],
)
valid_dataset = AudioMelDataset(
root_dir=args.dev_dir,
audio_query=audio_query,
mel_query=mel_query,
audio_load_fn=audio_load_fn,
mel_load_fn=mel_load_fn,
mel_length_threshold=mel_length_threshold,
).create(
is_shuffle=config["is_shuffle"],
map_fn=lambda items: collater(
items,
batch_max_steps=tf.constant(
config["batch_max_steps_valid"], dtype=tf.int32
),
hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
),
allow_cache=config["allow_cache"],
batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
)
# define trainer
trainer = MultiSTFTMelganTrainer(
steps=0,
epochs=0,
config=config,
strategy=STRATEGY,
is_generator_mixed_precision=args.generator_mixed_precision,
is_discriminator_mixed_precision=args.discriminator_mixed_precision,
)
with STRATEGY.scope():
# define generator and discriminator
generator = TFHifiGANGenerator(
HifiGANGeneratorConfig(**config["hifigan_generator_params"]),
name="hifigan_generator",
)
multiperiod_discriminator = TFHifiGANMultiPeriodDiscriminator(
HifiGANDiscriminatorConfig(**config["hifigan_discriminator_params"]),
name="hifigan_multiperiod_discriminator",
)
multiscale_discriminator = TFMelGANMultiScaleDiscriminator(
MelGANDiscriminatorConfig(
**config["melgan_discriminator_params"],
name="melgan_multiscale_discriminator",
)
)
discriminator = TFHifiGANDiscriminator(
multiperiod_discriminator,
multiscale_discriminator,
name="hifigan_discriminator",
)
# dummy input to build model.
fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
y_hat = generator(fake_mels)
discriminator(y_hat)
if len(args.pretrained) > 1:
generator.load_weights(args.pretrained)
logging.info(
f"Successfully loaded pretrained weight from {args.pretrained}."
)
generator.summary()
discriminator.summary()
# define optimizer
generator_lr_fn = getattr(
tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
)(**config["generator_optimizer_params"]["lr_params"])
discriminator_lr_fn = getattr(
tf.keras.optimizers.schedules,
config["discriminator_optimizer_params"]["lr_fn"],
)(**config["discriminator_optimizer_params"]["lr_params"])
gen_optimizer = tf.keras.optimizers.Adam(
learning_rate=generator_lr_fn,
amsgrad=config["generator_optimizer_params"]["amsgrad"],
)
dis_optimizer = tf.keras.optimizers.Adam(
learning_rate=discriminator_lr_fn,
amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
)
trainer.compile(
gen_model=generator,
dis_model=discriminator,
gen_optimizer=gen_optimizer,
dis_optimizer=dis_optimizer,
)
# start training
try:
trainer.fit(
train_dataset,
valid_dataset,
saved_path=os.path.join(config["outdir"], "checkpoints/"),
resume=args.resume,
)
except KeyboardInterrupt:
trainer.save_checkpoint()
logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.") | 5,355,120 |
def _first(root: TreeNode) -> TreeNode:
"""Return a first in "inorder" traversal order
of the `root` subtree
Args:
root (TreeNode): root of subtree
Returns:
TreeNode: first node in subtree
"""
if root.left is None:
return root
return _first(root.left) | 5,355,121 |
def mock_plot(mocker):
"""Disable matplotlib plotting in test code"""
try:
import matplotlib.pyplot as plt
mocker.patch.object(plt, "gca")
mocker.patch.object(plt, "show")
except ImportError:
pass | 5,355,122 |
def get_library_version() -> str:
"""
Returns the version of minecraft-launcher-lib
"""
return __version__ | 5,355,123 |
def download_voc_pascal(data_dir='../data'):
"""Download the Pascal VOC2012 Dataset."""
voc_dir = os.path.join(data_dir, 'VOCdevkit/VOC2012')
url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
sha1 = '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'
fname = gutils.download(url, data_dir, sha1_hash=sha1)
with tarfile.open(fname, 'r') as f:
f.extractall(data_dir)
return voc_dir | 5,355,124 |
def load_image(image):
"""reshape and convert image to fit the model"""
img = cv2.imread(image) # Load images
img = cv2.resize(img, (257, 257), interpolation=cv2.INTER_LINEAR) # resize
img = (np.float32(img) - 127.5) / 127.5 # change image to float and normalize
img = img.reshape((1, 257, 257, 3)) # add batch dimension
return img | 5,355,125 |
def hist_trigger_time_diff(df_dev):
"""
Plot a histogram of the log of the time differences (in minutes) between device triggers.
"""
df = devices_trigger_time_diff(df_dev.copy())
fig = go.Figure()
trace = go.Histogram(x=np.log(df['row_duration'].dt.total_seconds()/60),
nbinsx=200,
)
fig.add_trace(trace)
return fig | 5,355,126 |
def verify_spec(spec_utid, proxy_utid):
"""
For a specific unit test id (utid) compares the spec with the proxy
"""
results=''
for key in spec_utid:
results += '%s: spec=%s, proxy=%s (%s) *** ' % (key,spec_utid[key],proxy_utid[key],(spec_utid.get(key)==proxy_utid.get(key)))
return results | 5,355,127 |
def test_Local_dir():
"""
Test Local filesystem directory utilities
"""
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
with TemporaryDirectory() as td:
src = os.path.join(root, 'cloud_fs')
dst = os.path.join(td, 'cloud_fs')
fs = FileSystem(src)
fs.cp(dst)
test = fs.ls()
truth = sorted(os.listdir(src))
assert test == truth, "Source files were not listed properly"
fs = FileSystem(dst)
test = fs.ls()
truth = sorted(os.listdir(dst))
assert test == truth, "Destination files were not listed properly"
truth = sorted(os.listdir(src))
assert test == truth, "Source files don't match destination files"
fs.rm()
assert not fs.exists(), 'Destination is not empty!' | 5,355,128 |
def diff_plot(song_1, song_2, filename=None, folder=None):
"""
Plot the difference between two series.
:param dict song_1:
:param dict song_2:
:param str filename:
:param str folder:
:return:
"""
x_1 = list(song_1.keys())
y_1 = list(song_1.values())
x_2 = list(song_2.keys())
y_2 = list(song_2.values())
y_2_interp = np.interp(x_1,
x_2,
y_2)
fig, ax_1 = plt.subplots()
ax_1.fill_between(x_1, y_1, y_2_interp)
if filename is not None:
f = '' if folder is None else folder
plt.savefig(os.path.join(f,
filename + '.png'))
plt.close(fig) | 5,355,129 |
def lammps_created_gsd(job):
"""Check if the mdtraj has converted the production to a gsd trajectory for the job."""
return job.isfile("trajectory-npt.gsd") | 5,355,130 |
def neo4j_data_age(data, max_data_age=None):
"""
Checks the noclook_last_seen property against datetime.datetime.now() and
if the difference is greater than max_data_age (hours)
(django_settings.NEO4J_MAX_DATA_AGE will be used if max_data_age is not specified)
and noclook_auto_manage is true, the data is said to be expired.
Returns noclook_last_seen as a datetime and an "expired" boolean.
"""
if not max_data_age:
max_data_age = django_settings.NEO4J_MAX_DATA_AGE
max_age = timedelta(hours=int(max_data_age))
now = datetime.now()
last_seen = isots_to_dt(data)
expired = False
if last_seen and (now-last_seen) > max_age and data.get('noclook_auto_manage', False):
expired = True
return last_seen, expired | 5,355,131 |
def ProfileOptions(parser):
"""Build option group for profiling chrome.
Args:
parser: OptionParser object for parsing the command-line.
Returns:
Option group that contains profiling chrome options.
"""
profile_options = optparse.OptionGroup(parser, 'Profile Chrome Options')
browsers = sorted(util.get_supported_browsers().keys())
profile_options.add_option('-b',
'--browser',
help='Select among installed browsers. '
'One of ' + ', '.join(browsers) +
'. "stable" is used by '
'default.',
type='choice',
choices=browsers,
default='stable')
profile_options.add_option('-t',
'--time',
help=('Stops tracing after N seconds. '
'Default is 5 seconds'),
default=5,
metavar='N',
type='int',
dest='trace_time')
profile_options.add_option('-e',
'--serial',
help='adb device serial number.',
type='string',
default=util.get_default_serial(),
dest='device_serial_number')
profile_options.add_option('-f',
'--trace_format',
help='Format of saved trace: proto, json, html.'
' Default is proto.',
default='proto',
dest='trace_format')
profile_options.add_option('-p',
'--platform',
help='Device platform. Only Android is supported.',
default='android',
dest='platform')
profile_options.add_option('--buf-size',
help='Use a trace buffer size '
' of N KB.',
type='int',
metavar='N',
dest='trace_buf_size')
profile_options.add_option(
'--enable_profiler',
help='Comma-separated string of '
'profiling options to use. Supports options for memory or '
'cpu or both. Ex: --enable_profiler=memory '
'or --enable_profiler=memory,cpu. ',
dest='enable_profiler')
profile_options.add_option('--chrome_categories',
help='Chrome tracing '
'categories to record.',
type='string',
default=_DEFAULT_CHROME_CATEGORIES)
profile_options.add_option(
'--skip_symbolize',
help='Skips symbolization after recording trace profile, if specified.',
action='store_true',
dest='skip_symbolize')
profile_options.add_option('--compress',
help='Compress the resulting trace '
'with gzip. ',
action='store_true')
# This is kept for backwards compatibility. Help is suppressed because this
# should be specified through the newer |trace_format| flag.
profile_options.add_option('--json',
help=optparse.SUPPRESS_HELP,
dest='write_json')
return profile_options | 5,355,132 |
def _clean_unicode(value):
"""Return the value as a unicode."""
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value) | 5,355,133 |
def deslugify_province(prov):
"""
Province slug to name, i.e. dashes to spaces and title case.
KZN is a special case.
"""
if prov == 'kwazulu-natal':
return 'KwaZulu-Natal'
return prov.replace('-', ' ').title() | 5,355,134 |
def color_debug():
"""
Color for info
"""
return read_config_color("COLOR", "debug", "grey") | 5,355,135 |
def replace_sym(data: str) -> str:
"""
Converts currency strings such as ``£5.00`` to ``5.00 GBP`` - or ``10 kr`` to ``10 SEK``
"""
origdata = data
data = data.strip()
for s, r in settings.CUR_SYMBOLS.items():
if data.startswith(s) or data.endswith(s):
log.debug(f"Replacing symbol {s!r} with {r!r}")
return f"{data.replace(s, '').strip()} {r}".strip()
if data.upper().startswith(s) or data.upper().endswith(s):
log.debug(f"Replacing symbol {s!r} with {r!r} (uppercase)")
return f"{data.upper().replace(s, '').strip()} {r}".strip()
return origdata | 5,355,136 |
def parse_vectors(vectors):
""" Basic cleanup of vector or vectors
Strip out V from V#s. Similar to parse tables, this by no means guarantees
a valid entry, just helps with some standard input formats
Parameters
----------
vectors : list of str or str
A string or list of strings of vector names to be parsed
Returns
-------
list of str
vectors with unnecessary characters removed
"""
def parse_vector(vector):
"""Strip string to numeric elements only"""
if isinstance(vector, int): # Already parsed earlier
return vector
return int(re.sub(r'\D', '', vector))
if isinstance(vectors, str):
return [parse_vector(vectors)]
return [parse_vector(v) for v in vectors] | 5,355,137 |
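A small illustration of the cleanup above (assumes `import re` and the parse_vectors helper are in scope): strings are stripped to their numeric part and already-parsed ints pass through.
print(parse_vectors("V12"))            # [12]
print(parse_vectors(["V1", "v2", 3]))  # [1, 2, 3]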
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', best_filename='model_best.pth.tar'):
"""
:param state:
:param is_best:
:param filename:
:param best_filename:
:return:
"""
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename) | 5,355,138 |
def sort_slopes(sds):
"""Sort slopes from bottom to top then right to left"""
sds = np.array(sds)
scores = sds[:, 0, 1] + sds[:, 1, 1] * 1e6
inds = np.argsort(scores)
return sds[inds] | 5,355,139 |
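A tiny, made-up example for sort_slopes above: each slope is a pair of (x, y) endpoints, and the combined score orders them by their y-values.
import numpy as np  # required by sort_slopes
slopes = [[[0, 5], [1, 9]], [[0, 1], [1, 2]], [[0, 3], [1, 4]]]
print(sort_slopes(slopes))  # the segment ending at (1, 2) is ordered first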
def test_horodecki_invalid_a_param():
"""Tests for invalid a_param inputs."""
with np.testing.assert_raises(ValueError):
horodecki(-5)
with np.testing.assert_raises(ValueError):
horodecki(5) | 5,355,140 |
def test_success(database):
""" Test if the DisasterEmergencyFundCode element has a valid COVID-19 related code and TOA is blank, then
GrossOutlayByAward_CPE cannot be blank.
"""
# gross_outlay_amount_by_awa_cpe populated
op1 = AwardFinancialFactory(disaster_emergency_fund_code='l', transaction_obligated_amou=None,
gross_outlay_amount_by_awa_cpe=2)
# 0 in either field is still populated
op2 = AwardFinancialFactory(disaster_emergency_fund_code='m', transaction_obligated_amou=0,
gross_outlay_amount_by_awa_cpe=None)
op3 = AwardFinancialFactory(disaster_emergency_fund_code='m', transaction_obligated_amou=None,
gross_outlay_amount_by_awa_cpe=0)
# wrong DEFC
op4 = AwardFinancialFactory(disaster_emergency_fund_code='z', transaction_obligated_amou=None,
gross_outlay_amount_by_awa_cpe=None)
# DEFC but not COVID
op5 = AwardFinancialFactory(disaster_emergency_fund_code='a', transaction_obligated_amou=None,
gross_outlay_amount_by_awa_cpe=None)
# populated TOA
op6 = AwardFinancialFactory(disaster_emergency_fund_code='n', transaction_obligated_amou=1,
gross_outlay_amount_by_awa_cpe=None)
defc1 = DEFCFactory(code='L', group='covid_19')
defc2 = DEFCFactory(code='M', group='covid_19')
defc3 = DEFCFactory(code='N', group='covid_19')
defc4 = DEFCFactory(code='A')
errors = number_of_errors(_FILE, database, models=[op1, op2, op3, op4, op5, op6, defc1, defc2, defc3, defc4])
assert errors == 0 | 5,355,141 |
def test_researcher_invitation(client, mocker):
"""Test full researcher invitation flow."""
exception = mocker.patch.object(client.application.logger, "exception")
mocker.patch("sentry_sdk.transport.HttpTransport.capture_event")
mocker.patch(
"orcid_hub.views.send_user_invitation.queue",
lambda *args, **kwargs: (views.send_user_invitation(*args, **kwargs) and Mock()))
send_email = mocker.patch("orcid_hub.utils.send_email")
admin = User.get(email="[email protected]")
resp = client.login(admin)
resp = client.post(
"/invite/user",
data={
"name": "TEST APP",
"is_employee": "false",
"email_address": "[email protected]",
"resend": "enable",
"is_student": "true",
"first_name": "test",
"last_name": "test",
"city": "test"
})
assert resp.status_code == 200
assert b"<!DOCTYPE html>" in resp.data, "Expected HTML content"
assert b"[email protected]" in resp.data
send_email.assert_called_once()
_, kwargs = send_email.call_args
invitation_url = urlparse(kwargs["invitation_url"]).path
client.logout()
client.cookie_jar.clear()
# Attempt to login via ORCID with the invitation token
resp = client.get(invitation_url)
auth_url = re.search(r"window.location='([^']*)'", resp.data.decode()).group(1)
qs = parse_qs(urlparse(auth_url).query)
redirect_uri = qs["redirect_uri"][0]
oauth_state = qs["state"][0]
callback_url = redirect_uri + "&state=" + oauth_state
assert session["oauth_state"] == oauth_state
mocker.patch(
"orcid_hub.authcontroller.OAuth2Session.fetch_token",
return_value={
"orcid": "0123-1234-5678-0123",
"name": "TESTER TESTERON",
"access_token": "xyz",
"refresh_token": "xyz",
"scope": "/activities/update",
"expires_in": "12121"
})
resp = client.get(callback_url, follow_redirects=True)
user = User.get(email="[email protected]")
assert user.orcid == "0123-1234-5678-0123"
exception.assert_called() | 5,355,142 |
def _get_mock_dataset(root_dir, base_dir_name):
"""
root_dir: directory to the mocked dataset
base_dir_name: name of the dataset sub-directory to create
"""
base_dir = os.path.join(root_dir, base_dir_name)
os.makedirs(base_dir, exist_ok=True)
if base_dir_name == SQuAD1.__name__:
file_names = ("train-v1.1.json", "dev-v1.1.json")
else:
file_names = ("train-v2.0.json", "dev-v2.0.json")
mocked_data = defaultdict(list)
for file_name in file_names:
txt_file = os.path.join(base_dir, file_name)
with open(txt_file, "w", encoding="utf-8") as f:
mock_json_data = _get_mock_json_data()
f.write(json.dumps(mock_json_data))
split = "train" if "train" in file_name else "dev"
dataset_line = next(
iter(_ParseSQuADQAData([("file_handle", mock_json_data)]))
)
mocked_data[split].append(dataset_line)
return mocked_data | 5,355,143 |
def serialize_skycoord(o):
"""
Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.
Args:
o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
"""
representation = o.representation.get_name()
frame = o.frame.name
r = o.represent_as('spherical')
d = dict(
_type='astropy.coordinates.SkyCoord',
frame=frame,
representation=representation,
lon=r.lon,
lat=r.lat)
if len(o.distance.unit.to_string()):
d['distance'] = r.distance
return d | 5,355,144 |
def redis_sentinel(create_sentinel, sentinel, loop):
"""Returns Redis Sentinel client instance."""
redis_sentinel = loop.run_until_complete(
create_sentinel([sentinel.tcp_address], timeout=2, loop=loop))
assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG'
return redis_sentinel | 5,355,145 |
def compute_features(
seq_path: str,
map_features_utils_instance: MapFeaturesUtils,
social_features_utils_instance: SocialFeaturesUtils,
) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
"""Compute social and map features for the sequence.
Args:
seq_path (str): file path for the sequence whose features are to be computed.
map_features_utils_instance: MapFeaturesUtils instance.
social_features_utils_instance: SocialFeaturesUtils instance.
Returns:
merged_features (numpy array): SEQ_LEN x NUM_FEATURES
map_feature_helpers (dict): Dictionary containing helpers for map features
"""
args = parse_arguments()
df = pd.read_csv(seq_path, dtype={"TIMESTAMP": str})
# Get social and map features for the agent
agent_track = df[df["OBJECT_TYPE"] == "AGENT"].values
# Social features are computed using only the observed trajectory
social_features = social_features_utils_instance.compute_social_features(
df, agent_track, args.obs_len, args.obs_len + args.pred_len,
RAW_DATA_FORMAT)
# agent_track will be used to compute n-t distances for future trajectory,
# using centerlines obtained from observed trajectory
map_features, map_feature_helpers = map_features_utils_instance.compute_map_features(
agent_track,
args.obs_len,
args.obs_len + args.pred_len,
RAW_DATA_FORMAT,
args.mode,
)
# Combine social and map features
# If track is of OBS_LEN (i.e., if it's in test mode), use agent_track of full SEQ_LEN,
# But keep (OBS_LEN+1) to (SEQ_LEN) indexes having None values
if agent_track.shape[0] == args.obs_len:
agent_track_seq = np.full(
(args.obs_len + args.pred_len, agent_track.shape[1]), None)
agent_track_seq[:args.obs_len] = agent_track
merged_features = np.concatenate(
(agent_track_seq, social_features, map_features), axis=1)
else:
merged_features = np.concatenate(
(agent_track, social_features, map_features), axis=1)
return merged_features, map_feature_helpers | 5,355,146 |
def summarizeTitlesByLength(titlesAlignments, limit=None):
"""
Sort match titles by sequence length.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with match titles sorted by
sequence length.
"""
return _sortHTML(titlesAlignments, 'length', limit) | 5,355,147 |
def _diff_tail(msg):
"""`msg` is an arbitrary length difference "path", which could
be coming from any part of the mapping hierarchy and ending in any kind of
selector tree. The last item is always the change message: add, replace,
delete <blah>. The next to last should always be a selector key of some kind.
Back up from there to find the first mapping tuple.
"""
tail = []
for part in msg[::-1]:
if isinstance(part, tuple) and len(part) == 2 and isinstance(part[0], str) and part[0].endswith("map"):
tail.append(part[1])
break
else:
tail.append(part)
return tuple(reversed(tail)) | 5,355,148 |
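A contrived example of the tail extraction above; the 'pixmap' key is hypothetical and only needs to end in "map" to stop the walk.
msg = [("root", 0), ("pixmap", "region-7"), "selector-key", "replace 'GAIN'"]
print(_diff_tail(msg))  # ('region-7', 'selector-key', "replace 'GAIN'")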
def select_standard_name(session, cluster, importance_table_name):
"""
Use cluster members for a WHERE ... IN (...) query
Use SQLAlchemy to handle the escaping
"""
stmt = session.query('name from %s' % importance_table_name) \
.filter(column('name').in_(list(cluster))) \
.order_by('"count" DESC') \
.limit(1)
rv = session.execute(stmt)
res = list(rv)
return res[0][0] | 5,355,149 |
def test_squeeze_sumup():
"""make sure that sumup does not lead to false output shape"""
s = magpy.Sensor(pixel=(1, 2, 3))
ss = magpy.magnet.Sphere((1, 2, 3), 1)
B1 = magpy.getB(ss, s, squeeze=False)
B2 = magpy.getB(ss, s, squeeze=False, sumup=True)
assert B1.shape == B2.shape | 5,355,150 |
def calculate_depth(experiment):
""" Calculate the minor, major, total depth
Args:
experiment (remixt.Experiment): experiment object
Returns:
pandas.DataFrame: read depth table with columns, 'major', 'minor', 'total', 'length'
"""
data = remixt.analysis.experiment.create_segment_table(experiment)
data['segment_length'] = data['end'] - data['start'] + 1
data['length_ratio'] = data['length'] / data['segment_length']
data['allele_readcount'] = data['minor_readcount'] + data['major_readcount']
data['high_quality'] = (
(data['length'] > np.percentile(data['length'].values, 10)) &
(data['allele_readcount'] > np.percentile(data['allele_readcount'].values, 10)) &
(data['length_ratio'] > np.percentile(data['length_ratio'].values, 10)))
phi = remixt.likelihood.estimate_phi(experiment.x)
p = remixt.likelihood.proportion_measureable_matrix(phi)
# Filter segments for which read depth calculation will be nan/inf
data = data[(data['length'] > 0) & np.all(p > 0, axis=1)]
data.rename(columns={
'major_depth': 'major',
'minor_depth': 'minor',
'total_depth': 'total',
}, inplace=True)
data = data[[
'chromosome',
'start',
'end',
'length',
'major',
'minor',
'total',
'high_quality',
]]
return data | 5,355,151 |
def checkerboard(key, nsq, size, dtype=np.float32):
"""Create a checkerboard background image with random colors.
NOTE: only supports a single value for nsq (number squares).
Args:
key: JAX PRNGkey.
nsq (int): number of squares per side of the checkerboard.
size (int): size of one side of the checkerboard in pixels.
dtype: desired return data type.
Returns:
canvas (np.array): checkerboard background image.
"""
assert size % nsq == 0
sq = size // nsq
color1, color2 = random.uniform(key, (2, 3), dtype=dtype)
canvas = np.full((nsq, sq, nsq, sq, 3), color1, dtype=dtype)
canvas = canvas.at[::2, :, 1::2, :, :].set(color2)
canvas = canvas.at[1::2, :, ::2, :, :].set(color2)
return canvas.reshape(sq * nsq, sq * nsq, 3) | 5,355,152 |
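A usage sketch for checkerboard above, assuming `np` refers to jax.numpy and `random` to jax.random as the code implies:
from jax import random
key = random.PRNGKey(0)
board = checkerboard(key, nsq=4, size=64)
print(board.shape)  # (64, 64, 3)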
def choice(x, a):
"""Generate a random sample from an array of given size."""
if torch.is_tensor(x):
return x[torch.randint(len(x), (a,))]
return x | 5,355,153 |
def correct_gene_names(df):
""" Fix datetime entries in Gene names
"""
update_symbols = []
for i, gs in enumerate(df.Gene_Symbol):
if (not (isinstance(gs, str))) or (':' in gs):
update_symbols.append(mapping.get_name_from_uniprot(df.Uniprot_Id.iloc[i]))
else:
update_symbols.append(gs)
df.Gene_Symbol = update_symbols
return df | 5,355,154 |
def process_repl_args(args):
""" Process PANDA replay-related arguments.
"""
assert False, 'Not implemented yet.'
cmd = []
cmd.extend(['-display', 'none'])
return cmd
# p_test "${panda_rr}-rr-snp" f "trace memory snapshot"
# p_test "${panda_rr}-rr-nondet.log" f "trace nondet log"
# -pandalog ${opts[-plog]} -replay $panda_rr | 5,355,155 |
def test_RotationPlot_methods():
"""This code is lifted from demo-3-v0.1."""
misori = Misorientation([1, 1, 1, 1]) # any will do
ori = Orientation.random()
fig = plt.figure()
ax = fig.add_subplot(projection="axangle", proj_type="ortho", **_SUBPLOT_KWARGS)
ax.scatter(misori)
ax.scatter(ori)
ax.plot(misori)
ax.plot(ori)
ax.plot_wireframe(OrientationRegion.from_symmetry(D6, D6))
plt.close("all")
# Clear the edge case
ax.transform(np.asarray([1, 1, 1])) | 5,355,156 |
def choose(n, k):
"""
A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in range(1, min(k, n - k) + 1): # changed from xrange
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0 | 5,355,157 |
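A quick sanity check of the fast binomial coefficient above against math.comb (Python 3.8+):
import math
assert choose(5, 2) == math.comb(5, 2) == 10
assert choose(10, 0) == 1
assert choose(3, 7) == 0  # k outside [0, n] returns 0 by design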
def _get_ReaLiSe_dataset(which="15"):
"""
Load the preprocessed SIGHAN train/eval/test datasets released with ReaLiSe.
"""
print("Loading ReaLiSe Dataset !")
print("Hint: The Data You loading now is the preprocessed sighan from ReaLise, ")
ddp_exec("os.system('date')")
path = "../SE_tmp_back/milestone/ReaLiSe/data/"
import pickle
train_dataset = pickle.load(open(path + "trainall.times2.pkl", "rb"))
eval_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
test_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
print("Hint: Using **SIGHAN" + which + "** for eval & test !")
def trans2mydataset(features):
new = []
for feature in features:
tmp = {}
tmp["input_ids"] = feature["src_idx"][:128]
tmp["labels"] = feature["tgt_idx"][:128]
tmp["attention_mask"] = ([1] * len(tmp["input_ids"]))[:128]#feature["lengths"])[:128]
new.append(tmp)
return mydataset(new)
print("Loaded successfully !")
ddp_exec("os.system('date')")
print("over")
return trans2mydataset(train_dataset), trans2mydataset(eval_dataset), trans2mydataset(test_dataset) | 5,355,158 |
def generate_check_phrase() -> bytes:
""" Generate check-phrase for connecting of auxiliary socket.
:return: some array of ATOM_LENGTH bytes.
"""
return get_random_bytes(ATOM_LENGTH) | 5,355,159 |
def load_pickle(filename: str):
"""
Load a file from disk.
Parameters
----------
filename: str
Name of the file that is loaded.
Returns
-------
The object loaded from the pickle file.
"""
with open(filename, 'rb') as f:
return pickle.load(f) | 5,355,160 |
def find_pssm_missing_proteins(fasta_dict, pssm_dir):
"""find_pssm_missing_proteins function finds the missing pssm files of the proteins in fasta file.
Args:
fasta_dict (dict): This is a dict of fasta file. The keys of fasta_dict are protein ids and
values are protein sequences.
pssm_dir (str): It is full path to the directory that contains pssm files.
Returns:
list: The list of proteins that does not have pssm file in pssm_dir
"""
set_missing_prots = set()
set_prots_pssm_exists = set()
for file in os.listdir(pssm_dir):
protein_id = file.split(".")[0]
set_prots_pssm_exists.add(protein_id)
for protein_id in set_prots_pssm_exists:
file = protein_id + ".pssm"
flag = False
sequence = ""
with open(pssm_dir+"/"+file, "r") as fp:
for line in fp:
list_line = line.strip().split()
if len(list_line) > 0:
if list_line[0] == '1':
flag = True
if len(list_line) == 0:
flag = False
if flag:
sequence += list_line[1]
if protein_id in fasta_dict:
if sequence != fasta_dict[protein_id]:
set_missing_prots.add(protein_id)
set_missing_prots = set_missing_prots.union(set(fasta_dict.keys()) - set_prots_pssm_exists)
return list(set_missing_prots) | 5,355,161 |
def _format_echo(text):
"""Compose system echo command outputs text"""
quote = '' if os.name == 'nt' else '"'
return 'echo {}{}{}'.format(quote, text, quote) | 5,355,162 |
def crop_to_reference(dataset: xr.Dataset, ref_dataset: xr.Dataset) -> xr.Dataset:
""" Crops horizontal coordinates to match reference dataset """
if "longitude" not in dataset.coords.keys():
raise ValueError("Longitude is not a coordinate of dataset.")
if "longitude" not in ref_dataset.coords.keys():
raise ValueError("Longitude is not a coordinate of reference dataset.")
if "latitude" not in dataset.coords.keys():
raise ValueError("Latitude is not a coordinate of dataset.")
if "latitude" not in ref_dataset.coords.keys():
raise ValueError("Latitude is not a coordinate of reference dataset.")
dataset = dataset.where(dataset.latitude == ref_dataset.latitude, drop=True)\
.where(dataset.longitude == ref_dataset.longitude, drop=True)
return dataset | 5,355,163 |
def add_frontend(fe_role, params):
"""
add_frontend
:return:
"""
# TODO: the structure of this function is not ideal
# Changing the root password before adding the backend is done here, which is not ideal.
change_root_passowrd(params)
# add doris fe
doris_fe_hostname = params.doris_fe_hostname[0]
doris_fe_observer_hostname = params.doris_fe_observer_hostname
doris_root_password = params.doris_fe_root_password
doris_fe_query_port = params.doris_fe_query_port
doris_fe_edit_log_port = params.doris_fe_edit_log_port
if (len(params.doris_fe_hostname) >= 1) and (fe_role == 'FOLLOWER'):
for fe_host in params.doris_fe_hostname:
if fe_host != doris_fe_hostname:
cmd = format("mysql -uroot -p{doris_root_password} -h {doris_fe_hostname} -P {doris_fe_query_port} "
"-e \"ALTER SYSTEM ADD {fe_role} \'{fe_host}:{doris_fe_edit_log_port}\' \"")
Logger.info("Adding Doris FE Follower Server, commonds is {0}.".format(cmd))
Execute(cmd, user=params.default_user, logoutput=True, tries=5, try_sleep=5)
if (len(params.doris_fe_observer_hostname) >= 1) and (fe_role == 'OBSERVER'):
for fe_observer in params.doris_fe_observer_hostname:
cmd = format("mysql -uroot -p{doris_root_password} -h {doris_fe_hostname} -P {doris_fe_query_port} "
"-e \"ALTER SYSTEM ADD {fe_role} \'{fe_observer}:{doris_fe_edit_log_port}\' \"")
Logger.info("Adding Doris FE Follower Server, commonds is {0}.".format(cmd))
Execute(cmd, user=params.default_user, logoutput=True, tries=5, try_sleep=5) | 5,355,164 |
def split_tasks(lst, n):
"""Split tasks into N-sized chunks."""
n = math.ceil(len(lst) / n)
for j in range(0, len(lst), n):
chunk = lst[j:n + j]
yield chunk | 5,355,165 |
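A small illustration of the chunking above: ten tasks requested in three chunks come back as pieces of ceil(10 / 3) = 4 items.
print(list(split_tasks(list(range(10)), 3)))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]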
def parse_line(description, inline_comments=_INLINE_COMMENT_PREFIXES):
"""
Parse a line into a list of cleaned description strings, stripping inline comments
"""
# manually strip out the comments
# py2 cannot ignore comments on a continuation line
# https://stackoverflow.com/q/9110428/1177288
#
# PY3 can do it for you with 'inline_comment_prefixes' = '#;'
if PY2:
for comment_prefix in inline_comments:
pos = description.find(comment_prefix)
if pos != -1:
# comment line or inline comment (after a space)
if pos == 0 or description[pos - 1].isspace():
description = description[:pos]
if not description:
return None
# there can be trailing commas if you copy from source code
descriptions = description.strip(',').split(',')
# strip all the spaces and quotes
descriptions = [desc.strip().strip("'").strip('"').strip()
for desc in descriptions]
return descriptions | 5,355,166 |
def target_frame():
"""Input frame."""
return 'IAU_ENCELADUS' | 5,355,167 |
def cards(cs):
"""Parse cards"""
cs = cs.split(' ')
result = np.zeros([len(valueL), len(colorL)], int)
for c in cs:
result[np.where(valueL == c[0])[0][0], np.where(colorL == c[1])[0][0]] = 1
return result | 5,355,168 |
def _get_max_diag_idx(m, n_A, n_B, diags, start, percentage):
"""
Determine the diag index for when the desired percentage of distances is computed
Parameters
----------
m : int
Window size
n_A : int
The length of the time series or sequence for which to compute the matrix
profile `T_A`
n_B : int
The length of the time series or sequence that contain your query subsequences
of interest `T_B`
diags : ndarray
The indices of the diagonals to process and compute
start : int
The (inclusive) diag index from which to start
percentage : float
Approximate percentage completed. The value is between 0.0 and 1.0.
Returns
-------
max_diag_idx : int
The diag index that corresponds to the desired percentage of distances to compute
n_dist_computed : int
The number of distances computed
"""
max_n_dist = 0
for diag_idx in range(diags.shape[0]):
k = diags[diag_idx]
if k >= 0:
max_n_dist += min(n_A - m + 1 - k, n_B - m + 1)
else:
max_n_dist += min(n_A - m + 1, n_B - m + 1 + k)
n_dist_computed = 0
for diag_idx in range(start, diags.shape[0]):
k = diags[diag_idx]
if k >= 0:
n_dist_computed += min(n_A - m + 1 - k, n_B - m + 1)
else:
n_dist_computed += min(n_A - m + 1, n_B - m + 1 + k)
if n_dist_computed / max_n_dist > percentage: # pragma: no cover
break
max_diag_idx = diag_idx + 1
return max_diag_idx, n_dist_computed | 5,355,169 |
def get_argument_values(arg_defs, arg_asts, variables):
"""Prepares an object map of argument values given a list of argument
definitions and list of argument AST nodes."""
if arg_asts:
arg_ast_map = {arg.name.value: arg for arg in arg_asts}
else:
arg_ast_map = {}
result = {}
for arg_def in arg_defs:
name = arg_def.name
value_ast = arg_ast_map.get(name)
if value_ast:
value_ast = value_ast.value
value = value_from_ast(
value_ast,
arg_def.type,
variables
)
if value is None:
value = arg_def.default_value
if value is not None:
result[name] = value
return result | 5,355,170 |
def get_identifier(positioner_id, command_id, uid=0, response_code=0):
"""Returns a 29 bits identifier with the correct format.
The CAN identifier format for the positioners uses an extended frame with
29-bit encoding so that the 11 higher bits correspond to the positioner
ID, the 8 middle bits are the command number, the following 6 bits are the
unique identifier, and the 4 lower bits are the response code.
Parameters
----------
positioner_id : int
The Id of the positioner to command, or zero for broadcast.
command_id : int
The ID of the command to send.
uid : int
The unique identifier
response_code : int
The response code.
Returns
-------
identifier : `int`
The decimal integer corresponding to the 29-bit identifier.
Examples
--------
::
>>> get_identifier(5, 17, uid=5)
1328128
>>> bin(1328128)
'0b101000100010000000000'
"""
posid_bin = format(positioner_id, "011b")
cid_bin = format(command_id, "08b")
cuid_bin = format(uid, "06b")
response_bin = format(int(response_code), "04b")
identifier = posid_bin + cid_bin + cuid_bin + response_bin
assert len(identifier) == 29
return int(identifier, 2) | 5,355,171 |
def standardized(array):
"""Normalize the values in an array.
Arguments:
array (np.ndarray): Array of values to normalize.
Returns:
array with zero mean and unit standard deviation.
"""
return (array - array.mean()) / max(1e-4, array.std()) | 5,355,172 |
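A quick numerical check of the normalization above:
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0])
z = standardized(x)
print(z.mean(), z.std())  # ~0.0 and ~1.0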
def newaddress(fn,passphrase,addr_type=0):
"""
Create a new wallet address of the given type and return it as JSON.
"""
wallet = Wallet(fn).fromFile(passphrase)
# Address Types
# addr_type == 0, deposit
# addr_type == 1, change
# addr_type == 2, staking
# addr_type == 3, Dealer
# Address types aren't programmatically important, but help to organize
if addr_type is None:
addr_type = 0
k = wallet.create_address(save=True,addr_type=addr_type)
d = { "new_address" : (k.address_type(),k.address(),k.address(True)) }
return json.dumps(d, sort_keys=True, indent=4) | 5,355,173 |
def index(args):
"""Handles the index step of the program."""
if not args.index: # build index
logging.info(" Building index...")
index_list = generate_index(args.input_dir)
if not index_list: # list is empty
logging.error(" Empty index. Exiting...")
return
logging.info(" Index built!")
if not args.no_index: # save index
np.save(args.dump_index, index_list)
logging.info(" Index saved as: {}".format(args.dump_index))
return index_list
else: # load index from file
index_list = load_index(args.index)
return index_list | 5,355,174 |
def setup_harness(bsize=16, workers=0):
"""
CommandLine:
python ~/code/netharn/netharn/examples/yolo_voc.py setup_harness
Example:
>>> # DISABLE_DOCTSET
>>> harn = setup_harness()
>>> harn.initialize()
"""
xpu = nh.XPU.cast('argv')
nice = ub.argval('--nice', default='Yolo2Baseline')
batch_size = int(ub.argval('--batch_size', default=bsize))
bstep = int(ub.argval('--bstep', 4))
workers = int(ub.argval('--workers', default=workers))
decay = float(ub.argval('--decay', default=0.0005))
lr = float(ub.argval('--lr', default=0.001))
ovthresh = 0.5
# We will divide the learning rate by the simulated batch size
datasets = {
'train': YoloVOCDataset(years=[2007, 2012], split='trainval'),
'test': YoloVOCDataset(years=[2007], split='test'),
}
loaders = {
key: dset.make_loader(batch_size=batch_size, num_workers=workers,
shuffle=(key == 'train'), pin_memory=True)
for key, dset in datasets.items()
}
if workers > 0:
import cv2
cv2.setNumThreads(0)
simulated_bsize = bstep * batch_size
hyper = nh.HyperParams(**{
'nice': nice,
'workdir': ub.truepath('~/work/voc_yolo2'),
'datasets': datasets,
# 'xpu': 'distributed(todo: fancy network stuff)',
# 'xpu': 'cpu',
# 'xpu': 'gpu:0,1,2,3',
'xpu': xpu,
# a single dict is applied to all datset loaders
'loaders': loaders,
'model': (light_yolo.Yolo, {
'num_classes': datasets['train'].num_classes,
'anchors': datasets['train'].anchors,
'conf_thresh': 0.001,
# 'nms_thresh': 0.5, # reproduce original yolo
'nms_thresh': 0.4, # reproduce lightnet
}),
'criterion': (light_region_loss.RegionLoss, {
'num_classes': datasets['train'].num_classes,
'anchors': datasets['train'].anchors,
'object_scale': 5.0,
'noobject_scale': 1.0,
'class_scale': 1.0,
'coord_scale': 1.0,
'thresh': 0.6, # iou_thresh
}),
'initializer': (nh.initializers.Pretrained, {
# 'fpath': light_yolo.demo_voc_weights(),
'fpath': light_yolo.initial_imagenet_weights(),
}),
'optimizer': (torch.optim.SGD, {
'lr': lr / 10,
'momentum': 0.9,
'dampening': 0,
# multiplying by batch size was one of those unpublished details
'weight_decay': decay * simulated_bsize,
}),
# Pascal 2007 + 2012 trainval has 16551 images
# Pascal 2007 test has 4952 images
# In the original YOLO, one batch is 64 images,
# so one epoch is 16551 / 64 = 259 iterations.
#
# From the original YOLO VOC v2 config
# https://github.com/pjreddie/darknet/blob/master/cfg/yolov2-voc.cfg
# learning_rate=0.001
# burn_in=1000
# max_batches = 80200
# policy=steps
# steps=40000,60000
# scales=.1,.1
#
# However, the LIGHTNET values are
# LR_STEPS = [250, 25000, 35000]
#
# Based in this, the iter to batch conversion is
#
# ((np.array([250, 25000, 35000, 1000, 40000, 60000, 80200]) / 256) + 1).astype(np.int)
# array([ 1, 98, 137, 4, 157, 235, 314])
'scheduler': (nh.schedulers.ListedLR, {
'points': {
# dividing by batch size was one of those unpublished details
# 0: lr * 0.1 / simulated_bsize, # burnin
# 4: lr * 1.0 / simulated_bsize,
# 157: lr * 0.1 / simulated_bsize,
# 235: lr * 0.001 / simulated_bsize,
0: lr * 0.1 / simulated_bsize,
1: lr * 1.0 / simulated_bsize,
60: lr * 0.1 / simulated_bsize,
90: lr * 0.001 / simulated_bsize,
},
'interpolate': False
}),
'monitor': (nh.Monitor, {
'minimize': ['loss'],
'maximize': ['mAP'],
'patience': 314,
'max_epoch': 314,
}),
'augment': datasets['train'].augmenter,
'dynamics': {
# Controls how many batches to process before taking a step in the
# gradient direction. Effectively simulates a batch_size that is
# `bstep` times bigger.
'batch_step': bstep,
},
'other': {
# Other params are not used internally, so you are free to set any
# extra params specific to your algorithm, and still have them
# logged in the hyperparam structure. For YOLO this is `ovthresh`.
'batch_size': batch_size,
'nice': nice,
'ovthresh': ovthresh, # used in mAP computation
'input_range': 'norm01',
},
})
harn = YoloHarn(hyper=hyper)
harn.config['use_tqdm'] = False
harn.intervals['log_iter_train'] = 1
harn.intervals['log_iter_test'] = None
harn.intervals['log_iter_vali'] = None
return harn | 5,355,175 |
def _output_object_or_file_map_configurator(prerequisites, args):
"""Adds the output file map or single object file to the command line."""
return _output_or_file_map(
output_file_map = prerequisites.output_file_map,
outputs = prerequisites.object_files,
args = args,
) | 5,355,176 |
def get_file_name():
"""This function asl the user for file and returns it"""
f_name = input('Input your file name: ')
return f_name | 5,355,177 |
def GetMinikubeVersion():
"""Returns the current version of minikube."""
return six.ensure_text(subprocess.check_output([_FindMinikube(), 'version'])) | 5,355,178 |
def minimum_distance(geo1, geo2):
""" get the minimum distance between atoms in geo1 and those in geo2
"""
xyzs1 = coordinates(geo1)
xyzs2 = coordinates(geo2)
return min(cart.vec.distance(xyz1, xyz2)
for xyz1, xyz2 in itertools.product(xyzs1, xyzs2)) | 5,355,179 |
def promptbrowse(tree):
"""A simple prompt with faux commands to browse the filetree"""
tree_pref = tree
def change_dir(dirnm):
"""Change directory"""
nonlocal tree_pref
# Move on up
if ".." in dirnm:
dotlst = dirnm.split("/")
upcnt = 0
for dot in dotlst:
if (dot == "..") and (tree_pref.parent != None):
tree_pref = tree_pref.parent
upcnt += 1
if upcnt > 0:
print("Moving ", "up" * upcnt)
else:
print("Top level")
return
# Go further down
pos_dirs = []
for nd in tree_pref.contents:
if str(nd) == dirnm:
if type(nd) == Node:
print("Changing to dir ", dirnm)
tree_pref = nd
return
else:
print(dirnm, " is not a directory")
return
elif dirnm == str(nd)[: len(dirnm)]:
pos_dirs.append(nd)
if len(pos_dirs) > 1:
print("Possible dirs: ", [str(l) for l in pos_dirs])
return
if pos_dirs:
if type(pos_dirs[0]) == Node:
print("Changing to dir ", pos_dirs[0])
tree_pref = pos_dirs[0]
return
else:
print("Possible loc ", pos_dirs[0], " is not a dir")
return
print("Node not found ", dirnm)
while True:
rd = input("-->: ")
rd_lst = rd.split(maxsplit=1)
# Change a dir
if rd_lst[0] == "cd":
if len(rd_lst) < 2:
print("Need a directory name")
continue
change_dir(rd_lst[1])
# Nothing done with all the other dirs mentioned
# list files
elif rd == "ls":
print([str(l) for l in tree_pref.contents])
elif rd == "ll":
for l in tree_pref.contents:
print(l)
elif rd == "name":
print(tree_pref)
elif rd == "pwd":
loc = tree_pref
full_str = str(loc)
while loc.parent:
loc = loc.parent
full_str = str(loc) + "/" + full_str
print(full_str)
# quit
elif rd == "q":
print("Quitting")
break
elif (rd == "h") or (rd == "help"):
print("List of commands:")
print("cd .. | dirname : Change directory up or down")
print("ls : List of files/dirs in current dir")
print("ll : List of files/dirs underneath")
print("name : Name of current file/dir")
print("pwd : Current path")
print("q : Quit")
else:
print("Unknown command: ", rd, " -- press 'h' for help") | 5,355,180 |
def cli():
"""Blah"""
logging.basicConfig(level=logging.DEBUG) | 5,355,181 |
def test_tcp_telemetry_client_fn():
"""
Create a client from a telemetry daemon, reboot the client a number of
times and verify that it decodes telemetry on each boot.
"""
writer, env, daemon = tcp_env()
with writer.booted(), daemon.booted():
time.sleep(0.5)
client, out_queue = daemon.client()
for _ in range(5):
with client.booted():
time.sleep(0.5)
for _ in range(10):
# advance time, dispatch to generate out-going frames
env.advance_time(10)
frame_count = env.dispatch_now()
for _ in range(frame_count):
assert queue_get(out_queue) is not None
queue_get_none(out_queue) | 5,355,182 |
async def test_switch_context(
opp, entities, opp_admin_user, enable_custom_integrations
):
"""Test that switch context works."""
assert await async_setup_component(opp, "switch", {"switch": {"platform": "test"}})
await opp.async_block_till_done()
state = opp.states.get("switch.ac")
assert state is not None
await opp.services.async_call(
"switch",
"toggle",
{"entity_id": state.entity_id},
True,
core.Context(user_id=opp_admin_user.id),
)
state2 = opp.states.get("switch.ac")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == opp_admin_user.id | 5,355,183 |
def mkdir_p(path):
"""Create path if it doesn't exist already"""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise | 5,355,184 |
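# Usage sketch for mkdir_p: repeated calls are safe, mirroring `mkdir -p`. The path is
# just an example; on Python 3.2+ the same behaviour is available directly through
# os.makedirs(..., exist_ok=True).
mkdir_p("build/artifacts/logs")   # creates the full path if missing
mkdir_p("build/artifacts/logs")   # second call is a no-op, no exception
os.makedirs("build/artifacts/logs", exist_ok=True)   # modern equivalent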
def test_string_ilike(feature_list, field_list):
"""
Assertions for 'like' operations with no case sensitivity
:param feature_list: feature collection list
:param field_list: feature field names
"""
cql_ast = get_ast('name ILIKE "lake%"')
assert cql_ast == LikePredicateNode(
AttributeExpression('name'),
LiteralExpression('lake%'),
False,
False,
)
result = like_test(cql_ast, feature_list, field_list)
assert len(result) == 14 | 5,355,185 |
def prepare():
"""
    Return the list of tweets filtered by target entity; each item keeps the tweet's
    original attributes as downloaded from Twitter.
    :return: list of filtered tweet objects
"""
path = '../../Data.json'
List = loadData(path) # load data
tweets = [List[i]['text'] for i in range(len(List))] # store the text of each tweet in a list
tweets = [process(item, False) for item in tweets] # get the list of processed tweets
filtered_tweets = tweetsEntitiesMapping(tweets) # filter tweets by target entity
ids_list = filtered_tweets[3] # get the list of ids of the filtered tweets in the original list
count = 0
list_tweets = [] # store the filtered tweet objects
for item in List:
if count in ids_list:
list_tweets.append(item)
count = count + 1
return list_tweets | 5,355,186 |
def get_console_script_specs(console: Dict[str, str]) -> List[str]:
"""
Given the mapping from entrypoint name to callable, return the relevant
console script specs.
"""
# Don't mutate caller's version
console = console.copy()
scripts_to_generate = []
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop("pip", None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append("pip = " + pip_script)
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
scripts_to_generate.append(
"pip{} = {}".format(sys.version_info[0], pip_script)
)
scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r"pip(\d(\.\d)?)?$", k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop("easy_install", None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append("easy_install = " + easy_install_script)
scripts_to_generate.append(
"easy_install-{} = {}".format(
get_major_minor_version(), easy_install_script
)
)
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r"easy_install(-\d\.\d)?$", k)
]
for k in easy_install_ep:
del console[k]
# Generate the console entry points specified in the wheel
scripts_to_generate.extend(starmap("{} = {}".format, console.items()))
return scripts_to_generate | 5,355,187 |
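# Usage sketch: pass a wheel's "console_scripts" mapping and get back the spec strings
# that wrapper scripts are generated from. The package names are illustrative; the
# pip/easy_install special-casing only triggers for those exact keys.
specs = get_console_script_specs({
    "myapp": "myapp.cli:main",
    "pip": "pip._internal.cli.main:main",
})
# Outside ensurepip on CPython 3.Y this yields, in order:
#   "pip = pip._internal.cli.main:main"
#   "pip3 = pip._internal.cli.main:main"
#   "pip3.Y = pip._internal.cli.main:main"   (Y = current minor version)
#   "myapp = myapp.cli:main"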
def local_action_StillOnAll(arg=None):
"""{"group": "Playback - All"}"""
query = '*ST\r'
queue.request(lambda: udp.send(query),
lambda resp: handleReqResp('StillOnAll', resp)) | 5,355,188 |
def deletecall(bam_url,api_call,call_parameters,delete_entity,header):
"""API request to delete and return values"""
call_url = "http://"+bam_url+"/Services/REST/v1/"+api_call+"?"
print("You are requesting to delete:")
print(delete_entity)
answer = input("Do you want to proceed (y (yes) or n (no))? ")
try:
if answer.lower() == "y":
response = requests.delete(call_url,params=call_parameters, headers=header)
return response.json()
elif answer.lower() == "n":
return "You aborted deletion"
else:
return "You entered an invalid character"
except requests.exceptions.RequestException as e:
print(e) | 5,355,189 |
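# Usage sketch for deletecall: every value below is a placeholder; the "delete"
# endpoint name, the objectId parameter and the BAMAuthToken header format are
# assumptions about the Address Manager REST API, not taken from this snippet.
# Note that the function prompts for y/n confirmation on stdin before deleting.
bam = "bam.example.com"
header = {"Authorization": "BAMAuthToken: <token>", "Content-Type": "application/json"}
result = deletecall(
    bam,
    "delete",                                    # hypothetical API call name
    {"objectId": 12345},                         # hypothetical parameters
    "Host record host1.example.com (id 12345)",  # description shown to the user
    header,
)
print(result)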
def haDecFromAzAlt (azAlt, lat):
"""Converts alt/az position to ha/dec position.
Inputs:
- azAlt (az, alt) (deg)
- lat latitude (degrees);
>0 is north of the equator, <0 is south
Returns a tuple containing:
- haDec (HA, Dec) (deg), a tuple;
HA is in the range (-180, 180]
- atPole true => object near the pole (see Error Conditions)
Error Conditions:
- If converted position is too near the north or south pole,
atPole is set true and HA is some arbitrary value.
Details:
Sign conventions:
- azimuth is 0 south and 90 east
- ha/dec is the usual left-handed coordinate system
History:
3/01 ROwen Converted to Python from TCC's sph_AzAlt2HADec 1-2.
2/02 ROwen Minor tweaks to header.
2002-07-02 ROwen Renamed from azAltToHADec.
2003-05-06 ROwen Changed HA range from [0, 360) to (-180, 180]
"""
# convert spherical az/alt (deg) to direction cosines
azAltDC = dcFromSC (azAlt)
# convert az/alt direction cosines to -ha/dec direction cosines
negHADecDC = Cnv.haDecFromAzAlt (azAltDC, lat)
# convert -ha/dec direction cosines to spherical -ha/dec (deg)
((negHA, dec), atPole) = scFromDC (negHADecDC)
return ((opscore.RO.MathUtil.wrapCtr(-negHA), dec), atPole) | 5,355,190 |
def get_description(expression, options=None):
"""Generates a human readable string for the Cron Expression
Args:
expression: The cron expression string
options: Options to control the output description
Returns:
The cron expression description
"""
    descriptor = ExpressionDescriptor(expression, options)
    return descriptor.get_description(DescriptionTypeEnum.FULL) | 5,355,191 |
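# Usage sketch: assuming ExpressionDescriptor here behaves like the cron-descriptor
# package it appears to wrap, typical five-field expressions come back as readable
# sentences (exact wording may differ by version and locale).
print(get_description("*/5 * * * *"))       # e.g. "Every 5 minutes"
print(get_description("0 23 ? * MON-FRI"))  # e.g. "At 11:00 PM, Monday through Friday"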
def check_for_firefox():
""" Determine if Firefox is available. """
if os.path.exists('/Applications/Firefox.app/Contents/MacOS/firefox'):
return True
for exe in ('firefox',):
if find_executable(exe):
return True
return False | 5,355,192 |
def midpoint(close, length=None, offset=None, **kwargs):
"""Indicator: Midpoint"""
# Validate arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 1
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
lowest = close.rolling(length, min_periods=min_periods).min()
highest = close.rolling(length, min_periods=min_periods).max()
midpoint = 0.5 * (lowest + highest)
# Offset
if offset != 0:
midpoint = midpoint.shift(offset)
# Handle fills
if 'fillna' in kwargs:
midpoint.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
midpoint.fillna(method=kwargs['fill_method'], inplace=True)
# Name and Categorize it
midpoint.name = f"MIDPOINT_{length}"
midpoint.category = 'overlap'
return midpoint | 5,355,193 |
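# Stand-alone check of the rolling-midpoint idea without the pandas-ta style helpers
# (verify_series, get_offset) used above; plain pandas reproduces the core computation.
import pandas as pd

close = pd.Series([10.0, 12.0, 11.0, 15.0, 14.0, 13.0])
length = 3
lowest = close.rolling(length, min_periods=length).min()
highest = close.rolling(length, min_periods=length).max()
print(0.5 * (lowest + highest))
# NaN for the first two bars (not enough history), then 11.0, 13.0, 13.0, 14.0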
def burn_in(task_id: str, build_variant: str, generate_config: GenerateConfig,
repeat_config: RepeatConfig, evg_api: EvergreenApi, evg_conf: EvergreenProjectConfig,
repos: List[Repo], generate_tasks_file: str, install_dir: str) -> None:
"""
Run burn_in_tests.
:param task_id: Id of task running.
:param build_variant: Build variant to run against.
:param generate_config: Configuration for how to generate tasks.
:param repeat_config: Configuration for how to repeat tests.
:param evg_api: Evergreen API client.
:param evg_conf: Evergreen project configuration.
:param repos: Git repos containing changes.
:param generate_tasks_file: File to write generate tasks configuration to.
:param install_dir: Path to bin directory of a testable installation
"""
change_detector = EvergreenFileChangeDetector(task_id, evg_api, os.environ)
executor = GenerateBurnInExecutor(generate_config, repeat_config, evg_api, generate_tasks_file)
burn_in_orchestrator = BurnInOrchestrator(change_detector, executor, evg_conf)
burn_in_orchestrator.burn_in(repos, build_variant, install_dir) | 5,355,194 |
def quad_fejer(order, domain=(0, 1), growth=False, segments=1):
"""
Generate the quadrature abscissas and weights in Fejer quadrature.
Args:
order (int, numpy.ndarray):
Quadrature order.
domain (chaospy.distributions.baseclass.Dist, numpy.ndarray):
Either distribution or bounding of interval to integrate over.
growth (bool):
If True sets the growth rule for the quadrature rule to only
include orders that enhances nested samples.
segments (int):
Split intervals into N subintervals and create a patched
quadrature based on the segmented quadrature. Can not be lower than
`order`. If 0 is provided, default to square root of `order`.
Nested samples only exist when the number of segments are fixed.
Returns:
(numpy.ndarray, numpy.ndarray):
abscissas:
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights:
The quadrature weights with ``weights.shape == (N,)``.
Example:
>>> abscissas, weights = quad_fejer(3, (0, 1))
>>> abscissas.round(4)
array([[0.0955, 0.3455, 0.6545, 0.9045]])
>>> weights.round(4)
array([0.1804, 0.2996, 0.2996, 0.1804])
>>> abscissas, weights = quad_fejer(3, (0, 1), segments=2)
>>> abscissas.round(4)
array([[0.125, 0.375, 0.625, 0.875]])
>>> weights.round(4)
array([0.2222, 0.2222, 0.2222, 0.2222])
"""
from ..distributions.baseclass import Dist
if isinstance(domain, Dist):
abscissas, weights = quad_fejer(
order, (domain.lower, domain.upper), growth)
weights *= domain.pdf(abscissas).flatten()
weights /= numpy.sum(weights)
return abscissas, weights
order = numpy.asarray(order, dtype=int).flatten()
lower, upper = numpy.array(domain)
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()
dim = max(lower.size, upper.size, order.size)
order = order*numpy.ones(dim, dtype=int)
lower = lower*numpy.ones(dim)
upper = upper*numpy.ones(dim)
segments = segments*numpy.ones(dim, dtype=int)
if growth:
order = numpy.where(order > 0, 2**(order+1)-2, 0)
abscissas, weights = zip(*[_fejer(order_, segment)
for order_, segment in zip(order, segments)])
return combine_quadrature(abscissas, weights, (lower, upper)) | 5,355,195 |
def probe(app: FastFlixApp, file: Path) -> Box:
""" Run FFprobe on a file """
command = [
f"{app.fastflix.config.ffprobe}",
"-v",
"quiet",
"-loglevel",
"panic",
"-print_format",
"json",
"-show_format",
"-show_streams",
f"{file}",
]
result = execute(command)
try:
return Box.from_json(result.stdout)
except BoxError:
logger.error(f"Could not read output: {result.stdout} - {result.stderr}")
raise FlixError(result.stderr) | 5,355,196 |
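# Stand-alone sketch of the same ffprobe invocation, bypassing the FastFlixApp/execute
# wrappers used above; assumes an `ffprobe` binary is on PATH. The flag list mirrors
# the command built in probe().
import json
import subprocess

def probe_sketch(path: str) -> dict:
    cmd = ["ffprobe", "-v", "quiet", "-loglevel", "panic",
           "-print_format", "json", "-show_format", "-show_streams", path]
    result = subprocess.run(cmd, capture_output=True, text=True)
    return json.loads(result.stdout)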
def is_standard_time_series(time_series, window=180):
"""
Check the length of time_series. If window = 180, then the length of time_series should be 903.
The mean value of last window should be larger than 0.
:param time_series: the time series to check, like [data_c, data_b, data_a]
:type time_series: pandas.Series
:param window: the length of window
:return: True or False
:return type: boolean
"""
if len(time_series) == 5 * window + 3 and np.mean(time_series[(4 * window + 2):]) > 0:
return True
else:
return False | 5,355,197 |
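# Usage sketch: with the default window of 180 the series must be exactly
# 5 * 180 + 3 = 903 points long and the last window must average above zero.
import numpy as np
import pandas as pd

good = pd.Series(np.ones(903))
short = pd.Series(np.ones(500))
print(is_standard_time_series(good))   # True
print(is_standard_time_series(short))  # False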
def get_comment_type(token, comment_syntax):
"""
    Return the comment Type related to the SQL engine.
"""
if is_block_comment(token):
return comment_syntax.get_block_comment_type(token)
elif is_line_comment(token):
return comment_syntax.get_line_comment_type(token) | 5,355,198 |
def utils_short_term_train(speaker_model, listener_model, candidates, policy4shortgame, stop=0.1, maxrounds=1000, trainspeaker=True, trainlistener=True):
"""
    All inputs are single instances: there is no batch-size dimension.
"""
new_candidates = copy.deepcopy(candidates)
rr = 0
rewards = []
while rr < maxrounds:
np.random.shuffle(new_candidates)
choice = np.random.randint(len(candidates))
target_input = new_candidates[choice]
target_candidate_idx = np.zeros(len(candidates))
target_candidate_idx[choice] = 1
if policy4shortgame=='sample':
speaker_message, speaker_probs = speaker_model.sample_from_speaker_policy(target_input)
chosen_target_idx, listener_probs, us = listener_model.sample_from_listener_policy(speaker_message, new_candidates)
elif policy4shortgame=='infer':
speaker_message, speaker_probs = speaker_model.infer_from_speaker_policy(target_input)
chosen_target_idx, listener_probs, us = listener_model.infer_from_listener_policy(speaker_message, new_candidates)
else:
assert False
reward_test = utils_calculate_reward(chosen_target_idx, target_candidate_idx)
rewards.append(reward_test)
#print(reward)
if trainspeaker:
reward = utils_calculate_reward_forgame_s(chosen_target_idx, target_candidate_idx)
speaker_model.remember_speaker_training_details(target_input, speaker_message, speaker_probs, reward)
speaker_model.train_speaker_policy_on_batch()
if trainlistener:
reward = utils_calculate_reward_forgame_l(chosen_target_idx, target_candidate_idx)
listener_model.remember_listener_training_details(speaker_message, chosen_target_idx, listener_probs, target_input, new_candidates, reward)
listener_model.train_listener_policy_on_batch()
rr += 1
print(sum(rewards)) | 5,355,199 |