Dataset schema (one row per source file; the "content" column holds the full file text):

| column | dtype | values |
| --- | --- | --- |
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |

Rows (per-file metadata, followed by the file content, followed by per-file statistics):
hexsha: 40b2c91082ea21890c36b449104ec87a0b8d9b4b | size: 266 | ext: py | lang: Python
repo: jadbin/guniflask @ 36253a962c056abf34884263c6919b02b921ad9c | path: tests/test_app/rest_app/rest_app/controllers/config_controller.py | licenses: ["MIT"] (identical across the stars/issues/forks columns)
max_stars_count: 12 (2018-09-06T06:14:59.000Z to 2021-04-18T06:30:44.000Z) | max_issues_count: null | max_forks_count: 2 (2019-09-08T22:01:26.000Z to 2020-08-03T07:23:29.000Z)
content:
from guniflask.config import settings
from guniflask.web import blueprint, get_route
@blueprint
class ConfigController:
def __init__(self):
pass
@get_route('/settings/<name>')
def get_setting(self, name):
return {name: settings[name]}
avg_line_length: 20.461538 | max_line_length: 46 | alphanum_fraction: 0.703008 | count_classes: 167 | score_classes: 0.62782 | count_generators: 0 | score_generators: 0 | count_decorators: 178 | score_decorators: 0.669173 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 18 | score_documentation: 0.067669
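The controller above exposes a single GET route. A rough plain-Flask analogue of what the `@blueprint`/`@get_route` decorators appear to express (assumed semantics for illustration, not guniflask's actual wiring):

```python
# Plain-Flask analogue of the guniflask controller above (assumed semantics).
from flask import Flask, jsonify

app = Flask(__name__)
settings = {"debug": True}  # hypothetical stand-in for guniflask.config.settings

@app.route('/settings/<name>', methods=['GET'])
def get_setting(name):
    return jsonify({name: settings[name]})

if __name__ == '__main__':
    with app.test_client() as client:
        print(client.get('/settings/debug').get_json())  # {'debug': True}
```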
hexsha: 40b4eef32d47c4960807376665ec44995d7e4116 | size: 14,062 | ext: py | lang: Python
repo: yasahi-hpc/AMRNet @ 5858d464bdfe409a5ab50889104768dda3c70508 | path: model/_UNet_trainer.py | licenses: ["MIT"] (identical across the stars/issues/forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from ._base_trainer import _BaseTrainer, MeasureMemory
import pathlib
import torch.multiprocessing as mp
import torch
from torch import nn
import horovod.torch as hvd
import numpy as np
import xarray as xr
import itertools
from .flow_dataset import FlowDataset
from .unet import UNet
import sys
from .visualization import save_flows
from .converter import save_as_netcdf
class UNetTrainer(_BaseTrainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model_name = 'UNet'
def _initialize(self, **kwargs):
# Horovod: Initialize library
hvd.init()
torch.manual_seed(self.seed)
if self.device == 'cuda':
# Horovod: Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(self.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
self.rank, self.size = hvd.rank(), hvd.size()
self.master = self.rank == 0
super()._prepare_dirs()
self.train_loader, self.val_loader, self.test_loader = super()._dataloaders()
self.model = self._get_model(self.run_number)
self.model = self.model.to(self.device)
## Optimizers
        # By default, Adasum doesn't need scaling up the learning rate
lr_scaler = hvd.size() if not self.use_adasum else 1
if self.device == 'cuda' and self.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
lr = self.lr * lr_scaler
self.opt = torch.optim.Adam(self.model.parameters(), lr=lr, betas=(self.beta_1, self.beta_2))
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(self.model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.opt, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if self.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
self.opt = hvd.DistributedOptimizer(self.opt,
named_parameters=self.model.named_parameters(),
compression=compression,
op=hvd.Adasum if self.use_adasum else hvd.Average,
gradient_predivide_factor=self.gradient_predivide_factor)
self.criterion = nn.L1Loss() if self.loss_type == 'mae_loss' else nn.MSELoss(reduction='mean')
# Set normalization coefficients
super()._set_normalization_coefs(shape=[1,-1,1,1])
# Memory measurement
device_name = 'cpu'
if self.device == 'cuda':
local_rank = hvd.local_rank()
device_name = f'{self.device}:{local_rank}'
self.memory = MeasureMemory(device=device_name)
# Synchronize
if self.device == 'cuda':
torch.cuda.synchronize() # Waits for everything to finish running
def _initialize_for_inference(self, **kwargs):
# Set output directory
super()._prepare_dirs()
self.train_loader, self.val_loader, self.test_loader = super()._dataloaders()
self.model = self._get_model(self.run_number)
self.model = self.model.to(self.device)
# Set normalization coefficients
super()._set_normalization_coefs(shape=[1,-1,1,1])
# Memory measurement
self.memory = MeasureMemory(device=self.device)
# Synchronize
if self.device == 'cuda':
torch.cuda.synchronize() # Waits for everything to finish running
def _get_model(self, run_number):
model = UNet(n_layers=8, hidden_dim=8, dim=self.dim, padding_mode=self.padding_mode)
if self.inference_mode:
self.epoch_start = self.load_nth_state_file
# To load the state file for inference
rank = 0
model.load_state_dict( torch.load(f'{self.state_file_dir}/model_{rank}_{self.epoch_start:03}.pt') )
else:
self.epoch_start = 0
if run_number > 0:
if self.master:
print(f'restart, {run_number}')
# Load model states from previous run
prev_run_number = run_number - 1
prev_result_filename = self.out_dir / f'flow_cnn_result_rank{self.rank}_rst{prev_run_number:03}.h5'
if not prev_result_filename.is_file():
raise IOError(f'prev_result_filename')
ds_prev = xr.open_dataset(prev_result_filename, engine='netcdf4')
# To load the previous files
epoch_end = ds_prev.attrs['epoch_end']
model.load_state_dict( torch.load(f'{self.model_dir}/model_{self.rank}_{epoch_end:03}.pt') )
# Next epoch should start from epoch_end + 1
self.epoch_start = int(epoch_end) + 1
return model
def _save_models(self, total_epoch):
torch.save(self.model.state_dict(), f'{self.model_dir}/model_{self.rank}_{total_epoch:03}.pt')
########### Main scripts
def _train(self, data_loader, epoch):
name = 'train'
self.model.train()
log_loss = 0
nb_samples = len(data_loader.sampler)
level = 2
# Timers
for i, (sdf, flows) in enumerate(data_loader):
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
#### Train Lv2
self.timer.start()
### Update weights
pred_flows_Lv2 = self.model(sdf_Lv2)
loss_mae = self.criterion(pred_flows_Lv2, flows_Lv2)
self.opt.zero_grad()
### Measure memory usage before backward
self.memory.measure()
if 'reserved' not in self.memory_consumption:
self.memory_consumption['reserved'] = self.memory.reserved()
self.memory_consumption['alloc'] = self.memory.alloc()
loss_mae.backward()
self.opt.step()
### Log losses
log_loss += loss_mae.item() / nb_samples
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Saving figures
if i == 0:
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 figures
level = 2
save_flows(flows_Lv2, name=name, img_dir = self.sub_img_dir, type_name = 'ref', level = level, epoch=epoch)
save_flows(pred_flows_Lv2_, name=name, img_dir = self.sub_img_dir, type_name = 'pred', level = level, epoch=epoch)
# Check errors
save_flows(pred_flows_Lv2_-flows_Lv2.cpu(), name=name, img_dir = self.sub_img_dir, type_name = 'error', level = level, epoch=epoch)
self.timer.stop()
self.elapsed_times[f'save_figs_{name}'].append(self.timer.elapsed_seconds())
# Horovod: average metric values across workers.
losses = {}
losses[f'log_loss_{name}_{self.loss_type}_Lv{level}'] = log_loss
for key, value in losses.items():
loss = super()._metric_average(value, key)
self.loss_dict[key].append(loss)
def _validation(self, data_loader, epoch, name):
self.model.eval()
log_loss = 0
nb_samples = len(data_loader.sampler)
level = 2
for i, (sdf, flows) in enumerate(data_loader):
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
            #### Validate Lv2
self.timer.start()
            ### Forward pass (no weight update in validation)
pred_flows_Lv2 = self.model(sdf_Lv2)
loss_mae = self.criterion(pred_flows_Lv2, flows_Lv2)
### Log losses
log_loss += loss_mae.item() / nb_samples
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Saving figures
if i == 0:
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 figures
level = 2
save_flows(flows_Lv2, name=name, img_dir = self.sub_img_dir, type_name = 'ref', level = level, epoch=epoch)
save_flows(pred_flows_Lv2_, name=name, img_dir = self.sub_img_dir, type_name = 'pred', level = level, epoch=epoch)
# Check errors
save_flows(pred_flows_Lv2_-flows_Lv2.cpu(), name=name, img_dir = self.sub_img_dir, type_name = 'error', level = level, epoch=epoch)
self.timer.stop()
self.elapsed_times[f'save_figs_{name}'].append(self.timer.elapsed_seconds())
# Horovod: average metric values across workers.
losses = {}
losses[f'log_loss_{name}_{self.loss_type}_Lv{level}'] = log_loss
for key, value in losses.items():
loss = super()._metric_average(value, key)
self.loss_dict[key].append(loss)
### For inference
def _infer(self):
with torch.no_grad():
self._convert(data_loader=self.val_loader, name='validation')
self._convert(data_loader=self.test_loader, name='test')
def _convert(self, data_loader, name):
self.model.eval()
level = 2
for indices, sdf, flows in data_loader:
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
#### Infer Lv2
self.timer.start()
            ### Forward pass (inference only)
pred_flows_Lv2 = self.model(sdf_Lv2)
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Save the data in netcdf format
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 data
save_as_netcdf(sdf=sdf_Lv2_cpu, real_flows=flows_Lv2.cpu(), pred_flows=pred_flows_Lv2_,
indices=indices, epoch=self.epoch_start, level=level, name=name, data_dir=self.inference_dir)
self.timer.stop()
self.elapsed_times[f'save_data_{name}'].append(self.timer.elapsed_seconds())
avg_line_length: 39.061111 | max_line_length: 147 | alphanum_fraction: 0.595435 | count_classes: 13,689 | score_classes: 0.973475 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,514 | score_documentation: 0.17878
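The `_initialize` method above is mostly Horovod plumbing around a standard Adam optimizer. A minimal, self-contained sketch of that wiring on a toy model (assumes a working `horovod.torch` install; the UNet and AMRNet-specific pieces are omitted):

```python
# Minimal sketch of the Horovod wiring used in _initialize, on a stand-in model.
# Launch with e.g. `horovodrun -np 2 python this_sketch.py`.
import torch
from torch import nn
import horovod.torch as hvd

hvd.init()
torch.manual_seed(12345)
if torch.cuda.is_available():
    torch.cuda.set_device(hvd.local_rank())

model = nn.Linear(8, 1)                     # stand-in for the UNet
lr_scaler = hvd.size()                      # scale lr by worker count (Average op)
opt = torch.optim.Adam(model.parameters(), lr=1e-3 * lr_scaler)

# Start every worker from identical weights/optimizer state, then let Horovod
# average gradients across workers on each step.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(opt, root_rank=0)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())

x, y = torch.randn(4, 8), torch.randn(4, 1)
loss = nn.functional.mse_loss(model(x), y)
opt.zero_grad()
loss.backward()
opt.step()
```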
hexsha: 40b61330deb7990837ff7794fbef7bc995f41538 | size: 5,557 | ext: py | lang: Python
repo: pjarosik/rlus @ 5804a37ed9221362c470ad4eb0e6b03e533bf1d8 | path: agents/vpg_policy_translation_with_dislocation.py | licenses: ["Apache-2.0"] (identical across the stars/issues/forks columns)
max_stars_count: 3 (2020-09-20T20:02:43.000Z to 2022-01-14T09:37:46.000Z) | max_issues_count: null | max_forks_count: 1 (2021-06-29T07:02:19.000Z to 2021-06-29T07:02:19.000Z)
content:
from spinup import vpg
import tensorflow as tf
import numpy as np
from gym.spaces import Box, Discrete
from envs.focal_point_task_us_env import FocalPointTaskUsEnv
from envs.phantom import (
ScatterersPhantom,
Ball,
Teddy
)
from envs.imaging import ImagingSystem, Probe
from envs.generator import ConstPhantomGenerator, RandomProbeGenerator
import envs.logger
import matplotlib
import argparse
N_STEPS_PER_EPISODE = 16
N_STEPS_PER_EPOCH = 64
EPOCHS = 251 # NO_EPISODES = (NSTEPS_PER_EPOCH/NSTEPS_PER_EPISODE)*EPOCHS
N_WORKERS = 4
def env_fn(trajectory_logger):
probe = Probe(
pos=np.array([-20 / 1000, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=10 / 1000
)
teddy = Teddy(
belly_pos=np.array([0 / 1000, 0, 50 / 1000]),
scale=12 / 1000,
head_offset=.9
)
phantom = ScatterersPhantom(
objects=[teddy],
x_border=(-40 / 1000, 40 / 1000),
y_border=(-40 / 1000, 40 / 1000),
z_border=(0, 90 / 1000),
n_scatterers=int(1e4),
n_bck_scatterers=int(1e3),
seed=42,
)
imaging = ImagingSystem(
c=1540,
fs=100e6,
image_width=40 / 1000,
image_height=90 / 1000,
image_resolution=(40, 90), # [pixels]
median_filter_size=5,
dr_threshold=-200,
dec=1,
no_lines=64
)
env = FocalPointTaskUsEnv(
dx_reward_coeff=1,
dz_reward_coeff=1,
imaging=imaging,
phantom_generator=ConstPhantomGenerator(phantom),
probe_generator=RandomProbeGenerator(
ref_probe=probe,
object_to_align=teddy,
seed=42,
x_pos=np.arange(-20/1000, 24/1000, step=5/1000),
focal_pos=[10/1000]
),
max_steps=N_STEPS_PER_EPISODE,
no_workers=N_WORKERS,
use_cache=True,
trajectory_logger=trajectory_logger,
step_size=5/1000,
# probe_dislocation_prob=0,
# max_probe_dislocation=2,
# dislocation_seed=42
)
return env
AC_KWARGS = dict(
hidden_sizes=[16, 32],
activation=tf.nn.relu
)
# Below functions base on openai.spinup's A-C scheme implementation.
def cnn(x,
training_ph,
hidden_sizes=(32,),
kernel_size=(3, 3),
pool_size=(2, 2),
output_activation=None
):
x = tf.layers.batch_normalization(x, training=training_ph)
for h in hidden_sizes[:-1]:
x = tf.layers.conv2d(x, filters=h, kernel_size=kernel_size)
x = tf.layers.batch_normalization(x, training=training_ph)
x = tf.nn.relu(x)
# x = tf.nn.tanh(x)
x = tf.layers.max_pooling2d(x, pool_size=pool_size, strides=pool_size)
x = tf.layers.flatten(x)
return tf.layers.dense(x, units=hidden_sizes[-1],
activation=output_activation)
def cnn_categorical_policy(x, a, training_ph, hidden_sizes, output_activation,
action_space):
act_dim = action_space.n
logits = cnn(x, training_ph, hidden_sizes=list(hidden_sizes) + [act_dim],
output_activation=None)
logp_all = tf.nn.log_softmax(logits)
pi = tf.squeeze(tf.multinomial(logits, 1),
axis=1) # action drawn from current policy
logp = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logp_all,
axis=1) # log probability of given actions
logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logp_all,
axis=1) # log probability of actions of given pi
return pi, logp, logp_pi, logp_all
def cnn_actor_critic(x, a, training_ph, hidden_sizes=(64, 64),
activation=tf.tanh,
output_activation=None, policy=None, action_space=None):
# default policy builder depends on action space
if policy is None and isinstance(action_space, Box):
        policy = cnn_gaussian_policy  # NOTE: not defined in this file; only Discrete action spaces work as-is (see the sketch below)
elif policy is None and isinstance(action_space, Discrete):
policy = cnn_categorical_policy
with tf.variable_scope('pi'):
pi, logp, logp_pi, logp_all = policy(x, a, training_ph, hidden_sizes,
output_activation, action_space)
with tf.variable_scope('v'):
v = tf.squeeze(
cnn(x, training_ph, hidden_sizes=list(hidden_sizes) + [1],
output_activation=None), axis=1)
return pi, logp, logp_pi, v, logp_all
def main():
matplotlib.use('agg')
np.random.seed(2442)
parser = argparse.ArgumentParser(description="Train agent in env: %s" %
FocalPointTaskUsEnv.__name__)
parser.add_argument("--exp_dir", dest="exp_dir",
help="Where to put all information about the experiment",
required=True)
args = parser.parse_args()
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=".",
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=200
)
spinup_logger_kwargs = dict(output_dir=".", exp_name='log_files')
env_builder = lambda: env_fn(trajactory_logger)
vpg(env_fn=env_builder,
actor_critic=cnn_actor_critic,
ac_kwargs=AC_KWARGS,
steps_per_epoch=N_STEPS_PER_EPOCH,
epochs=EPOCHS,
max_ep_len=N_STEPS_PER_EPISODE,
logger_kwargs=spinup_logger_kwargs,
save_freq=200,
lam=0.95
)
if __name__ == "__main__":
main()
avg_line_length: 31.936782 | max_line_length: 81 | alphanum_fraction: 0.620659 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 535 | score_documentation: 0.096275
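`cnn_actor_critic` above falls back to `cnn_gaussian_policy` for Box action spaces, but that function is never defined in the file. A hedged sketch of what such a policy head could look like, reusing the file's `cnn()` feature extractor and mirroring the usual spinup Gaussian-policy pattern (an assumption, not the author's code):

```python
# Hypothetical cnn_gaussian_policy for Box action spaces (missing from the file above).
EPS = 1e-8

def gaussian_likelihood(x, mu, log_std):
    pre_sum = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2
                      + 2 * log_std + np.log(2 * np.pi))
    return tf.reduce_sum(pre_sum, axis=1)

def cnn_gaussian_policy(x, a, training_ph, hidden_sizes, output_activation, action_space):
    act_dim = action_space.shape[0]
    mu = cnn(x, training_ph, hidden_sizes=list(hidden_sizes) + [act_dim],
             output_activation=output_activation)
    log_std = tf.get_variable('log_std',
                              initializer=-0.5 * np.ones(act_dim, dtype=np.float32))
    std = tf.exp(log_std)
    pi = mu + tf.random_normal(tf.shape(mu)) * std   # action sampled from current policy
    logp = gaussian_likelihood(a, mu, log_std)       # log prob of given actions
    logp_pi = gaussian_likelihood(pi, mu, log_std)   # log prob of sampled actions
    # cnn_actor_critic unpacks four values; a continuous policy has no per-action
    # log-prob table, so None stands in for logp_all.
    return pi, logp, logp_pi, None
```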
hexsha: 40b7ac2bc7f8f3621a710ed64cc2cd9096f796f1 | size: 791 | ext: py | lang: Python
repo: zongdaoming/CMT @ fc3773bb6c6b1ab091688addfffca3fb1e382ae4 | path: lib/losses/dice.py | licenses: ["MIT"] (identical across the stars/issues/forks columns)
max_stars_count: 3 (2021-05-10T20:12:23.000Z to 2021-11-24T18:01:13.000Z) | max_issues_count: null | max_forks_count: null
content:
import sys,os
sys.path.append('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai')
from lib.losses.BaseClass import _AbstractDiceLoss
from lib.losses.basic import *
class DiceLoss(_AbstractDiceLoss):
"""Computes Dice Loss according to https://arxiv.org/abs/1606.04797.
For multi-class segmentation `weight` parameter can be used to assign different weights per class.
"""
def __init__(self, classes=4, skip_index_after=None, weight=None, sigmoid_normalization=True ):
super().__init__(weight, sigmoid_normalization)
self.classes = classes
if skip_index_after is not None:
self.skip_index_after = skip_index_after
def dice(self, input, target, weight):
return compute_per_channel_dice(input, target, weights=self.weight)
avg_line_length: 35.954545 | max_line_length: 102 | alphanum_fraction: 0.737042 | count_classes: 619 | score_classes: 0.782554 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 231 | score_documentation: 0.292035
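`DiceLoss.dice` above delegates to `compute_per_channel_dice` from `lib.losses.basic`, which is not shown here. A self-contained sketch of the standard per-channel soft Dice computation it presumably performs (assumed formulation, matching the class's sigmoid-normalization default):

```python
# Self-contained sketch of a per-channel soft Dice score (assumed formula).
import torch

def per_channel_dice(probs, target, epsilon=1e-6):
    # probs, target: (N, C, ...) tensors with values in [0, 1]
    dims = (0,) + tuple(range(2, probs.dim()))          # sum over batch and spatial dims
    intersection = (probs * target).sum(dim=dims)
    denominator = probs.sum(dim=dims) + target.sum(dim=dims)
    return (2.0 * intersection + epsilon) / (denominator + epsilon)

logits = torch.randn(2, 4, 8, 8)                        # N=2, 4 classes, 8x8 images
target = torch.nn.functional.one_hot(
    torch.randint(0, 4, (2, 8, 8)), 4).permute(0, 3, 1, 2).float()
dice = per_channel_dice(torch.sigmoid(logits), target)  # one score per class
loss = 1.0 - dice.mean()                                # a Dice loss is typically 1 - mean dice
```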
hexsha: 40b7ae2027514475dd06028f5df19e3941be6b6d | size: 1,190 | ext: py | lang: Python
repo: jasantunes/alfred-golinks @ f35fa87adedf07329469c0161c8808401a6925d0 | path: icons.py | licenses: ["MIT"] (identical across the stars/issues/forks columns)
max_stars_count: 312 (2015-01-02T12:44:03.000Z to 2020-06-21T03:53:29.000Z) | max_issues_count: 7 (2015-03-22T11:57:50.000Z to 2020-02-09T08:35:05.000Z) | max_forks_count: 35 (2015-09-06T09:36:32.000Z to 2020-06-21T19:17:55.000Z)
content:
# encoding: utf-8
#
# Copyright (c) 2019 Dean Jackson <[email protected]>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2019-09-06
#
"""Overlay check mark on icons."""
from __future__ import print_function, absolute_import
from Cocoa import (
NSBitmapImageRep,
NSPNGFileType,
NSImage,
NSMakeSize,
NSCompositeCopy,
NSSizeToCGSize,
NSZeroPoint,
)
from CoreGraphics import CGRectZero
def overlay(src, overlay, dest):
"""Create image ``dest`` by putting ``overlay`` on top of ``src``.
Args:
src (str): Path to source image.
overlay (str): Path to overlay image.
dest (str): Path to save combined image to.
"""
src = NSImage.alloc().initWithContentsOfFile_(src)
overlay = NSImage.alloc().initWithContentsOfFile_(overlay)
img = NSImage.alloc().initWithSize_(src.size())
img.lockFocus()
rect = (0, 0), src.size()
src.drawInRect_(rect)
overlay.drawInRect_(rect)
img.unlockFocus()
rep = NSBitmapImageRep.imageRepWithData_(img.TIFFRepresentation())
data = rep.representationUsingType_properties_(NSPNGFileType,{})
data.writeToFile_atomically_(dest, False)
avg_line_length: 25.869565 | max_line_length: 70 | alphanum_fraction: 0.691597 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 412 | score_documentation: 0.346218
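A short usage sketch for the `overlay` helper above (macOS only, requires the PyObjC Cocoa bindings). The file names are placeholders, not paths from the original workflow:

```python
# Hypothetical usage: stamp a check mark onto a workflow icon.
if __name__ == '__main__':
    overlay('icon.png', 'checkmark.png', 'icon-done.png')
```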
hexsha: 40b9ac7b9f67d52c5c73796669c1ff5e0996665b | size: 2,491 | ext: py | lang: Python
repo: warak/IOT-GrannyWarden @ 54ad51b7bf7377ce7b87e72091c9dbf7f686050d | path: project/python/Main/CTRL/tracker.py | licenses: ["MIT"] (identical across the stars/issues/forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import datetime
from threading import Thread
from time import sleep
import DBC.dbcreate as dbc
class Tracker(Thread):
max_idle_time = 720 # minutes
default_sleep = 3600 # secs
def track(self):
dbcl = dbc.DBClient()
# print(dbcl.getlasttime())
print("Tracker activated")
while True:
date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H:%M')
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
# print(yearmonthday)
# print(hoursminutes)
year = int(yearmonthday[0])
month = int(yearmonthday[1])
day = int(yearmonthday[2])
hour = int(hoursminutes[0])
minute = int(hoursminutes[1])
date = dbcl.getlasttime()
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
#print(yearmonthday)
#print(hoursminutes)
yeard = int(yearmonthday[0])
monthd = int(yearmonthday[1])
dayd = int(yearmonthday[2])
hourd = int(hoursminutes[0])
minuted = int(hoursminutes[1])
            # this looping is clumsy; a ready-made library could be used instead (see the sketch below)
if year == yeard:
if month == monthd:
if day == dayd:
if hour == hourd:
away = minute - minuted
else:
away = ((hour*60) + minute) - ((hourd*60) + minuted)
else:
if hour == hourd:
away = ((hourd + (day-dayd)*24 - hour) * 60) + minute - minuted
else:
away = ((day*hour*60) + minute) - ((dayd*hourd*60) + minuted)
else:
                # incomplete: dates in different years are not handled properly
away = 3
#print(away)
self.actions(away, dbcl.getlastaway())
sleep(self.default_sleep)
def run(self):
self.track()
def actions(self, time, away):
if time < self.max_idle_time:
print("Everything ok")
else:
away = (int(away) * 60)
if time > away:
print("Contacting users")
else:
print("Holiday mode") | 30.753086 | 91 | 0.470092 | 2,400 | 0.960769 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.125701 |
hexsha: 40ba1c6c5aded5c9a1f75bcde2e5830a948185e5 | size: 39,197 | ext: py | lang: Python
repo: cnnradams/python-spanner @ 33055e577288cbcc848aa9abf43ccd382c9907a9 | path: tests/unit/test_snapshot.py | licenses: ["Apache-2.0"] (identical across the stars/issues/forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import google.api_core.gapic_v1.method
import mock
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
SQL_QUERY = """\
SELECT first_name, last_name, age FROM citizens ORDER BY age"""
SQL_QUERY_WITH_PARAM = """
SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age"""
PARAMS = {"max_age": 30}
PARAM_TYPES = {"max_age": "INT64"}
SQL_QUERY_WITH_BYTES_PARAM = """\
SELECT image_name FROM images WHERE @bytes IN image_data"""
PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"}
RESUME_TOKEN = b"DEADBEEF"
TXN_ID = b"DEAFBEAD"
SECONDS = 3
MICROS = 123456
class Test_restart_on_unavailable(unittest.TestCase):
def _call_fut(self, restart):
from google.cloud.spanner_v1.snapshot import _restart_on_unavailable
return _restart_on_unavailable(restart)
def _make_item(self, value, resume_token=b""):
return mock.Mock(
value=value, resume_token=resume_token, spec=["value", "resume_token"]
)
def test_iteration_w_empty_raw(self):
raw = _MockIterator()
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), [])
def test_iteration_w_non_empty_raw(self):
ITEMS = (self._make_item(0), self._make_item(1))
raw = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with()
    def test_iteration_w_raw_w_resume_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
self._make_item(3),
)
raw = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with()
def test_iteration_w_raw_raising_unavailable_no_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
)
before = _MockIterator(fail_after=True)
after = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
self.assertEqual(restart.mock_calls, [mock.call(), mock.call(resume_token=b"")])
def test_iteration_w_raw_raising_unavailable(self):
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(*(FIRST + SECOND), fail_after=True)
after = _MockIterator(*LAST)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(FIRST + LAST))
self.assertEqual(
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
)
def test_iteration_w_raw_raising_unavailable_after_token(self):
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2), self._make_item(3))
before = _MockIterator(*FIRST, fail_after=True)
after = _MockIterator(*SECOND)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(FIRST + SECOND))
self.assertEqual(
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
)
class Test_SnapshotBase(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import _SnapshotBase
return _SnapshotBase
def _make_one(self, session):
return self._getTargetClass()(session)
def _makeDerived(self, session):
class _Derived(self._getTargetClass()):
_transaction_id = None
_multi_use = False
def _make_txn_selector(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionOptions,
TransactionSelector,
)
if self._transaction_id:
return TransactionSelector(id=self._transaction_id)
options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if self._multi_use:
return TransactionSelector(begin=options)
return TransactionSelector(single_use=options)
return _Derived(session)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def test_ctor(self):
session = _Session()
base = self._make_one(session)
self.assertIs(base._session, session)
self.assertEqual(base._execute_sql_count, 0)
def test__make_txn_selector_virtual(self):
session = _Session()
base = self._make_one(session)
with self.assertRaises(NotImplementedError):
base._make_txn_selector()
def test_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.streaming_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.read(TABLE_NAME, COLUMNS, keyset))
def _read_helper(self, multi_use, first=True, count=0, partition=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType
from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1._helpers import _make_value_pb
VALUES = [[u"bharney", 31], [u"phred", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
struct_type_pb = StructType(
fields=[
StructType.Field(name="name", type=Type(code=STRING)),
StructType.Field(name="age", type=Type(code=INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb),
PartialResultSet(values=VALUE_PBS[1], stats=stats_pb),
]
KEYS = [["[email protected]"], ["[email protected]"]]
keyset = KeySet(keys=KEYS)
INDEX = "email-address-index"
LIMIT = 20
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.streaming_read.return_value = _MockIterator(*result_sets)
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
if not first:
derived._transaction_id = TXN_ID
if partition is not None: # 'limit' and 'partition' incompatible
result_set = derived.read(
TABLE_NAME, COLUMNS, keyset, index=INDEX, partition=partition
)
else:
result_set = derived.read(
TABLE_NAME, COLUMNS, keyset, index=INDEX, limit=LIMIT
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
if partition is not None:
expected_limit = 0
else:
expected_limit = LIMIT
api.streaming_read.assert_called_once_with(
self.SESSION_NAME,
TABLE_NAME,
COLUMNS,
keyset._to_pb(),
transaction=expected_transaction,
index=INDEX,
limit=expected_limit,
partition_token=partition,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_read_wo_multi_use(self):
self._read_helper(multi_use=False)
def test_read_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=False, count=1)
def test_read_w_multi_use_wo_first(self):
self._read_helper(multi_use=True, first=False)
def test_read_w_multi_use_wo_first_w_count_gt_0(self):
self._read_helper(multi_use=True, first=False, count=1)
def test_read_w_multi_use_w_first_w_partition(self):
PARTITION = b"FADEABED"
self._read_helper(multi_use=True, first=True, partition=PARTITION)
def test_read_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=True, first=True, count=1)
def test_execute_sql_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.execute_streaming_sql.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.execute_sql(SQL_QUERY))
self.assertEqual(derived._execute_sql_count, 1)
def test_execute_sql_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(ValueError):
derived.execute_sql(SQL_QUERY_WITH_PARAM, PARAMS)
def _execute_sql_helper(
self,
multi_use,
first=True,
count=0,
partition=None,
sql_count=0,
query_options=None,
timeout=google.api_core.gapic_v1.method.DEFAULT,
retry=google.api_core.gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType
from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64
from google.cloud.spanner_v1._helpers import (
_make_value_pb,
_merge_query_options,
)
VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
MODE = 2 # PROFILE
struct_type_pb = StructType(
fields=[
StructType.Field(name="first_name", type=Type(code=STRING)),
StructType.Field(name="last_name", type=Type(code=STRING)),
StructType.Field(name="age", type=Type(code=INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb),
PartialResultSet(values=VALUE_PBS[1], stats=stats_pb),
]
iterator = _MockIterator(*result_sets)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.execute_streaming_sql.return_value = iterator
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
derived._execute_sql_count = sql_count
if not first:
derived._transaction_id = TXN_ID
result_set = derived.execute_sql(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
query_mode=MODE,
query_options=query_options,
partition=partition,
retry=retry,
timeout=timeout,
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_query_options = database._instance._client._query_options
if query_options:
expected_query_options = _merge_query_options(
expected_query_options, query_options
)
api.execute_streaming_sql.assert_called_once_with(
self.SESSION_NAME,
SQL_QUERY_WITH_PARAM,
transaction=expected_transaction,
params=expected_params,
param_types=PARAM_TYPES,
query_mode=MODE,
query_options=expected_query_options,
partition_token=partition,
seqno=sql_count,
metadata=[("google-cloud-resource-prefix", database.name)],
timeout=timeout,
retry=retry,
)
self.assertEqual(derived._execute_sql_count, sql_count + 1)
def test_execute_sql_wo_multi_use(self):
self._execute_sql_helper(multi_use=False)
def test_execute_sql_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=False, count=1)
def test_execute_sql_w_multi_use_wo_first(self):
self._execute_sql_helper(multi_use=True, first=False, sql_count=1)
def test_execute_sql_w_multi_use_wo_first_w_count_gt_0(self):
self._execute_sql_helper(multi_use=True, first=False, count=1)
def test_execute_sql_w_multi_use_w_first(self):
self._execute_sql_helper(multi_use=True, first=True)
def test_execute_sql_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=True, first=True, count=1)
def test_execute_sql_w_retry(self):
self._execute_sql_helper(multi_use=False, retry=None)
def test_execute_sql_w_timeout(self):
self._execute_sql_helper(multi_use=False, timeout=None)
def test_execute_sql_w_query_options(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._execute_sql_helper(
multi_use=False,
query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"),
)
def _partition_read_helper(
self, multi_use, w_txn, size=None, max_partitions=None, index=None
):
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
keyset = KeySet(all_=True)
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_read.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_read(
TABLE_NAME,
COLUMNS,
keyset,
index=index,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_read.assert_called_once_with(
session=self.SESSION_NAME,
table=TABLE_NAME,
columns=COLUMNS,
key_set=keyset._to_pb(),
transaction=expected_txn_selector,
index=index,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_read_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=False, w_txn=True)
def test_partition_read_wo_existing_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=True, w_txn=False)
def test_partition_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_read(TABLE_NAME, COLUMNS, keyset))
def test_partition_read_ok_w_index_no_options(self):
self._partition_read_helper(multi_use=True, w_txn=True, index="index")
def test_partition_read_ok_w_size(self):
self._partition_read_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_read_ok_w_max_partitions(self):
self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4)
def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
from google.cloud.spanner_v1._helpers import _make_value_pb
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_query.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_query(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_query.assert_called_once_with(
session=self.SESSION_NAME,
sql=SQL_QUERY_WITH_PARAM,
transaction=expected_txn_selector,
params=expected_params,
param_types=PARAM_TYPES,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_query_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_query.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_query(SQL_QUERY))
def test_partition_query_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(ValueError):
list(derived.partition_query(SQL_QUERY_WITH_PARAM, PARAMS))
def test_partition_query_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=False, w_txn=True)
def test_partition_query_wo_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=True, w_txn=False)
def test_partition_query_ok_w_index_no_options(self):
self._partition_query_helper(multi_use=True, w_txn=True)
def test_partition_query_ok_w_size(self):
self._partition_query_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_query_ok_w_max_partitions(self):
self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4)
class TestSnapshot(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import Snapshot
return Snapshot
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def _makeTimestamp(self):
import datetime
from google.cloud._helpers import UTC
return datetime.datetime.utcnow().replace(tzinfo=UTC)
def _makeDuration(self, seconds=1, microseconds=0):
import datetime
return datetime.timedelta(seconds=seconds, microseconds=microseconds)
def test_ctor_defaults(self):
session = _Session()
snapshot = self._make_one(session)
self.assertIs(snapshot._session, session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multiple_options(self):
timestamp = self._makeTimestamp()
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, read_timestamp=timestamp, max_staleness=duration)
def test_ctor_w_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertEqual(snapshot._min_read_timestamp, timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_max_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertEqual(snapshot._max_staleness, duration)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, min_read_timestamp=timestamp, multi_use=True)
def test_ctor_w_multi_use_and_max_staleness(self):
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, max_staleness=duration, multi_use=True)
def test_ctor_w_multi_use_and_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertTrue(snapshot._multi_use)
def test__make_txn_selector_w_transaction_id(self):
session = _Session()
snapshot = self._make_one(session)
snapshot._transaction_id = TXN_ID
selector = snapshot._make_txn_selector()
self.assertEqual(selector.id, TXN_ID)
def test__make_txn_selector_strong(self):
session = _Session()
snapshot = self._make_one(session)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_min_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.min_read_timestamp), timestamp
)
def test__make_txn_selector_w_max_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.max_staleness.seconds, 3)
self.assertEqual(options.read_only.max_staleness.nanos, 123456000)
def test__make_txn_selector_w_exact_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test__make_txn_selector_strong_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp_w_multi_use(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_exact_staleness_w_multi_use(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test_begin_wo_multi_use(self):
session = _Session()
snapshot = self._make_one(session)
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_read_request_count_gt_0(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._read_request_count = 1
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_existing_txn_id(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._transaction_id = TXN_ID
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.begin_transaction.side_effect = RuntimeError()
timestamp = self._makeTimestamp()
session = _Session(database)
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
with self.assertRaises(RuntimeError):
snapshot.begin()
def test_begin_ok_exact_staleness(self):
from google.protobuf.duration_pb2 import Duration
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
duration = self._makeDuration(seconds=SECONDS, microseconds=MICROS)
session = _Session(database)
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_begin_ok_exact_strong(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
session = _Session(database)
snapshot = self._make_one(session, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
class _Client(object):
def __init__(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1")
class _Instance(object):
def __init__(self):
self._client = _Client()
class _Database(object):
def __init__(self):
self.name = "testing"
self._instance = _Instance()
class _Session(object):
def __init__(self, database=None, name=TestSnapshot.SESSION_NAME):
self._database = database
self.name = name
class _MockIterator(object):
def __init__(self, *values, **kw):
self._iter_values = iter(values)
self._fail_after = kw.pop("fail_after", False)
def __iter__(self):
return self
def __next__(self):
from google.api_core.exceptions import ServiceUnavailable
try:
return next(self._iter_values)
except StopIteration:
if self._fail_after:
raise ServiceUnavailable("testing")
raise
next = __next__
avg_line_length: 37.725698 | max_line_length: 88 | alphanum_fraction: 0.672858 | count_classes: 37,973 | score_classes: 0.968773 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,685 | score_documentation: 0.042988
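The `_restart_on_unavailable` tests above (together with `_MockIterator`) exercise a resume-token restart wrapper. A rough sketch of the behaviour under test, for orientation only; this is not the google-cloud-spanner implementation:

```python
# Illustrative sketch of a resume-token restart wrapper like the one under test.
from google.api_core.exceptions import ServiceUnavailable

def restart_on_unavailable(restart):
    """Buffer items until a resume token arrives; on a 503, restart from that token."""
    resume_token = b""
    item_buffer = []
    iterator = restart()
    while True:
        try:
            for item in iterator:
                item_buffer.append(item)
                if item.resume_token:              # safe point: flush the buffer
                    resume_token = item.resume_token
                    for buffered in item_buffer:
                        yield buffered
                    item_buffer.clear()
        except ServiceUnavailable:
            item_buffer.clear()                    # unflushed items are replayed by the server
            iterator = restart(resume_token=resume_token)
            continue
        break
    for buffered in item_buffer:                   # flush the tail on a clean end of stream
        yield buffered
```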
hexsha: 40ba39e3f0879514163c0009fb4d3d4f6df2004d | size: 592 | ext: py | lang: Python
repo: quake0day/oj @ c09333d1738f8735de0d5d825db6f4b707585670 | path: hashtable.py | licenses: ["MIT"] (identical across the stars/issues/forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
A = ['a','b']
B = ['c','b','a']
def generatehash(A):
hashA = {}
for item in A:
if item not in hashA:
hashA[item] = 1
else:
hashA[item] += 1
return hashA
def compareHash(A, B):
lenA = len(A)
lenB = len(B)
hashA = generatehash(A)
if lenB < lenA:
return False
elif lenB == lenA:
return hashA == generatehash(B)
else:
for i in xrange(lenB-lenA+1):
newB = B[i:i+lenA]
if hashA == generatehash(newB):
return True
return False
print compareHash(A, B)
| 19.733333 | 43 | 0.496622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.025338 |
40bab880835594679397baae0088587d6d0269a6 | 2,904 | py | Python | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
]
| 1 | 2019-12-18T17:28:11.000Z | 2019-12-18T17:28:11.000Z | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
]
| null | null | null | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
]
| null | null | null | # Importing required packages:
import pandas as pd
from tkinter import *
from tkinter.ttk import *
root = Tk()
# To visualize input DataFrame:
def generate_plot(gui_root, df, x_axis, y_axis=None,
plot={'type':None, 'hue':None},
aesthetics={'style':'whitegrid', 'palette':'hsv',
'size':(10,7), 'dpi':100}):
"""
DESCRIPTION: Reads input Pandas DataFrame and returns a plot based on selected parameters.
PARAMETERS:
> gui_root : [Required] Accepts Tkinter application base class (Tk) initialized variable/instance.
> df : [Required] Accepts Pandas DataFrame.
"""
# Importing external dependencies:
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import seaborn as sns
sns.set(style=aesthetics['style'], palette=aesthetics['palette'])
import warnings
warnings.filterwarnings('ignore')
# Defining Tableau colors:
tableau_20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199,
199),(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scaling over RGB values to [0,1] range (Matplotlib acceptable format):
for i in range(len(tableau_20)):
r,g,b = tableau_20[i]
tableau_20[i] = (r/255., g/255., b/255.)
# Setting up Tkinter Frame:
lf = Labelframe(gui_root)
lf.grid(row=0, column=0, sticky='nwes', padx=3, pady=3)
# Setting up Canvas backed by Matplotlib:
fig = Figure(figsize=aesthetics['size'], dpi=aesthetics['dpi'])
ax = fig.add_subplot(111)
# Drawing various plots with Seaborn:
if plot['type']=='lineplot': # Lineplot
g = sns.lineplot(x=x_axis, y=y_axis, data=df, ax=ax)
elif plot['type']=='regplot': # Regplot
g = sns.regplot(x=x_axis, y=y_axis, data=df, color=tableau_20[16], ax=ax)
elif plot['type']=='distplot': # Distplot
g = sns.distplot(a=df[x_axis].dropna(), color=tableau_20[7],
hist_kws=dict(edgecolor='k', linewidth=0.5), ax=ax)
elif plot['type']=='barplot': # Grouped Barplot
        # catplot is figure-level and draws in its own figure, so use the axes-level barplot here
        g = sns.barplot(x=x_axis, y=y_axis, hue=plot['hue'], data=df,
                        palette='rocket', ax=ax)
        sns.despine(ax=ax, left=True)
else:
# More to be added later
pass
# Displaying plot on Canvas:
canvas = FigureCanvasTkAgg(fig, master=lf)
canvas.draw()
canvas.get_tk_widget().grid(row=0, column=0)
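# Demo data for the call below (added illustration; the DataFrame and column names are
# assumptions, not data from the original project).
demo_df = pd.DataFrame({'x': list(range(20)), 'y': [v * v for v in range(20)]})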
generate_plot(root, demo_df, 'x', y_axis='y', plot={'type': 'lineplot', 'hue': None})
root.mainloop()
| 38.72 | 107 | 0.587466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.301653 |
40bac90ddb15602100d2d54bde81a9850ed02d27 | 4,737 | py | Python | utils/get_dataset.py | gautierdag/pytorch-attentive-lm | d08ce1813a5ee575c0aac86773cd95aa174ab5e1 | [
"MIT"
]
| 16 | 2019-01-28T16:39:52.000Z | 2021-12-25T11:07:55.000Z | utils/get_dataset.py | MarkWuCL/pytorch-attentive-lm | d08ce1813a5ee575c0aac86773cd95aa174ab5e1 | [
"MIT"
]
| 1 | 2021-09-14T22:14:28.000Z | 2021-09-15T16:12:43.000Z | utils/get_dataset.py | MarkWuCL/pytorch-attentive-lm | d08ce1813a5ee575c0aac86773cd95aa174ab5e1 | [
"MIT"
]
| 6 | 2019-01-25T00:08:33.000Z | 2022-02-15T06:47:54.000Z | import os
import torch
from torch.utils.data import DataLoader, TensorDataset
import requests
import io
import zipfile
from .data_reader import read_vocabulary, read_lm_data, lm_data_producer
from .pre_process_wikitext import pre_process
def get_dataset(dataset, batch_size, device):
"""
    Downloads and pre-processes the requested corpus ('ptb' or 'wiki-02') if necessary,
    then returns a DataLoader for the train, validation and test splits plus the vocabulary.
"""
download_dataset(dataset) # downloads and preprocess dataset if needed
if dataset == "wiki-02":
data_files = [".data/wikitext-2/wikitext-2/wiki.train.tokens.sents",
".data/wikitext-2/wikitext-2/wiki.valid.tokens.sents",
".data/wikitext-2/wikitext-2/wiki.test.tokens.sents"]
vocab_size = 33278 + 1 # add 1 to account for PAD
if dataset == 'ptb':
data_files = [".data/penn-treebank/ptb.train.txt",
".data/penn-treebank/ptb.valid.txt",
".data/penn-treebank/ptb.test.txt"]
vocab_size = 10000 + 1 # add 1 to account for PAD
vocabulary = read_vocabulary(data_files, vocab_size)
train_data, valid_data, test_data = read_lm_data(data_files,
vocabulary)
# Convert numpy to datasets and obtain iterators for each
train_data = lm_data_producer(train_data)
train_x = torch.tensor(train_data[0], dtype=torch.long, device=device)
train_y = torch.tensor(train_data[1], dtype=torch.long, device=device)
train_lengths = torch.tensor(
train_data[2], dtype=torch.float, device=device)
train_dataset = TensorDataset(train_x, train_y, train_lengths)
valid_data = lm_data_producer(valid_data)
valid_x = torch.tensor(valid_data[0], dtype=torch.long, device=device)
valid_y = torch.tensor(valid_data[1], dtype=torch.long, device=device)
valid_lengths = torch.tensor(
valid_data[2], dtype=torch.float, device=device)
valid_dataset = TensorDataset(valid_x, valid_y, valid_lengths)
test_data = lm_data_producer(test_data)
test_x = torch.tensor(test_data[0], dtype=torch.long, device=device)
test_y = torch.tensor(test_data[1], dtype=torch.long, device=device)
test_lengths = torch.tensor(test_data[2], dtype=torch.float, device=device)
test_dataset = TensorDataset(test_x, test_y, test_lengths)
train_iter = DataLoader(train_dataset, batch_size=batch_size)
valid_iter = DataLoader(valid_dataset, batch_size=batch_size)
test_iter = DataLoader(test_dataset, batch_size=batch_size)
return train_iter, valid_iter, test_iter, vocabulary
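# Usage sketch (added for illustration; the batch size and device are assumptions):
#
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     train_iter, valid_iter, test_iter, vocab = get_dataset('ptb', batch_size=20, device=device)
#     for x, y, lengths in train_iter:
#         ...  # each batch is an (inputs, targets, lengths) triple of tensors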
# downloading/preprocessing functions
def download_dataset(dataset):
if not os.path.exists('.data'):
os.makedirs('.data')
if dataset == 'ptb':
folder_name = 'penn-treebank'
filename = 'ptb.test.txt'
if dataset == 'wiki-02':
folder_name = 'wikitext-2'
filename = 'wiki.test.tokens'
dataset_path = '.data/' + folder_name
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
if dataset == 'ptb':
filepath = dataset_path + '/' + filename
if not os.path.exists(filepath):
download_ptb(dataset_path)
if dataset == 'wiki-02':
filepath = dataset_path + '/'+folder_name + '/'+filename
if not os.path.exists(filepath):
download_and_preproc_wiki(dataset_path)
return
def download_ptb(dataset_path):
urls = ['https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt']
# To save to a relative path.
r = requests.get(urls[0])
with open(dataset_path+'/ptb.train.txt', 'wb') as f:
f.write(r.content)
r = requests.get(urls[1])
with open(dataset_path+'/ptb.valid.txt', 'wb') as f:
f.write(r.content)
r = requests.get(urls[2])
with open(dataset_path+'/ptb.test.txt', 'wb') as f:
f.write(r.content)
def download_and_preproc_wiki(dataset_path):
print("Downloading wikitext")
url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(dataset_path)
train = ".data/wikitext-2/wikitext-2/wiki.train.tokens"
valid = ".data/wikitext-2/wikitext-2/wiki.valid.tokens"
test = ".data/wikitext-2/wikitext-2/wiki.test.tokens"
print("Pre-processing wikitext-02 training set...")
pre_process(train)
print("Pre-processing wikitext-02 validation set...")
pre_process(valid)
print("Pre-processing wikitext-02 test set...")
pre_process(test)
| 37.896 | 90 | 0.677222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,333 | 0.281402 |
40bb7183ce1df8b018466acd5e09bcd49d75d2d5 | 289 | py | Python | Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
]
| null | null | null | Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
]
| null | null | null | Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
]
| null | null | null | from tkinter import *
#Cria a nossa tela
instancia = Tk()
#Dá um título a tela
instancia.title('Calculadora para Estatística')
#Dá um tamanho a tela
instancia.geometry("800x600")
#Dá um ícone ao aplicativo
#instancia.wm_iconbitmap('icone.ico')
#Inicia o programa
instancia.mainloop()
| 17 | 47 | 0.761246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.627119 |
40bb7e3f95f2a2dc9b27a2c8dd06c761ef722a37 | 6,235 | py | Python | property_scraper.py | iplaughlin/property_scraping | 739d05a272eddb5f2b48f9fc85f407904067b931 | [
"MIT"
]
| null | null | null | property_scraper.py | iplaughlin/property_scraping | 739d05a272eddb5f2b48f9fc85f407904067b931 | [
"MIT"
]
| null | null | null | property_scraper.py | iplaughlin/property_scraping | 739d05a272eddb5f2b48f9fc85f407904067b931 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 19 09:42:09 2022
@author: iaala
"""
import requests
import sql_configs
import datetime
import os
from bs4 import BeautifulSoup
import time
from find_tables import (
table_information_one,
table_information_two,
table_information_three,
table_information_four,
)
from create_connection import create_sql_connection
import columns
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
def main():
for ppin in range(200001, 18600, -1):
try:
conn = create_sql_connection(
user=sql_configs.USER,
password=sql_configs.PASSWORD,
host=sql_configs.HOST,
database=sql_configs.DATABASE,
)
temp_dict = dict()
print(ppin)
with open(os.path.join(__location__, 'status.txt'), 'w') as f:
f.write(f"currently starting {ppin}")
c = conn.cursor()
c.execute('select pin from parcel;')
items_collected = [int(''.join(map(str, item))) for item in c.fetchall()]
if ppin not in items_collected:
url = f"https://madisonproperty.countygovservices.com/Property/Property/Summary?taxyear=2022&ppin={ppin}"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0"
}
resp = requests.get(url, headers=headers)
time.sleep(0.25)
soup = BeautifulSoup(resp.text, "html.parser")
parcel_info = table_information_one(soup, "collapseParcelInfo")
if parcel_info == {}:
parcel_info = {column: "" for column in columns.PARCEL_INFO_COLUMNS}
parcel_info['PIN']= ppin
property_values = table_information_one(soup, "collapseSummaryPropertyValues")
if property_values == {}:
property_values = {column: "" for column in columns.PROPERTY_COLUMNS}
subdivision = table_information_one(soup, "collapseSummarySubdivision")
if subdivision == {}:
subdivision = {column: "" for column in columns.SUBDIVISION_COLUMNS}
tax = table_information_two(soup, "collapseTaxInfo")
if tax == {}:
tax = {column: "" for column in columns.TAX_COLUMNS}
tax_history = table_information_three(soup, "collapseTaxHistory")
details = table_information_three(soup, "collapseSummaryDetailInfo")
building_components = table_information_four(
soup, "collapseSummaryBuildingComponents"
)
improvement = building_components.get("improvement")
computations = building_components.get("computations")
materials = building_components.get("materials")
gis_url = f"https://isv.kcsgis.com/al.Madison_revenue/?fips={ppin}"
temp_dict[ppin] = {
"ppin": ppin,
"date": str(datetime.datetime.now()),
"parcel": parcel_info,
"property_values": property_values,
"subdivision": subdivision,
"tax": tax,
"tax_history": tax_history,
"details": details,
"improvement": improvement,
"computations": computations,
"materials": materials,
"gis_url": f"https://isv.kcsgis.com/al.Madison_revenue/?fips={ppin}",
}
ppin = [ppin]
conn = create_sql_connection(
                    user=sql_configs.USER,
                    password=sql_configs.PASSWORD,
                    host=sql_configs.HOST,
                    database=sql_configs.DATABASE,
)
c = conn.cursor()
date = [str(datetime.datetime.now())]
parcel_values = list(parcel_info.values()) + date
                # The prepared SQL statements are assumed to live in sql_configs
                # alongside the connection credentials imported at the top of this file.
                c.execute(sql_configs.PARCEL_STATEMENT, parcel_values)
property_values = list(property_values.values()) + date + ppin
                c.execute(sql_configs.PROPERTY_VALUES_STATEMENT, property_values)
subdivision_values = list(subdivision.values()) + date + ppin
                c.execute(sql_configs.SUBDIVISION_STATEMENT, subdivision_values)
tax_values = [str(item) for item in tax.values()] + date + ppin
tax_values = tuple(tax_values)
                c.execute(sql_configs.TAX_STATEMENT, tax_values)
for row in zip(*list(tax_history.values())):
                    c.execute(sql_configs.TAX_HISTORY_STATEMENT, row + tuple(date) + tuple(ppin))
for row in zip(*list(details.values())):
                    c.execute(sql_configs.DETAILS_STATEMENT, row + tuple(date) + tuple(ppin))
improvement_values = list(improvement.values()) + date + ppin
improvement_values = tuple(improvement_values)
                c.execute(sql_configs.IMPROVEMENTS_STATEMENT, improvement_values)
computations_values = list(computations.values()) + date + ppin
computations_values = tuple(computations_values)
                c.execute(sql_configs.COMPUTATION_STATEMENT, computations_values)
for row in zip(*list(materials.values())):
row_length = len(row)
if row_length != 0:
                        c.execute(sql_configs.MATERIALS_STATEMENT, row + tuple(date) + tuple(ppin))
urls_values = (url, gis_url) + tuple(date) + tuple(ppin)
                c.execute(sql_configs.URLS_STATEMENT, urls_values)
conn.commit()
except Exception as e:
# raise Exception
new_line = '\n'
if isinstance(ppin, int):
ppin = [ppin]
with open(os.path.join(__location__, 'errors.txt'), 'a') as f:
f.write(f"error in {ppin[0]} occurred. error was {e}{new_line}")
if __name__ == "__main__":
main()
| 42.705479 | 121 | 0.559262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.14996 |
40bc7b5a674a2b504a89d6769ec57fdcc5fda4af | 357 | py | Python | Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
]
| null | null | null | Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
]
| null | null | null | Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
]
| null | null | null | def fibonacci_iterative(n):
previous = 0
current = 1
for i in range(n - 1):
current_old = current
current = previous + current
previous = current_old
return current
def fibonacci_recursive(n):
if n == 0 or n == 1:
return n
else:
return fibonacci_recursive(n - 2) + fibonacci_recursive(n - 1)
| 23.8 | 70 | 0.602241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
40be64921673daa0d8c5613e61a252c64c6e744e | 1,002 | py | Python | matches/tests/test_view_index.py | ToxaZ/nostradamus | adda478685d60df24da106c15d734d02f01ed339 | [
"WTFPL"
]
| null | null | null | matches/tests/test_view_index.py | ToxaZ/nostradamus | adda478685d60df24da106c15d734d02f01ed339 | [
"WTFPL"
]
| 4 | 2021-06-02T23:01:40.000Z | 2021-07-25T15:02:42.000Z | matches/tests/test_view_index.py | ToxaZ/nostradamus | adda478685d60df24da106c15d734d02f01ed339 | [
"WTFPL"
]
| null | null | null | from django.urls import resolve, reverse
from django.test import TestCase
from matches.views import matches_index
from matches.models import Match
class AllMatchesTests(TestCase):
def setUp(self):
self.match = Match.objects.create(
match_id=1,
home_team='Netherlands',
guest_team='Russia',
start_time='2008-06-21 19:45Z'
)
url = reverse('matches:matches_index')
self.response = self.client.get(url)
def test_index_view_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_index_url_resolves_index_view(self):
view = resolve('/matches/')
self.assertEquals(view.func, matches_index)
def test_index_view_contains_link_to_single_match_page(self):
single_match_url = reverse(
'matches:single_match', kwargs={'match_id': self.match.match_id})
self.assertContains(
self.response, 'href="{0}"'.format(single_match_url))
| 33.4 | 77 | 0.675649 | 852 | 0.850299 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.117764 |
40be6781a367fb391b4b06b4f46533ac8dd9e99d | 5,871 | py | Python | hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
]
| 1 | 2021-11-23T15:40:07.000Z | 2021-11-23T15:40:07.000Z | hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
]
| null | null | null | hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
]
| null | null | null | from typing import Callable, Tuple
import numpy as np
def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]:
"""The banana distribution is a distribution that exhibits a characteristic
banana-shaped ridge that resembles the posterior that can emerge from
models that are not identifiable. The distribution is the posterior of the
following generative model.
y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y)
theta[i] ~ Normal(0, sigma_sq_theta)
Args:
y: Observations of the banana model.
sigma_y: Standard deviation of the observations.
sigma_theta: Standard deviation of prior over linear coefficients.
Returns:
log_posterior: Function to compute the log-posterior.
        metric: Function to compute the Fisher information metric.
        log_posterior_and_metric: Function to compute both the log-posterior and the
            Fisher information metric in a single call.
euclidean_auxiliaries: Function to compute the log-posterior and its
gradient.
riemannian_auxiliaries: Function to compute the log-posterior, the
gradient of the log-posterior, the Fisher information metric, and the
derivatives of the Fisher information metric.
"""
sigma_sq_y = np.square(sigma_y)
sigma_sq_theta = np.square(sigma_theta)
def log_posterior(theta: np.ndarray) -> float:
"""The banana-shaped distribution posterior.
Args:
theta: Linear coefficients.
Returns:
out: The log-posterior of the banana-shaped distribution.
"""
p = theta[0] + np.square(theta[1])
ll = -0.5 / sigma_sq_y * np.square(y - p).sum()
lp = -0.5 / sigma_sq_theta * np.square(theta).sum()
return ll + lp
def grad_log_posterior(theta: np.ndarray) -> np.ndarray:
"""Gradient of the banana-shaped distribution with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
out: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
p = theta[0] + np.square(theta[1])
d = np.sum(y - p)
ga = d / sigma_sq_y - theta[0] / sigma_sq_theta
gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta
return np.hstack((ga, gb))
def metric(theta: np.ndarray) -> np.ndarray:
"""The Fisher information is the negative expected outer product of the
gradient of the posterior.
Args:
theta: Linear coefficients.
Returns:
G: The Fisher information metric of the banana-shaped distribution.
"""
n = y.size
s = 2.0*n*theta[1] / sigma_sq_y
G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s],
[s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]])
return G
def grad_metric(theta: np.ndarray) -> np.ndarray:
"""The gradient of the Fisher information metric with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
n = y.size
dG = np.array([
[[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]],
[[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]]
])
return dG
def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior and the gradient of the
log-posterior.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
return lp, glp
def riemannnian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior, the gradient of the log-posterior,
the Fisher information metric and the derivatives of the Fisher
information metric.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
G: The Fisher information metric of the banana-shaped distribution.
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
G = metric(theta)
dG = grad_metric(theta)
return lp, glp, G, dG
def log_posterior_and_metric(theta: np.ndarray) -> Tuple[np.ndarray]:
lp = log_posterior(theta)
G = metric(theta)
return lp, G
return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannnian_auxiliaries
def generate_data(t: float, sigma_y: float, sigma_theta: float, num_obs: int) -> np.ndarray:
"""Generate data from the banana-shaped posterior distribution.
Args:
t: Free-parameter determining the thetas.
sigma_y: Noise standard deviation.
sigma_theta: Prior standard deviation over the thetas.
num_obs: Number of observations to generate.
Returns:
theta: Linear coefficients of the banana-shaped distribution.
y: Observations from the unidentifiable model.
"""
theta = np.array([t, np.sqrt(1.0 - t)])
y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, ))
return theta, y
| 35.36747 | 106 | 0.62698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,598 | 0.612843 |
40c04be7b7e97b73786d758981e90307e422880f | 3,141 | py | Python | msp430.py | sprout42/binaryninja-msp430 | 9bc5a79b1c6232260c2abc3bb4334e5ca3478baf | [
"MIT"
]
| null | null | null | msp430.py | sprout42/binaryninja-msp430 | 9bc5a79b1c6232260c2abc3bb4334e5ca3478baf | [
"MIT"
]
| null | null | null | msp430.py | sprout42/binaryninja-msp430 | 9bc5a79b1c6232260c2abc3bb4334e5ca3478baf | [
"MIT"
]
| null | null | null | from binaryninja import (
Architecture,
BranchType,
FlagRole,
InstructionInfo,
LowLevelILFlagCondition,
RegisterInfo,
)
from .instructions import TYPE3_INSTRUCTIONS, Instruction, Registers
from .lifter import Lifter
class MSP430(Architecture):
name = "msp430"
address_size = 2
default_int_size = 2
global_regs = ["sr"]
stack_pointer = "sp"
regs = {r: RegisterInfo(r, 2) for r in Registers}
flags = ["v", "n", "c", "z"]
# The first flag write type is ignored currently.
# See: https://github.com/Vector35/binaryninja-api/issues/513
flag_write_types = ["", "*", "cnv", "cnz"]
flags_written_by_flag_write_type = {
"*": ["v", "n", "c", "z"],
"cnv": ["v", "n", "c"],
"cnz": ["c", "n", "z"],
}
flag_roles = {
"c": FlagRole.CarryFlagRole,
"n": FlagRole.NegativeSignFlagRole,
"z": FlagRole.ZeroFlagRole,
"v": FlagRole.OverflowFlagRole,
}
flags_required_for_flag_condition = {
LowLevelILFlagCondition.LLFC_UGE: ['c'],
LowLevelILFlagCondition.LLFC_UGT: ['c'],
LowLevelILFlagCondition.LLFC_ULT: ['c'],
LowLevelILFlagCondition.LLFC_ULE: ['c'],
LowLevelILFlagCondition.LLFC_SGE: ['n', 'v'],
LowLevelILFlagCondition.LLFC_SLT: ['n', 'v'],
LowLevelILFlagCondition.LLFC_E: ['z'],
LowLevelILFlagCondition.LLFC_NE: ['z'],
LowLevelILFlagCondition.LLFC_NEG: ['n'],
LowLevelILFlagCondition.LLFC_POS: ['n']
}
def get_instruction_info(self, data, addr):
instr = Instruction.decode(data, addr)
if instr is None:
return None
result = InstructionInfo()
result.length = instr.length
# Add branches
if instr.mnemonic in ["ret", "reti"]:
result.add_branch(BranchType.FunctionReturn)
elif instr.mnemonic in ["jmp", "br"] and instr.src.value is not None:
result.add_branch(BranchType.UnconditionalBranch, instr.src.value)
elif instr.type == 3:
result.add_branch(BranchType.TrueBranch, instr.src.value)
result.add_branch(BranchType.FalseBranch, addr + 2)
elif instr.mnemonic == "call" and instr.src.value is not None:
result.add_branch(BranchType.CallDestination, instr.src.value)
return result
def get_instruction_text(self, data, addr):
instr = Instruction.decode(data, addr)
if instr is None:
return None
tokens = instr.generate_tokens()
return tokens, instr.length
def get_instruction_low_level_il(self, data, addr, il):
instr = Instruction.decode(data, addr)
if instr is None:
return None
# Halting the system means turning off interrupts and just looping
# indefinitely
if instr.mnemonic == "dint":
next_instr = Instruction.decode(data[instr.length :], addr + instr.length)
            if next_instr is not None and next_instr.mnemonic == "jmp" and next_instr.src.value == addr:
instr.mnemonic = "hlt"
Lifter.lift(il, instr)
return instr.length
| 30.794118 | 86 | 0.617638 | 2,897 | 0.922318 | 0 | 0 | 0 | 0 | 0 | 0 | 380 | 0.120981 |
40c0c0515519976b7d3396916ff20c4b1d6edd0a | 126 | py | Python | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
]
| null | null | null | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
]
| null | null | null | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
]
| null | null | null | __all__ = [
'session',
'event',
'profile',
'consent',
'segment',
'source',
'rule',
'entity'
]
| 11.454545 | 14 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.515873 |
40c32fb91113902b7c534e034974797ba31567b9 | 3,868 | py | Python | metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 5e4bf3892d55d8b9f37490e119113c2094be3bce | [
"MIT"
]
| 10 | 2018-09-06T03:56:59.000Z | 2020-07-26T11:02:50.000Z | metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 5e4bf3892d55d8b9f37490e119113c2094be3bce | [
"MIT"
]
| null | null | null | metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 5e4bf3892d55d8b9f37490e119113c2094be3bce | [
"MIT"
]
| 3 | 2019-01-07T19:34:12.000Z | 2019-07-03T07:41:48.000Z | from salience_metrics import auc_judd, auc_shuff, cc, nss, similarity, normalize_map
"""
DHF1K paper: "we employ five classic met-rics, namely Normalized Scanpath Saliency (NSS), Sim-ilarity Metric (SIM), Linear Correlation Coefficient (CC),AUC-Judd (AUC-J), and shuffled AUC (s-AUC).""
"""
import cv2
import os
import numpy as np
import time
import pickle
gt_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/maps"
sm_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/predictions"
final_metric_list = []
# The directories are named 1-1000 so it should be easy to iterate over them
def inner_worker(i, packed, gt_path, sm_path): #packed should be a list of tuples (annotation, prediction)
gt, sm = packed
ground_truth = cv2.imread(os.path.join(gt_path, gt),cv2.IMREAD_GRAYSCALE)
saliency_map = cv2.imread(os.path.join(sm_path, sm),cv2.IMREAD_GRAYSCALE)
saliency_map_norm = normalize_map(saliency_map) # The functions are a bit haphazard. Some have normalization within and some do not.
# Calculate metrics
AUC_JUDD = auc_judd(saliency_map_norm, ground_truth)
AUC_SHUF = auc_shuff(saliency_map_norm, ground_truth, ground_truth)
NSS = nss(saliency_map_norm, ground_truth)
# the other ones have normalization within:
CC = cc(saliency_map, ground_truth)
SIM = similarity(saliency_map, ground_truth)
return ( AUC_JUDD,
AUC_SHUF,
NSS,
CC,
SIM )
for i in range(1,701):
start = time.clock()
gt_path = os.path.join(gt_directory, str(i))
sm_path = os.path.join(sm_directory, str(i))
gt_files = os.listdir(gt_path)
sm_files = os.listdir(sm_path)
#Now to sort based on their file number. The "key" parameter in sorted is a function based on which the sorting will happen (I use split to exclude the jpg/png from the).
gt_files_sorted = sorted(gt_files, key = lambda x: int(x.split(".")[0]) )
sm_files_sorted = sorted(sm_files, key = lambda x: int(x.split(".")[0]) )
pack = zip(gt_files_sorted, sm_files_sorted)
print("Files related to video {} sorted.".format(i))
##
##https://stackoverflow.com/questions/35663498/how-do-i-return-a-matrix-with-joblib-python
from joblib import Parallel, delayed
start = time.clock()
metric_list = Parallel(n_jobs=8)(delayed(inner_worker)(n, packed, gt_path, sm_path) for n, packed in enumerate(pack)) #run 8 frames simultaneously
aucj_mean = np.mean([x[0] for x in metric_list])
aucs_mean = np.mean([x[1] for x in metric_list])
nss_mean = np.mean([x[2] for x in metric_list])
cc_mean = np.mean([x[3] for x in metric_list])
sim_mean = np.mean([x[4] for x in metric_list])
print("For video number {} the metrics are:".format(i))
print("AUC-JUDD is {}".format(aucj_mean))
print("AUC-SHUFFLED is {}".format(aucs_mean))
print("NSS is {}".format(nss_mean))
print("CC is {}".format(cc_mean))
print("SIM is {}".format(sim_mean))
print("Time elapsed: {}".format(time.clock()-start))
print("==============================")
final_metric_list.append(( aucj_mean,
aucs_mean,
nss_mean,
cc_mean,
sim_mean ))
with open('metrics.txt', 'wb') as handle:
pickle.dump(final_metric_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
Aucj = np.mean([y[0] for y in final_metric_list])
Aucs = np.mean([y[1] for y in final_metric_list])
Nss = np.mean([y[2] for y in final_metric_list])
Cc = np.mean([y[3] for y in final_metric_list])
Sim = np.mean([y[4] for y in final_metric_list])
print("Final average of metrics is:")
print("AUC-JUDD is {}".format(Aucj))
print("AUC-SHUFFLED is {}".format(Aucs))
print("NSS is {}".format(Nss))
print("CC is {}".format(Cc))
print("SIM is {}".format(Sim))
| 39.876289 | 207 | 0.668046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,199 | 0.309979 |
40c346f9a8e289985909d8a308d6ecd6f7e032ea | 1,061 | py | Python | tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
]
| null | null | null | tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
]
| null | null | null | tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
]
| null | null | null | import pytest
from thefuck.rules.pacman_invalid_option import get_new_command
from thefuck.rules.pacman_invalid_option import match
from thefuck.types import Command
good_output = """community/shared_meataxe 1.0-3
A set of programs for working with matrix representations over finite fields
"""
bad_output = "error: invalid option '-"
@pytest.mark.parametrize("option", "SURQFDVT")
def test_not_match_good_output(option):
assert not match(Command("pacman -{}s meat".format(option), good_output))
@pytest.mark.parametrize("option", "azxcbnm")
def test_not_match_bad_output(option):
assert not match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_match(option):
assert match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_get_new_command(option):
new_command = get_new_command(
Command("pacman -{}v meat".format(option), ""))
assert new_command == "pacman -{}v meat".format(option.upper())
| 31.205882 | 80 | 0.748351 | 0 | 0 | 0 | 0 | 707 | 0.666352 | 0 | 0 | 307 | 0.28935 |
40c37866ffff9cefa653877b146a68fc96c42ddf | 11,329 | py | Python | dimod/reference/composites/scalecomposite.py | joseppinilla/dimod | e33ca5045e31ee2d9d58515f017fb6be5276cd8e | [
"Apache-2.0"
]
| 1 | 2022-02-01T14:40:05.000Z | 2022-02-01T14:40:05.000Z | dimod/reference/composites/scalecomposite.py | xpin/dimod | 5e399317b0bfaae6ed20e22b9f2ef242f5fa5e6c | [
"Apache-2.0"
]
| null | null | null | dimod/reference/composites/scalecomposite.py | xpin/dimod | 5e399317b0bfaae6ed20e22b9f2ef242f5fa5e6c | [
"Apache-2.0"
]
| 1 | 2022-02-01T14:40:31.000Z | 2022-02-01T14:40:31.000Z | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""
A composite that scales problem variables as directed. if scalar is not given
calculates it based on quadratic and bias ranges.
"""
try:
import collections.abc as abc
except ImportError:
import collections as abc
from numbers import Number
import numpy as np
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.core.composite import ComposedSampler
__all__ = 'ScaleComposite',
class ScaleComposite(ComposedSampler):
"""Composite to scale variables of a problem
Scales the variables of a bqm and modifies linear and quadratic terms
accordingly.
Args:
sampler (:obj:`dimod.Sampler`):
A dimod sampler
Examples:
This example uses :class:`.ScaleComposite` to instantiate a
composed sampler that submits a simple Ising problem to a sampler.
The composed sampler scales linear, quadratic biases and offset as
indicated by options.
>>> h = {'a': -4.0, 'b': -4.0}
>>> J = {('a', 'b'): 3.2}
>>> sampler = dimod.ScaleComposite(dimod.ExactSolver())
>>> response = sampler.sample_ising(h, J, scalar=0.5,
... ignored_interactions=[('a','b')])
"""
def __init__(self, child_sampler):
self._children = [child_sampler]
@property
def children(self):
return self._children
@property
def parameters(self):
param = self.child.parameters.copy()
param.update({'scalar': [],
'bias_range': [],
'quadratic_range': [],
'ignored_variables': [],
'ignored_interactions': [],
'ignore_offset': []})
return param
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample(self, bqm, scalar=None, bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the provided binary quadratic model.
if scalar is not given, problem is scaled based on bias and quadratic
ranges. See :meth:`.BinaryQuadraticModel.scale` and
:meth:`.BinaryQuadraticModel.normalize`
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
ignored_variables, ignored_interactions = _check_params(
ignored_variables, ignored_interactions)
child = self.child
bqm_copy = _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset)
response = child.sample(bqm_copy, **parameters)
return _scale_back_response(bqm, response, bqm_copy.info['scalar'],
ignored_variables, ignored_interactions,
ignore_offset)
def sample_ising(self, h, J, offset=0, scalar=None,
bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the problem provided by h, J, offset
if scalar is not given, problem is scaled based on bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize the all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
if any(len(inter) > 2 for inter in J):
# handle HUBO
import warnings
msg = ("Support for higher order Ising models in ScaleComposite is "
"deprecated and will be removed in dimod 0.9.0. Please use "
"PolyScaleComposite.sample_hising instead.")
warnings.warn(msg, DeprecationWarning)
from dimod.reference.composites.higherordercomposites import PolyScaleComposite
from dimod.higherorder.polynomial import BinaryPolynomial
poly = BinaryPolynomial.from_hising(h, J, offset=offset)
ignored_terms = set()
if ignored_variables is not None:
ignored_terms.update(frozenset(v) for v in ignored_variables)
if ignored_interactions is not None:
ignored_terms.update(frozenset(inter) for inter in ignored_interactions)
if ignore_offset:
ignored_terms.add(frozenset())
return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar,
bias_range=bias_range,
poly_range=quadratic_range,
ignored_terms=ignored_terms,
**parameters)
bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
return self.sample(bqm, scalar=scalar,
bias_range=bias_range,
quadratic_range=quadratic_range,
ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset, **parameters)
def _scale_back_response(bqm, response, scalar, ignored_interactions,
ignored_variables, ignore_offset):
"""Helper function to scale back the response of sample method"""
if len(ignored_interactions) + len(
ignored_variables) + ignore_offset == 0:
response.record.energy = np.divide(response.record.energy, scalar)
else:
response.record.energy = bqm.energies((response.record.sample,
response.variables))
return response
def _check_params(ignored_variables, ignored_interactions):
"""Helper for sample methods"""
if ignored_variables is None:
ignored_variables = set()
elif not isinstance(ignored_variables, abc.Container):
ignored_variables = set(ignored_variables)
if ignored_interactions is None:
ignored_interactions = set()
elif not isinstance(ignored_interactions, abc.Container):
ignored_interactions = set(ignored_interactions)
return ignored_variables, ignored_interactions
def _calc_norm_coeff(h, J, bias_range, quadratic_range, ignored_variables,
ignored_interactions):
"""Helper function to calculate normalization coefficient"""
if ignored_variables is None or ignored_interactions is None:
raise ValueError('ignored interactions or variables cannot be None')
def parse_range(r):
if isinstance(r, Number):
return -abs(r), abs(r)
return r
def min_and_max(iterable):
if not iterable:
return 0, 0
return min(iterable), max(iterable)
if quadratic_range is None:
linear_range, quadratic_range = bias_range, bias_range
else:
linear_range = bias_range
lin_range, quad_range = map(parse_range, (linear_range,
quadratic_range))
lin_min, lin_max = min_and_max([v for k, v in h.items()
if k not in ignored_variables])
quad_min, quad_max = min_and_max([v for k, v in J.items()
if not check_isin(k,
ignored_interactions)])
inv_scalar = max(lin_min / lin_range[0], lin_max / lin_range[1],
quad_min / quad_range[0], quad_max / quad_range[1])
if inv_scalar != 0:
return 1. / inv_scalar
else:
return 1.
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset):
"""Helper function of sample for scaling"""
bqm_copy = bqm.copy()
if scalar is None:
scalar = _calc_norm_coeff(bqm_copy.linear, bqm_copy.quadratic,
bias_range, quadratic_range,
ignored_variables, ignored_interactions)
bqm_copy.scale(scalar, ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset)
bqm_copy.info.update({'scalar': scalar})
return bqm_copy
def check_isin(key, key_list):
return sum(set(key) == set(key_tmp) for key_tmp in key_list)
| 37.637874 | 101 | 0.603231 | 6,975 | 0.615677 | 0 | 0 | 527 | 0.046518 | 0 | 0 | 4,796 | 0.423338 |
40c4517b7bccc080e6b7ec11639bdde005bb213a | 739 | py | Python | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | [
"MIT"
]
| 1 | 2021-04-17T15:25:36.000Z | 2021-04-17T15:25:36.000Z | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | [
"MIT"
]
| null | null | null | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | [
"MIT"
]
| 1 | 2021-04-18T15:47:02.000Z | 2021-04-18T15:47:02.000Z | import os
def test_development_config(test_app):
test_app.config.from_object('flask_ecom_api.config.DevelopmentConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
def test_testing_config(test_app):
test_app.config.from_object('flask_ecom_api.config.TestingConfig')
assert test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_TEST_URL')
def test_production_config(test_app):
test_app.config.from_object('flask_ecom_api.config.ProductionConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
| 36.95 | 92 | 0.783491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.361299 |
40c6d377ec913783afe6edc196ecab48e0003b36 | 6,122 | py | Python | leasing/forms.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
]
| 1 | 2021-01-12T08:14:10.000Z | 2021-01-12T08:14:10.000Z | leasing/forms.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
]
| 249 | 2017-04-18T14:00:13.000Z | 2022-03-30T12:18:03.000Z | leasing/forms.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
]
| 7 | 2017-04-18T08:43:54.000Z | 2021-07-28T07:29:30.000Z | from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from leasing.enums import (
InfillDevelopmentCompensationState,
LeaseState,
TenantContactType,
)
from leasing.models import Contact, DecisionMaker, District, LeaseType, Municipality
from leasing.validators import validate_business_id
class CommaSeparatedChoiceField(forms.ChoiceField):
def to_python(self, value):
if value in validators.EMPTY_VALUES:
return []
value = [item.strip() for item in str(value).split(",") if item.strip()]
return value
def validate(self, value):
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": val},
)
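# Illustration (added note, not from the original): a raw comma-separated query value such as
# "1,2" is split by to_python() into ['1', '2'] before each item is validated against `choices`.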
class LeaseSearchForm(forms.Form):
succinct = forms.BooleanField(label="Succinct", required=False)
identifier = forms.CharField(
label="Lease identifier", max_length=255, required=False, empty_value=None
)
tenant_name = forms.CharField(label="Tenant name", max_length=255, required=False)
tenantcontact_type = CommaSeparatedChoiceField(
label="Tenant role",
required=False,
choices=tuple((x.value, str(x)) for x in TenantContactType),
)
only_past_tenants = forms.BooleanField(label="Only past tenants", required=False)
tenant_activity = forms.ChoiceField(
label="Tenants",
required=False,
choices=(
("all", "All"),
("past", "Only past tenants"),
("active", "Only active tenants"),
),
)
lease_start_date_start = forms.DateField(required=False)
lease_start_date_end = forms.DateField(required=False)
lease_end_date_start = forms.DateField(required=False)
lease_end_date_end = forms.DateField(required=False)
only_active_leases = forms.BooleanField(label="Active", required=False)
only_expired_leases = forms.BooleanField(label="Expired", required=False)
has_geometry = forms.NullBooleanField(label="Has geometry", required=False)
property_identifier = forms.CharField(
label="Real property identifier",
max_length=255,
required=False,
empty_value=None,
)
address = forms.CharField(
label="Address", max_length=255, required=False, empty_value=None
)
lease_type = forms.ModelChoiceField(
label="Lease type", queryset=LeaseType.objects.all(), required=False
)
municipality = forms.ModelChoiceField(
label="Municipality", queryset=Municipality.objects.all(), required=False
)
district = forms.ModelChoiceField(
label="District", queryset=District.objects.all(), required=False
)
sequence = forms.IntegerField(label="Sequence", required=False)
lease_state = CommaSeparatedChoiceField(
label="Lease state",
required=False,
choices=tuple((x.value, str(x)) for x in LeaseState),
)
business_id = forms.CharField(
label="Business id",
max_length=255,
required=False,
empty_value=None,
validators=[validate_business_id],
)
national_identification_number = forms.CharField(
label="National identification number",
max_length=255,
required=False,
empty_value=None,
)
lessor = forms.ModelChoiceField(
label="Lessor", queryset=Contact.objects.filter(is_lessor=True), required=False
)
contract_number = forms.CharField(
label="Contract number", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
invoice_number = forms.CharField(
label="Invoice number", max_length=255, required=False, empty_value=None
)
class BasisOfRentSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class InfillDevelopmentCompensationSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
state = CommaSeparatedChoiceField(
label="State",
required=False,
choices=tuple((x.value, str(x)) for x in InfillDevelopmentCompensationState),
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class AuditLogSearchForm(forms.Form):
type = forms.ChoiceField(
label="Type",
required=True,
choices=(("lease", "Lease"), ("contact", "Contact")),
)
id = forms.IntegerField(label="Id", required=False)
| 36.658683 | 87 | 0.679353 | 5,749 | 0.939072 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.114995 |
40c712bda8811c80835db84231a9e91605ae40b6 | 675 | py | Python | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
]
| null | null | null | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
]
| null | null | null | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
]
| null | null | null | # polls/management/commands/create_admin_user.py
import sys
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
class Command(BaseCommand):
help = 'Creates the initial admin user'
def handle(self, *args, **options):
if User.objects.filter(username="admin").exists():
print("admin exists")
else:
u = User(username='admin')
u.set_password('website-portfolio-project')
u.is_superuser = True
u.is_staff = True
u.save()
print("admin created")
sys.exit()
| 28.125 | 65 | 0.642963 | 453 | 0.671111 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.222222 |
40c7452a82c23c82f183d4188dfd8d42aa979d41 | 1,597 | py | Python | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
]
| null | null | null | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
]
| null | null | null | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
]
| null | null | null | import unirest
import json
import requests
import os
import subprocess
import time
import argparse
rootUrl = "https://api.unsplash.com/"
unirest.default_header("Accept", "application/json")
unirest.default_header("Accept-Version", "v1")
unirest.default_header("Authorization","<CLIENT-ID>")
def downloadPic(randomPic_response):
content = randomPic_response.body
print 'getting an amazing photo from Unsplash by %s ' % content["user"]["username"]
    picData = requests.get(randomPic_response.body["urls"]["regular"]).content
applyWallpaper(picData)
def applyWallpaper(picStream):
path = os.path.expanduser('~')+'/.tempWallpaper.jpg'
with open(path, 'wb') as handler:
print "saving"
handler.write(picStream)
print "enjoy your new wallpaper."
if os.environ.get('DESKTOP_SESSION') == "xubuntu":
os.system('xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image && xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image -s %s' %path)
else:
os.system('gsettings set org.gnome.desktop.background picture-uri file:///%s' % path)
while True:
parser = argparse.ArgumentParser()
parser.add_argument('integers', metavar='int', type=int, help='time between wallpaper change (in seconds)')
args = parser.parse_args()
print "waiting for %s seconds" % args.integers
time.sleep(args.integers)
downloadPic(unirest.get(rootUrl + "photos/random", params={"orientation":"landscape"}))#.body["id"]
| 40.948718 | 204 | 0.707577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.435191 |
40c7af774d2446afa75acb06651e91eb7c9447fd | 2,358 | py | Python | tests/client_asyncio_test.py | ninchat/ninchat-python | 7e5fcadb7389ca8c7722c32d69839289675d7baa | [
"BSD-2-Clause"
]
| null | null | null | tests/client_asyncio_test.py | ninchat/ninchat-python | 7e5fcadb7389ca8c7722c32d69839289675d7baa | [
"BSD-2-Clause"
]
| 4 | 2017-10-12T21:05:12.000Z | 2018-05-17T22:19:08.000Z | tests/client_asyncio_test.py | ninchat/ninchat-python | 7e5fcadb7389ca8c7722c32d69839289675d7baa | [
"BSD-2-Clause"
]
| null | null | null | # Copyright (c) 2017, Somia Reality Oy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import asyncio
import logging
from functools import partial
from ninchat.client.asyncio import Session
log = logging.getLogger(__name__)
async def async_test():
def on_session_event(params):
pass
def on_event(params, payload, last_reply):
if params["event"] == "message_received":
log.debug("received %s", payload[0].decode())
s = Session()
s.on_session_event = on_session_event
s.on_event = on_event
s.set_params({"user_attrs": {"name": "ninchat-python"}, "message_types": ["*"]})
async with s as params:
log.debug("opened params = %s", params)
user_id = params["user_id"]
params, _ = await s.call({"action": "describe_conn"})
log.debug("called params = %s", params)
await s.call({"action": "send_message", "message_type": "ninchat.com/text", "user_id": user_id}, [b'{"text": "Hello, me!"}'])
log.info("ok")
def test_client_asyncio():
asyncio.get_event_loop().run_until_complete(async_test())
| 38.655738 | 133 | 0.725615 | 0 | 0 | 0 | 0 | 0 | 0 | 777 | 0.329517 | 1,578 | 0.669211 |
40c853daa30d56c941424e6324401a40fa450528 | 878 | py | Python | msgraph/base.py | jstacoder/python-msgraph | be9a93f4baa2d89c97a8454ab1d31b8e5fdda38e | [
"Apache-2.0"
]
| 2 | 2020-09-23T18:26:22.000Z | 2021-03-10T14:12:49.000Z | msgraph/base.py | jstacoder/python-msgraph | be9a93f4baa2d89c97a8454ab1d31b8e5fdda38e | [
"Apache-2.0"
]
| 4 | 2020-09-23T18:25:32.000Z | 2021-03-22T11:07:32.000Z | msgraph/base.py | jstacoder/python-msgraph | be9a93f4baa2d89c97a8454ab1d31b8e5fdda38e | [
"Apache-2.0"
]
| 1 | 2020-09-23T18:28:14.000Z | 2020-09-23T18:28:14.000Z | from datetime import datetime
class Base(object):
date_format = '%Y-%m-%d'
time_format = '%H:%M:%S'
datetime_format = date_format + 'T%s' % time_format
full_datetime_format = date_format + 'T' + time_format + '.%f'
iso_format = date_format + 'T%sZ' % time_format
standard_datetime_format = date_format + ' ' + time_format
extended_datetime_format = date_format + 'T' + time_format +'.%fZ'
@classmethod
def parse_date_time(cls, text):
instance = None
formats = [cls.extended_datetime_format, cls.full_datetime_format, cls.datetime_format, cls.standard_datetime_format, cls.iso_format, cls.date_format]
for format in formats:
try:
instance = datetime.strptime(text, format)
except Exception:
pass
else:
break
return instance
| 35.12 | 158 | 0.633257 | 845 | 0.962415 | 0 | 0 | 453 | 0.515945 | 0 | 0 | 51 | 0.058087 |
40c934e19e6344f536502d3f0e951d55cb483721 | 5,641 | py | Python | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
]
| 1 | 2021-12-10T14:58:11.000Z | 2021-12-10T14:58:11.000Z | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
]
| 5 | 2020-08-12T15:20:31.000Z | 2021-06-10T13:43:02.000Z | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
]
| null | null | null | from caching.base import CachingManager, CachingMixin
from django.db import models
from tsdata.models import CensusProfile
PURPOSE_CHOICES = (
(1, "Speed Limit Violation"),
(2, "Stop Light/Sign Violation"),
(3, "Driving While Impaired"),
(4, "Safe Movement Violation"),
(5, "Vehicle Equipment Violation"),
(6, "Vehicle Regulatory Violation"),
(7, "Seat Belt Violation"),
(8, "Investigation"),
(9, "Other Motor Vehicle Violation"),
(10, "Checkpoint"),
)
ACTION_CHOICES = (
(1, "Verbal Warning"),
(2, "Written Warning"),
(3, "Citation Issued"),
(4, "On-View Arrest"),
(5, "No Action Taken"),
)
PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger"))
GENDER_CHOICES = (("M", "Male"), ("F", "Female"))
ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic"))
RACE_CHOICES = (
("A", "Asian"),
("B", "Black"),
("I", "Native American"),
("U", "Other"),
("W", "White"),
)
SEARCH_TYPE_CHOICES = (
(1, "Consent"),
(2, "Search Warrant"),
(3, "Probable Cause"),
(4, "Search Incident to Arrest"),
(5, "Protective Frisk"),
)
SEARCH_BASIS_CHOICES = (
("ER", "Erratic/Suspicious Behavior"),
("OB", "Observation of Suspected Contraband"),
("OI", "Other Official Information"),
("SM", "Suspicious Movement"),
("TIP", "Informant Tip"),
("WTNS", "Witness Observation"),
)
class Stop(CachingMixin, models.Model):
stop_id = models.PositiveIntegerField(primary_key=True)
agency_description = models.CharField(max_length=100)
agency = models.ForeignKey("Agency", null=True, related_name="stops", on_delete=models.CASCADE)
date = models.DateTimeField(db_index=True)
purpose = models.PositiveSmallIntegerField(choices=PURPOSE_CHOICES)
action = models.PositiveSmallIntegerField(choices=ACTION_CHOICES)
driver_arrest = models.BooleanField(default=False)
passenger_arrest = models.BooleanField(default=False)
encounter_force = models.BooleanField(default=False)
engage_force = models.BooleanField(default=False)
officer_injury = models.BooleanField(default=False)
driver_injury = models.BooleanField(default=False)
passenger_injury = models.BooleanField(default=False)
officer_id = models.CharField(max_length=15) # todo: keys
stop_location = models.CharField(max_length=15) # todo: keys
stop_city = models.CharField(max_length=20)
objects = CachingManager()
class Person(CachingMixin, models.Model):
person_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
type = models.CharField(max_length=2, choices=PERSON_TYPE_CHOICES)
age = models.PositiveSmallIntegerField()
gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
ethnicity = models.CharField(max_length=2, choices=ETHNICITY_CHOICES)
race = models.CharField(max_length=2, choices=RACE_CHOICES)
objects = CachingManager()
class Search(CachingMixin, models.Model):
search_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
type = models.PositiveSmallIntegerField(choices=SEARCH_TYPE_CHOICES)
vehicle_search = models.BooleanField(default=False)
driver_search = models.BooleanField(default=False)
passenger_search = models.BooleanField(default=False)
property_search = models.BooleanField(default=False)
vehicle_siezed = models.BooleanField(default=False)
personal_property_siezed = models.BooleanField(default=False)
other_property_sized = models.BooleanField(default=False)
objects = CachingManager()
class Contraband(CachingMixin, models.Model):
contraband_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
ounces = models.FloatField(default=0, null=True)
pounds = models.FloatField(default=0, null=True)
pints = models.FloatField(default=0, null=True)
gallons = models.FloatField(default=0, null=True)
dosages = models.FloatField(default=0, null=True)
grams = models.FloatField(default=0, null=True)
kilos = models.FloatField(default=0, null=True)
money = models.FloatField(default=0, null=True)
weapons = models.FloatField(default=0, null=True)
dollar_amount = models.FloatField(default=0, null=True)
objects = CachingManager()
class SearchBasis(CachingMixin, models.Model):
search_basis_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
basis = models.CharField(max_length=4, choices=SEARCH_BASIS_CHOICES)
objects = CachingManager()
class Agency(CachingMixin, models.Model):
name = models.CharField(max_length=255)
# link to CensusProfile (no cross-database foreign key)
census_profile_id = models.CharField(max_length=16, blank=True, default="")
last_reported_stop = models.DateField(null=True)
objects = CachingManager()
class Meta(object):
verbose_name_plural = "Agencies"
def __str__(self):
return self.name
@property
def census_profile(self):
if self.census_profile_id:
profile = CensusProfile.objects.get(id=self.census_profile_id)
return profile.get_census_dict()
else:
return dict()
| 34.607362 | 99 | 0.710867 | 4,223 | 0.748626 | 0 | 0 | 234 | 0.041482 | 0 | 0 | 825 | 0.146251 |
40cc65a33578c41b6882d9360507c431c3bb4a45 | 74 | py | Python | flasky/auth/forms/__init__.py | by46/fasky | c6941972b57284c2167dfacf022f981939249256 | [
"MIT"
]
| null | null | null | flasky/auth/forms/__init__.py | by46/fasky | c6941972b57284c2167dfacf022f981939249256 | [
"MIT"
]
| null | null | null | flasky/auth/forms/__init__.py | by46/fasky | c6941972b57284c2167dfacf022f981939249256 | [
"MIT"
]
| null | null | null | from .login import LoginForm
from .registration import RegistrationForm
| 24.666667 | 43 | 0.837838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
40ce727b047c06c9d0537e694ab36bc40c4d524b | 552 | py | Python | API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 98b9949a237121451a13fce5bc8f2945fa5a3cee | [
"MIT"
]
| null | null | null | API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 98b9949a237121451a13fce5bc8f2945fa5a3cee | [
"MIT"
]
| 16 | 2019-12-04T23:02:52.000Z | 2022-02-10T11:57:03.000Z | API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 98b9949a237121451a13fce5bc8f2945fa5a3cee | [
"MIT"
]
| null | null | null | from django.urls import path
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
from .views import notification
schema_view = get_swagger_view(title='MAIL API')
urlpatterns = [
path('front/betsy/irish/embargo/admin/', admin.site.urls),
# Swagger API
path(
'api/',
schema_view,
name='api'
),
# notification
path(
'notification/',
notification.NotificationServicesRest.as_view(),
name=notification.NotificationServicesRest.name
),
]
| 21.230769 | 62 | 0.681159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.175725 |
40d10458dee4b20d938050badf13c455b5c17307 | 1,097 | py | Python | tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 9a9527110726036bfad94b14166e62aa61c1276e | [
"MIT"
]
| null | null | null | tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 9a9527110726036bfad94b14166e62aa61c1276e | [
"MIT"
]
| null | null | null | tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 9a9527110726036bfad94b14166e62aa61c1276e | [
"MIT"
]
| null | null | null | from unittest import mock
SOCK_STREAM = 0
set_interface = mock.Mock()
interface = mock.MagicMock()
getaddrinfo = mock.Mock()
socket = mock.Mock()
class Mocket:
def __init__(self, response):
self.settimeout = mock.Mock()
self.close = mock.Mock()
self.connect = mock.Mock()
self.send = mock.Mock(side_effect=self._send)
self.readline = mock.Mock(side_effect=self._readline)
self.recv = mock.Mock(side_effect=self._recv)
self.fail_next_send = False
self._response = response
self._position = 0
def _send(self, data):
if self.fail_next_send:
self.fail_next_send = False
raise RuntimeError("Send failed")
return None
def _readline(self):
i = self._response.find(b"\r\n", self._position)
r = self._response[self._position : i + 2]
self._position = i + 2
return r
def _recv(self, count):
end = self._position + count
r = self._response[self._position : end]
self._position = end
print(r)
return r
| 26.756098 | 61 | 0.610757 | 946 | 0.862352 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.018232 |
40d10aff20e4192696c984a95bd52419f7e1299a | 2,228 | py | Python | run.py | romeroyakovlev/ii | ae9485df2c3565871994c146001a72db511f3700 | [
"CC0-1.0"
]
| 1 | 2017-09-29T09:35:04.000Z | 2017-09-29T09:35:04.000Z | run.py | romeroyakovlev/ii | ae9485df2c3565871994c146001a72db511f3700 | [
"CC0-1.0"
]
| null | null | null | run.py | romeroyakovlev/ii | ae9485df2c3565871994c146001a72db511f3700 | [
"CC0-1.0"
]
| null | null | null | # -*- coding: utf-8 -*-
import api,points
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
@route('/list.txt')
def list_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
lst = api.load_echo(False)[1:]
if request.query.n:
return '\n'.join([t[0] for t in lst])
else:
return '\n'.join(['%s:%s:%s' % t for t in lst])
@route('/blacklist.txt')
def blacklist_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return api.ru('blacklist.txt')
@route('/u/m/<h:path>')
def jt_outmsg(h):
response.set_header ('content-type','text/plain; charset=iso-8859-1')
lst = [x for x in h.split('/') if len(x) == 20]
return '\n'.join( [api.mk_jt(x,api.raw_msg(x)) for x in lst] )
@route('/u/e/<names:path>')
def index_list(names):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.echoareas(names.split('/'))
def _point_msg(pauth,tmsg):
msgfrom, addr = points.check_hash(pauth)
if not addr: return 'auth error!'
cfg = api.load_echo(False)
mo = api.toss(msgfrom,'%s,%s' % (cfg[0][1],addr),tmsg.strip())
if mo.msg.startswith('@repto:'):
tmpmsg = mo.msg.splitlines()
mo.repto = tmpmsg[0][7:]
mo.msg = '\n'.join(tmpmsg[1:])
    # better still, this should be pushed down into api.toss
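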
if len(mo.msg.encode('utf-8')) < 64100:
h = api.point_newmsg(mo)
if h:
return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea)
else:
return 'error:unknown'
else:
return 'msg big!'
@route('/u/point/<pauth>/<tmsg:path>')
def point_msg_get(pauth,tmsg):
return _point_msg(pauth,tmsg)
@post('/u/point')
def point_msg_post():
    return _point_msg(request.POST['pauth'],request.POST['tmsg'])
@route('/m/<msg>')
def get_msg(msg):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.raw_msg(msg)
@route('/e/<echoarea>')
def get_echolist(echoarea):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.get_echoarea(echoarea,True)
import iitpl
iitpl.II_PATH=II_PATH
run(host='127.0.0.1',port=62220,debug=False)
| 29.706667 | 84 | 0.62702 | 0 | 0 | 0 | 0 | 1,323 | 0.588261 | 0 | 0 | 628 | 0.279235 |
40d1ab4064e3ebc59780f56e6705f5033ef843d8 | 2,080 | py | Python | learning_labs/yang/01-yang/add_loopback_ip.py | hpreston/sbx_nxos | 8952d369b80810d22e0c86485b667cde1519272d | [
"MIT"
]
| 33 | 2017-05-19T19:47:33.000Z | 2021-05-16T07:33:39.000Z | learning_labs/yang/01-yang/add_loopback_ip.py | AJNOURI/sbx_nxos | 449ee3c1c88cbd831fd3f90490fc28dd7e02d448 | [
"MIT"
]
| 13 | 2017-08-25T16:48:16.000Z | 2021-09-23T23:21:58.000Z | learning_labs/yang/01-yang/add_loopback_ip.py | vbohinc/sbx_nxos | 744c34fffd32b1c973ac791123afd6cc811e0d8b | [
"MIT"
]
| 20 | 2017-06-19T09:32:20.000Z | 2022-02-20T05:33:14.000Z | #!/usr/bin/env python
from ncclient import manager
import sys
from lxml import etree
# Set the device variables
DEVICES = ['172.16.30.101', '172.16.30.102']
USER = 'admin'
PASS = 'admin'
PORT = 830
LOOPBACK_IP = {
'172.16.30.101': '10.99.99.1/24',
'172.16.30.102': '10.99.99.2/24'
}
DEVICE_NAMES = {'172.16.30.101': '(nx-osv9000-1)',
'172.16.30.102': '(nx-osv9000-2)' }
# create a main() method
def main():
"""
Main method that adds an IP address to interface loopback 99 to
both the spine switches.
"""
loopback_ip_add = """
<config>
<System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device">
<ipv4-items>
<inst-items>
<dom-items>
<Dom-list>
<name>default</name>
<if-items>
<If-list>
<id>lo99</id>
<addr-items>
<Addr-list>
<addr>{}</addr>
</Addr-list>
</addr-items>
</If-list>
</if-items>
</Dom-list>
</dom-items>
</inst-items>
</ipv4-items>
</System>
</config>"""
for device in DEVICES:
with manager.connect(host=device, port=PORT, username=USER,
password=PASS, hostkey_verify=False,
device_params={'name': 'nexus'},
look_for_keys=False, allow_agent=False) as m:
# Add the loopback interface
print("\nNow adding IP address {} to device {} {}...\n".format(LOOPBACK_IP[device], DEVICE_NAMES[device],
device))
new_ip = loopback_ip_add.format(LOOPBACK_IP[device])
netconf_response = m.edit_config(target='running', config=new_ip)
# Parse the XML response
print(netconf_response)
if __name__ == '__main__':
sys.exit(main())
| 30.144928 | 117 | 0.479327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,212 | 0.582692 |
40d1e35fdcc4995890f4efabdc25434ef9f00eb5 | 5,704 | py | Python | Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 2659fcae1a8d5fec13ab632aee1492a29e9585ee | [
"MIT"
]
| null | null | null | Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 2659fcae1a8d5fec13ab632aee1492a29e9585ee | [
"MIT"
]
| null | null | null | Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 2659fcae1a8d5fec13ab632aee1492a29e9585ee | [
"MIT"
]
| null | null | null | # To run this file, Win Start > cmd > file dir > run: python spacex_dash_app.py
# Import required libraries
import pandas as pd
import dash
from dash import html
from dash import dcc
from dash.dependencies import Input, Output
import plotly.express as px
# Read the airline data into pandas dataframe
spacex_df = pd.read_csv("spacex_launch_dash.csv")
max_payload = spacex_df['Payload Mass (kg)'].max()
min_payload = spacex_df['Payload Mass (kg)'].min()
# Dropdown list(s)
launch_site_list = []
launch_site_list.append('ALL')
for index, row in spacex_df['Launch Site'].value_counts().to_frame().iterrows():
launch_site_list.append(row.name)
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',
style={'textAlign': 'center', 'color': '#503D36',
'font-size': 40}),
# TASK 1: Add a dropdown list to enable Launch Site selection
# The default select value is for ALL sites
# dcc.Dropdown(id='site-dropdown',...)
dcc.Dropdown(id='site-dropdown',
options=[{'label': i, 'value': i} for i in launch_site_list],
style={'width':'100%', 'padding':'3px', 'font-size': '20px', 'text-align-last': 'left'},
value='ALL'),
html.Br(),
# TASK 2: Add a pie chart to show the total successful launches count for all sites
# If a specific launch site was selected, show the Success vs. Failed counts for the site
html.Div(dcc.Graph(id='success-pie-chart')),
html.Br(),
html.P("Payload range (Kg):"),
# TASK 3: Add a slider to select payload range
#dcc.RangeSlider(id='payload-slider',...)
dcc.RangeSlider(id='payload-slider', min=min_payload, max=max_payload, step=1000, value=[min_payload, max_payload]),
# TASK 4: Add a scatter chart to show the correlation between payload and launch success
html.Div(dcc.Graph(id='success-payload-scatter-chart')),
])
# TASK 2:
# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output
@app.callback(Output(component_id='success-pie-chart', component_property='figure'),
Input(component_id='site-dropdown', component_property='value'))
def get_pie_chart(entered_site):
if entered_site == 'ALL':
filtered_df = spacex_df[['Launch Site', 'class']].groupby(by=['Launch Site'], as_index=False).mean()
fig = px.pie(filtered_df, values='class',
names='Launch Site',
title='Total Success Launches by Site')
return fig
else:
# return the outcomes piechart for a selected site
filtered_df = spacex_df[['Launch Site', 'class']][spacex_df['Launch Site'] == entered_site]
mean = filtered_df.groupby(by='Launch Site', as_index=False).mean()
means = {}
means[1] = mean['class'][0]
means[0] = 1 - means[1]
fig = px.pie(values=means.values(), names=means.keys(),
title=f'Total Success Launches by Site: {entered_site}')
return fig
# TASK 4:
# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output
@app.callback(Output(component_id='success-payload-scatter-chart', component_property='figure'),
[Input(component_id='site-dropdown', component_property='value'),
Input(component_id="payload-slider", component_property="value")])
def get_scatter_plot(entered_site, payload_range):
# print('min:', payload_range[0], '\tmax:', payload_range[1])
# print(entered_site)
if entered_site == 'ALL':
payload_filtered_df = spacex_df[['Payload Mass (kg)', 'Booster Version Category', 'Launch Site', 'class']][(spacex_df['Payload Mass (kg)'] <= payload_range[1]) & (spacex_df['Payload Mass (kg)'] >= payload_range[0])]
else:
payload_filtered_df = spacex_df[['Payload Mass (kg)', 'Booster Version Category', 'Launch Site', 'class']][(spacex_df['Payload Mass (kg)'] <= payload_range[1]) &
(spacex_df['Payload Mass (kg)'] >= payload_range[0]) &
(spacex_df['Launch Site'] == entered_site)]
fig = px.scatter(data_frame=payload_filtered_df, x='Payload Mass (kg)', y='class', color='Booster Version Category')
return fig
# Run the app
if __name__ == '__main__':
app.run_server(debug=True)
# Finding Insights Visually
# Now with the dashboard completed, you should be able to use it to analyze SpaceX launch data, and answer the following questions:
#
# Which site has the largest successful launches?
### KSC LC-39A
# Which site has the highest launch success rate?
### KSC LC-39A
# Which payload range(s) has the highest launch success rate?
### 2000 - 4000
# Which payload range(s) has the lowest launch success rate?
### 6000 - 9000
# Which F9 Booster version (v1.0, v1.1, FT, B4, B5, etc.) has the highest launch success rate?
### B5
| 52.330275 | 223 | 0.58082 | 0 | 0 | 0 | 0 | 2,301 | 0.403401 | 0 | 0 | 2,568 | 0.45021 |
40d2f193783c709a9e4416360cda1c6098d93420 | 1,430 | py | Python | configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 5726bd10debbddcffb3dc1f5c671f5dceedf007d | [
"BSD-2-Clause"
]
| 23 | 2019-06-21T15:03:45.000Z | 2022-01-24T11:34:16.000Z | configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 5726bd10debbddcffb3dc1f5c671f5dceedf007d | [
"BSD-2-Clause"
]
| null | null | null | configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 5726bd10debbddcffb3dc1f5c671f5dceedf007d | [
"BSD-2-Clause"
]
| 10 | 2019-06-21T14:52:55.000Z | 2021-12-01T19:05:38.000Z | """
The best randomly-searched ResNet reported in the paper.
In the original paper there is a bug. This network sums together layers after
the ReLU nonlinearity, which are not Gaussian, and also do not have mean 0. As
a result, the overall network does not converge to a Gaussian process. The
defined kernel is still valid, even if it doesn't correspond to a NN.
In the interest of making the results replicable, we have replicated this bug
as well.
The correct way to use ResNets is to sum things after a Conv2d layer, see for
example the `resnet_block` in `cnn_gp/kernels.py`.
"""
import torchvision
from cnn_gp import Conv2d, ReLU, Sequential, Sum
train_range = range(5000, 55000)
validation_range = list(range(55000, 60000)) + list(range(0, 5000))
test_range = range(60000, 70000)
dataset_name = "MNIST"
model_name = "ResNet"
dataset = torchvision.datasets.MNIST
transforms = []
epochs = 0
in_channels = 1
out_channels = 10
var_bias = 4.69
var_weight = 7.27
initial_model = Sequential(
*(Sum([
Sequential(),
Sequential(
Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
var_bias=var_bias),
ReLU(),
)]) for _ in range(8)),
Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
var_bias=var_bias),
ReLU(),
Conv2d(kernel_size=28, padding=0, var_weight=var_weight,
var_bias=var_bias),
)
| 31.086957 | 79 | 0.702098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.427972 |
40d397efdfc75b4459bce3aac322fa920256a163 | 2,956 | py | Python | python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
]
| null | null | null | python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
]
| null | null | null | python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
]
| 2 | 2020-09-22T18:37:46.000Z | 2021-09-02T11:02:59.000Z | import matplotlib.pyplot as plt
import numpy as np
# daily search trend for keyword 'flowers' for a year
d = [
1.04, 1.04, 1.16, 1.22, 1.46, 2.34, 1.16, 1.12, 1.24, 1.30, 1.44, 1.22, 1.26,
1.34, 1.26, 1.40, 1.52, 2.56, 1.36, 1.30, 1.20, 1.12, 1.12, 1.12, 1.06, 1.06,
1.00, 1.02, 1.04, 1.02, 1.06, 1.02, 1.04, 0.98, 0.98, 0.98, 1.00, 1.02, 1.02,
1.00, 1.02, 0.96, 0.94, 0.94, 0.94, 0.96, 0.86, 0.92, 0.98, 1.08, 1.04, 0.74,
0.98, 1.02, 1.02, 1.12, 1.34, 2.02, 1.68, 1.12, 1.38, 1.14, 1.16, 1.22, 1.10,
1.14, 1.16, 1.28, 1.44, 2.58, 1.30, 1.20, 1.16, 1.06, 1.06, 1.08, 1.00, 1.00,
0.92, 1.00, 1.02, 1.00, 1.06, 1.10, 1.14, 1.08, 1.00, 1.04, 1.10, 1.06, 1.06,
1.06, 1.02, 1.04, 0.96, 0.96, 0.96, 0.92, 0.84, 0.88, 0.90, 1.00, 1.08, 0.80,
0.90, 0.98, 1.00, 1.10, 1.24, 1.66, 1.94, 1.02, 1.06, 1.08, 1.10, 1.30, 1.10,
1.12, 1.20, 1.16, 1.26, 1.42, 2.18, 1.26, 1.06, 1.00, 1.04, 1.00, 0.98, 0.94,
0.88, 0.98, 0.96, 0.92, 0.94, 0.96, 0.96, 0.94, 0.90, 0.92, 0.96, 0.96, 0.96,
0.98, 0.90, 0.90, 0.88, 0.88, 0.88, 0.90, 0.78, 0.84, 0.86, 0.92, 1.00, 0.68,
0.82, 0.90, 0.88, 0.98, 1.08, 1.36, 2.04, 0.98, 0.96, 1.02, 1.20, 0.98, 1.00,
1.08, 0.98, 1.02, 1.14, 1.28, 2.04, 1.16, 1.04, 0.96, 0.98, 0.92, 0.86, 0.88,
0.82, 0.92, 0.90, 0.86, 0.84, 0.86, 0.90, 0.84, 0.82, 0.82, 0.86, 0.86, 0.84,
0.84, 0.82, 0.80, 0.78, 0.78, 0.76, 0.74, 0.68, 0.74, 0.80, 0.80, 0.90, 0.60,
0.72, 0.80, 0.82, 0.86, 0.94, 1.24, 1.92, 0.92, 1.12, 0.90, 0.90, 0.94, 0.90,
0.90, 0.94, 0.98, 1.08, 1.24, 2.04, 1.04, 0.94, 0.86, 0.86, 0.86, 0.82, 0.84,
0.76, 0.80, 0.80, 0.80, 0.78, 0.80, 0.82, 0.76, 0.76, 0.76, 0.76, 0.78, 0.78,
0.76, 0.76, 0.72, 0.74, 0.70, 0.68, 0.72, 0.70, 0.64, 0.70, 0.72, 0.74, 0.64,
0.62, 0.74, 0.80, 0.82, 0.88, 1.02, 1.66, 0.94, 0.94, 0.96, 1.00, 1.16, 1.02,
1.04, 1.06, 1.02, 1.10, 1.22, 1.94, 1.18, 1.12, 1.06, 1.06, 1.04, 1.02, 0.94,
0.94, 0.98, 0.96, 0.96, 0.98, 1.00, 0.96, 0.92, 0.90, 0.86, 0.82, 0.90, 0.84,
0.84, 0.82, 0.80, 0.80, 0.76, 0.80, 0.82, 0.80, 0.72, 0.72, 0.76, 0.80, 0.76,
0.70, 0.74, 0.82, 0.84, 0.88, 0.98, 1.44, 0.96, 0.88, 0.92, 1.08, 0.90, 0.92,
0.96, 0.94, 1.04, 1.08, 1.14, 1.66, 1.08, 0.96, 0.90, 0.86, 0.84, 0.86, 0.82,
0.84, 0.82, 0.84, 0.84, 0.84, 0.84, 0.82, 0.86, 0.82, 0.82, 0.86, 0.90, 0.84,
0.82, 0.78, 0.80, 0.78, 0.74, 0.78, 0.76, 0.76, 0.70, 0.72, 0.76, 0.72, 0.70,
0.64]
# Now let's generate random data for the same period
d1 = np.random.random(365)
assert len(d) == len(d1)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.scatter(d, d1, alpha=0.5)
ax1.set_title('No correlation')
ax1.grid(True)
ax2 = fig.add_subplot(222)
ax2.scatter(d1, d1, alpha=0.5)
ax2.set_title('Ideal positive correlation')
ax2.grid(True)
ax3 = fig.add_subplot(223)
ax3.scatter(d1, d1*-1, alpha=0.5)
ax3.set_title('Ideal negative correlation')
ax3.grid(True)
ax4 = fig.add_subplot(224)
ax4.scatter(d1, d1+d, alpha=0.5)
ax4.set_title('Non ideal positive correlation')
ax4.grid(True)
plt.tight_layout()
plt.show() | 46.1875 | 78 | 0.552436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.070704 |
40d5469fd32315fb7f4708a40672a155712e5afb | 22,427 | py | Python | src/thespian/tweaks.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
]
| 1 | 2022-03-28T16:10:15.000Z | 2022-03-28T16:10:15.000Z | src/thespian/tweaks.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
]
| null | null | null | src/thespian/tweaks.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
]
| null | null | null | from dataclasses import dataclass
import logging
from attributes import get_ability_modifier
from sourcetree.utils import (
get_feats_list,
get_feat_perks,
get_feat_proficiencies,
get_feat_requirements,
)
from stdio import prompt
log = logging.getLogger("thespian.tweaks")
class AbilityScoreImprovementError(Exception):
"""Handles ability score improvement errors."""
class FlagParserError(Exception):
"""Handles an invalid flag format error."""
class FeatOptionParser:
"""Generates and parses feat characteristic flags by feat.
FLAG OPTION PARSER SYSTEM
PIPEBAR: Used to separate flags. i.e: ability=Strength|proficiency=skills
Two flag options are designated in the above example: 'ability', and 'proficiency'.
ALLOWED FLAG OPTIONS:
Designates certain instructions for applying feat related "perks" to a character.
- ability
- proficiency
- savingthrows
- speed
    COMMA: Used to identify the number of occurrences of a flag. i.e: languages,2
The example above means that a player can choose two languages.
EQUAL SIGN: Used to separate option parameters. i.e ability=Strength,0
The example above means Strength is a designated parameter for the ability option.
In this case the character would get an enhancement to Strength.
There is more to this and is explained further below.
    DOUBLE AMPERSAND: Used to separate parameter options. i.e ability=Strength&&Dexterity,1
The example above means the player can choose a one time ehancement to Strength or Dexterity.
    PLUS SIGN: Used to separate parameter options. i.e ability=Strength+Dexterity
The example above means the player can gain an enhancement in both Strength and Dexterity.
"""
# Parser Option Separators
PARSER_OPTIONS = "|"
OPTION_INCREMENT = ","
OPTION_PARAMETER = "="
PARAM_SINGLE_SELECTION = "&&"
PARAM_MULTIPLE_SELECTION = "+"
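    # Illustrative parse (the flag string here is assumed, not taken from a real feat entry):
    #   "ability=Strength&&Dexterity,1|proficiency=skills,2" yields
    #   {"ability": {"increment": "1", "options": ["Strength", "Dexterity"]},
    #    "proficiency": {"increment": "2", "options": "skills"}}
    # i.e. one ability bonus chosen from Strength or Dexterity plus two skill proficiencies.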
def __init__(self, feat, prof):
self.feat = feat
self.profile = prof
self.perks = get_feat_perks(self.feat)
def _get_proficiency_options(self, prof_type: str) -> list:
"""Returns a list of bonus proficiencies for a feat by proficiency type."""
return get_feat_proficiencies(self.feat, prof_type)
def _get_sub_menu_options(self, available_options) -> dict | bool:
"""Creates a dictionary of sub menu options, if applicable."""
        if self._is_sub_menu(available_options):
sub_options = dict()
for option in available_options:
sub_options[option] = self._get_proficiency_options(option)
return sub_options
return False
@staticmethod
def _is_sub_menu(available_options) -> bool:
"""Returns True if sub menu options are available. False otherwise."""
for option in available_options:
if not option.islower():
return False
return True
def _parse_flags(self) -> dict:
"""Generates the characteristics for the specified feat."""
parsed_flags = dict()
raw_flags = self.perks.get("flags")
if raw_flags is None:
return parsed_flags
flag_pairs = raw_flags.split(self.PARSER_OPTIONS)
for flag_pair in flag_pairs:
if self.OPTION_INCREMENT not in flag_pair:
raise FlagParserError("Pairs must be formatted in 'name,value' pairs.")
attribute_name, increment = flag_pair.split(self.OPTION_INCREMENT)
if self.OPTION_PARAMETER not in attribute_name:
parsed_flags[attribute_name] = {"increment": increment}
else:
flag_options = attribute_name.split(self.OPTION_PARAMETER)
# Allowable flags: ability, proficiency, savingthrows, speed
attribute_name = flag_options[0]
try:
if attribute_name not in (
"ability",
"proficiency",
"savingthrows",
"speed",
):
raise FlagParserError(
f"Illegal flag name '{attribute_name}' specified."
)
except FlagParserError:
# pass
return parsed_flags
if self.PARAM_SINGLE_SELECTION in flag_options[1]:
options = flag_options[1].split(self.PARAM_SINGLE_SELECTION)
else:
options = flag_options[1]
parsed_flags[attribute_name] = {
"increment": increment,
"options": options,
}
return parsed_flags
def parse(self) -> dict:
"""Parses the generated flags for the chosen feat."""
final_flag = self._parse_flags()
if len(final_flag) == 0:
return
parsed_flag = dict()
for flag, options in final_flag.items():
if flag in ("ability", "proficiency"):
increment = int(options["increment"])
menu_options = options["options"]
if len(menu_options) < 1:
raise FlagParserError("Malformed parser instructions error.")
if flag == "ability":
if increment == 0:
raise FlagParserError(
"Flag attribute 'ability' requires a positive integer value."
)
# For feats that use the 'savingthrows' flag.
# Limits choices based on current saving throw proficiencies.
if "savingthrows" in final_flag:
menu_options = [
x for x in menu_options if x not in self.profile["savingthrows"]
]
if isinstance(menu_options, str):
my_ability = menu_options
elif isinstance(menu_options, list):
for _ in range(increment):
my_ability = prompt(
"Choose the ability you would like to apply a bonus to.",
menu_options,
)
menu_options.remove(my_ability)
log.info(f"You selected the ability '{my_ability}'.")
# If 'savingthrows' flag specified, add proficiency for ability saving throw.
if "savingthrows" in final_flag:
self.profile["savingthrows"].append(my_ability)
log.info(
f"You gained proficiency in the '{my_ability}' saving throw."
)
bonus_value = self.perks[flag][my_ability]
parsed_flag[flag] = (my_ability, bonus_value)
elif flag == "proficiency":
# Increment value of 0 means append ALL listed bonuses.
# Increment values other than 0 means add # of bonuses == increment value.
chosen_options = dict()
submenu_options = None
if isinstance(menu_options, str) and increment == 0:
chosen_options[menu_options] = self._get_proficiency_options(
menu_options
)
elif isinstance(menu_options, list):
for _ in range(increment):
my_bonus = prompt(f"Choose your bonus: '{flag}'.", menu_options)
if not self._is_sub_menu(menu_options):
menu_options.remove(my_bonus)
else:
# Generate submenu options, if applicable.
if submenu_options is None:
submenu_options = self._get_sub_menu_options(
menu_options
)
submenu_options[my_bonus] = [
x
for x in submenu_options[my_bonus]
if x not in self.profile[my_bonus]
]
# Create storage handler for selections, if applicable.
if len(chosen_options) == 0:
for opt in submenu_options:
chosen_options[opt] = list()
submenu_choice = prompt(
f"Choose submenu option: '{my_bonus}'.",
submenu_options.get(my_bonus),
)
chosen_options[my_bonus].append(submenu_choice)
submenu_options[my_bonus].remove(submenu_choice)
# Reset the submenu options after use
submenu_options = None
log.info(
f"You selected the {flag} ({my_bonus}) bonus '{submenu_choice}'."
)
elif isinstance(menu_options, str):
for prof_type in menu_options.split(self.PARAM_MULTIPLE_SELECTION):
chosen_proficiencies = list()
# Pull full collection of bonus proficiencies,
proficiency_options = get_feat_proficiencies(
self.feat, prof_type
)
# If collection is dict, sort through sub categories,
# And choose only the unselected options in that category.
# Otherwise, simply sort out the unselected options
if isinstance(proficiency_options, dict):
temp = list()
for types in tuple(proficiency_options.keys()):
if types not in self.profile[prof_type]:
temp += proficiency_options[types]
proficiency_options = temp
else:
proficiency_options = [
x
for x in proficiency_options
if x not in self.profile[prof_type]
]
for _ in range(increment):
# Clear out the temporarily chosen options.
proficiency_options = [
x
for x in proficiency_options
if x not in chosen_proficiencies
]
my_bonus = prompt(
f"Choose your bonus: {flag}.", proficiency_options
)
chosen_proficiencies.append(my_bonus)
proficiency_options.remove(my_bonus)
log.info(
f"You selected the {flag} ({prof_type}) bonus '{my_bonus}'."
)
chosen_options[prof_type] = chosen_proficiencies
for k, v in chosen_options.items():
parsed_flag[k] = v
elif flag == "speed":
speed_value = self.perks[flag]
if speed_value != 0:
parsed_flag[flag] = speed_value
elif flag == "spells":
bonus_spells = self.perks[flag]
for index, spell in enumerate(bonus_spells):
if isinstance(spell, list):
spell_choice = prompt("Choose your bonus spell.", spell)
bonus_spells[index] = spell_choice
log.info(f"You selected the spell {spell_choice}.")
parsed_flag[flag] = bonus_spells
return parsed_flag
@dataclass
class AbilityScoreImprovement:
"""Used to apply ability and/or feat upgrades."""
character: dict
def _add_feat_perks(self, feat: str) -> None:
"""Applies feat related perks."""
parsed_attributes = FeatOptionParser(feat, self.character).parse()
if parsed_attributes is None:
return
for flag, options in parsed_attributes.items():
if flag == "ability":
ability, bonus = options
self._set_ability_score(ability, bonus)
else:
self.character[flag] += options
def _count_upgrades(self) -> int:
"""Returns the number of available upgrades."""
upgrade_count = 0
for x in range(1, self.character["level"] + 1):
if (x % 4) == 0 and x != 20:
upgrade_count += 1
if self.character["klass"] == "Fighter" and self.character["level"] >= 6:
upgrade_count += 1
if self.character["klass"] == "Rogue" and self.character["level"] >= 8:
upgrade_count += 1
if self.character["klass"] == "Fighter" and self.character["level"] >= 14:
upgrade_count += 1
if self.character["level"] >= 19:
upgrade_count += 1
return upgrade_count
def _has_requirements(self, feat: str) -> bool:
"""Checks if feat requirements have been met."""
# Character already has feat
if feat in self.character["feats"]:
return False
# If Heavily, Lightly, or Moderately Armored feat or a Monk.
# "Armor Related" or Weapon Master feat but already proficient.
if (
feat
in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
)
and self.character["klass"] == "Monk"
):
return False
elif feat in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
"Weapon Master",
):
# Heavily Armored: Character already has heavy armor proficiency.
# Lightly Armored: Character already has light armor proficiency.
# Moderately Armored: Character already has medium armor proficiency.
# Weapon Master: Character already has martial weapon proficiency.
if feat == "Heavily Armored" and "Heavy" in self.character["armors"]:
return False
elif feat == "Lightly Armored" and "Light" in self.character["armors"]:
return False
elif feat == "Moderately Armored" and "Medium" in self.character["armors"]:
return False
elif feat == "Weapon Master" and "Martial" in self.character["weapons"]:
return False
# Cycle through ALL prerequisites for the feat.
prerequisite = get_feat_requirements(feat)
for requirement, _ in prerequisite.items():
# Ignore requirements that are None
if prerequisite.get(requirement) is None:
continue
# Check ability requirements
if requirement == "ability":
for ability, required_score in prerequisite.get(requirement).items():
my_score = self.character["scores"][ability]
if my_score < required_score:
return False
# Check caster requirements
if requirement == "caster":
# If no spellcasting ability.
if prerequisite[requirement] and self.character["spellslots"] == "0":
return False
# Magic Initiative requirements check
if feat == "Magic Initiative" and self.character["klass"] not in (
"Bard",
"Cleric",
"Druid",
"Sorcerer",
"Warlock",
"Wizard",
):
return False
# Ritual Caster requirements check
if feat == "Ritual Caster":
                # the character profile is assumed to store the class's primary abilities under "ability"
                primary_ability = self.character["ability"][0]
                my_score = self.character["scores"][primary_ability]
required_score = prerequisite["ability"][primary_ability]
if my_score < required_score:
return False
# Check proficiency requirements
if requirement == "proficiency":
if feat in (
"Heavy Armor Master",
"Heavily Armored",
"Medium Armor Master",
"Moderately Armored",
):
armors = prerequisite.get(requirement).get("armors")
for armor in armors:
if armor not in self.character["armors"]:
return False
# Check race requirements
if requirement == "race":
if self.character["race"] not in prerequisite.get(requirement):
return False
# Check subrace requirements
if requirement == "subrace":
if self.character["subrace"] not in prerequisite.get(requirement):
return False
return True
def _is_adjustable(self, ability: str, bonus: int = 1) -> bool:
"""Checks if ability is adjustable < 20."""
if not isinstance(ability, str):
raise AbilityScoreImprovementError(
"Argument 'ability' must be of type 'str'."
)
if not isinstance(bonus, int):
raise AbilityScoreImprovementError(
"Argument 'bonus' must be of type 'int'."
)
if ability not in self.character["scores"]:
raise AbilityScoreImprovementError(
f"Invalid ability '{ability}' specified."
)
if (self.character["scores"][ability] + bonus) > 20:
return False
return True
def run(self) -> None:
"""Executes the ability score improvement class."""
# Determine actual hp.
modifier = get_ability_modifier("Constitution", self.character["scores"])
log.info(f"You have a Constitution modifier of {modifier}.")
bonus_hit_points = modifier * self.character["level"]
log.info(f"Your modifier*level provide {bonus_hit_points} bonus hit points.")
total_hit_points = self.character["hit_points"] + bonus_hit_points
self.character["hit_points"] = total_hit_points
log.info(f"You have {total_hit_points} total hit points.")
if self.character["level"] < 4:
return
num_of_upgrades = self._count_upgrades()
while num_of_upgrades > 0:
if num_of_upgrades > 1:
log.info(f"You have {num_of_upgrades} upgrades available.")
else:
log.info("You have 1 upgrade available.")
my_path = prompt(
"Follow which upgrade path?", ["Upgrade Ability", "Choose Feat"]
)
# Path #1: Upgrade an Ability.
if my_path == "Upgrade Ability":
my_bonus = prompt("Apply how many points?", ["1", "2"])
log.info(f"You chose an ability bonus of: +{my_bonus}.")
my_bonus = int(my_bonus)
ability_options = [
a
for a in (
"Strength",
"Dexterity",
"Constitution",
"Intelligence",
"Wisdom",
"Charisma",
)
if self._is_adjustable(a, my_bonus)
]
# Apply +2 bonus to one ability.
# Apply +1 bonus to two abilities.
if my_bonus == 1:
for _ in range(2):
my_ability = prompt(
"Which ability?",
ability_options,
)
ability_options.remove(my_ability)
self._set_ability_score(my_ability, my_bonus)
elif my_bonus == 2:
my_ability = prompt(
"Which ability?",
ability_options,
)
self._set_ability_score(my_ability, my_bonus)
# Path #2: Add a new Feat.
elif my_path == "Choose Feat":
feat_options = [
x for x in get_feats_list() if x not in self.character["feats"]
]
my_feat = prompt(
"Which feat do you want to acquire?",
feat_options,
)
log.info(f"Checking requirements for the requested feat {my_feat}...")
while not self._has_requirements(my_feat):
feat_options.remove(my_feat)
log.warn(
f"You don't meet the requirements for '{my_feat}'.",
)
my_feat = prompt(
f"Which feat do you want to acquire?",
feat_options,
)
else:
self._add_feat_perks(my_feat)
self.character["feats"].append(my_feat)
log.info(f"You selected the feat {my_feat}.")
num_of_upgrades -= 1
def _set_ability_score(self, ability, bonus=1) -> None:
"""Applies a bonus to a specified ability."""
if not self._is_adjustable(ability, bonus):
log.warn(f"Ability '{ability}' is not adjustable.")
else:
new_score = self.character.get("scores").get(ability) + bonus
self.character["scores"][ability] = new_score
log.info(f"You applied a +{bonus} bonus to your {ability}.")
log.info(f"Your {ability} score is now a {new_score}.")
| 40.481949 | 101 | 0.514112 | 22,113 | 0.985999 | 0 | 0 | 10,438 | 0.465421 | 0 | 0 | 6,347 | 0.283007 |
40d5a5d08da3928cd30de9755f828a4c98f498bf | 4,135 | py | Python | atomic1D/ImpuritySpecies.py | TBody/atomic1D | fcab88f3b303468f23ac75b847c76244593f4b7f | [
"MIT"
]
| 1 | 2019-05-18T22:32:21.000Z | 2019-05-18T22:32:21.000Z | atomic1D/ImpuritySpecies.py | TBody/atomic1D | fcab88f3b303468f23ac75b847c76244593f4b7f | [
"MIT"
]
| null | null | null | atomic1D/ImpuritySpecies.py | TBody/atomic1D | fcab88f3b303468f23ac75b847c76244593f4b7f | [
"MIT"
]
| null | null | null | class ImpuritySpecies(object):
# For storing OpenADAS data related to a particular impurity species
# Loosely based on cfe316/atomic/atomic_data.py/AtomicData class (although with much less code since
# all of the F77 importing is done in the seperate <<make json_update>> code since BOUT++ protocol
# requires fortran code be isolated from main operation)
def __init__(self,symbol,adas_files_dict={},rate_coefficients={},impurity_fraction=None):
# Searches for year, atomic_number, has_charge_exchange from user_input.json
#
# Default initialiser for class
# symbol : (str) | element symbol (e.g. 'C')
# name : (str) | full name of element (for printing only)
# year : (int) | year for which OpenADAS data was searched (1996)
# has_charge_exchange : (bool) | whether cx_power (prc) was found for this element-year combination (True)
# atomic_number : (int) | number of protons for impurity species (6)
# adas_files_dict : (str -> str) | dictionary of OpenADAS files, indexed by file-type ('ionisation': 'scd96_c', ...)
# rate_coefficients : (str -> RateCoefficient) | dictionary of RateCoefficient objects corresponding to adas files ('ionisation': <RateCoefficientObject>, ...)
import json
with open('user_input.json','r') as fp:
data_dict = json.load(fp)
element_dict = data_dict[symbol]
assert symbol == element_dict['symbol']
self.symbol = symbol
self.name = element_dict['name']
self.year = element_dict['year']
self.has_charge_exchange = element_dict['has_charge_exchange']
self.atomic_number = element_dict['atomic_number']
self.adas_files_dict = adas_files_dict
self.rate_coefficients = rate_coefficients
def __str__(self):
# Printing method, for easier inspection of object data
_print_adas_dict = ''
if len(self.adas_files_dict) == 0:
_print_adas_check = 'Not initialised'
else:
_print_adas_check = 'Initialised'
for key, value in self.adas_files_dict.items():
_print_adas_dict = _print_adas_dict + '{:>25} -> {}\n'.format(key,value)
if len(self.rate_coefficients) == 0:
_print_rate_check = 'Not initialised'
else:
_print_rate_check = 'Initialised'
_printing_string = 'ImpuritySpecies object with attributes'+\
'\n{:>25} = {}'.format('symbol', self.symbol)+\
'\n{:>25} = {}'.format('year', self.year)+\
'\n{:>25} = {}'.format('has_charge_exchange', self.has_charge_exchange)+\
'\n{:>25} = {}'.format('atomic_number', self.atomic_number)+\
'\n{:>25} = {}'.format('adas_files_dict', _print_adas_check)+\
'\n{:>25} = {}'.format('rate_coefficients', _print_rate_check)
if len(self.adas_files_dict) != 0:
_printing_string += '\n--------------------------------------------------\n'+_print_adas_dict
return _printing_string
def addJSONFiles(self,physics_process,filetype_code,JSON_database_path):
# 1. Make the filename string expected for the json adas file
# 2. Check that this file exists in the JSON_database_path/json_data directory
# 3. Add this file to the atomic data .adas_files_dict attribute
import os.path
filename = '{}{}_{}.json'.format(filetype_code,str(self.year)[-2:],self.symbol)
full_path = '{}/json_data/{}'.format(JSON_database_path,filename)
if not(os.path.isfile(full_path)):
raise FileNotFoundError('File {} not found in {}/json_data'.format(filename,JSON_database_path))
self.adas_files_dict[physics_process] = filename
def makeRateCoefficients(self,JSON_database_path):
# Calls the RateCoefficient.__init__ method for each entry in the .adas_files_dict
# Generates a dictionary of RateCoefficient objects as .rate_coefficients
from atomic1D import RateCoefficient
for physics_process, filename in self.adas_files_dict.items():
full_path = '{}/json_data/{}'.format(JSON_database_path,filename)
self.rate_coefficients[physics_process] = RateCoefficient(full_path)
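# Minimal usage sketch (symbol, file-type code and database path are illustrative):
#   carbon = ImpuritySpecies('C')
#   carbon.addJSONFiles('ionisation', 'scd', JSON_database_path)
#   carbon.makeRateCoefficients(JSON_database_path)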
| 47.528736 | 163 | 0.680774 | 4,134 | 0.999758 | 0 | 0 | 0 | 0 | 0 | 0 | 2,130 | 0.515115 |
40d5ed5ea76d8603996be2780920650b434417e6 | 9,213 | py | Python | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
]
| null | null | null | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
]
| null | null | null | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
]
| null | null | null | import os
import platform
import numpy
class Matrix:
def __init__(self):
if platform.system() == "Windows":
self.delimiterForPath = "\\"
else:
self.delimiterForPath = "/"
self.labelsDType = numpy.int32
self.imagesDType = numpy.float32
def deleteRows(self, array, rows, axis):
return numpy.delete(array, rows, axis)
def swapAxes(self, array, axe1, axe2):
return numpy.swapaxes(array, axe1, axe2)
def getImageCategoryFromPath(self, imagePath):
# path in format : ..\\Category\\ImageName
return numpy.array(imagePath.split(self.delimiterForPath, len(imagePath))[
len(imagePath.split(self.delimiterForPath, len(imagePath))) - 2], dtype=self.labelsDType)
def getNumberOfClasses(self, array):
return len(numpy.unique(array))
def getImagesInDirectory(self, folderPath, extensions=('.jpg', '.jpeg', '.png', '.bmp', '.gif')):
imagesList = []
assert os.path.isdir(folderPath), 'No folder with that name exists : %r ' % folderPath
# for all images in folder path
for root, dirs, files in os.walk(folderPath):
for name in files:
if name.endswith(extensions):
imagesList.append(root + self.delimiterForPath + name)
return imagesList
def addDimension(self, array, axis):
return numpy.expand_dims(a=array, axis=axis)
def ExtractImages(self, folderPath, image_size=(256, 256), convertion=None, imageChannels=3,
preprocessImages=False ,normalize=True ,normalizeRange=(0,1) ):
from Images.ImageProcessing import ImageProcessing
imageList = self.getImagesInDirectory(folderPath=folderPath)
assert len(imageList) > 0, 'No images in folder : %r' % folderPath
if convertion != "Grayscale" and imageChannels != 3:
if convertion == None:
convertion = "RGB"
raise ValueError(' %r supports only 3 image channels!' % convertion)
images_list = []
labels_list = []
# for all images in folder path
for imagePath in imageList:
# get category of image and add category to array
labels_list.append(
self.getImageCategoryFromPath(imagePath=imagePath))
# get image array and add image to array
images_list.append(
ImageProcessing().getImageArray(imagePath=imagePath, imageSize=image_size, convertion=convertion,
imageChannels=imageChannels,preprocessImages=preprocessImages,
Normalize=normalize,NormalizeRange=normalizeRange))
# convert lists to numpy array
allLabelsArray = numpy.array(labels_list).reshape(len(labels_list))
allImagesArray = numpy.array(images_list).reshape(len(imageList), imageChannels, image_size[0], image_size[1])
return [allImagesArray, allLabelsArray]
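    # Illustrative call (assumed folder layout <folderPath>/<numeric-category>/<image>.jpg):
    #   images, labels = Matrix().ExtractImages('TrainSet', image_size=(64, 64))
    # gives images of shape (N, 3, 64, 64) and labels of shape (N,).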
# returns batches from data with size batchSize
def chunker(self,data, batchSize):
return (data[pos:pos + batchSize] for pos in xrange(0, len(data), batchSize))
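    # Illustrative call: list(Matrix().chunker([1, 2, 3, 4, 5], 2)) yields [[1, 2], [3, 4], [5]].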
def shuffleMatrix(self,array):
numpy.random.shuffle(array)
def shuffleMatrixAlongWithLabels(self, array1, array2):
# shuffle array1 (images) with corresponding labels array2
from random import shuffle
array1_shuf = []
array2_shuf = []
index_shuf = range(len(array1))
shuffle(index_shuf)
for i in index_shuf:
array1_shuf.append(array1[i])
array2_shuf.append(array2[i])
return [numpy.array(array1_shuf, dtype=self.imagesDType).astype('float32'), numpy.array(array2_shuf, dtype=self.labelsDType).astype('float32')]
    def TakeExamplesFromEachCategory(self, features, labels, maxImagesPerCategory=10):
        import gc
        validationArray = []
        validation_labels = []
        # for 0 to number of output classes
        for index in range(0, self.getNumberOfClasses(labels)):
            # find the indexes of the examples belonging to this category
            indexes = numpy.where(labels == index)
            # if the category has 0 or 1 training instances, take none for validation
            if len(indexes[0]) in [0, 1]:
                continue
            # if the category has fewer instances than requested, take half of them for validation
            imagesPerCategory = maxImagesPerCategory
            if len(indexes[0]) <= maxImagesPerCategory:
                imagesPerCategory = len(indexes[0]) // 2
            assert len(indexes[0]) >= imagesPerCategory, \
                "Error : Validation examples per category more than train instances. Category: {0}" \
                " validation per category : {1} , training examples : {2} ".format(index, imagesPerCategory, len(indexes[0]))
            # take the first imagesPerCategory examples of this category for validation
            chosenIndexes = indexes[0][:imagesPerCategory]
            for catIndex in chosenIndexes:
                validationArray.append(features[catIndex])
                validation_labels.append(labels[catIndex])
            # delete all chosen rows in a single call so the remaining indexes do not shift
            features = numpy.delete(features, chosenIndexes, axis=0)
            labels = numpy.delete(labels, chosenIndexes, axis=0)
            gc.collect()
        return [features, numpy.array(validationArray, dtype=self.imagesDType).astype('float32'), labels,
                numpy.array(validation_labels, dtype=self.labelsDType).astype('int32')]
def takeLastExamples(self,trainArray, train_labels, validationPercentage=.2):
# take validationPercentage of training data for validation
validationExamples = int(validationPercentage * len(trainArray))
# We reserve the last validationExamples training examples for validation.
trainArray, validationArray = trainArray[:-validationExamples], trainArray[-validationExamples:]
train_labels, validation_labels = train_labels[:-validationExamples], train_labels[-validationExamples:]
return [trainArray, validationArray, train_labels, validation_labels]
def SplitTrainValidation(self, trainArray, train_labels, validationPercentage=.2,takeLastExamples=False,maxImagesPerCategory=10):
if takeLastExamples:
return self.takeLastExamples(trainArray, train_labels, validationPercentage)
else:
return self.TakeExamplesFromEachCategory(trainArray, train_labels,maxImagesPerCategory)
def moveFile(self, src, dest):
import shutil
shutil.move(src, dest)
if __name__ == '__main__':
trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TrainSet'
testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TestSet'
#
# trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy'
# testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy - Copy'
#
# # trainFolder = '/home/leonidas/Desktop/images/train'
# # testFolder = '/home/leonidas/Desktop/images/test'
#
# [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, outputClasses] = \
# load_dataset(trainFolder, testFolder,imageSize=(3,3),convertion='L',imageChannels=1)
#
# print trainArray.shape
# print trainArray
# # print validation_labels
# # print train_labels
# # print trainArray
#
# print trainArray.shape
# print train_labels.shape
# print testArray.shape
# print test_labels.shape
# print validationArray.shape
# print validation_labels.shape
#
# trainPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512.txt'
# testPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512.txt'
# trainLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512_labels.txt'
# testLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512_labels.txt'
# [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels,
# outputClasses] = loadFeatures(trainPath=trainPath, trainLabels=trainLabelPath, testPath=testPath,
# testLabels=testLabelPath);
i=0;
for trainArray,train_labels in Matrix().getArrayOfImagesUsingMiniBatches(folderPath=trainFolder,image_size=(100,100),batch_size=15):
print (trainArray.shape)
print (train_labels.shape)
i+=len(trainArray)
print "aaasdasdas d : ",i
# # print validation_labels
# # print train_labels
# # print trainArray
#
# print trainArray.shape
# print train_labels.shape
# print testArray.shape
# print test_labels.shape
# print validationArray.shape
# print validation_labels.shape
| 43.457547 | 151 | 0.64485 | 6,917 | 0.750787 | 0 | 0 | 0 | 0 | 0 | 0 | 2,875 | 0.312059 |
40d66ffe931947e9a30f4f5ac4f0646b982e924f | 7,906 | py | Python | networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | db709fb27b492d3be3c094fa43e8c696962369b7 | [
"Apache-2.0"
]
| null | null | null | networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | db709fb27b492d3be3c094fa43e8c696962369b7 | [
"Apache-2.0"
]
| null | null | null | networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | db709fb27b492d3be3c094fa43e8c696962369b7 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_calico.common import config as calico_config
from networking_calico.compat import log
from networking_calico import datamodel_v3
from networking_calico.plugins.ml2.drivers.calico.syncer import ResourceSyncer
LOG = log.getLogger(__name__)
# Each OpenStack security group is mapped to a Calico NetworkPolicy. A VM's
# security group membership is represented by the VM having a label for each
# security group that it belongs to; thus the selector
# 'has(<security-group-label>)' represents the VMs that belong to that security
# group.
#
# The label for each security group is 'sg.projectcalico.org/openstack-'
# followed by the security group ID, and the name of the NetworkPolicy for each
# security group is 'ossg.default.' followed by the security group ID.
SG_LABEL_PREFIX = 'sg.projectcalico.org/openstack-'
SG_NAME_LABEL_PREFIX = 'sg-name.projectcalico.org/openstack-'
SG_NAME_MAX_LENGTH = (datamodel_v3.SANITIZE_LABEL_MAX_LENGTH -
len(SG_NAME_LABEL_PREFIX))
SG_NAME_PREFIX = 'ossg.default.'
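# Worked example (security group ID assumed): for an OpenStack security group with
# ID 'a1b2c3', member VMs carry the label 'sg.projectcalico.org/openstack-a1b2c3',
# the corresponding Calico NetworkPolicy is named 'ossg.default.a1b2c3', and its
# selector is "has(sg.projectcalico.org/openstack-a1b2c3)".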
class PolicySyncer(ResourceSyncer):
def __init__(self, db, txn_from_context):
super(PolicySyncer, self).__init__(db,
txn_from_context,
"NetworkPolicy")
self.region_string = calico_config.get_region_string()
self.namespace = datamodel_v3.get_namespace(self.region_string)
def delete_legacy_etcd_data(self):
if self.namespace != datamodel_v3.NO_REGION_NAMESPACE:
datamodel_v3.delete_legacy(self.resource_kind, SG_NAME_PREFIX)
def get_all_from_etcd(self):
results = []
for r in datamodel_v3.get_all(self.resource_kind, self.namespace):
name, _, _ = r
if name.startswith(SG_NAME_PREFIX):
results.append(r)
return results
def create_in_etcd(self, name, spec):
return datamodel_v3.put(self.resource_kind,
self.namespace,
name,
spec,
mod_revision=0)
def update_in_etcd(self, name, spec, mod_revision=None):
return datamodel_v3.put(self.resource_kind,
self.namespace,
name,
spec,
mod_revision=mod_revision)
def delete_from_etcd(self, name, mod_revision):
return datamodel_v3.delete(self.resource_kind,
self.namespace,
name,
mod_revision=mod_revision)
def get_all_from_neutron(self, context):
return dict((SG_NAME_PREFIX + sg['id'], sg)
for sg in self.db.get_security_groups(context))
def neutron_to_etcd_write_data(self, sg, context, reread=False):
if reread:
# We don't need to reread the SG row itself here, because we don't
# use any information from it, apart from its ID as a key for the
# following rules.
pass
rules = self.db.get_security_group_rules(
context,
filters={'security_group_id': [sg['id']]}
)
return policy_spec(sg['id'], rules)
def write_sgs_to_etcd(self, sgids, context):
rules = self.db.get_security_group_rules(
context, filters={'security_group_id': sgids}
)
for sgid in sgids:
self.update_in_etcd(SG_NAME_PREFIX + sgid,
policy_spec(sgid, rules))
def policy_spec(sgid, rules):
"""Generate JSON NetworkPolicySpec for the given security group."""
# <rules> can include those for several security groups. Pick out the
# rules for the security group that we are translating right now.
sg_rules = (r for r in rules if r['security_group_id'] == sgid)
# Split the rules based on direction, and map to Calico form.
inbound_rules = []
outbound_rules = []
for rule in sg_rules:
if rule['direction'] == 'ingress':
inbound_rules.append(_neutron_rule_to_etcd_rule(rule))
else:
outbound_rules.append(_neutron_rule_to_etcd_rule(rule))
return {
'ingress': inbound_rules,
'egress': outbound_rules,
'selector': 'has(%s)' % (SG_LABEL_PREFIX + sgid),
}
def _neutron_rule_to_etcd_rule(rule):
"""_neutron_rule_to_etcd_rule
Translate a single Neutron rule dict to a single dict in our
etcd format.
"""
ethertype = rule['ethertype']
etcd_rule = {'action': 'Allow'}
# Map the ethertype field from Neutron to etcd format.
etcd_rule['ipVersion'] = {'IPv4': 4,
'IPv6': 6}[ethertype]
# Map the protocol field from Neutron to etcd format.
if rule['protocol'] is None or rule['protocol'] == -1:
pass
elif rule['protocol'] == 'ipv6-icmp':
etcd_rule['protocol'] = 'ICMPv6'
elif rule['protocol'] == 'icmp':
etcd_rule['protocol'] = {'IPv4': 'ICMP',
'IPv6': 'ICMPv6'}[ethertype]
elif isinstance(rule['protocol'], int):
etcd_rule['protocol'] = rule['protocol']
else:
etcd_rule['protocol'] = rule['protocol'].upper()
port_spec = None
if rule['protocol'] == 'icmp' or rule['protocol'] == 'ipv6-icmp':
# OpenStack stashes the ICMP match criteria in
# port_range_min/max.
icmp_fields = {}
icmp_type = rule['port_range_min']
if icmp_type is not None and icmp_type != -1:
icmp_fields['type'] = icmp_type
icmp_code = rule['port_range_max']
if icmp_code is not None and icmp_code != -1:
icmp_fields['code'] = icmp_code
if icmp_fields:
etcd_rule['icmp'] = icmp_fields
else:
# src/dst_ports is a list in which each entry can be a
# single number, or a string describing a port range.
if rule['port_range_min'] == -1:
port_spec = None
elif rule['port_range_min'] == rule['port_range_max']:
if rule['port_range_min'] is not None:
port_spec = [rule['port_range_min']]
else:
port_spec = ['%s:%s' % (rule['port_range_min'],
rule['port_range_max'])]
entity_rule = {}
if rule['remote_group_id'] is not None:
entity_rule['selector'] = 'has(%s)' % (SG_LABEL_PREFIX +
rule['remote_group_id'])
if rule['remote_ip_prefix'] is not None:
entity_rule['nets'] = [rule['remote_ip_prefix']]
LOG.debug("=> Entity rule %s" % entity_rule)
# Store in source or destination field of the overall rule.
if entity_rule:
if rule['direction'] == 'ingress':
etcd_rule['source'] = entity_rule
if port_spec is not None:
etcd_rule['destination'] = {'ports': port_spec}
else:
if port_spec is not None:
entity_rule['ports'] = port_spec
etcd_rule['destination'] = entity_rule
LOG.debug("=> %s Calico rule %s" % (rule['direction'], etcd_rule))
return etcd_rule
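# Worked example (added for illustration; the rule values below are made up,
# not taken from a real deployment). An ingress IPv4 TCP rule for port 80
# from 10.0.0.0/24 with no remote group maps as follows:
#
#   rule = {'ethertype': 'IPv4', 'protocol': 'tcp', 'direction': 'ingress',
#           'port_range_min': 80, 'port_range_max': 80,
#           'remote_group_id': None, 'remote_ip_prefix': '10.0.0.0/24'}
#   _neutron_rule_to_etcd_rule(rule)
#   => {'action': 'Allow', 'ipVersion': 4, 'protocol': 'TCP',
#       'source': {'nets': ['10.0.0.0/24']},
#       'destination': {'ports': [80]}}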
| 39.728643 | 79 | 0.611055 | 2,598 | 0.328611 | 0 | 0 | 0 | 0 | 0 | 0 | 2,856 | 0.361245 |
40d757b3788e8715de2cff8adf8b1027f7b43c6d | 4,359 | py | Python | 25/main.py | gosha20777/mipt-bioinfo-2021 | ed14975e9f597e7b2427bc589f12ac08d451c509 | [
"MIT"
]
| null | null | null | 25/main.py | gosha20777/mipt-bioinfo-2021 | ed14975e9f597e7b2427bc589f12ac08d451c509 | [
"MIT"
]
| null | null | null | 25/main.py | gosha20777/mipt-bioinfo-2021 | ed14975e9f597e7b2427bc589f12ac08d451c509 | [
"MIT"
]
| null | null | null | def global_alignment(seq1, seq2, score_matrix, penalty):
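    # Needleman-Wunsch global alignment with a linear gap penalty. Fills the
    # full (len1+1) x (len2+1) score matrix, recording in `backtrack` whether
    # each cell came from above (0, gap in seq2), from the left (1, gap in
    # seq1) or from the diagonal (2, match/mismatch), then traces back to
    # return (max_score_as_str, aligned_seq1, aligned_seq2).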
len1, len2 = len(seq1), len(seq2)
s = [[0] * (len2 + 1) for i in range(len1 + 1)]
backtrack = [[0] * (len2 + 1) for i in range(len1 + 1)]
for i in range(1, len1 + 1):
s[i][0] = - i * penalty
for j in range(1, len2 + 1):
s[0][j] = - j * penalty
for i in range(1, len1 + 1):
for j in range(1, len2 + 1):
score_list = [s[i - 1][j] - penalty, s[i][j - 1] - penalty,
s[i - 1][j - 1] + score_matrix[seq1[i - 1], seq2[j - 1]]]
s[i][j] = max(score_list)
backtrack[i][j] = score_list.index(s[i][j])
indel_insert = lambda seq, i: seq[:i] + '-' + seq[i:]
align1, align2 = seq1, seq2
a, b = len1, len2
max_score = str(s[a][b])
while a * b != 0:
if backtrack[a][b] == 0:
a -= 1
align2 = indel_insert(align2, b)
elif backtrack[a][b] == 1:
b -= 1
align1 = indel_insert(align1, a)
else:
a -= 1
b -= 1
for i in range(a):
align2 = indel_insert(align2, 0)
for j in range(b):
align1 = indel_insert(align1, 0)
return max_score, align1, align2
def mid_column_score(v, w, score_matrix, penalty):
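    # Computes the alignment scores of the column at index len(w) // 2 of the
    # DP matrix while keeping only two columns in memory, as required by the
    # linear-space routine below; returns (column_scores, backtrack), where
    # backtrack[i] records the move into each cell (0 diagonal, 1 horizontal,
    # 2 vertical).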
s = [[i * j * penalty for i in range(-1, 1)] for j in range(len(v) + 1)]
s[0][1] = -penalty
backtrack = [0] * (len(v) + 1)
for j in range(1, len(w) // 2 + 1):
for i in range(0, len(v) + 1):
if i == 0:
s[i][1] = -j * penalty
else:
scores = [s[i - 1][0] + score_matrix[v[i - 1], w[j - 1]], s[i][0] - penalty, s[i - 1][1] - penalty]
s[i][1] = max(scores)
backtrack[i] = scores.index(s[i][1])
if j != len(w) // 2:
s = [[row[1]] * 2 for row in s]
return [i[1] for i in s], backtrack
def mid_edge(v, w, score_matrix, penalty):
source = mid_column_score(v, w, score_matrix, penalty)[0]
mid_to_sink, backtrack = list(map(lambda l: l[::-1], mid_column_score(v[::-1], w[::-1] + ['', '$'][
len(w) % 2 == 1 and len(w) > 1], score_matrix, penalty)))
scores = list(map(sum, zip(source, mid_to_sink)))
max_mid = max(range(len(scores)), key = lambda i: scores[i])
if max_mid == len(scores) - 1:
next_node = (max_mid, len(w) // 2 + 1)
else:
next_node = [(max_mid + 1, len(w) // 2 + 1), (max_mid, len(w) // 2 + 1), (max_mid + 1, len(w) // 2), ][
backtrack[max_mid]]
return (max_mid, len(w) // 2), next_node
def linear_space_alignment(top, bottom, left, right, score_matrix):
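    # Hirschberg-style divide and conquer: aligns seq1[top:bottom] against
    # seq2[left:right] by recursing around the highest-scoring middle edge.
    # Note that seq1, seq2 and penalty are read from module scope (they are
    # set in __main__ below). Returns a two-element list
    # [aligned_piece_of_seq1, aligned_piece_of_seq2].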
v = seq1
w = seq2
if left == right:
return [v[top:bottom], '-' * (bottom - top)]
elif top == bottom:
return ['-' * (right - left), w[left:right]]
elif bottom - top == 1 or right - left == 1:
return global_alignment(v[top:bottom], w[left:right], score_matrix, penalty)[1:]
else:
mid_node, next_node = mid_edge(v[top:bottom], w[left:right], score_matrix, penalty)
mid_node = tuple(map(sum, zip(mid_node, [top, left])))
next_node = tuple(map(sum, zip(next_node, [top, left])))
current = [['-', v[mid_node[0] % len(v)]][next_node[0] - mid_node[0]],
['-', w[mid_node[1] % len(w)]][next_node[1] - mid_node[1]]]
a = linear_space_alignment(top, mid_node[0], left, mid_node[1], score_matrix)
b = linear_space_alignment(next_node[0], bottom, next_node[1], right, score_matrix)
return [a[i] + current[i] + b[i] for i in range(2)]
def linear_space_global_alignment(v, w, score_matrix, penalty):
align1, align2 = linear_space_alignment(0, len(v), 0, len(w), score_matrix)
p = []
for i in zip(align1, align2):
if '-' in i:
p.append(-penalty)
else:
p.append(score_matrix[i])
score = sum(p)
return str(score), align1, align2
if __name__ == '__main__':
with open('input.txt') as f:
seq1 = f.readline().strip()
seq2 = f.readline().strip()
with open('BLOSUM62.txt') as f1:
lines = [line.strip().split() for line in f1.readlines()]
matrix = {(i[0], i[1]): int(i[2]) for i in lines}
penalty = 5
alignment = '\n'.join(linear_space_global_alignment(seq1, seq2, matrix, penalty))
print(alignment)
| 39.990826 | 115 | 0.532003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.014223 |
40d75b3cb34c1d537273d852cc304bd850526e28 | 10,039 | py | Python | utils/visualize_tree.py | moyiming1/Retrosynthesis-pathway-ranking | 380f31189d09395d0de911759b8bcea436b559b2 | [
"MIT"
]
| 10 | 2021-02-24T02:31:40.000Z | 2022-02-17T07:58:46.000Z | utils/visualize_tree.py | wangxr0526/Retrosynthesis-pathway-ranking | 380f31189d09395d0de911759b8bcea436b559b2 | [
"MIT"
]
| 1 | 2022-02-14T16:13:59.000Z | 2022-02-14T16:13:59.000Z | utils/visualize_tree.py | wangxr0526/Retrosynthesis-pathway-ranking | 380f31189d09395d0de911759b8bcea436b559b2 | [
"MIT"
]
| 3 | 2021-01-05T11:43:03.000Z | 2022-02-17T08:52:27.000Z | import os, sys
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_path)
import pickle
def construct_tree_for_visual(tree, node_info_key, depth=0):
tree_for_visual = {'smiles': 'http://askcos.mit.edu/draw/smiles/' + str(tree['smiles']).replace('#', '%23'),
'depth': depth,
'children': []}
if node_info_key in tree.keys():
if type(tree[node_info_key]) is not str:
tree_for_visual['score'] = '{:.3f}'.format(tree[node_info_key])
else:
tree_for_visual['score'] = tree[node_info_key]
else:
tree_for_visual['score'] = ''
if tree['child']:
# tree_for_visual['children'] = []
for child in tree['child']:
tree_for_visual['children'].append(construct_tree_for_visual(child, node_info_key, depth+1))
return tree_for_visual
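# For reference: each node produced above is a dict of the form
#   {'smiles': <askcos draw URL>, 'depth': <int>,
#    'score': <node_info_key value formatted as a string, or ''>,
#    'children': [<child nodes of the same form>]}
# which is the structure the D3 template in create_tree_html() consumes.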
def construct_tree_for_d3_visualization(tree, depth, new_tree=None, max_children=0):
    # Use None instead of a mutable default argument so the dict is not shared
    # across top-level calls.
    if new_tree is None:
        new_tree = {}
    # if 'is_chemical' in tree.keys():
    new_tree['smiles'] = 'http://askcos.mit.edu/draw/smiles/' + str(tree['smiles']).replace('#', '%23')
if 'score' in tree.keys():
new_tree['score'] = str(tree['score'])
else:
new_tree['score'] = ''
# new_tree['smiles'] = str(new_tree['smiles'])
new_tree['children'] = []
if tree['child']:
# print(len(tree['child']))
if max_children < len(tree['child']):
max_children = len(tree['child'])
for child in tree['child']:
new_tree['children'].append({})
_, max_children = construct_tree_for_d3_visualization(child, depth + 1, new_tree['children'][-1], max_children)
return new_tree, max_children
def count_tree_depth_children(tree, count):
count[tree['depth']] += 1
if tree['children']:
for child in tree['children']:
count = count_tree_depth_children(child, count)
return count
def create_tree_html(trees, file_name, tree_info=None, node_info_key='score',
width_factor=1, max_depth=10):
try:
outfile = file_name
except Exception as e:
print(e)
print('Need to specify file name to write results to')
trees_for_visualization = {'name': 'dummy_root', 'children': []}
max_children = 1
for i, tree in enumerate(trees):
output = construct_tree_for_visual(tree, node_info_key)
trees_for_visualization['children'].append(output)
# print()
current_children = max(count_tree_depth_children(output, count=[0]*20))
if current_children > max_children:
max_children = current_children
if tree_info:
# print(tree_info[i])
trees_for_visualization['children'][-1]['_id'] = tree_info[i]
else:
trees_for_visualization['children'][-1]['_id'] = ('T%d' % i)
# print(max_children)
max_children = max(max_children, 3)
height = 300 * len(trees) * max_children / 3 * width_factor
page_width = max_depth * 300
fid_out = open(outfile + '.html', 'w')
fid_out.write('<!DOCTYPE html>\n')
fid_out.write(' <head>\n')
fid_out.write(' <meta charset="utf-8">\n')
fid_out.write(' <title>{}</title>\n'.format(outfile))
fid_out.write(' <style>\n')
fid_out.write(' .node circle {\n')
fid_out.write(' fill: #fff;\n')
fid_out.write(' stroke: steelblue;\n')
fid_out.write(' stroke-width: 3px;\n')
fid_out.write(' }\n')
fid_out.write(' .node rect {\n')
fid_out.write(' fill: #fff;\n')
fid_out.write(' stroke: steelblue;\n')
fid_out.write(' stroke_width: 3px;\n')
fid_out.write(' }\n')
fid_out.write(' .node text { font: 12px sans-serif; }\n')
fid_out.write(' .link {\n')
fid_out.write(' fill: none;\n')
fid_out.write(' stroke: #ccc;\n')
fid_out.write(' stroke-width: 2px;\n')
fid_out.write(' }\n')
fid_out.write(' </style>\n')
fid_out.write(' </head>\n')
fid_out.write(' <body>\n')
fid_out.write('<!-- load the d3.js library --> \n')
fid_out.write('<script src="http://d3js.org/d3.v3.min.js"></script>\n')
fid_out.write('<script>\n')
fid_out.write('var treeData = [\n')
fid_out.write('{}\n'.format(trees_for_visualization))
fid_out.write('];\n')
fid_out.write('var margin = {top: 20, right: 120, bottom: 20, left: 0},\n')
fid_out.write(' width = {} - margin.right - margin.left,\n'.format(page_width))
fid_out.write(' height = {} - margin.top - margin.bottom;\n'.format(height))
fid_out.write('var i = 0;\n')
fid_out.write('var tree = d3.layout.tree()\n')
fid_out.write(' .size([height, width]);\n')
fid_out.write('var diagonal = d3.svg.diagonal()\n')
fid_out.write(' .projection(function(d) { return [d.y, d.x]; });\n')
fid_out.write('var svg = d3.select("body").append("svg")\n')
fid_out.write(' .attr("width", width + margin.right + margin.left)\n')
fid_out.write(' .attr("height", height + margin.top + margin.bottom)\n')
fid_out.write(' .append("g")\n')
fid_out.write(' .attr("transform", \n')
fid_out.write(' "translate(" + margin.left + "," + margin.top + ")");\n')
fid_out.write('root = treeData[0];\n')
fid_out.write('update(root);\n')
fid_out.write('function update(source) {\n')
fid_out.write(' // Compute the new tree layout.\n')
fid_out.write(' var nodes = tree.nodes(root).reverse(),\n')
fid_out.write(' links = tree.links(nodes);\n')
fid_out.write(' // Normalize for fixed-depth.\n')
fid_out.write(' nodes.forEach(function(d) { d.y = d.depth * 250; });\n')
fid_out.write(' // Declare the nodes…\n')
fid_out.write(' var node = svg.selectAll("g.node")\n')
fid_out.write(' .data(nodes, function(d) { return d.id || (d.id = ++i); });\n')
fid_out.write(' // Enter the nodes.\n')
fid_out.write(' var nodeEnter = node.enter().append("g")\n')
fid_out.write(' .attr("class", "node")\n')
fid_out.write(' .attr("transform", function(d) { \n')
fid_out.write(' return "translate(" + d.y + "," + d.x + ")"; });\n')
fid_out.write(' nodeEnter.append("image")\n')
fid_out.write(' .attr("xlink:href", function(d) { return d.smiles; })\n')
fid_out.write(' .attr("x", "-60px")\n')
fid_out.write(' .attr("y", "-60px")\n')
fid_out.write(' .attr("width", "120px")\n')
fid_out.write(' .attr("height", "120px");\n')
fid_out.write(' nodeEnter.append("path")\n')
fid_out.write(' .style("stroke", "black")\n')
fid_out.write(' .style("fill", function(d) { if (d.freq==1) { return "white"; }\n')
fid_out.write(' else if (d.freq==2) { return "yellow";}\n')
fid_out.write(' else if (d.freq==3) { return "orange"; }\n')
fid_out.write(' else if (d.freq>=4) { return "red"; }\n')
fid_out.write(' else {return "white";}\n')
fid_out.write(' })\n')
fid_out.write(' .attr("d", d3.svg.symbol()\n')
fid_out.write(' .size(0)\n')
fid_out.write(' .type(function(d) {if\n')
fid_out.write(' (d.rc_type == "chemical") {return "circle";} else if\n')
fid_out.write(' (d.rc_type == "reaction") {return "cross";}\n')
fid_out.write(' }));\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 0)\n')
fid_out.write(' .attr("y", 35)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d.names; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 200)\n')
fid_out.write(' .attr("y", 120)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d._id; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 0)\n')
fid_out.write(' .attr("y", -30)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d.score; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' // Declare the links…\n')
fid_out.write(' var link = svg.selectAll("path.link")\n')
fid_out.write(' .data(links, function(d) { return d.target.id; });\n')
fid_out.write(' // Enter the links.\n')
fid_out.write(' link.enter().insert("path", "g")\n')
fid_out.write(' .attr("class", "link")\n')
fid_out.write(' .style("stroke", function(d) { return d.target.level; })\n')
fid_out.write(' .attr("d", diagonal);\n')
fid_out.write(' // remove the first level, leaving the targets as the first level\n')
fid_out.write(' node.each(function(d){\n')
fid_out.write(' if (d.name == "dummy_root")\n')
fid_out.write(' d3.select(this).remove();});\n')
fid_out.write(' link.each(function(d){\n')
fid_out.write(' if (d.source.name == "dummy_root") \n')
fid_out.write(' d3.select(this).remove();});\n')
fid_out.write('}\n')
fid_out.write('</script>\n')
fid_out.write(' </body>\n')
fid_out.write('</html>\n')
fid_out.close()
if __name__ == "__main__":
file_name = project_path + '/data/pathway_train_example.pkl'
with open(file_name, 'rb') as f:
data = pickle.load(f)
trees_to_plot = [d['tree'] for d in data['generated_paths'][0:10]]
create_tree_html(trees_to_plot, 'plotted_trees')
| 47.131455 | 123 | 0.574858 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,899 | 0.487802 |
40d7ebe962811bafc69c16d6ae16e6cb4f35d53d | 3,955 | py | Python | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
]
| null | null | null | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
]
| null | null | null | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
]
| null | null | null | '''
Homework assignment for the 'Python is easy' course by Pirple.
Written by Ed Yablonsky.
Snowman (Hangman) game.
'''
from os import (
name as os_name,
system as system_call,
)
from os.path import (
abspath,
dirname,
join as join_path,
)
'''
Screen displays game output
'''
class Screen:
def clear(self):
if os_name == 'nt':
system_call('cls')
else:
system_call('clear')
def draw(self, frame):
for line in frame:
print(line)
'''
Input represents game input device
'''
class Input:
def ask(self, message):
answer = ''
while answer == '':
answer = input(message)
return answer
'''
Art is a game art, which is a set of frames loaded from a text file.
Draws its current frame on a screen.
'''
class Art:
def __init__(self):
self.frames = []
self.current_frame = 0
def load(self, name):
frames = []
art_path = join_path(dirname(abspath(__file__)), join_path('arts', name))
with open(art_path, 'r') as art_file:
frame_height = int(art_file.readline())
frame = []
line_count = 0
for line in art_file:
frame.append(line.strip('\n\r'))
line_count += 1
if line_count % frame_height == 0:
frames.append(frame)
frame = []
self.frames = frames
self.current_frame = 0
def draw(self, screen):
screen.draw(self.frames[self.current_frame])
def frames_number(self):
return len(self.frames)
def next_frame(self):
self.current_frame = (self.current_frame + 1) % self.frames_number()
return self.current_frame
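# Expected art file format (inferred from Art.load(), not documented elsewhere):
# the first line holds the frame height H, and the remaining lines are consumed
# H at a time, each group of H lines forming one animation frame.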
'''
Riddle holds secret word and gets solved by guesses
'''
class Riddle:
def __init__(self, key):
self.key = key
self.clue = ['_'] * len(key)
def length(self):
return len(self.key)
def range(self):
return range(0, self.length())
def guess(self, g):
guess_count = 0
for i in self.range():
if g == self.key[i]:
guess_count += 1
self.clue[i] = g
return guess_count
def solved(self):
for i in self.range():
if self.clue[i] != self.key[i]:
return False
return True
def unsolved(self):
return self.solved() == False
def draw(self, screen):
screen.draw([' '.join(self.clue)])
'''
Game is a game itself
'''
class Game:
def __init__(self):
self.screen = Screen()
self.input = Input()
self.art = Art()
self.riddle = Riddle('riddle')
def play(self):
self.start()
self.propose_riddle()
while self.in_progress():
self.play_round()
self.display_result()
def start(self):
self.art.load('snowman')
self.game_over = False
def propose_riddle(self):
self.riddle = Riddle(self.input.ask('Player 1 pick a word: ').lower())
def in_progress(self):
return self.riddle.unsolved() and self.game_over == False
def draw_frame(self):
self.screen.clear()
self.art.draw(self.screen)
self.riddle.draw(self.screen)
def play_round(self):
self.draw_frame()
clue = input('Player 2 guess a letter: ').lower()
if len(clue) > 0:
if clue[0] == '.':
self.stop()
elif self.riddle.guess(clue[0]) == 0:
self.art.next_frame()
if self.art.current_frame == self.art.frames_number() - 1:
self.stop()
def stop(self):
self.game_over = True
def display_result(self):
self.draw_frame()
if self.game_over:
self.screen.draw(['Player 2 lost'])
else:
self.screen.draw(['Player 2 wins'])
Game().play()
| 23.682635 | 81 | 0.551707 | 3,378 | 0.854109 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.138053 |
40d82abf6ddc30ada008f9205fa131b2828d8ba2 | 2,569 | py | Python | src/GenericTsvReader.py | getzlab/ABSOLUTE | cd443ec9370df98778d98227bb9a11c3e24c00cb | [
"BSD-3-Clause"
]
| null | null | null | src/GenericTsvReader.py | getzlab/ABSOLUTE | cd443ec9370df98778d98227bb9a11c3e24c00cb | [
"BSD-3-Clause"
]
| null | null | null | src/GenericTsvReader.py | getzlab/ABSOLUTE | cd443ec9370df98778d98227bb9a11c3e24c00cb | [
"BSD-3-Clause"
]
| null | null | null | """
Created on Jul 5, 2012
@author: lichtens
"""
import csv
import os
class GenericTsvReader(object):
"""
Read a TSV file.
This class wraps a DictReader, but handles comments, which are not handled gracefully in the python csv library.
The next() method assumes user is interested in the content, not the comments.
Get the comments using getComments or getCommentsAsList. The latter assumes each comment is a line of text.
Notes:
IMPORTANT: At this time, this class does not support comments below the header line.
This class will load all comment lines into RAM at one time. This could theoretically cause a bottleneck in some files.
"""
def __init__(self, filename, commentPrepend='#', fieldNames=None, delimiter='\t'):
"""
Constructor
"""
self.__dict__.update(locals())
self.inputContentFP = open(filename, 'r')
self.commentLines = ''
self.commentPrepend = commentPrepend
# The comment lines must be loaded before the dict reader is initialized.
self._loadCommentLines()
self.dictReader = csv.DictReader(self.inputContentFP, delimiter=delimiter, fieldnames=fieldNames)
def _loadCommentLines(self):
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
# Get rid of blank lines
while nextChar in ['\n', '\r']:
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
while nextChar == self.commentPrepend:
self.commentLines = self.commentLines + (self.commentPrepend + self.inputContentFP.readline())
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
# Go back one character to make sure that we have moved the file pointer to the
# beginning of the first non-comment line.
self.inputContentFP.seek(resetLocation, os.SEEK_SET)
# python3 needs __next__ instead of next
def __next__(self):
return self.dictReader.__next__()
def getFieldNames(self):
return self.dictReader.fieldnames
def getComments(self):
return self.commentLines
def getCommentsAsList(self):
""" Return each comment line as an entry in a list """
return self.commentLines.strip().split('\n')
def getInputContentFP(self):
return self.inputContentFP
def __iter__(self):
return self | 33.802632 | 124 | 0.652005 | 2,488 | 0.96847 | 0 | 0 | 0 | 0 | 0 | 0 | 1,020 | 0.397042 |
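if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; it writes a throwaway TSV
    # file, reads it back through GenericTsvReader and cleans up afterwards).
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as tmp:
        tmp.write("# a comment line\ncol1\tcol2\n1\t2\n")
        tmp_path = tmp.name
    reader = GenericTsvReader(tmp_path)
    print(reader.getCommentsAsList())    # ['# a comment line']
    for row in reader:                   # rows are dicts keyed by the header
        print(row['col1'], row['col2'])  # 1 2
    reader.getInputContentFP().close()
    os.remove(tmp_path)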
40db83a1176151c4bc0bdff2477e10a8b1ab20a4 | 12,376 | py | Python | examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | f02e7c7c2c021c85823cace405ca2c58ad4ff147 | [
"MIT"
]
| 5,678 | 2016-07-19T10:22:35.000Z | 2022-03-31T22:46:41.000Z | examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | f02e7c7c2c021c85823cace405ca2c58ad4ff147 | [
"MIT"
]
| 759 | 2016-07-19T05:41:59.000Z | 2022-03-28T11:00:10.000Z | examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | f02e7c7c2c021c85823cace405ca2c58ad4ff147 | [
"MIT"
]
| 1,165 | 2016-07-19T22:56:42.000Z | 2022-03-31T22:46:45.000Z | """
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: Guillaume Lemaitre <[email protected]>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
# Thus, we will not use this feature which is not acquired during the survey.
# - "education-num": it is encoding the same information than "education".
# Thus, we are removing one of these 2 features.
# %%
from sklearn.datasets import fetch_openml
df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])
# %% [markdown]
# The "adult" dataset as a class ratio of about 3:1
# %%
classes_count = y.value_counts()
classes_count
# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1
# %%
from imblearn.datasets import make_imbalance
ratio = 30
df_res, y_res = make_imbalance(
df,
y,
sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.
# %%
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")
# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy which will
# take into account the balancing issue.
# %%
print(
f"Balanced accuracy score of a dummy classifier: "
f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)
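# %% [markdown]
# The balanced accuracy is the macro-average of the recall obtained on each
# class, so a classifier that always predicts the majority class scores 0.5 on
# a binary problem no matter how strong the imbalance is.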
# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.
# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}
# %% [markdown]
# Dummy baseline
# ..............
#
# Before training a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.
# %%
import pandas as pd
index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardize the
# numerical columns before injecting the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.
# %%
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
num_pipe = make_pipeline(
StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore"),
)
# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline
# %%
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector as selector
preprocessor_linear = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.
# %%
from sklearn.linear_model import LogisticRegression
lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))
# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.
# %%
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier
num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)
preprocessor_tree = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
rf_clf = make_pipeline(
preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)
# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is affected by the
# class imbalance as well, slightly less than the linear model. Now, we will
# present different approaches to improve the performance of these 2 models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter affects the computation of the loss in linear models or of the
# criterion in tree-based models, penalizing misclassifications of the
# minority and the majority class differently. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# linear model and tree-based model.
# %%
lr_clf.set_params(logisticregression__class_weight="balanced")
index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")
index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly because its splitting criterion is not well
# suited to countering the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.
# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler
lr_clf = make_pipeline_with_sampler(
preprocessor_linear,
RandomUnderSampler(random_state=42),
LogisticRegression(max_iter=1000),
)
# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf = make_pipeline_with_sampler(
preprocessor_tree,
RandomUnderSampler(random_state=42),
RandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Applying a random under-sampler before training the linear model or the
# random forest keeps them from focusing on the majority class, at the cost of
# making more mistakes on samples from the majority class (i.e. decreased
# accuracy).
#
# We could apply any type of samplers and find which sampler is working best
# on the current dataset.
#
# Instead, we will present another way by using classifiers which will apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on decision
# trees. However, instead of under-sampling the dataset once, one could
# under-sample the original dataset before taking each bootstrap sample. This is
# the basis of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and
# :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
# %%
from imblearn.ensemble import BalancedRandomForestClassifier
rf_clf = make_pipeline(
preprocessor_tree,
BalancedRandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier
bag_clf = make_pipeline(
preprocessor_tree,
BalancedBaggingClassifier(
base_estimator=HistGradientBoostingClassifier(random_state=42),
n_estimators=10,
random_state=42,
n_jobs=2,
),
)
index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# This last approach is the most effective. The different under-sampling allows
# to bring some diversity for the different GBDT to learn and not focus on a
# portion of the majority class.
| 33.906849 | 87 | 0.747253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,178 | 0.579994 |
40dc4792e5546b69652c162537bffd53c76ae2d8 | 3,949 | py | Python | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
]
| 9 | 2015-11-19T06:03:08.000Z | 2021-02-16T19:14:42.000Z | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
]
| null | null | null | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
]
| 1 | 2018-10-09T23:12:34.000Z | 2018-10-09T23:12:34.000Z | #!/usr/bin/python
import argparse
import re
parser = argparse.ArgumentParser(description='Fix page breaks in War of The Rebellion text')
parser.add_argument('files', nargs='*',
help='Files to process')
args = parser.parse_args()
for file in args.files:
outfile = open(file + ".joined-pagebreak", "w")
text = ''.join(open(file).readlines())
pages = re.split("PAGEBREAK\n", text)
# Remove empty pages
pages = [x for x in pages if x]
for i in xrange(0, len(pages) - 1):
# Remove extraneous blank lines
pages[i] = re.sub("\n\n\n+", "\n\n", pages[i])
# Undo HTML entities
pages[i] = re.sub("&", "&", pages[i])
pages[i] = re.sub("<", "<", pages[i])
pages[i] = re.sub(">", ">", pages[i])
# Do the following a second time to handle cases of
# &amp;, which are common
pages[i] = re.sub("&", "&", pages[i])
m = re.match(r"^( *\[*CHAP\. [A-Z]+\.\]* *\n\n?)(.*)", pages[i], re.S)
if m:
pages[i] = m.group(2)
print "Removed CHAP heading on page %s:\n[%s]\n" % (i, m.group(1))
m = re.match("(.*?)(\n?(?: *[0-9]+|S) *(?:R R(?: *[-_VY]+ *[^\n]*)?|R *-+ *[^\n]*)\n)(.*)$", pages[i], re.S)
if m:
pages[i] = m.group(1) + m.group(3)
print "Removed R R notation on page %s:\n[%s]\n" % (i, m.group(2))
m = re.match(r"(.*?\n)(\n* *------+\n( *(?:[*+#@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S)
if m:
pages[i] = m.group(1)
print "Removed footnote on page %s:\n[%s]\n" % (i, m.group(2))
m = re.match("(.*?\n)(\n*[*]?MAP[^\n]*\n+)$", pages[i], re.S)
if m:
pages[i] = m.group(1)
print "Removed MAP notation on page %s:\n[%s]\n" % (i, m.group(2))
while pages[i] and pages[i][-1] == "\n":
pages[i] = pages[i][0:-1]
if "\n" not in pages[i]:
lastlinelen = len(pages[i])
else:
m = re.match(".*\n([^\n]*)$", pages[i], re.S)
assert m
lastlinelen = len(m.group(1))
shortline = lastlinelen < 60
join = False
hyphenjoin = False
if not pages[i]:
continue
if len(pages[i]) >= 2 and pages[i][-1] == '-' and pages[i][-2].islower():
if shortline:
msg = "PAGEBREAK SHORT-LINE HYPHEN, NOT JOINED"
else:
msg = "PAGEBREAK HYPHEN-JOINED"
hyphenjoin = True
join = True
elif pages[i + 1] and pages[i + 1][0].islower():
if shortline:
msg = "PAGEBREAK SHORT-LINE NEXT PAGE STARTS LOWERCASE, NOT JOINED"
else:
msg = "PAGEBREAK NEXT PAGE STARTS LOWERCASE, JOINED"
join = True
elif len(pages[i]) >= 3 and pages[i][-1] == '.' and pages[i][-2].isupper() and pages[i][-3] in ['.', ' ']:
if shortline:
msg = "PAGEBREAK SHORT-LINE ENDS WITH ABBREVIATION PERIOD, NOT JOINED"
else:
msg = "PAGEBREAK ENDS ABBREV-PERIOD, JOINED"
join = True
elif pages[i][-1] == '.':
msg = "PAGEBREAK ENDS PERIOD, NOT JOINED"
elif len(pages[i]) >= 2 and pages[i][-1] == '*' and pages[i][-2] == '.':
msg = "PAGEBREAK ENDS PERIOD STAR, NOT JOINED"
elif len(pages[i]) >= 2 and pages[i][-1] == '"' and pages[i][-2] == '.':
msg = "PAGEBREAK ENDS PERIOD QUOTE, NOT JOINED"
elif pages[i][-1] == ':':
msg = "PAGEBREAK ENDS COLON, NOT JOINED"
elif pages[i][-1] == ',':
if shortline:
msg = "PAGEBREAK ENDS SHORT-LINE COMMA, NOT JOINED"
else:
msg = "PAGEBREAK ENDS COMMA, JOINED"
join = True
else:
if shortline:
msg = "PAGEBREAK ENDS SHORT-LINE OTHER, NOT JOINED"
else:
msg = "PAGEBREAK ENDS OTHER, JOINED"
join = True
print "Page %s: %s" % (i, msg)
if hyphenjoin:
outfile.write(pages[i][0:-1])
elif join:
outfile.write(pages[i] + " ")
else:
outfile.write(pages[i])
outfile.write("\n\n")
outfile.write("\n%s\n" % msg)
outfile.close()
| 36.564815 | 171 | 0.52469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,443 | 0.365409 |
40dd78243c51556a2be73588a2b4ac205cbb6f28 | 570 | py | Python | 2021/02/part2.py | FranciscoAT/advent-of-code | 69f20696e4c59ff6dfa010b22dd3593ea3d12208 | [
"MIT"
]
| null | null | null | 2021/02/part2.py | FranciscoAT/advent-of-code | 69f20696e4c59ff6dfa010b22dd3593ea3d12208 | [
"MIT"
]
| null | null | null | 2021/02/part2.py | FranciscoAT/advent-of-code | 69f20696e4c59ff6dfa010b22dd3593ea3d12208 | [
"MIT"
]
| null | null | null | def main(file: str) -> None:
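    # Advent of Code 2021, day 2, part 2: "down X" / "up X" adjust the aim,
    # "forward X" advances the horizontal position by X and increases the
    # depth by aim * X; the answer is the product of the final horizontal
    # position and depth.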
depth = 0
distance = 0
aim = 0
with open(f"{file}.in") as f:
for line in f.readlines():
line = line.rstrip().split(" ")
command = line[0]
unit = int(line[1])
if command == "forward":
distance += unit
depth += aim * unit
elif command == "down":
aim += unit
else:
aim -= unit
print(f"{file}: {depth * distance}")
if __name__ == "__main__":
main("test")
main("puzzle")
| 23.75 | 43 | 0.436842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.145614 |
40dd84dd99f72ec266d9a45433b99fd282b94576 | 6,068 | py | Python | tf_seal/python/tensor.py | karlhigley/tf-seal | 74a38e3ff71d29e862881d56bca84aaa23efd710 | [
"Apache-2.0"
]
| 94 | 2019-08-06T16:03:27.000Z | 2022-03-24T18:19:07.000Z | tf_seal/python/tensor.py | karlhigley/tf-seal | 74a38e3ff71d29e862881d56bca84aaa23efd710 | [
"Apache-2.0"
]
| 23 | 2019-08-19T16:22:12.000Z | 2022-03-31T15:09:58.000Z | tf_seal/python/tensor.py | karlhigley/tf-seal | 74a38e3ff71d29e862881d56bca84aaa23efd710 | [
"Apache-2.0"
]
| 17 | 2019-08-08T22:45:46.000Z | 2022-03-22T08:05:16.000Z | import numpy as np
import tensorflow as tf
import tf_seal.python.ops.seal_ops as ops
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops as tf_ops
class Tensor(object):
def __init__(self, value, secret_key, public_keys):
assert isinstance(value, tf.Tensor), type(value)
assert value.dtype is tf.variant, value.dtype
self._raw = value
self._public_keys = public_keys
self._secret_key = secret_key
@property
def shape(self):
return self._raw.shape
@property
def name(self):
return self._raw.name
@property
def dtype(self):
return tf.int32
# return tf.string
def eval(self, session=None, dtype=None):
tf_tensor = convert_from_tensor(self, dtype=dtype)
evaluated = tf_tensor.eval(session=session)
return evaluated
def __add__(self, other):
if isinstance(other, Tensor):
res = ops.seal_add(self._raw, other._raw)
else:
res = ops.seal_add_plain(self._raw, other)
return Tensor(res, self._secret_key, self._public_keys)
# def __sub__(self, other):
# other = convert_to_tensor(other)
# res = ops.big_sub(self._raw, other._raw)
# return Tensor(res)
def __mul__(self, other):
if isinstance(other, Tensor):
res = ops.seal_mul(self._raw, other._raw, self._public_keys)
else:
res = ops.seal_mul_plain(self._raw, other)
return Tensor(res, self._secret_key, self._public_keys)
def matmul(self, other):
if isinstance(other, Tensor):
res = ops.seal_mat_mul(self._raw, other._raw, self._public_keys)
else:
res = ops.seal_mat_mul_plain(self._raw, other, self._public_keys)
return Tensor(res, self._secret_key, self._public_keys)
def _fetch_function(seal_tensor):
unwrapped = [convert_from_tensor(seal_tensor, dtype=tf.float64)]
rewrapper = lambda components_fetched: components_fetched[0].astype(np.float64)
return unwrapped, rewrapper
def _feed_function(seal_tensor, feed_value):
return [(seal_tensor._raw, feed_value)]
def _feed_function_for_partial_run(seal_tensor):
return [seal_tensor._raw]
# this allows tf_seal.Tensor to be passed directly to tf.Session.run,
# unwrapping and converting the result as needed
tf_session.register_session_run_conversion_functions(
tensor_type=Tensor,
fetch_function=_fetch_function,
feed_function=_feed_function,
feed_function_for_partial_run=_feed_function_for_partial_run,
)
def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False):
assert name is None, "Not implemented, name='{}'".format(name)
assert not as_ref, "Not implemented, as_ref={}".format(as_ref)
assert dtype in [tf.float32, tf.float64, None], dtype
return convert_from_tensor(tensor, dtype=dtype)
# TODO(Morten)
# this allows implicit conversion of tf_seal.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)
# this allows Tensor to pass the tf.is_tensor test
tf_ops.register_dense_tensor_like_type(Tensor)
# this allows tf_seal.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)
def constant(tensor, secret_key, public_keys):
assert isinstance(tensor, (np.ndarray, list, tuple)), type(tensor)
return convert_to_tensor(tensor, secret_key, public_keys)
def _convert_numpy_tensor(tensor, secret_key, public_keys):
if len(tensor.shape) > 2:
raise ValueError("Only matrices are supported for now.")
# make sure we have a full matrix
while len(tensor.shape) < 2:
tensor = np.expand_dims(tensor, 0)
if np.issubdtype(tensor.dtype, np.float32) \
or np.issubdtype(tensor.dtype, np.float64):
# supported as-is
return Tensor(ops.seal_encrypt(tensor, public_keys), secret_key, public_keys)
raise ValueError("Don't know how to convert NumPy tensor with dtype '{}'".format(tensor.dtype))
def _convert_tensorflow_tensor(tensor, secret_key, public_keys):
if len(tensor.shape) > 2:
raise ValueError("Only matrices are supported for now.")
# make sure we have a full matrix
while len(tensor.shape) < 2:
tensor = tf.expand_dims(tensor, 0)
if tensor.dtype in (tf.float32, tf.float64):
# supported as-is
return Tensor(ops.seal_encrypt(tensor, public_keys), secret_key, public_keys)
raise ValueError("Don't know how to convert TensorFlow tensor with dtype '{}'".format(tensor.dtype))
def convert_to_tensor(tensor, secret_key, public_keys):
if isinstance(tensor, Tensor):
return tensor
if tensor is None:
return None
if isinstance(tensor, (float)):
return _convert_numpy_tensor(np.array([tensor]), secret_key, public_keys)
if isinstance(tensor, (list, tuple)):
return _convert_numpy_tensor(np.array(tensor), secret_key, public_keys)
if isinstance(tensor, np.ndarray):
return _convert_numpy_tensor(tensor, secret_key, public_keys)
if isinstance(tensor, tf.Tensor):
return _convert_tensorflow_tensor(tensor, secret_key, public_keys)
raise ValueError("Don't know how to convert value of type {}".format(type(tensor)))
def convert_from_tensor(value, dtype=None):
assert isinstance(value, Tensor), type(value)
if dtype is None:
dtype = tf.float64
if dtype in [tf.float32, tf.float64]:
return ops.seal_decrypt(value._raw, value._secret_key, dtype=dtype)
raise ValueError("Don't know how to evaluate to dtype '{}'".format(dtype))
def add(x, y):
# TODO(Morten) lifting etc
return x + y
def sub(x, y):
# TODO(Morten) lifting etc
return x - y
def mul(x, y):
# TODO(Morten) lifting etc
return x * y
def matmul(x, y):
# TODO(Morten) lifting etc
return x.matmul(y)
def poly_eval(x, coeffs):
res = ops.seal_poly_eval(x._raw, coeffs, x._public_keys)
return Tensor(res, x._secret_key, x._public_keys)
| 30.492462 | 102 | 0.742914 | 1,529 | 0.251978 | 0 | 0 | 179 | 0.029499 | 0 | 0 | 1,229 | 0.202538 |
40de3e3ad949140a43a1a19f490e5fc039aedb2f | 25,087 | py | Python | program/admin.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
]
| null | null | null | program/admin.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
]
| null | null | null | program/admin.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
]
| null | null | null | from django.core.exceptions import ObjectDoesNotExist
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render
from django.conf import settings
from .models import Language, Type, MusicFocus, Category, Topic, RTRCategory, Host, Note, RRule, Schedule, Show, TimeSlot
from .forms import MusicFocusForm
from datetime import date, datetime, time, timedelta
class ActivityFilter(admin.SimpleListFilter):
title = _("Activity")
def lookups(self, request, model_admin):
return (
('yes', _("active")),
('no', _("inactive"))
)
def queryset(self, request, queryset):
if self.parameter_name == 'has_timeslots': # active/inactive Schedules
if self.value() == 'yes':
return queryset.filter(until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_schedules_timeslots': # active/inactive Shows
if self.value() == 'yes':
return queryset.filter(schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(schedules__until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_shows_schedules_timeslots': # active/inactive Hosts
if self.value() == 'yes':
return queryset.filter(shows__schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(shows__schedules__until__lt=datetime.now()).distinct()
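# The concrete filters below only differ in `parameter_name`; queryset() above
# keys off that name to pick which relation down to the timeslots to follow.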
class ActiveSchedulesFilter(ActivityFilter):
parameter_name = 'has_timeslots'
class ActiveShowsFilter(ActivityFilter):
parameter_name = 'has_schedules_timeslots'
class ActiveHostsFilter(ActivityFilter):
parameter_name = 'has_shows_schedules_timeslots'
class TypeAdmin(admin.ModelAdmin):
list_display = ('type', 'admin_color', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('type',)}
class MusicFocusAdmin(admin.ModelAdmin):
form = MusicFocusForm
list_display = ('focus', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('focus',)}
class CategoryAdmin(admin.ModelAdmin):
list_display = ('category', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('category',)}
class LanguageAdmin(admin.ModelAdmin):
list_display = ('name', 'is_active')
list_filter = ('is_active',)
class TopicAdmin(admin.ModelAdmin):
list_display = ('topic', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('topic',)}
class RTRCategoryAdmin(admin.ModelAdmin):
list_display = ('rtrcategory', 'abbrev', 'is_active' )
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('rtrcategory',)}
class HostAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'is_active')
list_filter = (ActiveHostsFilter, 'is_active',)
def get_queryset(self, request):
if request.user.is_superuser:
return Host.objects.all()
# Common users only see hosts of shows they own
return Host.objects.filter(shows__in=request.user.shows.all()).distinct()
class NoteAdmin(admin.ModelAdmin):
date_hierarchy = 'start'
list_display = ('title', 'show', 'start', 'status', 'user')
    fields = (('show', 'timeslot'), 'title', 'slug', 'summary', 'content', 'image', 'host', 'status', 'cba_id')
prepopulated_fields = {'slug': ('title',)}
list_filter = ('status',)
ordering = ('timeslot',)
save_as = True
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/note_change.js', ]
def get_queryset(self, request):
if request.user.is_superuser:
shows = Show.objects.all()
else:
            # Common users only see notes of shows they own
shows = request.user.shows.all()
return super(NoteAdmin, self).get_queryset(request).filter(show__in=shows)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
four_weeks_ago = datetime.now() - timedelta(weeks=4)
in_twelve_weeks = datetime.now() + timedelta(weeks=12)
if db_field.name == 'timeslot':
# Adding/Editing a note: load timeslots of the user's shows into the dropdown
# TODO: Don't show any timeslot in the select by default.
# User should first choose show, then timeslots are loaded into the select via ajax.
#
# How to do this while not constraining the queryset?
# Saving won't be possible otherwise, if queryset doesn't contain the selectable elements beforehand
#kwargs['queryset'] = TimeSlot.objects.filter(show=-1)
# Superusers see every timeslot for every show
if request.user.is_superuser:
kwargs['queryset'] = TimeSlot.objects.filter(start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
# Users see timeslots of shows they own
else:
kwargs['queryset'] = TimeSlot.objects.filter(show__in=request.user.shows.all(), start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
if db_field.name == 'show':
# Adding/Editing a note: load user's shows into the dropdown
# Common users only see shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Show.objects.filter(pk__in=request.user.shows.all(), is_active=True)
if db_field.name == 'host':
# Common users only see hosts of shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Host.objects.filter(shows__in=request.user.shows.all(), is_active=True).distinct()
return super(NoteAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def save_model(self, request, obj, form, change):
# Save the creator when adding a note
if not change:
obj.user = request.user
# Try to get direct audio URL from CBA
obj.audio_url = Note.get_audio_url(obj.cba_id)
obj.save()
class TimeSlotInline(admin.TabularInline):
model = TimeSlot
ordering = ('-end',)
class TimeSlotAdmin(admin.ModelAdmin):
model = TimeSlot
class ScheduleAdmin(admin.ModelAdmin):
actions = ('renew',)
inlines = (TimeSlotInline,)
fields = (('rrule', 'byweekday'), ('dstart', 'tstart', 'tend'), 'until', 'is_repetition', 'automation_id', 'fallback_id')
list_display = ('get_show_name', 'byweekday', 'rrule', 'tstart', 'tend', 'until')
list_filter = (ActiveSchedulesFilter, 'byweekday', 'rrule', 'is_repetition')
ordering = ('byweekday', 'dstart')
save_on_top = True
search_fields = ('show__name',)
def renew(self, request, queryset):
next_year = date.today().year + 1
until = date(next_year, 12, 31)
renewed = queryset.update(until=until)
if renewed == 1:
message = _("1 schedule was renewed until %s") % until
else:
message = _("%s schedule were renewed until %s") % (renewed, until)
self.message_user(request, message)
renew.short_description = _("Renew selected schedules")
def get_show_name(self, obj):
return obj.show.name
get_show_name.admin_order_field = 'show'
get_show_name.short_description = "Show"
class ScheduleInline(admin.TabularInline):
model = Schedule
ordering = ('pk', '-until', 'byweekday')
class ShowAdmin(admin.ModelAdmin):
filter_horizontal = ('hosts', 'owners', 'musicfocus', 'category', 'topic', 'language')
inlines = (ScheduleInline,)
list_display = ('name', 'short_description')
list_filter = (ActiveShowsFilter, 'type', 'category', 'topic', 'musicfocus', 'rtrcategory', 'language')
ordering = ('slug',)
prepopulated_fields = {'slug': ('name',)}
search_fields = ('name', 'short_description', 'description')
fields = (
'predecessor', 'type', 'name', 'slug', 'image', 'logo', 'short_description', 'description',
'email', 'website', 'hosts', 'owners', 'language', 'category', 'rtrcategory', 'topic',
'musicfocus', 'fallback_id', 'cba_series_id',
)
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/show_change.js', ]
css = { 'all': ('/program/styles.css',) }
def get_queryset(self, request):
if request.user.is_superuser:
# Superusers see all shows
shows = Show.objects.all()
else:
# Users only see shows they own
shows = request.user.shows.all()
return super(ShowAdmin, self).get_queryset(request).filter(pk__in=shows)
def get_readonly_fields(self, request, obj=None):
'''Limit field access for common users'''
if not request.user.is_superuser:
# TODO: how to set field 'name' readonly although it's required?
return ('predecessor', 'type', 'hosts', 'owners', 'language', 'category', 'topic', 'musicfocus', 'rtrcategory')
return list()
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
try:
show_id = int(request.get_full_path().split('/')[-2])
except ValueError:
show_id = None
if db_field.name == 'predecessor' and show_id:
kwargs['queryset'] = Show.objects.exclude(pk=show_id)
if db_field.name == 'type':
kwargs['queryset'] = Type.objects.filter(is_active=True)
if db_field.name == 'rtrcategory':
kwargs['queryset'] = RTRCategory.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'hosts':
kwargs["queryset"] = Host.objects.filter(is_active=True)
if db_field.name == 'language':
kwargs["queryset"] = Language.objects.filter(is_active=True)
if db_field.name == 'category':
kwargs["queryset"] = Category.objects.filter(is_active=True)
if db_field.name == 'topic':
kwargs["queryset"] = Topic.objects.filter(is_active=True)
if db_field.name == 'musicfocus':
kwargs["queryset"] = MusicFocus.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def save_formset(self, request, form, formset, change):
"""
        Called after the "save show" form or a collision form has been submitted.
        Saves the show on the first submit.
        If any schedules were added or changed:
        * the added/changed schedules are used to generate new timeslots, which are
          matched against existing ones; collisions are shown in the collision form
        If a collision form was submitted:
        * the current schedule is saved
        * timeslots are deleted/created and notes are relinked after confirmation
        Each step passes on to response_add or response_change, which will
        * either display the collision form for the next step
        * or redirect to the original show form once the resolving process is finished
          (i.e. if either max_steps was surpassed or end_reached was True)
"""
self.end_reached = False
schedule_instances = formset.save(commit=False)
# If there are no schedules to save, do nothing
if schedule_instances:
show_id = schedule_instances[0].show.id
else:
self.end_reached = True
schedule = []
timeslots = []
max_steps = int(len(schedule_instances)) if len(schedule_instances) > 0 else 1
step = 1
if request.POST.get('step') == None:
# First save-show submit
# Generate thumbnails
if form.instance.image.name and settings.THUMBNAIL_SIZES:
for size in settings.THUMBNAIL_SIZES:
thumbnail = form.instance.image.crop[size].name
# Save show data only
form.save();
# Delete schedules (as well as related timeslots and notes) if flagged as such
for obj in formset.deleted_objects:
obj.delete()
# If nothing else changed, do nothing and redirect to show-form
if not formset.changed_objects and not formset.new_objects:
self.end_reached = True
else:
# If a collision form was submitted
step = int(request.POST.get('step'))
if request.POST.get('num_inputs') != None and int(request.POST.get('num_inputs')) > 0:
print("Resolving conflicts...")
'''Declare and retrieve variables'''
# Either datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') to create
# or ints of colliding timeslots to keep otherwise
resolved_timeslots = []
# IDs of colliding timeslots found in the db. If there's no corresponding collision to the
# same index in create_timeslot, value will be None
collisions = []
# Datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') for timeslots to create
create_timeslots = []
# IDs of timeslots to delete
delete_timeslots = set()
# Number of timeslots to be generated
num_inputs = int(request.POST.get('num_inputs'))
# Numbers of notes to relink for existing timeslots and newly created ones
# each of them relating to one of these POST vars:
# POST.ntids[idx][id] and POST.ntids[idx][note_id] contain ids of existing timeslots and note_ids to link, while
# POST.ntind[idx][id] and POST.ntind[idx][note_id] contain indices of corresponding elements in create_timeslots
# and note_ids which will be linked after they're created and thus split into two lists beforehand
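                # Illustrative example (hypothetical values): ntids[0][id]=42 with ntids[0][note_id]=7
                # relinks note 7 to the existing timeslot 42, while ntind[0][id]=3 with ntind[0][note_id]=9
                # marks note 9 to be linked to create_timeslots[3] once that timeslot has been created.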
num_ntids = int(request.POST.get('num_ntids'))
num_ntind = int(request.POST.get('num_ntind'))
# Retrieve POST vars of current schedule
schedule_id = int(request.POST.get('ps_save_id')) if request.POST.get('ps_save_id') != 'None' else None
rrule = RRule.objects.get(pk=int(request.POST.get('ps_save_rrule_id')))
show = Show.objects.get(pk=show_id)
byweekday = int(request.POST.get('ps_save_byweekday'))
tstart = datetime.strptime(request.POST.get('ps_save_tstart'), '%H:%M').time()
tend = datetime.strptime(request.POST.get('ps_save_tend'), '%H:%M').time()
dstart = datetime.strptime(request.POST.get('ps_save_dstart'), '%Y-%m-%d').date()
if dstart < datetime.today().date(): # Create or delete upcoming timeslots only
dstart = datetime.today().date()
until = datetime.strptime(request.POST.get('ps_save_until'), '%Y-%m-%d').date()
is_repetition = request.POST.get('ps_save_is_repetition')
automation_id = int(request.POST.get('ps_save_automation_id')) if request.POST.get('ps_save_automation_id') != 'None' else 0
fallback_id = int(request.POST.get('ps_save_fallback_id')) if request.POST.get('ps_save_fallback_id') != 'None' else 0
# Put timeslot POST vars into lists with same indices
for i in range(num_inputs):
resolved_ts = request.POST.get('resolved_timeslots[' + str(i) + ']')
if resolved_ts != None:
resolved_timeslots.append( resolved_ts )
create_timeslots.append( request.POST.get('create_timeslots[' + str(i) + ']') ) # May contain None
collisions.append( request.POST.get('collisions[' + str(i) + ']') ) # May contain None
else:
num_inputs -= 1
'''Prepare resolved timeslots'''
# Separate timeslots to delete from those to create
keep_collisions = []
for x in range(num_inputs):
if resolved_timeslots[x] == None or resolved_timeslots[x].isdigit():
# If it's a digit, keep the existing timeslot by preventing the new one from being created
create_timeslots[x] = None
keep_collisions.append(int(collisions[x]))
else:
# Otherwise collect the timeslot ids to be deleted later
if len(collisions[x]) > 0:
delete_timeslots.add(int(collisions[x]))
# Collect IDs of upcoming timeslots of the same schedule to delete except those in keep_collision
if schedule_id != None:
for ts in TimeSlot.objects.filter(start__gte=dstart,end__lte=until,schedule_id=schedule_id).exclude(pk__in=keep_collisions).values_list('id', flat=True):
delete_timeslots.add(ts)
'''Save schedule'''
new_schedule = Schedule(pk=schedule_id,
rrule=rrule,
byweekday=byweekday,
show=show,
dstart=dstart,
tstart=tstart,
tend=tend,
until=until,
is_repetition=is_repetition,
automation_id=automation_id,
fallback_id=fallback_id)
# Only save schedule if any timeslots changed
if len(resolved_timeslots) > 0:
new_schedule.save()
'''Relink notes to existing timeslots and prepare those to be linked'''
# Relink notes with existing timeslot ids
for i in range(num_ntids):
try:
note = Note.objects.get(pk=int(request.POST.get('ntids[' + str(i) + '][note_id]')))
note.timeslot_id = int(request.POST.get('ntids[' + str(i) + '][id]'))
note.save(update_fields=["timeslot_id"])
print("Rewrote note " + str(note.id) + "...to timeslot_id " + str(note.timeslot_id))
except ObjectDoesNotExist:
pass
# Put list indices of yet to be created timeslots and note_ids in corresponding lists to relink them during creation
note_indices = []
note_ids = []
for i in range(num_ntind):
note_indices.append( int(request.POST.get('ntind[' + str(i) + '][id]')) )
note_ids.append( int(request.POST.get('ntind[' + str(i) + '][note_id]')) )
'''Database changes for resolved timeslots and relinked notes for newly created'''
for idx, ts in enumerate(create_timeslots):
if ts != None:
start_end = ts.split(' - ')
# Only create upcoming timeslots
if datetime.strptime(start_end[0], "%Y-%m-%d %H:%M:%S") > datetime.today():
timeslot_created = TimeSlot.objects.create(schedule=new_schedule, is_repetition=new_schedule.is_repetition, start=start_end[0], end=start_end[1])
# Link a note to the new timeslot
if idx in note_indices:
note_idx = note_indices.index( idx ) # Get the note_id's index...
note_id = note_ids[note_idx] # ...which contains the note_id to relate to
try:
note = Note.objects.get(pk=note_id)
note.timeslot_id = timeslot_created.id
note.save(update_fields=["timeslot_id"])
print("Timeslot " + str(timeslot_created.id) + " linked to note " + str(note_id))
except ObjectDoesNotExist:
pass
# Finally delete discarded timeslots
for timeslot_id in delete_timeslots:
TimeSlot.objects.filter(pk=timeslot_id).delete()
if step > max_steps:
self.end_reached = True
'''
Everything below here is called when a new collision is loaded before being handed over to the client
'''
# Generate timeslots from current schedule
k = 1
for instance in schedule_instances:
if isinstance(instance, Schedule):
if k == step:
timeslots = Schedule.generate_timeslots(instance)
schedule = instance
break
k += 1
# Get collisions for timeslots
collisions = Schedule.get_collisions(timeslots)
# Get notes of colliding timeslots
notes = []
for id in collisions:
try:
notes.append( Note.objects.get(timeslot_id=id) )
except ObjectDoesNotExist:
pass
self.schedule = schedule
self.timeslots = timeslots
self.collisions = collisions
self.num_collisions = len([ s for s in self.collisions if s != 'None']) # Number of real collisions displayed to the user
self.notes = notes
self.showform = form
self.schedulesform = formset
self.step = step + 1 # Becomes upcoming step
self.max_steps = max_steps
# Pass it on to response_add() or response_change()
return self
def response_add(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def response_change(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def respond(self, request, obj):
"""
        Redirects to the show change form if no schedules changed, if resolving has finished,
        or if any other form validation error occurred.
        Displays the collision form for the current schedule otherwise.
"""
# Never check for collisions if not superuser
# Common users can't edit the formset, so save_formset() will never be called thus end_reached wasn't set yet
if not request.user.is_superuser:
self.end_reached = True
if self.end_reached:
return super(ShowAdmin, self).response_change(request, obj)
timeslots_to_collisions = list(zip(self.timeslots, self.collisions))
return render(request, 'collisions.html', {'self' : self, 'obj': obj, 'request': request,
'timeslots': self.timeslots,
'collisions': self.collisions,
'schedule': self.schedule,
'timeslots_to_collisions': timeslots_to_collisions,
'schedulesform': self.schedulesform,
'showform': self.showform,
'num_inputs': len(self.timeslots),
'step': self.step,
'max_steps': self.max_steps,
'now': datetime.now(),
'num_collisions': self.num_collisions})
admin.site.register(Language, LanguageAdmin)
admin.site.register(Type, TypeAdmin)
admin.site.register(MusicFocus, MusicFocusAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(RTRCategory, RTRCategoryAdmin)
admin.site.register(Host, HostAdmin)
admin.site.register(Note, NoteAdmin)
#admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(TimeSlot, TimeSlotAdmin)
admin.site.register(Show, ShowAdmin) | 42.305228 | 173 | 0.585363 | 24,146 | 0.962491 | 0 | 0 | 0 | 0 | 0 | 0 | 7,658 | 0.305258 |
40de4834e40d2116182061d040e90ff70baa0986 | 2,177 | py | Python | test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 75f41ec26b4b3aafcb117e467e38fca1e69f5c87 | [
"MIT"
]
| 3 | 2020-07-06T21:03:03.000Z | 2020-07-18T07:02:59.000Z | test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 75f41ec26b4b3aafcb117e467e38fca1e69f5c87 | [
"MIT"
]
| null | null | null | test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 75f41ec26b4b3aafcb117e467e38fca1e69f5c87 | [
"MIT"
]
| 4 | 2020-07-17T11:16:05.000Z | 2020-10-01T08:57:21.000Z | import hashlib
import json
from time import time
import pytest
from app.chaine.blockchain import Blockchain
@pytest.fixture
def first_block():
return {
'index': 1,
'timestamp': time(),
'transactions': [],
'proof': 1989,
'previous_hash': 1,
}
def test_initialization_blockchain(first_block):
bc = Blockchain()
assert bc.chain[0]['index'] == first_block['index']
assert isinstance(
bc.chain[0]['timestamp'],
type(first_block['timestamp'])
)
assert bc.chain[0]['transactions'] == first_block['transactions']
assert bc.chain[0]['proof'] == first_block['proof']
assert bc.chain[0]['previous_hash'] == first_block['previous_hash']
def test_last_block():
bc = Blockchain()
assert bc.last_block == bc.chain[-1]
@pytest.fixture
def a_valid_block():
block_1 = {
'index': 2,
'timestamp': time(),
'transactions': [],
'proof': 123,
'previous_hash': 'abc',
}
return block_1
@pytest.fixture
def an_invalid_block():
block_2 = {
'index': 'salut',
'timestamp': list('cava',),
'transactions': 22,
'proof': None,
'previous_hash': 46,
}
return block_2
@pytest.mark.parametrize('some_blocks', [
'a_valid_block',
'an_invalid_block'
]
)
def test_hachage(some_blocks, request):
    # The parametrized values are fixture names, so resolve them through pytest's
    # built-in `request` fixture and hash the block dicts rather than the name strings.
    some_blocks = request.getfixturevalue(some_blocks)
    bc = Blockchain()
block_json = json.dumps(
some_blocks,
sort_keys=True
).encode()
hash_test = hashlib.sha256(block_json).hexdigest()
assert len(hash_test) == 64
assert isinstance(
hash_test,
type(bc.hachage(some_blocks))
)
assert hash_test == bc.hachage(some_blocks)
def test_block_creation(a_valid_block, proof=123, previous_hash='abc'):
bc = Blockchain()
block_a_tester = bc.new_block(proof, previous_hash)
assert block_a_tester['index'] == a_valid_block['index']
assert isinstance(
block_a_tester['timestamp'],
type(a_valid_block['timestamp'])
)
assert block_a_tester['proof'] == a_valid_block['proof']
assert block_a_tester['previous_hash'] == a_valid_block['previous_hash']
| 23.408602 | 76 | 0.622876 | 0 | 0 | 0 | 0 | 1,076 | 0.494258 | 0 | 0 | 419 | 0.192467 |
40e031fd64128f14855fedd41208af0c66f89410 | 886 | py | Python | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
]
| null | null | null | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
]
| 16 | 2017-08-06T09:49:01.000Z | 2021-09-01T08:40:58.000Z | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
]
| null | null | null | # from django.conf.urls import patterns, url, include
# from django.views.generic import TemplateView
# from . import views, APP_NAME
#
# urlpatterns = patterns('',
# url(r'^$', views.index, name='%s.index' % APP_NAME),
# )
from django.urls import path, re_path, include
from . import views, APP_NAME
from .api import LayerResource
from tastypie.api import Api
Resources_api = Api(api_name="api")
Resources_api.register(LayerResource())
urlpatterns = [
re_path(r'^$', views.index, name='%s.index' % APP_NAME),
path('styles/<str:layername>/', views.layer_styles, name='%s.layer_styles' % APP_NAME),
path('styles/save/<str:layer_name>/<str:style_name>', views.save_style, name='%s.save_style' % APP_NAME),
re_path(r'^proxy/geoserver/rest/(?P<suburl>.*)$', views.geoserver_rest_proxy, name='%s.proxy' % APP_NAME),
re_path(r'^', include(Resources_api.urls)),
]
| 34.076923 | 110 | 0.705418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.450339 |
40e07b3163c543bc0b7215aac128eae673625978 | 1,721 | py | Python | core/rest/wscdn.py | cybert79/Osmedeus | 684d853144e2f85343c3367440120142455f296b | [
"MIT"
]
| 1 | 2019-06-13T09:14:11.000Z | 2019-06-13T09:14:11.000Z | core/rest/wscdn.py | KbaHaxor/Osmedeus | 0894d52ad5949e9151b0fd05d9746ecafc8057b5 | [
"MIT"
]
| null | null | null | core/rest/wscdn.py | KbaHaxor/Osmedeus | 0894d52ad5949e9151b0fd05d9746ecafc8057b5 | [
"MIT"
]
| 2 | 2020-01-09T17:48:23.000Z | 2020-01-09T17:48:24.000Z | import os
import glob
import json
from pathlib import Path
from flask_restful import Api, Resource, reqparse
from flask_jwt_extended import jwt_required
from flask import Flask, request, escape, make_response, send_from_directory
import utils
# in case you can't install ansi2html, it won't break the api
try:
from ansi2html import Ansi2HTMLConverter
except:
pass
current_path = os.path.dirname(os.path.realpath(__file__))
'''
render stdout content
'''
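# Resolves a requested filename against the WORKSPACES path recorded in each scan's
# options.json and serves the file from there when it exists.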
class Wscdn(Resource):
def verify_file(self, filename):
option_files = glob.glob(
current_path + '/storages/**/options.json', recursive=True)
        # loop through all available options
for option in option_files:
json_option = utils.reading_json(option)
stdout_path = json_option.get('WORKSPACES') + "/" + filename
if utils.not_empty_file(stdout_path):
return json_option.get('WORKSPACES'), os.path.normpath(filename)
# get real path
p = Path(filename)
ws = p.parts[0]
if ws != utils.url_encode(ws):
# just replace the first one
filename_encode = filename.replace(ws, utils.url_encode(ws), 1)
stdout_path_encode = json_option.get('WORKSPACES') + filename_encode
if utils.not_empty_file(stdout_path_encode):
return json_option.get('WORKSPACES'), os.path.normpath(filename_encode)
return False, False
def get(self, filename):
ws_path, stdout_path = self.verify_file(filename)
if not stdout_path:
return 'Custom 404 here', 404
return send_from_directory(ws_path, stdout_path)
| 30.732143 | 91 | 0.654852 | 1,253 | 0.728065 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.153399 |
40e08ff17bc877d0938f412ad22362a39a6d45db | 13,534 | py | Python | custom_components/hahm/services.py | noxhirsch/custom_homematic | afc03c813f44d342f75477e6fcce85fc78515258 | [
"MIT"
]
| null | null | null | custom_components/hahm/services.py | noxhirsch/custom_homematic | afc03c813f44d342f75477e6fcce85fc78515258 | [
"MIT"
]
| null | null | null | custom_components/hahm/services.py | noxhirsch/custom_homematic | afc03c813f44d342f75477e6fcce85fc78515258 | [
"MIT"
]
| null | null | null | """Module with hahomematic services."""
from __future__ import annotations
from datetime import datetime
import logging
from hahomematic.const import (
ATTR_ADDRESS,
ATTR_INTERFACE_ID,
ATTR_NAME,
ATTR_PARAMETER,
ATTR_VALUE,
HmPlatform,
)
from hahomematic.device import HmDevice
from hahomematic.entity import BaseEntity, GenericEntity
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, ATTR_TIME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import (
ATTR_PARAMSET,
ATTR_PARAMSET_KEY,
ATTR_RX_MODE,
ATTR_VALUE_TYPE,
DOMAIN,
)
from .control_unit import ControlUnit, HaHub
from .helpers import get_device_address_at_interface_from_identifiers
_LOGGER = logging.getLogger(__name__)
ATTR_CHANNEL = "channel"
ATTR_DEVICE_ID = "device_id"
DEFAULT_CHANNEL = 1
SERVICE_EXPORT_DEVICE_DEFINITION = "export_device_definition"
SERVICE_PUT_PARAMSET = "put_paramset"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
HAHM_SERVICES = [
SERVICE_EXPORT_DEVICE_DEFINITION,
SERVICE_PUT_PARAMSET,
SERVICE_SET_DEVICE_VALUE,
SERVICE_SET_INSTALL_MODE,
SERVICE_SET_VARIABLE_VALUE,
]
SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
}
)
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
}
)
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
{
vol.Required(ATTR_INTERFACE_ID): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMETER): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_VALUE_TYPE): vol.In(
["boolean", "dateTime.iso8601", "double", "int", "string"]
),
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
async def async_setup_services(hass: HomeAssistant) -> None:
"""Create the hahomematic services."""
@verify_domain_control(hass, DOMAIN)
async def async_call_hahm_service(service: ServiceCall) -> None:
"""Call correct HomematicIP Cloud service."""
service_name = service.service
if service_name == SERVICE_EXPORT_DEVICE_DEFINITION:
await _async_service_export_device_definition(hass=hass, service=service)
elif service_name == SERVICE_PUT_PARAMSET:
await _async_service_put_paramset(hass=hass, service=service)
elif service_name == SERVICE_SET_INSTALL_MODE:
await _async_service_set_install_mode(hass=hass, service=service)
elif service_name == SERVICE_SET_DEVICE_VALUE:
await _async_service_set_device_value(hass=hass, service=service)
elif service_name == SERVICE_SET_VARIABLE_VALUE:
await _async_service_set_variable_value(hass=hass, service=service)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_EXPORT_DEVICE_DEFINITION,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_VARIABLE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_DEVICE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE,
)
async_register_admin_service(
hass=hass,
domain=DOMAIN,
service=SERVICE_SET_INSTALL_MODE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_PUT_PARAMSET,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_PUT_PARAMSET,
)
async def async_unload_services(hass: HomeAssistant) -> None:
"""Unload HAHM services."""
if hass.data[DOMAIN]:
return
for hahm_service in HAHM_SERVICES:
hass.services.async_remove(domain=DOMAIN, service=hahm_service)
async def _async_service_export_device_definition(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
if hm_device := _get_device(hass=hass, device_id=device_id):
await hm_device.export_device_definition()
_LOGGER.debug(
"Calling export_device_definition: %s, %s",
hm_device.name,
hm_device.device_address,
)
async def _async_service_set_variable_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic system variable."""
entity_id = service.data[ATTR_ENTITY_ID]
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if hub := _get_hub_by_entity_id(hass=hass, entity_id=entity_id):
await hub.async_set_variable(name=name, value=value)
async def _async_service_set_device_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
parameter = service.data[ATTR_PARAMETER]
value = service.data[ATTR_VALUE]
rx_mode = service.data.get(ATTR_RX_MODE)
# Convert value into correct XML-RPC Type.
# https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy
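    # e.g. (illustrative) value="42" with value_type="int" becomes 42,
    # and value="1.5" with value_type="double" becomes 1.5.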
if value_type := service.data.get(ATTR_VALUE_TYPE):
if value_type == "int":
value = int(value)
elif value_type == "double":
value = float(value)
elif value_type == "boolean":
value = bool(value)
elif value_type == "dateTime.iso8601":
value = datetime.strptime(value, "%Y%m%dT%H:%M:%S")
else:
# Default is 'string'
value = str(value)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling setValue: %s, %s, %s, %s, %s, %s",
interface_id,
channel_address,
parameter,
value,
value_type,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.set_value(
interface_id=interface_id,
channel_address=channel_address,
parameter=parameter,
value=value,
rx_mode=rx_mode,
)
async def _async_service_set_install_mode(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to set interface_id into install mode."""
interface_id = service.data[ATTR_INTERFACE_ID]
mode: int = service.data.get(ATTR_MODE, 1)
time: int = service.data.get(ATTR_TIME, 60)
device_address = service.data.get(ATTR_ADDRESS)
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
await control_unit.central.set_install_mode(
interface_id, t=time, mode=mode, device_address=device_address
)
async def _async_service_put_paramset(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call the putParamset method on a HomeMatic connection."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
paramset_key = service.data[ATTR_PARAMSET_KEY]
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data[ATTR_PARAMSET])
rx_mode = service.data.get(ATTR_RX_MODE)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s, %s",
interface_id,
channel_address,
paramset_key,
paramset,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.put_paramset(
interface_id=interface_id,
channel_address=channel_address,
paramset=paramset_key,
value=paramset,
rx_mode=rx_mode,
)
def _get_device(hass: HomeAssistant, device_id: str) -> HmDevice | None:
"""Return the homematic device."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.hm_devices.get(device_address)
return None
def _get_interface_channel_address(
hass: HomeAssistant, device_id: str, channel: int
) -> tuple[str, str] | None:
"""Return interface and channel_address with given device_id and channel."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
channel_address = f"{device_address}:{channel}"
return interface_id, channel_address
def _get_entity(hass: HomeAssistant, entity_id: str) -> BaseEntity | None:
"""Return entity by given entity_id."""
control_unit: ControlUnit
for control_unit in hass.data[DOMAIN].values():
if hm_entity := control_unit.async_get_hm_entity(entity_id=entity_id):
if isinstance(hm_entity, BaseEntity):
return hm_entity
return None
def _get_entities_by_platform(
hass: HomeAssistant, platform: HmPlatform
) -> list[BaseEntity]:
"""Return entities by given platform."""
control_unit: ControlUnit
hm_entities: list[BaseEntity] = []
for control_unit in hass.data[DOMAIN].values():
hm_entities.extend(
control_unit.async_get_hm_entities_by_platform(platform=platform)
)
return hm_entities
def _get_hm_entity(
hass: HomeAssistant, interface_id: str, channel_address: str, parameter: str
) -> GenericEntity | None:
"""Get homematic entity."""
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.get_hm_entity_by_parameter(
channel_address=channel_address, parameter=parameter
)
return None
def _get_cu_by_interface_id(
hass: HomeAssistant, interface_id: str
) -> ControlUnit | None:
"""Get ControlUnit by interface_id."""
for entry_id in hass.data[DOMAIN].keys():
control_unit: ControlUnit = hass.data[DOMAIN][entry_id]
if control_unit and control_unit.central.clients.get(interface_id):
return control_unit
return None
def _get_hub_by_entity_id(hass: HomeAssistant, entity_id: str) -> HaHub | None:
"""Get ControlUnit by device address."""
for entry_id in hass.data[DOMAIN].keys():
control_unit: ControlUnit = hass.data[DOMAIN][entry_id]
if (
control_unit
and control_unit.hub
and control_unit.hub.entity_id == entity_id
):
return control_unit.hub
return None
| 32.455635 | 85 | 0.691813 | 0 | 0 | 0 | 0 | 874 | 0.064578 | 7,014 | 0.51825 | 1,446 | 0.106842 |
40e0c14f05e9b921525413c2427c2d6661b5419f | 865 | py | Python | app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | e2ce73279c9bf31de0b33b105723ae7a24deac54 | [
"MIT"
]
| null | null | null | app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | e2ce73279c9bf31de0b33b105723ae7a24deac54 | [
"MIT"
]
| null | null | null | app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | e2ce73279c9bf31de0b33b105723ae7a24deac54 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.3 on 2021-06-03 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Livro',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imagem', models.ImageField(upload_to='imagens')),
('titulo', models.CharField(max_length=150)),
('autor', models.CharField(max_length=50)),
('genero', models.CharField(max_length=50)),
('serieunico', models.CharField(max_length=50)),
('nota', models.CharField(max_length=2)),
('opiniao', models.CharField(max_length=300)),
],
),
]
| 30.892857 | 117 | 0.564162 | 772 | 0.892486 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.149133 |
40e13f8b874a94920da4e07d42899e93081c3e2f | 4,284 | py | Python | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
]
| 1 | 2019-05-28T13:04:32.000Z | 2019-05-28T13:04:32.000Z | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
]
| null | null | null | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
]
| null | null | null | # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2017-2019, Oracle and/or its affiliates.
* Copyright (c) 2014 by Bart Kiers
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
// Checkstyle: stop
// JaCoCo Exclude
//@formatter:off
{0}
"""
PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*")
def replace_suppress_warnings(line):
return PTRN_SUPPRESS_WARNINGS.sub('@SuppressWarnings("all")', line)
def replace_rulectx(line):
return line.replace("(RuleContext)_localctx", "_localctx")
def replace_localctx(line):
return re.sub(r'\(\((([a-zA-Z]*?_?)*[a-zA-Z]*)\)_localctx\)', '_localctx', line)
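# e.g. replace_localctx rewrites a cast such as '((Single_inputContext)_localctx)'
# to plain '_localctx' (illustrative example).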
TRANSFORMS = [
replace_suppress_warnings,
replace_rulectx,
replace_localctx,
]
def postprocess(file):
lines = []
for line in file:
for transform in TRANSFORMS:
line = transform(line)
lines.append(line)
return ''.join(lines)
if __name__ == '__main__':
fpath = sys.argv[1]
with open(fpath, 'r') as FILE:
content = COPYRIGHT_HEADER.format(postprocess(FILE))
with open(fpath, 'w+') as FILE:
FILE.write(content)
| 37.911504 | 88 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,399 | 0.793417 |
40e2d06c8105c95bcdc7c6b4d3475a48fa240fbc | 6,284 | py | Python | scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 8c90de1b1fbc19bab05e1c5848813d022492753a | [
"MIT"
]
| 22 | 2021-10-17T23:19:38.000Z | 2022-03-24T05:13:56.000Z | scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 8c90de1b1fbc19bab05e1c5848813d022492753a | [
"MIT"
]
| 12 | 2021-09-29T16:27:03.000Z | 2022-03-30T17:54:08.000Z | scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 8c90de1b1fbc19bab05e1c5848813d022492753a | [
"MIT"
]
| 4 | 2021-10-14T19:08:36.000Z | 2022-03-29T16:42:21.000Z | from brownie import accounts, Wei, chain, ApeToken, ApeVaultFactory, ApeDistributor, ApeRegistry, ApeRouter, FeeRegistry, MockRegistry, MockVaultFactory, MockToken, MockVault
def deploy_token():
funds = accounts.load('moist', '\0')
user = accounts.load('ape_deployer', '\0')
multi_sig = '0x15B513F658f7390D8720dCE321f50974B28672EF'
# funds.transfer(to=user, amount='1 ether')
# ape = ApeToken.deploy({'from':user}, publish_source=True)
# ape.transferOwnership(multi_sig, {'from':user})
gas_used = Wei('150 gwei') * 21000
remaining = user.balance() - gas_used
ask_back = Wei('1 ether') - remaining
print(f'to ask back: {Wei(ask_back).to("ether")}')
user.transfer(to=funds, amount=remaining, gas_price='150 gwei')
def deploy_protocol():
user = accounts.load('ape_deployer', '\0')
multi_sig = ''
lock_length = 60 * 60 * 24 * 14 # 14 days
yearn_reg = '0x50c1a2ea0a861a967d9d0ffe2ae4012c2e053804'
ape_reg = ApeRegistry.deploy(0, {'from':user}, publish_source=True)
ape_factory = ApeVaultFactory.deploy(yearn_reg, ape_reg, {'from':user}, publish_source=True)
ape_router = ApeRouter.deploy(yearn_reg, ape_factory, 0, {'from':user}, publish_source=True)
ape_distro = ApeDistributor.deploy({'from':user}, publish_source=True)
ape_fee = FeeRegistry.deploy({'from':user}, publish_source=True)
setup_protocol(ape_reg, ape_fee, ape_distro, ape_router, ape_factory, user)
min_delay_call = ape_reg.changeMinDelay.encode_input(lock_length)
ape_reg.schedule(ape_reg, min_delay_call, '', '', 0, {'from':user})
ape_fee.schedule(ape_fee, min_delay_call, '', '', 0, {'from':user})
ape_router.schedule(ape_router, min_delay_call, '', '', 0, {'from':user})
ape_reg.execute(ape_reg, min_delay_call, '', '', 0, {'from':user})
ape_fee.execute(ape_fee, min_delay_call, '', '', 0, {'from':user})
ape_router.execute(ape_router, min_delay_call, '', '', 0, {'from':user})
ape_reg.transferOwnership(multi_sig, {'from':user})
ape_fee.transferOwnership(multi_sig, {'from':user})
ape_router.transferOwnership(multi_sig, {'from':user})
def deploy_protocol_testnet():
user = accounts.load('moist', '\0')
multi_sig = user
lock_length = 60 * 60 * 24 * 14 # 14 days
mock_yearn_reg = MockRegistry.deploy({'from':user}, publish_source=True)
mock_yearn_vault_factories = MockVaultFactory.deploy(mock_yearn_reg, {'from':user}, publish_source=True)
mock_ape_reg = ApeRegistry.deploy(0, {'from':user}, publish_source=True)
mock_ape_factory = ApeVaultFactory.deploy(mock_yearn_reg, mock_ape_reg, {'from':user}, publish_source=True)
mock_ape_router = ApeRouter.deploy(mock_yearn_reg, mock_ape_factory, 0, {'from':user}, publish_source=True)
mock_ape_distro = ApeDistributor.deploy({'from':user}, publish_source=True)
mock_ape_fee = FeeRegistry.deploy({'from':user}, publish_source=True)
setup_protocol(mock_ape_reg, mock_ape_fee, mock_ape_distro, mock_ape_router, mock_ape_factory, user)
# setup_mockvaults(mock_yearn_vault_factories, user)
# min_delay_call = mock_ape_reg.changeMinDelay.encode_input(lock_length)
# mock_ape_reg.schedule(mock_ape_reg, min_delay_call, '', '', 0, {'from':user})
# mock_ape_fee.schedule(mock_ape_fee, min_delay_call, '', '', 0, {'from':user})
# mock_ape_router.schedule(mock_ape_router, min_delay_call, '', '', 0, {'from':user})
# mock_ape_reg.execute(mock_ape_reg, min_delay_call, '', '', 0, {'from':user})
# mock_ape_fee.execute(mock_ape_fee, min_delay_call, '', '', 0, {'from':user})
# mock_ape_router.execute(mock_ape_router, min_delay_call, '', '', 0, {'from':user})
# mock_ape_reg.transferOwnership(multi_sig, {'from':user})
# mock_ape_fee.transferOwnership(multi_sig, {'from':user})
# mock_ape_router.transferOwnership(multi_sig, {'from':user})
base_uri = 'https://rinkeby.etherscan.io/address/'
print(f'Mock yearn reg: {base_uri + mock_yearn_reg.address}')
print(f'Mock yearn Vault factory: {base_uri + mock_yearn_vault_factories.address}')
print(f'Mock ape reg: {base_uri + mock_ape_reg.address}')
print(f'Mock ape factory: {base_uri + mock_ape_factory.address}')
print(f'Mock ape router: {base_uri + mock_ape_router.address}')
print(f'Mock ape distro: {base_uri + mock_ape_distro.address}')
print(f'Mock ape fee: {base_uri + mock_ape_fee.address}')
def setup_protocol(ape_reg, ape_fee, ape_distro, ape_router, ape_factory, minter):
set_fee_call = ape_reg.setFeeRegistry.encode_input(ape_fee)
set_router_call = ape_reg.setRouter.encode_input(ape_router)
set_distro_call = ape_reg.setDistributor.encode_input(ape_distro)
set_factory_call = ape_reg.setFactory.encode_input(ape_factory)
set_treasury_call = ape_reg.setTreasury.encode_input(minter)
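    # Timelock pattern: each registry setter is scheduled first and then executed;
    # with the deploy-time delay of 0 the execute calls can follow immediately.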
ape_reg.schedule(ape_reg, set_fee_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_router_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_distro_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_factory_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_treasury_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_fee_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_router_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_distro_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_factory_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_treasury_call, '', '', 0, {'from':minter})
def setup_mockvaults(mock_yearn_vault_factories, user):
usdc = MockToken.deploy('USD Coin', 'USDC', {'from':user}, publish_source=True)
dai = MockToken.deploy('Dai', 'DAI', {'from':user})
ape = MockToken.deploy('Ape', 'OOH', {'from':user})
tx1 = mock_yearn_vault_factories.createVault(usdc, 'yearnVault USDC', 'yvUSDC', {'from':user})
MockVault.publish_source(tx1.new_contracts[0])
tx2 = mock_yearn_vault_factories.createVault(dai, 'yearnVault DAI', 'yvDAI', {'from':user})
tx3 = mock_yearn_vault_factories.createVault(ape, 'yearnVault Ape', 'yvOOH', {'from':user})
base_uri = 'https://rinkeby.etherscan.io/address/'
print(f'Mock usdc: {base_uri + usdc.address}')
print(f'Mock dai: {base_uri + dai.address}')
print(f'Mock ape token: {base_uri + ape.address}')
print(f'Mock usdc vault: {base_uri + tx1}')
print(f'Mock dai vault: {base_uri + tx2}')
print(f'Mock ape vault: {base_uri + tx3}')
| 56.107143 | 174 | 0.732336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,269 | 0.361076 |
40e6bbe29a59bd4a98298179d233b2bfddb4c1e0 | 971 | py | Python | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
]
| 10 | 2017-11-25T01:47:20.000Z | 2020-03-24T18:28:24.000Z | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
]
| 319 | 2017-11-16T09:56:03.000Z | 2022-03-28T00:24:37.000Z | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
]
| 6 | 2017-11-12T14:04:08.000Z | 2021-03-10T09:41:18.000Z | from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, UpdateView
from .models import Committee
class CommitteeList(ListView):
model = Committee
template_name = 'groups/committee_list.html'
context_object_name = 'committees'
class CommitteeDetailView(DetailView):
model = Committee
template_name = 'groups/committee_detail.html'
context_object_name = 'committee'
class EditCommitteeView(PermissionRequiredMixin, UpdateView):
permission_required = ('groups.change_committee',)
model = Committee
fields = ('clickbait', 'description', 'email', 'image')
success_url = reverse_lazy('committee_list')
class CommitteeAdminView(PermissionRequiredMixin, ListView):
permission_required = ('groups.change_committee',)
model = Committee
template_name = 'groups/committee_admin.html'
context_object_name = 'committees'
| 30.34375 | 65 | 0.77137 | 762 | 0.784758 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.23275 |
40e730ac41b56af4d3f51d091a10e9b22fdce408 | 2,200 | py | Python | src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
]
| null | null | null | src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
]
| null | null | null | src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
]
| 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.factory import StorageFactory
from programy.config.brain.braintree import BrainBraintreeConfiguration
class BraintreeManager(object):
def __init__(self, braintree_configuration, admin_user="system"):
assert (braintree_configuration is not None)
assert (isinstance(braintree_configuration, BrainBraintreeConfiguration))
self._configuration = braintree_configuration
self._save_as_user = self._configuration.save_as_user
def dump_brain_tree(self, client_context):
if self._configuration.create is True:
YLogger.debug(self, "Dumping AIML Graph as tree to [%s]", self._configuration.file)
if client_context.client.storage_factory.entity_storage_engine_available(StorageFactory.BRAINTREE) is True:
storage_engine = client_context.client.storage_factory.entity_storage_engine(StorageFactory.BRAINTREE)
braintree_storage = storage_engine.braintree_storage()
braintree_storage.save_braintree(client_context)
| 55 | 120 | 0.782727 | 923 | 0.419545 | 0 | 0 | 0 | 0 | 0 | 0 | 1,143 | 0.519545 |
40ea3c645ea543c1874475b7543e5383d030798e | 6,095 | py | Python | reana_commons/publisher.py | marcdiazsan/reana-commons | 6e3a64db6798ab86aa521da02fa889459a382083 | [
"MIT"
]
| null | null | null | reana_commons/publisher.py | marcdiazsan/reana-commons | 6e3a64db6798ab86aa521da02fa889459a382083 | [
"MIT"
]
| null | null | null | reana_commons/publisher.py | marcdiazsan/reana-commons | 6e3a64db6798ab86aa521da02fa889459a382083 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Commons module to manage AMQP connections on REANA."""
import json
import logging
from kombu import Connection, Exchange, Queue
from .config import (
MQ_CONNECTION_STRING,
MQ_DEFAULT_EXCHANGE,
MQ_DEFAULT_FORMAT,
MQ_DEFAULT_QUEUES,
MQ_PRODUCER_MAX_RETRIES,
)
class BasePublisher(object):
"""Base publisher to MQ."""
def __init__(
self,
queue,
routing_key,
connection=None,
exchange=None,
durable=False,
max_priority=None,
):
"""Initialise the BasePublisher class.
:param connection: A :class:`kombu.Connection`, if not provided a
:class:`kombu.Connection` with the default configuration will
be instantiated.
:param queue: String which represents the queue the messages will
be sent to.
:param routing_key: String which represents the routing key which
will be used to send the messages, if not provided default
routing key will be used.
:param exchange: A :class:`kombu.Exchange` where the messages will
be delivered to, if not provided, it will be instantiated with
the default configuration.
"""
self._routing_key = routing_key
self._exchange = (
exchange
if isinstance(exchange, Exchange)
else Exchange(name=exchange or MQ_DEFAULT_EXCHANGE, type="direct")
)
self._queue = (
queue
if isinstance(queue, Queue)
else Queue(
queue,
durable=durable,
exchange=self._exchange,
routing_key=self._routing_key,
max_priority=max_priority,
)
)
self._connection = connection or Connection(MQ_CONNECTION_STRING)
self.producer = self._build_producer()
def _build_producer(self):
"""Instantiate a :class:`kombu.Producer`."""
return self._connection.Producer(serializer=MQ_DEFAULT_FORMAT)
def __error_callback(self, exception, interval):
"""Execute when there is an error while sending a message.
:param exception: Exception which has been thrown while trying to send
the message.
:param interval: Interval in which the message delivery will be
retried.
"""
logging.error("Error while publishing {}".format(exception))
logging.info("Retry in %s seconds.", interval)
def _publish(self, msg, priority=None):
"""Publish, handling retries, a message in the queue.
:param msg: Object which represents the message to be sent in
the queue. Note that this object should be serializable in the
configured format (by default JSON).
:param priority: Message priority.
"""
connection = self._connection.clone()
publish = connection.ensure(
self.producer,
self.producer.publish,
errback=self.__error_callback,
max_retries=MQ_PRODUCER_MAX_RETRIES,
)
publish(
json.dumps(msg),
exchange=self._exchange,
routing_key=self._routing_key,
declare=[self._queue],
priority=priority,
)
logging.debug("Publisher: message sent: %s", msg)
def close(self):
"""Close connection."""
logging.debug("Publisher: closing queue connection")
self._connection.release()
class WorkflowStatusPublisher(BasePublisher):
"""Progress publisher to MQ."""
def __init__(self, **kwargs):
"""Initialise the WorkflowStatusPublisher class."""
queue = "jobs-status"
if "queue" not in kwargs:
kwargs["queue"] = "jobs-status"
if "routing_key" not in kwargs:
kwargs["routing_key"] = MQ_DEFAULT_QUEUES[queue]["routing_key"]
if "durable" not in kwargs:
kwargs["durable"] = MQ_DEFAULT_QUEUES[queue]["durable"]
super(WorkflowStatusPublisher, self).__init__(**kwargs)
def publish_workflow_status(self, workflow_uuid, status, logs="", message=None):
"""Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
"""
msg = {
"workflow_uuid": workflow_uuid,
"logs": logs,
"status": status,
"message": message,
}
self._publish(msg)
class WorkflowSubmissionPublisher(BasePublisher):
"""Workflow submission publisher."""
def __init__(self, **kwargs):
"""Initialise the WorkflowSubmissionPublisher class."""
queue = "workflow-submission"
super(WorkflowSubmissionPublisher, self).__init__(
queue,
MQ_DEFAULT_QUEUES[queue]["routing_key"],
durable=MQ_DEFAULT_QUEUES[queue]["durable"],
max_priority=MQ_DEFAULT_QUEUES[queue]["max_priority"],
**kwargs
)
def publish_workflow_submission(
self, user_id, workflow_id_or_name, parameters, priority=0, min_job_memory=0,
):
"""Publish workflow submission parameters."""
msg = {
"user": user_id,
"workflow_id_or_name": workflow_id_or_name,
"parameters": parameters,
"priority": priority,
"min_job_memory": min_job_memory,
}
self._publish(msg, priority)
| 34.828571 | 85 | 0.620673 | 5,571 | 0.914028 | 0 | 0 | 0 | 0 | 0 | 0 | 2,785 | 0.456932 |
40ea5c5e0176d43f5d51fa89b969ce72cc0fce56 | 1,219 | py | Python | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
]
| 3 | 2020-03-24T08:06:37.000Z | 2020-03-29T08:53:55.000Z | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
]
| 7 | 2020-03-23T12:36:01.000Z | 2020-04-11T08:14:06.000Z | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
]
| null | null | null | from sqlalchemy import (
BigInteger,
Column,
DateTime,
Text,
String,
Integer,
)
from sqlalchemy.sql.functions import current_timestamp
from model.base import BaseObject
class Commit(BaseObject):
__tablename__ = 'commits'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
sha = Column(String(40), unique=True, nullable=False)
message = Column(Text)
parent_a = Column(String(40))
parent_b = Column(String(40))
production_reported = Column(Integer)
class Issue(BaseObject):
__tablename__ = 'issues'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
number = Column(Integer, unique=True, nullable=False)
state = Column(String(10))
title = Column(Text)
body = Column(Text)
labels = Column(String(128))
assignee = Column(String(128))
| 31.25641 | 108 | 0.721903 | 1,019 | 0.835931 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.013946 |
40eaa3da9e931ca4a3dcce107069762aa322fa53 | 24 | py | Python | drae/__init__.py | hso/drae.py | b78772fa055fe5f8acb2bb44d7e7573af277226b | [
"MIT"
]
| null | null | null | drae/__init__.py | hso/drae.py | b78772fa055fe5f8acb2bb44d7e7573af277226b | [
"MIT"
]
| null | null | null | drae/__init__.py | hso/drae.py | b78772fa055fe5f8acb2bb44d7e7573af277226b | [
"MIT"
]
| null | null | null | from drae import search
| 12 | 23 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
40ead0d637c17ba1e4a9c64f3e4137d28ac75a83 | 13,825 | py | Python | tests/components/template/test_select.py | JeffersonBledsoe/core | 3825f80a2dd087ae70654079cd9f3071289b8423 | [
"Apache-2.0"
]
| 5 | 2017-01-26T16:33:09.000Z | 2018-07-20T13:50:47.000Z | tests/components/template/test_select.py | JeffersonBledsoe/core | 3825f80a2dd087ae70654079cd9f3071289b8423 | [
"Apache-2.0"
]
| 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | tests/components/template/test_select.py | yuvalkob/home-assistant | 6a5895222ec908acad3cf478897ca2455f88f730 | [
"Apache-2.0"
]
| 3 | 2021-05-31T15:32:08.000Z | 2021-08-10T22:08:42.000Z | """The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Represents the select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
"""Test: multiple select entities get created."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": [
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
]
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
_verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
"""Test: missing required fields will fail."""
with assert_setup_component(0, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all("select") == []
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
ent_reg = async_get(hass)
entry = ent_reg.async_get(_TEST_SELECT)
assert entry
assert entry.unique_id == "b-a"
_verify(hass, "a", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
SERVICE_SET_OPTIONS,
{
CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b", "c"])
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
blocking=True,
)
_verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
"""Test trigger based template select."""
events = async_capture_events(hass, "test_number_event")
assert await setup.async_setup_component(
hass,
"template",
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"select": [
{
"name": "Hello Name",
"unique_id": "hello_name-id",
"state": "{{ trigger.event.data.beer }}",
"options": "{{ trigger.event.data.beers }}",
"select_option": {"event": "test_number_event"},
"optimistic": True,
},
],
},
],
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == STATE_UNKNOWN
context = Context()
hass.bus.async_fire(
"test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
)
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == "duff"
assert state.attributes["options"] == ["duff", "alamo"]
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
blocking=True,
)
assert len(events) == 1
assert events[0].event_type == "test_number_event"
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
async def test_template_icon_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
"icon": f"{{% if (states('{_OPTION_INPUT_SELECT}') == 'a') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
async def test_template_icon_with_trigger(hass):
"""Test trigger based template select."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"trigger": {"platform": "state", "entity_id": _OPTION_INPUT_SELECT},
"select": {
"unique_id": "b",
"state": "{{ trigger.to_state.state }}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"icon": "{% if (trigger.to_state.state or '') == 'a' %}mdi:greater{% else %}mdi:less{% endif %}",
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state is not None
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "a"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
| 32.529412 | 132 | 0.502351 | 0 | 0 | 0 | 0 | 131 | 0.009476 | 12,354 | 0.893599 | 3,294 | 0.238264 |
40eb080a05a597358c0a6ee395b1cbd8baf803e7 | 7,211 | py | Python | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
]
| null | null | null | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
]
| null | null | null | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
]
| null | null | null | import os
import random
import string
import base64
from django.utils import timezone
from django.contrib.auth.hashers import make_password, check_password
from django.test import TestCase
from parameterized import parameterized
from core.models import Module, EntryPoint, ExternalAuthorizationSession, User
AUTHORIZATION_MODULE_LIST = ["ihna", "google", "mailru"]
class TestApplicationProcess(TestCase):
PASSWORD_LENGTH = 25
auth_sessions = None
uuid_list = None
@classmethod
def setUpTestData(cls):
cls.auth_sessions = {}
cls.session_keys = {}
user = User(login="sergei.kozhukhov")
user.save()
for module in AUTHORIZATION_MODULE_LIST:
password = cls.generate_random_password()
password_hash = make_password(password)
module_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=module)
session = ExternalAuthorizationSession(
authorization_module=module_app,
session_key=password_hash,
session_key_expiry_date=timezone.now()
)
session.save()
session_key = base64.encodebytes((str(session.id) + ":" + password).encode("utf-8")).decode("utf-8")
cls.auth_sessions[module] = session_key
Account = cls.get_account_class(module)
Account(user=user, email="[email protected]").save()
cls.uuid_list = {}
for apps_used in ['imaging', 'roi']:
cls.uuid_list[apps_used] = Module.objects.get(alias=apps_used).uuid
@parameterized.expand([
(["core", "authorizations"], [
("standard", None),
("ihna", "<div class='auth ihna'></div>"),
("google", "<div class='auth google'></div>"),
("mailru", "<div class='auth mailru'></div>"),
("unix", None),
("cookie", None),
("password_recovery", None),
("auto", None),
]),
(["core", "synchronizations"], [
("ihna_employees", None),
]),
(["core", "projects"], [
("imaging", None),
]),
(["core", "projects", "imaging", "processors"], [
("roi", None),
]),
])
def test_widgets_show(self, route, expected_widget_list):
app = None
entry_point = None
current_route = list(route)
current_look = "app"
while len(current_route) > 0:
route_element = current_route.pop(0)
if current_look == "app":
app = Module.objects.get(alias=route_element, parent_entry_point=entry_point)
current_look = "ep"
elif current_look == "ep":
entry_point = EntryPoint.objects.get(alias=route_element, belonging_module=app)
current_look = "app"
self.assertEquals(current_look, "app")
values = Module.objects.filter(parent_entry_point=entry_point).values("alias", "html_code")
self.assertEquals(len(values), len(expected_widget_list),
"Number of modules attached to this entry point is not the same as expected")
for value in values:
alias = value['alias']
html_code = value['html_code']
expected_widget_found = False
for expected_alias, expected_widget in expected_widget_list:
if expected_alias == alias:
expected_widget_found = True
if html_code is not None and expected_widget is None:
self.fail("HTML code for module '%s' does not exist but expected" % alias)
if html_code is None and expected_widget is not None:
self.fail("HTML code for module '%s' exists but not expected" % alias)
if html_code is not None and expected_widget is not None:
self.assertHTMLEqual(html_code, expected_widget,
"HTML code for module '%s' is not the same as expected" % html_code)
break
self.assertTrue(expected_widget_found, "the module '%s' is not within the list of expected modules" %
alias)
@parameterized.expand([
("standard", "core.authorizations.StandardAuthorization"),
("ihna", "authorizations.ihna.App"),
("google", "authorizations.google.App"),
("mailru", "authorizations.mailru.App"),
("unix", "core.authorizations.UnixAuthorization"),
("cookie", "authorizations.cookie.App"),
("password_recovery", "core.authorizations.PasswordRecoveryAuthorization"),
("auto", "core.authorizations.AutomaticAuthorization"),
])
def test_authorization_modules(self, alias, expected_authorization_module):
authorization_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=alias)
authorization_module = authorization_app.app_class
self.assertEquals(authorization_module, expected_authorization_module)
def test_authorization_sessions(self):
for module, session_key in self.auth_sessions.items():
session_info = base64.decodebytes(session_key.encode("utf-8")).decode("utf-8")
session_id, session_password = session_info.split(":", 1)
session = ExternalAuthorizationSession.objects.get(authorization_module__alias=module, id=session_id)
stored_password_hash = session.session_key
self.assertTrue(check_password(session_password, stored_password_hash))
module_class = session.authorization_module.app_class
session.delete()
            self.assertEqual(module_class.split('.')[1], module)
def test_find_user(self):
for module in AUTHORIZATION_MODULE_LIST:
account_class = self.get_account_class(module)
account = account_class.objects.get(email="[email protected]")
self.assertEquals(account.user.login, "sergei.kozhukhov")
    def test_account_contingency(self):
for module in AUTHORIZATION_MODULE_LIST:
self.assertEquals(self.get_account_class(module).objects.count(), 1)
User.objects.get(login="sergei.kozhukhov").delete()
for module in AUTHORIZATION_MODULE_LIST:
self.assertEquals(self.get_account_class(module).objects.count(), 0)
def test_access_by_uuid(self):
for module_name, uuid in self.uuid_list.items():
module_class = Module.objects.get(uuid=uuid).app_class
actual_module_name, module_class = module_class.split('.')
self.assertEquals(actual_module_name, module_name)
self.assertEquals(module_class, "App")
@classmethod
def generate_random_password(cls):
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
        random.seed(os.urandom(1024))
return ''.join(random.choice(chars) for i in range(cls.PASSWORD_LENGTH))
@classmethod
def get_account_class(cls, module):
import authorizations
auth_module = getattr(authorizations, module)
return auth_module.models.Account
| 43.969512 | 113 | 0.627791 | 6,840 | 0.948551 | 0 | 0 | 5,029 | 0.697407 | 0 | 0 | 1,229 | 0.170434 |
40eb7e71257ab84eead04db6c8b696939ea7b84e | 6,729 | py | Python | cmsfix/lib/macro.py | trmznt/cmsfix | 18d0be238f9247421db9603f1946478452336afb | [
"BSD-2-Clause"
]
| null | null | null | cmsfix/lib/macro.py | trmznt/cmsfix | 18d0be238f9247421db9603f1946478452336afb | [
"BSD-2-Clause"
]
| null | null | null | cmsfix/lib/macro.py | trmznt/cmsfix | 18d0be238f9247421db9603f1946478452336afb | [
"BSD-2-Clause"
]
| null | null | null |
from rhombus.lib.utils import get_dbhandler
from rhombus.lib.tags import *
from cmsfix.models.node import Node
import re
# the pattern below is either
# ///123
# <<MacroName>>
# [[MacroName]]
pattern = re.compile('///(\d+)|///\{([\w-]+)\}|\<\;\<\;(.+)\>\;\>\;|\[\[(.+)\]\]')
# syntax for Macro is:
# [[MacroName|option1|option2|option3]]
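# Illustrative strings the pattern above is meant to catch (added examples,
# not part of the original module; the uuid shown is made up):
#   ///123                                   -> link to the node with id 123
#   ///{0f8fad5b-d9cb-469f-a165-70867728950e} -> link to the node with that uuid
#   [[Img|//123]]                            -> macro invocation with one option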
class MacroError(RuntimeError):
pass
def postrender(buffer, node, request):
""" return a new buffer """
dbh = get_dbhandler()
nb = ''
start_pos = 0
for m in pattern.finditer(buffer):
nb += buffer[start_pos:m.start()]
group = m.group()
print(group)
if group.startswith('///'):
nb += node_link(group, dbh)
elif group.startswith('[['):
nb += run_macro(group, node, dbh, request)
else:
nb += '{{ ERR: macro pattern unprocessed }}'
start_pos = m.end()
nb += buffer[start_pos:]
return nb
def postedit(content, node):
""" post edit the content, return a new modified content """
dbh = get_dbhandler()
nc = ''
start_pos = 0
for m in pattern.finditer(content):
nc += content[start_pos:m.start()]
group = m.group()
if group.startswith('///'):
if group[3] != '{':
# convert to UUID
node = dbh.get_node_by_id(int(group[3:]))
nc += ('///{' + str(node.uuid) + '}' if node else group)
else:
nc += group
else:
nc += group
start_pos = m.end()
nc += content[start_pos:]
return nc
def node_link(text, dbh):
try:
if text[3] == '{':
node = dbh.get_nodes_by_uuids(text[4:-1])
else:
node = dbh.get_node_by_id(int(text[3:]))
except:
node = None
if node is None:
return literal('<b>%s</b>' % text)
return literal('<a href="/%s">%s</a>' % (node.url, node.title))
def run_macro(text, node, dbh, request):
global _MACROS_
text = text[2:-2]
components = text.split('|')
macro_name = components[0]
if macro_name not in _MACROS_:
return '[[ERR - macro %s not found]]' % macro_name
try:
return _MACROS_[macro_name](node, components[1:], request)
except MacroError as m_err:
return '[[%s ERR: %s]]' % (macro_name, m_err)
_MACROS_ = {}
def macro(func):
global _MACROS_
macro_name = func.__name__
if not macro_name.startswith('M_'):
raise RuntimeError('function name does not start with M_')
_MACROS_[macro_name[2:]] = func
return func
def macro_dict():
return _MACROS_
## -- MACRO --
##
## all macro functions should return either html or literal objects
##
@macro
def M_ListChildNodes(node, components, request):
""" Create list of child nodes.
[[ListChildNodes|option|option|..]]
Options:
type=Nodetype(PageNode,JournalNode, etc)
order=[+-]slug/id/mtime/title
Example:
[[ListChildNodes|type=PageNode|order=+title]]
"""
nodetype=[]
children = node.children
for c in components:
if c.startswith('type='):
nodetype.append( c[5:] )
elif c.startswith('order='):
order = c[6:].strip().lower()
desc = False
if order[0] == '-':
desc = True
order = order[1:]
elif order[0] == '+':
order = order[1:]
# we cancel the default ordering first
children = node.children.order_by(None)
if order == 'slug':
if desc: children = children.order_by(Node.slug.desc())
else: children = children.order_by(Node.slug)
elif order == 'id':
if desc: children = children.order_by(Node.id.desc())
else: children = children.order_by(Node.id)
elif order == 'mtime':
if desc: children = children.order_by(Node.stamp.desc())
else: children = children.order_by(Node.stamp)
elif order == 'title':
children_list = sorted( [(n.title or n.slug, n) for n in children.all()],
reverse = desc)
children = [n for (k, n) in children_list]
else:
raise MacroError("unknown order option: %s" % order )
html = div()
toc = ul()
if not nodetype:
nodetype.append( 'PageNode' )
for c in children:
if c.__class__.__name__ in nodetype:
toc.add(
li(a(c.title, href=c.path))
)
html.add(toc)
return html
@macro
def M_Img(node, components, request):
""" Show embedded images in the text.
[[Img|source|option|option|...]]
source: link to source (//ID, /images/a.jpg, http://domain/image.jpg, path/to/image.jpg)
Options:
currently none
"""
path = components[0]
if path.startswith('http') or path.startswith('ftp'):
url = path
elif path.startswith('//'):
image_node_id = int(path[2:])
image_node = get_dbhandler().get_node_by_id(image_node_id)
if not image_node:
return '[[ Invalid image macro: non existent node %d]]' % image_node_id
url = image_node.path
elif path.startswith('/'):
# check node with this path
path_node = get_dbhandler().get_node_by_path(path)
if not path_node:
return '[[ Invalid image macro: not existent path %s ]]' % path
url = path
else:
url = '/%s/%s' % (node.url, path)
#return '[[ Invalid image macro (%s) ]]' % path
return literal('<img src="%s" />' % url)
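# Example invocations of the Img macro (added for illustration; the file
# names are hypothetical):
#   [[Img|//123]]               -> image stored in node 123
#   [[Img|/images/logo.png]]    -> image addressed by an absolute node path
#   [[Img|figures/plot.png]]    -> image relative to the current node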
@macro
def M_ListNode(node, components, request):
""" Create list of nodes that are accessible by the current user.
[[ListNode|option|...]]
Options:
level = node level
tags = only nodes which have these tags
Example:
[[ListNode|level=2|tags=keyword1;keyword2]]
"""
kwargs = {}
for c in components:
if c.startswith('level='):
kwargs['level'] = int(c[6:])
elif c.startswith('tags='):
kwargs['tags'] = c[5:].split(';')
elif c.startswith('limit='):
pass
nodes = get_dbhandler().get_nodes(**kwargs)
# check accessibility
nodes = [ n for n in nodes if get_workflow(n).is_accessible(n, request) ]
html = div()
toc = ul()
for n in nodes:
# check user accessibility
toc.add(
li(a(n.title or n.slug, href=n.path))
)
html.add(toc)
return html
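# A minimal sketch of how a new macro would be registered with the @macro
# decorator defined above (added example, not part of the original module;
# literal() is assumed to come from rhombus.lib.tags, as used elsewhere here).
# The function name must start with 'M_'; the remainder becomes the macro
# name, so this one would be invoked in page content as [[Hello|world]].
@macro
def M_Hello(node, components, request):
    """ [[Hello|name]] - render a simple greeting """
    name = components[0] if components else 'anonymous'
    return literal('<b>Hello, %s!</b>' % name)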
| 24.558394 | 96 | 0.541537 | 40 | 0.005944 | 0 | 0 | 3,956 | 0.587903 | 0 | 0 | 1,788 | 0.265716 |
40ed1faf7a529d9d2608043132523587818592bc | 2,629 | py | Python | xastropy/sdss/qso.py | bpholden/xastropy | 66aff0995a84c6829da65996d2379ba4c946dabe | [
"BSD-3-Clause"
]
| 3 | 2015-08-23T00:32:58.000Z | 2020-12-31T02:37:52.000Z | xastropy/sdss/qso.py | Kristall-WangShiwei/xastropy | 723fe56cb48d5a5c4cdded839082ee12ef8c6732 | [
"BSD-3-Clause"
]
| 104 | 2015-07-17T18:31:54.000Z | 2018-06-29T17:04:09.000Z | xastropy/sdss/qso.py | Kristall-WangShiwei/xastropy | 723fe56cb48d5a5c4cdded839082ee12ef8c6732 | [
"BSD-3-Clause"
]
| 16 | 2015-07-17T15:50:37.000Z | 2019-04-21T03:42:47.000Z | '''
#;+
#; NAME:
#; sdss.qso
#; Version 1.1
#;
#; PURPOSE:
#; Class for SDSS QSO
#; 2015 Written by JXP
#;-
#;------------------------------------------------------------------------------
'''
# Import libraries
import numpy as np
import os
from astropy.table import QTable, Column
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.units import Quantity
from xastropy.obs import radec as xor
from xastropy.xutils import xdebug as xdb
class SdssQso(object):
'''Class to handle a single SDSS Quasar
Parameters:
----------
coord: SkyCoord, optional
RA/Dec of the sightline
z: float, optional
Emission redshift
database: SdssQuasars class, optional
Required for grabbing data, etc.
'''
# Init
def __init__(self, coord=None, z=0., database=None, verbose=True):
# Init
if coord is None:
radec = (0.*u.deg, 0.*u.deg)
            self.coord = SkyCoord(ra=radec[0], dec=radec[1])
else:
self.coord = coord
self.z = z
self.verbose = verbose
self.database = database
# None init
self._specfil = None
def get_specfil(self):
'''Parse the SDSS spectrum file
Requires a link to the database Class
'''
if self.database is None:
raise IOError('SdssQso: Need to be linked to an SDSS Database')
# Generate file name (DR4 is different)
pnm = '{0:04d}'.format(
self.database._data[self.database.index]['PLATE'])
#fnm = '{0:04d}'.format(
# self.database._data[self.database.index]['FIBERID'])
fnm = '{0:03d}'.format(
self.database._data[self.database.index]['FIBERID'])
mjd = str(self.database._data[self.database.index]['MJD'])
sfil = self.database._datdir+pnm+'/1d/'+'spSpec-'
# Finish
self._specfil = sfil+mjd+'-'+pnm+'-'+fnm+'.fit' # Is usually gzipped
def load_spec(self):
'''Input the Spectrum
'''
from linetools.spectra.xspectrum1d import XSpectrum1D
if self._specfil is None:
self.get_specfil()
#
if self.verbose:
print('SdssQso: Loading spectrum from {:s}'.format(self._specfil))
self.spec = XSpectrum1D.from_file(self._specfil)
def __repr__(self):
''' For printing
'''
return '[{:s}: {:s} {:s}, z={:g}]'.format(self.__class__.__name__,
self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
self.coord.dec.to_string(sep=':',pad=True,alwayssign=True), self.z)
| 30.218391 | 80 | 0.573602 | 2,145 | 0.8159 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.37771 |
40ee9a52429bac1502e511dda17968ae00643dd6 | 41 | py | Python | ez_sten/__init__.py | deadlift1226/ez-sten | 7f754e5648ce6b7d5207a901618b77a8e4382c86 | [
"MIT"
]
| null | null | null | ez_sten/__init__.py | deadlift1226/ez-sten | 7f754e5648ce6b7d5207a901618b77a8e4382c86 | [
"MIT"
]
| null | null | null | ez_sten/__init__.py | deadlift1226/ez-sten | 7f754e5648ce6b7d5207a901618b77a8e4382c86 | [
"MIT"
]
| null | null | null | name = "module"
from .module import func
| 13.666667 | 24 | 0.731707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.195122 |
40ef2f9956caa7a12ca34a8e2817ab06584f9a11 | 3,110 | py | Python | wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | [
"Apache-2.0"
]
| 81 | 2015-01-19T18:17:31.000Z | 2022-03-17T07:14:43.000Z | wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | [
"Apache-2.0"
]
| 159 | 2015-02-05T01:54:52.000Z | 2022-03-30T22:44:39.000Z | wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | [
"Apache-2.0"
]
| 70 | 2015-01-02T15:22:39.000Z | 2022-02-11T00:33:07.000Z | import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
| 40.921053 | 112 | 0.630868 | 2,743 | 0.881994 | 0 | 0 | 2,813 | 0.904502 | 0 | 0 | 714 | 0.229582 |
40f05be8c6d026f9f65c428c8494f859b10c0a2f | 6,848 | py | Python | lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 5166a366fca850a72de66e5ac48c421d4bb766f4 | [
"Unlicense"
]
| 1 | 2018-04-15T07:36:22.000Z | 2018-04-15T07:36:22.000Z | lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 5166a366fca850a72de66e5ac48c421d4bb766f4 | [
"Unlicense"
]
| null | null | null | lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 5166a366fca850a72de66e5ac48c421d4bb766f4 | [
"Unlicense"
]
| null | null | null | #-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab4_runTFCurveFitting.py
This is an example of linear regression in TensorFlow,
which is a curve-fitting example.
written by Jaewook Kang @ Aug 2017
#------------------------------------------------------------
'''
from os import getcwd
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# from __future__ import print_function
# Preparing data set ================================================
from tensorflow.examples.tutorials.mnist import input_data
# generation of sinusoid data set
total_size = 5000
training_size = 4000
validation_size = total_size - training_size
xsize = 50 # the size of single x_data
x_data = np.zeros([xsize, total_size])
cos_x = np.zeros([xsize, total_size])
mag = 1.0
phase_rad = np.pi/4
rad_freq = np.pi / 2.0
for i in range(total_size):
x_data[:,i] = np.linspace(-4,4,xsize)
cos_x = np.cos(rad_freq*x_data + phase_rad)
noise_var = 0.01
noise = np.sqrt(noise_var) * np.random.randn(xsize,total_size)
y_clean = cos_x
y_data = y_clean + noise
x_training_data = x_data[:,0:training_size]
y_training_data = y_data[:,0:training_size]
x_validation_data = x_data[:,training_size:-1]
y_validation_data = y_data[:,training_size:-1]
# signal plot
# hfig1= plt.figure(1,figsize=[10,10])
# plt.plot(cos_x[:,1],color='b',label='clean')
# plt.plot(y_data[:,1],color='r',label='noisy')
# plt.legend()
# configure training parameters =====================================
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 1
# computational TF graph construction ================================
##---------------- Define graph nodes -------------------
# tf Graph data input holder
# (x,y) : input / output of prediction model
#        which will be fed by training data in the TF graph computation
# (a,b,c,d) : model parameters
# which will be learned from training data in the TF graph computation
x = tf.placeholder(tf.float32, [xsize,None])
y = tf.placeholder(tf.float32, [xsize,None])
# Set model weights which is calculated in the TF graph
a = tf.Variable(1.) # initialization by 1
b = tf.Variable(1.)
c = tf.Variable(1.)
d = tf.Variable(1.)
print ('TF graph nodes are defined')
##--------------------- Define function -----------------
# define relationshitp btw instance data x and label data y
# define optimizer used in the learning phase
# define cost function for optimization
# Construct model
pred_y = c*tf.cos(a*x+b)+d
# Minimize error using MSE function
cost = tf.reduce_mean(tf.reduce_sum( tf.square(y - pred_y) , reduction_indices=1), name="mse")
# Gradient Descent
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print ('Functions in TF graph are ready')
## Performance evaluation model =====================================
# y : data output
# pred_y: prediction output by model, a x^3 + b x^2 + c x + d
correct_prediction = cost
# Calculate error rate using data --------------
# where
# tf_reduce_mean(input_tensor, axis) : reduce dimension of tensor by computing the mean of elements
# # 'x' is [[1., 1.]
# [2., 2.]]
# tf.reduce_mean(x) ==> 1.5
# tf.reduce_mean(x, 0) ==> [1.5, 1.5]
# tf.reduce_mean(x, 1) ==> [1., 2.]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
error_rate_training = np.zeros(training_epochs)
error_rate_validation = np.zeros(training_epochs)
# Launch the graph (execution) ========================================
# Initializing the variables
init = tf.global_variables_initializer()
## -------------------- Learning iteration start --------------------
with tf.Session() as sess:
sess.run(init) # this for variable use
# Training cycle
for epoch in range(training_epochs): # iteration loop
avg_cost = 0.
total_batch = int(training_size/batch_size) #
# Loop over all batches
for i in range(total_batch): # batch loop
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
# feed traing data --------------------------
batch_xs = x_training_data[:,data_start_index:data_end_index]
batch_ys = y_training_data[:,data_start_index:data_end_index]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
# feedign training data
_, local_batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/batch_size))
batch_xs = x_training_data
batch_ys = y_training_data
error_rate_training[epoch] = accuracy.eval({x: batch_xs, y: batch_ys},session=sess)/training_size
error_rate_validation[epoch] = accuracy.eval({x: x_validation_data, y: y_validation_data},session=sess)/validation_size
print("Training set MSE:", error_rate_training[epoch])
print("Validation set MSE:", error_rate_validation[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
pred_a = sess.run(a)
pred_b = sess.run(b)
pred_c = sess.run(c)
pred_d = sess.run(d)
hfig1 = plt.figure(1,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,error_rate_training,label='Training data',color='r',marker='o')
plt.plot(epoch_index,error_rate_validation,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('MSE of prediction:')
plt.xlabel('Iteration epoch')
plt.ylabel('MSE')
hfig2 = plt.figure(2,figsize=(10,10))
pred_y = pred_c * np.cos(pred_a * x_data[:,0] + pred_b) +pred_d
plt.plot(x_validation_data[:,0],y_validation_data[:,0],label='noisy data',color='b',marker='*')
plt.plot(x_validation_data[:,0], pred_y,label='prediction',color='r')
plt.legend()
plt.title('A line fitting example:')
plt.xlabel('X data')
plt.ylabel('Y data')
# FIG_SAVE_DIR = getcwd() + '/figs/'
# hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png')
# hfig1.clear()
| 32.923077 | 131 | 0.631425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,057 | 0.446408 |
40f1379ab73e0f4b4e9297a1caebe96d0365e7e2 | 577 | py | Python | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
]
| null | null | null | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
]
| null | null | null | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
]
| null | null | null | # coding=utf-8
from app.route.stats.processor import *
from app.api.base.base_router import BaseRouter
from app.api.base import base_name as names
class Stats(BaseRouter):
def __init__(self):
super().__init__()
self.args = [names.face, names.post, names.socnet, names.likes, names.views, names.comments]
def get(self):
self._read_args()
print(self.data)
answer = get_stat(self.data)
return answer or {}
def put(self):
self._read_args()
answer = update_stat(self.data)
return answer or {}
| 25.086957 | 100 | 0.646447 | 427 | 0.740035 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.024263 |
40f148fc7af6cb3cf9e625820f51746d54b4fd9d | 1,168 | py | Python | script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 6e9ccfd281bd317a56b8c4e87b5386978eb8de45 | [
"MIT"
]
| 4 | 2021-12-18T20:33:16.000Z | 2022-01-03T02:54:13.000Z | script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 6e9ccfd281bd317a56b8c4e87b5386978eb8de45 | [
"MIT"
]
| null | null | null | script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 6e9ccfd281bd317a56b8c4e87b5386978eb8de45 | [
"MIT"
]
| null | null | null | from Bio import TogoWS
import argparse
import sys
import os
def summary(options):
num_reads = 0
num_correct = 0
with open(options.input) as file_input:
for line in file_input:
line = line.rstrip()
ele = line.split("\t")
if "FAILED" in line:
continue
if "BAD" in ele[0]:
num_reads += 1
if "Bacteria" in line:
num_correct += 1
#raise("Makeblastdb failed!")
else:
num_reads += 1
if options.species in line:
num_correct += 1
#raise("Makeblastdb failed!")
percentage = 100* num_correct/num_reads
print("Percente: {perc}\n".format(perc = percentage))
if __name__ == '__main__':
## description - Text to display before the argument help (default: none)
parser=argparse.ArgumentParser(description='mbmeth')
parser.add_argument("-i", '--input', help="Input list")
parser.add_argument("-s", '--species', help="species")
options = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
summary(options)
| 29.948718 | 77 | 0.5625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.21661 |
40f24ffc2a5ce750fd7226190ea187a0e43d6f6d | 296 | py | Python | borax/patterns/singleton.py | kinegratii/borax | 3595f554b788c31d0f07be4099db68c854db65f7 | [
"MIT"
]
| 51 | 2018-04-18T13:52:15.000Z | 2022-03-23T13:46:02.000Z | borax/patterns/singleton.py | kinegratii/borax | 3595f554b788c31d0f07be4099db68c854db65f7 | [
"MIT"
]
| 26 | 2019-05-26T02:22:34.000Z | 2022-03-14T07:50:32.000Z | borax/patterns/singleton.py | kinegratii/borax | 3595f554b788c31d0f07be4099db68c854db65f7 | [
"MIT"
]
| 7 | 2018-09-30T08:17:29.000Z | 2020-12-16T01:49:24.000Z | # coding=utf8
class MetaSingleton(type):
def __init__(cls, *args):
type.__init__(cls, *args)
cls.instance = None
def __call__(cls, *args, **kwargs):
if not cls.instance:
cls.instance = type.__call__(cls, *args, **kwargs)
return cls.instance
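# Usage sketch (added example, not part of the original module): a class that
# adopts MetaSingleton as its metaclass always yields the same instance.
if __name__ == '__main__':
    class AppConfig(metaclass=MetaSingleton):
        def __init__(self, name='default'):
            self.name = name
    assert AppConfig() is AppConfig()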
| 22.769231 | 62 | 0.597973 | 279 | 0.942568 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.043919 |
40f2de4fdec91fb98024a2bfc2b3ed4d725f2c72 | 5,108 | py | Python | aiida/backends/general/migrations/utils.py | pranavmodx/aiida-core | 0edbbf82dfb97ab130914d1674a6f2217eba5971 | [
"BSD-2-Clause",
"MIT"
]
| null | null | null | aiida/backends/general/migrations/utils.py | pranavmodx/aiida-core | 0edbbf82dfb97ab130914d1674a6f2217eba5971 | [
"BSD-2-Clause",
"MIT"
]
| 2 | 2019-03-06T11:23:42.000Z | 2020-03-09T09:34:07.000Z | aiida/backends/general/migrations/utils.py | lorisercole/aiida-core | 84c2098318bf234641219e55795726f99dc25a16 | [
"MIT",
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used."""
import datetime
import errno
import os
import re
import numpy
from aiida.common import json
ISOFORMAT_DATETIME_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(\+\d{2}:\d{2})?$')
def ensure_repository_folder_created(uuid):
"""Make sure that the repository sub folder for the node with the given UUID exists or create it.
:param uuid: UUID of the node
"""
dirpath = get_node_repository_sub_folder(uuid)
try:
os.makedirs(dirpath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def put_object_from_string(uuid, name, content):
"""Write a file with the given content in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
:param content: the content to write to the file
"""
ensure_repository_folder_created(uuid)
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath, 'w', encoding='utf-8') as handle:
handle.write(content)
def get_object_from_repository(uuid, name):
"""Return the content of a file with the given name in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
"""
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath) as handle:
return handle.read()
def get_node_repository_sub_folder(uuid):
"""Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.
:param uuid: UUID of the node
:return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
"""
from aiida.manage.configuration import get_profile
uuid = str(uuid)
repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')
return node_dirpath
def get_numpy_array_absolute_path(uuid, name):
"""Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
:return: the absolute path of the numpy array file
"""
return os.path.join(get_node_repository_sub_folder(uuid), name + '.npy')
def store_numpy_array_in_repository(uuid, name, array):
"""Store a numpy array in the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:param array: the numpy array to store
"""
ensure_repository_folder_created(uuid)
filepath = get_numpy_array_absolute_path(uuid, name)
with open(filepath, 'wb') as handle:
numpy.save(handle, array)
def delete_numpy_array_from_repository(uuid, name):
"""Delete the numpy array with a given name from the repository corresponding to a node with a given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
try:
os.remove(filepath)
except (IOError, OSError):
pass
def load_numpy_array_from_repository(uuid, name):
"""Load and return a numpy array from the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:return: the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
return numpy.load(filepath)
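# Usage sketch for the three numpy helpers above (added example, not part of
# the original module; it assumes a loaded AiiDA profile so the repository
# path can be resolved, and node_uuid is a hypothetical node UUID):
#   store_numpy_array_in_repository(node_uuid, 'kpoints', numpy.arange(9).reshape(3, 3))
#   array = load_numpy_array_from_repository(node_uuid, 'kpoints')
#   delete_numpy_array_from_repository(node_uuid, 'kpoints')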
def recursive_datetime_to_isoformat(value):
"""Convert all datetime objects in the given value to string representations in ISO format.
:param value: a mapping, sequence or single value optionally containing datetime objects
"""
if isinstance(value, list):
return [recursive_datetime_to_isoformat(_) for _ in value]
if isinstance(value, dict):
return dict((key, recursive_datetime_to_isoformat(val)) for key, val in value.items())
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def dumps_json(dictionary):
"""Transforms all datetime object into isoformat and then returns the JSON."""
return json.dumps(recursive_datetime_to_isoformat(dictionary))
| 34.053333 | 119 | 0.66758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,808 | 0.549726 |
40f3ddcdfc03bc9856328d9f89786ad5e9dd0772 | 88 | py | Python | src/models/__init__.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
]
| null | null | null | src/models/__init__.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
]
| null | null | null | src/models/__init__.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
]
| null | null | null | __all__ = ["transformers", "vision"]
from .transformers import *
from .vision import *
| 17.6 | 36 | 0.715909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.25 |
40f4220eb6198005a87664aaa2c6ba2fd068a95c | 350 | py | Python | packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
]
| 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
]
| 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
]
| 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests the checker's ability to enforce
# type invariance for type arguments.
# pyright: strict
from typing import Dict, Union
foo: Dict[Union[int, str], str] = {}
bar: Dict[str, str] = {}
# This should generate an error because
# both type parameters for Dict are invariant,
# and str isn't assignable to Union[int, str].
foo = bar
| 23.333333 | 52 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.677143 |
40f50e67874d55319f2743b79ff2d604900796f7 | 224 | py | Python | test.py | Naveenkhasyap/udacity-ml | 6df851f7b21dee120a8e8f246df7961ea065eeac | [
"MIT"
]
| null | null | null | test.py | Naveenkhasyap/udacity-ml | 6df851f7b21dee120a8e8f246df7961ea065eeac | [
"MIT"
]
| null | null | null | test.py | Naveenkhasyap/udacity-ml | 6df851f7b21dee120a8e8f246df7961ea065eeac | [
"MIT"
]
| null | null | null | how_many_snakes = 1
snake_string = """
Welcome to Python3!
____
/ . .\\
\\ ---<
\\ /
__________/ /
-=:___________/
<3, Juno
"""
print(snake_string * how_many_snakes) | 14 | 37 | 0.473214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.665179 |
40f5c3fea77f91c61ea3a74c27daae2c26011e43 | 658 | py | Python | Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | webguru001/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
]
| 5 | 2019-05-17T01:30:02.000Z | 2021-06-17T21:02:58.000Z | Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
]
| null | null | null | Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
]
| null | null | null | from flask import Flask
from flask import render_template, redirect, session, request
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
@app.route('/')
def nothing():
return render_template('index.html')
@app.route('/ninja')
def ninja():
x = 'tmnt'
return render_template('ninjas.html', x=x)
@app.route('/ninja/<any>')
def ninjas_colors_any(any):
ninja_dict = {'blue': 'leonardo', 'red': 'raphael', 'purple': 'donatello', 'orange': 'michelangelo'}
if any in ninja_dict:
x = ninja_dict[any]
return render_template('ninjas.html', x=x)
else:
        x = 'notapril'
return render_template('ninjas.html', x=x)
app.run(debug=True) | 22.689655 | 102 | 0.682371 | 0 | 0 | 0 | 0 | 490 | 0.744681 | 0 | 0 | 177 | 0.268997 |
40f5d8bb4fa97a86898d698a3335896827401fd2 | 941 | py | Python | neo/Network/Inventory.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | [
"MIT"
]
| 15 | 2018-02-27T13:07:00.000Z | 2021-01-29T10:27:41.000Z | neo/Network/Inventory.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | [
"MIT"
]
| 3 | 2021-03-20T05:43:51.000Z | 2022-02-11T03:47:50.000Z | neo/Network/Inventory.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | [
"MIT"
]
| 6 | 2018-07-13T05:00:44.000Z | 2020-10-28T19:41:54.000Z | # -*- coding:utf-8 -*-
"""
Description:
Inventory Class
Usage:
from neo.Network.Inventory import Inventory
"""
from neo.IO.MemoryStream import MemoryStream
from neocore.IO.BinaryWriter import BinaryWriter
class Inventory(object):
"""docstring for Inventory"""
def __init__(self):
"""
Create an instance
"""
super(Inventory, self).__init__()
self.hash = None
def GetHashData(self):
"""
Get the hashable data.
Returns:
bytes:
"""
ms = MemoryStream()
w = BinaryWriter(ms)
self.SerializeUnsigned(w)
ms.flush()
return ms.ToArray()
def GetScriptHashesForVerifying(self):
pass
def Serialize(self, writer):
pass
def SerializeUnsigned(self, writer):
pass
def Deserialize(self, reader):
pass
def DeserializeUnsigned(self, reader):
pass
| 18.82 | 48 | 0.587673 | 724 | 0.769394 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.287991 |
40f5e193e0cc75def4b2ba8e4e082e5183a4bea7 | 4,748 | py | Python | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
]
| 3 | 2021-05-14T08:13:09.000Z | 2021-05-26T11:25:35.000Z | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
]
| 27 | 2021-05-13T08:43:19.000Z | 2021-08-24T17:19:36.000Z | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
]
| null | null | null | """
Copyright (C) 2021 Clariteia SL
This file is part of minos framework.
Minos framework can not be copied and/or distributed without the express permission of Clariteia SL.
"""
import unittest
from minos.api_gateway.common import (
EmptyMinosModelSequenceException,
MinosAttributeValidationException,
MinosConfigDefaultAlreadySetException,
MinosConfigException,
MinosException,
MinosMalformedAttributeException,
MinosModelAttributeException,
MinosModelException,
MinosParseAttributeException,
MinosRepositoryAggregateNotFoundException,
MinosRepositoryDeletedAggregateException,
MinosRepositoryException,
MinosRepositoryManuallySetAggregateIdException,
MinosRepositoryManuallySetAggregateVersionException,
MinosRepositoryNonProvidedException,
MinosRepositoryUnknownActionException,
MinosReqAttributeException,
MinosTypeAttributeException,
MultiTypeMinosModelSequenceException,
)
class TestExceptions(unittest.TestCase):
def test_type(self):
self.assertTrue(issubclass(MinosException, Exception))
def test_base_repr(self):
exception = MinosException("test")
self.assertEqual("MinosException(message='test')", repr(exception))
def test_base_str(self):
exception = MinosException("test")
self.assertEqual("test", str(exception))
def test_config(self):
self.assertTrue(issubclass(MinosConfigException, MinosException))
def test_config_default_already_set(self):
self.assertTrue(issubclass(MinosConfigDefaultAlreadySetException, MinosConfigException))
def test_repository_aggregate_not_found(self):
self.assertTrue(issubclass(MinosRepositoryAggregateNotFoundException, MinosRepositoryException))
def test_repository_deleted_aggregate(self):
self.assertTrue(issubclass(MinosRepositoryDeletedAggregateException, MinosRepositoryException))
def test_repository_manually_set_aggregate_id(self):
self.assertTrue(issubclass(MinosRepositoryManuallySetAggregateIdException, MinosRepositoryException))
def test_repository_manually_set_aggregate_version(self):
self.assertTrue(issubclass(MinosRepositoryManuallySetAggregateVersionException, MinosRepositoryException,))
def test_repository_bad_action(self):
self.assertTrue(issubclass(MinosRepositoryUnknownActionException, MinosRepositoryException))
def test_repository_non_set(self):
self.assertTrue(issubclass(MinosRepositoryNonProvidedException, MinosRepositoryException))
def test_model(self):
self.assertTrue(issubclass(MinosModelException, MinosException))
def test_model_emtpy_sequence(self):
self.assertTrue(issubclass(EmptyMinosModelSequenceException, MinosModelException))
def test_model_multi_type_sequence(self):
self.assertTrue(issubclass(MultiTypeMinosModelSequenceException, MinosModelException))
def test_model_attribute(self):
self.assertTrue(issubclass(MinosModelAttributeException, MinosException))
def test_required_attribute(self):
self.assertTrue(issubclass(MinosReqAttributeException, MinosModelAttributeException))
def test_type_attribute(self):
self.assertTrue(issubclass(MinosTypeAttributeException, MinosModelAttributeException))
def test_type_attribute_repr(self):
exception = MinosTypeAttributeException("foo", float, True)
message = (
"MinosTypeAttributeException(message=\"The <class 'float'> expected type for 'foo' "
"does not match with the given data type: <class 'bool'>\")"
)
self.assertEqual(message, repr(exception))
def test_malformed_attribute(self):
self.assertTrue(issubclass(MinosMalformedAttributeException, MinosModelAttributeException))
def test_parse_attribute(self):
self.assertTrue(issubclass(MinosParseAttributeException, MinosModelAttributeException))
def test_attribute_parse_repr(self):
exception = MinosParseAttributeException("foo", 34, ValueError())
message = (
'MinosParseAttributeException(message="ValueError() '
"was raised while parsing 'foo' field with 34 value.\")"
)
self.assertEqual(message, repr(exception))
def test_attribute_validation(self):
self.assertTrue(issubclass(MinosAttributeValidationException, MinosModelAttributeException))
def test_attribute_validation_repr(self):
exception = MinosAttributeValidationException("foo", 34)
message = "MinosAttributeValidationException(message=\"34 value does not pass the 'foo' field validation.\")"
self.assertEqual(message, repr(exception))
if __name__ == "__main__":
unittest.main()
| 39.566667 | 117 | 0.771272 | 3,737 | 0.787068 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.127843 |
40f7a744294465f0d9fa2d8e7fd481a7d36370d7 | 977 | py | Python | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
]
| 177 | 2018-01-05T01:46:07.000Z | 2018-03-09T05:32:45.000Z | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
]
| 15 | 2018-01-05T03:28:38.000Z | 2018-01-17T03:04:06.000Z | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
]
| 55 | 2018-01-05T05:24:55.000Z | 2018-01-25T11:53:38.000Z | # coding: utf-8
# quote from kmaiya/HQAutomator
# the Google-search part is carried over from the original without modification
import time
import json
import requests
import webbrowser
questions = []
def get_answer():
resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current',timeout=4).text
resp_dict = json.loads(resp)
if resp_dict['msg'] == 'no data':
return 'Waiting for question...'
else:
resp_dict = eval(str(resp))
question = resp_dict['data']['event']['desc']
question = question[question.find('.') + 1:question.find('?')]
if question not in questions:
questions.append(question)
webbrowser.open("https://www.baidu.com/s?ie=UTF-8&wd=" + question)
else:
return 'Waiting for new question...'
def main():
while True:
print(time.strftime('%H:%M:%S',time.localtime(time.time())))
print(get_answer())
time.sleep(1)
if __name__ == '__main__':
main()
| 25.710526 | 87 | 0.58956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.290387 |
40f82a11d157a4c060d3cd0a073c10873cb2a999 | 21,936 | py | Python | src/utils/TensorflowModel_pb2.py | nicolas-ivanov/MimicAndRephrase | 446674e1e6af133618e0e9888c3650c0ce9012e4 | [
"MIT"
]
| 12 | 2019-06-17T19:41:35.000Z | 2022-02-17T19:51:45.000Z | src/utils/TensorflowModel_pb2.py | nicolas-ivanov/MimicAndRephrase | 446674e1e6af133618e0e9888c3650c0ce9012e4 | [
"MIT"
]
| 1 | 2021-02-23T15:28:32.000Z | 2021-02-23T15:28:32.000Z | src/utils/TensorflowModel_pb2.py | isabella232/MimicAndRephrase | bd29a995b211cb4f7933fa990b0bba1564c22450 | [
"MIT"
]
| 3 | 2020-09-07T16:44:11.000Z | 2020-11-14T19:00:05.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: TensorflowModel.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='TensorflowModel.proto',
package='ai.eloquent',
syntax='proto3',
serialized_pb=_b('\n\x15TensorflowModel.proto\x12\x0b\x61i.eloquent\"\x8f\x01\n\x0fTensorflowModel\x12\x18\n\x10serialized_graph\x18\x01 \x01(\x0c\x12.\n\x0ctoken_mapper\x18\x02 \x01(\x0b\x32\x18.ai.eloquent.TokenMapper\x12\x16\n\x0etrain_set_size\x18\x04 \x01(\x03\x12\x1a\n\x12train_set_positive\x18\x05 \x01(\x03\"\x80\x01\n\x0cTokenMapping\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.ai.eloquent.TokenMappingType\x12\r\n\x05regex\x18\x02 \x01(\t\x12\x10\n\x08num_hash\x18\x03 \x01(\x05\x12\x12\n\ndebug_base\x18\x04 \x01(\t\x12\x0e\n\x06tokens\x18\x05 \x03(\t\"\x9d\x01\n\x0bTokenMapper\x12\x30\n\rtoken_mapping\x18\x01 \x03(\x0b\x32\x19.ai.eloquent.TokenMapping\x12.\n\x0bunk_mapping\x18\x02 \x03(\x0b\x32\x19.ai.eloquent.TokenMapping\x12,\n\x07vectors\x18\x03 \x03(\x0b\x32\x1b.ai.eloquent.TunedEmbedding\"\x1f\n\x0eTunedEmbedding\x12\r\n\x05value\x18\x01 \x03(\x02\"\xf0\x03\n\x1aTensorflowModelPerformance\x12\x0f\n\x07\x64\x65v_set\x18\x01 \x03(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x17\n\x0f\x64\x65v_set_version\x18\x03 \x01(\x03\x12\x16\n\x0etrain_set_size\x18\x04 \x01(\x03\x12\x1d\n\x15train_set_total_votes\x18\x05 \x01(\x03\x12\x14\n\x0c\x64\x65v_set_size\x18\x06 \x01(\x03\x12\x1b\n\x13\x64\x65v_set_total_votes\x18\x07 \x01(\x03\x12\x12\n\nbest_epoch\x18\x08 \x01(\x05\x12\x0f\n\x07\x64ropout\x18\t \x01(\x02\x12\x13\n\x0brandom_seed\x18\n \x01(\x05\x12\x12\n\nhidden_dim\x18\x0b \x01(\x05\x12\x15\n\rtrue_positive\x18\x0c \x01(\x03\x12\x16\n\x0e\x66\x61lse_positive\x18\r \x01(\x03\x12\x16\n\x0e\x66\x61lse_negative\x18\x0e \x01(\x03\x12\x15\n\rtrue_negative\x18\x0f \x01(\x03\x12\x11\n\tprecision\x18\x10 \x01(\x02\x12\x0e\n\x06recall\x18\x11 \x01(\x02\x12\n\n\x02\x66\x31\x18\x12 \x01(\x02\x12\x10\n\x08\x61\x63\x63uracy\x18\x13 \x01(\x02\x12@\n\x08\x65xamples\x18\x14 \x03(\x0b\x32..ai.eloquent.TensorflowModelPerformanceExample\"S\n!TensorflowModelPerformanceExample\x12\r\n\x05input\x18\x01 \x03(\t\x12\x0f\n\x07guesses\x18\x02 \x03(\x02\x12\x0e\n\x06labels\x18\x03 \x03(\x05*2\n\x10TokenMappingType\x12\t\n\x05REGEX\x10\x00\x12\x08\n\x04HASH\x10\x01\x12\t\n\x05TOKEN\x10\x02\x42)\n\x10\x61i.eloquent.dataB\x15TensorflowModelProtosb\x06proto3')
)
_TOKENMAPPINGTYPE = _descriptor.EnumDescriptor(
name='TokenMappingType',
full_name='ai.eloquent.TokenMappingType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REGEX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HASH', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOKEN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1092,
serialized_end=1142,
)
_sym_db.RegisterEnumDescriptor(_TOKENMAPPINGTYPE)
TokenMappingType = enum_type_wrapper.EnumTypeWrapper(_TOKENMAPPINGTYPE)
REGEX = 0
HASH = 1
TOKEN = 2
_TENSORFLOWMODEL = _descriptor.Descriptor(
name='TensorflowModel',
full_name='ai.eloquent.TensorflowModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='serialized_graph', full_name='ai.eloquent.TensorflowModel.serialized_graph', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='token_mapper', full_name='ai.eloquent.TensorflowModel.token_mapper', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_size', full_name='ai.eloquent.TensorflowModel.train_set_size', index=2,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_positive', full_name='ai.eloquent.TensorflowModel.train_set_positive', index=3,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=182,
)
_TOKENMAPPING = _descriptor.Descriptor(
name='TokenMapping',
full_name='ai.eloquent.TokenMapping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='ai.eloquent.TokenMapping.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='regex', full_name='ai.eloquent.TokenMapping.regex', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_hash', full_name='ai.eloquent.TokenMapping.num_hash', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_base', full_name='ai.eloquent.TokenMapping.debug_base', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tokens', full_name='ai.eloquent.TokenMapping.tokens', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=185,
serialized_end=313,
)
_TOKENMAPPER = _descriptor.Descriptor(
name='TokenMapper',
full_name='ai.eloquent.TokenMapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token_mapping', full_name='ai.eloquent.TokenMapper.token_mapping', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unk_mapping', full_name='ai.eloquent.TokenMapper.unk_mapping', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vectors', full_name='ai.eloquent.TokenMapper.vectors', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=316,
serialized_end=473,
)
_TUNEDEMBEDDING = _descriptor.Descriptor(
name='TunedEmbedding',
full_name='ai.eloquent.TunedEmbedding',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='ai.eloquent.TunedEmbedding.value', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=475,
serialized_end=506,
)
_TENSORFLOWMODELPERFORMANCE = _descriptor.Descriptor(
name='TensorflowModelPerformance',
full_name='ai.eloquent.TensorflowModelPerformance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dev_set', full_name='ai.eloquent.TensorflowModelPerformance.dev_set', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='ai.eloquent.TensorflowModelPerformance.version', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dev_set_version', full_name='ai.eloquent.TensorflowModelPerformance.dev_set_version', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_size', full_name='ai.eloquent.TensorflowModelPerformance.train_set_size', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_total_votes', full_name='ai.eloquent.TensorflowModelPerformance.train_set_total_votes', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dev_set_size', full_name='ai.eloquent.TensorflowModelPerformance.dev_set_size', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dev_set_total_votes', full_name='ai.eloquent.TensorflowModelPerformance.dev_set_total_votes', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_epoch', full_name='ai.eloquent.TensorflowModelPerformance.best_epoch', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout', full_name='ai.eloquent.TensorflowModelPerformance.dropout', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='random_seed', full_name='ai.eloquent.TensorflowModelPerformance.random_seed', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hidden_dim', full_name='ai.eloquent.TensorflowModelPerformance.hidden_dim', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='true_positive', full_name='ai.eloquent.TensorflowModelPerformance.true_positive', index=11,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='false_positive', full_name='ai.eloquent.TensorflowModelPerformance.false_positive', index=12,
number=13, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='false_negative', full_name='ai.eloquent.TensorflowModelPerformance.false_negative', index=13,
number=14, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='true_negative', full_name='ai.eloquent.TensorflowModelPerformance.true_negative', index=14,
number=15, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='precision', full_name='ai.eloquent.TensorflowModelPerformance.precision', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='recall', full_name='ai.eloquent.TensorflowModelPerformance.recall', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f1', full_name='ai.eloquent.TensorflowModelPerformance.f1', index=17,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accuracy', full_name='ai.eloquent.TensorflowModelPerformance.accuracy', index=18,
number=19, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='examples', full_name='ai.eloquent.TensorflowModelPerformance.examples', index=19,
number=20, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=509,
serialized_end=1005,
)
_TENSORFLOWMODELPERFORMANCEEXAMPLE = _descriptor.Descriptor(
name='TensorflowModelPerformanceExample',
full_name='ai.eloquent.TensorflowModelPerformanceExample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='ai.eloquent.TensorflowModelPerformanceExample.input', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='guesses', full_name='ai.eloquent.TensorflowModelPerformanceExample.guesses', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='labels', full_name='ai.eloquent.TensorflowModelPerformanceExample.labels', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1007,
serialized_end=1090,
)
_TENSORFLOWMODEL.fields_by_name['token_mapper'].message_type = _TOKENMAPPER
_TOKENMAPPING.fields_by_name['type'].enum_type = _TOKENMAPPINGTYPE
_TOKENMAPPER.fields_by_name['token_mapping'].message_type = _TOKENMAPPING
_TOKENMAPPER.fields_by_name['unk_mapping'].message_type = _TOKENMAPPING
_TOKENMAPPER.fields_by_name['vectors'].message_type = _TUNEDEMBEDDING
_TENSORFLOWMODELPERFORMANCE.fields_by_name['examples'].message_type = _TENSORFLOWMODELPERFORMANCEEXAMPLE
DESCRIPTOR.message_types_by_name['TensorflowModel'] = _TENSORFLOWMODEL
DESCRIPTOR.message_types_by_name['TokenMapping'] = _TOKENMAPPING
DESCRIPTOR.message_types_by_name['TokenMapper'] = _TOKENMAPPER
DESCRIPTOR.message_types_by_name['TunedEmbedding'] = _TUNEDEMBEDDING
DESCRIPTOR.message_types_by_name['TensorflowModelPerformance'] = _TENSORFLOWMODELPERFORMANCE
DESCRIPTOR.message_types_by_name['TensorflowModelPerformanceExample'] = _TENSORFLOWMODELPERFORMANCEEXAMPLE
DESCRIPTOR.enum_types_by_name['TokenMappingType'] = _TOKENMAPPINGTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TensorflowModel = _reflection.GeneratedProtocolMessageType('TensorflowModel', (_message.Message,), dict(
DESCRIPTOR = _TENSORFLOWMODEL,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TensorflowModel)
))
_sym_db.RegisterMessage(TensorflowModel)
TokenMapping = _reflection.GeneratedProtocolMessageType('TokenMapping', (_message.Message,), dict(
DESCRIPTOR = _TOKENMAPPING,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TokenMapping)
))
_sym_db.RegisterMessage(TokenMapping)
TokenMapper = _reflection.GeneratedProtocolMessageType('TokenMapper', (_message.Message,), dict(
DESCRIPTOR = _TOKENMAPPER,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TokenMapper)
))
_sym_db.RegisterMessage(TokenMapper)
TunedEmbedding = _reflection.GeneratedProtocolMessageType('TunedEmbedding', (_message.Message,), dict(
DESCRIPTOR = _TUNEDEMBEDDING,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TunedEmbedding)
))
_sym_db.RegisterMessage(TunedEmbedding)
TensorflowModelPerformance = _reflection.GeneratedProtocolMessageType('TensorflowModelPerformance', (_message.Message,), dict(
DESCRIPTOR = _TENSORFLOWMODELPERFORMANCE,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TensorflowModelPerformance)
))
_sym_db.RegisterMessage(TensorflowModelPerformance)
TensorflowModelPerformanceExample = _reflection.GeneratedProtocolMessageType('TensorflowModelPerformanceExample', (_message.Message,), dict(
DESCRIPTOR = _TENSORFLOWMODELPERFORMANCEEXAMPLE,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TensorflowModelPerformanceExample)
))
_sym_db.RegisterMessage(TensorflowModelPerformanceExample)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\020ai.eloquent.dataB\025TensorflowModelProtos'))
# @@protoc_insertion_point(module_scope)
| 42.594175 | 2,175 | 0.746034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,887 | 0.268372 |
40f93ae054bebaa285f8c2f48242d86d8297b31f | 8,460 | py | Python | python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
]
| 136 | 2015-01-03T04:03:23.000Z | 2022-02-07T11:08:57.000Z | python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
]
| 11 | 2017-02-09T20:05:04.000Z | 2021-01-24T22:25:59.000Z | python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
]
| 26 | 2015-08-18T12:11:02.000Z | 2020-12-19T01:53:31.000Z | """Classes representing color entries and mappings."""
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import annotations
# Standard Library
import re
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
import hou
# =============================================================================
# CLASSES
# =============================================================================
class StyleConstant:
"""This class represents a named constant style.
:param name: The constant's name.
:param color: The constant's color.
:param color_type: The color type.
:param shape: The constant's shape.
:param file_path: The path to the definition file.
:return:
"""
def __init__(
self,
name: str,
color: hou.Color,
color_type: str,
shape: Optional[str] = None,
file_path: Optional[str] = None,
):
self._color = color
self._color_type = color_type
self._shape = shape
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, StyleConstant):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __ne__(self, other):
if not isinstance(other, StyleConstant):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<StyleConstant {} ({})>".format(self.name, self.color)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def color(self) -> hou.Color:
"""The mapped color."""
return self._color
# -------------------------------------------------------------------------
@property
def color_type(self) -> str:
"""The mapped color type."""
return self._color_type
# -------------------------------------------------------------------------
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
# -------------------------------------------------------------------------
@property
def name(self) -> str:
"""The name the color is mapped to."""
return self._name
# -------------------------------------------------------------------------
@property
def shape(self) -> Optional[str]:
"""The mapped shape."""
return self._shape
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class StyleRule:
"""This class represents a color application bound to a name.
:param name: The rule's name.
:param color: The rule's color.
:param color_type: The rule's color type.
:param shape: The rule's shape.
:param file_path: The path to the definition file.
:return:
"""
def __init__(
self,
name: str,
color: hou.Color,
color_type: str,
shape: Optional[str] = None,
file_path: Optional[str] = None,
):
self._color = color
self._color_type = color_type
self._shape = shape
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, StyleRule):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __ne__(self, other):
if not isinstance(other, StyleRule):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<StyleRule {} ({})>".format(self.name, self.color)
def __str__(self):
value = self._get_typed_color_value()
components = [re.sub("\\.*0+$", "", "{:0.3f}".format(val)) for val in value]
return "(" + ", ".join(components) + ")"
# -------------------------------------------------------------------------
# NON-PUBLIC METHODS
# -------------------------------------------------------------------------
def _get_typed_color_value(self) -> Tuple[float]:
"""Get the appropriately typed color values.
:return: The color value in the correct type.
"""
to_func = getattr(self.color, self.color_type.lower())
return to_func()
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def color(self) -> hou.Color:
"""The mapped color."""
return self._color
@property
def color_type(self) -> str:
"""The mapped color type."""
return self._color_type
@property
def shape(self) -> Optional[str]:
"""The mapped shape name."""
return self._shape
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
@property
def name(self) -> str:
"""The name the style is mapped to."""
return self._name
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class ConstantRule:
"""This class represents a style application bound to a named constant.
:param name: The rule's name.
:param constant_name: The constant name.
:param file_path: The path to the definition file.
:return:
"""
def __init__(self, name: str, constant_name: str, file_path: Optional[str] = None):
self._constant_name = constant_name
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, ConstantRule):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash((self.constant_name, self.name))
def __ne__(self, other):
if not isinstance(other, ConstantRule):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<ConstantRule {} ({})>".format(self.name, self.constant_name)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def constant_name(self) -> str:
"""The mapped constant."""
return self._constant_name
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
@property
def name(self) -> str:
"""The name the style is mapped to."""
return self._name
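# A minimal usage sketch, not part of the original module; it assumes a running
# Houdini session so that the `hou` module is importable, and a hypothetical
# node path "/obj/geo1".
#
#   import hou
#   error_style = StyleConstant("error", hou.Color((1, 0, 0)), "RGB", shape="circle")
#   error_style.apply_to_node(hou.node("/obj/geo1"))  # sets the node's color and shape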
| 28.389262 | 87 | 0.450473 | 7,904 | 0.934279 | 0 | 0 | 1,463 | 0.172931 | 0 | 0 | 3,959 | 0.467967 |
40f9e62c7e463cdddcd04524566bd56b8cb59940 | 1,407 | py | Python | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
]
| null | null | null | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
]
| null | null | null | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
]
| null | null | null | import math
import numpy as np
# return an array K of size (d_max, d_max, N, N), K[i][j] is kernel value of depth i + 1 with first j layers fixed
def kernel_value_batch(X, d_max):
K = np.zeros((d_max, d_max, X.shape[0], X.shape[0]))
for fix_dep in range(d_max):
S = np.matmul(X, X.T)
H = np.zeros_like(S)
for dep in range(d_max):
if fix_dep <= dep:
H += S
K[dep][fix_dep] = H
L = np.diag(S)
P = np.clip(np.sqrt(np.outer(L, L)), a_min = 1e-9, a_max = None)
Sn = np.clip(S / P, a_min = -1, a_max = 1)
S = (Sn * (math.pi - np.arccos(Sn)) + np.sqrt(1.0 - Sn * Sn)) * P / 2.0 / math.pi
H = H * (math.pi - np.arccos(Sn)) / 2.0 / math.pi
return K
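# Illustrative usage (not in the original file): for an (N, d) data matrix X,
#   kernel_value(X, d_max=3, fix_dep=0)  -> (N, N) NTK of a depth-3 network
#   kernel_value_batch(X, d_max=3)       -> (3, 3, N, N), one kernel per (depth, fixed-layers) pair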
# return an array K of size (N, N), depth d_max, first fix_dep layers fixed
def kernel_value(X, d_max, fix_dep):
K = np.zeros((d_max, X.shape[0], X.shape[0]))
S = np.matmul(X, X.T)
H = np.zeros_like(S)
for dep in range(d_max):
if fix_dep <= dep:
H += S
K[dep] = H
L = np.diag(S)
P = np.clip(np.sqrt(np.outer(L, L)), a_min = 1e-9, a_max = None)
Sn = np.clip(S / P, a_min = -1, a_max = 1)
S = (Sn * (math.pi - np.arccos(Sn)) + np.sqrt(1.0 - Sn * Sn)) * P / 2.0 / math.pi
H = H * (math.pi - np.arccos(Sn)) / 2.0 / math.pi
    return K[d_max - 1]
 | 40.2 | 115 | 0.509595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.135039 |
40fbdeebc9d14240c78ed2bb4a08d9c0a87ce714 | 1,509 | py | Python | nlpproject/main/words.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
]
| null | null | null | nlpproject/main/words.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
]
| null | null | null | nlpproject/main/words.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
]
| null | null | null | import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.tokenize import sent_tokenize , word_tokenize
import glob
import re
import os
import numpy as np
import sys
nltk.download('stopwords')
nltk.download('punkt')
Stopwords = set(stopwords.words('english'))
all_words = []
dict_global = {}
file_folder = 'main/documents/*'
idx = 1
files_with_index = {}
def finding_all_unique_words_and_freq(words):
words_unique = []
word_freq = {}
for word in words:
if word not in words_unique:
words_unique.append(word)
for word in words_unique:
word_freq[word] = words.count(word)
return word_freq
def finding_freq_of_word_in_doc(word,words):
    freq = words.count(word)
    return freq
def remove_special_characters(text):
    regex = re.compile(r'[^a-zA-Z0-9\s]')
text_returned = re.sub(regex,'',text)
return text_returned
for file in glob.glob(file_folder):
fname = file
    with open(file, "r") as doc_file:
        text = doc_file.read()
text = remove_special_characters(text)
    text = re.sub(re.compile(r'\d'), '', text)
sentences = sent_tokenize(text)
words = word_tokenize(text)
    words = [word for word in words if len(word) > 1]
words = [word.lower() for word in words]
words = [word for word in words if word not in Stopwords]
dict_global.update(finding_all_unique_words_and_freq(words))
files_with_index[idx] = os.path.basename(fname)
idx = idx + 1
unique_words_all = set(dict_global.keys())
| 28.471698 | 64 | 0.705765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.047714 |
40fd39b618c9cae6572cdfad086049a95c4b491f | 4,911 | py | Python | oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 8c97ee5a7d698cc989e1c8cab8cfe0db78491307 | [
"Apache-2.0"
]
| null | null | null | oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 8c97ee5a7d698cc989e1c8cab8cfe0db78491307 | [
"Apache-2.0"
]
| 10 | 2015-02-10T17:10:33.000Z | 2018-04-05T10:05:01.000Z | oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 8c97ee5a7d698cc989e1c8cab8cfe0db78491307 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2017 Ricardo Garcia Silva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the OSEO DescribeResultAccess operation"""
from __future__ import absolute_import
import logging
import datetime as dt
from django.core.exceptions import ObjectDoesNotExist
import pytz
import pyxb
import pyxb.bundles.opengis.oseo_1_0 as oseo
from .. import errors
from .. import models
from ..models import Order
from .. import utilities
logger = logging.getLogger(__name__)
def describe_result_access(request, user):
"""Implements the OSEO DescribeResultAccess operation.
This operation returns the location of the order items that are
ready to be downloaded by the user.
The DescribeResultAccess operation only reports on the availability
of order items that specify onlineDataAccess as their delivery option.
Parameters
----------
request: oseo.DescribeResultAccess
The incoming request
user: django.contrib.auth.User
The django user that placed the request
Returns
-------
    response: oseo.DescribeResultAccessResponse
        The response DescribeResultAccessResponse instance
"""
try:
order = Order.objects.get(id=request.orderId)
except ObjectDoesNotExist:
raise errors.InvalidOrderIdentifierError()
if order.user != user:
raise errors.AuthorizationFailedError
completed_items = get_order_completed_items(order, request.subFunction)
logger.debug("completed_items: {}".format(completed_items))
order.last_describe_result_access_request = dt.datetime.now(pytz.utc)
order.save()
response = oseo.DescribeResultAccessResponse(status='success')
item_id = None
for item in completed_items:
iut = oseo.ItemURLType()
iut.itemId = item_id or item.item_specification.item_id
iut.productId = oseo.ProductIdType(
identifier=item.identifier,
)
iut.productId.collectionId = utilities.get_collection_identifier(
item.item_specification.collection)
iut.itemAddress = oseo.OnLineAccessAddressType()
iut.itemAddress.ResourceAddress = pyxb.BIND()
iut.itemAddress.ResourceAddress.URL = item.url
iut.expirationDate = item.expires_on
response.URLs.append(iut)
return response
def get_order_completed_items(order, behaviour):
"""Get the completed order items for product orders.
Parameters
----------
order: oseoserver.models.Order
The order for which completed items are to be returned
behaviour: str
Either 'allReady' or 'nextReady', as defined in the OSEO
specification
Returns
--------
list
The completed order items for this order
"""
batches = order.batches.all()
all_complete = []
for batch in batches:
complete_items = get_batch_completed_items(batch, behaviour)
all_complete.extend(complete_items)
return all_complete
def get_batch_completed_items(batch, behaviour):
last_time = batch.order.last_describe_result_access_request
list_all_items = last_time is None or behaviour == batch.ALL_READY
order_delivery = batch.order.selected_delivery_option.delivery_type
batch_complete_items = []
queryset = batch.order_items.filter(
status=batch.order.COMPLETED
).order_by("item_specification__id")
for item in queryset:
item_spec = item.item_specification
try:
delivery = (
item_spec.selected_delivery_option.delivery_type)
except models.ItemSpecificationDeliveryOption.DoesNotExist:
delivery = order_delivery
if delivery != models.BaseDeliveryOption.ONLINE_DATA_ACCESS:
# describeResultAccess only applies to items that specify
# 'onlinedataaccess' as delivery type
logger.debug(
"item {} does not specify onlinedataaccess as its "
"delivery type, skipping item...".format(item)
)
continue
completed_since_last = (item.completed_on is None or
last_time is None or
item.completed_on >= last_time)
list_this_item = (
behaviour == batch.NEXT_READY and completed_since_last)
if list_all_items or list_this_item:
batch_complete_items.append(item)
return batch_complete_items
| 34.584507 | 76 | 0.696192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,851 | 0.376909 |
40feb012148cebe6483dabf37d02607456645a00 | 2,210 | py | Python | utils/decorator/dasyncio.py | masonsxu/red-flask | e8b978ee08072efcb2b3b7964065f272d8c875ab | [
"MIT"
]
| null | null | null | utils/decorator/dasyncio.py | masonsxu/red-flask | e8b978ee08072efcb2b3b7964065f272d8c875ab | [
"MIT"
]
| null | null | null | utils/decorator/dasyncio.py | masonsxu/red-flask | e8b978ee08072efcb2b3b7964065f272d8c875ab | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Asynchronous function decorators built on top of Python's threading module
import time
from functools import wraps
from threading import Thread
def async_call(fn):
    """Simple one-shot asynchronous execution: decorate a function and every call
    to it runs in a newly spawned thread (the call returns immediately).
    Args:
        :fn(function): the function to run asynchronously
    Return:
        :wrapper(function):
    """
    @wraps(fn)  # preserve the decorated function's name and docstring instead of the wrapper's
def wrapper(*args, **kwargs):
Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
def async_pool(pool_links):
    """Thread-pool style decorator with a configurable number of threads; useful
    for running the same task several times concurrently.
    Args:
        :pool_links(int): number of threads to start
    Returns:
        :sub_wrapper(function): the decorator
    """
def sub_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
for _ in range(0, pool_links):
Thread(target=func, args=args, kwargs=kwargs).start()
# func(*args, **kwargs)
return wrapper
return sub_wrapper
def async_retry(retry_times, space_time):
    """Automatic-retry decorator; it is not asynchronous by itself, but it can be
    nested inside async_call or async_pool (see the sketch below).
    Args:
        :retry_times(int): number of retry attempts
        :space_time(int): seconds to wait between attempts
    """
def sub_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
try_times = retry_times
while try_times > 0:
try:
func(*args, **kwargs)
break
except Exception as e:
print(e)
time.sleep(space_time)
try_times = try_times - 1
return wrapper
return sub_wrapper
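# A minimal sketch, not part of the original module, of the nesting that the
# async_retry docstring refers to: async_retry adds the retry loop, async_call
# moves the whole call onto its own thread.
#
# @async_call
# @async_retry(retry_times=3, space_time=1)
# def flaky_task():
#     ...  # retried up to 3 times, one second apart, off the main thread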
# Example / test code below
# @async_call
# def sleep2andprint():
# time.sleep(2)
# print('22222222')
# @async_pool(pool_links=5)
# def pools():
# time.sleep(1)
# print('hehe')
# @async_retry(retry_times=3, space_time=1)
# def check():
# a = 1
# b = '2'
# print(a + b)
# def check_all():
#     print('Testing the async_call component')
#     print('111111')
#     sleep2andprint()
#     print('333333')
#     print('If 333333 appears before 22222222, the async call worked')
#     print('Testing the async_pool component')
#     pools()
#     print('Five "hehe" lines within one second means success')
#     print('Testing the async_retry component')
#     check()
#     print('Three printed exceptions means success')
# print(check.__name__)
# print(sleep2andprint.__name__)
# print(pools.__name__)
# check_all()
| 19.557522 | 69 | 0.570588 | 0 | 0 | 0 | 0 | 757 | 0.28523 | 0 | 0 | 1,582 | 0.596081 |
40ff8361da6ba11cdb915421c267126671120831 | 872 | py | Python | oo/pessoa.py | wfs18/pythonbirds | aa3332763f9109c1fb7f1140a82a4b51c6402fdb | [
"MIT"
]
| null | null | null | oo/pessoa.py | wfs18/pythonbirds | aa3332763f9109c1fb7f1140a82a4b51c6402fdb | [
"MIT"
]
| null | null | null | oo/pessoa.py | wfs18/pythonbirds | aa3332763f9109c1fb7f1140a82a4b51c6402fdb | [
"MIT"
]
| null | null | null | class Person:
olhos = 2
def __init__(self, *children, name=None, year=0):
self.year = year
self.name = name
self.children = list(children)
def cumprimentar(self):
return 'Hello'
@staticmethod
def metodo_estatico():
return 123
@classmethod
def metodo_classe(cls):
return f'{cls} - {cls.olhos}'
if __name__ == '__main__':
p = Person()
eu = Person(name='marcio')
wes = Person(eu, name='Wesley')
print(p.cumprimentar())
    print(p.year)  # instance attribute
    print(p.name)  # data attribute
for filhos in wes.children:
print(filhos.year)
p.sobre = 'eu'
print(p.sobre)
del p.sobre
print(p.__dict__)
print(p.olhos)
print(eu.olhos)
print(p.metodo_estatico(), eu.metodo_estatico())
print(p.metodo_classe(), eu.metodo_classe())
| 22.947368 | 53 | 0.605505 | 372 | 0.426606 | 0 | 0 | 137 | 0.15711 | 0 | 0 | 101 | 0.115826 |
40ff943d89da7510322d2d4989457bad5b652c0f | 179 | py | Python | tests/integration/test_combined.py | jonathan-winn-geo/new-repo-example | 2fbc54b1d42c57ca1105b1066e47627832cc8185 | [
"BSD-3-Clause"
]
| null | null | null | tests/integration/test_combined.py | jonathan-winn-geo/new-repo-example | 2fbc54b1d42c57ca1105b1066e47627832cc8185 | [
"BSD-3-Clause"
]
| 85 | 2020-08-12T15:59:48.000Z | 2022-01-17T10:28:56.000Z | tests/integration/test_combined.py | cma-open/cmatools | ce5743dca7c5bf1f6ab7fe3af24893a65d0c2db7 | [
"BSD-3-Clause"
]
| null | null | null | """Test combined function."""
from cmatools.combine.combine import combined
def test_combined():
"""Test of combined function"""
assert combined() == "this hello cma"
| 17.9 | 45 | 0.692737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.424581 |
dc002c294c966dc124207adcde546a050c2603e1 | 1,323 | py | Python | elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 28502d8e81e67649976a6a3d2ccc198a5dd60631 | [
"Apache-2.0"
]
| null | null | null | elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 28502d8e81e67649976a6a3d2ccc198a5dd60631 | [
"Apache-2.0"
]
| 1 | 2018-10-05T14:38:22.000Z | 2018-10-05T14:38:22.000Z | elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 28502d8e81e67649976a6a3d2ccc198a5dd60631 | [
"Apache-2.0"
]
| 4 | 2018-10-05T12:11:42.000Z | 2022-01-31T10:31:26.000Z | """Enhancement to reformat `top_events_X`
from match in order to reformat and put it
back to be able to use in alert message.
New format:
top_events_keys_XXX -- contains array of corresponding key values defined in `top_count_keys`,
where `XXX` key from `top_count_keys` array.
top_events_values_XXX -- contains array of corresponding counts.
Example:
Original:
{"top_events_KEY.NAME":{"key_value1": 10, "key_value2": 20}}
Reformatted:
{
"top_events_keys_KEY.NAME":["key_value1", "key_value2"]
"top_events_values_KEY.NAME":[10, 20]
}
Can be used in the rule like:
top_count_keys:
- 'KEY.NAME'
match_enhancements:
- 'elastalert_modules.top_count_keys_enhancement.Enhancement'
alert_text_args:
- top_events_keys_KEY.NAME[0]
"""
from elastalert.enhancements import BaseEnhancement
class Enhancement(BaseEnhancement):
def process(self, match):
top_count_keys = self.rule['top_count_keys']
if top_count_keys:
for k in top_count_keys:
key = "top_events_%s" % k
if match[key]:
filtered = {key: value for (key, value) in match[key].items() if key}
match["top_events_keys_%s" % k] = list(filtered.keys())
match["top_events_values_%s" % k] = list(filtered.values())
| 31.5 | 94 | 0.675737 | 501 | 0.378685 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.634165 |
dc0041528fa6c63f72d3e18e309efd1fc5282e9f | 4,054 | py | Python | nets.py | koreyou/SWEM-chainer | 728443fb5fc53409648d8bff3ae3e545fb9ac36c | [
"MIT"
]
| null | null | null | nets.py | koreyou/SWEM-chainer | 728443fb5fc53409648d8bff3ae3e545fb9ac36c | [
"MIT"
]
| null | null | null | nets.py | koreyou/SWEM-chainer | 728443fb5fc53409648d8bff3ae3e545fb9ac36c | [
"MIT"
]
| null | null | null | import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import reporter
embed_init = chainer.initializers.Uniform(.25)
def block_embed(embed, x, dropout=0.):
"""Embedding function followed by convolution
Args:
embed (callable): A :func:`~chainer.functions.embed_id` function
or :class:`~chainer.links.EmbedID` link.
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable, which
is a :math:`(B, L)`-shaped int array. Its first dimension
:math:`(B)` is assumed to be the *minibatch dimension*.
The second dimension :math:`(L)` is the length of padded
sentences.
dropout (float): Dropout ratio.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
of word embedding.
"""
e = embed(x)
e = F.dropout(e, ratio=dropout)
e = F.transpose(e, (0, 2, 1))
e = e[:, :, :, None]
return e
class SWEMBase(chainer.Chain):
"""The base class for SWEM (Simple Word-Embedding-based Models)
    This model embeds tokens into word embeddings, encodes the embeddings with
    a pooling operation (which must be implemented in derived classes) and
    applies a two-layer MLP; a minimal usage sketch follows the class
    definitions below.
Args:
n_class (int): The number of classes to be predicted.
n_vocab (int): The size of vocabulary.
emb_size (int): The number of units word embedding.
n_units (int): The number of units of MLP.
dropout (float): The dropout ratio.
"""
def __init__(self, n_class, n_vocab, emb_size, n_units,
dropout=0.2, initial_emb=None):
super(SWEMBase, self).__init__()
if initial_emb is None:
initial_emb = embed_init
with self.init_scope():
self.embed = L.EmbedID(
n_vocab, emb_size, ignore_label=-1, initialW=initial_emb)
self.l1 = L.Linear(None, n_units)
self.l2 = L.Linear(n_units, n_class)
self.dropout = dropout
def forward(self, xs):
return self.predict(xs)
def predict(self, xs, softmax=False, argmax=False):
x_block = chainer.dataset.convert.concat_examples(xs, padding=-1)
ex_block = block_embed(self.embed, x_block, self.dropout)
x_len = [len(x) for x in xs]
z = self.encode(ex_block, x_len)
h = F.relu(self.l1(F.dropout(z, self.dropout)))
logits = self.l2(F.dropout(h, self.dropout))
if softmax:
return F.softmax(logits).array
elif argmax:
return self.xp.argmax(logits.array, axis=1)
else:
return logits
def encode(self, ex_block, x_len):
raise NotImplementedError()
class SWEMhier(SWEMBase):
"""Hierarchical variation of SWEM (SWEM-hier)
Args:
n_class (int): The number of classes to be predicted.
n_vocab (int): The size of vocabulary.
emb_size (int): The number of units word embedding.
n_units (int): The number of units of MLP.
dropout (float): The dropout ratio.
"""
def __init__(self, n_class, n_vocab, emb_size, n_units,
dropout=0.2, initial_emb=None, window=5):
super(SWEMhier, self).__init__(
n_class, n_vocab, emb_size, n_units, dropout=dropout,
initial_emb=initial_emb)
self.window = window
def encode(self, ex_block, x_len):
if ex_block.shape[2] > self.window:
# no need for pooling when length is smaller than the window
ex_block = F.average_pooling_2d(ex_block, [self.window, 1], stride=1)
return F.max(F.squeeze(ex_block, -1), axis=2)
class SWEMconcat(SWEMBase):
def encode(self, ex_block, x_len):
emb_ave = F.sum(F.squeeze(ex_block, -1), axis=2) / self.xp.array(x_len)[:, None]
emb_max = F.max(F.squeeze(ex_block, -1), axis=2)
return F.concat((emb_max, emb_ave), axis=1)
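# A minimal smoke test, not part of the original module: the model sizes and the
# token ids below are arbitrary assumptions, chosen only to show how the SWEM
# variants above are instantiated and called.
if __name__ == '__main__':
    model = SWEMhier(n_class=2, n_vocab=100, emb_size=8, n_units=16)
    xs = [numpy.array([1, 5, 42], dtype=numpy.int32),
          numpy.array([7, 3], dtype=numpy.int32)]
    logits = model.forward(xs)  # chainer.Variable with shape (batch, n_class)
    print(logits.shape)  # expected: (2, 2)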
| 34.355932 | 88 | 0.620868 | 2,945 | 0.726443 | 0 | 0 | 0 | 0 | 0 | 0 | 1,668 | 0.411445 |
dc00b897bcfec50069749b3f13a2b807436fbaab | 904 | py | Python | src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | d195e53824bc5d13ded97112a8c388e05775666c | [
"MIT"
]
| null | null | null | src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | d195e53824bc5d13ded97112a8c388e05775666c | [
"MIT"
]
| null | null | null | src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | d195e53824bc5d13ded97112a8c388e05775666c | [
"MIT"
]
| null | null | null | from flask_login import UserMixin
class Users(UserMixin):
    # A mock in-memory database of users
user_database = {"kayttaja": ("kayttaja", "salasana"),
"tunnus": ("tunnus", "passu")}
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def get_username(self):
return self.username
@classmethod
def get_password(cls, username):
username, password = cls.user_database.get(username)
return password
@classmethod
def get_username(cls, username):
username, password = cls.user_database.get(username)
return username
def is_active(self):
return True
def get_id(self):
return self.id
def roles(self):
return ["ADMIN", "USER"]
@classmethod
def get(cls, id):
return cls.user_database.get(id)
| 23.179487 | 60 | 0.634956 | 872 | 0.959296 | 0 | 0 | 343 | 0.377338 | 0 | 0 | 114 | 0.125413 |
dc00c9713e8a8c4632743cc1feb90632ddde0bf5 | 13,726 | py | Python | artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 6c1a25db5be459a1053798f1c75bfbd26863ed08 | [
"MIT"
]
| null | null | null | artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 6c1a25db5be459a1053798f1c75bfbd26863ed08 | [
"MIT"
]
| null | null | null | artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 6c1a25db5be459a1053798f1c75bfbd26863ed08 | [
"MIT"
]
| 1 | 2021-08-11T09:09:53.000Z | 2021-08-11T09:09:53.000Z | """
matmul autotvm
[batch,in_dim] x [in_dim,out_dim]
search_matmul_config(batch,in_dim,out_dim,num_trials):
input: batch,in_dim,out_dim,num_trials
[batch,in_dim] x [in_dim,out_dim]
num_trials: num of trials, default: 1000
output: log (json format)
use autotvm to search configs for the matmul
lookup_matmul_config():
find a proper matmul config
note: trade off kernel's performance and grid & block size
launch_matmul_from_config(config):
input: config (json string)
usage:
1. use search_matmul_config(batch,in_dim,out_dim,num_trials) to search configs
2. use lookup_matmul_config() to get a proper config
3. write the config (in json format) to "matmul_config.json"
4. use launch_matmul_from_config("matmul_config.json") to print the matmul kernel code
"""
import numpy as np
import tvm
import logging
import sys
from tvm import autotvm
import topi
import json
import os
from topi.util import get_const_tuple
import tensorflow as tf
flags = tf.flags
flags.DEFINE_string("input_path", "", "path of input file")
flags.DEFINE_string("autotvm_log", "../autotvm_logs/all_tuned_tilling_dense_nn.1000.log", "path of autotvm tuning log")
flags.DEFINE_string("tvm_profile_log",
"/tmp/tvm_profile.log", "path of tvm profile")
flags.DEFINE_string("output_path", "", "path of output file")
FLAGS = flags.FLAGS
@autotvm.template
def tvm_matmul_tune_op(batch, in_dim, out_dim):
"""
autotvm tuning template
D=A*B
[batch, in_dim] x [in_dim, out_dim]
"""
A = tvm.placeholder((batch, in_dim), name='A', dtype="float32")
B = tvm.placeholder((in_dim, out_dim), name='B', dtype="float32")
k = tvm.reduce_axis((0, in_dim), name='k')
C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum(
A[i, k] * B[k, j], axis=k), name='C')
cfg = autotvm.get_config()
s = tvm.create_schedule(C.op)
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
y, x = C.op.axis
k = CC.op.reduce_axis[0]
cfg.define_split('tile_k', cfg.axis(k), num_outputs=3)
ko, kt, ki = cfg['tile_k'].apply(s, CC, k)
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
thread_x = tvm.thread_axis('threadIdx.x')
thread_y = tvm.thread_axis('threadIdx.y')
cfg.define_split('tile_y', cfg.axis(y), num_outputs=4)
cfg.define_split('tile_x', cfg.axis(x), num_outputs=4)
by, tyz, ty, yi = cfg['tile_y'].apply(s, C, y)
bx, txz, tx, xi = cfg['tile_x'].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, tvm.thread_axis('vthread'))
s[C].bind(txz, tvm.thread_axis('vthread'))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
s[stage].double_buffer()
for stage in [AA, BB]:
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg['tile_y'].size[2])
tx, xi = s[stage].split(tx, nparts=cfg['tile_x'].size[2])
_, xi = s[stage].split(xi, factor=4)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob('auto_unroll_max_step', [512, 1500])
s[C].pragma(by, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val)
s[C].pragma(by, 'unroll_explicit', False)
cfg.add_flop(batch * in_dim * out_dim * 2)
return s, [A, B, C]
def search_matmul_config(batch, in_dim, out_dim, num_trials):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(
batch, in_dim, out_dim), target='cuda')
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)
)
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = "tuned_kernels/" + op_name + ".log"
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(n_trial=num_trials, measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)])
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print('\nBest config:')
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
def lookup_matmul_config(batch, in_dim, out_dim, output_log):
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = FLAGS.autotvm_log
with open(log_name, "r") as fin:
log_lines = fin.readlines()
# log_records=tvm.autotvm.record.load_from_file(log_name)
log_records_all = []
log_records = []
for line in log_lines:
line = line.rstrip('\n')
# print(line)
record_json = json.loads(line)
tm = record_json['r'][0][0]
if tm > 10000000: # filter bad configs
continue
if record_json['i'][2][0] != batch or record_json['i'][2][1] != in_dim or record_json['i'][2][2] != out_dim: # filter other configs
continue
griddim_x = record_json['i'][5]["e"][2][2][0]
if griddim_x == -1:
griddim_x = int(out_dim / record_json['i'][5]["e"][2][2][1] / record_json['i'][5]["e"][2][2][2] / record_json['i'][5]["e"][2][2][3])
griddim_y = record_json['i'][5]["e"][1][2][0]
if griddim_y == -1:
griddim_y = int(batch / record_json['i'][5]["e"][1][2][1] / record_json['i'][5]["e"][1][2][2] / record_json['i'][5]["e"][1][2][3])
record = {"time": tm,
"grid": [griddim_x, griddim_y, 1],
"block": [record_json['i'][5]["e"][2][2][2], record_json['i'][5]["e"][1][2][2], 1],
"config": line}
log_records_all.append((tm, record))
# if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
# continue
# if record["grid"][0] * record["grid"][1] * record["grid"][2] < 16:
# continue
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * record["block"][0] * record["block"][1] * record["block"][2]
if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * (record["block"][0] * record["block"][1] * record["block"][2] / 32 + 1) * 32
record.update({"opt": opt})
log_records.append((tm, record))
# print(log_records[-1])
log_records_all.sort(key=lambda item: item[0])
log_records.sort(key=lambda item: item[0])
print(op_name)
log_records_fast = log_records[0:100]
# log_records_fast = log_records
log_records = []
for i in range(len(log_records_fast)):
log_records.append((log_records_fast[i][1]["opt"], log_records_fast[i][1]))
log_records.sort(key=lambda item: item[0])
print("fastest kernel:", log_records_all[0][1]["time"], "grid:", log_records_all[0][1]["grid"], "block:", log_records_all[0][1]["block"])
# print(log_records_fast[0][1]["config"])
print("efficient kernel:",log_records[0][1]["time"], "grid:", log_records[0][1]["grid"], "block:", log_records[0][1]["block"])
with open(output_log, 'a') as fout:
fout.write(log_records[0][1]["config"] + "\n")
def launch_matmul_from_config(config_json_path):
with open(config_json_path, "r") as fin:
config = json.load(fin)
batch = config["i"][2][0]
in_dim = config["i"][2][1]
out_dim = config["i"][2][2]
# print(batch, in_dim, out_dim)
task = autotvm.task.create(
tvm_matmul_tune_op, args=(batch, in_dim, out_dim), target='cuda')
# dispatch_context = autotvm.task.ApplyConfig(config)
dispatch_context = autotvm.apply_history_best(config_json_path)
best_config = dispatch_context.query(task.target, task.workload)
print("Using pretuned config:")
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
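# A minimal sketch of the workflow described in the module docstring, using the
# functions defined above; the 4 x 256 x 256 problem size is only an
# illustrative assumption.
#
#   search_matmul_config(4, 256, 256, 1000)                  # step 1: tune and log configs
#   lookup_matmul_config(4, 256, 256, "matmul_config.json")  # steps 2-3: select a config and write it out
#   launch_matmul_from_config("matmul_config.json")          # step 4: print the generated CUDA kernel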
output_log_file = "matmul_nn_autotvm_select_result.log"
if os.path.exists(output_log_file):
os.remove(output_log_file)
lookup_matmul_config(4, 256, 256, output_log_file)
lookup_matmul_config(16, 256, 256, output_log_file)
def tune_dot_codegen(m, k, n, log_path):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(m, k, n), target='cuda')
op_name = "tuned_dot_nn_op_float_m%d_k%d_n%d" % (m, k, n)
# log_name = "tuned_dot_op_float_%d_%d_%d" % (m, k, n)
# log_name = "tuned_kernels/" + log_name + ".log"
log_name = log_path
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(m,k,n)
func = tvm.build(s, arg_bufs, 'cuda', name=op_name)
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=[m,k]).astype("float32")
w_np = np.random.uniform(size=[k,n]).astype("float32")
c_np = np.zeros([m,n]).astype("float32")
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
c = tvm.nd.array(c_np, ctx)
kernel_code = func.imported_modules[0].get_source()
func(a, w, c)
return kernel_code
def extract_ops_from_log():
dot_ops = []
dot_ops.append({'arg0_shape': [4, 256], 'arg1_shape': [256, 256], 'out_shape': [4, 256], 'transpose_A': False, 'transpose_B': False})
dot_ops.append({'arg0_shape': [16, 256], 'arg1_shape': [256, 256], 'out_shape': [16, 256], 'transpose_A': False, 'transpose_B': False})
return dot_ops
def get_tvm_topi_func_name(m, k, n):
func_name = "tuned_dot_nn_op_float_m%d_k%d_n%d_kernel0" % (m, k, n)
return func_name
def extract_tvm_profiling_from_log(log_path):
lines = open(log_path).readlines()
deduped_lines = list(set(lines))
# print(deduped_lines)
# print("#convs:", len(lines), "#deduped_convs:", len(deduped_lines))
profiling_result = {}
for line in deduped_lines:
items = line.rstrip('\n').split('|')
profiling_data = {
'gridDim': [int(items[1]), int(items[2]), int(items[3])],
'blockDim': [int(items[4]), int(items[5]), int(items[6])]
}
profiling_result.update({items[0]: profiling_data})
return profiling_result
def generate_db_topi_ops(dot_ops, log_path):
topi_ops = []
tvm_profiling_log_path = FLAGS.tvm_profile_log
if os.path.exists(tvm_profiling_log_path):
os.remove(tvm_profiling_log_path)
for dot_op in dot_ops:
m = dot_op['arg0_shape'][0]
k = dot_op['arg0_shape'][1]
n = dot_op['arg1_shape'][1]
topi_code = tune_dot_codegen(m, k, n, log_path)
topi_op = {
'tvm_func_name': get_tvm_topi_func_name(m, k, n),
'op_type': 'Dot',
'parameters': dot_op,
'code': topi_code
}
topi_ops.append(topi_op)
profiling_result = extract_tvm_profiling_from_log(tvm_profiling_log_path)
for topi_op in topi_ops:
tvm_func_name = topi_op['tvm_func_name']
topi_op.update(profiling_result[tvm_func_name])
return topi_ops
dot_ops = extract_ops_from_log()
topi_ops = generate_db_topi_ops(dot_ops, output_log_file)
with open(FLAGS.output_path, 'w') as fout:
json.dump(topi_ops, fout)
os.remove(output_log_file)
 | 35.4677 | 159 | 0.633396 | 0 | 0 | 0 | 0 | 2,431 | 0.177109 | 0 | 0 | 3,245 | 0.236413 |
dc00d047f5d2f7ce7b721b7c45d3556d9ebe4b5d | 2,240 | py | Python | src/olympia/activity/admin.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
]
| null | null | null | src/olympia/activity/admin.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
]
| null | null | null | src/olympia/activity/admin.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
]
| null | null | null | from django.contrib import admin
from .models import ActivityLog, ReviewActionReasonLog
from olympia.reviewers.models import ReviewActionReason
class ActivityLogAdmin(admin.ModelAdmin):
list_display = (
'created',
'user',
'__str__',
)
raw_id_fields = ('user',)
readonly_fields = (
'created',
'user',
'__str__',
)
date_hierarchy = 'created'
fields = (
'user',
'created',
'__str__',
)
view_on_site = False
def lookup_allowed(self, lookup, value):
if lookup == 'addonlog__addon':
return True
return super().lookup_allowed(lookup, value)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class ReviewActionReasonLogAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
fields = (
'created',
'activity_log',
'activity_log__user__email',
'reason',
)
list_display = (
'created',
'activity_log',
'reason',
'activity_log__user__email',
)
list_filter = ('reason',)
list_select_related = ('activity_log__user',)
readonly_fields = (
'created',
'activity_log',
'activity_log__user__email',
)
search_fields = ('activity_log__user__email',)
view_on_site = False
def activity_log__user__email(self, obj):
return obj.activity_log.user.email
def has_add_permission(self, request):
return False
def get_form(self, request, obj=None, **kwargs):
form = super(ReviewActionReasonLogAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['reason'].widget.can_add_related = False
form.base_fields['reason'].widget.can_change_related = False
form.base_fields['reason'].empty_label = None
form.base_fields['reason'].choices = [
(reason.id, reason.labelled_name())
for reason in ReviewActionReason.objects.all()
]
return form
admin.site.register(ActivityLog, ActivityLogAdmin)
admin.site.register(ReviewActionReasonLog, ReviewActionReasonLogAdmin)
| 26.666667 | 87 | 0.634821 | 1,965 | 0.877232 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.166071 |
dc01dc4bc345b863361dbfcbff2946a74c676b49 | 1,261 | py | Python | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
]
| 4 | 2021-04-22T19:19:13.000Z | 2022-02-10T09:26:58.000Z | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
]
| null | null | null | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
]
| 1 | 2022-02-03T19:29:46.000Z | 2022-02-03T19:29:46.000Z | import subprocess
import sys
import time
import os
#############################
# COLORING YOUR SHELL #
#############################
R = "\033[1;31m" #
B = "\033[1;34m" #
Y = "\033[1;33m" #
G = "\033[1;32m" #
RS = "\033[0m" #
W = "\033[1;37m" #
#############################
os.system("clear")
print(" ")
print(R + "[" + G + "User Summary " + R + "]" + RS)
print("""
Shows extra information about IPv6 addresses, such as embedded MAC or IPv4 addresses when available.
Some IP address formats encode extra information; for example some IPv6 addresses encode an IPv4 address or MAC address.
The script can decode these address formats:
• IPv4-compatible IPv6 addresses,
• IPv4-mapped IPv6 addresses,
• Teredo IPv6 addresses,
• 6to4 IPv6 addresses,
• IPv6 addresses using an EUI-64 interface ID,
• IPv4-embedded IPv6 addresses,
• IPv4-translated IPv6 addresses and
• ISATAP Modified EUI-64 IPv6 addresses.
See RFC 4291 for general IPv6 addressing architecture and the definitions of some terms.
""")
print(" ")
webb = input("" + RS + "[" + B + "ENTER TARGET " + R + "WEBSITE " + Y + "IP" + RS + "]" + G + ": " + RS)
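# -sV enables service/version detection and -sC runs nmap's default scripts against the target.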
subprocess.check_call(['nmap', '-sV', '-sC', webb])
| 32.333333 | 120 | 0.57732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 949 | 0.743148 |
dc022c593385d4751afcdb05a041b275d5e72149 | 2,041 | py | Python | tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | [
"Apache-2.0"
]
| null | null | null | tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | [
"Apache-2.0"
]
| null | null | null | tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | [
"Apache-2.0"
]
| null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities.upgrade_checkpoint import upgrade_checkpoint
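# Each parametrized case pairs a legacy flat checkpoint dict with the expected callback-namespaced layout produced by upgrade_checkpoint.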
@pytest.mark.skip
@pytest.mark.parametrize(
"old_checkpoint, new_checkpoint",
[
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best": 0.34},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_score": 0.34}}},
),
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best_model_score": 0.99},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_score": 0.99}}},
),
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best_model_path": 'path'},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_path": 'path'}}},
),
(
{"epoch": 1, "global_step": 23, "early_stop_callback_wait": 2, "early_stop_callback_patience": 4},
{"epoch": 1, "global_step": 23, "callbacks": {EarlyStopping: {"wait_count": 2, "patience": 4}}},
),
],
)
def test_upgrade_checkpoint(tmpdir, old_checkpoint, new_checkpoint):
filepath = os.path.join(tmpdir, "model.ckpt")
torch.save(old_checkpoint, filepath)
upgrade_checkpoint(filepath)
updated_checkpoint = torch.load(filepath)
assert updated_checkpoint == new_checkpoint
| 40.82 | 110 | 0.677609 | 0 | 0 | 0 | 0 | 1,265 | 0.619794 | 0 | 0 | 1,065 | 0.521803 |
dc02390fc5cc8acb642fb9142268442719d14ed1 | 4,258 | py | Python | rnn/train_rnn_oneflow.py | XinYangDong/models | ea1ab12add5812c8a3e14ecfad6b39fa56a779a9 | [
"Apache-2.0"
]
| null | null | null | rnn/train_rnn_oneflow.py | XinYangDong/models | ea1ab12add5812c8a3e14ecfad6b39fa56a779a9 | [
"Apache-2.0"
]
| null | null | null | rnn/train_rnn_oneflow.py | XinYangDong/models | ea1ab12add5812c8a3e14ecfad6b39fa56a779a9 | [
"Apache-2.0"
]
| null | null | null | import oneflow.experimental as flow
from oneflow.experimental import optim
import oneflow.experimental.nn as nn
from utils.dataset import *
from utils.tensor_utils import *
from models.rnn_model import RNN
import argparse
import time
import math
import numpy as np
flow.env.init()
flow.enable_eager_execution()
def _parse_args():
parser = argparse.ArgumentParser("flags for compare oneflow and pytorch speed")
parser.add_argument(
"--seed", nargs="?", type=int, const=0, help="specify random seed"
)
return parser.parse_args()
def train(category_tensor, line_tensor, rnn, criterion, of_sgd):
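    # Feed the name to the RNN one character at a time, then backpropagate the loss computed on the final output and take one SGD step.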
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
loss = criterion(output, category_tensor)
loss.backward()
of_sgd.step()
of_sgd.zero_grad()
return output, loss.numpy()[0]
# refer to: https://blog.csdn.net/Nin7a/article/details/107631078
def topk_(matrix, K, axis=1):
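    # Top-K values (sorted descending) and their indices along `axis`, using np.argpartition to avoid a full sort.
    # e.g. topk_(np.array([[3, 1, 2]]), K=2) returns (array([[3, 2]]), array([[0, 2]])).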
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis])
topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :]
topk_data = matrix[topk_index, row_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[topk_index_sort, row_index]
topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index]
else:
column_index = np.arange(matrix.shape[1 - axis])[:, None]
topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K]
topk_data = matrix[column_index, topk_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[column_index, topk_index_sort]
topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort]
return topk_data_sort, topk_index_sort
def categoryFromOutput(output):
top_n, top_i = topk_(output.numpy(), 1)
category_i = top_i[0][0]
return all_categories[category_i], category_i
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return now, "%ds" % s
n_iters = 100000
print_every = 500
plot_every = 1000
learning_rate = (
0.005 # If you set this too high, it might explode. If too low, it might not learn
)
# Decrease the learning rate if the loss goes to NaN; increase the learning rate if it learns too slowly.
def main(args):
random.seed(args.seed)
dataset_path = "./data/names"
n_categories = processDataset(dataset_path)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
criterion = nn.NLLLoss()
rnn.to("cuda")
criterion.to("cuda")
of_sgd = optim.SGD(rnn.parameters(), lr=learning_rate)
# Keep track of losses for plotting
current_loss = 0
all_losses = []
start = time.time()
samples = 0.0
correct_guess = 0.0
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor, rnn, criterion, of_sgd)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
start, time_str = timeSince(start)
guess, guess_i = categoryFromOutput(output)
correct = "✓" if guess == category else "✗ (%s)" % category
if correct == "✓":
correct_guess += 1
samples += 1
print(
"iter: %d / %f%%, time_for_every_%d_iter: %s, loss: %.4f, predict: %s / %s, correct? %s, acc: %f"
% (
iter,
float(iter) / n_iters * 100,
print_every,
time_str,
loss,
line,
guess,
correct,
correct_guess / samples,
)
)
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
writer = open("all_losses.txt", "w")
for o in all_losses:
writer.write("%f\n" % o)
writer.close()
if __name__ == "__main__":
args = _parse_args()
main(args)
| 30.198582 | 113 | 0.615782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 606 | 0.14212 |
dc0343ffb97fa10db053e01b9eed2a7adc7c042b | 4,763 | py | Python | metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
]
| 2 | 2020-03-05T08:33:05.000Z | 2021-05-31T12:54:40.000Z | metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
]
| 5 | 2021-12-12T21:04:10.000Z | 2022-01-22T21:05:58.000Z | metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
]
| 2 | 2020-04-18T22:45:03.000Z | 2020-06-25T14:36:20.000Z | import json
import os
from ..metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL
from .datastore_storage import CloseAfterUse, DataStoreStorage
from .exceptions import DataException
class LocalStorage(DataStoreStorage):
TYPE = "local"
METADATA_DIR = "_meta"
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
result = DATASTORE_SYSROOT_LOCAL
if result is None:
try:
# Python2
current_path = os.getcwdu()
except: # noqa E722
current_path = os.getcwd()
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
check_dir = os.path.realpath(check_dir)
orig_path = check_dir
top_level_reached = False
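            # Walk up the directory tree until an existing DATASTORE_LOCAL_DIR is found or the filesystem root is reached.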
while not os.path.isdir(check_dir):
new_path = os.path.dirname(current_path)
if new_path == current_path:
top_level_reached = True
break # We are no longer making upward progress
current_path = new_path
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
if top_level_reached:
if create_on_absent:
# Could not find any directory to use so create a new one
echo(
"Creating local datastore in current directory (%s)" % orig_path
)
os.mkdir(orig_path)
result = orig_path
else:
return None
else:
result = check_dir
else:
result = os.path.join(result, DATASTORE_LOCAL_DIR)
return result
@staticmethod
def _makedirs(path):
try:
os.makedirs(path)
except OSError as x:
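            # errno 17 is EEXIST: the directory already exists, which is fine.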
if x.errno == 17:
return
else:
raise
def is_file(self, paths):
results = []
for path in paths:
full_path = self.full_uri(path)
results.append(os.path.isfile(full_path))
return results
def info_file(self, path):
file_exists = self.is_file([path])[0]
if file_exists:
full_meta_path = "%s_meta" % self.full_uri(path)
try:
with open(full_meta_path, "r") as f:
return True, json.load(f)
except OSError:
return True, None
return False, None
def size_file(self, path):
file_exists = self.is_file([path])[0]
if file_exists:
path = self.full_uri(path)
try:
return os.path.getsize(path)
except OSError:
return None
return None
def list_content(self, paths):
results = []
for path in paths:
if path == self.METADATA_DIR:
continue
full_path = self.full_uri(path)
try:
for f in os.listdir(full_path):
if f == self.METADATA_DIR:
continue
results.append(
self.list_content_result(
path=self.path_join(path, f),
is_file=self.is_file([self.path_join(path, f)])[0],
)
)
            except FileNotFoundError:
pass
return results
def save_bytes(self, path_and_bytes_iter, overwrite=False, len_hint=0):
for path, obj in path_and_bytes_iter:
if isinstance(obj, tuple):
byte_obj, metadata = obj
else:
byte_obj, metadata = obj, None
full_path = self.full_uri(path)
if not overwrite and os.path.exists(full_path):
continue
LocalStorage._makedirs(os.path.dirname(full_path))
with open(full_path, mode="wb") as f:
f.write(byte_obj.read())
if metadata:
with open("%s_meta" % full_path, mode="w") as f:
json.dump(metadata, f)
def load_bytes(self, paths):
def iter_results():
for path in paths:
full_path = self.full_uri(path)
metadata = None
if os.path.exists(full_path):
if os.path.exists("%s_meta" % full_path):
with open("%s_meta" % full_path, mode="r") as f:
metadata = json.load(f)
yield path, full_path, metadata
else:
yield path, None, None
return CloseAfterUse(iter_results())
| 34.766423 | 88 | 0.515431 | 4,561 | 0.95759 | 563 | 0.118203 | 1,667 | 0.34999 | 0 | 0 | 233 | 0.048919 |
dc03c7056424871c088a27b25411021c5ef255a8 | 669 | py | Python | src/Models/tools/quality.py | rahlk/MOOSE | e45b64cf625bb90aa8c1c24ab1c8f52ab485a316 | [
"MIT"
]
| null | null | null | src/Models/tools/quality.py | rahlk/MOOSE | e45b64cf625bb90aa8c1c24ab1c8f52ab485a316 | [
"MIT"
]
| 9 | 2015-09-14T21:07:06.000Z | 2015-12-08T01:38:08.000Z | src/Models/tools/quality.py | rahlk/MAPGen | 25bc1a84f07e30ab0dbb638cd2aa1ce416c510ff | [
"MIT"
]
| null | null | null | from __future__ import division, print_function
from scipy.spatial.distance import euclidean
from numpy import mean
from pdb import set_trace
class measure:
def __init__(self,model):
self.mdl = model
def convergence(self, obtained):
"""
Calculate the convergence metric with respect to ideal
solutions
"""
gammas=[]
ideals = self.mdl.get_pareto()
def nearest(a,lst):
# dist = euclidean(a, sorted(lst, key=lambda x:euclidean(x,a))[0])
# set_trace()
return euclidean(a, sorted(lst, key=lambda x:euclidean(x,a))[0])
gammas = [nearest(self.mdl.solve(member),ideals) for member in obtained]
return mean(gammas) | 30.409091 | 76 | 0.693572 | 526 | 0.786248 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.243647 |
dc0442493abb70d64838a4469e6b402804bec72d | 2,499 | py | Python | script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
]
| 1 | 2021-08-03T03:07:41.000Z | 2021-08-03T03:07:41.000Z | script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
]
| null | null | null | script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
]
| null | null | null |
'''
pip3 install BeautifulSoup4
pip3 install pypinyin
'''
import requests
import re
import os
import shutil
from bs4 import BeautifulSoup
from util import Profile, write_poem
def parse_poem_profile_td(td):
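    # A <td> cell holds one poem: pull the link and title from its <a>, then read the author from the text left after removing that <a>.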
container = td.find('div')
if container is None:
container = td
title_a = container.find('a')
if title_a is None:
# maybe appears on the last page
return None
href = title_a.get('href')
title = title_a.get('title')
title = title.replace('\r\n', '').replace(
'————', '——').replace(',', ',').replace('(长诗节选)', '_长诗节选').strip()
title_a.extract()
# Wrong name 席慕蓉
author_text = container.text.replace('席慕蓉', '席慕容').strip()
author = re.findall(r'(.*)\((\d*?)\)', author_text, re.S)[0][0]
return Profile(href=href, title=title, author=author)
def read_poem_list(page):
'''
Read poem list
@param page:int
@return (poem_list:Profile[], has_next_page:Boolean)
'''
page_url = 'http://www.chinapoesy.com/XianDaiList_' + str(page) + '.html'
response = requests.get(page_url)
    if response.status_code != 200:
return ([], False)
text = response.text
soup = BeautifulSoup(text, features='lxml')
# profiles
main_table = soup.find('table', id='DDlTangPoesy')
td_ = main_table.find_all('td')
poet_list = []
for td in td_:
poem = parse_poem_profile_td(td)
if poem is not None:
poet_list.append(poem)
img_neg = soup.find('img', src='/Images/Pager/nextn.gif')
return (poet_list, img_neg is not None)
def read_poem(poem):
url = 'http://www.chinapoesy.com/' + poem.href
response = requests.get(url)
    if response.status_code != 200:
return None
soup = BeautifulSoup(response.text, features='lxml')
container = soup.find_all('div', class_='HeightBorderCenter')[-1]
return container.text.strip()
def main():
# delete the temp directory
has_next_page = True
page_num = 1
while has_next_page:
(current_list, has_next_page) = read_poem_list(page_num)
page_num = page_num + 1
for poem in current_list:
if(os.path.exists(poem.file_path())):
continue
content = read_poem(poem)
if not content:
print('Invalid content: ' + str(poem))
else:
write_poem(poem, content)
print('Page ' + str(page_num) + ' parsed')
main()
| 25.5 | 77 | 0.609444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.239906 |
dc05bab78000f85994987b7714d00eddf5ea82d2 | 439 | py | Python | design-patterns-101/Animal.py | stealthanthrax/python-design-patterns | 100884e1c5fe0fedaa0f1afa978ae79e53878f6b | [
"MIT"
]
| null | null | null | design-patterns-101/Animal.py | stealthanthrax/python-design-patterns | 100884e1c5fe0fedaa0f1afa978ae79e53878f6b | [
"MIT"
]
| 4 | 2020-10-01T15:56:00.000Z | 2020-10-08T12:34:17.000Z | design-patterns-101/Animal.py | stealthanthrax/python-design-patterns | 100884e1c5fe0fedaa0f1afa978ae79e53878f6b | [
"MIT"
]
| 2 | 2020-10-02T06:50:34.000Z | 2020-10-02T10:17:25.000Z | class Animal:
def __init__(self):
self.name = ""
self.weight = 0
self.sound = ""
def setName(self, name):
self.name = name
def getName(self):
return self.name
def setWeight(self, weight):
self.weight = weight
def getWeight(self):
return self.weight
def setSound(self, sound):
self.sound = sound
def getSound(self):
return self.sound
| 18.291667 | 32 | 0.560364 | 438 | 0.997722 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.009112 |
dc06b7c456a20378a588b26699aae0b601ae716d | 5,086 | py | Python | tests/test_events.py | hhtong/dwave-cloud-client | 45e4d1d4f187b10495e38d47478f2c8d87514434 | [
"Apache-2.0"
]
| null | null | null | tests/test_events.py | hhtong/dwave-cloud-client | 45e4d1d4f187b10495e38d47478f2c8d87514434 | [
"Apache-2.0"
]
| null | null | null | tests/test_events.py | hhtong/dwave-cloud-client | 45e4d1d4f187b10495e38d47478f2c8d87514434 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dwave.cloud.client import Client
from dwave.cloud.solver import Solver
from dwave.cloud.events import add_handler
class TestEventDispatch(unittest.TestCase):
def setUp(self):
# mock client
self.client = Client(token='token', solver={'qpu': True})
self.client._fetch_solvers = lambda **kw: self.solvers
self.client._submit = lambda *pa, **kw: None
# mock solvers
self.solver = Solver(client=self.client, data={
"properties": {
"supported_problem_types": ["qubo", "ising"],
"qubits": [0, 1, 2],
"couplers": [[0, 1], [0, 2], [1, 2]],
"num_qubits": 3,
"num_reads_range": [0, 100],
"parameters": {
"num_reads": "Number of samples to return.",
"postprocess": "either 'sampling' or 'optimization'"
},
"topology": {
"type": "chimera",
"shape": [16, 16, 4]
},
"category": "qpu",
"tags": ["lower_noise"]
},
"id": "solver1",
"description": "A test solver 1",
"status": "online"
})
self.solvers = [self.solver]
def test_validation(self):
"""Event name and handler are validated."""
with self.assertRaises(ValueError):
add_handler('invalid_event_name', lambda: None)
with self.assertRaises(TypeError):
add_handler('before_client_init', None)
def test_client_init(self):
"""Before/After client init events are dispatched with correct signatures."""
# setup event handlers
memo = {}
def handler(event, **data):
memo[event] = data
add_handler('before_client_init', handler)
add_handler('after_client_init', handler)
# client init
client = Client(token='token', unknown='unknown')
# test entry values
before = memo['before_client_init']
self.assertEqual(before['obj'], client)
self.assertEqual(before['args']['endpoint'], None)
self.assertEqual(before['args']['token'], 'token')
self.assertEqual(before['args']['kwargs']['unknown'], 'unknown')
# test exit values
after = memo['after_client_init']
self.assertEqual(after['obj'], client)
self.assertEqual(after['args']['token'], 'token')
self.assertEqual(after['args']['kwargs']['unknown'], 'unknown')
self.assertEqual(after['return_value'], None)
def test_get_solvers(self):
"""Before/After get_solvers events are dispatched with correct signatures."""
# setup event handlers
memo = {}
def handler(event, **data):
memo[event] = data
add_handler('before_get_solvers', handler)
add_handler('after_get_solvers', handler)
# get solver(s)
self.client.get_solver()
# test entry values
before = memo['before_get_solvers']
self.assertEqual(before['obj'], self.client)
self.assertIn('refresh', before['args'])
self.assertIn('filters', before['args'])
self.assertIn('qpu', before['args']['filters'])
# test exit values
after = memo['after_get_solvers']
self.assertEqual(after['obj'], self.client)
self.assertIn('qpu', after['args']['filters'])
self.assertEqual(after['return_value'], self.solvers)
def test_sample(self):
"""Before/After solver sample events are dispatched with correct signatures."""
# setup event handlers
memo = {}
def handler(event, **data):
memo[event] = data
add_handler('before_sample', handler)
add_handler('after_sample', handler)
# sample
lin = {0: 1}
quad = {(0, 1): 1}
params = dict(num_reads=100)
future = self.solver.sample_ising(lin, quad, **params)
# test entry values
before = memo['before_sample']
args = dict(type_='ising', linear=lin, quadratic=quad, params=params)
self.assertEqual(before['obj'], self.solver)
self.assertDictEqual(before['args'], args)
# test exit values
after = memo['after_sample']
self.assertEqual(after['obj'], self.solver)
self.assertDictEqual(after['args'], args)
self.assertEqual(after['return_value'], future)
| 34.835616 | 87 | 0.592411 | 4,365 | 0.858238 | 0 | 0 | 0 | 0 | 0 | 0 | 1,972 | 0.387731 |
dc06e2ba70d0080f14386cfea2dd13fc3ab64b71 | 12,084 | py | Python | ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | c561190ee2705b6af9432323d7639f6655c973e5 | [
"BSD-3-Clause"
]
| 3 | 2020-03-06T19:15:28.000Z | 2020-03-09T10:29:38.000Z | ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | c561190ee2705b6af9432323d7639f6655c973e5 | [
"BSD-3-Clause"
]
| null | null | null | ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | c561190ee2705b6af9432323d7639f6655c973e5 | [
"BSD-3-Clause"
]
| null | null | null | '''
Neural networks. Forward propagation in an already trained network in TensorFlow 2.0-2.1 (to use the network for classification).
TF 2.0:
Option 0 takes 0.08 sec.
Option 1 takes 0.08 sec.
Option 6 takes 0.08 sec.
Option 2 takes 4.7 sec.
Option 3 takes 1.6 sec.
Option 4 takes 5.2 sec.
Option 5 takes 0.08 sec.
Option 7 takes 0.06 sec.
If pred_digit = tf.map_fn(lambda x: ...) is used, then it's much slower:
Option 0 takes 1.75 sec.
Option 1 takes 1.75 sec.
Option 6 takes 1.8 sec.
Option 2 takes 6.1 sec.
Option 3 takes 3.1 sec.
Option 4 takes 6.3 sec.
Option 5 takes 1.8 sec.
Option 7 takes 1.8 sec.
TF 2.1: option==2, 3, 4, 5, 7 work; options 0, 1 and 6 fail with "AttributeError: 'RepeatedCompositeFieldContainer' object has no attribute 'append'" (But mine hasn't installed properly.)
Option 2 takes 4.5 sec.
Option 3 takes 1.5 sec.
Option 4 takes 4.4 sec.
Option 5 takes 0.08 sec.
Option 7 takes 0.06 sec.
If pred_digit = tf.map_fn(lambda x: ...) is used, then it's much slower:
Option 2 takes 5.7-6.1 sec.
Option 3 takes 3.1 sec.
Option 4 takes 5.7-6 sec.
Option 5 takes 1.8 sec.
Option 7 takes 1.8 sec.
Be careful:
According to tf.keras.layers.Dense (https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense):
output = activation(dot(input, kernel) + bias)
The kernel matrix multiplies from right! (And the inputs are seen as a row vector.) This is why I have to transpose the loaded network parameters Theta1 and Theta2.
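For example, Theta1 has shape (25, 401): its first column is the bias, so the code below passes kernel_initializer with Theta1[:,1:].T of shape (400, 25) and bias_initializer with Theta1[:,0] of shape (25,), matching input row vectors of length 400.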
Earlier, according to r1.15 tf.layers.dense documentation (https://www.tensorflow.org/api_docs/python/tf/layers/dense):
outputs = activation(inputs*kernel + bias)
[In the version for TensorFlow 1.x, there used to be two independent choices in program flow:
Option 1 is with tf.layers.Input()
Option 2 is without tf.layers.Input()
Option a processes single inputs (single images), takes 1.5 sec
Option b does batch processing of all images at once, takes 0.3 sec
]
Bence Mélykúti
09-19/03/2018, 27/01-07/02, 28/02/2020
'''
import numpy as np
import scipy.io # to open Matlab's .mat files
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import time
### User input ###
option = 7 # {0, 1, ..., 7}
### End of input ###
# The network parameters are here for info, they are not actually used.
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 1 to 10
# (note that we have mapped "0" to label 10)
# =========== Part 1: Loading [and Visualizing] Data =============
data = scipy.io.loadmat('../machine-learning-ex3/ex3/ex3data1.mat')
X = data['X']
y = data['y']
y = y % 10 # Transforming 10 to 0, which is its original meaning.
# ================ Part 2: Loading Parameters ================
# In this part of the exercise, we load the pre-initialized
# neural network parameters.
params = scipy.io.loadmat('../machine-learning-ex3/ex3/ex3weights.mat')
Theta1 = params['Theta1'] # Theta1 has size 25 x 401
Theta2 = params['Theta2'] # Theta2 has size 10 x 26
tf.keras.backend.clear_session()
start_time = time.time()
# ================= Part 3: Implement Predict =================
# After training a neural network, we would like to use it to predict
# the labels. You will now implement the "predict" function to use the
# neural network to predict the labels of the training set. This lets
# you compute the training set accuracy.
# Difference between tf.data.Dataset.from_tensors and tf.data.Dataset.from_tensor_slices: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices
# from_tensors reads all data at once; from_tensor_slices reads line by line, which is preferable for huge datasets
# With from_tensors, you'd also need to pull out each row from the tensor somehow.
# https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428
# https://www.tensorflow.org/programmers_guide/datasets#consuming_numpy_arrays
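# For example, from_tensor_slices(X) yields one element per image with shape (400,), whereas from_tensors(X) would yield a single element holding the whole array.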
# To narrow computation to a subset of data for quick testing:
#X, y = X[1990:2010,:], y[1990:2010,:]
if option==2 or option==3:
dataset = tf.data.Dataset.from_tensor_slices(X)
else:
dataset = tf.data.Dataset.from_tensor_slices(X).batch(X.shape[0])
#dataset = tf.data.Dataset.from_tensor_slices(X).batch(64) # this is about the same speed as .batch(X.shape[0])
#dataset = tf.data.Dataset.from_tensor_slices(X).batch(1) # this also works but it is 1.5x-4x slower
# It also works with tf.keras.initializers.Constant() in place of tf.constant_initializer because these are only aliases: https://www.tensorflow.org/api_docs/python/tf/constant_initializer .
if option==0:
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(Theta1.shape[0], activation='sigmoid', use_bias=True, kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), input_shape=[X.shape[1]]))
model.add(tf.keras.layers.Dense(Theta2.shape[0], activation='sigmoid', use_bias=True, kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]))) # One doesn't even need the second sigmoid activation function because it is monotone increasing and doesn't change the ordering for argmax.
pred = model.predict(dataset)
elif option==1:
# input_shape=[X.shape[1]] could be left out below
layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid', input_shape=[X.shape[1]]),
tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')] # One doesn't even need the second sigmoid activation function because it is monotone increasing and doesn't change the ordering for argmax.
# This doesn't work as tf.constant_initializer() doesn't take Tensors as input.
#layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer= tf.constant_initializer(tf.transpose(Theta1[:,1:])), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid'),
# tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer= tf.constant_initializer(tf.transpose(Theta2[:,1:])), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')]
# This doesn't work: ValueError: Could not interpret initializer identifier: tf.Tensor(...)
#layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.transpose(Theta1[:,1:]), bias_initializer=Theta1[:,0], activation='sigmoid'),
# tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.transpose(Theta2[:,1:]), bias_initializer=Theta2[:,0], activation='sigmoid')]
model = tf.keras.Sequential(layers)
#model = tf.keras.models.Sequential(layers) # This is just an alias of previous.
#model.build() # not necessary
pred = model.predict(dataset)
elif option==6:
class NNModel(tf.keras.Model):
def __init__(self, Theta1, Theta2):
super(NNModel, self).__init__(name='neural_network_model')
self.dense_1 = tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid', input_shape=[X.shape[1]])
self.dense_2 = tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')
def call(self, inputs):
# Define your forward pass here,
# using layers you previously defined (in `__init__`).
x = self.dense_1(inputs)
return self.dense_2(x)
model = NNModel(Theta1, Theta2)
pred = model.predict(dataset)
elif option in [2, 3, 4, 5]:
@tf.function
def evaluation(Theta1, Theta2, data):
# inside a @tf.function, I think all variables should be tf types, https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md
# https://www.tensorflow.org/guide/effective_tf2#use_keras_layers_and_models_to_manage_variables
l1 = tf.sigmoid(tf.matmul(data, Theta1[1:,:]) + Theta1[0,:])
l2 = tf.sigmoid(tf.matmul(l1, Theta2[1:,:]) + Theta2[0,:])
#l2 = tf.matmul(l1, Theta2[1:,:]) + Theta2[0,:] # One doesn't even need the last sigmoid function because it is monotone increasing and doesn't change the ordering for argmax.
return l2
if option==2:
pred = []
for entry in dataset:
#pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), entry.numpy().reshape((1,-1)))) # numpy reshape might be faster than tf.reshape
pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(entry, (1,-1)))) # doing it in TF
#pred = np.concatenate(pred, axis=0) # this also works
pred = tf.concat(pred, axis=0)
elif option==3:
pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(x, [1,-1])))
#pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), x)) # This doesn't work.
pred = tf.concat([entry for entry in pred], axis=0)
elif option==4:
pred = []
for batch in dataset:
for entry in batch:
pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(entry, (1,-1))))
pred = tf.concat(pred, axis=0)
else: # option==5
pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), x))
#pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(x, [-1,400]))) # This works, in same time.
pred = tf.concat([entry for entry in pred], axis=0)
else: # option==7
@tf.function
def evaluation2(Theta1k, Theta1b, Theta2k, Theta2b, data):
# inside a @tf.function, I think all variables should be tf types, https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md
l1 = tf.sigmoid(tf.matmul(data, Theta1k) + Theta1b)
l2 = tf.sigmoid(tf.matmul(l1, Theta2k) + Theta2b)
#l2 = tf.matmul(l1, Theta2k) + Theta2b # One doesn't even need the last sigmoid function because it is monotone increasing and doesn't change the ordering for argmax.
return l2
pred = dataset.map(lambda x: evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), x))
#pred = dataset.map(lambda x: evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), tf.reshape(x, [-1,400]))) # This works, in same time.
pred = tf.concat([entry for entry in pred], axis=0)
# It does not work in this simplest form:
#pred = evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), dataset)
#tf.print(pred)
# The output layer (pred) has 10 units, for digits 1,2,...,9,0. After taking argmax, you have to map the result of argmax, 0,1,2,...,9 to the required 1,2,...,9,0.
pred_digit = (tf.argmax(pred, axis=1) + 1) % 10
#pred_digit = tf.map_fn(lambda x: (tf.argmax(x, axis=0, output_type=tf.int32)+1) % 10, pred, dtype=tf.int32) # This is rather slow!
pred_np = pred_digit.numpy().reshape(-1,1)
print('\nTraining Set Accuracy: {0:.2f}%.'.format(np.mean(pred_np == y) * 100))
print('Expected accuracy on the complete Training Set (approx.): 97.5%.')
print('\nTime elapsed: {:.2f} sec'.format(time.time() - start_time))
print()
if option in [0, 1, 6]:
tf.print(model.summary()) # This provides interesting output.
plt.scatter(np.arange(len(y)), y, label='Ground truth')
plt.scatter(np.arange(len(y)), pred_np, marker=".", c='r', label='Prediction')
plt.xlabel('Sample ID')
plt.ylabel('Digit')
plt.legend()
plt.show()
| 48.923077 | 347 | 0.70043 | 763 | 0.063131 | 0 | 0 | 1,223 | 0.101191 | 0 | 0 | 7,636 | 0.631805 |